text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
# Data visualization of iris Dataset this is exploration of iris dataset using python visualizing modules for more info :- https://en.wikipedia.org/wiki/Iris_(plant) ``` # importing dapendent libraries import numpy as np import pandas as pd #The subprocess module allows you to spawn new processes, connect to their input/output/error pipes, and obtain their return codes. This module intends to replace several older modules and functions: # os.system ,os.spawn* ,os.popen* ,popen2.* , commands.* from subprocess import check_output %matplotlib inline import pandas as pd import warnings # in case due to compatiblity issues posed by the data visualization libraries import seaborn as sns # graph visualization library import matplotlib.pyplot as plt sns.set(style="darkgrid",color_codes = True) #loading the flower dataset iris = pd.read_csv("Iris.csv") #now for checking the dataframe of a particular type of species iris_ve = iris.loc[iris['Species']== "Iris-versicolor"] iris_ve.describe() #finding the statistics of species and their count iris["Species"].value_counts() #plotting the species according to the diffrent features displayed by the dataframe sns.FacetGrid(iris,hue="Species",size = 10)\ .map(plt.scatter,"SepalWidthCm","PetalWidthCm") \ .add_legend() #now plotting between sepal length and petal length sns.FacetGrid(iris,hue="Species",size = 10)\ .map(plt.scatter,"SepalLengthCm","PetalLengthCm") \ .add_legend() # thus the above two plots shows that sepal and petal length have more linear relation and can be classified # now plotting multivariable visualization # for more info , comsult https://pandas.pydata.org/pandas-docs/stable/visualization.html from pandas.tools.plotting import parallel_coordinates #just plotting the diffrent species accoring to the diffrent features ,one taken at a time r = parallel_coordinates(iris.drop("Id",axis=1),"Species") import matplotlib r.savefig(fname, dpi=None, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, 
format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) #plotting the figure with the paragraph ax = df.plot() fig = ax.get_figure() fig.savefig('plot.png') #now using bokeh library to plot bar and plotting diffrent species according to their color , length from bokeh.plotting import figure, output_notebook, show N = 4000 #x = np.random.random(size=N) * 100 #y = np.random.random(size=N) * 100 x = iris.PetalLengthCm y = iris.SepalLengthCm radii = np.random.random(size=N) * 1.5 colors = ["#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(np.floor(50+2*x), np.floor(30+2*y))] output_notebook() Plots="resize,crosshair,pan,wheel_zoom,box_zoom,reset,tap,previewsave,box_select,poly_select,lasso_select" p = figure(tools=Plots) p.scatter(x,y,radius=0.02, fill_alpha=0.9,line_color=None) #just testing the plotting of the Petal length and sepal length ( although quite odd) show(p) #showing histogram plot having the count of species according to length from bokeh.charts import Histogram hist = Histogram(iris, values="PetalLengthCm", color="Species", legend="top_right", bins=12) show(hist) from bokeh.charts import Bar, output_file, show p = Bar(iris, label='PetalLengthCm', values='PetalWidthCm', agg='median', group='Species', title="Y = Median PetalWidthCm X PetalLengthCm, grouped by Species", legend='top_right') show(p) ``` # traning model on iris data-set using knn classifier to predict the species using there features ``` #here we will be using the sklearn library which has prebuild many machine learning methods and datasets from sklearn.datasets import load_iris iris = load_iris() #store the values of data in X X = iris.data # store the output trained answers in Y y = iris.target #importing the knn clasifier function from sklearn.neighbors import KNeighborsClassifier # now we will "Instantiate" the "estimator" #where # "Estimator" is scikit-learn's term for model # "Instantiate" means "make an instance of" knn = KNeighborsClassifier(n_neighbors=1) 
print(knn) # now fiiting the input ufeatures and observed outputs and fitting the data knn.fit(X,y) #Now for predicting the outputs , passing the matrix of values X_new = [[3, 5, 4, 2], [5, 4, 3, 2]] knn.predict(X_new) ```
github_jupyter
# Masakhane - Reverse Machine Translation for African Languages (Using JoeyNMT) > ## NB >### - The purpose of this Notebook is to build models that translate African languages(target language) *into* English(source language). This will allow us to in future be able to make translations from one African language to the other. If you'd like to translate *from* English, please use [this](https://github.com/masakhane-io/masakhane-mt/blob/master/starter_notebook.ipynb) starter notebook instead. >### - We call this reverse training because normally we build models that make translations from the source language(English) to the target language. But in this case we are doing the reverse; building models that make translations from the target language to the source(English) ## Note before beginning: ### - The idea is that you should be able to make minimal changes to this in order to get SOME result for your own translation corpus. ### - The tl;dr: Go to the **"TODO"** comments which will tell you what to update to get up and running ### - If you actually want to have a clue what you're doing, read the text and peek at the links ### - With 100 epochs, it should take around 7 hours to run in Google Colab ### - Once you've gotten a result for your language, please attach and email your notebook that generated it to masakhanetranslation@gmail.com ### - If you care enough and get a chance, doing a brief background on your language would be amazing. See examples in [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) ## Retrieve your data & make a parallel corpus If you are wanting to use the JW300 data referenced on the Masakhane website or in our GitHub repo, you can use `opus-tools` to convert the data into a convenient format. `opus_read` from that package provides a convenient tool for reading the native aligned XML files and to convert them to TMX format. The tool can also be used to fetch relevant files from OPUS on the fly and to filter the data as necessary. 
[Read the documentation](https://pypi.org/project/opustools-pkg/) for more details. Once you have your corpus files in TMX format (an xml structure which will include the sentences in your target language and your source language in a single file), we recommend reading them into a pandas dataframe. Thankfully, Jade wrote a silly `tmx2dataframe` package which converts your tmx file to a pandas dataframe. ``` #from google.colab import drive #drive.mount('/content/drive') # TODO: Set your source and target languages. Keep in mind, these traditionally use language codes as found here: # These will also become the suffix's of all vocab and corpus files used throughout import os source_language = "en" target_language = "sn" lc = False # If True, lowercase the data. seed = 42 # Random seed for shuffling. tag = "baseline" # Give a unique name to your folder - this is to ensure you don't rewrite any models you've already submitted os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts os.environ["tgt"] = target_language os.environ["tag"] = tag # This will save it to a folder in our gdrive instead! !mkdir -p "/content/drive/My Drive/masakhane/$tgt-$src-$tag" os.environ["gdrive_path"] = "/content/drive/My Drive/masakhane/%s-%s-%s" % (target_language, source_language, tag) !echo $gdrive_path # Install opus-tools ! pip install opustools-pkg # Downloading our corpus ! opus_read -d JW300 -s $src -t $tgt -wm moses -w jw300.$src jw300.$tgt -q # extract the corpus file ! gunzip JW300_latest_xml_$src-$tgt.xml.gz # Download the global test set. ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en # And the specific test set for this language pair. os.environ["trg"] = target_language os.environ["src"] = source_language ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.en ! mv test.en-$trg.en test.en ! 
wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.$trg ! mv test.en-$trg.$trg test.$trg # Read the test data to filter from train and dev splits. # Store english portion in set for quick filtering checks. en_test_sents = set() filter_test_sents = "test.en-any.en" j = 0 with open(filter_test_sents) as f: for line in f: en_test_sents.add(line.strip()) j += 1 print('Loaded {} global test sentences to filter from the training/dev data.'.format(j)) import pandas as pd # TMX file to dataframe source_file = 'jw300.' + source_language target_file = 'jw300.' + target_language source = [] target = [] skip_lines = [] # Collect the line numbers of the source portion to skip the same lines for the target portion. with open(source_file) as f: for i, line in enumerate(f): # Skip sentences that are contained in the test set. if line.strip() not in en_test_sents: source.append(line.strip()) else: skip_lines.append(i) with open(target_file) as f: for j, line in enumerate(f): # Only add to corpus if corresponding source was not skipped. if j not in skip_lines: target.append(line.strip()) print('Loaded data and skipped {}/{} lines since contained in test set.'.format(len(skip_lines), i)) df = pd.DataFrame(zip(source, target), columns=['source_sentence', 'target_sentence']) # if you get TypeError: data argument can't be an iterator is because of your zip version run this below #df = pd.DataFrame(list(zip(source, target)), columns=['source_sentence', 'target_sentence']) df.head(3) ``` ## Pre-processing and export It is generally a good idea to remove duplicate translations and conflicting translations from the corpus. In practice, these public corpora include some number of these that need to be cleaned. In addition we will split our data into dev/test/train and export to the filesystem. 
``` # drop duplicate translations df_pp = df.drop_duplicates() # drop conflicting translations # (this is optional and something that you might want to comment out # depending on the size of your corpus) df_pp.drop_duplicates(subset='source_sentence', inplace=True) df_pp.drop_duplicates(subset='target_sentence', inplace=True) # Shuffle the data to remove bias in dev set selection. df_pp = df_pp.sample(frac=1, random_state=seed).reset_index(drop=True) # Install fuzzy wuzzy to remove "almost duplicate" sentences in the # test and training sets. ! pip install fuzzywuzzy ! pip install python-Levenshtein import time from fuzzywuzzy import process import numpy as np from os import cpu_count from functools import partial from multiprocessing import Pool # reset the index of the training set after previous filtering df_pp.reset_index(drop=False, inplace=True) # Remove samples from the training data set if they "almost overlap" with the # samples in the test set. # Filtering function. Adjust pad to narrow down the candidate matches to # within a certain length of characters of the given sample. 
def fuzzfilter(sample, candidates, pad): candidates = [x for x in candidates if len(x) <= len(sample)+pad and len(x) >= len(sample)-pad] if len(candidates) > 0: return process.extractOne(sample, candidates)[1] else: return np.nan # start_time = time.time() # ### iterating over pandas dataframe rows is not recomended, let use multi processing to apply the function # with Pool(cpu_count()-1) as pool: # scores = pool.map(partial(fuzzfilter, candidates=list(en_test_sents), pad=5), df_pp['source_sentence']) # hours, rem = divmod(time.time() - start_time, 3600) # minutes, seconds = divmod(rem, 60) # print("done in {}h:{}min:{}seconds".format(hours, minutes, seconds)) # # Filter out "almost overlapping samples" # df_pp = df_pp.assign(scores=scores) # df_pp = df_pp[df_pp['scores'] < 95] # This section does the split between train/dev for the parallel corpora then saves them as separate files # We use 1000 dev test and the given test set. import csv # Do the split between dev/train and create parallel corpora num_dev_patterns = 1000 # Optional: lower case the corpora - this will make it easier to generalize, but without proper casing. 
if lc: # Julia: making lowercasing optional df_pp["source_sentence"] = df_pp["source_sentence"].str.lower() df_pp["target_sentence"] = df_pp["target_sentence"].str.lower() # Julia: test sets are already generated dev = df_pp.tail(num_dev_patterns) # Herman: Error in original stripped = df_pp.drop(df_pp.tail(num_dev_patterns).index) with open("train."+source_language, "w") as src_file, open("train."+target_language, "w") as trg_file: for index, row in stripped.iterrows(): src_file.write(row["source_sentence"]+"\n") trg_file.write(row["target_sentence"]+"\n") with open("dev."+source_language, "w") as src_file, open("dev."+target_language, "w") as trg_file: for index, row in dev.iterrows(): src_file.write(row["source_sentence"]+"\n") trg_file.write(row["target_sentence"]+"\n") #stripped[["source_sentence"]].to_csv("train."+source_language, header=False, index=False) # Herman: Added `header=False` everywhere #stripped[["target_sentence"]].to_csv("train."+target_language, header=False, index=False) # Julia: Problematic handling of quotation marks. #dev[["source_sentence"]].to_csv("dev."+source_language, header=False, index=False) #dev[["target_sentence"]].to_csv("dev."+target_language, header=False, index=False) # Doublecheck the format below. There should be no extra quotation marks or weird characters. ! head train.* ! head dev.* ``` --- ## Installation of JoeyNMT JoeyNMT is a simple, minimalist NMT package which is useful for learning and teaching. Check out the documentation for JoeyNMT [here](https://joeynmt.readthedocs.io) ``` # Install JoeyNMT ! git clone https://github.com/joeynmt/joeynmt.git ! cd joeynmt; pip3 install . # Install Pytorch with GPU support v1.7.1. ! 
pip install torch==1.9.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html ``` # Preprocessing the Data into Subword BPE Tokens - One of the most powerful improvements for agglutinative languages (a feature of most Bantu languages) is using BPE tokenization [ (Sennrich, 2015) ](https://arxiv.org/abs/1508.07909). - It was also shown that by optimizing the umber of BPE codes we significantly improve results for low-resourced languages [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021) [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) - Below we have the scripts for doing BPE tokenization of our data. We use 4000 tokens as recommended by [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021). You do not need to change anything. Simply running the below will be suitable. ``` # One of the huge boosts in NMT performance was to use a different method of tokenizing. # Usually, NMT would tokenize by words. However, using a method called BPE gave amazing boosts to performance # Do subword NMT from os import path os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts os.environ["tgt"] = target_language # Learn BPEs on the training data. os.environ["data_path"] = path.join("joeynmt", "data",target_language + source_language ) # Herman! ! subword-nmt learn-joint-bpe-and-vocab --input train.$src train.$tgt -s 4000 -o bpe.codes.4000 --write-vocabulary vocab.$src vocab.$tgt # Apply BPE splits to the development and test data. ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < train.$src > train.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < train.$tgt > train.bpe.$tgt ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < dev.$src > dev.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < dev.$tgt > dev.bpe.$tgt ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < test.$src > test.bpe.$src ! 
subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < test.$tgt > test.bpe.$tgt # Create directory, move everyone we care about to the correct location ! mkdir -p $data_path ! cp train.* $data_path ! cp test.* $data_path ! cp dev.* $data_path ! cp bpe.codes.4000 $data_path ! ls $data_path # Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path ! cp train.* "$gdrive_path" ! cp test.* "$gdrive_path" ! cp dev.* "$gdrive_path" ! cp bpe.codes.4000 "$gdrive_path" ! ls "$gdrive_path" # Create that vocab using build_vocab ! sudo chmod 777 joeynmt/scripts/build_vocab.py ! joeynmt/scripts/build_vocab.py joeynmt/data/$tgt$src/train.bpe.$src joeynmt/data/$tgt$src/train.bpe.$tgt --output_path joeynmt/data/$tgt$src/vocab.txt # Some output ! echo "BPE Shona Sentences" ! tail -n 5 test.bpe.$tgt ! echo "Combined BPE Vocab" ! tail -n 10 joeynmt/data/$tgt$src/vocab.txt # Herman # Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path ! cp train.* "$gdrive_path" ! cp test.* "$gdrive_path" ! cp dev.* "$gdrive_path" ! cp bpe.codes.4000 "$gdrive_path" ! ls "$gdrive_path" ``` # Creating the JoeyNMT Config JoeyNMT requires a yaml config. We provide a template below. We've also set a number of defaults with it, that you may play with! - We used Transformer architecture - We set our dropout to reasonably high: 0.3 (recommended in [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021)) Things worth playing with: - The batch size (also recommended to change for low-resourced languages) - The number of epochs (we've set it at 30 just so it runs in about an hour, for testing purposes) - The decoder options (beam_size, alpha) - Evaluation metrics (BLEU versus Crhf4) ``` # This creates the config file for our JoeyNMT system. 
It might seem overwhelming so we've provided a couple of useful parameters you'll need to update # (You can of course play with all the parameters if you'd like!) name = '%s%s' % (target_language, source_language) # gdrive_path = os.environ["gdrive_path"] # Create the config config = """ name: "{target_language}{source_language}_reverse_transformer" data: src: "{target_language}" trg: "{source_language}" train: "data/{name}/train.bpe" dev: "data/{name}/dev.bpe" test: "data/{name}/test.bpe" level: "bpe" lowercase: False max_sent_length: 100 src_vocab: "data/{name}/vocab.txt" trg_vocab: "data/{name}/vocab.txt" testing: beam_size: 5 alpha: 1.0 training: #load_model: "{gdrive_path}/models/{name}_transformer/1.ckpt" # if uncommented, load a pre-trained model from this checkpoint random_seed: 42 optimizer: "adam" normalization: "tokens" adam_betas: [0.9, 0.999] scheduling: "noam" # TODO: try switching from plateau to Noam scheduling patience: 5 # For plateau: decrease learning rate by decrease_factor if validation score has not improved for this many validation rounds. learning_rate_factor: 0.5 # factor for Noam scheduler (used with Transformer) learning_rate_warmup: 1000 # warmup steps for Noam scheduler (used with Transformer) decrease_factor: 0.7 loss: "crossentropy" learning_rate: 0.0003 learning_rate_min: 0.00000001 weight_decay: 0.0 label_smoothing: 0.1 batch_size: 4096 batch_type: "token" eval_batch_size: 3600 eval_batch_type: "token" batch_multiplier: 1 early_stopping_metric: "ppl" epochs: 2 # TODO: Decrease for when playing around and checking of working. Around 30 is sufficient to check if its working at all validation_freq: 1000 # TODO: Set to at least once per epoch. logging_freq: 100 eval_metric: "bleu" model_dir: "models/{name}_reverse_transformer" overwrite: True # TODO: Set to True if you want to overwrite possibly existing models. 
shuffle: True use_cuda: True max_output_length: 100 print_valid_sents: [0, 1, 2, 3] keep_last_ckpts: 3 model: initializer: "xavier" bias_initializer: "zeros" init_gain: 1.0 embed_initializer: "xavier" embed_init_gain: 1.0 tied_embeddings: True tied_softmax: True encoder: type: "transformer" num_layers: 6 num_heads: 4 # TODO: Increase to 8 for larger data. embeddings: embedding_dim: 256 # TODO: Increase to 512 for larger data. scale: True dropout: 0.2 # typically ff_size = 4 x hidden_size hidden_size: 256 # TODO: Increase to 512 for larger data. ff_size: 1024 # TODO: Increase to 2048 for larger data. dropout: 0.3 decoder: type: "transformer" num_layers: 6 num_heads: 4 # TODO: Increase to 8 for larger data. embeddings: embedding_dim: 256 # TODO: Increase to 512 for larger data. scale: True dropout: 0.2 # typically ff_size = 4 x hidden_size hidden_size: 256 # TODO: Increase to 512 for larger data. ff_size: 1024 # TODO: Increase to 2048 for larger data. dropout: 0.3 """.format(name=name, gdrive_path=os.environ["gdrive_path"], source_language=source_language, target_language=target_language) with open("joeynmt/configs/transformer_reverse_{name}.yaml".format(name=name),'w') as f: f.write(config) ``` # Train the Model This single line of joeynmt runs the training using the config we made above ``` pip install torch==1.9.0 # Train the model # You can press Ctrl-C to stop. And then run the next cell to save your checkpoints! !cd joeynmt; python3 -m joeynmt train configs/transformer_reverse_$tgt$src.yaml # Copy the created models from the notebook storage to google drive for persistant storage !cp -r joeynmt/models/${tgt}${src}_reverse_transformer/* "$gdrive_path" # Output our validation accuracy ! cat "$gdrive_path/validations.txt" # Test our model ! cd joeynmt; python3 -m joeynmt test "$gdrive_path/config.yaml" ```
github_jupyter
<a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/jvcqp2iy2jlx2b32rmzdt0tx8lvxgzkp.png" width = 300, align = "center"></a> <h1 align=center><font size = 5> RESTRICTED BOLTZMANN MACHINES</font></h1> ### Introduction __Restricted Boltzmann Machine (RBM):__ RBMs are shallow neural nets that learn to reconstruct data by themselves in an unsupervised fashion. ## Table of Contents <div class="alert alert-block alert-info" style="margin-top: 20px"> - <p><a href="#ref1">Initialization</a></p> - <p><a href="#ref2">RBM Layers</a></p> - <p><a href="#ref3">How an RBM works in Deep Learning</a></p> - <p><a href="#ref4">RBM training</a></p> <p></p> </div> ---------------- #### How does it work? Simply, RBM takes the inputs and translates them to a set of numbers that represents them. Then, these numbers can be translated back to reconstruct the inputs. Through several forward and backward passes, the RBM will be trained, and a trained RBM can reveal which features are the most important ones when detecting patterns. #### Why are RBMs important? It can automatically extract __meaningful__ features from a given input. #### What are the applications of RBM? RBM is useful for <a href='http://www.cs.utoronto.ca/~hinton/absps/netflixICML.pdf'> Collaborative Filtering</a>, dimensionality reduction, classification, regression, feature learning, topic modeling and even __Deep Belief Networks__. #### Is RBM a generative model? RBM is a generative model. What is a generative model? First, lets see what is different betwee discriminative and generative model: __Discriminative:__Consider a classification problem in which we want to learn to distinguish between Sedan cars (y = 1) and SUV cars (y = 0), based on some features of an cars. Given a training set, an algorithm like logistic regression tries to find a straight line—that is, a decision boundary—that separates the suv and sedan. 
__Generative:__ looking at cars, we can build a model of what Sedan cars look like. Then, looking at SUVs, we can build a separate model of what SUV cars look like. Finally, to classify a new car, we can match the new car against the Sedan model, and match it against the SUV model, to see whether the new car looks more like the SUV or Sedan. Generative Models specify a probability distribution over a dataset of input vectors. we can do both supervise and unsupervise tasks with generative models: - In an unsupervised task, we try to form a model for P(x), where x is an input vector. - In the supervised task, we first form a model for P(x|y), where y is the label for x. For example, if y indicates whether an example is a SUV (0) or a Sedan (1), then p(x|y = 0) models the distribution of SUVs’ features, and p(x|y = 1) models the distribution of Sedans’ features. If we manage to find P(x|y) and P(y), then we can use `bayes rule` to estimate P(y|x), because: p(y|x) = p(x|y)p(y)/p(x) Can we build a generative model, and then use it to create synthetic data by directly sampling from the modelled probability distributions? Lets see. <a id="ref1"></a> ### Initialization First we have to load the utility file which contains different utility functions that are not connected in any way to the networks presented in the tutorials, but rather help in processing the outputs into a more understandable way. 
``` import urllib.request with urllib.request.urlopen("http://deeplearning.net/tutorial/code/utils.py") as url: response = url.read() target = open('utils.py', 'w') target.write(response.decode('utf-8')) target.close() ``` Now, we load in all the packages that we use to create the net including the TensorFlow package: ``` import tensorflow as tf import numpy as np from tensorflow.examples.tutorials.mnist import input_data #!pip install pillow from PIL import Image #import Image from utils import tile_raster_images import matplotlib.pyplot as plt %matplotlib inline ``` We will be using the MINST dataset to practice the usage of RBMs. The following cell loads the MINST dataset. ``` mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels ``` ---------------- <a id="ref2"></a> ### RBM layers An RBM has two layers. The first layer of the RBM is called the __visible__ (or input layer). MNIST images have 784 pixels, so the visible layer must have 784 input nodes. The second layer is the __hidden__ layer, which possesses i neurons in our case. Each hidden unit has a binary state, which we’ll call it __si__, and turns either on or off (i.e., si = 1 or si = 0) with a probability that is a logistic function of the inputs it receives from the other j visible units, called for example, p(si = 1). For our case, we'll use 500 nodes in the hidden layer, so i = 500. <img src="https://ibm.box.com/shared/static/eu26opvcefgls6vnwuo29uwp0nudmokh.png" alt="RBM Model" style="width: 400px;"/> Each node in the first layer also has a __bias__. We will denote the bias as “vb” for the visible units. The _vb_ is shared among all visible units. Here we define the __bias__ of second layer as well. We will denote the bias as “hb” for the hidden units. 
The _hb_ is shared among all visible units ``` vb = tf.placeholder("float", [784]) hb = tf.placeholder("float", [500]) ``` We have to define weights among the input layer and hidden layer nodes. In the weight matrix, the rows are equal to the input nodes, and the columns are equal to the output nodes. Let __W__ be the Tensor of 784x500 (784 - number of visible neurons, 500 - number of hidden neurons) that represents weights between neurons. ``` W = tf.placeholder("float", [784, 500]) ``` ---------------- <a id="ref3"></a> ### What RBM can do after training? Think RBM as a model that have been trained, and now it can calculate the probability of observing a case (e.g. wet road) given some hidden/latent values (e.g. raining). That is, the RBM can be viewed as a generative model that assigns a probability to each possible `binary state vectors` over its visible units (v). What are the possible __binary states vectors__? - The visible layer can have different binary states, or so called, configurations. For example, in the 7 unit visible layer (above photo), it has ${2^{7}}$ differnt configurations, and each configuration has its probablity (assuming we dont have any bias). - (0,0,0,0,0,0,0) --> p(config1)=p(v1)=p(s1=0,s2=0, .., s7=0) - (0,0,0,0,0,0,1) --> p(config2)=p(v2)=p(s1=0,s2=1, .., s7=1) - (0,0,0,0,0,1,0) --> p(config3)=p(v3)=p(s1=1,s2=0, .., s7=0) - (0,0,0,0,0,1,1) --> p(config4)=p(v4)=p(s1=1,s2=1, .., s7=1) - etc. So, for example if we have 784 units in the visible layer, it will generates a probability distribution over all the ${2^{784}}$ possible visible vectors, i.e, p(v). Now, it would be really cool, if a model (after training) can calculate the probablity of visible layer, given hidden layer values. ### How to train an RBM? RBM has two phases: 1) Forward Pass, and 2) Backward Pass or Reconstruction: __Phase 1) Forward pass:__ Processing happens in each node in the hidden layer. 
That is, input data from all visible nodes are being passed to all hidden nodes. This computation begins by making stochastic decisions about whether to transmit that input or not (i.e. to determine the state of each hidden layer). At the hidden layer's nodes, __X__ is multiplied by a __W__ and added to __h_bias__. The result of those two operations is fed into the sigmoid function, which produces the node’s output/state. As a result, one output is produced for each hidden node. So, for each row in the training set, __a tensor of probabilities__ is generated, which in our case it is of size [1x500], and totally 55000 vectors (_h0_=[55000x500]). Then, we take the tensor of probabilities (as from a sigmoidal activation) and make samples from all the distributions, __h0__. That is, we sample the activation vector from the probability distribution of hidden layer values. Samples are used to estimate the negative phase gradient which will be explained later. ``` X = tf.placeholder("float", [None, 784]) _h0 = tf.nn.sigmoid(tf.matmul(X, W) + hb) #probabilities of the hidden units h0 = tf.nn.relu(tf.sign(_h0 - tf.random_uniform(tf.shape(_h0)))) #sample_h_given_X ``` Before we go further, let's look at an example of sampling: ``` with tf.Session() as sess: a = tf.constant([0.7, 0.1, 0.8, 0.2]) print (sess.run(a)) b = sess.run(tf.random_uniform(tf.shape(a))) print (b) print (sess.run(a-b)) print (sess.run(tf.sign( a - b))) print (sess.run(tf.nn.relu(tf.sign( a - b)))) ``` __Phase 2) Backward Pass (Reconstruction):__ The RBM reconstructs data by making several forward and backward passes between the visible and hidden layers. So, in the second phase (i.e. reconstruction phase), the samples from the hidden layer (i.e. h0) play the role of input. That is, __h0__ becomes the input in the backward pass. The same weight matrix and visible layer biases are used to go through the sigmoid function. 
The produced output is a reconstruction which is an approximation of the original input. ``` _v1 = tf.nn.sigmoid(tf.matmul(h0, tf.transpose(W)) + vb) v1 = tf.nn.relu(tf.sign(_v1 - tf.random_uniform(tf.shape(_v1)))) #sample_v_given_h h1 = tf.nn.sigmoid(tf.matmul(v1, W) + hb) ``` Reconstruction steps: - Get one data point from data set, like _x_, and pass it through the net - Pass 0: (x) -> (x:-:_h0) -> (h0:-:v1) (v1 is reconstruction of the first pass) - Pass 1: (v1) -> (v1:-:h1) -> (_h0:-:v2) (v2 is reconstruction of the second pass) - Pass 2: (v2) -> (v2:-:h2) -> (_h1:-:v3) (v3 is reconstruction of the third pass) - Pass n: (vn) -> (vn:-:hn+1) -> (_hn:-:vn+1)(vn is reconstruction of the nth pass) ---------------- <a id="ref4"></a> ### How to calculate gradients? In order to train an RBM, we have to maximize the product of probabilities assigned to the training set V (a matrix, where each row of it is treated as a visible vector v): <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/d42e9f5aad5e1a62b11b119c9315236383c1864a" > Or equivalently, maximize the expected log probability of V: <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/ba0ceed99dca5ff1d21e5ace23f5f2223f19efc0" > Equivalently, we can define the objective function as __the average negative log-likelihood__ and try to minimize it. To achieve this, we need the partial derivative of this function in respect to all of its parameters. And it can be shown that the above equation is indirectly the weights and biases function, so minimization of the objective function here means modifying/optimizing the weight vector W. So, we can use __stochastic gradient descent__ to find the optimal weight and consequently minimize the objective function. When we derive, it give us 2 terms, called positive and negative gradient. These negative and positive phases reflect their effect on the probability density defined by the model. 
The positive one depends on observations (X), and the second one depends on only the model. The __Positive phase__ increases the probability of training data. The __Negative phase__ decreases the probability of samples generated by the model. The negative phase is hard to compute, so we use a method called __Contrastive Divergence (CD)__ to approximate it. It is designed in such a way that at least the direction of the gradient estimate is somewhat accurate, even when the size is not (In real world models, more accurate techniques like CD-k or PCD are used to train RBMs). During the calculation of CD, we have to use __Gibbs sampling__ to sample from our model distribution. Contrastive Divergence is actually matrix of values that is computed and used to adjust values of the W matrix. Changing W incrementally leads to training of W values. Then on each step (epoch), W is updated to a new value W' through the equation below: $W' = W + alpha * CD$ __ What is Alpha?__ Here, alpha is some small step rate and is also known as the "learning rate". __How can we calculate CD?__ We can perform single-step Contrastive Divergence (CD-1) taking the following steps: 1. Take a training sample from X, compute the probabilities of the hidden units and sample a hidden activation vector h0 from this probability distribution. - $\_h0 = sigmoid(X \otimes W + hb)$ - $h0 = sampleProb(h0)$ 2. Compute the [outer product](https://en.wikipedia.org/wiki/Outer_product) of X and h0 and call this the positive gradient. - $w\_pos\_grad = X \otimes h0$ (Reconstruction in the first pass) 3. From h, reconstruct v1, and then take a sample of the visible units, then resample the hidden activations h1 from this. (**Gibbs sampling step**) - $\_v1 = sigmoid(h0 \otimes transpose(W) + vb)$ - $v1 = sample_prob(v1)$ (Sample v given h) - $h1 = sigmoid(v1 \otimes W + hb)$ 4. Compute the outer product of v1 and h1 and call this the negative gradient. - $w\_neg\_grad = v1 \otimes h1$ (Reconstruction 1) 5. 
Now, CD equals the positive gradient minus the negative gradient; CD is a matrix of size 784x500. - $CD = (w\_pos\_grad - w\_neg\_grad) / datapoints$ 6. Update the weights by adding the learning rate times CD - $W' = W + alpha*CD$ 7. At the end of the algorithm, the visible nodes will store the value of the sample. #### What is sampling here (sampleProb)? In the forward pass: We randomly set the values of each hi to be 1 with probability $sigmoid(v \otimes W + hb)$. In reconstruction: We randomly set the values of each vi to be 1 with probability $ sigmoid(h \otimes transpose(W) + vb)$. ``` alpha = 1.0 w_pos_grad = tf.matmul(tf.transpose(X), h0) w_neg_grad = tf.matmul(tf.transpose(v1), h1) CD = (w_pos_grad - w_neg_grad) / tf.to_float(tf.shape(X)[0]) update_w = W + alpha * CD update_vb = vb + alpha * tf.reduce_mean(X - v1, 0) update_hb = hb + alpha * tf.reduce_mean(h0 - h1, 0) ``` ### What is the objective function? __Goal: Maximize the likelihood of our data being drawn from that distribution__ __Calculate error:__ In each epoch, we compute the "error" as a sum of the squared difference between step 1 and step n, i.e. the error shows the difference between the data and its reconstruction. __Note:__ tf.reduce_mean computes the mean of elements across dimensions of a tensor. 
``` err = tf.reduce_mean(tf.square(X - v1)) ``` Let's start a session and initialize the variables: ``` cur_w = np.zeros([784, 500], np.float32) cur_vb = np.zeros([784], np.float32) cur_hb = np.zeros([500], np.float32) prv_w = np.zeros([784, 500], np.float32) prv_vb = np.zeros([784], np.float32) prv_hb = np.zeros([500], np.float32) sess = tf.Session() init = tf.global_variables_initializer() sess.run(init) ``` Let look at the error of the first run: ``` sess.run(err, feed_dict={X: trX, W: prv_w, vb: prv_vb, hb: prv_hb}) ``` To recall, the whole algorithm works as: - For each epoch, and for each batch do: - Compute CD as: - For each data point in batch do: - w_pos_grad = 0, w_neg_grad= 0 (matrices) - Pass data point through net, calculating v (reconstruction) and h - update w_pos_grad = w_pos_grad + X$\otimes$h0 - update w_neg_grad = w_neg_grad + v1$\otimes$h1 - CD = average of pos_grad and neg_grad by dividing them by the amount of data points. - Update weights and biases W' = W + alpha * CD - Calculate error - Repeat for the next epoch until error is small or after some fixed number of epochs. ``` #Parameters epochs = 5 batchsize = 100 weights = [] errors = [] for epoch in range(epochs): for start, end in zip( range(0, len(trX), batchsize), range(batchsize, len(trX), batchsize)): batch = trX[start:end] cur_w = sess.run(update_w, feed_dict={ X: batch, W: prv_w, vb: prv_vb, hb: prv_hb}) cur_vb = sess.run(update_vb, feed_dict={ X: batch, W: prv_w, vb: prv_vb, hb: prv_hb}) cur_hb = sess.run(update_hb, feed_dict={ X: batch, W: prv_w, vb: prv_vb, hb: prv_hb}) prv_w = cur_w prv_vb = cur_vb prv_hb = cur_hb if start % 10000 == 0: errors.append(sess.run(err, feed_dict={X: trX, W: cur_w, vb: cur_vb, hb: cur_hb})) weights.append(cur_w) print ('Epoch: %d' % epoch,'reconstruction error: %f' % errors[-1]) plt.plot(errors) plt.xlabel("Batch Number") plt.ylabel("Error") plt.show() ``` What is the last weight after training? 
``` uw = weights[-1].T print (uw) # a weight matrix of shape (500,784) ``` We can take each hidden unit and visualize the connections between that hidden unit and each element in the input vector. Let's plot the current weights: __tile_raster_images__ helps in generating an easy-to-grasp image from a set of samples or weights. It transforms the __uw__ (with one flattened image per row of size 784), into an array (of size $25*20$) in which images are reshaped and laid out like tiles on a floor. ``` tile_raster_images(X=cur_w.T, img_shape=(28, 28), tile_shape=(25, 20), tile_spacing=(1, 1)) import matplotlib.pyplot as plt from PIL import Image %matplotlib inline image = Image.fromarray(tile_raster_images(X=cur_w.T, img_shape=(28, 28) ,tile_shape=(25, 20), tile_spacing=(1, 1))) ### Plot image plt.rcParams['figure.figsize'] = (18.0, 18.0) imgplot = plt.imshow(image) imgplot.set_cmap('gray') ``` Each tile in the above visualization corresponds to a vector of connections between a hidden unit and visible layer's units. Let's look at one of the learned weights corresponding to one of the hidden units for example. In this particular square, the gray color represents weight = 0, and the whiter it is, the more positive the weights are (closer to 1). Conversely, the darker pixels are, the more negative the weights. The positive pixels will increase the probability of activation in hidden units (after multiplying by input/visible pixels), and negative pixels will decrease the probability of a hidden unit to be 1 (activated). So, why is this important? So we can see that this specific square (hidden unit) can detect a feature (e.g. a "/" shape) and if it exists in the input. ``` from PIL import Image image = Image.fromarray(tile_raster_images(X=cur_w.T[10:11], img_shape=(28, 28),tile_shape=(1, 1), tile_spacing=(1, 1))) ### Plot image plt.rcParams['figure.figsize'] = (4.0, 4.0) imgplot = plt.imshow(image) imgplot.set_cmap('gray') ``` Let's look at the reconstruction of an image. 
First we plot one of images: ``` sample_case = trX[1:2] img = Image.fromarray(tile_raster_images(X=sample_case, img_shape=(28, 28),tile_shape=(1, 1), tile_spacing=(1, 1))) plt.rcParams['figure.figsize'] = (2.0, 2.0) imgplot = plt.imshow(img) imgplot.set_cmap('gray') #you can experiment different colormaps (Greys,winter,autumn) ``` Now let's pass this image through the net: ``` hh0 = tf.nn.sigmoid(tf.matmul(X, W) + hb) vv1 = tf.nn.sigmoid(tf.matmul(hh0, tf.transpose(W)) + vb) feed = sess.run(hh0, feed_dict={ X: sample_case, W: prv_w, hb: prv_hb}) rec = sess.run(vv1, feed_dict={ hh0: feed, W: prv_w, vb: prv_vb}) ``` Here we plot the reconstructed image: ``` img = Image.fromarray(tile_raster_images(X=rec, img_shape=(28, 28),tile_shape=(1, 1), tile_spacing=(1, 1))) plt.rcParams['figure.figsize'] = (2.0, 2.0) imgplot = plt.imshow(img) imgplot.set_cmap('gray') ``` ## Want to learn more? Running deep learning programs usually needs a high performance platform. PowerAI speeds up deep learning and AI. Built on IBM's Power Systems, PowerAI is a scalable software platform that accelerates deep learning and AI with blazing performance for individual users or enterprises. The PowerAI platform supports popular machine learning libraries and dependencies including Tensorflow, Caffe, Torch, and Theano. You can download a [free version of PowerAI](https://cocl.us/ML0120EN_PAI). Also, you can use Data Science Experience to run these notebooks faster with bigger datasets. Data Science Experience is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, DSX enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of DSX users today with a free account at [Data Science Experience](https://cocl.us/ML0120EN_DSX)This is the end of this lesson. 
Hopefully, now you have a deeper and more intuitive understanding of the RBM model. Thank you for reading this notebook, and good luck on your studies. Created by: <a href = "https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>, Gabriel Garcez Barros Souza ### References: https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine http://deeplearning.net/tutorial/rbm.html http://deeplearning4j.org/restrictedboltzmannmachine.html http://imonad.com/rbm/restricted-boltzmann-machine/
github_jupyter
``` import pandas as pd import numpy as np from scipy import stats import math import matplotlib as mpl import matplotlib.pyplot as plt import pylab as pl %matplotlib inline from scipy.stats import kde import seaborn as sns import gdal import os print('All packages imported succesfully') # Define the year year = '2018' #year = '2017' # Make some strings and some directories to save things in str_ECDF = 'ECDF_'+str(year)+'_calibrated' os.mkdir('../results/'+str_ECDF) str_histogram = 'histogram_'+str(year)+'_calibrated' os.mkdir('../results/'+str_histogram) str_textfiles = 'textfiles_'+str(year)+'_calibrated' os.mkdir('../results/'+str_textfiles) str_topo = 'topo_2DH_'+str(year)+'_calibrated' os.mkdir('../results/'+str_topo) str_stats = 'stats_'+str(year)+'_calibrated' # The next line will need to be commented out if the script has been run more than once os.mkdir('../results/stats') # Define the rank from the input filename run = ['r16_1', 'r16_2','r16_3','r16_4','r16_5'] # Define which files you want to print out from the rank list above, accessed by position in the list. 
allfiles = range(0,5) # Define the limits for the topo 2DH figs xlim_yearly=(0,6) def datasets(rank,year): ######################################### # Bring in all the geotiff data, reshape and remove some types of grid cells rs_data = gdal.Open('../data/aligned_resampled_'+str(year)+'_calibrated/rs_interpolated_'+str(year)+'_resample.tif') wiAssim_data = gdal.Open('../data/aligned_resampled_'+str(year)+'_calibrated/snowd_wiAssim_'+rank+'_'+str(year)+'_resample.tif') woAssim_data = gdal.Open('../data/aligned_resampled_'+str(year)+'_calibrated/snowd_woAssim_'+str(year)+'_resample.tif') ### Bring in the remote sensing geotiff as a numpy array, and then data frame rs_np = np.array(rs_data.GetRasterBand(1).ReadAsArray()) rs_df = pd.DataFrame(rs_np) rs = rs_df.values.flatten().tolist() #print(len(rs)) # ### Bring in the wiAssim geotiff as a numpy array, and then data frame wiAssim_np = np.array(wiAssim_data.GetRasterBand(1).ReadAsArray()) wiAssim_df = pd.DataFrame(wiAssim_np) wiAssim = wiAssim_df.values.flatten().tolist() #print(len(wiAssim)) # ### Bring in the woAssim geotiff as a numpy array, and then data frame woAssim_np = np.array(woAssim_data.GetRasterBand(1).ReadAsArray()) woAssim_df = pd.DataFrame(woAssim_np) woAssim = woAssim_df.values.flatten().tolist() #print(len(woAssim)) ### Bring in the model veg file geotiff as a data frame veg_geo = gdal.Open('../data/topo_veg/tp_veg_resample_'+str(year)+'.tif') veg_np = np.array(veg_geo.GetRasterBand(1).ReadAsArray()) veg_df = pd.DataFrame(veg_np) veg = veg_df.values.flatten().tolist() #print(len(veg)) ### Bring in the model DEM file geotiff as a data frame topo_geo = gdal.Open('../data/topo_veg/tp_topo_resample_'+str(year)+'.tif') topo_np = np.array(topo_geo.GetRasterBand(1).ReadAsArray()) topo_df = pd.DataFrame(topo_np) topo = topo_df.values.flatten().tolist() #print(len(veg)) # Create a dataframe from the multiple lists # Name the column headers and column contents dataset = pd.DataFrame({'rs': rs,'wiAssim': 
wiAssim,'woAssim': woAssim,'veg': veg,'topo': topo}) #print(len(data_2017)) ## Remove the negative and no-data values dataset_rzero = dataset[dataset.rs >= 0] print(len(dataset_rzero),'# of '+str(year)+' Grid cells from '+rank+' simulation that are greater than zero') # Remove glacier cells dataset_rg = dataset_rzero[dataset_rzero.veg != 20] print(len(dataset_rg),'# of '+str(year)+' Grid cells from '+rank+' simulation without glacier cell values') # Remove the high end rs values data = dataset_rg[dataset_rg.rs <=10] print(len(data),'# of '+str(year)+' Grid cells from '+rank+' simulation without extreme remote sensing values') return data def rmse(predictions, targets): differences = predictions - targets differences_squared = differences ** 2 mean_of_differences_squared = differences_squared.mean() rmse_val = np.sqrt(mean_of_differences_squared) return rmse_val def RMSE_wiAssim(data): # Run the RMSE function on the wiAssim dataset predictions_wi = data.wiAssim targets = data.rs rmse_values_wi = rmse(predictions_wi,targets) return rmse_values_wi def RMSE_woAssim(data): # Run the RMSE function on the woAssim dataset predictions_wo = data.woAssim targets = data.rs rmse_values_wo = rmse(predictions_wo,targets) return rmse_values_wo def histogram_plot(data): sns.set(style="white", palette="muted", color_codes=True) sns.set_context("notebook",font_scale=1.5, rc={"lines.linewidth": 2.5}) # RS Dataset RS = data.rs rs_sd = np.std(RS) rs_m = np.median(RS) rs_max = np.max(RS) bins_rs = int((rs_max/rs_max)*100) print(bins_rs,'nbins RS') print(round(rs_m,3), 'RS Median') print(rs_max,'RS Max') # woAssim woAssim = data.woAssim wo_sd = np.std(woAssim) wo_m = np.median(woAssim) wo_max = np.max(woAssim) bins_wo = int((wo_max/rs_max)*100) print(bins_wo,'nbins No CSO') print(round(wo_m,3), 'Median No CSO') print(wo_max,'Max No CSO') # wiAssim wiAssim = data.wiAssim wi_sd = np.std(wiAssim) wi_m = np.median(wiAssim) wi_max = np.max(wiAssim) bins_wi = int((wi_max/rs_max)*100) 
print(bins_wi, 'nbins With CSO') print(round(wi_m,3), 'Median With CSO') print(wi_max, 'Max With CSO') # Plot a simple histogram with binsize determined automatically sns.distplot(RS, kde=False, color="slategray", bins=bins_rs, label='RS Dataset') # Plot a simple histogram with binsize determined automatically sns.distplot(woAssim, kde=False, color="plum", bins=bins_wo, label='NoAssim') # Plot a simple histogram with binsize determined automatically sns.distplot(wiAssim, kde=False, color="steelblue", bins=bins_wi, label='Best CSO') plt.xlim(0,8) plt.tick_params(labelsize=16) plt.xlabel('Snow Depth (m)', fontsize=18) plt.ylabel('Count', fontsize=18) #plt.title('LiDAR vs SnowModel Histograms',fontsize=22) plt.axvline(rs_m, color='slategray', linestyle='dashed',label='RS Median') plt.axvline(wo_m, color='plum', linestyle='dashed', label='NoAssim Median') plt.axvline(wi_m, color='steelblue', linestyle='dashed', label='Best CSO Median') plt.legend(fontsize=14) plt.tight_layout() plt.savefig('../results/'+str_histogram+'/histogram_'+rank+'_'+str(year)+'_wiAssim.png',dpi=400) plt.close('all') def ECDFplot(data): from statsmodels.distributions.empirical_distribution import ECDF RS = ECDF(data.rs) woAssim = ECDF(data.woAssim) wiAssim = ECDF(data.wiAssim) # rs dataset rs = data.rs rs_sd = np.std(rs) rs_m = np.mean(rs) rs_me = np.median(rs) # woAssim wo = data.woAssim wo_sd = np.std(wo) wo_m = np.mean(wo) wo_me = np.median(wo) # wiAssim wi = data.wiAssim wi_sd = np.std(wi) wi_m = np.mean(wi) wi_me = np.median(wi) plt.xlim(0,8) plt.xlabel('Snow Depth (m)', fontsize=18) plt.plot(RS.x,RS.y, color='slategray',linewidth=3.0,linestyle='dashed', label='RS') plt.plot(woAssim.x,woAssim.y, color='plum',linewidth=3.0,label='NoAssim') plt.plot(wiAssim.x,wiAssim.y, color='steelblue',linewidth=3.0, label='Best CSO') plt.legend() plt.tight_layout() plt.savefig('../results/'+str_ECDF+'/ECDF_'+rank+'_'+str(year)+'_wiAssim.png', dpi= 400) plt.close('all') return 
rs_me,wo_me,wi_me,rs_m,wo_m,wi_m def KSstat(data): # Generate Kolmogorov-Smirnov Stat rs = data.rs wo = data.woAssim wi = data.wiAssim print(stats.ks_2samp(rs, wo),'KS Stat for RS to No Assimilation') print(stats.ks_2samp(rs, wi),'KS Stat for RS to CSO Assimilation') ks_wo = stats.ks_2samp(rs, wo) ks_wi = stats.ks_2samp(rs, wi) return ks_wo, ks_wi def write_text(rank,year): # Write some things to text file path = '../results/'+str_textfiles+'/woAssim_'+rank+'_'+str(year)+'_info.txt' file = open(path,'w') file.write('Variables of interest for'+rank+'_'+str(year)+'\n') file.write('The RMSE (m) for wiAssim = '+str(round(rmse_values_wi,3))+'\n') file.write('The RMSE (m) for woAssim = '+str(round(rmse_values_wo,3))+'\n') file.write('The RS median value = '+str(round(rs_me,3))+'\n') file.write('The woAssim median value = '+str(round(wo_me,3))+'\n') file.write('The wiAssim median value = '+str(round(wi_me,3))+'\n') file.write('The RS mean value = '+str(round(rs_m,3))+'\n') file.write('The woAssim mean value = '+str(round(wo_m,3))+'\n') file.write('The wiAssim mean value = '+str(round(wi_m,3))+'\n') file.write('The KS value for wiAssim = '+str(ks_wi)+'\n') file.write('The KS value for woAssim = '+str(ks_wo)+'\n') file.close() def wiAssim_2dh(data): sns.set(style="white", palette="muted", color_codes=True) sns.set_context("notebook",font_scale=1.5, rc={"lines.linewidth": 2.5}) wiAssim = data.wiAssim topo = data.topo # sns.scatterplot(x=wiAssim, y=topo, color='steelblue') # plt.ylabel('Elevation (m)', fontsize=18) # plt.xlabel('Snow Depth (m) ', fontsize=18) # plt.tight_layout() # #plt.title('NoAssim '+str(year)) # plt.savefig('results/hypsometry/scatter_'+rank+'_'+str(year)+'_wiAssim.png',dpi=400) g = sns.jointplot(wiAssim, topo, kind='kde', height=8, space=0, xlim=xlim_yearly, ylim=(0, 2000), color='steelblue') g.set_axis_labels('Modeled Snow Depth (m)', 'Terrain DEM (m)') # Make sure to include the best/med/ninety in the title 
g.savefig('../results/'+str_topo+'/'+rank+'_'+str_topo+'.png',dpi=400) plt.close('all') def RS_2dh(data): sns.set(style="white", palette="muted", color_codes=True) sns.set_context("notebook",font_scale=1.5, rc={"lines.linewidth": 2.5}) rs = data.rs topo = data.topo # sns.scatterplot(x=rs, y=topo, color='slategray') # plt.ylabel('Elevation (m)', fontsize=18) # plt.xlabel('Snow Depth (m) ', fontsize=18) # plt.tight_layout() # #plt.title('RS '+str(year)) # plt.savefig('results/hypsometry/scatterRS_'+rank+'_'+str(year)+'.png',dpi=400) g = sns.jointplot(rs, topo, kind='kde', height=8, space=0, xlim=xlim_yearly, ylim=(0, 2000), color='slategray') g.set_axis_labels('Snow Depth (m) RS', 'Terrain DEM (m)') # Make sure to include the best/med/ninety in the title g.savefig('../results/'+str_topo+'/RS_'+str_topo+'.png',dpi=400) plt.close('all') def woAssim_2dh(data): sns.set(style="white", palette="muted", color_codes=True) sns.set_context("notebook",font_scale=1.5, rc={"lines.linewidth": 2.5}) woAssim = data.woAssim topo = data.topo # sns.scatterplot(x=woAssim, y=topo, color='plum') # plt.ylabel('Elevation (m)', fontsize=18) # plt.xlabel('Snow Depth (m) ', fontsize=18) # plt.tight_layout() # #plt.title('NoAssim '+str(year)) # plt.savefig('results/hypsometry/scatter_'+rank+'_'+str(year)+'_woAssim.png',dpi=400) g = sns.jointplot(woAssim, topo, kind='kde', height=8, space=0, xlim=xlim_yearly, ylim=(0, 2000), color='plum') g.set_axis_labels('Modeled Snow Depth (m)', 'Terrain DEM (m)') # Make sure to include the best/med/ninety in the title g.savefig('../results/'+str_topo+'/woAssim_'+str_topo+'.png',dpi=400) plt.close('all') # For loop # Run the for loop, chooses the input files, timeslices and links to the functions for i in allfiles: # Pull in the datasets rank = run[i] print('Working on run '+rank+' remote sensing analysis') data = datasets(run[i],year) rmse_values_wi = RMSE_wiAssim(data) rmse_values_wo = RMSE_woAssim(data) print('Working on run '+rank+' Histogram') 
histogram_plot(data) print('Working on run '+rank+' other stats') wiAssim_2dh(data) rs_me,wo_me,wi_me,rs_m,wo_m,wi_m = ECDFplot(data) ks_wo,ks_wi = KSstat(data) write_text(run[i],year) RS_2dh(data) woAssim_2dh(data) print('All RS plots created successfully') text_path = '../results/'+str_textfiles # Create of list the names of all text files in the directory path (defined above). text_dir = os.listdir(text_path) print(text_dir) column_names = ["name", "rmse", "ks_stat","median","mean"] df = pd.DataFrame() # Run the for loop, chooses the input files, timeslices and links to the functions for i in allfiles: with open (text_path+'/'+text_dir[i], 'rt') as myfile: line = myfile.readlines() name = line[0] name_run = name[25:-6] print(name_run) rmse = line[1] rmse_value = rmse[27:-1] print(rmse_value) ks_stat = line[9] ks_value = ks_stat[52:59] print(ks_value) median = line[5] med = median[27:-1] mean = line[8] me = mean[25:-1] mylist = [name_run, rmse_value, ks_value,med,me] data = pd.DataFrame([mylist], columns = column_names) df = pd.concat([df,data], ignore_index=True) print(df) df.to_csv('../results/stats/'+str_stats+'.csv') ```
github_jupyter
<a href="https://colab.research.google.com/github/chemaar/python-programming-course/blob/master/Lab_5b_Data_Structures_Tuples_Sets_Dictionaries.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Lab 5b: Data structures: Tuples, Sets and Dictionaries In this notebook, we propose and solve some exercises about basic data structures implemented through Python lists (matrix), tuples, sets and dictionaries. * **In these exercises, we can always proceed solving the problems in a generic way or taking advantage of Python capabilities. As a recommendation, first, try the generic way (applicable to any programming language) and, then, using Python** * **As a good programming practice, our test cases should ensure that all branches of the code are executed at least once.** ## List of exercises 1. Write a program that a given a number $n \in (0,10)$ creates and displays a square matrix initialized with all positions to 1. * Input: 3 * Expected output: ``` 111 111 111 ``` ``` matrix = [] n = int(input("Introduce the value of n: ")) if n>0 and n<10: for i in range(n): row = [] for j in range (n): row.append(1) matrix.append(row) #Manual display for i in range(n): for j in range(n): print(matrix[i][j], end="", sep=",") print("") #Pythonic version: comprehension lists matrix = [ [1 for i in range(n)] for j in range(n)] print(matrix) ``` 2. Write a program that given a matrix, displays whether is a square matrix. * Input: reuse the code before to create a matrix * Expected output: ``` n = 3 matrix = [ [1 for i in range(n)] for j in range(n)] is_square = True if matrix and len(matrix)>0: n_rows = len(matrix) n_cols = len(matrix[0]) #dimension is_square = n_cols == n_rows i = 1 while is_square and i<n_rows: is_square = is_square and n_cols == len(matrix[i]) i = i + 1 print("The matrix is square: ", is_square) ``` 3. 
Write a program that a given a matrix ($n x m$) with random numbers between $(0,10)$, calculates and displays the transpose matrix.$t(j,i) = m(i,j)$ * Input: n = 3, m = 2 ``` [10, 0, 0] [5, 1, 0] ``` * Expected output: ``` [10, 5] [0, 1] [0, 0] ``` * Dependencies: To create the matrix with random numbers, we are going to introduce the library of random numbers in [Python](https://docs.python.org/3/library/random.html). ``` import random print(random.random()) # Random float x, 0.0 <= x < 1.0 print(random.uniform(0, 10)) # Random float x, 0.0 <= x < 10.0 print(random.randint(0, 10)) # Integer from 0 to 10, endpoints included print(random.randrange(0, 101, 2)) # Even integer from 0 to 100 print(random.choice('AEIOU')) #Get a randomized element print(random.shuffle([1,2,3])) #Shuffle numbers of a list print(random.sample([1, 2, 3], 2)) #Create a matrix n = 3 #columns m = 2 #rows matrix = [ [random.randint(0, 10) for i in range(n)] for j in range(m)] print(matrix) print("Matrix") for row in range(m): print(matrix[row]) #Transposing t_matrix = [] for i in range(len(matrix[0])): #the new matrix has m rows t_row = [] for j in range(len(matrix)): #and n columns t_row.append(matrix[j][i]) t_matrix.append(t_row) print("Transposed matrix") for row in range(len(t_matrix)): print(t_matrix[row]) #Pythonic way print("(Python version) Transposed matrix") t_matrix = [[matrix[j][i] for j in range(len(matrix))] for i in range(len(matrix[0]))] for row in range(len(t_matrix)): print(t_matrix[row]) ``` 4. Write a program creates and displays an identity matrix of dimension $n \in (0,10)$. * Input: $n=5$ * Expected output: ``` [1, 0, 0, 0, 0] [0, 1, 0, 0, 0] [0, 0, 1, 0, 0] [0, 0, 0, 1, 0] [0, 0, 0, 0, 1] ``` ``` matrix = [] n = int(input("Introduce the value of n: ")) if n>0 and n<10: for i in range(n): row = [] for j in range (n): if i == j: row.append(1) else: row.append(0) matrix.append(row) for row in range(len(matrix)): print(matrix[row]) ``` 5. 
Write a program that makes the sum of two matrices $A$ and $B$. The resulting matrix $C$ is one in which each element $c(i,j) = a(i,j) + b(i,j)$. * Input: ``` A: [2, 1, 10] [3, 1, 1] B: [2, 9, 1] [10, 7, 6] ``` * Expected output: ``` [4, 10, 11] [13, 8, 7] ``` ``` n = 3 #columns m = 2 #rows A = [ [random.randint(0, 10) for i in range(n)] for j in range(m)] B = [ [random.randint(0, 10) for i in range(n)] for j in range(m)] C = [] print("A") for row in range(len(A)): print(A[row]) print("B") for row in range(len(B)): print(B[row]) if len(A) == len(B) and len(A[0])==len(B[0]): for i in range(len(A)): row = [] for j in range(len(A[0])): row.append(A[i][j] + B[i][j]) C.append(row) print("C") for row in range(len(C)): print(C[row]) else: print("The number of rows and columns must be the same.") #Pythonic way print("(Python version) C") C = [ [A[i][j] + B[i][j] for j in range(len(A[0]))] for i in range(len(A)) ] for row in range(len(C)): print(C[row]) ``` 6. The [Game of Life](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life), also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970. Write a program to implement the Game of Life. The program will receive as an input the size of the board, $n$, and the number of generations. In each generation, the board will be updated following these rules: * A living cell with less than 2 living neighbors-->die * A living cell with 2 or 3 living neighbors-->survives * A living cell with more than 3 living neighbors->die * A dead cell with exactly 3 living neighbors->new born, alive Improvement: the simulation will end after $n$ generations or when there is no change. 
``` import random import time import matplotlib.pyplot as plt def plot_board(board): rows = len(board) cols = rows plt.figure(figsize = (cols, rows)) plt.axes(aspect='equal') for i, j in [(i, j) for i in range(rows) for j in range(cols)]: if board[i][j]: plt.plot(i, j, 'o', color='black', markeredgecolor='w', ms=10) plt.show() def demo_pattern(): return [ [ True, True, True, True, False, False, True, False, False, False], [ False, True, True, False, False, True, True, True, False, False], [ True, True, False, True, False, True, False, False, True, True], [ False, True, False, False, False, False, False, False, False, False], [ False, True, True, True, False, True, True, True, False, False], [ True, False, True, True, False, True, True, True, True, True], [ True, False, True, False, False, True, True, False, True, False], [ True, False, False, True, False, False, False, True, False, False], [ True, False, False, True, False, True, True, True, True, False], [ False, False, True, True, False, False, False, False, True, True] ] def pretty_print(board): if board: for row in range(len(board)): for column in range(len(board[0])): if board[row][column]: print("\t\u2665", end = "") else: print("\t\u271D", end = "") print() def show(board): if board: for row in range(len(board)): print(board[row]) def count_alive_neighbours(board, row, col): alive = 0 if board and row >= 0 and row < len(board) and col >= 0 and col < len(board[0]): nrows = len(board) ncols = len(board[0]) alive += (1 if board[row][col+1] else 0) if col+1<ncols else 0 alive += (1 if board[row][col-1] else 0) if col-1>=0 else 0 alive += (1 if board[row+1][col] else 0) if row+1<nrows else 0 alive += (1 if board[row-1][col] else 0) if row-1>=0 else 0 alive += (1 if board[row-1][col-1] else 0) if row-1>=0 and col-1>=0 else 0 alive += (1 if board[row-1][col+1] else 0) if row-1>=0 and col+1<ncols else 0 alive += (1 if board[row+1][col-1] else 0) if row+1<nrows and col-1>=0 else 0 alive += (1 if board[row+1][col+1] 
else 0) if row+1<nrows and col+1<ncols else 0 return alive if __name__== "__main__": n = 5 generations = 10 current_generation = [ [random.choice([True,False]) for j in range(n)] for i in range(n) ] alive_neighbours = 0 alive_cells = 0 for gen in range(generations): print("Generation ", gen) pretty_print(current_generation) #plot_board(current_generation) alive_cells = 0 #A living cell with less than 2 living neighbors-->die #A living cell with 2 or 3 living neighbors-->survives #A living cell with more than 3 living neighbors->die #A dead cell with exactly living neighbors->new born, alive new_generation = current_generation.copy() size = len(current_generation) for i in range(size): for j in range(size): alive_neighbours= count_alive_neighbours(current_generation, i, j) if current_generation[i][j]: if alive_neighbours<2: new_generation [i][j] = False elif (alive_neighbours>=2 and alive_neighbours<=3): new_generation [i][j] = current_generation[i][j] elif alive_neighbours>3: new_generation [i][j] = False else: new_generation [i][j] = alive_neighbours == 3 #time.sleep(0.2) #The new generation becomes the current generation current_generation = new_generation.copy() ``` 7. Write a program to manage a shopping cart with the following features. * There is a list of products: rice (1 euro per package), apple (0.30 euro per unit) and milk (1 euro). * The program shall ask the user to introduce a product in the shopping cart indicating the number of units. Each product can be added just once. * The program shall display a menu with the following options: ``` Shopping Cart -------------------- 1-See shopping cart. (Shows the current shopping cart and calculates the cost.) 2-See products. (Shows the list of products and its unit cots.) 3-Add product. (Asks the user for a product and a number of units. if the product is none, the program will come back to the menu) 4-Make the order. (Clears the cart.) 5-Exit. (The program ends.) 
``` ``` if __name__== "__main__": NAME, COST = (0,1) products = [("rice", 1), ("apple", 0.30), ("milk", 1)] shopping_cart = set() option = "" while option != "5": print("\n\nShopping cart manager\n") print("1-See shopping cart.") print("2-See products.") print("3-Add product.") print("4-Make the order.") print("5-Exit.") option = input("Introduce your option: ") if option == "1": if len(shopping_cart) == 0: print("The shopping cart is empty") else: for product, quantity in shopping_cart: print("Product: {}, units: {}, total cost: {}".format(product[NAME], quantity, quantity*product[COST])) elif option == "2": for product in products: print("Product: {}, cost: {}".format(product[NAME], product[COST])) elif option == "3": added = False while not added: p = input("Introduce product name or (none to come back to the menu): ") added = p == "none" if not added: q = int(input("Introduce quantity: ")) selected_products = [item for item in products if item[NAME] == p] if len(selected_products) > 0: selected_product = selected_products[0] else: selected_product = None if selected_product and q > 0 : if len([ item for item in shopping_cart if selected_product[NAME] == item[0][NAME]]) == 0: shopping_cart.add((selected_product,q)) added = True else: print("The product is already in the cart.") else: print("The product name or the quantity is not correct.") elif option == "4": print("The order has been processed.") shopping_cart.clear() elif option == "5": print("The program is going to end.") else: print("Invalid option") ``` 8. Implement the shopping cart using dictionaries. 
``` if __name__== "__main__": products = { "rice": 1, "apple": 0.30, "milk":1} shopping_cart = {} option = "" while option != "5": print("\n\nShopping cart manager\n") print("1-See shopping cart.") print("2-See products.") print("3-Add product.") print("4-Make the order.") print("5-Exit.") option = input("Introduce your option: ") if option == "1": if len(shopping_cart) == 0: print("The shopping cart is empty") else: for product in shopping_cart.keys(): product_cost = products[product] quantity = shopping_cart[product] print("Product: {}, units: {}, total cost: {}".format(products[product], quantity, quantity*product_cost)) elif option == "2": for product, cost in products.items(): print("Product: {}, cost: {}".format(product, cost)) elif option == "3": added = False while not added: p = input("Introduce product name or (none to come back to the menu): ") added = p == "none" if not added: q = int(input("Introduce quantity: ")) if q > 0: shopping_cart[p] = q added = True else: print("The product name or the quantity is not correct.") elif option == "4": print("The order has been processed.") shopping_cart.clear() elif option == "5": print("The program is going to end.") else: print("Invalid option") ``` 9. Implement the TIC, TAC, TOE game. * The program shall display the board in each iteration. * The program shall ask the user for the coordinates to situate a value. 
``` def print_board(board): size = len(board) for row in range(size): for col in range(size): print(str(board[row][col])+"\t|", end= "") print() if __name__ == "__main__": size = 3 board = [ ["" for j in range(size)] for i in range(size)] current_player = "X" other_player = "O" end_game = False situated = 0 while not end_game: print("Turn of player: "+current_player) print_board(board) set_position = False while not set_position: x =int(input("Select position x:")) y =int(input("Select position y:")) if x>=0 and x<=size and y>=0 and y<=size and board[x][y] == "": #Place board[x][y] = current_player set_position = True situated = situated + 1 else: print("The position is already set.") #Check if current player is winner by rows winner = False row = 0 while not winner and row<size: winner = board[row].count(current_player) == size row = row + 1 #Check if current player is winner by cols col = 0 while not winner and col<size: row = 0 matches = 0 while not winner and row < size: if board[row][col] == current_player: matches = matches + 1 row = row + 1 col = col + 1 winner = matches == size #Check if current player is winner in main diagonal matches = 0 if not winner: for i in range(size): if board[i][i] == current_player: matches = matches + 1 winner = matches == size #Check if current player is winner in secondary diagonal if not winner: matches = 0 for i in range(size): if board[i][size-i -1] == current_player: matches = matches + 1 winner = matches == size end_game = winner or situated == 9 current_player, other_player = other_player, current_player if winner: print("The winner is: ", other_player) else: print("Draw") print_board(board) ``` 10. Implement the previous program making use of just one vector and slicing capabilities of Python lists. 
``` def print_board(board): for i in range(n): print(board[n*i:n*(i+1)]) if __name__=="__main__": n = 3 size = 9 board = ["" for x in range(n*n)] current_player = "X" other_player = "O" end_game = False situated = 0 while not end_game: print("Turn of player: "+current_player) print_board(board) set_position = False while not set_position: x =int(input("Select position x:")) y =int(input("Select position y:")) if x>=0 and y >= 0 and (x*n+y)<size and board[x*n+y] == "": #Place board[x*n+y] = current_player set_position = True situated = 0 else: print("The position is already set.") #Check if current player is winner by rows winner = False i = 0 while not winner and i<n: winner = board[n*i:n*(i+1)].count(current_player) == n i = i + 1 #Check if current player is winner by cols i = 0 while not winner and i<n: winner = board[i:size:n].count(current_player) == n i = i + 1 if not winner: #Check if current player is winner in the main diagonal winner = board[:size:n+1].count(current_player) == n if not winner: #Check if current player is winner in the secondary diagonal winner = board[n-1:size-1:n-1].count(current_player) == n end_game = winner or situated == 9 current_player, other_player = other_player, current_player if winner: print("The winner is: ", other_player) else: print("Draw") print_board(board) ```
github_jupyter
## Plot Scoreboard Using Python and Plotly

##### ABOUT THE AUTHOR: This notebook was contributed by [Plotly user Emilia Petrisor](https://plot.ly/~empet). You can follow Emilia on Twitter [@mathinpython](https://twitter.com/mathinpython) or [Github](https://github.com/empet).

### Two Scoreboards for Republican Presidential Candidates

Starting with August 6, 2015, The New York Times has periodically updated a scoreboard for the Republican presidential [candidates](http://www.nytimes.com/interactive/2015/08/06/upshot/2016-republican-presidential-candidates-dashboard.html). In this IPython (Jupyter) Notebook we generate the scoreboards published on August 14 and August 17, respectively, as [Heatmap](https://plot.ly/python/heatmaps/) objects in Python Plotly.

Inspecting the web page [source code](view-source:http://www.nytimes.com/interactive/2015/08/06/upshot/2016-republican-presidential-candidates-dashboard.html?abt=0002&abg=0), we found out that the scoreboard heatmap in The New York Times is generated with [http://colorzilla.com/gradient-editor/](http://colorzilla.com/gradient-editor/).

To identify the color code of each of the 16 colors defining the color gradient in The New York Times dashboard, we installed the `ColorZilla` [Chrome extension](https://chrome.google.com/webstore/detail/colorzilla/bhlhnicpbhignbdhedgjhgdocnmhomnp?hl=en). With the *New York Times* page open, we chose the *Web page color analyzer* in the `ColorZilla` menu and successively read the color codes.
The corresponding [Plotly colorscale](https://plot.ly/python/heatmaps-contours-and-2dhistograms-tutorial/#Custom-color-scales-in-Plotly) is defined as follows: ``` newyorktimes_cs=[[0.0, '#8B0000'], [0.06666666666666667, '#9E051B'], [0.13333333333333333, '#B0122C'], [0.2, '#C0223B'], [0.26666666666666666, '#CF3447'], [0.3333333333333333, '#DB4551'], [0.4, '#E75758'], [0.4666666666666667, '#F06A5E'], [0.5333333333333333, '#F87D64'], [0.6, '#FE906A'], [0.6666666666666666, '#FFA474'], [0.7333333333333333, '#FFB880'], [0.8, '#FFCB91'], [0.8666666666666667, '#FFDEA7'], [0.9333333333333333, '#FFEEC1'], [1.0, '#FFFFE0']] ``` Below we give the table of rankings as for 14 August, by the factors in the list with the same name. ``` tab_vals14=[[1,2,3,4,5,6,6,8,9,9,9,12,13,13,13,13], [1,7,5,12,5,4,12,7,2,3,12,7,7,12,7,12], [4,7,2,1,10,5,6,7,9,12,3,14,12,11,15,16], [2,9,4,1,3,8,10,11,6,5,6,14,14,12,14,13], [1,3,4,14,8,2,13,12,7,6,9,16,5,10,12,15]] candidates=['Bush', 'Rubio', 'Walker', 'Trump', 'Kasich', 'Cruz', 'Fiorina', 'Huckabee', 'Paul']+\ ['Christie', 'Carson', 'Santorum', 'Perry', 'Jindal', 'Graham', 'Pataki'] factors=['Prediction Market', 'NationalEndorsements', 'Iowa Polls']+\ ['New Hampshire Polls', 'Money Raised'] ``` First we define a simple Plotly Heatmap: ``` import plotly.plotly as py from plotly.graph_objs import * data14=Data([Heatmap(z=tab_vals14, y=factors, x=candidates, colorscale=newyorktimes_cs, showscale=False )]) width = 900 height =450 anno_text="Data source:\ <a href='http://www.nytimes.com/interactive/2015/08/06/upshot/\ 2016-republican-presidential-candidates-dashboard.html'> [1]</a>" title = "A scoreboard for republican candidates as of August 14, 2015" layout = Layout( title=' ', font=Font( family='Balto, sans-serif', size=12, color='rgb(68,68,68)' ), showlegend=False, xaxis=XAxis( title='', showgrid=True, side='top' ), yaxis=YAxis( title='', autorange='reversed', showgrid=True, autotick=False, dtick=1 ), autosize=False, height=height, width=width, 
margin=Margin( l=135, r=40, b=85, t=170 ) ) annotations = Annotations([ Annotation( showarrow=False, text=anno_text, xref='paper', yref='paper', x=0, y=-0.1, xanchor='left', yanchor='bottom', font=Font( size=11 ) )]) fig=Figure(data=data14, layout=layout) fig['layout'].update( title=title, annotations=annotations ) py.sign_in('empet', 'my_api_key') py.iplot(fig,filename='Heatmap-republican-candidates-14') ``` Now we go further and update the above Figure with data available on August 17, and moreover we annotate the Heatmap, displaying the candidate ranking on each cell. ``` tab_vals17=[[1,2,3,4,5,6,7,7,9,9,11,11,13,13,13,13], [1,7,5,12,5,4,7,12,2,12,3, 7,7,12,7,12], [4,7,2,1,10,5,7, 6, 9,3, 12, 14,12,11,15,16], [2,9,4,1,3,8,11, 10, 6,6, 5, 14,14,12,14,13], [1,3,4,14,8,2,12, 13, 7,9, 6,16,5,10,11,15]] candidates17=['Bush', 'Rubio', 'Walker', 'Trump', 'Kasich', 'Cruz', 'Huckabee', 'Fiorina','Paul']+\ ['Carson', 'Christie', 'Santorum', 'Perry', 'Jindal', 'Graham', 'Pataki'] ``` The first row in `tab_vals17` changed relative to the same row in `tab_vals14`, by swapping their positions the candidates (Fiorina, Huckabee) and (Christie, Carson), and correspondingly the other rows. 
``` fig['data'].update(Data([Heatmap(z=tab_vals17, y=factors, x=candidates17, colorscale=newyorktimes_cs, showscale=False )])) for i, row in enumerate(tab_vals17): for j, val in enumerate(row): annotations.append( Annotation( text=str(val), x=candidates[j], y=factors[i], xref='x1', yref='y1', font=dict(color='white' if tab_vals17[i][j]<12 else 'rgb(150,150,150)'), showarrow=False)) fig['layout'].update( title="A scoreboard for republican candidates as of August 17, 2015 <br> Annotated heatmap", annotations=annotations ) py.iplot(fig,filename='Annotated heatmap-republican-candidates-17') from IPython.core.display import HTML def css_styling(): styles = open("./custom.css", "r").read() return HTML(styles) css_styling() from IPython.display import HTML, display display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="https://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) import publisher publisher.publish('scoreboard-republican-candidates', '/ipython-notebooks/scoreboard-heatmaps/', 'Two Scoreboards for Republican Presidential Candidates', 'Plot Scoreboard Using Python and Plotly') ```
github_jupyter
# Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. ``` %matplotlib inline %load_ext autoreload %autoreload 2 %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt ``` ## Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! ``` data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() ``` ## Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. ``` rides[:24*10].plot(x='dteday', y='cnt') ``` ### Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. 
This is simple to do with Pandas thanks to `get_dummies()`. ``` dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() ``` ### Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. ``` # Save data for approximately the last 21 days test_data = data[-21*24:] # Now remove the test data from the data set data = data[:-21*24] quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std test_data.loc[:, each] = (test_data[each] - mean)/std ``` ### Splitting the data into training, testing, and validation sets We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. ``` # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] ``` We'll split the data into two sets, one for training and one for validating as the network is being trained. 
Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). ``` # Hold out the last 60 days or so of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] ``` ## Time to build the network Below you'll build your network. We've built out the structure. You'll implement both the forward pass and backwards pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. <img src="assets/neural_network.png" width=300px> The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*. > **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. 
Set `self.activation_function` in `__init__` to your sigmoid function. 2. Implement the forward pass in the `train` method. 3. Implement the backpropagation algorithm in the `train` method, including calculating the output error. 4. Implement the forward pass in the `run` method. ``` ############# # In the my_answers.py file, fill out the TODO sections as specified ############# from my_answers import NeuralNetwork def MSE(y, Y): return np.mean((y-Y)**2) ``` ## Unit tests Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly before you starting trying to train it. These tests must all be successful to pass the project. ``` import unittest inputs = np.array([[0.5, -0.2, 0.1]]) targets = np.array([[0.4]]) test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]]) test_w_h_o = np.array([[0.3], [-0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328], [-0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, -0.20185996], [0.39775194, 0.50074398], [-0.29887597, 
0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) ``` ## Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. ### Choose the number of iterations This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, this process can have sharply diminishing returns and can waste computational resources if you use too many iterations. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. The ideal number of iterations would be a level that stops shortly after the validation loss is no longer decreasing. ### Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. 
Normally a good choice to start at is 0.1; however, if you effectively divide the learning rate by n_records, try starting out with a learning rate of 1. In either case, if the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. ### Choose the number of hidden nodes In a model where all the weights are optimized, the more hidden nodes you have, the more accurate the predictions of the model will be. (A fully optimized model could have weights of zero, after all.) However, the more hidden nodes you have, the harder it will be to optimize the weights of the model, and the more likely it will be that suboptimal weights will lead to overfitting. With overfitting, the model will memorize the training data instead of learning the true pattern, and won't generalize well to unseen data. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. You'll generally find that the best number of hidden nodes to use ends up being between the number of input and output nodes. 
``` import sys #################### ### Set the hyperparameters in you myanswers.py file ### #################### from my_answers import iterations, learning_rate, hidden_nodes, output_nodes N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for ii in range(iterations): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) X, y = train_features.iloc[batch].values, train_targets.iloc[batch]['cnt'] network.train(X, y) # Printing out the training progress train_loss = MSE(np.array(network.run(train_features)).T, train_targets['cnt'].values) val_loss = MSE(np.array(network.run(val_features)).T, val_targets['cnt'].values) sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... Validation loss: " + str(val_loss)[:5]) sys.stdout.flush() losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() _ = plt.ylim() ``` ## Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. ``` fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = np.array(network.run(test_features)).T*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.iloc[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) ``` ## OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric). 
Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does? > **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter #### Your answer below
github_jupyter
I want to make two files that will allow me to build a model that is ready for the data on the MRNet collab. This means that one file will contain a list of image locations: ``` MRNet-v1.0/valid/sagittal/1130.npy MRNet-v1.0/valid/coronal/1130.npy MRNet-v1.0/valid/axial/1130.npy MRNet-v1.0/valid/sagittal/1131.npy MRNet-v1.0/valid/coronal/1131.npy MRNet-v1.0/valid/axial/1131.npy ... ``` and the other file will contain label info and train/valid/test splits: ``` case,abnormal,ACL,meniscal,split 1130.npy,0,0,0,train 1131.npy,1,1,0,valid 1132.npy,1,0,1,test ... ``` The model interface will look like: ### Training `python train.py model-name --rundir path-to-output-dir --label {abnormal,acl,meniscal,all} --series {axial,coronal,sagittal,all} --full` ### Evaluate `python eval.py model-path --label {abnormal,acl,meniscal,all} --split {train,valid,test,all}` ### Infer `python infer.py input-data-csv-filename output-prediction-csv-path -m model-path` ``` import os data_path = '/domino/datasets/local/mrnet/MRNet-v1.0' image_paths = [] for split in ['train', 'valid']: split_folder = os.path.join(data_path, split) for series in os.listdir(split_folder): if series == '.DS_Store': continue series_folder = os.path.join(split_folder, series) for filename in os.listdir(series_folder): if filename == '.DS_Store': continue image_paths.append(os.path.join(series_folder, filename)) with open('../mrnet-image-paths.csv', 'w') as fout: fout.write('\n'.join(image_paths)) !head ../mrnet-image-paths.csv import pandas as pd import numpy as np def load_labels(split, diagnosis): df = pd.read_csv( os.path.join(data_path, '{}-{}.csv'.format(split, diagnosis)), header=None, names=['case', diagnosis], dtype={'case': str, diagnosis: np.int} ) df['split'] = split if split == 'train' else 'test' print(df.groupby(diagnosis).count()) print(df.head()) return df train_abnormal = load_labels('train', 'abnormal') train_acl = load_labels('train', 'acl') train_meniscus = load_labels('train', 'meniscus') # Our 
test set will be what they are calling "valid" until we are ready to submit test_abnormal = load_labels('valid', 'abnormal') test_acl = load_labels('valid', 'acl') test_meniscus = load_labels('valid', 'meniscus') # Now we want to combine all of those data sets # import functools dfs = [ pd.concat([train_acl, test_acl], ignore_index=True), pd.concat([train_meniscus, test_meniscus], ignore_index=True) ] # df = functools.reduce(lambda a, b: pd.merge(a, b, on=['case', 'split'], suffixes=(False, False), how='outer'), dfs) # df.tail() df = pd.concat([train_abnormal, test_abnormal], ignore_index=True) df.set_index('case', inplace=True) for _df in dfs: assert(np.all(df.loc[_df.case, 'split'].values == _df.split.values)) df = df.join(_df.set_index('case').drop('split', axis='columns'), how='outer') df = df[['abnormal', 'acl', 'meniscus', 'split']] df.index = df.index.map(lambda c: c + '.npy') df.head() df.to_csv('../mrnet-labels.csv', index=True) !head ../mrnet-labels.csv ``` Let's set some of the train set as the valid set. ``` import pandas as pd df = pd.read_csv('../mrnet-labels.csv', index_col=0) df.head() df.groupby('split').count() df.groupby('split').sum() df.groupby('split').mean() sample = df[df.split == 'train'].sample(120) df.loc[sample.index, 'split'] = 'valid' df.groupby('split').sum() df.groupby('split').mean() df.groupby('split').count() df.to_csv('../mrnet-labels-3way.csv', index=True) ``` The validation split that I made is not ideal because it has so few of the positive ACL labels and so few of the negative abnormal labels. I'll try to fix that below. 
``` import pandas as pd df = pd.read_csv('../mrnet-labels.csv', index_col=0) df.head() df.groupby('split').describe() df.groupby('split').count() df.groupby('split').sum() df = pd.read_csv('../mrnet-labels.csv', index_col=0) sample = pd.concat([ df[(df.split == 'train') & (df.abnormal == 0)].sample(35), df[(df.split == 'train') & (df.acl == 1)].sample(40), df[(df.split == 'train') & (df.acl == 0)].sample(40), df[(df.split == 'train') & (df.meniscus == 1)].sample(7), ]) print(len(set(sample.index))) assert(len(set(sample.index)) == 120) df.loc[sample.index, 'split'] = 'valid' df.groupby('split').sum() ``` Good! It looks like valid has a similar distribution as test. Let's be sure. ``` splits = ['train', 'valid', 'test'] for d in ['abnormal', 'acl', 'meniscus']: pos_count = [((df.split == split) & (df[d] == 1)).sum() for split in splits] print('{}{}\t{}\t{}\t{}'.format(d, ' ' if d == 'acl' else '', *pos_count)) neg_count = [((df.split == split) & (df[d] == 0)).sum() for split in splits] print('{}{}\t{}\t{}\t{}'.format(d, ' ' if d == 'acl' else '', *neg_count)) all_count = [((df.split == split)).sum() for split in splits] print('{}{}\t{}\t{}\t{}'.format(d, ' ' if d == 'acl' else '', *all_count)) ``` That was a weird ass sampling strategy, but the classes are more balanced now. ``` len(set(df.index)) len(df) df.to_csv('../mrnet-labels-3way.csv', index=True) ```
github_jupyter
# Estimating Free Parking This notebook illustrates how to re-estimate a single model component for ActivitySim. This process includes running ActivitySim in estimation mode to read household travel survey files and write out the estimation data bundles used in this notebook. To review how to do so, please visit the other notebooks in this directory. # Load libraries ``` import os import larch # !conda install larch -c conda-forge # for estimation import pandas as pd ``` We'll work in our `test` directory, where ActivitySim has saved the estimation data bundles. ``` os.chdir('test') ``` # Load data and prep model for estimation ``` modelname = "free_parking" from activitysim.estimation.larch import component_model model, data = component_model(modelname, return_data=True) ``` # Review data loaded from the EDB The next step is to read the EDB, including the coefficients, model settings, utilities specification, and chooser and alternative data. ### Coefficients ``` data.coefficients ``` #### Utility specification ``` data.spec ``` ### Chooser data ``` data.chooser_data ``` # Estimate With the model setup for estimation, the next step is to estimate the model coefficients. Make sure to use a sufficiently large enough household sample and set of zones to avoid an over-specified model, which does not have a numerically stable likelihood maximizing solution. Larch has a built-in estimation methods including BHHH, and also offers access to more advanced general purpose non-linear optimizers in the `scipy` package, including SLSQP, which allows for bounds and constraints on parameters. BHHH is the default and typically runs faster, but does not follow constraints on parameters. 
``` model.estimate() ``` ### Estimated coefficients ``` model.parameter_summary() ``` # Output Estimation Results ``` from activitysim.estimation.larch import update_coefficients result_dir = data.edb_directory/"estimated" update_coefficients( model, data, result_dir, output_file=f"{modelname}_coefficients_revised.csv", ); ``` ### Write the model estimation report, including coefficient t-statistic and log likelihood ``` model.to_xlsx( result_dir/f"{modelname}_model_estimation.xlsx", data_statistics=False, ) ``` # Next Steps The final step is to either manually or automatically copy the `*_coefficients_revised.csv` file to the configs folder, rename it to `*_coefficients.csv`, and run ActivitySim in simulation mode. ``` pd.read_csv(result_dir/f"{modelname}_coefficients_revised.csv") ```
github_jupyter
## RIHAD VARIAWA, Data Scientist - Who has fun LEARNING, EXPLORING & GROWING ## Polynomials Some of the equations we've looked at so far include expressions that are actually *polynomials*; but what *is* a polynomial, and why should you care? A polynomial is an algebraic expression containing one or more *terms* that each meet some specific criteria. Specifically: - Each term can contain: - Numeric values that are coefficients or constants (for example 2, -5, <sup>1</sup>/<sub>7</sub>) - Variables (for example, x, y) - Non-negative integer exponents (for example <sup>2</sup>, <sup>64</sup>) - The terms can be combined using arithmetic operations - but **not** division by a variable. For example, the following expression is a polynomial: \begin{equation}12x^{3} + 2x - 16 \end{equation} When identifying the terms in a polynomial, it's important to correctly interpret the arithmetic addition and subtraction operators as the sign for the term that follows. For example, the polynomial above contains the following three terms: - 12x<sup>3</sup> - 2x - -16 The terms themselves include: - Two coefficients(12 and 2) and a constant (-16) - A variable (x) - An exponent (<sup>3</sup>) A polynomial that contains three terms is also known as a *trinomial*. Similarly, a polynomial with two terms is known as a *binomial* and a polynomial with only one term is known as a *monomial*. So why do we care? Well, polynomials have some useful properties that make them easy to work with. for example, if you multiply, add, or subtract a polynomial, the result is always another polynomial. ## Standard Form for Polynomials Techbnically, you can write the terms of a polynomial in any order; but the *standard form* for a polynomial is to start with the highest *degree* first and constants last. The degree of a term is the highest order (exponent) in the term, and the highest order in a polynomial determines the degree of the polynomial itself. 
For example, consider the following expression: \begin{equation}3x + 4xy^{2} - 3 + x^{3} \end{equation} To express this as a polynomial in the standard form, we need to re-order the terms like this: \begin{equation}x^{3} + 4xy^{2} + 3x - 3 \end{equation} ## Simplifying Polynomials We saw previously how you can simplify an equation by combining *like terms*. You can simplify polynomials in the same way. For example, look at the following polynomial: \begin{equation}x^{3} + 2x^{3} - 3x - x + 8 - 3 \end{equation} In this case, we can combine x<sup>3</sup> and 2x<sup>3</sup> by adding them to make 3x<sup>3</sup>. Then we can add -3x and -x (which is really just a shorthand way to say -1x) to get -4x, and then add 8 and -3 to get 5. Our simplified polynomial then looks like this: \begin{equation}3x^{3} - 4x + 5 \end{equation} We can use Python to compare the original and simplified polynomials to check them - using an arbitrary random value for ***x***: ``` from random import randint x = randint(1,100) (x**3 + 2*x**3 - 3*x - x + 8 - 3) == (3*x**3 - 4*x + 5) ``` ## Adding Polynomials When you add two polynomials, the result is a polynomial. Here's an example: \begin{equation}(3x^{3} - 4x + 5) + (2x^{3} + 3x^{2} - 2x + 2) \end{equation} because this is an addition operation, you can simply add all of the like terms from both polynomials. To make this clear, let's first put the like terms together: \begin{equation}3x^{3} + 2x^{3} + 3x^{2} - 4x -2x + 5 + 2 \end{equation} This simplifies to: \begin{equation}5x^{3} + 3x^{2} - 6x + 7 \end{equation} We can verify this with Python: ``` from random import randint x = randint(1,100) (3*x**3 - 4*x + 5) + (2*x**3 + 3*x**2 - 2*x + 2) == 5*x**3 + 3*x**2 - 6*x + 7 ``` ## Subtracting Polynomials Subtracting polynomials is similar to adding them but you need to take into account that one of the polynomials is a negative. 
Consider this expression: \begin{equation}(2x^{2} - 4x + 5) - (x^{2} - 2x + 2) \end{equation} The key to performing this calculation is to realize that the subtraction of the second polynomial is really an expression that adds -1(x<sup>2</sup> - 2x + 2); so you can use the distributive property to multiply each of the terms in the polynomial by -1 (which in effect simply reverses the sign for each term). So our expression becomes: \begin{equation}(2x^{2} - 4x + 5) + (-x^{2} + 2x - 2) \end{equation} Which we can solve as an addition problem. First place the like terms together: \begin{equation}2x^{2} + -x^{2} + -4x + 2x + 5 + -2 \end{equation} Which simplifies to: \begin{equation}x^{2} - 2x + 3 \end{equation} Let's check that with Python: ``` from random import randint x = randint(1,100) (2*x**2 - 4*x + 5) - (x**2 - 2*x + 2) == x**2 - 2*x + 3 ``` ## Multiplying Polynomials To multiply two polynomials, you need to perform the following two steps: 1. Multiply each term in the first polynomial by each term in the second polynomial. 2. Add the results of the multiplication operations, combining like terms where possible. For example, consider this expression: \begin{equation}(x^{4} + 2)(2x^{2} + 3x - 3) \end{equation} Let's do the first step and multiply each term in the first polynomial by each term in the second polynomial. The first term in the first polynomial is x<sup>4</sup>, and the first term in the second polynomial is 2x<sup>2</sup>, so multiplying these gives us 2x<sup>6</sup>. 
Then we can multiply the first term in the first polynomial (x<sup>4</sup>) by the second term in the second polynomial (3x), which gives us 3x<sup>5</sup>, and so on until we've multipled all of the terms in the first polynomial by all of the terms in the second polynomial, which results in this: \begin{equation}2x^{6} + 3x^{5} - 3x^{4} + 4x^{2} + 6x - 6 \end{equation} We can verify a match between this result and the original expression this with the following Python code: ``` from random import randint x = randint(1,100) (x**4 + 2)*(2*x**2 + 3*x - 3) == 2*x**6 + 3*x**5 - 3*x**4 + 4*x**2 + 6*x - 6 ``` ## Dividing Polynomials When you need to divide one polynomial by another, there are two approaches you can take depending on the number of terms in the divisor (the expression you're dividing by). ### Dividing Polynomials Using Simplification In the simplest case, division of a polynomial by a monomial, the operation is really just simplification of a fraction. For example, consider the following expression: \begin{equation}(4x + 6x^{2}) \div 2x \end{equation} This can also be written as: \begin{equation}\frac{4x + 6x^{2}}{2x} \end{equation} One approach to simplifying this fraction is to split it it into a separate fraction for each term in the dividend (the expression we're dividing), like this: \begin{equation}\frac{4x}{2x} + \frac{6x^{2}}{2x}\end{equation} Then we can simplify each fraction and add the results. For the first fraction, 2x goes into 4x twice, so the fraction simplifies to 2; and for the second, 6x<sup>2</sup> is 2x mutliplied by 3x. So our answer is 2 + 3x: \begin{equation}2 + 3x\end{equation} Let's use Python to compare the original fraction with the simplified result for an arbitrary value of ***x***: ``` from random import randint x = randint(1,100) (4*x + 6*x**2) / (2*x) == 2 + 3*x ``` ### Dividing Polynomials Using Long Division Things get a little more complicated for divisors with more than one term. 
Suppose we have the following expression: \begin{equation}(x^{2} + 2x - 3) \div (x - 2) \end{equation} Another way of writing this is to use the long-division format, like this: \begin{equation} x - 2 |\overline{x^{2} + 2x - 3} \end{equation} We begin long-division by dividing the highest order divisor into the highest order dividend - so in this case we divide x into x<sup>2</sup>. X goes into x<sup>2</sup> x times, so we put an x on top and then multiply it through the divisor: \begin{equation} \;\;\;\;x \end{equation} \begin{equation}x - 2 |\overline{x^{2} + 2x - 3} \end{equation} \begin{equation} \;x^{2} -2x \end{equation} Now we'll subtract the remaining dividend, and then carry down the -3 that we haven't used to see what's left: \begin{equation} \;\;\;\;x \end{equation} \begin{equation}x - 2 |\overline{x^{2} + 2x - 3} \end{equation} \begin{equation}- (x^{2} -2x) \end{equation} \begin{equation}\;\;\;\;\;\overline{\;\;\;\;\;\;\;\;\;\;4x -3} \end{equation} OK, now we'll divide our highest order divisor into the highest order of the remaining dividend. In this case, x goes into 4x four times, so we'll add a 4 to the top line, multiply it through the divisor, and subtract the remaining dividend: \begin{equation} \;\;\;\;\;\;\;\;x + 4 \end{equation} \begin{equation}x - 2 |\overline{x^{2} + 2x - 3} \end{equation} \begin{equation}- (x^{2} -2x) \end{equation} \begin{equation}\;\;\;\;\;\overline{\;\;\;\;\;\;\;\;\;\;4x -3} \end{equation} \begin{equation}- (\;\;\;\;\;\;\;\;\;\;\;\;4x -8) \end{equation} \begin{equation}\;\;\;\;\;\overline{\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;5} \end{equation} We're now left with just 5, which we can't divide further by x - 2; so that's our remainder, which we'll add as a fraction. The solution to our division problem is: \begin{equation}x + 4 + \frac{5}{x-2} \end{equation} Once again, we can use Python to check our answer: ``` from random import randint x = randint(3,100) (x**2 + 2*x -3)/(x-2) == x + 4 + (5/(x-2)) ```
github_jupyter
# Visualizing The Dataset ``` !wget = 'https://raw.githubusercontent.com/Doodies/Github-Stars-Predictor/master/dataset/data.csv' !ls # Ignore warnings import warnings warnings.filterwarnings('ignore') # Handle table-like data and matrices import numpy as np import pandas as pd # Visualisation import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.pylab as pylab import seaborn as sns # Configure visualisations %matplotlib inline color = sns.color_palette() pd.options.mode.chained_assignment = None pd.options.display.max_columns = 999 # mpl.style.use( 'ggplot' ) sns.set_style( 'whitegrid' ) pylab.rcParams[ 'figure.figsize' ] = 8,6 data = pd.read_csv('data.csv') data.head(3) ``` ## Visualizing CreatedAt ``` time_columns = ['createdAt','updatedAt','pushedAt'] for i in time_columns: data[i] = data[i].apply(lambda x : x.replace('T',' ').replace('Z','')) import datetime df = data df['createdAt'] = df['createdAt'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S').strftime('%A')) df.head() df['createdAt'].value_counts().plot.bar() print(df['createdAt'].value_counts()) ``` #### Result implies that most of repositories are created at Tuesday and least at weekends days (Saturday and Sunday) ## Visualizing UpdatedAt ``` df['updatedAt'] = df['updatedAt'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S').strftime('%A')) df['updatedAt'].value_counts().plot.bar() print(df['updatedAt'].value_counts()) ``` #### Number of updates are high on Friday and least on Sunday ## Visulaization between forkCount , subscribersCount , Stars ``` df1 = pd.DataFrame() df1['forkCount'] = df['forkCount'] df1['subscribersCount'] = df['subscribersCount'] df1['stars'] = df['stars'] sns.pairplot(df1) ``` ## Plotting Scatter plot of Stars Count ``` def plot_feature(df, col): plt.scatter(range(df.shape[0]), np.sort(df[col].values)) plt.xlabel('index', fontsize=12) plt.ylabel(col, fontsize=12) plt.show() plot_feature(data, "stars") len(data[data['stars'] 
> 20000]) ``` ## Checking Relationship between Stars and forkCount ``` sns.jointplot(x = "stars", y = "forkCount", data = data) data[data['prMergedComments'] == 0].shape newDf = data.select_dtypes(include=[np.number]) newDf.head() ``` ## Checking the Percentage of zeroes in all columns ``` cols = newDf.columns zero_rows = [] zero_per = [] for i in cols: zero_rows.append(newDf[newDf[i] == 0].shape[0]) zero_per.append(newDf[newDf[i] == 0].shape[0] * 100 / float(data.shape[0])) df = pd.DataFrame({'col':cols,'numberRows':zero_rows,'zero_per':zero_per}) df.sort_values(['numberRows'], ascending=False) ``` ## Plotting Difference Between Numbers of Users and Organization ``` data.type.value_counts() data.type.value_counts().plot.bar() ``` ## Checking Top 20 Primary Languages ``` data.primaryLanguage.value_counts()[:20].plot(kind = 'bar', color = color[1]) data.head(3) ``` ## Changing the time to their Hours Representation ``` time_columns = ['createdAt','updatedAt','pushedAt'] for i in time_columns: data[i] = data[i].apply(lambda x : x.replace('T',' ').replace('Z','')) from datetime import datetime for i in time_columns: data[i] = data[i].apply(lambda x: int(datetime.strptime(x,'%Y-%m-%d %H:%M:%S').strftime('%s')) / (60 * 60)) data.loc[:, time_columns] data.head(3) ``` ## Website Url to Binary Form 1(Yes)/0(No) ``` data['websiteUrl'] = data['websiteUrl'].fillna('') data['websiteUrl'] = data['websiteUrl'].apply(lambda x : 1 if len(x) > 0 else 0) data['websiteUrl'].value_counts().plot.bar() data.head(3) ``` ## Description Word and Character Count ``` data['description'] = data['description'].fillna('') data['desWordCount'] = data['description'].apply(lambda x: len(x.split(' '))) data['desCharCount'] = data['description'].apply(len) data.head(3) ``` ## hasWikiEnabled to Binary Form ``` print(data['hasWikiEnabled'].value_counts()) data['hasWikiEnabled'].value_counts().plot.bar() data['hasWikiEnabled'] = data['hasWikiEnabled'].apply(lambda x : 1 if x else 0) ``` ## License to 
OneHotEncoding ``` set(data['license']) data['license'].value_counts(dropna = False)[:10].plot.bar() data['license'] = data['license'].fillna('') license_cols = ['mit_license','nan_license','apache_license','other_license','remain_license'] for i in license_cols: if i.startswith('mit'): data[i] = data['license'].apply(lambda x: 1 if x == 'MIT License' else 0) elif i.startswith('nan'): data[i] = data['license'].apply(lambda x: int(len(x) == 0)) elif i.startswith('apache'): data[i] = data['license'].apply(lambda x: 1 if x == 'Apache License 2.0' else 0) elif i.startswith('other'): data[i] = data['license'].apply(lambda x: 1 if x == 'Other' else 0) data['remain_license'] = (data[license_cols[:-1]].sum(axis=1) == 0).astype(int) data.head() ``` ## Primary Language to OneHotEncoding ``` data.primaryLanguage.isnull().sum() data.primaryLanguage.value_counts()[:20].plot.bar() lang_cols = np.array(data.primaryLanguage.value_counts()[:6].index) data[lang_cols[0]] = data.primaryLanguage.apply(lambda x : int(x == 'JavaScript')) data[lang_cols[1]] = data.primaryLanguage.apply(lambda x : int(x == 'Python')) data[lang_cols[2]] = data.primaryLanguage.apply(lambda x : int(x == 'Java')) data[lang_cols[3]] = data.primaryLanguage.apply(lambda x : int(x == 'Objective-C')) data[lang_cols[4]] = data.primaryLanguage.apply(lambda x : int(x == 'Ruby')) data[lang_cols[5]] = data.primaryLanguage.apply(lambda x : int(x == 'PHP')) data['other_language'] = (data[lang_cols].sum(axis=1) == 0).astype(int) data.head(3) ``` ## type to binary form 1(User) / 0(Org) ``` newDf = data data['type'] = data.type.apply(lambda x : 1 if x == 'user' else 0) data.members.isnull().sum() data.type.value_counts() data.head(3) ``` ## Setting nan members , organizations , gists , giststar , gistComments , followers , following to 0 ``` data['members'] = data['members'].fillna(0) data['organizations'] = data['organizations'].fillna(0) data['gists'] = data['gists'].fillna(0) data['gistStar'] = data['gistStar'].fillna(0) 
data['gistComments'] = data['gistComments'].fillna(0) data['following'] = data['following'].fillna(0) data['followers'] = data['followers'].fillna(0) data.isnull().sum() ``` ## Plotting Missing Data ``` def plot_missing_data(df): missing_df = df.isnull().sum(axis=0).reset_index() missing_df.columns = ['column_name', 'missing_count'] missing_df = missing_df.loc[missing_df['missing_count']>0] missing_df = missing_df.sort_values(by='missing_count') ind = np.arange(missing_df.shape[0]) width = 0.9 fig, ax = plt.subplots(figsize=(12,9)) rects = ax.barh(ind, missing_df.missing_count.values, color=color[3]) ax.set_yticks(ind) ax.set_yticklabels(missing_df.column_name.values, rotation='horizontal') ax.set_xlabel("Count of missing values") ax.set_title("Number of missing values in each column") plt.show() plot_missing_data(data) ``` ## removing the null numCommits(of master branch) rows ``` data.commits.isnull().sum() data = data[data.commits.notnull()] data.shape data.commits.isnull().sum() ``` ## Removing the null reponame rows ``` print(data.reponame.isnull().sum()) data.shape data = data[data.reponame.notnull()] print(data.reponame.isnull().sum()) data.shape data.head() data.select_dtypes(include = np.bool) ``` ## Dropping Columns ``` data.columns ## Columns to drop Df = data col = ['description' , 'isArchived' , 'license' ,'location' , 'login' , 'primaryLanguage' , 'reponame' , 'siteAdmin'] data = data.drop(col , axis = 1) def count_dtypes(df): pd.options.display.max_rows = 65 dtype_df = df.dtypes.reset_index() dtype_df.columns = ["Count", "Column Type"] return dtype_df.groupby("Column Type").aggregate('count').reset_index() count_dtypes(data) data.head(3) ``` ## Saving it as a PreProcessing file ``` data.to_csv('PreprocessData.csv') !ls from google.colab import files files.download('PreprocessData.csv') ```
github_jupyter
# COVID-19 Open Research Dataset (CORD-19) - https://pages.semanticscholar.org/coronavirus-research - https://www.kaggle.com/acmiyaguchi/cord-19-citation-network-with-deduping/output - https://lg-covid-19-hotp.cs.duke.edu/ ### Stats Papers in CORD-19: - has valid DOI External papers cited by CORD-19 papers: - total - with DOI - scraped ### Schema https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/2020-03-13/json_schema.txt ``` # JSON schema of full text documents { "paper_id": <str>, # 40-character sha1 of the PDF "metadata": { "title": <str>, "authors": [ # list of author dicts, in order { "first": <str>, "middle": <list of str>, "last": <str>, "suffix": <str>, "affiliation": <dict>, "email": <str> }, ... ], "abstract": [ # list of paragraphs in the abstract { "text": <str>, "cite_spans": [ # list of character indices of inline citations # e.g. citation "[7]" occurs at positions 151-154 in "text" # linked to bibliography entry BIBREF3 { "start": 151, "end": 154, "text": "[7]", "ref_id": "BIBREF3" }, ... ], "ref_spans": <list of dicts similar to cite_spans>, # e.g. inline reference to "Table 1" "section": "Abstract" }, ... ], "body_text": [ # list of paragraphs in full body # paragraph dicts look the same as above { "text": <str>, "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Introduction" }, ... { ..., "section": "Conclusion" } ], "bib_entries": { "BIBREF0": { "ref_id": <str>, "title": <str>, "authors": <list of dict> # same structure as earlier, # but without `affiliation` or `email` "year": <int>, "venue": <str>, "volume": <str>, "issn": <str>, "pages": <str>, "other_ids": { "DOI": [ <str> ] } }, "BIBREF1": {}, ... "BIBREF25": {} }, "ref_entries": "FIGREF0": { "text": <str>, # figure caption text "type": "figure" }, ... 
"TABREF13": { "text": <str>, # table caption text "type": "table" } }, "back_matter": <list of dict> # same structure as body_text } } ``` ### Target output - With text `text,text_b,label` - With IDs `doc_id,doc_id_b,label` ``` import pandas as pd import os import math import random import pickle import json import re import numpy as np from tqdm import tqdm_notebook as tqdm from collections import defaultdict import requests import time from sklearn.model_selection import StratifiedKFold from fuzzywuzzy import fuzz from experiments.environment import get_env env = get_env() n_splits = 4 scraper_dir = './output/cord19/' cord19_dir = os.path.join(env['datasets_dir'], 'cord-19') dummy_id = '21a4369f83891bf6975dd916c0aa495d5df8709e' meta_df = pd.read_csv(os.path.join(cord19_dir, 'metadata.csv'), index_col=0, dtype={'doi': str}) meta_df.tail() len(meta_df['doi'].unique()) / len(meta_df) id2meta = {row['sha']: row for idx, row in meta_df.iterrows() if row['sha']} len(id2meta) id2meta[dummy_id]['doi'] ``` # Load paper data ``` subsets = ['biorxiv_medrxiv', 'comm_use_subset', 'custom_license', 'noncomm_use_subset'] id2paper = {} has_doi = 0 bib_count = 0 cits = [] # from_doi, to_doi, <section title> for ss in subsets: ss_dir = os.path.join(cord19_dir, ss) # iterate over files for fn in os.listdir(ss_dir): if not fn.endswith('.json'): continue fp = os.path.join(ss_dir, fn) with open(fp, 'r') as f: paper = json.load(f) if paper['paper_id'] not in id2meta: continue meta = id2meta[paper['paper_id']] paper['_meta'] = dict(meta) id2paper[paper['paper_id']] = paper # has valid DOI if isinstance(meta['doi'], str) and len(meta['doi']) > 10: # iterate over body text for paragraph in paper['body_text']: # iterate over each citation marker for cit in paragraph['cite_spans']: # find corresponding bib entry if cit['ref_id'] in paper['bib_entries']: bib = paper['bib_entries'][cit['ref_id']] bib_count += 1 # only use bib entries with DOI if 'DOI' in bib['other_ids']: has_doi += 1 for 
out_doi in bib['other_ids']['DOI']: cits.append(( meta['doi'], out_doi, paragraph['section'] )) #break #break id2paper[dummy_id]['metadata']['title'] # Load paper data from disk (scraped) if os.path.exists(os.path.join(scraper_dir, 'doi2paper.json')): with open(os.path.join(scraper_dir, 'doi2paper.json'), 'r') as f: doi2s2paper = json.load(f) print(f'Loaded {len(doi2s2paper):,} from disk') else: doi2s2paper = None for k in doi2s2paper: print(k) break # Load paper data from CORD-19 doi2paper = {id2meta[pid]['doi']: paper for pid, paper in id2paper.items() if pid in id2meta} print(f'Loaded {len(doi2paper)} from CORD-19') # Merge CORD-19 + S2 print(f'Paper count: {len(id2paper)}') print(f'DOI exists: {has_doi/bib_count} (total: {bib_count}; doi: {has_doi})') print(f'Citation pairs: {len(cits)}') #cits_with_doi = [c for c in cits if c[0] in doi2paper and c[1] in doi2paper] cits_with_doi = [c for c in cits if (c[0] in doi2paper or c[0] in doi2s2paper) and (c[1] in doi2paper or c[1] in doi2s2paper)] # CORD-19 only: Citations with DOI: 30655 (0.09342419246206499) # + S2: Citations with DOI: 170454 (0.5194756908148369) print(f'Citations with DOI: {len(cits_with_doi)} ({len(cits_with_doi)/len(cits)})') missing_papers = [c[0] for c in cits if c[0] not in doi2paper] missing_papers += [c[1] for c in cits if c[1] not in doi2paper] print(f'Missing paper data, but DOI: {len(missing_papers)}') unique_missing_papers = set(missing_papers) print(f'Unique DOIs of missing papers: {len(unique_missing_papers)}') unique_cits = {(c[0], c[1]) for c in cits_with_doi} len(unique_cits) # section titles sect_titles_count = defaultdict(int) for from_doi, to_doi, sect_title in cits_with_doi: for t in normalize_section(sect_title).split(' and '): sect_titles_count[t] += 1 import operator top_sect_titles = sorted(sect_titles_count.items(), key=operator.itemgetter(1)) top_sect_titles.reverse() top_sect_titles # normalize section title def normalize_section(title): return title.strip().lower()\ 
.replace('conclusions', 'conclusion')\ .replace('concluding remarks', 'conclusion')\ .replace('future perspectives', 'future work')\ .replace('future directions', 'future work')\ .replace('viruses.', 'virus')\ .replace('viruses', 'virus') #.replace('conclusion and future perspectives', 'conclusion')\ #.replace('materials and methods', 'methods') # resolve 'and' titles def resolve_and_sect_titles(cits): for from_doi, to_doi, sect_title in cits: for t in normalize_section(sect_title).split(' and '): yield (from_doi, to_doi, t) normalized_cits_with_doi = resolve_and_sect_titles(cits_with_doi) list(resolve_and_sect_titles(cits_with_doi[:3])) cits_df = pd.DataFrame(normalized_cits_with_doi, columns=['from_doi', 'to_doi', 'citing_section']) cits_df print(f'After normalization: {len(cits_df):,} (before: {len(cits_with_doi):,})') cits_df['citing_section'] = [normalize_section(t) for t in cits_df['citing_section'].values] top_sections = 10 cits_df['citing_section'].value_counts()[:top_sections] labels = list(filter(lambda t: t, cits_df['citing_section'].value_counts()[:top_sections].keys())) labels def to_label(t, labels): t = normalize_section(t) if t in labels: return t else: return 'other' label_col = 'label' cits_df[label_col] = [to_label(t, labels) for t in cits_df['citing_section']] cits_df.drop_duplicates(['from_doi', 'to_doi', 'label'], keep='first', inplace=True) len(cits_df) tmp_df = cits_df.groupby(['from_doi', 'to_doi']).label.agg([('label_count', 'count'), (label_col, ','.join)]).reset_index() tmp_df tmp_df['label_count'].value_counts() for k, p in doi2paper.items(): break p['abstract'][0]['text'] #p['metadata']['title'] for k, p in doi2s2paper.items(): break p['title'] p['abstract'] ``` # Generate output as TSV ``` def get_text_from_doi(doi): text = '' sep = '\n' if doi in doi2s2paper: # from s2 scraper #text += doi2s2paper[doi]['title'] if doi2s2paper[doi]['abstract']: #text += '\n' + doi2s2paper[doi]['abstract'] text = doi2s2paper[doi]['title'] + sep + 
doi2s2paper[doi]['abstract'] elif doi in doi2paper: #text += doi2paper[doi]['metadata']['title'] if len(doi2paper[doi]['abstract']) > 0: #text += doi2paper[doi]['metadata']['title'] + '\n' + doi2paper[doi]['abstract'][0]['text'] text = doi2paper[doi]['metadata']['title'] + sep + doi2paper[doi]['abstract'][0]['text'] else: raise ValueError('DOI not found') return text # Positive samples pos_rows = [] for idx, r in tmp_df.iterrows(): text = get_text_from_doi(r['from_doi']) text_b = get_text_from_doi(r['to_doi']) # Filter out empty texts if text != '' and text_b != '': pos_rows.append((r['from_doi'], r['to_doi'], text, text_b, r[label_col])) ``` # Negative sampling Requirements: - no connected with citation - no co-citation - no shared author - no shared venue ``` all_dois = list(map(str, set(list(doi2s2paper.keys()) + list(doi2paper.keys())))) print(f'Total DOIs: {len(all_dois):,}') def get_cit_pair(a, b): # ensure citation pair is always in same order if a > b: return (a, b) else: return (b, a) cits_set = set([get_cit_pair(from_doi, to_doi) for from_doi, to_doi, label in cits_with_doi]) print(f'Total citation count: {len(cits_set):,}') # co cits from_to_cits = defaultdict(set) for from_doi, to_doi, label in cits_with_doi: from_to_cits[from_doi].add(to_doi) cocits_set = set() for from_cit, to_cits in from_to_cits.items(): for a in to_cits: for b in to_cits: cocits_set.add(get_cit_pair(a,b)) print(f'total co-citation count: {len(cocits_set):,}') # shared author def get_authors(doi): if doi in doi2s2paper: s2paper = doi2s2paper[doi] last_names = [a['name'].split()[-1].lower() for a in s2paper['authors']] return last_names elif doi in doi2paper: paper = doi2paper[doi] last_names = [a['last'].lower() for a in paper['metadata']['authors']] return last_names else: raise ValueError(f'DOI not found: {doi}') def have_no_shared_authors(a_doi, b_doi): try: a_authors = set(get_authors(a_doi)) b_authors = set(get_authors(b_doi)) overlap = a_authors & b_authors if len(overlap) == 
0: return True else: return False except ValueError: return False # has same venue def get_venue(doi): if doi in doi2s2paper: s2paper = doi2s2paper[doi] return s2paper['venue'].lower().strip() elif doi in doi2paper: paper = doi2paper[doi] venue = paper['_meta']['journal'] if isinstance(venue, float) and math.isnan(venue): return '' else: return venue.lower().strip() else: raise ValueError(f'DOI not found: {doi}') def have_not_same_venue(a_doi, b_doi): a_venue = get_venue(a_doi) b_venue = get_venue(b_doi) if a_venue == "" or b_venue == "": # cant answer if venue is not set return False if fuzz.ratio(a_venue, b_venue) < 0.75: # fuzzy string matching score must be low! return True else: return False negative_label = 'none' #negative_needed = 10000 #105492 # len(df) negative_ratio = 0.5 negative_needed = math.ceil(len(pos_rows) * 0.5) negative_rows = [] negative_pairs = set() tries = 0 # Negatives needed: 52,746 (ratio: 0.5) print(f'Negatives needed: {negative_needed:,} (ratio: {negative_ratio})') while len(negative_pairs) < negative_needed: a = random.choice(all_dois) b = random.choice(all_dois) if a == b: tries += 1 continue pair = tuple((a,b)) if pair in negative_pairs: continue cit_pair = get_cit_pair(a,b) if cit_pair in cits_set: tries += 1 continue if cit_pair in cocits_set: tries += 1 continue if not have_no_shared_authors(a, b): tries += 1 continue if not have_not_same_venue(a, b): tries += 1 continue text = get_text_from_doi(a) text_b = get_text_from_doi(b) if text == '' or text_b == '': continue # None of the criteria above matches... 
negative_pairs.add(pair) negative_rows.append(( a, b, text, text_b, negative_label, )) # Found 45,923 negative rows (tried 16,069,718 random samples) print(f'Found {len(negative_rows):,} negative rows (tried {tries:,} random samples)') ``` # Merge pos + neg samples ``` # construct df = pd.DataFrame(pos_rows + negative_rows, columns=['from_doi', 'to_doi', 'text', 'text_b', label_col]) print(f'Total df rows: {len(df)}') df.drop_duplicates(['text', 'text_b'], keep='first', inplace=True) print(f'After drop_duplicates - df rows: {len(df)}') df df[label_col].value_counts() # Sample data (for debugging & development) sample_df = df.sample(n=1000, weights=df[label_col].value_counts()[df[label_col]].values.tolist()) display(sample_df[label_col].value_counts()) sample_kf = StratifiedKFold(n_splits=4, random_state=0, shuffle=True) for train_index, test_index in sample_kf.split(sample_df.index.tolist(), sample_df[label_col].values.tolist()): k = 'sample_1k' split_dir = os.path.join(cord19_dir, 'splits', str(k)) if not os.path.exists(split_dir): os.makedirs(split_dir) split_train_df = sample_df.iloc[train_index] split_test_df = sample_df.iloc[test_index] print(f'Total: {len(sample_df):,}; Train: {len(split_train_df):,}; Test: {len(split_test_df):,}') split_train_df.to_csv(os.path.join(split_dir, 'train.tsv'), sep='\t', index=False) split_test_df.to_csv(os.path.join(split_dir, 'test.tsv'), sep='\t', index=False) break # we only need one sample set! 
del sample_kf labels # Full training and test set kf = StratifiedKFold(n_splits=n_splits, random_state=0, shuffle=True) # Stratified K-Folds cross-validator for k, (train_index, test_index) in enumerate(kf.split(df.index.tolist(), df[label_col].values.tolist()), 1): split_dir = os.path.join(cord19_dir, 'splits', str(k)) if not os.path.exists(split_dir): os.makedirs(split_dir) split_train_df = df.iloc[train_index] split_test_df = df.iloc[test_index] print(f'Total: {len(df):,}; Train: {len(split_train_df):,}; Test: {len(split_test_df):,}') split_train_df.to_csv(os.path.join(split_dir, 'train.tsv'), sep='\t', index=False) split_test_df.to_csv(os.path.join(split_dir, 'test.tsv'), sep='\t', index=False) #break # ... ``` # Scrape missing paper data from S2 API with DOI ``` with open(os.path.join(scraper_dir, 'unique_missing_papers.json'), 'r') as f: unique_missing_papers = set(json.load(f)) len(unique_missing_papers) # list of DOIs with open(os.path.join(scraper_dir, 'unique_missing_papers.json'), 'w') as f: json.dump(list(unique_missing_papers), f) errors = set() notfound = set() doi2paper = {} with open(os.path.join(scraper_dir, 'doi2paper.json'), 'r') as f: doi2paper = json.load(f) with open(os.path.join(scraper_dir, 'errors.json'), 'r') as f: errors = set(json.load(f)) with open(os.path.join(scraper_dir, 'notfound.json'), 'r') as f: notfound = set(json.load(f)) print(f'doi2paper: {len(doi2paper)}') # 12451, 22938 print(f'errors: {len(errors)}') print(f'notfound: {len(notfound)}') check_points = [] api_url = 'http://api.semanticscholar.org/v1/paper/' offset = 0 for i, doi in enumerate(tqdm(unique_missing_papers, total=len(unique_missing_papers))): if i < offset: # skip continue if doi in doi2paper or doi in errors or doi in notfound: continue res = requests.get(api_url + doi) if res.status_code == 200: try: doi2paper[doi] = res.json() except ValueError: print(f'Error cannot parse JSON: {doi}') errors.add(doi) elif res.status_code == 429: print(f'Stop! 
Rate limit reached at: {i}') break elif res.status_code == 403: print(f'Stop! Forbidden / rate limit reached at: {i}') break elif res.status_code == 404: notfound.add(doi) else: print(f'Error status: {res.status_code} - {doi}') errors.add(doi) if (i % 10000) == 0 and i > 0: with open(os.path.join(scraper_dir, 'doi2paper.json'), 'w') as f: json.dump(doi2paper, f) with open(os.path.join(scraper_dir, 'errors.json'), 'w') as f: json.dump(list(errors), f) with open(os.path.join(scraper_dir, 'notfound.json'), 'w') as f: json.dump(list(notfound), f) time.sleep(2.5) i res.json() requests.get(api_url + '10.4314/ovj.v8i1.5').json() with open(os.path.join(scraper_dir, 'doi2paper.json'), 'w') as f: json.dump(doi2paper, f) with open(os.path.join(scraper_dir, 'errors.json'), 'w') as f: json.dump(list(errors), f) with open(os.path.join(scraper_dir, 'notfound.json'), 'w') as f: json.dump(list(notfound), f) len(doi2paper) # 22938 len(errors) # 98 len(notfound) # 1526 ``` # Citation network dedup ``` cit_df = pd.read_parquet(os.path.join(cord19_dir, 't60_citation_index.parquet')) for idx, row in cit_df.iterrows(): break row row.approx_citation_ids has_shared_authors(a,b) last_names = [a['name'].split()[-1].lower() for a in s2paper['authors']] last_names for doi, paper in doi2paper.items(): break paper last_names = [a['last'].lower() for a in paper['metadata']['authors']] last_names s2paper['venue'] math.isnan(paper['_meta']['journal']) ``` # Validate train/test ``` error_pairs = [] for k in range(1, n_splits+1): split_dir = os.path.join(cord19_dir, 'splits', str(k)) split_train_df = pd.read_csv(os.path.join(split_dir, 'train.tsv'), sep='\t') split_test_df = pd.read_csv(os.path.join(split_dir, 'test.tsv'), sep='\t') train_pairs = split_train_df[['text', 'text_b']].values.tolist() test_pairs = split_test_df[['text', 'text_b']].values.tolist() for p in test_pairs: if p in train_pairs: #raise ValueError('ERROR - Test pair exists also in train!') error_pairs.append(p) len(error_pairs) 
error_pairs p ``` # Meta data - title - year - venue - authors - arxiv_id - s2_id - doi - cord19_id ``` meta_rows = [] # Cord 19 for i, (doi, p) in enumerate(doi2paper.items()): m = p['_meta'] meta_rows.append({ 'doi': doi, 'title': m['title'], 'authors': m['authors'], 'year': int(m['publish_time'].split('-')[0]), 'venue': '' if isinstance(m['journal'], float) and math.isnan(m['journal']) else m['journal'], 's2_id': p['paper_id'], 'arxiv_id': '', 'in_citations_count': -1, 'out_citations_count': len(p['bib_entries']), }) # S2 paper for i, (doi, p) in enumerate(doi2s2paper.items()): meta_rows.append({ 'doi': doi, 'title': p['title'], 'authors': '; '.join([a['name'] for a in p['authors']]), 'year': int(p['year'] or 0), 'venue': p['venue'] or '', 's2_id': p['paperId'], 'arxiv_id': p['arxivId'] or '', 'in_citations_count': len(p['citations']), 'out_citations_count': len(p['references']), }) meta_df = pd.DataFrame(meta_rows) #, dtype={'year': 'number'}) meta_df meta_df['year'].value_counts() meta_df.to_csv(os.path.join(cord19_dir, 'meta.csv'), index=False) json.dumps(meta_rows) #meta_rows = [] if i > 10: break #p['abstract'][0]['text'] #p['metadata']['title'] meta_rows p.keys() p.keys() p['_meta']['authors'] p['paper_id'] ```
github_jupyter
``` import sys, os import numpy as np import pandas as pd from matplotlib import pyplot as plt from scipy.stats import bayes_mvs as bayesest import time from PyEcoLib.simulator import Simulator %matplotlib inline mean_size = 1 # femto liter doubling_time = 18 #min tmax = 180 #min sample_time = 2 #min div_steps = 10 ncells = 5000 gr = np.log(2)/doubling_time if not os.path.exists('./data'): os.makedirs('./data') #data path if not os.path.exists('./figures'): os.makedirs('./figures') #Figures path start = time.time() sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps) sim.divstrat(tmax = tmax, sample_time = 0.1*doubling_time, nameDSM = "./data/dataDSMadder.csv") print('It took', np.int(time.time()-start), 'seconds.') start = time.time() sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,lamb = 2) sim.divstrat(tmax = tmax, sample_time = 0.1*doubling_time, nameDSM = "./data/dataDSMsizer.csv") print('It took', np.int(time.time()-start), 'seconds.') start = time.time() sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,lamb = 0.5) sim.divstrat(tmax = tmax, sample_time = 0.1*doubling_time, nameDSM = "./data/dataDSMtimer.csv") print('It took', np.int(time.time()-start), 'seconds.') start = time.time() sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps) sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM1.csv") print('It took', np.int(time.time()-start), 'seconds.') CV2sz = 0.02 v0 = mean_size*np.random.gamma(shape=1/CV2sz,scale=CV2sz,size=ncells) start = time.time() sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps,V0array=v0) sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM2.csv") print('It took', np.int(time.time()-start), 'seconds.') CV2div = 0.002 CV2gr = 0.02 start = time.time() sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = div_steps, CV2div = CV2div, CV2gr = CV2gr) sim.szdyn(tmax = tmax, 
sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM3.csv") print('It took', np.int(time.time()-start), 'seconds.') data1=pd.read_csv("./data/dataCRM1.csv") timearray1=data1.time.unique() mnszarray1=[] cvszarray1=[] errcv2szarray1=[] errmnszarray1=[] df=data1 del df['time'] for m in range(len(df)): szs=df.loc[m, :].values.tolist() mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95) mnszarray1.append(np.mean(szs)) errmnszarray1.append(mean_cntr[1][1]-mean_cntr[0]) cvszarray1.append(np.var(szs)/np.mean(szs)**2) errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3 errcv2szarray1.append(errv) data1=pd.read_csv("./data/dataCRM2.csv") timearray2=data1.time.unique() mnszarray2=[] cvszarray2=[] errcv2szarray2=[] errmnszarray2=[] df=data1 del df['time'] for m in range(len(df)): szs=df.loc[m, :].values.tolist() mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95) mnszarray2.append(np.mean(szs)) errmnszarray2.append(mean_cntr[1][1]-mean_cntr[0]) cvszarray2.append(np.var(szs)/np.mean(szs)**2) errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3 errcv2szarray2.append(errv) data1=pd.read_csv("./data/dataCRM3.csv") timearray3=data1.time.unique() mnszarray3=[] cvszarray3=[] errcv2szarray3=[] errmnszarray3=[] df=data1 del df['time'] for m in range(len(df)): szs=df.loc[m, :].values.tolist() mean_cntr, var_cntr, std_cntr = bayesest(szs,alpha=0.95) mnszarray3.append(np.mean(szs)) errmnszarray3.append(mean_cntr[1][1]-mean_cntr[0]) cvszarray3.append(np.var(szs)/np.mean(szs)**2) errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3 errcv2szarray3.append(errv) start = time.time() sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps) sim.szdynFSP(tmax = tmax, nameFSP = "./data/dataFSP0.csv") print('It took', np.int(time.time()-start), 'seconds.') start = time.time() CV2sz = 0.02 sim = 
Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps) sim.szdynFSP(tmax = tmax, nameFSP = "./data/dataFSP.csv",CV2sz=CV2sz) print('It took', np.int(time.time()-start), 'seconds.') fig, ax = plt.subplots(2,3, figsize=(16,6),sharex=True) data=pd.read_csv("./data/dataCRM1.csv") tt=data.time del data['time'] mmar=data.columns for column in df.columns[0:10]: ax[0,0].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_') data=pd.read_csv("./data/dataCRM2.csv") tt=data.time del data['time'] mmar=data.columns for column in df.columns[0:10]: ax[0,1].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_') data=pd.read_csv("./data/dataCRM3.csv") tt=data.time del data['time'] mmar=data.columns for column in df.columns[0:10]: ax[0,2].plot(tt/doubling_time,data[column],c="#B9B9B9") ax[0,0].plot(np.array(timearray1)/doubling_time,mnszarray1,lw=2) ax[0,0].fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray1)-np.array(errmnszarray1),np.array(mnszarray1) +np.array(errmnszarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA") ax[1,0].plot(np.array(timearray1)/doubling_time,cvszarray1,lw=2) ax[1,0].fill_between(np.array(timearray1)/doubling_time,np.array(cvszarray1)-np.array(errcv2szarray1),np.array(cvszarray1) +np.array(errcv2szarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0) ax[0,1].plot(np.array(timearray2)/doubling_time,mnszarray2,lw=2) ax[0,1].fill_between(np.array(timearray2)/doubling_time,np.array(mnszarray2)-np.array(errmnszarray2),np.array(mnszarray2) +np.array(errmnszarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA") ax[1,1].plot(np.array(timearray2)/doubling_time,cvszarray2,lw=2) ax[1,1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray2)-np.array(errcv2szarray2),np.array(cvszarray2) +np.array(errcv2szarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0) 
ax[0,2].plot(np.array(timearray3)/doubling_time,mnszarray3,lw=2) ax[0,2].fill_between(np.array(timearray3)/doubling_time,np.array(mnszarray3)-np.array(errmnszarray3),np.array(mnszarray3) +np.array(errmnszarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA") ax[1,2].plot(np.array(timearray3)/doubling_time,cvszarray3,lw=2) ax[1,2].fill_between(np.array(timearray3)/doubling_time,np.array(cvszarray3)-np.array(errcv2szarray3),np.array(cvszarray3) +np.array(errcv2szarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0) ax[0,0].set_title("Stochastic division",fontsize=15) ax[0,1].set_title("Finite Initial Distribution",fontsize=15) ax[0,2].set_title("Noisy Splitting",fontsize=15) data=pd.read_csv("./data/dataFSP.csv") ax[0,1].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric") ax[1,1].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g') data=pd.read_csv("./data/dataFSP0.csv") ax[0,0].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric") ax[1,0].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g') ax[0,0].legend(fontsize=15) ax[0,1].legend(fontsize=15) ax[0,0].set_ylabel(r"$\langle s\rangle$ $(\mu m)$",size=15) ax[1,0].set_ylabel("$C_V^2(s)$",size=15) ax[1,0].set_xlabel(r"$t/\tau$",size=15) ax[1,1].set_xlabel(r"$t/\tau$",size=15) ax[1,2].set_xlabel(r"$t/\tau$",size=15) for l in [0,1]: for m in [0,1,2]: ax[l,m].set_xlim([0,6]) taqui=np.arange(0,7,step=1) ax[l,m].set_xticks(np.array(taqui)) ax[l,m].grid() ax[l,m].tick_params(axis='x', labelsize=12) ax[l,m].tick_params(axis='y', labelsize=12) for axis in ['bottom','left']: ax[l,m].spines[axis].set_linewidth(2) ax[l,m].tick_params(axis='both', width=2,length=6) for axis in ['top','right']: ax[l,m].spines[axis].set_linewidth(0) ax[l,m].tick_params(axis='both', width=0,length=6) taqui=np.arange(0,0.13,step=0.02) ax[1,m].set_yticks(np.array(taqui)) taqui=np.arange(0.5,3,step=.5) 
ax[0,m].set_yticks(np.array(taqui)) ax[1,m].set_ylim([0,0.13]) ax[0,m].set_ylim([0.5,3]) plt.subplots_adjust(hspace=0.15,wspace=0.2) #ax[1].plot(time4,np.array(allvarsz4),c='r') #ax[0].plot(time4,mean_size*np.array(allmeansz4),c='r',label="Numeric") plt.savefig('./figures/size_statistics_comp1.eps',bbox_inches='tight') plt.savefig('./figures/size_statistics_comp1.svg',bbox_inches='tight') plt.savefig('./figures/size_statistics_comp1.png',bbox_inches='tight') data2=pd.read_csv("./data/dataDSMadder.csv") data2=data2[data2.time>5*doubling_time] quantnumber=5 pvadd2=data2 CV2darr1=[] deltarr1=[] sbarr1=[] errcv2darr1=[] errdeltarr1=[] errsbarr1=[] for i in range(quantnumber): lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber) hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber) quanta1=pvadd2[pvadd2.S_b>lperv0] quanta2=quanta1[quanta1.S_b<hperv0] mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95) meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95) CV2darr1.append(var_cntr[0]/mean_cntr[0]**2) deltarr1.append(mean_cntr[0]) sbarr1.append(meanv0_cntr[0]) errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3 errcv2darr1.append(errv) errdeltarr1.append(mean_cntr[1][1]-mean_cntr[0]) errsbarr1.append(meanv0_cntr[1][1]-meanv0_cntr[0]) data3=pd.read_csv("./data/dataDSMsizer.csv") data3=data3[data3.time>5*doubling_time] quantnumber=5 pvadd2=data3 CV2darr2=[] deltarr2=[] sbarr2=[] errcv2darr2=[] errdeltarr2=[] errsbarr2=[] for i in range(quantnumber): lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber) hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber) quanta1=pvadd2[pvadd2.S_b>lperv0] quanta2=quanta1[quanta1.S_b<hperv0] mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95) meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95) 
CV2darr2.append(var_cntr[0]/mean_cntr[0]**2) deltarr2.append(mean_cntr[0]) sbarr2.append(meanv0_cntr[0]) errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3 errcv2darr2.append(errv) errdeltarr2.append(mean_cntr[1][1]-mean_cntr[0]) errsbarr2.append(meanv0_cntr[1][1]-meanv0_cntr[0]) data4=pd.read_csv("./data/dataDSMtimer.csv") data4=data4[data4.time>5*doubling_time] quantnumber=5 pvadd2=data4 CV2darr3=[] deltarr3=[] sbarr3=[] errcv2darr3=[] errdeltarr3=[] errsbarr3=[] for i in range(quantnumber): lperv0=np.percentile(pvadd2.S_b,i*100/quantnumber) hperv0=np.percentile(pvadd2.S_b,(i+1)*100/quantnumber) quanta1=pvadd2[pvadd2.S_b>lperv0] quanta2=quanta1[quanta1.S_b<hperv0] mean_cntr, var_cntr, std_cntr = bayesest((quanta2.S_d-quanta2.S_b)/np.mean(pvadd2.S_d-pvadd2.S_b),alpha=0.95) meanv0_cntr, varv0_cntr, stdv0_cntr = bayesest(quanta2.S_b/np.mean(pvadd2.S_b),alpha=0.95) CV2darr3.append(var_cntr[0]/mean_cntr[0]**2) deltarr3.append(mean_cntr[0]) sbarr3.append(meanv0_cntr[0]) errv=(var_cntr[1][1]-var_cntr[0])/mean_cntr[0]**2+2*(mean_cntr[1][1]-mean_cntr[0])*var_cntr[0]/mean_cntr[0]**3 errcv2darr3.append(errv) errdeltarr3.append(mean_cntr[1][1]-mean_cntr[0]) errsbarr3.append(meanv0_cntr[1][1]-meanv0_cntr[0]) print(np.mean(pvadd2.S_b)) print(np.mean(pvadd2.S_d-pvadd2.S_b)) sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps,lamb=0.5) sbar=np.linspace(0.5,1.5,100)*mean_size cv2tim=[] delttim=[] for i in sbar: Added,cv2=sim.SdStat(i) cv2tim.append(cv2) delttim.append(Added) sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps) sbar=np.linspace(0.5,1.5,100)*mean_size cv2ad=[] deltad=[] for i in sbar: Added,cv2=sim.SdStat(i) cv2ad.append(cv2) deltad.append(Added) sim = Simulator(ncells=1, gr = gr, sb=mean_size, steps = div_steps,lamb=2) sbar=np.linspace(0.5,1.5,100)*mean_size cv2sz=[] deltsz=[] for i in sbar: Added,cv2=sim.SdStat(i) cv2sz.append(cv2) deltsz.append(Added) fig, ax = 
plt.subplots(1,2, figsize=(12,4)) #ax[0].scatter(data2.S_b/np.mean(data2.S_b),(data2.S_d-data2.S_b)/np.mean(data2.S_b),s=2) #ax[0].scatter(data3.S_b/np.mean(data3.S_b),(data2.S_d-data3.S_b)/np.mean(data3.S_b),s=2) #ax[0].scatter(data4.S_b/np.mean(data4.S_b),(data4.S_d-data2.S_b)/np.mean(data4.S_b),s=2) ax[0].errorbar(np.array(sbarr1),np.array(deltarr1),xerr=errsbarr1,yerr=errdeltarr1, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='k') ax[1].errorbar(np.array(sbarr1),CV2darr1,xerr=errsbarr1,yerr=errcv2darr1, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='k') ax[0].errorbar(np.array(sbarr2),np.array(deltarr2),xerr=errsbarr2,yerr=errdeltarr2, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='r') ax[1].errorbar(np.array(sbarr2),CV2darr2,xerr=errsbarr2,yerr=errcv2darr2, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='r') ax[0].errorbar(np.array(sbarr3),np.array(deltarr3),xerr=errsbarr3,yerr=errdeltarr3, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='g') ax[1].errorbar(np.array(sbarr3),CV2darr3,xerr=errsbarr3,yerr=errcv2darr3, fmt='o',mec='k',capsize=5,markersize='8',elinewidth=3,c='g') ax[1].set_ylim([0,0.3]) ax[0].set_xlabel("$s_b/\overline{s_b}$",size=20) ax[1].set_xlabel("$s_b/\overline{s_b}$",size=20) ax[0].set_ylabel("$\Delta/\overline{s_b}$",size=15) ax[1].set_ylabel("$C_V^2(\Delta)$",size=15) #ax[0].set_xlim([0.5,1.5]) for l in [0,1]: #ax[l].set_xlim([0.2,2]) ax[l].grid() ax[l].tick_params(axis='x', labelsize=15) ax[l].tick_params(axis='y', labelsize=15) for axis in ['bottom','left']: ax[l].spines[axis].set_linewidth(2) ax[l].tick_params(axis='both', width=2,length=6) for axis in ['top','right']: ax[l].spines[axis].set_linewidth(0) ax[l].tick_params(axis='both', width=0,length=6) ax[0].plot(np.array(sbar)/mean_size, np.array(delttim)/mean_size, lw=2,c='g',label="$\lambda=0.5$") ax[1].plot(np.array(sbar)/mean_size, cv2tim, lw=2,c='g') ax[0].plot(np.array(sbar)/mean_size, np.array(deltad)/mean_size, 
lw=2,c='k',label="$\lambda=1$") ax[1].plot(np.array(sbar)/mean_size, cv2ad, lw=2,c='k') ax[0].plot(np.array(sbar)/mean_size, np.array(deltsz)/mean_size, lw=2,c='r',label="$\lambda=2$") ax[1].plot(np.array(sbar)/mean_size, cv2sz, lw=2,c='r') ax[0].set_ylim(0.75,1.35) ax[1].set_ylim(0.03,0.17) ax[0].text(0.55,1.27,"$\lambda = 2$",rotation=-35,fontsize=10) ax[0].text(0.55,1.01,"$\lambda = 1$",fontsize=10) ax[0].text(0.55,0.87,"$\lambda = 0.5$",rotation=35,fontsize=10) ax[1].text(0.5,0.05,"$\lambda = 2$",rotation=15,fontsize=10) ax[1].text(0.5,0.11,"$\lambda = 1$",fontsize=10) ax[1].text(0.5,0.155,"$\lambda = 0.5$",rotation=-10,fontsize=10) #ax[0].set_ylim([0.7,1.5]) plt.savefig('./figures/div_strategy.eps',bbox_inches='tight') plt.savefig('./figures/div_strategy.svg',bbox_inches='tight') plt.savefig('./figures/div_strategy.png',bbox_inches='tight') fig, ax = plt.subplots(2,4, figsize=(16,5)) data=pd.read_csv("./data/dataCRM1.csv") tt=data.time del data['time'] for column in data.columns[0:10]: ax[0,0].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_') data=pd.read_csv("./data/dataCRM2.csv") tt=data.time del data['time'] for column in data.columns[0:10]: ax[0,1].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_') data=pd.read_csv("./data/dataCRM3.csv") tt=data.time del data['time'] for column in data.columns[0:10]: ax[0,2].plot(tt/doubling_time,data[column],c="#B9B9B9",label='_nolegend_') ax[0,0].plot(np.array(timearray1)/doubling_time,mnszarray1,lw=2) ax[0,0].fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray1)-np.array(errmnszarray1),np.array(mnszarray1) +np.array(errmnszarray1),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA") ax[1,0].plot(np.array(timearray1)/doubling_time,cvszarray1,lw=2) ax[1,0].fill_between(np.array(timearray1)/doubling_time,np.array(cvszarray1)-np.array(errcv2szarray1),np.array(cvszarray1) +np.array(errcv2szarray1),alpha=1, edgecolor='#4db8ff', 
facecolor='#4db8ff',linewidth=0) ax[0,1].plot(np.array(timearray2)/doubling_time,mnszarray2,lw=2) ax[0,1].fill_between(np.array(timearray2)/doubling_time,np.array(mnszarray2)-np.array(errmnszarray2),np.array(mnszarray2) +np.array(errmnszarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA") ax[1,1].plot(np.array(timearray2)/doubling_time,cvszarray2,lw=2) ax[1,1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray2)-np.array(errcv2szarray2),np.array(cvszarray2) +np.array(errcv2szarray2),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0) ax[0,2].plot(np.array(timearray3)/doubling_time,mnszarray3,lw=2) ax[0,2].fill_between(np.array(timearray3)/doubling_time,np.array(mnszarray3)-np.array(errmnszarray3),np.array(mnszarray3) +np.array(errmnszarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA") ax[1,2].plot(np.array(timearray3)/doubling_time,cvszarray3,lw=2) ax[1,2].fill_between(np.array(timearray3)/doubling_time,np.array(cvszarray3)-np.array(errcv2szarray3),np.array(cvszarray3) +np.array(errcv2szarray3),alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0) #ax[0].set_ylim([1,1.7]) #ax[1].set_ylim([0,0.15]) ax[0,0].set_title("Stochastic division",fontsize=15) ax[0,1].set_title("Finite Initial Distribution",fontsize=15) ax[0,2].set_title("Noisy Splitting",fontsize=15) data=pd.read_csv("./data/dataFSP.csv") ax[0,1].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric") ax[1,1].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g') data=pd.read_csv("./data/dataFSP0.csv") ax[0,0].plot(data.time/doubling_time,data.Meansize,ls='--',c='g',label="Numeric") ax[1,0].plot(data.time/doubling_time,data.VarSize/data.Meansize**2,ls='--',c='g') ax[0,0].legend(fontsize=10) ax[0,1].legend(fontsize=10) ax[0,2].legend(fontsize=10) #ax[0,1].legend(fontsize=10) ax[0,3].errorbar(np.array(sbarr1),np.array(deltarr1),xerr=errsbarr1,yerr=errdeltarr1, 
fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='k') ax[1,3].errorbar(np.array(sbarr1),CV2darr1,xerr=errsbarr1,yerr=errcv2darr1, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='k') ax[0,3].errorbar(np.array(sbarr2),np.array(deltarr2),xerr=errsbarr2,yerr=errdeltarr2, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='r') ax[1,3].errorbar(np.array(sbarr2),CV2darr2,xerr=errsbarr2,yerr=errcv2darr2, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='r') ax[0,3].errorbar(np.array(sbarr3),np.array(deltarr3),xerr=errsbarr3,yerr=errdeltarr3, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='g') ax[1,3].errorbar(np.array(sbarr3),CV2darr3,xerr=errsbarr3,yerr=errcv2darr3, fmt='o',mec='k',capsize=3,markersize='6',elinewidth=3,c='g') ax[0,3].plot(np.array(sbar)/mean_size, np.array(delttim)/mean_size, lw=2,c='g',label="$\lambda=0.5$") ax[1,3].plot(np.array(sbar)/mean_size, cv2tim, lw=2,c='g') ax[0,3].plot(np.array(sbar)/mean_size, np.array(deltad)/mean_size, lw=2,c='k',label="$\lambda=1$") ax[1,3].plot(np.array(sbar)/mean_size, cv2ad, lw=2,c='k') ax[0,3].plot(np.array(sbar)/mean_size, np.array(deltsz)/mean_size, lw=2,c='r',label="$\lambda=2$") ax[1,3].plot(np.array(sbar)/mean_size, cv2sz, lw=2,c='r') ax[0,0].set_ylabel(r"$\langle s\rangle$ $(fl)$",size=15) ax[1,0].set_ylabel("$C_V^2(s)$",size=15) ax[1,0].set_xlabel(r"$t/\tau$",size=15) ax[1,1].set_xlabel(r"$t/\tau$",size=15) ax[1,2].set_xlabel(r"$t/\tau$",size=15) ax[1,3].set_xlabel(r"$s_b/\overline{s_b}$",size=15) #ax[0].set_ylim([1,1.7]) #ax[1].set_ylim([0,0.15]) for l in [0,1]: for m in [0,1,2,3]: ax[l,m].grid() ax[l,m].tick_params(axis='x', labelsize=12) ax[l,m].tick_params(axis='y', labelsize=12) for axis in ['bottom','left']: ax[l,m].spines[axis].set_linewidth(2) ax[l,m].tick_params(axis='both', width=2,length=6) for axis in ['top','right']: ax[l,m].spines[axis].set_linewidth(0) ax[l,m].tick_params(axis='both', width=0,length=6) if m !=3: ax[l,m].set_xlim([0,6]) 
taqui=np.arange(0,7,step=1) ax[l,m].set_xticks(np.array(taqui)) taqui=np.arange(0,0.13,step=0.02) ax[1,m].set_yticks(np.array(taqui)) taqui=np.arange(0.5,3.5,step=0.5) ax[0,m].set_yticks(np.array(taqui)) ax[1,m].set_ylim([0,0.13]) ax[0,m].set_ylim([0.5,2.9]) plt.subplots_adjust(hspace=0.3,wspace=0.35) if not os.path.exists('./figures'): os.makedirs('./figures') ax[0,0].set_title("Stochastic division",fontsize=15) ax[0,1].set_title("Finite Initial Distribution",fontsize=15) ax[0,2].set_title("Noisy Splitting",fontsize=15) ax[0,3].set_title("Division Strategy",fontsize=15) #ax[0,3].legend(fontsize = 10) ax[0,3].set_ylim(0.75,1.35) ax[1,3].set_ylim(0.03,0.17) ax[0,3].text(0.5,1.31,"$\lambda = 2$",rotation=-35,fontsize=10) ax[0,3].text(0.5,1.01,"$\lambda = 1$",fontsize=10) ax[0,3].text(0.5,0.9,"$\lambda = 0.5$",rotation=35,fontsize=10) ax[1,3].text(0.5,0.055,"$\lambda = 2$",rotation=12,fontsize=10) ax[1,3].text(0.5,0.11,"$\lambda = 1$",fontsize=10) ax[1,3].text(0.5,0.16,"$\lambda = 0.5$",rotation=-10,fontsize=10) ax[0,3].set_ylabel(r"$\Delta/\overline{s_b}$",size=15) ax[1,3].set_ylabel(r"$C_v^2(\Delta)$",size=15) #ax[0].legend(fontsize=15) #ax[1].plot(time4,np.array(allvarsz4),c='r') #ax[0].plot(time4,mean_size*np.array(allmeansz4),c='r',label="Numeric") ax[0,0].text(-1,3,"a)",fontsize=15) ax[0,1].text(-1,3.,"b)",fontsize=15) ax[0,2].text(-1,3.,"c)",fontsize=15) ax[1,0].text(-1,0.13,"e)",fontsize=15) ax[1,1].text(-1,0.13,"f)",fontsize=15) ax[1,2].text(-1,0.13,"g)",fontsize=15) ax[0,3].text(0.25,1.35,"d)",fontsize=15) ax[1,3].text(0.25,0.17,"h)",fontsize=15) plt.savefig('./figures/size_statistics_comparison.svg',bbox_inches='tight') plt.savefig('./figures/size_statistics_comparison.png',bbox_inches='tight') plt.savefig('./figures/size_statistics_comparison.eps',bbox_inches='tight') data=pd.read_csv("./data/dataCRM1.csv") taumax=50 tauarr1=range(taumax) tarr=data.time.tolist() corarr1=[] for tau in tauarr1: xx=[] yy=[] for i in range(len(tarr)-tau): 
df=data[data.time==tarr[i]] del df['time'] df2=data[data.time==tarr[i+tau]] del df2['time'] #print(df.iloc[0].tolist()) A=df.iloc[0].tolist() B=df2.iloc[0].tolist() for m in range(len(A)): xx.append(A[m]) for m in range(len(A)): yy.append(B[m]) #xx.append() #yy.append(df2.tolist()) corarr1.append(np.corrcoef(xx,yy)[0][1]) #print() data=pd.read_csv("./data/dataCRM2.csv") taumax=50 tauarr2=range(taumax) tarr=data.time.tolist() corarr2=[] for tau in tauarr2: xx=[] yy=[] for i in range(len(tarr)-tau): df=data[data.time==tarr[i]] del df['time'] df2=data[data.time==tarr[i+tau]] del df2['time'] #print(df.iloc[0].tolist()) A=df.iloc[0].tolist() B=df2.iloc[0].tolist() for m in range(len(A)): xx.append(A[m]) for m in range(len(A)): yy.append(B[m]) corarr2.append(np.corrcoef(xx,yy)[0][1]) start = time.time() CV2sz = 0.02 v0 = mean_size*np.random.gamma(shape=1/CV2sz,scale=CV2sz,size=ncells) sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = 10,V0array=v0) sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM10stp.csv") print('It took', np.int(time.time()-start), 'seconds.') data=pd.read_csv("./data/dataCRM10stp.csv") taumax=50 tauarr10=range(taumax) tarr=data.time.tolist() corarr10=[] for tau in tauarr10: xx=[] yy=[] for i in range(len(tarr)-tau): df=data[data.time==tarr[i]] del df['time'] df2=data[data.time==tarr[i+tau]] del df2['time'] #print(df.iloc[0].tolist()) A=df.iloc[0].tolist() B=df2.iloc[0].tolist() for m in range(len(A)): xx.append(A[m]) for m in range(len(A)): yy.append(B[m]) #xx.append() #yy.append(df2.tolist()) corarr10.append(np.corrcoef(xx,yy)[0][1]) start = time.time() CV2sz = 0.02 v0 = mean_size*np.random.gamma(shape=1/CV2sz,scale=CV2sz,size=ncells) sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = 50,V0array=v0) sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM50stp.csv") print('It took', np.int(time.time()-start), 'seconds.') data=pd.read_csv("./data/dataCRM50stp.csv") 
taumax=50 tauarr50=range(taumax) tarr=data.time.tolist() corarr50=[] for tau in tauarr50: xx=[] yy=[] for i in range(len(tarr)-tau): df=data[data.time==tarr[i]] del df['time'] df2=data[data.time==tarr[i+tau]] del df2['time'] #print(df.iloc[0].tolist()) A=df.iloc[0].tolist() B=df2.iloc[0].tolist() for m in range(len(A)): xx.append(A[m]) for m in range(len(A)): yy.append(B[m]) #xx.append() #yy.append(df2.tolist()) corarr50.append(np.corrcoef(xx,yy)[0][1]) start = time.time() CV2sz = 0.02 v0 = mean_size*np.random.gamma(shape=1/CV2sz,scale=CV2sz,size=ncells) sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = 1,V0array=v0) sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM1stp.csv") print('It took', np.int(time.time()-start), 'seconds.') data=pd.read_csv("./data/dataCRM1stp.csv") taumax=50 tauarr1stp=range(taumax) tarr=data.time.tolist() corarr1stp=[] for tau in tauarr1stp: xx=[] yy=[] for i in range(len(tarr)-tau): df=data[data.time==tarr[i]] del df['time'] df2=data[data.time==tarr[i+tau]] del df2['time'] #print(df.iloc[0].tolist()) A=df.iloc[0].tolist() B=df2.iloc[0].tolist() for m in range(len(A)): xx.append(A[m]) for m in range(len(A)): yy.append(B[m]) #xx.append() #yy.append(df2.tolist()) corarr1stp.append(np.corrcoef(xx,yy)[0][1]) start = time.time() CV2sz = 0.02 v0 = mean_size*np.random.gamma(shape=1/CV2sz,scale=CV2sz,size=ncells) sim = Simulator(ncells=ncells, gr = gr, sb=mean_size, steps = 50,V0array=v0,CV2div=0.005,CV2gr=0.02) sim.szdyn(tmax = tmax, sample_time= 0.1*doubling_time, nameCRM = "./data/dataCRM50stpns.csv") print('It took', np.int(time.time()-start), 'seconds.') data=pd.read_csv("./data/dataCRM50stpns.csv") taumax=50 tauarr50ns=range(taumax) tarr=data.time.tolist() corarr50ns=[] for tau in tauarr50ns: xx=[] yy=[] for i in range(len(tarr)-tau): df=data[data.time==tarr[i]] del df['time'] df2=data[data.time==tarr[i+tau]] del df2['time'] #print(df.iloc[0].tolist()) A=df.iloc[0].tolist() B=df2.iloc[0].tolist() 
for m in range(len(A)): xx.append(A[m]) for m in range(len(A)): yy.append(B[m]) #xx.append() #yy.append(df2.tolist()) corarr50ns.append(np.corrcoef(xx,yy)[0][1]) fig, ax = plt.subplots(1,1, figsize=(6,4)) plt.plot(1.8*np.array(tauarr1stp)/18,corarr1stp,lw=3,ls="--",label="1 steps") #plt.plot(1.8*np.array(tauarr2)/18,corarr2,lw=3,ls=":",label="Finite initial variance") plt.plot(1.8*np.array(tauarr10)/18,corarr10,lw=3,label="10 steps",ls=":") plt.plot(1.8*np.array(tauarr50)/18,corarr50,lw=3,label="50 steps",ls="-.") plt.plot(1.8*np.array(tauarr50ns)/18,corarr50ns,lw=3,label="50 steps + Noise") plt.grid() ax.set_ylabel(r"$\rho(t')$",fontsize=15) ax.set_xlabel(r"$t'/\tau$",fontsize=15) ax.tick_params(axis='x', labelsize=15) ax.tick_params(axis='y', labelsize=15) for axis in ['bottom','left']: ax.spines[axis].set_linewidth(2) ax.tick_params(axis='both', width=2,length=6) for axis in ['top','right']: ax.spines[axis].set_linewidth(0) ax.tick_params(axis='both', width=0,length=6) plt.legend(fontsize=15) #x=np.linspace(0,5,30) #plt.plot(x,np.exp(-x*np.log(2))) plt.savefig('./figures/size_autocorrelation.svg',bbox_inches='tight') plt.savefig('./figures/size_autocorrelation.png',bbox_inches='tight') plt.savefig('./figures/size_autocorrelation.eps',bbox_inches='tight') ```
github_jupyter
``` import _init_paths from IPython.display import Image ``` # Step 1: Browse the Ocean Marketplace Using the Graphical User Interface To work with Ocean Protocol you need to set up a digital wallet. First, create a metamask account. There are many guides for doing this available online (e.g. [here](https://docs.oceanprotocol.com/tutorials/metamask-setup/)). Switch from the Ethereum Mainnet to the Rinkeby Test Network from your metamask wallet. Rinkeby is where you can test with no real transaction fees. Instead we use Rinkeby tokens that can be requested from faucets. Ocean tokens are needed to purchase datasets on the Ocean Marketplace. You can request Rinkeby Ocean from the Ocean faucet [here](https://faucet.rinkeby.oceanprotocol.com/). Making transactions on Ocean Marketplace (e.g. purchasing a dataset) also costs gas in ETH. We can request Rinkeby ETH from the faucet [here](https://faucet.rinkeby.io/) (you will need to make a tweet). Now that we have some Rinkeby Ocean and ETH in our wallet, we can browse and purchase datasets on the [Ocean Marketplace](https://market.oceanprotocol.com/). When you enter a Web3 app in the browser you may need to sign in with your wallet. Make sure that you are browsing datasets on the Rinkeby network (see image below). ``` display(Image(filename='images/marketplace-network.png', width = 400)) ``` Check out the available datasets by Algovera [here](https://market.oceanprotocol.com/search?sort=created&sortOrder=desc&text=0x2338e4e94AEe1817701F65f2c751f7c844b0e43b). For this tutorial, we will work with the CryptoPunks image dataset. While the images for these NFTs are freely available online, we have uploaded it as a private dataset to practice the workflow. In future, we hope that the use of private datasets with generative art models opens up new use cases, such as collaborations between artists who don't want to lose control of their datasets and models. 
You can see the dataset on the Ocean marketplace [here](https://market.oceanprotocol.com/asset/did:op:C9D0568838fa670baEe7195Ea443b32EfCAc2281). In the traditional data science workflow, a data scientist downloads a dataset locally before running their code on it. In this scenario, the data comes to the code running on it. In contrast, private datasets on the marketplace cannot be downloaded. Instead, a data scientist can send code to the data itself where it runs the computations before returning the results. This is called Compute-to-Data (C2D), which is similar to Federated Learning. On the Ocean Marketplace, data providers should provide a sample of the data to give an idea of the quality of the data as well as the data interface through which it can be accessed. Download the sample data for CryptoPunks through the Marketplace GUI and inspect it (always make sure to only download samples from data providers that you trust!). ``` display(Image(filename='images/download-sample.png', width = 400)) ``` # Step 2: Browse the Ocean Marketplace Using the Ocean Python Library Now lets do the same through the Ocean Python library. We have installed the library for you in the JupyterHub instance. If you need to do this yourself in future, it's simple (you can view the readme [here](https://github.com/oceanprotocol/ocean.py)). We need to connect to the Ethereum network via an Ethereum node. We have set the config parameters for you in a config file. We are currently using [Infura](https://infura.io) for this but will be migrating to a full Ethereum Erigon node asap for increased decentralization. ``` from ocean_lib.ocean.ocean import Ocean from ocean_lib.config import Config config = Config('config.ini') ocean = Ocean(config) print(f"config.network_url = '{config.network_url}'") print(f"config.metadata_cache_uri = '{config.metadata_cache_uri}'") print(f"config.provider_url = '{config.provider_url}'") ``` Next, export your private key from your metamask wallet. 
We highly recommend doing this with a wallet that has no real tokens in it (only Rinkeby tokens). For more info on private keys, see [this](https://github.com/oceanprotocol/ocean.py/blob/main/READMEs/wallets.md) from the ocean.py documentation: *The whole point of crypto wallets is to store private keys. Wallets have various tradeoffs of cost, convienence, and security. For example, hardware wallets tend to be more secure but less convenient and not free. It can also be useful to store private keys locally on your machine, for testing, though only with a small amount of value at stake (keep the risk down). Do not store your private keys on anything public, unless you want your tokens to disappear. For example, don't store your private keys in GitHub or expose them on frontend webpage code.* With this in mind, you can directly load your private key into the notebook. We use an envvar rather than storing it in code that might be pushed to a repo. We copy this in for a new session (you may need to restart the notebook server). Here's how we export an environmental variable using an example key (replace this with your actual private key.). From your console: ```console export MY_TEST_KEY=0xaefd8bc8725c4b3d15fbe058d0f58f4d852e8caea2bf68e0f73acb1aeec19baa ``` Now initialize your wallet: ``` import os from ocean_lib.web3_internal.wallet import Wallet wallet = Wallet(ocean.web3, private_key=os.getenv('MY_TEST_KEY'), transaction_timeout=20, block_confirmations=config.block_confirmations) print(f"public address = '{wallet.address}'") ``` This should print out the public key of your metamask wallet. Check that it matches the one displayed in your metamask. Let's check the balances in our wallet. These should match the amount you received from the faucets (minus any you've since spent). ``` from ocean_lib.web3_internal.currency import from_wei # wei is the smallest denomination of ether e.g. 
like cents from ocean_lib.models.btoken import BToken #BToken is ERC20 OCEAN_token = BToken(ocean.web3, ocean.OCEAN_address) print(f"ETH balance = '{from_wei(ocean.web3.eth.get_balance(wallet.address))}'") print(f"OCEAN balance = '{from_wei(OCEAN_token.balanceOf(wallet.address))}'") ``` Now let's download a dataset. For the CryptoPunks Image dataset [here](https://market.oceanprotocol.com/asset/did:op:C9D0568838fa670baEe7195Ea443b32EfCAc2281), copy the decentralized identifier (DID). ``` display(Image(filename='images/did.png', width = 400)) did = "did:op:C9D0568838fa670baEe7195Ea443b32EfCAc2281" asset = ocean.assets.resolve(did) print(f"Data token info = '{asset.values['dataTokenInfo']}'") print(f"Dataset name = '{asset.metadata['main']['name']}'") ``` We can get the URL to the sample data from the associated metadata: ``` from pathlib import Path sample_link = asset.metadata['additionalInformation']['links'][0]['url'] ID = Path(sample_link).parts[4] print(f"Sample link = '{sample_link}'") ``` Download the data from the command line (TO DO: fix+streamline this. For now, the sample is downloaded manually in the JupyterHub instance): ``` download_dir = Path('data') dataset_name = "punks-sample" download_path = str(download_dir / (dataset_name + '.tgz')) if not download_dir.exists(): download_dir.mkdir(parents=True) !gdown --id {ID} -O {download_path } ``` Unzip the downloaded file with: ``` !tar -xvzf {download_path} -C {str(download_dir)} ``` Now lets inspect the sample data. The data provider should provide this in the same format as the whole dataset. This helps us as data scientists to write scripts that run on both the sample data and the whole dataset. We call this the **interface** of the data. ``` sample_dir = download_dir / dataset_name print(f"Sub-directories: {sorted(list(sample_dir.glob('*')))}") ``` We have punks with clear backgrounds and punks with teal backgrounds. 
``` clear_dir, teal_dir = sorted(list(sample_dir.glob('*'))) print(f"Images with clear backgrounds: {sorted(list(clear_dir.glob('*')))}") print(f"Images with clear backgrounds: {sorted(list(teal_dir.glob('*')))}") clear_images = sorted(list(clear_dir.glob('*'))) teal_images = sorted(list(teal_dir.glob('*'))) import matplotlib.pyplot as plt import matplotlib.image as mpimg img0 = mpimg.imread(clear_images[0]) img1 = mpimg.imread(clear_images[1]) fig, ax = plt.subplots(1,2) ax[0].imshow(img0) ax[1].imshow(img1) [a.axis('off') for a in ax] plt.show() img0 = mpimg.imread(teal_images[0]) img1 = mpimg.imread(teal_images[1]) fig, ax = plt.subplots(1,2) ax[0].imshow(img0) ax[1].imshow(img1) [a.axis('off') for a in ax] plt.show() ``` The next step is to write some code to convert the raw data into a format that runs with StyleGAN2. We could write this on the sample data before sending it to run on the full dataset.
github_jupyter
``` from pathlib import Path from datetime import date , datetime import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import warnings from scipy.optimize import curve_fit from sqlalchemy import create_engine, text warnings.filterwarnings("ignore") #Creando la conexión con las bases de datos postgres local Nota: postgres deja en minuscula los encabezados, por eso toca pasarlos a mayuscula de nuevo aunque esto ya se hiciera en el archivo de lectura #engine=create_engine(f'postgresql://postgres:postgres@localhost:5432/HACKATON', max_overflow=20) #table_comp = pd.read_sql_table('datosoficiales',engine) #table_comp.columns = table_comp.columns.str.upper() #Si quiere probar los resultados con postgres, descomente las tres lineas anteriores y deje como comentario la siguiente table_comp = pd.read_csv('Datos/Tablas resumen/Tabla general datos oficiales.csv') table_comp ``` # 1 ``` table2020=table_comp[table_comp['YYYY']==2020].reset_index(drop=True) #Agrupo por campo campo2020 = pd.pivot_table(table2020, values='ACEITE', index=['CAMPO'], aggfunc=np.sum).sort_values(by='ACEITE', ascending=False, na_position='first') #Al agrupar también ordené de mayor a menor, por tanto el top 5 es igual a las primeras 5 filas top2020campos=campo2020[0:5] campo2020 top2020campos plot = top2020campos.plot(kind='bar', title='Top 5 de campos de mayor producción de petróleo en 2020') ``` # 2 ``` table2018=table_comp[table_comp['YYYY']==2018].reset_index(drop=True) campo2018cas= table2018[table2018['DEPARTAMENTO']=='CASANARE'].drop('DEPARTAMENTO',axis=1).reset_index() #Se divide en 12 porque en formato largo aparecera 12 veces cada conteo, uno por cada mes campo2018cascount=(campo2018cas.groupby('OPERADORA')['CAMPO'].count()/12).to_frame() campo2018cascount=campo2018cascount[campo2018cascount['CAMPO']>=5].sort_values(['CAMPO'],ascending=False) campo2018cascount plot = campo2018cascount.plot(kind='bar', title='Operadoras que resportaron producción en más 
de 5 campos de Casanare en 2018') ``` # 3 ``` contrato2018 = pd.pivot_table(table2018, values='ACEITE', index=['CONTRATO'], aggfunc=np.sum).sort_values(by='ACEITE', ascending=False, na_position='first') #Divide en 1millon para dejar las unidades en MMstb top2018contratos=contrato2018[0:5]/1000000 top2018contratos plot = top2018contratos.plot(kind='bar', title='Top 5 de contratos de mayor producción de petróleo en 2018') ``` # 4 ``` table2019=table_comp[table_comp['YYYY']==2019].reset_index(drop=True) operadora2019 = pd.pivot_table(table2019, values='ACEITE', index=['OPERADORA','MES'], aggfunc=np.sum).sort_values(by='ACEITE', ascending=False, na_position='first').reset_index(level='MES') operadora2019ago=operadora2019[operadora2019['MES']=='AGOSTO'].sort_values(by='ACEITE', ascending=False, na_position='first').drop('MES',axis=1) topoperadora2019ago=operadora2019ago[0:10] operadora2019 topoperadora2019ago plot = topoperadora2019ago.plot(kind='bar', title='Top 10 de operadoras con mayor producción de petróleo en agosto de 2019') ``` # 5 ``` #Lista para realizar el filtro T1=['enero','febrero','marzo'] T2=['abril','mayo','junio'] #Agrupo por mes meses2019 = pd.pivot_table(table2019, values='ACEITE', index=['MES'], aggfunc=np.sum).sort_values(by='ACEITE', ascending=False, na_position='first').reset_index(level='MES') #Aplico formato al texto para realizar el filtro correctamente meses2019['MES']=meses2019['MES'].apply(lambda x: x.lower()) #Aplico los filtros de los trimestres y guardo los resultados T12019=meses2019[meses2019['MES'].isin(T1)].reset_index(drop=True) T22019=meses2019[meses2019['MES'].isin(T2)].reset_index(drop=True) #Agrupo por mes meses2020 = pd.pivot_table(table2020, values='ACEITE', index=['MES'], aggfunc=np.sum).sort_values(by='ACEITE', ascending=False, na_position='first').reset_index(level='MES') #Aplico formato al texto para realizar el filtro correctamente meses2020['MES']=meses2020['MES'].apply(lambda x: x.lower()) #Aplico los filtros de los 
trimestres y guardo los resultados T12020=meses2020[meses2020['MES'].isin(T1)].reset_index(drop=True) T22020=meses2020[meses2020['MES'].isin(T2)].reset_index(drop=True) #Genero un data frame para guardas los resultados de los filtros y agrupaciones trim1920 = pd.DataFrame({"Año": [2019,2020], "Trimestre 1": [T12019['ACEITE'].sum(),T12020['ACEITE'].sum()], "Trimestre 2": [T22019['ACEITE'].sum(),T22020['ACEITE'].sum()]}) #Subo a indice la columna año trim1920.set_index("Año") coloresMedallas = ['#FFD700','#C0C0C0'] trim1920.plot(kind = 'bar', width=0.8, figsize=(10,4), color = coloresMedallas); ```
github_jupyter
## AUC in CatBoost [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/catboost/catboost/tree/master/catboost/tutorial/metrics/AUC_tutorial.ipynb) The tutorial is dedicated to The Area Under the Receiver Operating Characteristic (ROC) Curve (AUC) and it's implementations in CatBoost for binary classification, multiclass classification and ranking problems. ### Library installation ``` !pip install --upgrade catboost !pip install --upgrade ipywidgets !pip install shap !pip install --upgrade sklearn # !pip install --upgrade numpy !jupyter nbextension enable --py widgetsnbextension import catboost print(catboost.__version__) !python --version import pandas as pd import os import numpy as np np.set_printoptions(precision=4) import catboost import sklearn import matplotlib.pyplot as plt from catboost import datasets, Pool from catboost.utils import get_roc_curve, eval_metric from sklearn import metrics from sklearn.model_selection import train_test_split ``` ### About AUC The **ROC curve** shows the *model's ability to distinguishing between classes*. The model which randomly assigns a class to object is a 'bad' classifier and has a diagonal ROC curve. The better is the classifier, the higher is the ROC curve. The ROC curve is plotted with TPR, [True Positive Rate](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Basic_concept), on the y-axis against the FPR, [False Positive Rate](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Basic_concept), on the x-axis. The curve also could be interpreted in terms of [Sensitivity and Specificity](https://en.wikipedia.org/wiki/Sensitivity_and_specificity) of the model with Sensitivity on the y-axis and (1-Specificity) on the x-axis. 
Building and visualizing the ROC curve could be used *to measure classification algorithm performance with different probability boundaries* and *select the probability boundary* required to achieve the specified false-positive or false-negative rate. **AUC** is the Area Under the ROC Curve. The best AUC = 1 for a model that ranks all the objects right (all objects with class 1 are assigned higher probabilities than objects of class 0). AUC for the 'bad' classifier is equal to 0.5. AUC is used for binary classification, multiclass classification, and ranking problems. *AUC measures the proportion of correctly ordered objects and the capability of the model to distinguish between the classes*. The AUC has an important statistical property: the *AUC of a classifier is equivalent to the probability that the classifier will rank a randomly chosen positive instance higher than a randomly chosen negative instance*. ### CatBoost implementation CatBoost implements AUC for [binary classification](https://catboost.ai/docs/concepts/loss-functions-classification.html), [multiclassification](https://catboost.ai/docs/concepts/loss-functions-multiclassification.html) and [ranking](https://catboost.ai/docs/concepts/loss-functions-ranking.html) problems. AUC is used during training to detect overfitting and select the best model. AUC is also used as a metric for predictions evaluation and comparison with [utils.eval_metric](https://catboost.ai/docs/concepts/python-reference_utils_eval_metric.html). See examples of models fitting and AUC calculation with CatBoost in the section [How to use AUC in Catboost on real datasets?](#scrollTo=BP1nOBpvFzPV) ### Useful links * Read more about ROC on [Wikipedia](https://en.wikipedia.org/wiki/Receiver_operating_characteristic). * To get an understanding of the ROC curve building from scratch see [the article](https://people.inf.elte.hu/kiss/11dwhdm/roc.pdf). 
* To get an understanding of AUC$\mu$ for multiclass classification see [the article](http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf). # Binary classification ## Calculation rules for binary classification AUC for binary classification is calculated according to the following formula: $$ AUC = \frac{\sum_{t_i = 0, t_j = 1} I(a_i, a_j) w_i w_j}{\sum_{t_i = 0, t_j = 1}{w_i w_j}}, $$ where $a_i, a_j$ - predictions (probabilities) of objects to belong to the class 1 given by the algorithm. The sum is calculated on all pairs of objects $i, j$ such that: $t_i = 0, t_j = 1$ where $t$ is the true class label. $I$ is an indicator function equal to 1 if objects $i, j$ are ordered correctly: $$ I(a_i, a_j) = \begin{cases} 1 & \quad \text{if } a_i < a_j\\ 0.5 & \quad \text{if } a_i = a_j\\ 0 & \quad \text{if } a_i > a_j\\ \end{cases} $$ A user could assign weights to each object to specify the importance of each object in metric calculation. ## Simple example of AUC calculation for binary classification Let's look at a simple example of ROC curve building and AUC calculation: We have a sample of 10 objects, 4 from class 1 and 6 from class 0 ('class' column). All objects are equally important (weights = 1). Assume we predicted probabilities of objects to come from class 1 ('prediction' column). ``` toy_example = pd.DataFrame({'class': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0], 'prediction': [0.9, 0.4, 0.6, 0.2, 0.8, 0.25, 0.15, 0.4, 0.3, 0.1]}) ``` To build the ROC curve we need to sort the dataset in descending order of predicted probabilities. ``` toy_example.sort_values(by='prediction', ascending=False, inplace=True) toy_example ``` The denominator $\sum_{t_i = 0, t_j = 1}{w_i w_j}$ is equal to the number of pairs of objects $i, j$ such that true class $t_i = 0, t_j = 1$. As there are 4 objects from class 1 and 6 objects from class 0, the denominator is equal to 24. 
For each object from class 1 count the number of objects from class 0, which are below (has less probability) in the sorted dataset. We add +1 of objects ordered correctly (e.g. objects with IDs 0 and 4) and +0.5 if probabilities are equal (e.g. objects with IDs 1 and 7). For example, the object with ID = 1 adds +4.5 to AUC numerator as correctly ordered with objects with IDs: 8, 3, 6, 9 and has equal prediction with object 7: ![Binary AUC calculation visualization](https://habrastorage.org/webt/9m/hp/y5/9mhpy5d28dzxyvaeh_tby9ls2yo.jpeg) ``` denominator = sum(toy_example['class'] == 1) * sum(toy_example['class'] == 0) numerator = (6 + 5 + 4.5 + 2) manually_calculated_auc = numerator / denominator print('Example of manually calculated ROC AUC = {0:.4f}'.format(manually_calculated_auc)) ``` Let's calculate the same metric with Catboost and ensure everything is right: ``` catboost_auc = eval_metric(toy_example['class'], toy_example['prediction'], 'AUC')[0] print('Example of ROC AUC calculated with CatBoost = {0:.4f}'.format(catboost_auc)) ``` ## ROC curve To build a ROC curve put the number of objects of class 1 ($n_1$) in the square with side 1 along the $ y $-axis, and the number of objects of class 0 ($n_0$) along the $ x $-axis. Going by the list of target labels sorted by model predictions in descending order will add $\frac{1}{n_1}$ along the $y$-axis encountering an object of class 1 and $\frac{1}{n_0}$ along the $x$-axis encountering an object of class 0. Let's get FPR, TPR with sklearn and visualize the curve. ``` toy_example ``` Going from top to bottom by the 'class' column we go up on the plot if the class is 1 or right if the class is 0. When we meet objects with equal prediction add the number of 1 along y and the number of 0 along x and go by the diagonal of the rectangle. 
``` fpr, tpr, _ = sklearn.metrics.roc_curve(toy_example['class'], toy_example['prediction']) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve') plt.fill_between(fpr, tpr, y2=0, color='darkorange', alpha=0.3, label='Area Under Curve (area = %0.2f)' % catboost_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic curve example') plt.legend(loc="lower right") plt.show() ``` Let's have a look at the toy_example objects and understand how each object impacted the ROC curve: ![Binary AUC plot explained](https://habrastorage.org/webt/zc/jr/vx/zcjrvx5bn5darmm8soa7km5du3a.jpeg) ## AUC: Calculation with weights CatBoost allows us to assign a weight to each object in the dataset for AUC calculation according to [the formula above](#scrollTo=4RbdSB4EMBZs). If no weights assigned, all weights are equal to 1 and thus AUC is proportional to the number of correctly ordered pairs. If weights assigned this property is changed. Weights are useful for unbalanced datasets. If there is a class or a group of objects with a small number of samples in the train dataset it could be reasonable to increase the weights for corresponding objects. For example if we assign weight 10 to one object it can be understood as adding 9 of the same objects to the dataset. Let's calculate AUC for an example of the wrong and right classification of one important object with weight = 10. 
``` toy_example['weight'] = 1 roc_auc = catboost.utils.eval_metric(toy_example['class'], toy_example['prediction'], 'AUC', weight=toy_example['weight'])[0] print('ROC AUC with default weights = {0:.4f}'.format(roc_auc)) # seting weight=10 to important object with correct ordering toy_example.iloc[0, 2] = 10 toy_example roc_auc = catboost.utils.eval_metric(toy_example['class'], toy_example['prediction'], 'AUC', weight=toy_example['weight'])[0] print('AUC: = {0:.4f}'.format(roc_auc)) print('Important object is correctly ordered, AUC increases') # set weight=10 to important object with wrong ordering toy_example['weight'] = 1 toy_example.iloc[1, 2] = 10 toy_example roc_auc = catboost.utils.eval_metric(toy_example['class'], toy_example['prediction'], 'AUC', weight=toy_example['weight'])[0] print('AUC: = {0:.4f}'.format(roc_auc)) print('Important object is incorrectly ordered, AUC decreases') ``` # AUC for multiclass classification There are two AUC metrics implemented for multiclass classification in Catboost. The first is **OneVsAll**. AUC value is calculated separately for each class according to the binary classification calculation principles. The second is **AUC$\mu$**, which is designed to meet the following properties: * Property 1. If a model gives the correct label the highest probability on every example, then AUC = 1 * Property 2. Random guessing on examples yields AUC = 0.5 * Property 3. AUC is insensitive to class skew AUC$\mu$ could be interpreted as an average of pairwise AUCs between the classes. Details could be found in the [article](http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf). ### OneVsAll AUC OneVsAll AUC in Catboost returns $n$ AUC values for $n$-class classification. The value is calculated separately for each class $k$ numbered from $0$ to $K–1$ according [to the binary classification calculation rules](#scrollTo=4RbdSB4EMBZs). The objects of class $k$ are considered positive, while all others are considered negative. 
Let's create a small example and calculate OneVsAll AUC for 3 classes. ``` classes = [0, 1, 1, 0, 1, 1, 1, 0, 1, 2] probas =\ np.array([[0.4799, 0.3517, 0.3182, 0.3625, 0.336 , 0.3034, 0.4284, 0.5497, 0.231 , 0.27 ], [0.2601, 0.3052, 0.3637, 0.3742, 0.3808, 0.3995, 0.3038, 0.2258, 0.264 , 0.4581], [0.2601, 0.3431, 0.3182, 0.2633, 0.2832, 0.2971, 0.2678, 0.2245,0.506 , 0.271 ]]) ``` Let's calculate ROC AUC for each class using binary classification rules: ``` one_vs_all_df_example = pd.DataFrame({'class_0': classes, 'class_1': classes, 'class_2': classes, 'probability_of_0': probas[0, :], 'probability_of_1': probas[1, :], 'probability_of_2': probas[2, :]}) one_vs_all_df_example.loc[one_vs_all_df_example['class_0'] != 0, 'class_0'] = -1 one_vs_all_df_example.loc[one_vs_all_df_example['class_0'] == 0, 'class_0'] = 1 one_vs_all_df_example.loc[one_vs_all_df_example['class_0'] == -1, 'class_0'] = 0 one_vs_all_df_example.loc[one_vs_all_df_example['class_1'] != 1, 'class_1'] = 0 one_vs_all_df_example.loc[one_vs_all_df_example['class_1'] == 1, 'class_1'] = 1 one_vs_all_df_example.loc[one_vs_all_df_example['class_2'] != 2, 'class_2'] = 0 one_vs_all_df_example.loc[one_vs_all_df_example['class_2'] == 2, 'class_2'] = 1 one_vs_all_df_example roc_aucs = [] for i in range(3): roc_aucs.append(catboost.utils.eval_metric(one_vs_all_df_example['class_' + str(i)], one_vs_all_df_example['probability_of_' + str(i)], 'AUC')[0]) print('Binary Classification ROC AUC for class {0} = {1:.4f}'.format(i, roc_aucs[-1])) ``` Let's calculate the same metric with OneVsAll Catboost AUC and ensure everything is right: ``` aucs = eval_metric(classes, probas, 'AUC:type=OneVsAll') for i in range(len(aucs)): print('OneVsAll ROC AUC for class {0} = {1:.4f}'.format(i, aucs[i])) ``` #### Visualizing ROC curves ``` plt.figure() lw = 2 colors = ['r', 'y', 'c'] for i in range(3): fpr, tpr, _ = sklearn.metrics.roc_curve(one_vs_all_df_example['class_' + str(i)], one_vs_all_df_example['probability_of_' + str(i)]) 
plt.plot(fpr, tpr, color=colors[i], lw=lw, label='ROC curve for class {0}, area = {1:.4f}'.format(i, roc_aucs[i]), alpha=0.5) plt.fill_between(fpr, tpr, y2=0, color=colors[i], alpha=0.1) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic curve example') plt.legend(loc="lower right") plt.show() ``` #### OneVsAll AUC with weights ``` # setting a big weight to incorrectly classified object of class 1 (misclassified as 2) AUCs = eval_metric(classes, probas, weight=[1, 1, 1, 1, 1, 1, 1, 1, 10, 1], metric='AUC:type=OneVsAll') print('CalBoost OneVsAll AUC with weights:') print('AUC:class=0 = {0:.4f}, AUC:class=1 = {1:.4f}, AUC:class=2 = {2:.4f}'.format(AUCs[0], AUCs[1], AUCs[2])) # setting a big weight to correctly classified object of class 0 AUCs = eval_metric(classes, probas, weight=[10, 1, 1, 1, 1, 1, 1, 1, 1, 1], metric='AUC:type=OneVsAll') print('CalBoost OneVsAll AUC with weights:') print('AUC:class=0 = {0:.4f}, AUC:class=1 = {1:.4f}, AUC:class=2 = {2:.4f}'.format(AUCs[0], AUCs[1], AUCs[2])) ``` Weights of the objects would affect AUC values for all classes as all objects of the other classes are used in the calculation as objects of class 0. OneVsAll AUC measures the ability of the classifier to distinguish objects of one class from another. An advantage of OneVsAll is the ability to monitor performance on different classes separately. ## AUC$\mu$ AUC$\mu$ could be simply used as an evaluation metric to prevent overfitting, while OneVsAll AUC could not as it contains a metric value for each class and AUCs for different classes may vary inconsistently during training. Let's have a look at the AUC$\mu$ formula: $$ AUC\mu = \frac{2}{K(K - 1)}\sum_{i<j} S(i, j) $$ $K$ is a number of classes, $i < j ≤ K$ are the classes' numbers. 
$S(i, j)$ is a separability measure between classes $i$ and $j$ defined as: $$ S(i, j) = \frac{1}{n_i n_j}\sum_{a \in D_i, b \in D_j} \hat{I} \circ O(y^{(a)}, y^{(b)}, \hat{p}^{(a)}, \hat{p}^{(b)}, v_{i, j}), $$ where * $\hat I$ is indicator function applied to results of $O$, which returns 1 if $O$ returns a positive value, 0 if $O$ returns a negative value, and 0.5 if $O$ returns 0. * $O$ is an orientation function indicating if the two instances are ordered correctly, incorrectly, or tied. $O$ returns a positive value if the predictions are ranked correctly, a negative value if they are ranked incorrectly, and 0 if their rank is tied. * $a$, $b$ are objects from classes $i$, $j$ respectively, * $\hat{p}^{(a)}$, $\hat{p}^{(b)}$ are vectors of predicted probabilities of the object to belong to the classes, * $y^{(a)}$, $y^{(b)}$ are true labels of objects $a$, $b$ (one-hot vectors), * $v_{i, j}$ is a two-class decision hyperplane normal vector. $O$ is calculated as follows: $O(y^{(a)}, y^{(b)}, \hat{p}^{(a)}, \hat{p}^{(b)}, v_{i, j}) = v_{i, j} (y^{(a)} - y^{(b)}) v_{i, j} (\hat{p}^{(a)} - \hat{p}^{(b)})$ Vectors $v_{i, j}$ come from misclassification cost matrix $A$, which is defined manually following the learning task. $v_{i,j} = A_{i,.} - A_{j,.}$ $A_{i,j}$ is the cost of classifying an instance as class $i$ when its true class is $j$. Then $A$ defines a partition on the $(K − 1)$−simplex and induces decision boundaries between the $K$ classes. Default misclassification cost matrix values are 1 everywhere, except the diagonal where they are 0. It is called the argmax partition matrix as it assigns a class with maximal probability. Here is a partition for a 3-class classification problem with the argmax partition matrix A. For a user-defined partition matrix the boundaries (marked by arrows) are shifted. 
![argmax partition matrix visualisation](https://habrastorage.org/webt/h_/9y/jh/h_9yjhimj9yidj3y1nritttbhsc.jpeg) Follow [the article](http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf) for more details. CatBoost allows us to set the misclassification cost matrix $A$ and objects' weights to calculate $AUC\mu$. Let's calculate $AUC\mu$ for a very small example with 3 classes and 4 objects: ``` classes = np.array([2, 1, 0, 2]) y = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1]]) probas = np.array([[0.3, 0.5, 0.2], [0.4, 0.5, 0.1], [0.4, 0.15, 0.45], [0.05, 0.5, 0.45]]) K = 3 A = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]) def get_I(value): if value > 0: return 1 elif value == 0: return 0.5 return 0 ``` Let's compute $S(i, j)$ for each ordered classes' pair: ``` # i = 0 (objects: y_2), j = 1 (objects: y_1) v_01 = A[0, :] - A[1, :] s_01 = get_I(v_01.dot(y[2] - y[1]) * v_01.dot(probas[2] - probas[1])) # i = 0 (objects: y_2), j = 2 (objects: y_0, y_3) v_02 = A[0, :] - A[2, :] s_02 = 1 / (1 * 2) * (get_I(v_02.dot(y[2] - y[0]) * v_02.dot(probas[2] - probas[0])) + \ get_I(v_02.dot(y[2] - y[3]) * v_02.dot(probas[2] - probas[3]))) # i = 1 (objects: y_1), j = 2 (objects: y_0, y_3) v_12 = A[1, :] - A[2, :] s_12 = 1 / (1 * 2) * (get_I(v_12.dot(y[1] - y[0]) * v_02.dot(probas[1] - probas[0])) + \ get_I(v_12.dot(y[1] - y[3]) * v_02.dot(probas[1] - probas[3]))) AUC_mu = 2 / (K * (K - 1)) * (s_01 + s_02 + s_12) print('AUC_mu = {:.4f}'.format(AUC_mu)) ``` Let's calculate the same metric with Catboost $AUC\mu$ and ensure everything is right: ``` print('Catboost AUC_mu = {:.4f}'.format(eval_metric(classes, probas.T, 'AUC:type=Mu')[0])) ``` Let's calculate OneVsAll AUC for the same data: ``` AUCs = eval_metric(classes, probas.T, metric='AUC:type=OneVsAll') print('AUC:class=0 = {0:.4f}, AUC:class=1 = {1:.4f}, AUC:class=2 = {2:.4f}'.format(AUCs[0], AUCs[1], AUCs[2])) ``` As we can see OneVsAll AUC and $AUC\mu$ are pretty high, but actually algorithm performance is bad, 
accuracy is 0.25. Thus it is reasonable to evaluate other important metrics during training. ``` print('Accuracy = {:.4f}'.format(eval_metric(classes, probas.T, metric='Accuracy')[0])) ``` #### $AUC\mu$ with misclassification cost matrix and weights ``` AUC = eval_metric(classes, probas.T, weight=[1, 10, 1, 1], metric='AUC:type=Mu;misclass_cost_matrix=0/0.5/2/1/0/1/0/0.5/0') print('Catboost AUC_mu with weights = {:.4f}'.format(AUC[0])) ``` $AUC\mu$ is a good generalizing metric to estimate algorithm ability to separate classes in multiclassification problems. See more about different approaches to multiclass AUC calculation and their properties in section 4.3 of [the article](http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf). AUC$\mu$ could be interpreted as an average of pairwise AUCs between the classes. AUC$\mu$ is a single value unlike OneVsAll AUC and thus it could be used as an evaluation metric during training for overfitting detection and trees pruning. # AUC for ranking There are two variants of AUC for Ranking in CatBoost. ### The Classic AUC is used for models with Logloss and CrossEntropy loss functions. It has the same formula as AUC for binary classification: $$ \frac{\sum I(a_i, a_j) w_i w_j}{\sum{w_i w_j}} $$ $a_i, a_j$ - predicted rank of objects i, j. The sum is calculated on all pairs of objects $i, j$ such that: $t_i = 0, t_j = 1$ where $t$ is relevance. $$ I(a_i, a_j) = \begin{cases} 1 & \quad \text{if } a_i < a_j\\ 0.5 & \quad \text{if } a_i = a_j\\ 0 & \quad \text{if } a_i > a_j\\ \end{cases} $$ The formula above suits for simple tasks with target values equal to 1 and 0. What if we have more target values? If the target type is not binary, then every object $i$ with target value $t$ and weight $w$ is replaced with two objects for the metric calculation: * $\sigma_1$ with weight $wt$ and target value 1 * $\sigma_2$ with weight $w(1 - t)$ and target value 0. Target values must be in the range [0; 1]. 
Let's calculate Classic AUC for Ranking for a small example. Assume we have 2 documents, 2 requests, corresponding relevances in the range [1, 5] and predicted relevance in the range [0, 1]: ``` rank_df = pd.DataFrame({'request_id': [1, 1, 2, 2], 'doc_id': [1, 2, 1, 2], 'relevance': [2, 5, 4, 1], 'pred':[0.3, 0.7, 0.2, 0.6], 'weight': [1, 1, 1, 1]}) #map relevence to [0, 1] rank_df.loc[:, 'relevance'] /= 5 rank_df.sort_values(by='pred', axis=0, inplace=True) rank_df ``` Let's create objects $\sigma$ required for calculation and update weights: ``` rank_df_sigma = pd.concat([ rank_df,rank_df ],ignore_index=True) rank_df_sigma['new_relevance'] = 0 rank_df_sigma.loc[:3, 'new_relevance'] = 1 rank_df_sigma['new_weight'] = rank_df_sigma['weight'] rank_df_sigma.loc[rank_df_sigma['new_relevance'] == 1, 'new_weight'] = rank_df_sigma['relevance'] * rank_df_sigma['weight'] rank_df_sigma.loc[rank_df_sigma['new_relevance'] == 0, 'new_weight'] = (1 - rank_df_sigma['relevance']) * rank_df_sigma['weight'] rank_df_sigma.sort_values(by='pred', axis=0, ascending=False, inplace=True) rank_df_sigma ``` Let's calculate AUC: ``` # finding sum of weights for denominator weights1 = rank_df_sigma.loc[rank_df_sigma['new_relevance'] == 1, 'new_weight'].to_numpy() weights0 = rank_df_sigma.loc[rank_df_sigma['new_relevance'] == 0, 'new_weight'].to_numpy() ``` Calculation details for object #3: ![classical ranking AUC calculation](https://habrastorage.org/webt/gq/in/iy/gqiniyqqhdgpd3fqckbdlwgvk4w.jpeg) ``` denominator = np.sum(weights1.reshape(-1, 1)@weights0.reshape(1, -1)) # for each object of class 1 find objects of class 0 below and add composition of weights and indicatior function to the numerator numerator = 1 * (0.8 + 0.6 + 0.2) + 0.2 * (0.8 * 0.5 + 0.6 + 0.2) + 0.4 * (0.6 * 0.5 + 0.2) + 0.8 * 0.2 * 0.5 manually_calculated_auc = numerator / denominator print('Example of manually calculated Classic ROC AUC for ranking = {0:.4f}'.format(manually_calculated_auc)) print('CatBoost Classic ROC 
AUC for ranking = {0:.4f}'.format(eval_metric(rank_df['relevance'], rank_df['pred'], 'AUC:type=Classic')[0])) ``` ## Ranking AUC Ranking AUC is used for ranking loss functions. $$ \frac{\sum I(a_i, a_j) w_i w_j}{\sum{w_i w_j}} $$ $a_i, a_j$ - predicted rank of objects i, j. The sum is calculated on all pairs of objects $i, j$ such that: $t_i < t_j$ where $t$ is relevance. $$ I(a_i, a_j) = \begin{cases} 1 & \quad \text{if } a_i < a_j\\ 0.5 & \quad \text{if } a_i = a_j\\ 0 & \quad \text{if } a_i > a_j\\ \end{cases} $$ Let's compute Ranking AUC for the small example above: ``` rank_df.sort_values(by='relevance', axis=0, inplace=True, ascending=False) rank_df ``` Calculation details: ![ranking AUC calculation details](https://habrastorage.org/webt/rk/8w/ka/rk8wkacwlc9sw3tqq2bgitqti-m.jpeg) ``` n = rank_df['relevance'].unique().shape[0] denominator = (n * (n-1)) / 2 #number of ordered pairs # for object #1 three object below are less relevant by real relevance and predictions, the other are incorrectly ranked numerator = 3 manually_calculated_auc = numerator / denominator print('Example of manually calculated Ranking ROC AUC = {0:.4f}'.format(manually_calculated_auc)) print('CatBoost Ranking ROC AUC = {0:.4f}'.format(eval_metric(rank_df['relevance'], rank_df['pred'], 'AUC:type=Ranking', group_id=rank_df['request_id'])[0])) ``` Ranking AUC is directly measuring the quantity of correctly ranked pairs. The problem is the metric is calculated regardless of groups. In our example, we have groups (requests) and we may want to measure quality inside the groups, but currently, AUC is calculated over the dataset and AUC penalizes us if, for example, top-one document for one request is ranked lower than top-one for another document while true ranks are opposite (but still top-one for each document). One more problem of AUC for Ranking is that it does not distinguish the top object from the others. 
If we have a lot of irrelevant documents that the model correctly ranks as irrelevant, but the ranking within the top-10 is poor, AUC could still be high because of that "tail" of irrelevant objects ranked below the relevant ones. # Overview
### AUC for multiclass classification #### Loading dataset We will use the [Wine recognition dataset](https://scikit-learn.org/stable/datasets/index.html#wine-dataset ) for multiclass classification problem ``` !wget https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data columns = ['Class', 'Alcohol', 'Malic_acid', 'Ash', 'Alcalinity_of_ash', 'Magnesium', 'Total_phenols', 'Flavanoids', 'Nonflavanoid_phenols', 'Proanthocyanins', 'Color_intensity', 'Hue', 'OD280OD315_of_diluted_wines', 'Proline'] train_df = pd.read_csv('wine.data', names=columns) train_df.head(5) ``` #### Creating pool ``` y = train_df['Class'] X = train_df.drop(columns='Class') y.unique() from sklearn.model_selection import train_test_split X_train, X_validation, y_train, y_validation = train_test_split(X, y, train_size=0.5, random_state=42) train_pool = Pool(X_train, y_train) validation_pool = Pool(X_validation, y_validation) ``` #### Training model ``` from catboost import CatBoostClassifier model = CatBoostClassifier( iterations=20, eval_metric='AUC', custom_metric=['AUC:type=OneVsAll;hints=skip_train~false', 'Accuracy'], # metric_period=10, loss_function='MultiClass', train_dir='model_dir', random_seed=42) model.fit( train_pool, eval_set=validation_pool, verbose=True, ); ``` Model is shrinked to the first best $AUC\mu$ value (17 iteration) #### Calculating metrics ``` AUCs = model.eval_metrics(validation_pool, metrics=['AUC:type=Mu', 'AUC:type=OneVsAll']) print('AUC:type=OneVsAll:') print('AUC:class=0 = {0:.4f}, AUC:class=1 = {1:.4f}, AUC:class=2 = {2:.4f}'.format(AUCs['AUC:class=0'][-1], AUCs['AUC:class=1'][-1], AUCs['AUC:class=2'][-1])) print('\nAUC:type=Mu:') print('AUC:type=Mu = {0:.5f}'.format(AUCs['AUC:type=Mu'][-1])) ``` Using weigths and misclassification cost matrix: ``` weights = np.random.rand(X_validation.shape[0]) weights[10] = 100 validation_pool = Pool(X_validation, y_validation, weight=weights) AUC = model.eval_metrics(validation_pool, metrics=['AUC:type=Mu', 
'AUC:type=Mu;use_weights=True', 'AUC:type=Mu;misclass_cost_matrix=0/0.5/2/1/0/1/0/0.5/0;use_weights=True']) print('AUC:type=Mu = {0:.5f}'.format(AUC['AUC:type=Mu'][-1])) print('AUC:type=Mu;use_weights=True = {0:.5f}'.format(AUC['AUC:use_weights=true;type=Mu'][-1])) print('AUC:use_weights=true;type=Mu;misclass_cost_matrix = {0:.5f}'.format(AUC['AUC:use_weights=true;type=Mu;misclass_cost_matrix=0/0.5/2/1/0/1/0/0.5/0'][-1])) ```
github_jupyter
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_08_1_kaggle_intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # T81-558: Applications of Deep Neural Networks **Module 8: Kaggle Data Sets** * Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # Module 8 Material * **Part 8.1: Introduction to Kaggle** [[Video]](https://www.youtube.com/watch?v=v4lJBhdCuCU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_1_kaggle_intro.ipynb) * Part 8.2: Building Ensembles with Scikit-Learn and Keras [[Video]](https://www.youtube.com/watch?v=LQ-9ZRBLasw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_2_keras_ensembles.ipynb) * Part 8.3: How Should you Architect Your Keras Neural Network: Hyperparameters [[Video]](https://www.youtube.com/watch?v=1q9klwSoUQw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_3_keras_hyperparameters.ipynb) * Part 8.4: Bayesian Hyperparameter Optimization for Keras [[Video]](https://www.youtube.com/watch?v=sXdxyUCCm8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb) * Part 8.5: Current Semester's Kaggle [[Video]](https://www.youtube.com/watch?v=PHQt0aUasRg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_5_kaggle_project.ipynb) # Part 8.1: Introduction to Kaggle [Kaggle](http://www.kaggle.com) runs competitions in which data scientists compete in order to provide the best model to fit the data. A common project to get started with Kaggle is the [Titanic data set](https://www.kaggle.com/c/titanic-gettingStarted). 
Most Kaggle competitions end on a specific date. Website organizers have currently scheduled the Titanic competition to end on December 31, 20xx (with the year usually rolling forward). However, they have already extended the deadline several times, and an extension beyond 2014 is also possible. Second, the Titanic data set is considered a tutorial data set. In other words, there is no prize, and your score in the competition does not count towards becoming a Kaggle Master. ### Kaggle Ranks Kaggle ranks are achieved by earning gold, silver and bronze medals. * [Kaggle Top Users](https://www.kaggle.com/rankings) * [Current Top Kaggle User's Profile Page](https://www.kaggle.com/stasg7) * [Jeff Heaton's (your instructor) Kaggle Profile](https://www.kaggle.com/jeffheaton) * [Current Kaggle Ranking System](https://www.kaggle.com/progression) ### Typical Kaggle Competition A typical Kaggle competition will have several components. Consider the Titanic tutorial: * [Competition Summary Page](https://www.kaggle.com/c/titanic) * [Data Page](https://www.kaggle.com/c/titanic/data) * [Evaluation Description Page](https://www.kaggle.com/c/titanic/details/evaluation) * [Leaderboard](https://www.kaggle.com/c/titanic/leaderboard) ### How Kaggle Competitions are Scored Kaggle is provided with a data set by the competition sponsor, as seen in Figure 8.SCORE. This data set is divided up as follows: * **Complete Data Set** - This is the complete data set. * **Training Data Set** - You are provided both the inputs and the outcomes for the training portion of the data set. * **Test Data Set** - You are provided the complete test data set; however, you are not given the outcomes. Your submission is your predicted outcomes for this data set. * **Public Leaderboard** - You are not told what part of the test data set contributes to the public leaderboard. Your public score is calculated based on this part of the data set. 
* **Private Leaderboard** - You are not told what part of the test data set contributes to the public leaderboard. Your final score/rank is calculated based on this part. You do not see your private leaderboard score until the end. **Figure 8.SCORE: How Kaggle Competitions are Scored** ![How Kaggle Competitions are Scored](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_3_kaggle.png "How Kaggle Competitions are Scored") ### Preparing a Kaggle Submission Code need not be submitted to Kaggle. For competitions, you are scored entirely on the accuracy of your sbmission file. A Kaggle submission file is always a CSV file that contains the **Id** of the row you are predicting and the answer. For the titanic competition, a submission file looks something like this: ``` PassengerId,Survived 892,0 893,1 894,1 895,0 896,0 897,1 ... ``` The above file states the prediction for each of various passengers. You should only predict on ID's that are in the test file. Likewise, you should render a prediction for every row in the test file. Some competitions will have different formats for their answers. For example, a multi-classification will usually have a column for each class and your predictions for each class. # Select Kaggle Competitions There have been many interesting competitions on Kaggle, these are some of my favorites. 
## Predictive Modeling * [Otto Group Product Classification Challenge](https://www.kaggle.com/c/otto-group-product-classification-challenge) * [Galaxy Zoo - The Galaxy Challenge](https://www.kaggle.com/c/galaxy-zoo-the-galaxy-challenge) * [Practice Fusion Diabetes Classification](https://www.kaggle.com/c/pf2012-diabetes) * [Predicting a Biological Response](https://www.kaggle.com/c/bioresponse) ## Computer Vision * [Diabetic Retinopathy Detection](https://www.kaggle.com/c/diabetic-retinopathy-detection) * [Cats vs Dogs](https://www.kaggle.com/c/dogs-vs-cats) * [State Farm Distracted Driver Detection](https://www.kaggle.com/c/state-farm-distracted-driver-detection) ## Time Series * [The Marinexplore and Cornell University Whale Detection Challenge](https://www.kaggle.com/c/whale-detection-challenge) ## Other * [Helping Santa's Helpers](https://www.kaggle.com/c/helping-santas-helpers) # Module 8 Assignment You can find the first assignment here: [assignment 8](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class8.ipynb)
github_jupyter
## Dependencies ``` import json from tweet_utility_scripts import * from transformers import TFDistilBertModel, DistilBertConfig from tokenizers import BertWordPieceTokenizer from tensorflow.keras.models import Model from tensorflow.keras import optimizers, metrics, losses from tensorflow.keras.callbacks import EarlyStopping, TensorBoard from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling1D, GlobalMaxPooling1D, Concatenate SEED = 0 seed_everything(SEED) warnings.filterwarnings("ignore") ``` # Load data ``` database_base_path = '/kaggle/input/tweet-dataset-split-distilbert-uncased-128/' hold_out = pd.read_csv(database_base_path + 'hold-out.csv') train = hold_out[hold_out['set'] == 'train'] validation = hold_out[hold_out['set'] == 'validation'] display(hold_out.head()) # Unzip files !tar -xvf /kaggle/input/tweet-dataset-split-distilbert-uncased-128/hold_out.tar.gz base_data_path = 'hold_out/' x_train = np.load(base_data_path + 'x_train.npy') y_train = np.load(base_data_path + 'y_train.npy') x_valid = np.load(base_data_path + 'x_valid.npy') y_valid = np.load(base_data_path + 'y_valid.npy') y_train = y_train.astype('float') y_valid = y_valid.astype('float') # Delete data dir shutil.rmtree(base_data_path) ``` # Model parameters ``` tokenizer_path = database_base_path + 'vocab.txt' base_path = '/kaggle/input/qa-transformers/distilbert/' model_path = 'model.h5' config = { "MAX_LEN": 128, "BATCH_SIZE": 64, "EPOCHS": 20, "LEARNING_RATE": 1e-5, "ES_PATIENCE": 3, "question_size": 3, "smooth_factor": .25, "base_model_path": base_path + 'distilbert-base-uncased-distilled-squad-tf_model.h5', "config_path": base_path + 'distilbert-base-uncased-distilled-squad-config.json' } with open('config.json', 'w') as json_file: json.dump(json.loads(json.dumps(config)), json_file) dist = [pow(config['smooth_factor'], i) for i in range(1, config['MAX_LEN'])] def smooth_labels_dist(y, dist=dist): center = y.argmax() y[center+1:] = dist[:len(y)-center-1] y[:center] = 
dist[::-1][-center:] return y np.apply_along_axis(smooth_labels_dist, -1, y_train) np.apply_along_axis(smooth_labels_dist, -1, y_valid) ``` # Model ``` module_config = DistilBertConfig.from_pretrained(config['config_path'], output_hidden_states=False) def model_fn(MAX_LEN): input_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids') attention_mask = Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask') token_type_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='token_type_ids') base_model = TFDistilBertModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model") sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}) last_state = sequence_output[0] x = GlobalAveragePooling1D()(last_state) y_start = Dense(MAX_LEN, activation='sigmoid', name='y_start')(x) y_end = Dense(MAX_LEN, activation='sigmoid', name='y_end')(x) model = Model(inputs=[input_ids, attention_mask, token_type_ids], outputs=[y_start, y_end]) model.compile(optimizers.Adam(lr=config['LEARNING_RATE']), loss=losses.BinaryCrossentropy(), metrics=[metrics.BinaryAccuracy()]) return model model = model_fn(config['MAX_LEN']) model.summary() ``` # Train ``` tb_callback = TensorBoard(log_dir='./') es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], restore_best_weights=True, verbose=1) history = model.fit(list(x_train), list(y_train), validation_data=(list(x_valid), list(y_valid)), callbacks=[es, tb_callback], epochs=config['EPOCHS'], batch_size=config['BATCH_SIZE'], verbose=2).history model.save_weights(model_path) # Compress logs dir !tar -cvzf train.tar.gz train !tar -cvzf validation.tar.gz validation # Delete logs dir if os.path.exists('/kaggle/working/train/'): shutil.rmtree('/kaggle/working/train/') if os.path.exists('/kaggle/working/validation/'): shutil.rmtree('/kaggle/working/validation/') ``` # Model loss graph ``` sns.set(style="whitegrid") 
plot_metrics(history, metric_list=['loss', 'y_start_loss', 'y_end_loss', 'y_start_binary_accuracy', 'y_start_binary_accuracy']) ``` # Tokenizer ``` tokenizer = BertWordPieceTokenizer(tokenizer_path , lowercase=True) tokenizer.save('./') ``` # Model evaluation ``` train_preds = model.predict(list(x_train)) valid_preds = model.predict(list(x_valid)) train['start'] = train_preds[0].argmax(axis=-1) train['end'] = train_preds[1].argmax(axis=-1) train["end"].clip(0, train["text_len"], inplace=True) train["start"].clip(0, train["end"], inplace=True) train['prediction'] = train.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1) train["prediction"].fillna('', inplace=True) validation['start'] = valid_preds[0].argmax(axis=-1) validation['end'] = valid_preds[1].argmax(axis=-1) validation["end"].clip(0, validation["text_len"], inplace=True) validation["start"].clip(0, validation["end"], inplace=True) validation['prediction'] = validation.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1) validation["prediction"].fillna('', inplace=True) display(evaluate_model(train, validation)) ``` # Visualize predictions ``` print('Train set') display(train.head(10)) print('Validation set') display(validation.head(10)) ```
github_jupyter
# Creating Keras DNN model **Learning Objectives** 1. Create input layers for raw features 1. Create feature columns for inputs 1. Create DNN dense hidden layers and output layer 1. Build DNN model tying all of the pieces together 1. Train and evaluate ## Introduction In this notebook, we'll be using Keras to create a DNN model to predict the weight of a baby before it is born. We'll start by defining the CSV column names, label column, and column defaults for our data inputs. Then, we'll construct a tf.data Dataset of features and the label from the CSV files and create inputs layers for the raw features. Next, we'll set up feature columns for the model inputs and build a deep neural network in Keras. We'll create a custom evaluation metric and build our DNN model. Finally, we'll train and evaluate our model. Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/tree/master/courses/machine_learning/deepdive2/end_to_end_ml/labs/keras_dnn_babyweight.ipynb) -- try to complete that notebook first before reviewing this solution notebook. ## Set up environment variables and load necessary libraries ``` !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst !pip install --user google-cloud-bigquery==1.25.0 ``` **Note**: Restart your kernel to use updated packages. Kindly ignore the deprecation warnings and incompatibility errors related to google-cloud-storage. Import necessary libraries. ``` from google.cloud import bigquery import pandas as pd import datetime import os import shutil import matplotlib.pyplot as plt import tensorflow as tf print(tf.__version__) ``` Set environment variables so that we can use them throughout the notebook. 
``` %%bash export PROJECT=$(gcloud config list project --format "value(core.project)") echo "Your current GCP Project Name is: "$PROJECT PROJECT = "cloud-training-demos" # Replace with your PROJECT ``` ## Create ML datasets by sampling using BigQuery We'll begin by sampling the BigQuery data to create smaller datasets. Let's create a BigQuery client that we'll use throughout the lab. ``` bq = bigquery.Client(project = PROJECT) ``` We need to figure out the right way to divide our hash values to get our desired splits. To do that we need to define some values to hash within the module. Feel free to play around with these values to get the perfect combination. ``` modulo_divisor = 100 train_percent = 80.0 eval_percent = 10.0 train_buckets = int(modulo_divisor * train_percent / 100.0) eval_buckets = int(modulo_divisor * eval_percent / 100.0) ``` We can make a series of queries to check if our bucketing values result in the correct sizes of each of our dataset splits and then adjust accordingly. Therefore, to make our code more compact and reusable, let's define a function to return the head of a dataframe produced from our queries up to a certain number of rows. ``` def display_dataframe_head_from_query(query, count=10): """Displays count rows from dataframe head from query. Args: query: str, query to be run on BigQuery, results stored in dataframe. count: int, number of results from head of dataframe to display. Returns: Dataframe head with count number of results. """ df = bq.query( query + " LIMIT {limit}".format( limit=count)).to_dataframe() return df.head(count) ``` For our first query, we're going to use the original query above to get our label, features, and columns to combine into our hash which we will use to perform our repeatable splitting. There are only a limited number of years, months, days, and states in the dataset. Let's see what the hash values are. 
We will need to include all of these extra columns to hash on to get a fairly uniform spread of the data. Feel free to try less or more in the hash and see how it changes your results. ``` # Get label, features, and columns to hash and split into buckets hash_cols_fixed_query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, year, month, CASE WHEN day IS NULL THEN CASE WHEN wday IS NULL THEN 0 ELSE wday END ELSE day END AS date, IFNULL(state, "Unknown") AS state, IFNULL(mother_birth_state, "Unknown") AS mother_birth_state FROM publicdata.samples.natality WHERE year > 2000 AND weight_pounds > 0 AND mother_age > 0 AND plurality > 0 AND gestation_weeks > 0 """ display_dataframe_head_from_query(hash_cols_fixed_query) ``` Using `COALESCE` would provide the same result as the nested `CASE WHEN`. This is preferable when all we want is the first non-null instance. To be precise the `CASE WHEN` would become `COALESCE(wday, day, 0) AS date`. You can read more about it [here](https://cloud.google.com/bigquery/docs/reference/standard-sql/conditional_expressions). Next query will combine our hash columns and will leave us just with our label, features, and our hash values. ``` data_query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, FARM_FINGERPRINT( CONCAT( CAST(year AS STRING), CAST(month AS STRING), CAST(date AS STRING), CAST(state AS STRING), CAST(mother_birth_state AS STRING) ) ) AS hash_values FROM ({CTE_hash_cols_fixed}) """.format(CTE_hash_cols_fixed=hash_cols_fixed_query) display_dataframe_head_from_query(data_query) ``` The next query is going to find the counts of each of the unique 657484 `hash_values`. This will be our first step at making actual hash buckets for our split via the `GROUP BY`. 
``` # Get the counts of each of the unique hash of our splitting column first_bucketing_query = """ SELECT hash_values, COUNT(*) AS num_records FROM ({CTE_data}) GROUP BY hash_values """.format(CTE_data=data_query) display_dataframe_head_from_query(first_bucketing_query) ``` The query below performs a second layer of bucketing where now for each of these bucket indices we count the number of records. ``` # Get the number of records in each of the hash buckets second_bucketing_query = """ SELECT ABS(MOD(hash_values, {modulo_divisor})) AS bucket_index, SUM(num_records) AS num_records FROM ({CTE_first_bucketing}) GROUP BY ABS(MOD(hash_values, {modulo_divisor})) """.format( CTE_first_bucketing=first_bucketing_query, modulo_divisor=modulo_divisor) display_dataframe_head_from_query(second_bucketing_query) ``` The number of records is hard for us to easily understand the split, so we will normalize the count into percentage of the data in each of the hash buckets in the next query. ``` # Calculate the overall percentages percentages_query = """ SELECT bucket_index, num_records, CAST(num_records AS FLOAT64) / ( SELECT SUM(num_records) FROM ({CTE_second_bucketing})) AS percent_records FROM ({CTE_second_bucketing}) """.format(CTE_second_bucketing=second_bucketing_query) display_dataframe_head_from_query(percentages_query) ``` We'll now select the range of buckets to be used in training. ``` # Choose hash buckets for training and pull in their statistics train_query = """ SELECT *, "train" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= 0 AND bucket_index < {train_buckets} """.format( CTE_percentages=percentages_query, train_buckets=train_buckets) display_dataframe_head_from_query(train_query) ``` We'll do the same by selecting the range of buckets to be used evaluation. 
``` # Choose hash buckets for validation and pull in their statistics eval_query = """ SELECT *, "eval" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= {train_buckets} AND bucket_index < {cum_eval_buckets} """.format( CTE_percentages=percentages_query, train_buckets=train_buckets, cum_eval_buckets=train_buckets + eval_buckets) display_dataframe_head_from_query(eval_query) ``` Lastly, we'll select the hash buckets to be used for the test split. ``` # Choose hash buckets for testing and pull in their statistics test_query = """ SELECT *, "test" AS dataset_name FROM ({CTE_percentages}) WHERE bucket_index >= {cum_eval_buckets} AND bucket_index < {modulo_divisor} """.format( CTE_percentages=percentages_query, cum_eval_buckets=train_buckets + eval_buckets, modulo_divisor=modulo_divisor) display_dataframe_head_from_query(test_query) ``` In the below query, we'll `UNION ALL` all of the datasets together so that all three sets of hash buckets will be within one table. We added `dataset_id` so that we can sort on it in the query after. ``` # Union the training, validation, and testing dataset statistics union_query = """ SELECT 0 AS dataset_id, * FROM ({CTE_train}) UNION ALL SELECT 1 AS dataset_id, * FROM ({CTE_eval}) UNION ALL SELECT 2 AS dataset_id, * FROM ({CTE_test}) """.format(CTE_train=train_query, CTE_eval=eval_query, CTE_test=test_query) display_dataframe_head_from_query(union_query) ``` Lastly, we'll show the final split between train, eval, and test sets. We can see both the number of records and percent of the total data. It is really close to that we were hoping to get. 
``` # Show final splitting and associated statistics split_query = """ SELECT dataset_id, dataset_name, SUM(num_records) AS num_records, SUM(percent_records) AS percent_records FROM ({CTE_union}) GROUP BY dataset_id, dataset_name ORDER BY dataset_id """.format(CTE_union=union_query) display_dataframe_head_from_query(split_query) ``` Now that we know that our splitting values produce a good global splitting on our data, here's a way to get a well-distributed portion of the data in such a way that the train, eval, test sets do not overlap and takes a subsample of our global splits. ``` # every_n allows us to subsample from each of the hash values # This helps us get approximately the record counts we want every_n = 1000 splitting_string = "ABS(MOD(hash_values, {0} * {1}))".format(every_n, modulo_divisor) def create_data_split_sample_df(query_string, splitting_string, lo, up): """Creates a dataframe with a sample of a data split. Args: query_string: str, query to run to generate splits. splitting_string: str, modulo string to split by. lo: float, lower bound for bucket filtering for split. up: float, upper bound for bucket filtering for split. Returns: Dataframe containing data split sample. 
""" query = "SELECT * FROM ({0}) WHERE {1} >= {2} and {1} < {3}".format( query_string, splitting_string, int(lo), int(up)) df = bq.query(query).to_dataframe() return df train_df = create_data_split_sample_df( data_query, splitting_string, lo=0, up=train_percent) eval_df = create_data_split_sample_df( data_query, splitting_string, lo=train_percent, up=train_percent + eval_percent) test_df = create_data_split_sample_df( data_query, splitting_string, lo=train_percent + eval_percent, up=modulo_divisor) print("There are {} examples in the train dataset.".format(len(train_df))) print("There are {} examples in the validation dataset.".format(len(eval_df))) print("There are {} examples in the test dataset.".format(len(test_df))) ``` ## Preprocess data using Pandas We'll perform a few preprocessing steps to the data in our dataset. Let's add extra rows to simulate the lack of ultrasound. That is we'll duplicate some rows and make the `is_male` field be `Unknown`. Also, if there is more than child we'll change the `plurality` to `Multiple(2+)`. While we're at it, we'll also change the plurality column to be a string. We'll perform these operations below. Let's start by examining the training dataset as is. ``` train_df.head() ``` Also, notice that there are some very important numeric fields that are missing in some rows (the count in Pandas doesn't count missing data) ``` train_df.describe() ``` It is always crucial to clean raw data before using in machine learning, so we have a preprocessing step. We'll define a `preprocess` function below. Note that the mother's age is an input to our model so users will have to provide the mother's age; otherwise, our service won't work. The features we use for our model were chosen because they are such good predictors and because they are easy enough to collect. ``` def preprocess(df): """ Preprocess pandas dataframe for augmented babyweight data. Args: df: Dataframe containing raw babyweight data. 
Returns: Pandas dataframe containing preprocessed raw babyweight data as well as simulated no ultrasound data masking some of the original data. """ # Clean up raw data # Filter out what we don"t want to use for training df = df[df.weight_pounds > 0] df = df[df.mother_age > 0] df = df[df.gestation_weeks > 0] df = df[df.plurality > 0] # Modify plurality field to be a string twins_etc = dict(zip([1,2,3,4,5], ["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)"])) df["plurality"].replace(twins_etc, inplace=True) # Clone data and mask certain columns to simulate lack of ultrasound no_ultrasound = df.copy(deep=True) # Modify is_male no_ultrasound["is_male"] = "Unknown" # Modify plurality condition = no_ultrasound["plurality"] != "Single(1)" no_ultrasound.loc[condition, "plurality"] = "Multiple(2+)" # Concatenate both datasets together and shuffle return pd.concat( [df, no_ultrasound]).sample(frac=1).reset_index(drop=True) ``` Let's process the train, eval, test set and see a small sample of the training data after our preprocessing: ``` train_df = preprocess(train_df) eval_df = preprocess(eval_df) test_df = preprocess(test_df) train_df.head() train_df.tail() ``` Let's look again at a summary of the dataset. Note that we only see numeric columns, so `plurality` does not show up. ``` train_df.describe() ``` ## Write to .csv files In the final versions, we want to read from files, not Pandas dataframes. So, we write the Pandas dataframes out as csv files. Using csv files gives us the advantage of shuffling during read. This is important for distributed training because some workers might be slower than others, and shuffling the data helps prevent the same data from being assigned to the slow workers. 
``` # Define columns columns = ["weight_pounds", "is_male", "mother_age", "plurality", "gestation_weeks"] # Write out CSV files train_df.to_csv( path_or_buf="train.csv", columns=columns, header=False, index=False) eval_df.to_csv( path_or_buf="eval.csv", columns=columns, header=False, index=False) test_df.to_csv( path_or_buf="test.csv", columns=columns, header=False, index=False) %%bash wc -l *.csv %%bash head *.csv %%bash tail *.csv %%bash ls *.csv %%bash head -5 *.csv ``` ## Create Keras model ### Set CSV Columns, label column, and column defaults. Now that we have verified that our CSV files exist, we need to set a few things that we will be using in our input function. * `CSV_COLUMNS` is going to be our header name of our column. Make sure that they are in the same order as in the CSV files * `LABEL_COLUMN` is the header name of the column that is our label. We will need to know this to pop it from our features dictionary. * `DEFAULTS` is a list with the same length as `CSV_COLUMNS`, i.e. there is a default for each column in our CSVs. Each element is a list itself with the default value for that CSV column. ``` # Determine CSV, label, and key columns # Create list of string column headers, make sure order matches. CSV_COLUMNS = ["weight_pounds", "is_male", "mother_age", "plurality", "gestation_weeks"] # Add string name for label column LABEL_COLUMN = "weight_pounds" # Set default values for each CSV column as a list of lists. # Treat is_male and plurality as strings. DEFAULTS = [[0.0], ["null"], [0.0], ["null"], [0.0]] ``` ### Make dataset of features and label from CSV files. Next, we will write an input_fn to read the data. Since we are reading from CSV files we can save ourselves from trying to recreate the wheel and can use `tf.data.experimental.make_csv_dataset`. This will create a CSV dataset object. However we will need to divide the columns up into features and a label. 
We can do this by applying the map method to our dataset and popping our label column off of our dictionary of feature tensors. ``` def features_and_labels(row_data): """Splits features and labels from feature dictionary. Args: row_data: Dictionary of CSV column names and tensor values. Returns: Dictionary of feature tensors and label tensor. """ label = row_data.pop(LABEL_COLUMN) return row_data, label # features, label def load_dataset(pattern, batch_size=1, mode='eval'): """Loads dataset using the tf.data API from CSV files. Args: pattern: str, file pattern to glob into list of files. batch_size: int, the number of examples per batch. mode: 'train' | 'eval' to determine if training or evaluating. Returns: `Dataset` object. """ # Make a CSV dataset dataset = tf.data.experimental.make_csv_dataset( file_pattern=pattern, batch_size=batch_size, column_names=CSV_COLUMNS, column_defaults=DEFAULTS, ignore_errors=True) # Map dataset to features and label dataset = dataset.map(map_func=features_and_labels) # features, label # Shuffle and repeat for training if mode == 'train': dataset = dataset.shuffle(buffer_size=1000).repeat() # Take advantage of multi-threading; 1=AUTOTUNE dataset = dataset.prefetch(buffer_size=1) return dataset ``` ### Create input layers for raw features. We'll need to get the data to read in by our input function to our model function, but just how do we go about connecting the dots? We can use Keras input layers [(tf.Keras.layers.Input)](https://www.tensorflow.org/api_docs/python/tf/keras/Input) by defining: * shape: A shape tuple (integers), not including the batch size. For instance, shape=(32,) indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known. * name: An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided. 
* dtype: The data type expected by the input, as a string (float32, float64, int32...) ``` # TODO 1 def create_input_layers(): """Creates dictionary of input layers for each feature. Returns: Dictionary of `tf.Keras.layers.Input` layers for each feature. """ inputs = { colname: tf.keras.layers.Input( name=colname, shape=(), dtype="float32") for colname in ["mother_age", "gestation_weeks"]} inputs.update({ colname: tf.keras.layers.Input( name=colname, shape=(), dtype="string") for colname in ["is_male", "plurality"]}) return inputs ``` ### Create feature columns for inputs. Next, define the feature columns. `mother_age` and `gestation_weeks` should be numeric. The others, `is_male` and `plurality`, should be categorical. Remember, only dense feature columns can be inputs to a DNN. ``` # TODO 2 def categorical_fc(name, values): """Helper function to wrap categorical feature by indicator column. Args: name: str, name of feature. values: list, list of strings of categorical values. Returns: Indicator column of categorical feature. """ cat_column = tf.feature_column.categorical_column_with_vocabulary_list( key=name, vocabulary_list=values) return tf.feature_column.indicator_column(categorical_column=cat_column) def create_feature_columns(): """Creates dictionary of feature columns from inputs. Returns: Dictionary of feature columns. """ feature_columns = { colname : tf.feature_column.numeric_column(key=colname) for colname in ["mother_age", "gestation_weeks"] } feature_columns["is_male"] = categorical_fc( "is_male", ["True", "False", "Unknown"]) feature_columns["plurality"] = categorical_fc( "plurality", ["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)", "Multiple(2+)"]) return feature_columns ``` ### Create DNN dense hidden layers and output layer. So we've figured out how to get our inputs ready for machine learning but now we need to connect them to our desired output. Our model architecture is what links the two together. 
Let's create some hidden dense layers beginning with our inputs and end with a dense output layer. This is regression so make sure the output layer activation is correct and that the shape is right. ``` # TODO 3 def get_model_outputs(inputs): """Creates model architecture and returns outputs. Args: inputs: Dense tensor used as inputs to model. Returns: Dense tensor output from the model. """ # Create two hidden layers of [64, 32] just in like the BQML DNN h1 = tf.keras.layers.Dense(64, activation="relu", name="h1")(inputs) h2 = tf.keras.layers.Dense(32, activation="relu", name="h2")(h1) # Final output is a linear activation because this is regression output = tf.keras.layers.Dense( units=1, activation="linear", name="weight")(h2) return output ``` ### Create custom evaluation metric. We want to make sure that we have some useful way to measure model performance for us. Since this is regression, we would like to know the RMSE of the model on our evaluation dataset, however, this does not exist as a standard evaluation metric, so we'll have to create our own by using the true and predicted labels. ``` def rmse(y_true, y_pred): """Calculates RMSE evaluation metric. Args: y_true: tensor, true labels. y_pred: tensor, predicted labels. Returns: Tensor with value of RMSE between true and predicted labels. """ return tf.sqrt(tf.reduce_mean((y_pred - y_true) ** 2)) ``` ### Build DNN model tying all of the pieces together. Excellent! We've assembled all of the pieces, now we just need to tie them all together into a Keras Model. This is a simple feedforward model with no branching, side inputs, etc. so we could have used Keras' Sequential Model API but just for fun we're going to use Keras' Functional Model API. Here we will build the model using [tf.keras.models.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) giving our inputs and outputs and then compile our model with an optimizer, a loss function, and evaluation metrics. 
``` # TODO 4 def build_dnn_model(): """Builds simple DNN using Keras Functional API. Returns: `tf.keras.models.Model` object. """ # Create input layer inputs = create_input_layers() # Create feature columns feature_columns = create_feature_columns() # The constructor for DenseFeatures takes a list of numeric columns # The Functional API in Keras requires: LayerConstructor()(inputs) dnn_inputs = tf.keras.layers.DenseFeatures( feature_columns=feature_columns.values())(inputs) # Get output of model given inputs output = get_model_outputs(dnn_inputs) # Build model and compile it all together model = tf.keras.models.Model(inputs=inputs, outputs=output) model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"]) return model print("Here is our DNN architecture so far:\n") model = build_dnn_model() print(model.summary()) ``` We can visualize the DNN using the Keras plot_model utility. ``` tf.keras.utils.plot_model( model=model, to_file="dnn_model.png", show_shapes=False, rankdir="LR") ``` ## Run and evaluate model ### Train and evaluate. We've built our Keras model using our inputs from our CSV files and the architecture we designed. Let's now run our model by training our model parameters and periodically running an evaluation to track how well we are doing on outside data as training goes on. We'll need to load both our train and eval datasets and send those to our model through the fit method. Make sure you have the right pattern, batch size, and mode when loading the data. 
``` # TODO 5 TRAIN_BATCH_SIZE = 32 NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset repeats, it'll wrap around NUM_EVALS = 5 # how many times to evaluate # Enough to get a reasonable sample, but not so much that it slows down NUM_EVAL_EXAMPLES = 10000 trainds = load_dataset( pattern="train*", batch_size=TRAIN_BATCH_SIZE, mode='train') evalds = load_dataset( pattern="eval*", batch_size=1000, mode='eval').take(count=NUM_EVAL_EXAMPLES // 1000) steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS) logdir = os.path.join( "logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S")) tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=logdir, histogram_freq=1) history = model.fit( trainds, validation_data=evalds, epochs=NUM_EVALS, steps_per_epoch=steps_per_epoch, callbacks=[tensorboard_callback]) ``` ### Visualize loss curve ``` # Plot import matplotlib.pyplot as plt nrows = 1 ncols = 2 fig = plt.figure(figsize=(10, 5)) for idx, key in enumerate(["loss", "rmse"]): ax = fig.add_subplot(nrows, ncols, idx+1) plt.plot(history.history[key]) plt.plot(history.history["val_{}".format(key)]) plt.title("model {}".format(key)) plt.ylabel(key) plt.xlabel("epoch") plt.legend(["train", "validation"], loc="upper left"); ``` ### Save the model ``` OUTPUT_DIR = "babyweight_trained" shutil.rmtree(OUTPUT_DIR, ignore_errors=True) EXPORT_PATH = os.path.join( OUTPUT_DIR, datetime.datetime.now().strftime("%Y%m%d%H%M%S")) tf.saved_model.save( obj=model, export_dir=EXPORT_PATH) # with default serving function print("Exported trained model to {}".format(EXPORT_PATH)) !ls $EXPORT_PATH ``` Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
github_jupyter
# Introduction Here we show how to calculate the merger rate density of systems merging at a single redshift z. By now we assume you understand the individual pipelines of: ClassCOMPAS: -- (handling the (mock) data needed) ClassMSSFR: -- defining the model for metallicity-specific SFR selection_effects: -- module to calculate the probability of detecting a system Here we show which additional steps are needed for the calculation. All these steps are done by default in the ClassCosmicIntegrator. We highlight the steps here outside that class for clarity, since the ClassCosmicIntegrator merely acts as a giant for loop over multiple redshifts and a way to conveniently store the results. # Paths ``` import os pathNoteBook = os.getcwd() pathScripts = pathNoteBook + '/PythonScripts/' pathData = "/home/cneijssel/Desktop/Test/" ``` # Imports ``` import numpy as np import sys import matplotlib.pyplot as plt from astropy.cosmology import WMAP9 as cosmology from scipy.optimize import newton #custom scripts sys.path.append(pathScripts) import ClassCOMPAS import ClassMSSFR ``` # 1- Set up data and MSSFR model ``` # Create instance COMPAS data class COMPAS = ClassCOMPAS.COMPASData(path=pathData, fileName='COMPAS_output.h5') #Set the type of DCO of interest and recover their parameters COMPAS.Mlower = 5 COMPAS.Mupper = 150 COMPAS.binaryFraction =0.7 COMPAS.setGridAndMassEvolved() COMPAS.setCOMPASDCOmask() #Pessimistic BBHs COMPAS.setCOMPASData() # The MSSFR model #use the metallicityGrid of the Data metallicityGrid = COMPAS.metallicityGrid #Create instance Class MSSFR = ClassMSSFR.MSSFR(metallicityGrid=metallicityGrid) #Set the MSSFR model MSSFR.SFRprescription = 'Neijssel et al. (2019)' MSSFR.Zprescription = 'logNormal' MSSFR.logNormalPrescription ='Neijssel Phenomenological' ``` # 2 - Define the redshifts The entire calculation depends on defining a redshift at which the DCOs merge.
Then, using the delay times and astropy, you need to recover when the systems were born. First define our cosmology ``` # see top notebook mergerRedshift = 0.2 #Define an age when the first stars formed based on redshift firstSFR = cosmology.age(10).value #single value in units of Gyr ageUniverseAtMergerGyr = cosmology.age(mergerRedshift) #Recover the array delaytimes in Units of Gyr delayTimeGyr = np.divide(COMPAS.delayTimes, 1000) #Calculate the age at which they were born ageBirth = ageUniverseAtMergerGyr.value - delayTimeGyr #If they are born before first SFR mask them maskUnreal = ageBirth<firstSFR #I set those to minus 1 to label them #This way we can use this as a mask everywhere instead of having #to slice the data in different ways. Useful for larger calculations ageBirth[maskUnreal] = -1 ``` Note that the above might further reduce the number of DCOs in the data, even though we use the "merges in a Hubble time" flag. This is because that flag assumes the merger redshift is zero. When we change our reference frame to higher redshifts, the universe is younger and therefore fewer systems will be able to merge in time. We set the unphysical systems to -1. This is because, later, when we loop over redshifts, the number of possible systems can vary. However, I want to fill in the rates in a predefined 2D array of fixed shape (nr systems, nr of redshift bins). Hence I assume the largest array (size of nr of DCOs) and set the rate to zero in that case. Note that the MSSFR script also depends on this mask of unphysical systems being -1 (see returnMSSFR()). Recovering the redshift is a bit tricky. Astropy can quickly calculate the age from redshift, but the inverse is more difficult. In the code we look up the nearest value in a dense precalculated table. Here we use an older method to calculate it (for credits see the source code of classCosmicIntegrator) which is considerably slower.
``` redshiftsBirth = np.zeros(len(ageBirth)) for nr, age in enumerate(ageBirth): if age != -1: redshiftsBirth[nr] = newton(lambda x: cosmology.age(x).value-age, 0) else: redshiftsBirth[nr] = -1 print("nr of DCOs %s, nr DCOs merging %s"\ %(len(COMPAS.delayTimes), np.sum(ageBirth!=-1))) ``` # Calculate the rate of systems per metallicity The code is structured to do the calculation per subpopulation of DCOs of a single metallicity. Note that if the system was not physically possible (age == -1) then the rate is set to zero. ``` #create an array for rate per system merging at redshift z ratePerSystem = np.zeros(len(COMPAS.delayTimes)) for nrZ, Z in enumerate(metallicityGrid): maskZ = COMPAS.metallicitySystems == Z #give MSSFR per system which has metallicity Z [Msun/dGpc3/dyr] mssfr = MSSFR.returnMSSFR(metallicity=Z,\ agesBirth =ageBirth[maskZ], redshiftBirth=redshiftsBirth[maskZ]) #Calculate rate using amount of Msun evolved [dN/dGpc3/dyr] RatesZ = np.divide(mssfr, COMPAS.totalMassEvolvedPerZ[nrZ]) #Fill the rates in the defined array according to mask ratePerSystem[maskZ] = RatesZ print(metallicityGrid) print(ratePerSystem) print(np.sum(ratePerSystem)) # Using the rates in a histogram chirpMasses = COMPAS.mChirp binsM = np.linspace(0,30,100) dM = np.diff(binsM) center= (binsM[1:]+binsM[:-1])/2. #remember that the reight is essentially a weight per system y , _ = np.histogram(chirpMasses, bins=binsM, \ weights=ratePerSystem) dydMchirp = np.divide(y, dM) fig, axes = plt.subplots(1,1, figsize=(9,8)) axes.plot(center, dydMchirp) axes.set_xlabel('chirp mass [Msun]', fontsize=20) axes.set_ylabel('rate [yr-1 Gpc-3]', fontsize=20) axes.set_title('merger rate density at z=%s'\ %(mergerRedshift), fontsize=20) plt.tight_layout() plt.show() ``` Now here we only have the chirp mass distribution at a single redshift per unit volume. The next step is to do it over a range of redshifts and get the absolute rates.
github_jupyter
# Voice Recognition Using a triplet loss model, output an encoding that represents signatures of voices. ## Imports & Setup ``` from multiprocessing.pool import ThreadPool import os import time import h5py import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras import tensorflow_datasets as tfds import tensorflow_addons as tfa import h5py from tensorflow.keras.optimizers import ( Adam ) from tensorflow.keras.layers import ( Layer, Input ) from paiutils.neural_network import Trainer, Predictor from paiutils import audio from paiutils.analytics import Analyzer from paiutils.util_funcs import load_directory_database, save_directory_database pyaudio = audio.pyaudio gpus = tf.config.experimental.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) ``` ## Create Data Load table ``` sample_rate = 16000 metadata = pd.read_csv('D:\\cv_data\\data.tsv', delimiter='\t') metadata['path'] = metadata['path'].str.split('.').str[0] metadata['path'] += '.wav' ``` Shrink data ``` counts = metadata.groupby('sentence')['path'].count() metadata2 = metadata[metadata['sentence'].isin(counts[10 <= counts].index)] counts = metadata2.groupby('client_id')['path'].count() metadata2 = metadata2[metadata2['client_id'].isin(counts[10 <= counts].index)] metadata2['accent'].value_counts() metadata2 = metadata[metadata['sentence'].str.len() > 10] metadata2 = metadata2[metadata2['down_votes'] == 0] #metadata2 = metadata2[metadata2['accent'].isin(['us'])] counts = metadata2.groupby('sentence')['path'].count() metadata2 = metadata2[metadata2['sentence'].isin(counts[10 <= counts].index)] counts = metadata2.groupby('client_id')['path'].count() metadata2 = metadata2[metadata2['client_id'].isin(counts[10 <= counts].index)] metadata2 = metadata2.groupby('client_id').sample(10) len(metadata2), len(metadata2['client_id'].unique()) print(len(metadata2)/len(metadata), len(metadata2), len(metadata2['client_id'].unique()), 
metadata2.groupby('client_id')['path'].count().mean()) metadata = metadata2 del metadata2 labels = metadata['client_id'].unique() metadata['client_nid'] = metadata['client_id'].replace(labels, np.arange(len(labels))) metadata.to_csv('D:\\cv_mutated\\metadata.tsv', sep='\t') del metadata['Unnamed: 0'] vali_ids = pd.Series(metadata['client_id'].unique()).sample(100) vali_ids = metadata['client_id'].isin(vali_ids) train_ids = pd.Series(metadata.loc[~vali_ids, 'client_id'].unique()) train_ids = metadata['client_id'].isin(train_ids) train_metadata = metadata[train_ids] vali_metadata = metadata[vali_ids] train_metadata.to_csv('D:\\cv_mutated\\train_metadata.tsv', sep='\t') vali_metadata.to_csv('D:\\cv_mutated\\vali_metadata.tsv', sep='\t') ``` Save data ``` num_mutations = 4 length = 3 backgrounds = [] for file in os.listdir('D:\\cv_mutated\\background'): x, sr, at = audio.load(f'D:\\cv_mutated\\background\\{file}', rate=sample_rate) backgrounds += np.array_split(x, np.arange(0, len(x), sample_rate * length)[1:])[:-1] for path in train_metadata['path'].sample(5000): x, sr, at = audio.load(f'D:\\cv_data\\{path}', rate=sample_rate) x = np.roll(x, int(len(x) * np.random.random())) x = audio.set_duration(x, sample_rate, length) backgrounds.append(x) backgrounds = np.array(backgrounds, dtype=np.float32) num_mutations = 4 length = 3 lens = [] xs = np.empty((10 * (num_mutations + 1), sample_rate * length), dtype=np.float32) for group_id, data in metadata[['client_id', 'path']].groupby('client_id'): paths = data['path'] ndx = 0 for path in paths.values: x, sr, at = audio.load(f'D:\\cv_data\\{path}') x = audio.vad_trim_all(x, sr, .03, 1) lens.append(audio.calc_duration(x, sample_rate)) xs[ndx] = audio.set_duration(x, sample_rate, length) ndx += 1 for _ in range(num_mutations): m = np.random.random() #print(m) y = np.roll(x, int(len(x) * m)) y = audio.set_duration(y, sample_rate, length) #y = x.copy() m = np.random.uniform(.5, 1.5) if m > 1: m = 1 / (2 - m) #print(m) y = 
audio.adjust_volume(y, m) m = np.maximum(np.random.uniform(-.2, .2), 0) #print(m) ym = y[y > 0].mean() y += np.random.uniform(low=-ym, high=ym, size=len(y)) * m if np.random.random() > .5: #m = np.random.uniform(.05, .2) m = .2 m1 = np.random.randint(len(backgrounds)) #print(m, m1) y += audio.set_power(backgrounds[m1], audio.calc_rms(y) * m) y = np.clip(y, -1, 1) m = np.random.uniform(.97, 1.03) if m > 1: m = 1 / (2 - m) #print(m) y = audio.adjust_speed(y, sample_rate, m) y = audio.set_duration(y, sample_rate, length) #audio.play(backgrounds[m1], sample_rate) #audio.play(y, sample_rate) xs[ndx] = y ndx += 1 with h5py.File(f'D:\\cv_mutated\\{group_id}.h5', 'w') as file: file.create_dataset('data', data=xs) np.array(lens).mean() ``` ## Load Data Load metadata and split into train and validation ``` sample_rate = 16000 length = 3 class_size = (4 + 1) * 10 metadata = pd.read_csv('D:\\cv_mutated\\metadata.tsv', delimiter='\t') del metadata['Unnamed: 0'] train_metadata = pd.read_csv('D:\\cv_mutated\\train_metadata.tsv', sep='\t') train_metadata_sample = train_metadata[train_metadata['client_id'].isin(pd.Series(train_metadata['client_id'].unique()).sample(200))] vali_metadata = pd.read_csv('D:\\cv_mutated\\vali_metadata.tsv', sep='\t') ``` Set nids for metadata ``` train_labels = train_metadata['client_id'].unique() train_metadata['client_nid'] = train_metadata['client_id'].replace(train_labels, np.arange(len(train_labels))) train_labels_sample = train_metadata_sample['client_id'].unique() train_metadata_sample['client_nid'] = train_metadata_sample['client_id'].replace(train_labels_sample, np.arange(len(train_labels_sample))) vali_labels = vali_metadata['client_id'].unique() vali_metadata['client_nid'] = vali_metadata['client_id'].replace(vali_labels, np.arange(len(vali_labels))) ``` Load data ``` #train_anchors = np.empty((len(train_metadata['client_id'].unique()), class_size, sample_rate * length), dtype=np.float32) #for group_id, data in train_metadata[['client_id', 
'client_nid', 'path']].groupby('client_id'): # with h5py.File(f'D:\\cv_mutated\\{group_id}.h5', 'r') as hf: # train_anchors[int(data['client_nid'].values[0])] = hf['data'][:] train_anchors_sample = np.empty((len(train_metadata_sample['client_id'].unique()), class_size, sample_rate * length), dtype=np.float32) for group_id, data in train_metadata_sample[['client_id', 'client_nid', 'path']].groupby('client_id'): with h5py.File(f'D:\\cv_mutated\\{group_id}.h5', 'r') as hf: train_anchors_sample[int(data['client_nid'].values[0])] = hf['data'][:] vali_anchors = np.empty((len(vali_metadata['client_id'].unique()), class_size, sample_rate * length), dtype=np.float32) for group_id, data in vali_metadata[['client_id', 'client_nid', 'path']].groupby('client_id'): with h5py.File(f'D:\\cv_mutated\\{group_id}.h5', 'r') as hf: vali_anchors[int(data['client_nid'].values[0])] = hf['data'][:] #train_anchors.nbytes / 1000000000, train_anchors_sample.nbytes / 1000000000, vali_anchors.nbytes / 1000000000 #num_mutations = 1 #train_mutated_anchors = np.empty( # (len(train_metadata['client_id'].unique()), train_anchors.shape[1] * num_mutations, sample_rate * length), dtype=np.float32 #) #for cid in range(train_anchors.shape[0]): # count = 0 # for ndx in range(train_anchors.shape[1]): # x = train_anchors[cid, ndx] # for _ in range(num_mutations): # m = np.random.random() # #print(m) # y = np.roll(x, int(len(x) * m)) # #y = x.copy() # m = np.random.uniform(.5, 1.5) # if m > 1: # m = 1 / (2 - m) # #print(m) # y = audio.adjust_volume(y, m) # m = np.maximum(np.random.uniform(-.2, .2), 0) # #print(m) # ym = y[y > 0].mean() # y += np.random.uniform(low=-ym, high=ym, size=len(y)) * m # y = np.clip(-1, 1, y) # m = np.random.uniform(.97, 1.03) # if m > 1: # m = 1 / (2 - m) # #print(m) # y = audio.adjust_speed(y, sample_rate, m) # y = audio.set_duration(y, sample_rate, length) # #audio.play(x, sample_rate) # #audio.play(y, sample_rate) # train_mutated_anchors[cid, count] = y # count += 1 
#train_anchors = np.hstack([train_anchors, train_mutated_anchors]) #train_anchors = np.expand_dims(train_anchors, axis=-1) train_anchors_sample = np.expand_dims(train_anchors_sample, axis=-1) vali_anchors = np.expand_dims(vali_anchors, axis=-1) ``` ## Model ``` x0 = keras.layers.Input(shape=train_anchors_sample.shape[2:]) x = keras.layers.Conv1D(32, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x0) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(32, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(32, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(32, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(64, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(64, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(64, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(64, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(128, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(128, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(128, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) x = keras.layers.Conv1D(128, 3, 2, activation='swish', kernel_initializer='he_normal', padding='same')(x) x = keras.layers.Dropout(0.1)(x) #x = 
keras.layers.GlobalAveragePooling1D()(x) x = keras.layers.Flatten()(x) dense_layer = keras.layers.Dense(128, activation=None, kernel_initializer='he_normal')(x) norm_layer = keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))(dense_layer) model = keras.Model(inputs=x0, outputs=norm_layer) model.summary() ``` ### Train ``` def triplet_generator_load(metadata, batch_size, reload, sample_size, class_size, threads=4): while True: ids = pd.Series(metadata['client_id'].unique()).sample(sample_size) metadata_sample = metadata[metadata['client_id'].isin(ids)] anchors = np.empty((sample_size, class_size, sample_rate * length), dtype=np.float32) #for group_id, data in metadata_sample[['client_id', 'client_nid', 'path']].groupby('client_id'): # with h5py.File(f'D:\\cv_mutated\\{group_id}.h5', 'r') as hf: # anchors[int(data['client_nid'].values[0])] = hf['data'][:] def load(x): nonlocal anchors ndx, (group_id, data) = x with h5py.File(f'D:\\cv_mutated\\{group_id}.h5', 'r') as hf: anchors[ndx] = hf['data'][:] #s = time.time() with ThreadPool(threads) as p: p.map(load, zip(np.arange(sample_size), metadata_sample[['client_id', 'path']].groupby('client_id'))) #print(time.time()-s) anchors = np.expand_dims(anchors, axis=-1) for _ in range(reload): a_ndxs = np.random.choice(np.arange(len(anchors)), size=batch_size) a_ndxs2 = np.random.choice(np.arange(len(anchors[0])), size=batch_size) p_ndxs = np.random.choice(np.arange(len(anchors[0])), size=batch_size) n_ndxs = np.random.choice(np.arange(len(anchors) - 1), size=batch_size) n_ndxs[n_ndxs >= a_ndxs] += 1 n_ndxs2 = np.random.choice(np.arange(len(anchors[0])), size=batch_size) nn_ndxs = np.random.choice(np.arange(len(anchors[0])), size=batch_size) a_batch = anchors[a_ndxs, a_ndxs2] p_batch = anchors[a_ndxs, p_ndxs] n_batch = anchors[n_ndxs, n_ndxs2] nn_batch = anchors[n_ndxs, nn_ndxs] ndxs = np.hstack([a_ndxs, a_ndxs, n_ndxs, n_ndxs]) batch = np.vstack([a_batch, p_batch, n_batch, nn_batch]) yield batch, ndxs def 
triplet_generator(anchors, batch_size): while True: a_ndxs = np.random.choice(np.arange(len(anchors)), size=batch_size) a_ndxs2 = np.random.choice(np.arange(len(anchors[0])), size=batch_size) p_ndxs = np.random.choice(np.arange(len(anchors[0])), size=batch_size) n_ndxs = np.random.choice(np.arange(len(anchors) - 1), size=batch_size) n_ndxs[n_ndxs >= a_ndxs] += 1 n_ndxs2 = np.random.choice(np.arange(len(anchors[0])), size=batch_size) nn_ndxs = np.random.choice(np.arange(len(anchors[0])), size=batch_size) a_batch = anchors[a_ndxs, a_ndxs2] p_batch = anchors[a_ndxs, p_ndxs] n_batch = anchors[n_ndxs, n_ndxs2] nn_batch = anchors[n_ndxs, nn_ndxs] ndxs = np.hstack([a_ndxs, a_ndxs, n_ndxs, n_ndxs]) batch = np.vstack([a_batch, p_batch, n_batch, nn_batch]) yield batch, ndxs def paccuracy(y_true, y_pred): batch_size = len(y_true) // 4 anchors = y_pred[:batch_size, :] postives = y_pred[batch_size:batch_size * 2, :] negatives = y_pred[batch_size * 2: batch_size * 3] nnegatives = y_pred[batch_size * 3:] a = tf.norm(postives - anchors, axis=-1) a = tf.reduce_sum(tf.cast(a < 1.0, tf.int32), axis=-1) b = tf.norm(negatives - nnegatives, axis=-1) b = tf.reduce_sum(tf.cast(b < 1.0, tf.int32), axis=-1) return (a + b) / (batch_size * 2) def naccuracy(y_true, y_pred): batch_size = len(y_true) // 4 anchors = y_pred[:batch_size, :] postives = y_pred[batch_size:batch_size * 2, :] negatives = y_pred[batch_size * 2: batch_size * 3] nnegatives = y_pred[batch_size * 3:] a = tf.norm(negatives - anchors, axis=-1) a = tf.reduce_sum(tf.cast(a > 1.0, tf.int32), axis=-1) b = tf.norm(postives - negatives, axis=-1) b = tf.reduce_sum(tf.cast(b > 1.0, tf.int32), axis=-1) c = tf.norm(nnegatives - anchors, axis=-1) c = tf.reduce_sum(tf.cast(c > 1.0, tf.int32), axis=-1) d = tf.norm(postives - nnegatives, axis=-1) d = tf.reduce_sum(tf.cast(d > 1.0, tf.int32), axis=-1) return (a + b + c + d) / (batch_size * 4) batch_size = 32 steps = int(len(train_metadata['client_id'].unique()) * class_size / batch_size) // 
4 vali_steps = int(len(vali_anchors) * len(vali_anchors[0]) / batch_size) // 4 trainer = Trainer(model, {'train': triplet_generator_load(train_metadata, batch_size, steps // 4, 2000, class_size, threads=10), 'validation': triplet_generator(vali_anchors, batch_size)}) trainer.model.compile(optimizer=Adam(.001), loss=tfa.losses.TripletSemiHardLoss(1.0), metrics=[paccuracy, naccuracy]) trainer.load('20210826_221821_513997') #for ndx, layer in enumerate(trainer.model.layers[:-2]): # model.layers[ndx].set_weights(layer.get_weights()) #trainer.model = model callbacks = [keras.callbacks.TensorBoard(log_dir='logs/training/' + time.strftime(r'%Y%m%d_%H%M%S'), histogram_freq=1)] for _ in range(4): trainer.train(epochs=10, steps_per_epoch=steps, validation_steps=vali_steps, callbacks=callbacks) path = trainer.save('') ``` ## Test ``` #path = '20210818_093422_095947' predictor = Predictor(path) xs = [] ys = [] labels = [] count = 0 for ndx, anchor_class in enumerate(train_anchors_sample): for anchor in anchor_class: xs.append(predictor.predict(anchor)) ys.append(count) count += 1 labels.append(ndx) analyzer = Analyzer(xs, ys, labels) analyzer.plot(analyzer.tsne(2, n_jobs=-1)) xs = [] ys = [] labels = [] count = 0 for ndx, anchor_class in enumerate(vali_anchors): for anchor in anchor_class: xs.append(predictor.predict(anchor)) ys.append(count) count += 1 labels.append(ndx) analyzer = Analyzer(xs, ys, labels) analyzer.plot(analyzer.tsne(2, n_jobs=-1)) ``` ## Live Test ``` path = '20210822_164154_339789' predictor = Predictor(path) def live_recognition(predictor, length, rate): p = pyaudio.PyAudio() patype = pyaudio.paInt16 stream = p.open(format=patype, channels=1, rate=rate, input=True, frames_per_buffer=4096) unique_recordings = [] frames = [] while True: frames = np.append(frames, np.frombuffer(stream.read(4096), dtype='int16') / np.iinfo('int16').max) if len(frames) > rate * length: x = frames[-rate * length:] x = audio.vad_trim_all(x, rate, .03, 1) x = audio.set_duration(x, 
rate, 3) x = predictor.predict(x) unique_recordings.append(x) print(np.linalg.norm(np.array(unique_recordings) - x, axis=-1)) frames = [] stream.stop_stream() stream.close() p.terminate() live_recognition(predictor, 3, sample_rate) voices = [] while True: input('enter to record') x, sr, a = audio.record(3, sample_rate) #audio.plot(x) x = audio.vad_trim_all(x, sample_rate, .03, 1) x = audio.set_duration(x, sample_rate, 2) #audio.plot(x) #audio.play(x, sr) x = predictor.predict(x) distances = [] for voice in voices: distances.append((np.linalg.norm(voice - x))) print(distances) voices.append(x) ```
github_jupyter
# Classifying Fashion-MNIST Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world. <img src='https://github.com/iArunava/Intro-to-Deep-Learning-with-Pytorch-Udacity-Solutions/blob/master/intro-to-pytorch/assets/fashion-mnist-sprite.png?raw=1' width=500px> In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this. First off, let's load the dataset through torchvision. ``` !pip install torch torchvision # Helper functions def view_classify(img, ps, version="MNIST"): ''' Function for viewing an image and it's predicted classes. 
''' ps = ps.data.numpy().squeeze() fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2) ax1.imshow(img.resize_(1, 28, 28).numpy().squeeze()) ax1.axis('off') ax2.barh(np.arange(10), ps) ax2.set_aspect(0.1) ax2.set_yticks(np.arange(10)) if version == "MNIST": ax2.set_yticklabels(np.arange(10)) elif version == "Fashion": ax2.set_yticklabels(['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot'], size='small'); ax2.set_title('Class Probability') ax2.set_xlim(0, 1.1) plt.tight_layout() def imshow(image, ax=None, title=None, normalize=True): """Imshow for Tensor.""" if ax is None: fig, ax = plt.subplots() image = image.numpy().transpose((1, 2, 0)) if normalize: mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = std * image + mean image = np.clip(image, 0, 1) ax.imshow(image) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['left'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.tick_params(axis='both', length=0) ax.set_xticklabels('') ax.set_yticklabels('') return ax import torch import torch.nn as nn import torch.nn.functional as F from torchvision import datasets, transforms import matplotlib.pyplot as plt import numpy as np # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) # Download and load the training data trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download and load the test data testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) ``` Here we can see one of the images. 
``` image, label = next(iter(trainloader)) imshow(image[0,:]); ``` ## Building the network Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers. ``` # TODO: Define your network architecture here # Hyperparameters inp = 784 hid1 = 128 hid2 = 64 outl = 10 epochs = 5 lr = 0.003 model = nn.Sequential(nn.Linear(inp, hid1), nn.ReLU(), nn.Linear(hid1, hid2), nn.ReLU(), nn.Linear(hid2, outl), nn.LogSoftmax(dim=1)) ``` # Train the network Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`). Then write the training code. Remember the training pass is a fairly straightforward process: * Make a forward pass through the network to get the logits * Use the logits to calculate the loss * Perform a backward pass through the network with `loss.backward()` to calculate the gradients * Take a step with the optimizer to update the weights By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4. 
``` # TODO: Create the network, define the criterion and optimizer criterion = nn.NLLLoss() optimizer = torch.optim.SGD(model.parameters(), lr=lr) # TODO: Train the network here for epoch in range(epochs): running_loss = 0 for images, labels in trainloader: images = images.view(images.shape[0], -1) optimizer.zero_grad() logits = model(images) loss = criterion(logits, labels) loss.backward() optimizer.step() running_loss += loss.item() else: print ('Loss: ', running_loss / len(trainloader)) %matplotlib inline %config InlineBackend.figure_format = 'retina' #import helper # Test out your network! dataiter = iter(testloader) images, labels = dataiter.next() img = images[0] # Convert 2D image to 1D vector img = img.resize_(1, 784) # TODO: Calculate the class probabilities (softmax) for img ps = torch.exp(model(img)) print (ps) # Plot the image and probabilities #helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion') view_classify(img.resize_(1, 28, 28), ps, version='Fashion') ```
github_jupyter
# Control and Flow ## Turing completeness Now that we understand how we can use objects to store and model our data, we only need to be able to control the flow of our program in order to have a program that can, in principle, do anything! Specifically we need to be able to: * Control whether a program statement should be executed or not, based on a variable. "Conditionality" * Jump back to an earlier point in the program, and run some statements again. "Branching" Once we have these, we can write computer programs to process information in arbitrary ways: we are *Turing Complete*! ## Conditionality Conditionality is achieved through Python's `if` statement: ``` x = -3 if x < 0: print(x, "is negative") print("This is controlled") print("Always run this") ``` The **controlled** statements are indented. Once we remove the indent, the statements will once again happen regardless of whether the `if` statement is true of false. ## Else and Elif Python's if statement has optional elif (else-if) and else clauses: ``` x = -3 if x < 0: print("x is negative") else: print("x is positive") x = 5 if x < 0: print("x is negative") elif x == 0: print("x is zero") else: print("x is positive") ``` Try editing the value of x here, and note which section of the code is run and which are not. ``` choice = "dlgkhdglkhgkjhdkjgh" if choice == "high": print(1) elif choice == "medium": print(2) else: print(3) ``` ## Comparison `True` and `False` are used to represent **boolean** (true or false) values. ``` 1 > 2 ``` Comparison on strings is alphabetical - letters earlier in the alphabet are 'lower' than later letters. ``` "A" < "Z" "UCL" > "King's" ``` There's no automatic conversion of the **string** True to the **boolean variable** `True`: ``` True == "True" ``` Be careful not to compare values of different types. At best, the language won't allow it and an issue an error, at worst it will allow it and do some behind-the-scenes conversion that may be surprising. 
``` "1" < 2 ``` Any statement that evaluates to `True` or `False` can be used to control an `if` Statement. Experiment with numbers (integers and floats) - what is equivalent to `True`? ``` 0 == False ``` ## Automatic Falsehood Various other things automatically count as true or false, which can make life easier when coding: ``` mytext = "Hello" if mytext: print("Mytext is not empty") mytext2 = "" if mytext2: print("Mytext2 is not empty") ``` We can use logical `not` and logical `and` to combine true and false: ``` x = 3.2 if not (x > 0 and type(x) == int): print(x, "is not a positive integer") ``` `not` also understands magic conversion from false-like things to True or False. ``` not not "Who's there!" #  Thanks to Mysterious Student bool("") bool("James") bool([]) bool(["a"]) bool({}) bool({"name": "James"}) bool(0) bool(1) not 2 == 3 ``` But subtly, although these quantities evaluate True or False in an if statement, they're not themselves actually True or False under ==: ``` [] == False bool([]) == bool(False) ``` ## Indentation In Python, indentation is semantically significant. You can choose how much indentation to use, so long as you are consistent, but four spaces is conventional. Please do not use tabs. In the notebook, and most good editors, when you press `<tab>`, you get four spaces. ``` if x > 0: print(x) ``` ##  Pass A statement expecting identation must have some indented code or it will create an error. This can be annoying when commenting things out (with `#`) inside a loop or conditional statement. ``` if x > 0: # print x print("Hello") ``` So the `pass` statement is used to do nothing. ``` if x > 0: pass print("Hello") ```
github_jupyter
# Data Science - Python and Pandas ## Table of Content 1. [Introduction](#introduction)<br> 1.1. [Series and DataFrames](#series)<br> 1.2. [Data Selection](#selection)<br> 2. [Transform data](#transform)<br> 2.1. [Adding and deleting columns](#columns)<br> 2.2. [Cleaning Data](#cleaning)<br> 2.3. [Merging Data](#merging)<br> 2.4. [Grouping Data](#grouping)<br> 3. [Visualise data](#visualise)<br> 4. [Further learning](#extra)<br> <a id="introduction"></a> ## 1. Introduction <b> - Pandas is an open-source Python Library providing high-performance data manipulation and analysis. </b> <b> - With the combination of Python and Pandas, we can accomplish five typical steps in the processing and analysis of data, regardless of the origin of data — load, prepare, manipulate, model, and analyze. <i> <b> Let's start with loading the required Python packages and uploading our data into the notebook. </b> </i> - Select each cell by clicking on it, and then click on the `Run` button at the top of the notebook (or use `Shift+Enter`), to run the cells in the notebook. - The numbers in front of the cells tell you in which order you have run them, for instance `[1]`. - When you see a `[*]` the cell is currently running and `[]` means you have not run the cell yet. <b> ** Please note that it is important to run all cells** </b> ``` import numpy as np import pandas as pd ``` <b> With Pandas you can easily read contents from a CSV file using the 'read_csv' function. Let us load the file by running the next cell. </b> ``` df = pd.read_csv('https://raw.githubusercontent.com/IBMDeveloperUK/crime-data-workshop/master/data/london-borough-profiles.csv',encoding = 'unicode_escape') ``` #### Let's take a look at the data that was loaded into the notebook. - To begin with, you can use functions such as df.head() or df.tail() to view the first five or last five lines in your dataset respectively. 
- Alternatively you can add a number between the brackets () to specify the number of lines you want to display. eg. df.head(50) - Use df.dtyes to check the different variables and what datatype they have. ``` df.head(50) ``` ### Explore some of the following commands: ``` #returns the column labels df.columns #returns values without the labels df.values #Displays the number of rows len(df) #Displayes how many rows and columns there are df.shape ``` > <b> *Tip*: </b> If you would like to add more cells to run additional commands,activate the above cell by clicking on it and then click on the '+' option at the top of the notebook. This will add extra cells. Click on the buttons with the upwards and downwards arrows to move the cells up and down to change their order as needed <a id="series"></a> ### 1.1 Series and DataFrames Pandas has two main data structures: `Series` and `DataFrames`. <b> Series </b> is a one-dimensional labelled array capable of holding data of any type (integer, string, float, python objects, etc.). - Has an associated label with each element or an 'index' <b> Data frame </b> is a two-dimensional data structure, i.e., data is aligned in a tabular fashion in rows and columns. - Dataframe has labelled axes (rows and columns) It is very similar to a SQL table or a spreadsheet data representation. <div class="alert alert-info" style="font-size:100%"> <b>Please refer this link <a href="http://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html">10 minute introduction</a> for a quick overview of Pandas.<br> </div> ``` s = pd.Series([1, 3, 5, np.nan, 6, 8]) s ``` <b> You can create a DataFrame in many ways, by loading a file or using a NumPy array and a date for the index. 
</b> <b><i> NumPy </i></b> Python library which allows for handling multi-dimentional arrays and provides a collection of functions to perform arithmetic operations on these arrays <div class="alert alert-info" style="font-size:100%"> <b> Please refer this <a href="https://docs.scipy.org/doc/numpy-1.15.0/user/quickstart.html"> NumPy Tutorial</a> for an overview of NumPy.<br> </div> #### Two examples to create a Dataframe ``` dates = pd.date_range('20130101', periods=6) dates numbers = np.random.randn(6, 4) numbers df1 = pd.DataFrame(numbers, index=dates, columns=list('ABCD')) df1 df2 = pd.DataFrame({'A': 1., 'B': pd.Timestamp('20130102'), 'C': pd.Series(1, index=list(range(4)), dtype='float32'), 'D': np.array([3] * 4, dtype='int32'), 'E': pd.Categorical(["test", "train", "test", "train"]), 'F': 'foo'}) df2.head() ``` You can use function `type()` to check the data type of each variable ``` print('Data type of s is '+str(type(s))) print('Data type of s is '+str(type(dates))) print('Data type of s is '+str(type(numbers))) print('Data type of df is '+str(type(df1))) type(df) ``` <a id="selection"></a> ## 1.2 Data Selection #### In this section, we will look at indexing and selecting data, which is key for us to perform analysis and visualization of the data as we progress in the notebook #### For more information on Indexing and Selecting data, please refer the link [here](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html). To begin, lets make a copy of our exisiting Dataframe ``` boroughs = df.copy() ``` We will now set the Area Code (`Code`) as the index. You can see below that this changes the table: ``` boroughs = boroughs.set_index(['Code']) boroughs.head() ``` #### `.loc[]` <b> You can access individual or groups of rows and columns with labels using .loc[]. 
(This only works for the column that was set to the index) </b> ``` boroughs.loc['E09000001', 'Inland_Area_(Hectares)'] boroughs.loc['E09000001':'E09000004', ['Inland_Area_(Hectares)', 'Average_Age,_2017']] ``` #### Or select by position with `.iloc[]`. <b> You can select a single row, multiple rows (or columns) at particular positions in the index. This function is integer based (from 0 to length-1 of the axis)</b> ``` boroughs.iloc[0] boroughs.iloc[:,1] boroughs.iloc[:,0:2] boroughs.iloc[2:4,0:2] ``` <b> Using the exisiting dataset, you can also use one or more column names to create a new DataFrame. </b> ``` boroughs2 = boroughs[['Inland_Area_(Hectares)', 'Average_Age,_2017']] boroughs2 ``` ## Filtering <b> Selecting rows based on a certain condition can be done with Boolean indexing </b> <b> Boolean Indexing uses the actual values of the data in the DataFrame as opposed to the row/column labels or index positions ``` boroughs['Average_Age,_2017'] > 39 ``` <b> If you want to select the rows and see all the data add `boroughs[]` around your function, as shown below ``` boroughs[boroughs['Average_Age,_2017'] > 39] ``` #### You can also combine different columns using `&`, `|` and `==` Operators ``` boroughs[(boroughs['Average_Age,_2017'] > 39) & (boroughs['Political_control_in_council'] == 'Cons')] boroughs[(boroughs['Political_control_in_council'] == 'Lab') | (boroughs['Political_control_in_council'] == 'Lib Dem')] ``` <b> With the above commands you can now start exploring the data some more. </b> <b> Let us try to answer the below questions by writing some code, Use the tips provided below to attempt these by yourself. Add as many cells needed. </b> <ol> <li>Which borough has the largest population density per hectare in 2017? </li> <li>What are the maximum and minimum number of new migrants in 2015/2016? And for which boroughs?</li> <li> Which borough is happiest? 
</li> <li> Which borough has the third largest population by county of birth in 2011 as 'Romania' </li> </ol> > *Tips*: - Find the maximum of a row with for instance `boroughs['Population'].max()` - Extract the value from a cell in a DataFrame with `.value[]` - Print a value with `print()` for instance: `print(boroughs['area'][0])` for the first row. If you calculate multiple values in one cell you will need this, else the answers will not be displayed. - To extract an entire row use `idxmax()` which returns column with maximum value, and `.loc[]` to return row of the index - To see the answer uncomment the line in the cell that contains `%load` (by deleting the `#`) and then run the cell, but try to find your own solution first in the cell above the solution! ``` # Answer 1: borough with the largest Density boroughs[boroughs['Population_density_(per_hectare)_2017'] == boroughs['Population_density_(per_hectare)_2017'].max()] # Answer 2: maximum and minimum number of new migrants print (boroughs['New_migrant_(NINo)_rates,_(2015/16)'].min()) print (boroughs['New_migrant_(NINo)_rates,_(2015/16)'].max()) # Answer 3: happiest borough boroughs['Area_name'].loc[boroughs['Happiness_score_2011-14_(out_of_10)'].idxmax()] #Answer 4 boroughs[boroughs['Third_largest_migrant_population_by_country_of_birth_(2011)'] == 'Romania'] ``` <a id="transform"></a> ## 2. Transform Data <b> When looking at data there are always transformations needed to get it in the format you need for your analysis, visualisations or models. </b> <b> These are a few examples of the endless possibilities. The best way to learn is to find a dataset and try to answer questions with the data. 
</b> <a id="columns"></a> ### 2.1 Adding and deleting columns Adding a column can be done by using the <b> `new` </b> function Which can then be dropped using the <b> `drop` </b> function ``` boroughs['new'] = 1 boroughs.head() boroughs = boroughs.drop(columns='new') boroughs.head() ``` <a id="cleaning"></a> ### 2.2 Cleaning Data <b> Things to check: </b> - Is the data tidy: each variable forms a column, each observation forms a row and each type of observational unit forms a table. - Are all columns in the right data format? - Are there missing values? - Are there unrealistic outliers? Get a quick overview of the numeric data using the `.describe()` function. If any of the numeric columns are missing this is a probably because of a wrong data type. ``` boroughs.describe() ``` <a id="Renaming"></a> ### 2.2 Renaming Columns You can change names of columns using the rename function. One of the biggest advantages of using the rename function is that we can change as many column names as we want. <b> Ex : DataFrame.rename(columns={'Coun':'Country', 'Emp':'Employment', 'Cont' : 'Continent'}, </b> In the below example we are changing the name of a single column. We are also going to use <b> inplace=True </b> to change column names in place. ``` boroughs.rename(columns={'%_of_largest_migrant_population_(2011)':'largest_migrant_population_2011(%)'}, inplace=True) boroughs.columns boroughs.head() ``` <a id="merging"></a> ### 2.3 Merging Data <b> Pandas allows several different options to combine data. </b> This [documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html) has lots of examples. 
<b> To understand how we can combine and merge data, lets create two new Dataframes, cities and cities2 </b> You can combine data with `.append()` or `.concat()`: <b> Concat gives the flexibility to join based on the axis( all rows or all columns) </b> <b> Append is the specific case(axis=0, join='outer') of concat </b> ``` data = {'city': ['London','Manchester','Birmingham','Leeds','Glasgow'], 'population': [9787426, 2553379, 2440986, 1777934,1209143], 'area': [1737.9, 630.3, 598.9, 487.8, 368.5 ]} cities = pd.DataFrame(data) data2 = {'city': ['Liverpool','Southampton'], 'population': [864122, 855569], 'area': [199.6, 192.0]} cities2 = pd.DataFrame(data2) ``` We can now use the `append()`option to combine both these Dataframes ``` cities = cities.append(cities2) cities data = {'city': ['London','Manchester','Birmingham','Leeds','Glasgow'], 'density': [5630,4051,4076,3645,3390]} cities3 = pd.DataFrame(data) cities3 ``` <b> An extra column can be added with `.merge()` with an outer join using the city names </b> ``` cities = pd.merge(cities, cities3, how='outer', sort=True,on='city') cities ``` <a id="grouping"></a> ### 2.4 Grouping Data Grouping data is a quick way to calculate values for classes in your DataFrame. <b> `groupby` </b> A groupby operation involves a combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. ``` boroughs.columns boroughs.head() boroughs.groupby(['Inner/_Outer_London']).mean() boroughs.groupby(['Area_name','%_of_resident_population_born_abroad_(2015)']).max().head(10) ``` <a id="explore"></a> ## 3. Visualizing Data <b> Visualization in Pandas uses the `Matplotlib` library. This plotting library uses an object oriented API to embed plots into applications. </b> <b> Some of the examples are a line Plot, Histograms, Scatter plot, Image Plot, 3D Plot to name a few. 
</b> <b> In the first example we will be using `matplotlib.pyplot` which is a collection of command style functions that make matplotlib work like MATLAB </b> ``` %matplotlib inline import matplotlib.pyplot as plt boroughs = boroughs.reset_index() ``` <b> The default plot is a line chart that uses the index for the x-axis: </b> ``` boroughs['Employment_rate_(%)_(2015)'].plot(); ``` To create a plot that makes more sense for this data have a look at the [documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html) for all options. <b> For the above example, a histogram might work better. You can change the number of `bins` to get the desired output on your histogram </b> ``` boroughs['Employment_rate_(%)_(2015)'].plot.hist(bins=10); ``` <b> You can change the size of your chart using the `figsize` option </b> ``` boroughs['Employment_rate_(%)_(2015)'].plot.hist(bins=15,figsize=(10,5)); ``` <b> In a plot command you can select the data directly using the labels. The below plot shows the Employment Rate only in Outer London </b> ``` boroughs['Employment_rate_(%)_(2015)'][boroughs['Inner/_Outer_London']=='Outer London'].plot.hist(bins=15,figsize=(10,5)); ``` <b> To add the Employment Rate for Inner London as well, simply repeat the plot command with a different selection of the data </b> ``` boroughs['Employment_rate_(%)_(2015)'][boroughs['Inner/_Outer_London']=='Outer London'].plot.hist(bins=15,figsize=(10,5)); boroughs['Employment_rate_(%)_(2015)'][boroughs['Inner/_Outer_London']=='Inner London'].plot.hist(bins=15,figsize=(10,5)); ``` <b> The above plot is difficult to read as the histograms have overlapped. </b> <b> You can fix this by changing the colours and making them transparant. The legend() method adds a legend to the plot, which is useful in describing different parts of the chart. 
</b> <b> To add a legend each histogram needs to be assigned to an object `ax` that is used to then create a legend, as shown in the example below </b> ``` ax = boroughs['Employment_rate_(%)_(2015)'][boroughs['Inner/_Outer_London']=='Outer London'].plot.hist( bins=15,figsize=(10,5),alpha=0.5,color='#1A4D3B'); ax = boroughs['Employment_rate_(%)_(2015)'][boroughs['Inner/_Outer_London']=='Inner London'].plot.hist( bins=15,figsize=(10,5),alpha=0.5,color='#4D1A39'); ax.legend(['Outer London','Inner London']); ``` <b> There are various options available to change every aspect of your chart. Mentioned below are some of the example options available for you to work with. Go ahead and create a new chart and customise options as needed ``` boroughs['Population_density_(per_hectare)_2017'].plot.hist( bins=15, title="Population Density", legend=False, fontsize=14, grid=False, linestyle='--', edgecolor='black', color='darkred', linewidth=3); ``` ## Seaborn <b> Seaborn is a Python data visualization library based on matplotlib. It is an easy to use visualisation package that works well with Pandas DataFrames. Below are a few examples using Seaborn. Refer this [documentation](https://seaborn.pydata.org/index.html) for information on a wide range of plots you can use. </b> ``` import seaborn as sns ``` <b> For our first example lets look at a distribution plot using `distplot` which shows a distribution of observations in your data. We have used the `dropna()` function to remove rows and columns with Null/NaN values </b> ``` sns.distplot(boroughs['Population_density_(per_hectare)_2017'].dropna()); ``` <b> For our next few examples lets look at categorical plots using `catplot` Use this plot to visualize a relationship involving categorical data. 
There are Categorical scatterplots, distribution plots and estimate plots.</b> <b> The `kind` parameter selects the underlying axes-level function to use </b> <b> A few options you can choose from are `box, violin, swarm ,bar, stripplot,boxen` etc. The default representation in catplot() uses a scatterplot. </b> ``` sns.catplot(x='Turnout_at_2014_local_elections', y='Political_control_in_council', data=boroughs); ``` <b> In the below examples we are using the `kind` option to look at the different categorical plots </b> ``` sns.catplot(x='Median_House_Price,_2015', y='Area_name', kind='swarm', data=boroughs); sns.catplot(x="Employment_rate_(%)_(2015)", y="Largest_migrant_population_by_country_of_birth_(2011)", kind="box", data=boroughs); sns.catplot(x="Turnout_at_2014_local_elections", y="Political_control_in_council", kind="violin", data=boroughs); ``` <b>TRY IT YOURSELF</b> <ol> <li>Create two histograms that compare the Gross Annual pay for Male and Female Employees using `.plot.hist()`</li> <li>Create a bar plot comparing the median house prices for different boroughs</li> <li>Create a scatter plot comparing the Median House price and percentage of area that is greenspace </li> </ol> </div> <ul></ul> <ul></ul> <ul></ul> > *Tips*: - To add two histograms to one plot you can repeat `.plot()` in the same cell - Add a legend by assiging each histogram to an object `ax`, which is used to create a legend - To customise the size of your maps, use the example of `[fig, ax]`, which customises the figsize for each map in other examples above <b> Add additional cells to try it yourself </b> ``` # Answer 1 ax = boroughs['Gross_Annual_Pay_-_Female_(2016)'].plot.hist(bins=15,figsize=(10,5),alpha=0.5); ax = boroughs['Gross_Annual_Pay_-_Male_(2016)'].plot.hist(bins=15,figsize=(10,5),alpha=0.5); ax.legend(['female','male']); # Answer 2 [fig, ax] = plt.subplots(1, figsize=(7,7)) sns.barplot(x="Median_House_Price,_2015", y="Area_name", data=boroughs, ax=ax); # Answer 3 [fig, ax] 
= plt.subplots(1, figsize=(7,7)) ax=sns.scatterplot(y='Median_House_Price,_2015', x='%_of_area_that_is_Greenspace,_2005', data=boroughs,ax=ax); ``` <a id="extra"></a> # 4. Further learning <b> - Load your own dataset into a new notebook and play around with the data to practice what you have learned. </b> <b> - Create your own plots using other examples. Refer the below links to get started </b> <br> [Pandas plot examples](https://pandas.pydata.org/pandas-docs/stable/user_guide/visualization.html) </br> <br> [Seaborn gallery](https://seaborn.pydata.org/examples/index.html) </br> <b> 4. Additonal reference material on Pandas </b> <br> 4.1. [Pandas workshop by Alexander Hensdorf](https://github.com/alanderex/pydata-pandas-workshop) <br> 4.2. [Pandas tutorial by Joris van den Bossche](https://github.com/jorisvandenbossche/pandas-tutorial) <br> 4.3. [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) <br> ### Authors Margriet Groenendijk is a Data & AI Developer Advocate for IBM. She develops and presents talks and workshops about data science and AI. She is active in the local developer communities through attending, presenting and organising meetups. She has a background in climate science where she explored large observational datasets of carbon uptake by forests during her PhD, and global scale weather and climate models as a postdoctoral fellow.  Yamini Rao is a Developer Advocate for IBM. She compiles Developer scenarios, workshops and training material based on IBM Cloud technologies to demonstrate value. She also works as a community manager, collaborating with local developer communites to organise workshops and meetups. She has a background in computer science and has worked extensively as an Implementation Engineer for various IBM Analytical tools. Copyright © 2019 IBM. This notebook and its source code are released under the terms of the MIT License.
github_jupyter
**author**: lukethompson@gmail.com<br> **date**: 8 Oct 2017<br> **language**: Python 3.5<br> **license**: BSD3<br> ## sequence_prevalence.ipynb ``` import pandas as pd import numpy as np import locale import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline locale.setlocale(locale.LC_ALL, 'en_US') def list_otu_studies(df, index): return(set([x.split('.')[0] for x in df.loc[index]['list_samples'].split(',')])) locale.format("%d", 1255000, grouping=True) ``` ## QC-filtered samples ``` path_otus = '../../data/sequence-lookup/otu_summary.emp_deblur_90bp.qc_filtered.rare_5000.tsv' # gunzip first num_samples = '24,910' num_studies = '96' df_otus = pd.read_csv(path_otus, sep='\t', index_col=0) df_otus['studies'] = [list_otu_studies(df_otus, index) for index in df_otus.index] df_otus['num_studies'] = [len(x) for x in df_otus.studies] df_otus.shape df_otus.num_samples.max() (df_otus.num_samples > 100).value_counts() / 307572 (df_otus.num_studies > 10).value_counts() / 307572 df_otus.num_studies.max() ``` ### Per-study endemism Objective: Determine the number of OTUs that are study-dependent (or EMPO-dependent). For a given OTU, is it found in only one study's samples or in multiple studies (Venn diagram)? 
``` df_otus.num_studies.max() fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12,4)) ax[0].hist(df_otus.num_studies, bins=np.concatenate(([], np.arange(1, 110, 1)))) ax[0].set_xlim([0, 98]) ax[0].set_xticks(np.concatenate((np.array([1.5]), np.arange(10.5, 92, 10)))) ax[0].set_xticklabels(['1', '10', '20', '30', '40', '50', '60', '70', '80', '90']); ax[1].hist(df_otus.num_studies, bins=np.concatenate(([], np.arange(1, 110, 1)))) ax[1].set_yscale('log') ax[1].set_ylim([5e-1, 1e6]) ax[1].set_xlim([0, 98]) ax[1].set_xticks(np.concatenate((np.array([1.5]), np.arange(10.5, 92, 10)))) ax[1].set_xticklabels(['1', '10', '20', '30', '40', '50', '60', '70', '80', '90']); fig.text(0.5, 0.0, 'Number of studies a tag sequence was observed in (out of %s)' % num_studies, ha='center', va='center') fig.text(0.0, 0.5, 'Number of tag sequences (out of %s)' % locale.format("%d", df_otus.shape[0], grouping=True), ha='center', va='center', rotation='vertical') exactly1 = df_otus.num_studies.value_counts()[1] num_otus = df_otus.shape[0] # fig.text(0.3, 0.51, '%s tag sequences (%.1f%%) found in only a single study\n\n\n\n\n\n%s tag sequences (%.1f%%) found in >1 study' % # (locale.format("%d", exactly1, grouping=True), # (exactly1/num_otus*100), # locale.format("%d", num_otus-exactly1, grouping=True), # ((num_otus-exactly1)/num_otus*100)), # ha='center', va='center', fontsize=10) plt.tight_layout() plt.savefig('hist_endemism_90bp_qcfiltered.pdf', bbox_inches='tight') ``` ### Per-sample endemism ``` fig = plt.figure(figsize=(12,4)) plt.subplot(121) mybins = np.concatenate(([], np.arange(1, 110, 1))) n, bins, patches = plt.hist(df_otus.num_samples, bins=mybins) plt.axis([0, 92, 0, 4.5e4]) plt.xticks(np.concatenate((np.array([1.5]), np.arange(10.5, 92, 10))), ['1', '10', '20', '30', '40', '50', '60', '70', '80', '90']); plt.subplot(122) mybins = np.concatenate(([], np.arange(1, max(df_otus.num_samples)+100, 100))) n, bins, patches = plt.hist(df_otus.num_samples, bins=mybins) 
plt.yscale('log') plt.axis([-100, 9200, 5e-1, 10e5]) plt.xticks([50, 1050, 2050, 3050, 4050, 5050, 6050, 7050, 8050, 9050], ['1-100', '1001-1100', '2001-2100', '3001-3100', '4001-4100', '5001-5100', '6001-6100', '7001-7100', '8001-8100', '9001-9100'], rotation=45, ha='right'); fig.text(0.5, 0.0, 'Number of samples a tag sequence was observed in (out of %s)' % num_samples, ha='center', va='center') fig.text(0.0, 0.6, 'Number of tag sequences (out of %s)' % locale.format("%d", df_otus.shape[0], grouping=True), ha='center', va='center', rotation='vertical') exactly1 = df_otus.num_samples.value_counts()[1] num_otus = df_otus.shape[0] # fig.text(0.3, 0.6, '%s sequences (%.1f%%) found in only a single sample\n\n\n\n\n\n%s sequences (%.1f%%) found in >1 sample' % # (locale.format("%d", exactly1, grouping=True), # (exactly1/num_otus*100), # locale.format("%d", num_otus-exactly1, grouping=True), # ((num_otus-exactly1)/num_otus*100)), # ha='center', va='center', fontsize=10) plt.tight_layout() plt.savefig('hist_otus_90bp_qcfiltered.pdf', bbox_inches='tight') ``` ### Abundance vs. 
prevalence ``` plt.scatter(df_otus.num_samples, df_otus.total_obs, alpha=0.1) plt.xscale('log') plt.yscale('log') plt.xlabel('Number of samples a tag sequence was observed in (out of %s)' % num_samples) plt.ylabel('Total number of tag sequence observations') plt.savefig('scatter_otus_90bp_qcfiltered.png') ``` ## Subset 2k ``` path_otus = '../../data/sequence-lookup/otu_summary.emp_deblur_90bp.subset_2k.rare_5000.tsv' # gunzip first num_samples = '2000' df_otus = pd.read_csv(path_otus, sep='\t', index_col=0) df_otus['studies'] = [list_otu_studies(df_otus, index) for index in df_otus.index] df_otus['num_studies'] = [len(x) for x in df_otus.studies] df_otus.num_samples.max() df_otus.num_samples.value_counts().head() plt.figure(figsize=(12,3)) mybins = np.concatenate(([-8.99, 1.01], np.arange(10, max(df_otus.num_samples), 10))) n, bins, patches = plt.hist(df_otus.num_samples, bins=mybins) plt.yscale('log') plt.axis([-10, 600, 5e-1, 1e6]) plt.xticks([-4, 5.5, 15.5, 104.5, 204.5, 304.5, 404.5, 474.5, 574.5], ['exactly 1', '2-9', '10-19', '100-109', '200-209', '300-309', '400-409', '470-479', '570-579'], rotation=45, ha='right', fontsize=9); plt.xlabel('Number of samples OTU observed in (out of %s)' % num_samples) plt.ylabel('Number of OTUs (out of %s)' % df_otus.shape[0]) plt.savefig('hist_otus_90bp_subset2k.pdf') plt.scatter(df_otus.num_samples, df_otus.total_obs, alpha=0.1) plt.xscale('log') plt.yscale('log') plt.xlabel('Number of samples OTU observed in (out of %s)' % num_samples) plt.ylabel('Total number of OTU observations') plt.savefig('scatter_otus_90bp_subset2k.png') fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12,4)) ax[0].hist(df_otus.num_studies, bins=df_otus.num_studies.max()) ax[1].hist(df_otus.num_studies, bins=df_otus.num_studies.max()) ax[1].set_yscale('log') ax[1].set_ylim([5e-1, 1e6]) fig.text(0.5, 0.03, 'Number of studies OTU found in (out of %s)' % num_samples, ha='center', va='center') fig.text(0.07, 0.5, 'Number of OTUs', ha='center', 
va='center', rotation='vertical') exactly1 = df_otus.num_studies.value_counts()[1] num_otus = df_otus.shape[0] fig.text(0.3, 0.5, '%s OTUs (%.1f%%) found in only a single study\n\n\n\n\n\n\n\n\n%s OTUs (%.1f%%) found in >1 study' % (locale.format("%d", exactly1, grouping=True), (exactly1/num_otus*100), locale.format("%d", num_otus-exactly1, grouping=True), ((num_otus-exactly1)/num_otus*100)), ha='center', va='center', fontsize=10) plt.savefig('hist_endemism_90bp_subset2k.pdf') ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Terrain/us_lithology.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/us_lithology.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Terrain/us_lithology.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/us_lithology.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The magic command `%%capture` can be used to hide output from a specific cell. ``` # %%capture # !pip install earthengine-api # !pip install geehydro ``` Import libraries ``` import ee import folium import geehydro ``` Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` if you are running this notebook for this first time or if you are getting an authentication error. 
``` # ee.Authenticate() ee.Initialize() ``` ## Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`. ``` Map = folium.Map(location=[40, -100], zoom_start=4) Map.setOptions('HYBRID') ``` ## Add Earth Engine Python script ``` dataset = ee.Image('CSP/ERGo/1_0/US/lithology') lithology = dataset.select('b1') lithologyVis = { 'min': 0.0, 'max': 20.0, 'palette': [ '356EFF', 'ACB6DA', 'D6B879', '313131', 'EDA800', '616161', 'D6D6D6', 'D0DDAE', 'B8D279', 'D5D378', '141414', '6DB155', '9B6D55', 'FEEEC9', 'D6B879', '00B7EC', 'FFDA90', 'F8B28C' ], } Map.setCenter(-105.8636, 40.3439, 11) Map.addLayer(lithology, lithologyVis, 'Lithology') ``` ## Display Earth Engine data layers ``` Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True) Map ```
github_jupyter
## Project:7 Reduce Data Dimensionality for the House Dataset Attribute ### Problem Statement When the number of features is very large relative to the number of observations in your dataset, certain algorithms struggle to train effective models. Dimensionality reduction is the process of reducing the number of random variables under consideration, by obtaining a set of principal variables. We are going to use principal component analysis, to reduce the data dimensions for the housing data attributes. ## Data Description: • Zoning_Class: Identifies the general zoning classification of the sale • Building_Class: Identifies the type of dwelling involved in the sale • Lot_Extent: Linear feet of street connected to property • Lot_Size: Lot size in square feet • Road_Type: Type of road access to property • Lane_Type: Type of alley access to property • Property_Shape: General shape of property • Land_Outline: Flatness of the property • Utility_Type: Type of utilities available • Lot configuration: Lot configuration • Property_Slope: Slope of property • Neighborhood: Physical locations within Ames city limits • Condition1: Proximity to various conditions • Condition2: Proximity to various conditions (if more than one is present) • House_Type: Type of dwelling • House_Design: Style of dwelling • Overall_Material: Rates the overall material and finish of the house • House_Condition: Rates the overall condition of the house • Construction_Year: Original construction date • Remodel_Year: Remodel date (same as construction date if no remodeling or additions) • Roof_Design: Type of roof • Roof_Quality: Roof material • Exterior1st: Exterior covering on house • Exterior2nd: Exterior covering on house (if more than one material) • Brick_Veneer_Type: Masonry veneer type • Brick_Veneer_Area: Masonry veneer area in square feet • Exterior_Material: Evaluates the quality of the material on the exterior • Exterior_Condition: Evaluates the present condition of the material on the 
exterior • Foundation_Type: Type of foundation • Basement_Height: Evaluates the height of the basement • Basement_Condition: Evaluates the general condition of the basement • Exposure_Level: Refers to walkout or garden level walls • BsmtFinType1: Rating of basement finished area • BsmtFinSF1: Type 1 finished square feet • BsmtFinType2: Rating of basement finished area (if multiple types) • BsmtFinSF2: Type 2 finished square feet • BsmtUnfSF: Unfinished square feet of basement area • Total_Basement_Area: Total square feet of basement area • Heating_Type: Type of heating • Heating_Quality: Heating quality and condition • Air_Conditioning: Central air conditioning • Electrical_System: Electrical system • First_Floor_Area: First Floor square feet • Second_Floor_Area: Second floor square feet • LowQualFinSF: Low quality finished square feet (all floors) • Grade_Living_Area: Above grade (ground) living area square feet • Underground_Full_Bathroom: Basement full bathrooms • Underground_Half_Bathroom: Basement half bathrooms • Full_Bathroom_Above_Grade: Full bathrooms above grade • Half_Bathroom_Above_Grade: Half baths above grade • Bedroom: Bedrooms above grade (does NOT include basement bedrooms) • Kitchen: Kitchens above grade • Kitchen_Quality: Kitchen quality • Rooms_Above_Grade: Total rooms above grade (does not include bathrooms) • Functional_Rate: Home functionality (Assume typical unless deductions are warranted) • Fireplaces: Number of fireplaces • Fireplace_Quality: quality of fireplaces • Garage: Garage location • Garage_Built_Year: Year garage was built • Garage_Finish_Year: Interior finish of the garage • Garage_Size: Size of garage in car capacity • Garage_Area: Size of garage in square feet • Garage_Quality: Garage quality • Garage_Condition: Garage condition • Pavedd_Drive: Paved driveway • W_Deck_Area: Wood deck area in square feet • Open_Lobby_Area: Open porch area in square feet • Enclosed_Lobby_Area: Enclosed porch area in square feet • 
Three_Season_Lobby_Area: Three season porch area in square feet • Screen_Lobby_Area: Screen porch area in square feet • Pool_Area: Pool area in square feet • Pool_Quality: Pool quality • Fence_Quality: quality of fence • Miscellaneous_Feature: Miscellaneous feature not covered in other categories • Miscellaneous_Value: $Value of miscellaneous feature • Month_Sold: Month Sold (MM) • Year_Sold: Year Sold (YYYY) • Sale_Type: Type of sale • Sale_Condition: Condition of sale # Loading Important Library ``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from scipy.stats import norm from sklearn.preprocessing import StandardScaler from scipy import stats import warnings warnings.filterwarnings('ignore') %matplotlib inline ``` # Check out the dataset ``` #Reading Datasetdf df = pd.read_csv("house_price.csv") df.head() ``` # Exploratory Data Analysis ``` #Columns of dataset df.columns df.shape ``` House price has 81 columns(79 features + id and target SalePrice) and 1460 entries (number of rows or house sales) ``` #Counting numerical variables numerical_var = df.dtypes[df.dtypes != "object"].index print("Number of Numerical features: ", len(numerical_var)) #Counting categorical variables categorical_var = df.dtypes[df.dtypes == "object"] print("Number of Categorical features: ", len(categorical_var)) #filter numeric column only data_num = df[numerical_var] #calculating correlation among numeric variable corr_matrix = data_num.corr() #filter correlation values above 0.5 filter_corr = corr_matrix[corr_matrix > 0.5] #plot correlation matrix plt.figure(figsize=(20,12)) sns.heatmap(filter_corr, cmap="coolwarm", annot=True); ``` Based on the above correlation matrix, correlation among the variables been observed. For example, "salePrice" are correlated with "OverallQual", "YearBuilt", "YearRemodAdd", "TotalBsmt", "1stFirSF", "GrLivArea", "FullBaath", "TotRmsAbvGrd", "GarageCars", and "GarageArea". It also show the multicollinearity. 
For example: the correlation betwen GarageCars and GarageArea is very high(0.88) # SalePrice ``` sns.distplot(df["SalePrice"]) plt.show() ``` From the above result, we can notice that "SalePrice" are not normal distribution. It is positively skewed. A few people have very expensive house # Relationship with Numerical variables # Overall Quality ``` #Scatter plot OverallQual/SalePrice data = pd.concat([df["SalePrice"], df["OverallQual"]], axis = 1) data.plot.scatter(x="OverallQual", y="SalePrice", ylim=(0,800000)); ``` From the above result, We can say that the price of the house will be high when the overall quality of the house is high. We can notice that for the same quality different price. Why? because "salePrice" is correlated with other variables and "OverallQual" might be correlated with other variables. # Year Built ``` data = pd.concat([df["SalePrice"], df["YearBuilt"]], axis =1) data.plot.scatter(x="YearBuilt", y="SalePrice", ylim=(0,800000)); ``` From the above plot, We can observe that the price of a house is comparatively more than the house was built recently. we also can notice that there are some outliers which means even if the house is too old, the price of the house is moderately high due to influences of other variables. # Ground Living Area ``` #scatter plot GrLivArea/SalePrice data = pd.concat([df["SalePrice"], df["GrLivArea"]], axis =1) data.plot.scatter(x="GrLivArea", y="SalePrice", ylim=(0,800000)); ``` While the size of the ground living area is increasing, the price of the houses is increasing. But even if the ground living area is high, the price of the house is low and when the ground living area is high, the price of the house is too high. Why?. We know the influence of the other variables affects the price change of house. 
When "GrLivARea" of a house is highly correlated with other variables, the price of the house is increasing and when "GrLivArea" of a house is not highly correlated with other variables, the price of the house is decreasing. # TotalBsmtSF ``` data = pd.concat([df["SalePrice"], df["TotalBsmtSF"]], axis =1) data.plot.scatter(x="TotalBsmtSF", y="SalePrice", ylim=(0,800000)); ``` We Can notice that "SalePrice" and "TotalBsmtSF" is with a linear relationship. We can see the value of "SalePrice" is going straight when the value of "TotalBsmtSF" is 0. Total square feet of basement area of a house is high but the price of the house is low due to influence of other variables. # Visualizing Categorical Variables with "SalePrice". ## House Style ``` df.boxplot(column="SalePrice", #Column to plot by="HouseStyle", #Column to split upon figsize=(8,8)) ``` ## Foundation ``` df.boxplot(column="SalePrice", #column to plot by="Foundation", #column to split upon figsize=(8,8)) ``` From the above plot, We can observe that if a house has "Poured Concrete" foundation , the price of the house is higher than the other house prices. ## Garage Quality ``` df.boxplot(column = "SalePrice", by="GarageQual", figsize=(8,8)) ``` From the ove plot, We can observe that if a house has a good garage, the price of the house is higher than other huse prices. Some houses have an excellent garage. So the price of this kind of house is higher than all. So far, We have compared some variables with the target variable. We observed that what is the variables impact on target variable based on EDA. If we want to reduce dimension We can take only variables. This is one kind of way. ### Now we will use PCA to reduce the dimension of this dataset. # Before apply PCA we have to handle missing value. ``` total_missing_value = df.isnull().sum().sort_values(ascending=False) total_missing_value ``` # Imputing missing values PoolQc: Data description says NA means "No Pool". 
In this data set, it has huge ratio of missing value(99%) and majority of house have no pool all in general. ``` df["PoolQC"] = df["PoolQC"].fillna("None") ``` ## MiscFeature Data description says NA means "no misc feature" ``` df["MiscFeature"] = df["MiscFeature"].fillna("None") ``` ## Alley Data description says NA means "no alley access" ``` df["Alley"] = df["Alley"].fillna("None") ``` ## Fence ``` df["fence"] = df["Fence"].fillna("None") ``` ## FireplaceQu ``` df["FireplaceQu"] = df["FireplaceQu"].fillna("None") ``` ## LotFrontage The area of each street connected to the house property most likely have a similar area to other houses in its neughborhood. so we can fill in missing values by the median LotFrontage of the neighborhood. ``` df["LotFrontage"] = df.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median())) ``` ### GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None ``` for i in ("GarageType", "GarageFinish", "GarageQual", "GarageCond"): df[i] = df[i].fillna("None") ``` ### GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = No cars in such garage.) ``` for i in ("GarageYrBlt", "GarageArea", "GarageCars"): df[i] = df[i].fillna(0) ``` BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath : missing values are likely zero for having no basement. for i in ("BsmtFinSF1", "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF", "BsmtFullBath", "BsmtHalfBath") : df[i] = df[i].fillna(0) #### BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 : For all these categorical basement-related features, NaN means that there is no basement. ``` for i in ("BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"): df[i] = df[i].fillna("None") ``` #### MasVnrArea and MasVnType : NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type. 
``` df["MasVnrType"] = df["MasVnrType"].fillna("None") df["MasVnrArea"] = df["MasVnrType"].fillna(0) ``` #### MSZoning(The general zoning classification) : "RL" is by far the most common value. So we can fill in missing values with "RL" ``` df["MSZoning"] = df["MSZoning"].fillna(df["MSZoning"].mode()[0]) ``` #### Utilites : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA. Since the house with "NoSewa" is in the training set, this feature won't help in predictive modelling. We can then safely remove it. ``` df = df.drop(["Utilities"], axis = 1) ``` #### Functional : Data Description says NA means typical ``` df["Functional"] = df["Functional"].fillna("Typ") ``` #### Electrical : It has one NA value. Since this feature has mostly "SBrkr", we can set that for the missing value ``` df["Electrical"] = df["Electrical"].fillna(df["Electrical"].mode()[0]) ``` #### KitchenQual : Only one NA value, and same as Electrical, we set "TA"(which is the most frequent) for the missing value in KitchenQual. ``` df["KitchenQual"] = df["KitchenQual"].fillna(df["KitchenQual"].mode()[0]) ``` #### Exterior1st and Exterior2nd : Again Both Exterior 1 & 2 have only on emisssing value. We will just substitute in the most common string. ``` df["Exterior1st"] = df["Exterior1st"].fillna(df["Exterior1st"].mode()[0]) df["Exterior2nd"] = df["Exterior2nd"].fillna(df["Exterior2nd"].mode()[0]) ``` #### SaleType : Fill in again with most frequent which is "WD" ``` df["SaleType"] = df["SaleType"].fillna(df["SaleType"].mode()[0]) ``` #### MSSubClass : NA most likely means No building class. We can replace missing values with None ``` df["MSSubClass"] = df["MSSubClass"].fillna("None") categorical_var = df.dtypes[df.dtypes == "object"] ``` ## Transforming Some numerical variables that are really categorical ``` df["OverallCond"].dtype ``` If we take the variable "OverallCond" which rates the overall condition of the house. So value of this coulmn is from 1 to 10. 
10 Very Excellent 9 Excellent 8 Very Good 7 Good 6 Above Average 5 Average 4 Below Average 3 Fair 2 Poor 1 Very Poor The datatype of this column is in "int64", but is should be in categorical. ``` #Changing OverallCond into a categorical variable df["OverallCond"] = df["OverallCond"].astype(str) #MSSubClass = The building class df["MSSubClass"] = df["MSSubClass"].apply(str) #Year and month sold are transformed into categorical features. df["YrSold"] = df["YrSold"].astype(str) df["MoSold"] = df["MoSold"].astype(str) ``` ## Label Encoding to some Categorical variables that may contain information in their ordering set ``` from sklearn.preprocessing import LabelEncoder cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond', 'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1', 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope', 'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', 'YrSold', 'MoSold') #Process columns, apply LabelEncoder to categorical features for c in cols: label_ec = LabelEncoder() label_ec.fit(list(df[c].values)) df[c] = label_ec.transform(list(df[c].values))df df.head() df = pd.get_dummies(df) print(df.shape) df.head() ``` # Remove Target Variable ``` x = df.drop("SalePrice", axis=1) ``` # Standardizing input variables ``` from sklearn.preprocessing import StandardScaler import numpy as np #Standardized the dataset sc_x= StandardScaler() x_std = sc_x.fit_transform(x) ``` ### PCA from scratch using pyhon # 1. Computing the mean vector ##### Find the mean for each column The mean vector is often referred to as the centroid and the variance-covariance matrix as the dispersion or dispersion matrix. The mean vector consists of the means of each variable. ``` import numpy as np mean_vec = np.mean(x_std, axis = 0) ``` # 2. 
Computing the Covariance Matrix ##### Find the covariance among variables The variance-covariance matrix consists of the variances of the variables along the main diagonal and the covariances between each pair of variables in the other matrix positions. Covariance Matrix: it is a symmetric, dxd matrix (where d is the number of features) The diagonal entries of the covariance matrix are the variances and the other entries are the covariances. For this reason, the covariance matrix is sometimes called the _variance-covariance matrix_. ``` cov_mat = (x_std - mean_vec).T.dot((x_std - mean_vec))/ (x_std.shape[0]-1) print(cov_mat) ``` # 3. Computing eigenvectors and corresponding eigenvalues ##### Find eigenvalues and eigenvectors ``` eig_vals, eig_vecs = np.linalg.eig(cov_mat) print(eig_vecs) print(eig_vals) ``` If eigen value is high for eigen vector that means vector has a lot of variance. # 4.Sorting the eigenvectors by decreasing eigenvalues ``` # Make a list of (eigenvalue, eigenvectors) tuples eig_pairs = [[np.abs(eig_vals[i]), eig_vecs[:,i]] for i in range(len(eig_vals))] # Sort the (eigenvalue, eigenvector) tuples from high to low eig_pairs.sort(key=lambda x: x[0], reverse=True) # Visually confirm that the list is correctly sorted by decreasing eigenvalues print("Eigenvalues in descending order:") for i in eig_pairs: print(i[0]) ``` All the eigen values are sorted in an descending order. # 5. Select components based on eigen values we have 229 components from which we have to select components based on eigen value which has high value. Here, filter out eigen values which has above 0.5 ``` pairs = np.array(eig_pairs) count = 0 components = [] for i in range(len(eig_pairs)): if eig_pairs[i][0]>0.5: count += 1 components.append(pairs[i]) print("Number of components:" + str(count)) ``` When we select eigen value as 0.5 or above 0.5, we get 141 components. 0.5 is not a threshold eigen value. 
we use this value to check the percentage of information the selected components carry. if the selected components gives less information, we select more components and this can be achieved by setting eigen value below 0.5. ### Let's check how much information the selected components contains. ``` #Calculate Explained Variance total = 0 ein = [] for i in range(len(components)): total += components[i][0] ein.append(components[i][0]) #divide eigen value by total eigen value and then multiple with 100 for the selected components var_exp = [(i/ sum(eig_vals))*100 for i in sorted(ein, reverse=True)] cum_var_exp = np.cumsum(var_exp) cum_var_exp ``` We can notice that 141 components has 93.8% information among all components. When we select 141 components we lose 6.2% information. # 6. Select components based on scree plot ``` #import numpy as np #import matplotlib #import matplotlib.pyplot as plt #Make a random array and then make it positive-definite num_vars = len(eig_vals) num_obs = 9 fig = plt.figure(figsize=(8,5)) sing_vals = np.arange(num_vars) + 1 plt.plot(sing_vals, eig_vals, "ro-", linewidth=1) plt.title("Scree Plot") plt.xlabel("Principal Components") plt.ylabel("Eigenvalue") plt.xlim(0,230) plt.show() ``` Scree Plot shows eigen value for each components. starting components has high value and in middle, some component has higher than others. What can we do using scree plot ? We can select components based scree plot. If we select 150 components based on scree plot, we have to check how much information has the selected components. The following plot to shows percentage of variance explained by each of the selected components. 
``` #Calculate Explained Variance total = sum(eig_vals) #Divide eigen value by total eigen value and then multiple with 100 for the selected components var_exp = [(i / total)*100 for i in sorted(eig_vals, reverse = True)] cum_var_exp = np.cumsum(var_exp) plt.figure() plt.scatter([150], [cum_var_exp[150]], color='g') plt.plot(cum_var_exp) plt.xlabel("Principal Components") plt.ylabel("Variance (%)") #for each components plt.title("Dataset Explained Variance") plt.show() ``` From the above plot, we can observe that the variance remains constant after the number of components reaches 150 ``` cum_var_exp[150] ``` From the above result, We can see that the selected 150 components has 96% information. Here we lose 4% information. We have to select less component than original variable, at the same time, the selected component must contain as much as information. # 7. Deriving the new Data Set ##### Finally, We select only 150 components. Filter out eigen vector of the selected components. ``` N = 229 M = 150 a = np.ndarray(shape = (N, 0)) for i in range(M): b = eig_pairs[i][1].reshape(N,1) a = np.hstack((a,b)) ``` ### Perform matrix calculation of original dataset with eigen vector of the selected components. ``` a # Projection onto the new feature space Y = x_std.dot(a) Y Y.shape ``` We reduced the number of columns from 221 to 150.
github_jupyter
# ATMOSPHERIC DATA ANALYSIS WITH DASK AND XARRAY In this tutorial, we will provide an introduction to the Xarray and Dask packages and provide examples of how to use them for the analysis of atmospheric data. We have broken the tutorial into topical sections, each with its own Jupyter Notebook. In this notebook, we'll just provide a quick overview of the tutorial and provide some useful links/references. # Summary Dask is a Python-based "flexible parallel computing library for analytic computing." Dask provides a simple interface to parallel data analysis tasks without requiring parallel computing knowledge from the user. Xarray is a Python package that provides an in-memory data model (and operations on that data) consistent with the Common Data Model used by NetCDF. Together, Dask and Xarray provide a parallel-computing platform ideal for analysis of Earth System Science data. Users interact with Xarray data objects without needing to think about the parallel operations being performed by Dask "under the hood." In essence, Dask and Xarray provide a platform that can scale a user's analysis from their laptop to a high-performance computing (HPC) center or the cloud, without needing to change a single line of code! # Outline | Est. Time | Title | Who | Notebook | | ----------| ----- | --- | ---- | | 5 min. | 0.0 - Introduction | Joe Hamman | [link](./1.0.scientific_python_ecosystem.ipynb) | | 10 min. | 1.0 - Scientific Python | Joe Hamman | [link](./1.0.scientific_python_ecosystem.ipynb)| | 10 min. | 1.1 - Xarray - Data Structures | Joe Hamman | [link](./1.1.xarray-data-structures.ipynb) | | 10 min. | 1.2 - Xarray - the Power of Labels | Joe Hamman | [link](./1.2.xarray-the-power-of-labels.ipynb) | | 10 min. | 1.3 - Xarray - Computation Toolkit | Joe Hamman | [link](./1.3.xarray-computation-toolkit.ipynb) | | 10 min. | 1.4 - Xarray - Visualization | Joe Hamman | [link](./1.4.xarray-visualization.ipynb) | | 10 min. 
| 1.5 - Xarray - Input and Output | Joe Hamman | [link](./1.5.xarray-input-and-output.ipynb) | | -------- | ----------------- Break -----------------| ------------ | ---- | | 5 min. | 2.0 - Dask - Introduction | Kevin Paul | [link](./2.0.intro_to_dask.ipynb) | | 10 min. | 2.1 - Dask - Delayed | Kevin Paul | [link](./2.1.dask_delayed.ipynb) | | 10 min. | 2.2 - Dask - Schedulers | Kevin Paul | [link](./2.2.dask_schedulers.ipynb) | | 10 min. | 2.3 - Dask - Parallelism | Kevin Paul | [link](./2.3.dask_parallelism.ipynb) | | 10 min. | 2.4 - Dask - Profiling and Diagnostics | Kevin Paul | [link](./2.4.dask_profiling_and_diagnostics.ipynb) | | 10 min. | 2.5 - Dask - Distributed | Kevin Paul | [link](./2.5.dask_distributed.ipynb) | | 10 min. | 2.6 - Dask - Arrays | Kevin Paul | [link](./2.6.dask_arrays.ipynb) | | 45 min. | 3.0 - Applications | - | [link](./3.0.applications.ipynb) | # Links - Miniconda: - Xarray Documentation: http://xarray.pydata.org - Dask Documentation: http://dask.pydata.org - Tutorial Contents: https://github.com/pangeo-data/pangeo-tutorial-sea-2018 - Pangeo: https://pangeo-data.github.io **Cheyenne Users:** - Cheyennne Documentation: https://www2.cisl.ucar.edu/resources/computational-systems/cheyenne - Dask-jobqueue: https://github.com/dask/dask-jobqueue **Google Cloud Pangeo Users:** - JupyterHub: http://pangeo.pydata.org
github_jupyter
### continuing where I left in the `sckitlearnNB.ipynb` notebook. Will be creating a voting system to increase our accuracy in predicting ``` import nltk import random from nltk.corpus import movie_reviews import pickle from nltk.classify import ClassifierI from statistics import mode ## defing the voteclassifier class class VoteClassifier(ClassifierI): def __init__(self, *classifiers): self._classifiers = classifiers def classify(self, features): votes = [] for c in self._classifiers: v = c.classify(features) votes.append(v) return mode(votes) def confidence(self, features): votes = [] for c in self._classifiers: v = c.classify(features) votes.append(v) choice_votes = votes.count(mode(votes)) conf = choice_votes / len(votes) return conf # pickle_obj = open("documents.pickle", "wb") documents = [(list(movie_reviews.words(fileid)), category) for category in movie_reviews.categories() for fileid in movie_reviews.fileids(category)] # pickle.dump(documents, pickle_obj) # pickle_obj.close() # pickle_obj = open("documents.pickle", "rb") # documents = pickle.load(pickle_obj) # pickle_obj.close() random.shuffle(documents) all_words = [] for w in movie_reviews.words(): all_words.append(w.lower()) all_words = nltk.FreqDist(all_words) word_features = list(all_words.keys())[:3000] def find_features(document): words = set(document) features = {} for w in word_features: features[w] = (w in words) return features #print((find_features(movie_reviews.words('neg/cv000_29416.txt')))) featuresets = [(find_features(rev), category) for (rev, category) in documents] training_set = featuresets[:1900] testing_set = featuresets[1900:] ``` ### Loading all the classifiers from their respective pickle files ``` original_nb = open("naive_bayes.pickle", "rb") naive_bayes_classifier = pickle.load(original_nb) original_nb.close() pickle_file = open("MNB_pickle.pickle", "rb") MNB_classifier = pickle.load(pickle_file) pickle_file.close() pickle_file = open("BNB_pickle.pickle", "rb") 
BernoulliNB_classifier = pickle.load(pickle_file) pickle_file.close() pickle_file = open("LogisticRegression.pickle", "rb") LogisticRegression_classifier = pickle.load(pickle_file) pickle_file.close() pickle_file = open("SGDClassifier.pickle", "rb") SGDClassifier_classifier = pickle.load(pickle_file) pickle_file.close() pickle_file = open("LinearSVC.pickle", "rb") LinearSVC_classifier = pickle.load(pickle_file) pickle_file.close() pickle_file = open("NuSVC_classifier.pickle", "rb") NuSVC_classifier = pickle.load(pickle_file) pickle_file.close() print("naive bayes: ", (nltk.classify.accuracy(naive_bayes_classifier, testing_set))*100) print("MNB_classifier: ", (nltk.classify.accuracy(MNB_classifier, testing_set))*100) print("BernoulliNB_classifier: ", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100) print("LogisticRegression_classifier: ", (nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100) print("SGDClassifier_classifier: ", (nltk.classify.accuracy(SGDClassifier_classifier, testing_set))*100) print("LinearSVC_classifier: ", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100) print("NuSVC_classifier: ", (nltk.classify.accuracy(NuSVC_classifier, testing_set))*100) ``` ## passing it voting class function ``` voted_classifier = VoteClassifier( naive_bayes_classifier, MNB_classifier, BernoulliNB_classifier, LogisticRegression_classifier, SGDClassifier_classifier, LinearSVC_classifier, NuSVC_classifier ) print("Voted classifier accuracy : ", (nltk.classify.accuracy(voted_classifier, testing_set))*100) print("Classification:", voted_classifier.classify(testing_set[0][0]), "Confidence %:",voted_classifier.confidence(testing_set[0][0])*100) print("Classification:", voted_classifier.classify(testing_set[1][0]), "Confidence %:",voted_classifier.confidence(testing_set[1][0])*100) print("Classification:", voted_classifier.classify(testing_set[2][0]), "Confidence %:",voted_classifier.confidence(testing_set[2][0])*100) 
print("Classification:", voted_classifier.classify(testing_set[3][0]), "Confidence %:",voted_classifier.confidence(testing_set[3][0])*100) print("Classification:", voted_classifier.classify(testing_set[4][0]), "Confidence %:",voted_classifier.confidence(testing_set[4][0])*100) print("Classification:", voted_classifier.classify(testing_set[5][0]), "Confidence %:",voted_classifier.confidence(testing_set[5][0])*100) ```
github_jupyter
# Deep Reinforcement Learning using AlphaZero methodology Please see https://applied-data.science/blog/how-to-build-your-own-alphazero-ai-using-python-and-keras/ for further notes on the codebase ## 1. First load the core libraries ``` # -*- coding: utf-8 -*- # %matplotlib inline import numpy as np np.set_printoptions(suppress=True) from shutil import copyfile import random from keras.utils import plot_model from game import Game, GameState from agent import Agent from memory import Memory from model import Residual_CNN from funcs import playMatches, playMatchesBetweenVersions import loggers as lg from settings import run_folder, run_archive_folder import initialise import pickle ``` ## 2. Now run this block to start the learning process This block loops for ever, continually learning from new game data. The current best model and memories are saved in the run folder so you can kill the process and restart from the last checkpoint. ``` lg.logger_main.info('=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*') lg.logger_main.info('=*=*=*=*=*=. 
NEW LOG =*=*=*=*=*') lg.logger_main.info('=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*') env = Game() # If loading an existing neural network, copy the config file to root if initialise.INITIAL_RUN_NUMBER != None: copyfile(run_archive_folder + env.name + '/run' + str(initialise.INITIAL_RUN_NUMBER).zfill(4) + '/config.py', './config.py') import config ######## LOAD MEMORIES IF NECESSARY ######## if initialise.INITIAL_MEMORY_VERSION == None: memory = Memory(config.MEMORY_SIZE) else: print('LOADING MEMORY VERSION ' + str(initialise.INITIAL_MEMORY_VERSION) + '...') memory = pickle.load( open( run_archive_folder + env.name + '/run' + str(initialise.INITIAL_RUN_NUMBER).zfill(4) + "/memory/memory" + str(initialise.INITIAL_MEMORY_VERSION).zfill(4) + ".p", "rb" ) ) ######## LOAD MODEL IF NECESSARY ######## # create untrained neural network objects from the config file current_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, (2,) + env.grid_shape, env.action_size, config.HIDDEN_CNN_LAYERS) best_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, (2,) + env.grid_shape, env.action_size, config.HIDDEN_CNN_LAYERS) #If loading an existing neural network, set the weights from that model if initialise.INITIAL_MODEL_VERSION != None: best_player_version = initialise.INITIAL_MODEL_VERSION print('LOADING MODEL VERSION ' + str(initialise.INITIAL_MODEL_VERSION) + '...') m_tmp = best_NN.read(env.name, initialise.INITIAL_RUN_NUMBER, best_player_version) current_NN.model.set_weights(m_tmp.get_weights()) best_NN.model.set_weights(m_tmp.get_weights()) #otherwise just ensure the weights on the two players are the same else: best_player_version = 0 best_NN.model.set_weights(current_NN.model.get_weights()) #copy the config file to the run folder copyfile('./config.py', run_folder + 'config.py') plot_model(current_NN.model, to_file=run_folder + 'models/model.png', show_shapes = True) print('\n') ######## CREATE THE PLAYERS ######## current_player = Agent('current_player', 
env.state_size, env.action_size, config.MCTS_SIMS, config.CPUCT, current_NN) best_player = Agent('best_player', env.state_size, env.action_size, config.MCTS_SIMS, config.CPUCT, best_NN) #user_player = User('player1', env.state_size, env.action_size) iteration = 0 while 1: iteration += 1 reload(lg) reload(config) print('ITERATION NUMBER ' + str(iteration)) lg.logger_main.info('BEST PLAYER VERSION: %d', best_player_version) print('BEST PLAYER VERSION ' + str(best_player_version)) ######## SELF PLAY ######## print('SELF PLAYING ' + str(config.EPISODES) + ' EPISODES...') _, memory, _, _ = playMatches(best_player, best_player, config.EPISODES, lg.logger_main, turns_until_tau0 = config.TURNS_UNTIL_TAU0, memory = memory) print('\n') memory.clear_stmemory() if len(memory.ltmemory) >= config.MEMORY_SIZE: ######## RETRAINING ######## print('RETRAINING...') current_player.replay(memory.ltmemory) print('') if iteration % 5 == 0: pickle.dump( memory, open( run_folder + "memory/memory" + str(iteration).zfill(4) + ".p", "wb" ) ) lg.logger_memory.info('====================') lg.logger_memory.info('NEW MEMORIES') lg.logger_memory.info('====================') memory_samp = random.sample(memory.ltmemory, min(1000, len(memory.ltmemory))) for s in memory_samp: current_value, current_probs, _ = current_player.get_preds(s['state']) best_value, best_probs, _ = best_player.get_preds(s['state']) lg.logger_memory.info('MCTS VALUE FOR %s: %f', s['playerTurn'], s['value']) lg.logger_memory.info('CUR PRED VALUE FOR %s: %f', s['playerTurn'], current_value) lg.logger_memory.info('BES PRED VALUE FOR %s: %f', s['playerTurn'], best_value) lg.logger_memory.info('THE MCTS ACTION VALUES: %s', ['%.2f' % elem for elem in s['AV']] ) lg.logger_memory.info('CUR PRED ACTION VALUES: %s', ['%.2f' % elem for elem in current_probs]) lg.logger_memory.info('BES PRED ACTION VALUES: %s', ['%.2f' % elem for elem in best_probs]) lg.logger_memory.info('ID: %s', s['state'].id) lg.logger_memory.info('INPUT TO MODEL: %s', 
current_player.model.convertToModelInput(s['state'])) s['state'].render(lg.logger_memory) ######## TOURNAMENT ######## print('TOURNAMENT...') scores, _, points, sp_scores = playMatches(best_player, current_player, config.EVAL_EPISODES, lg.logger_tourney, turns_until_tau0 = 0, memory = None) print('\nSCORES') print(scores) print('\nSTARTING PLAYER / NON-STARTING PLAYER SCORES') print(sp_scores) #print(points) print('\n\n') if scores['current_player'] > scores['best_player'] * config.SCORING_THRESHOLD: best_player_version = best_player_version + 1 best_NN.model.set_weights(current_NN.model.get_weights()) best_NN.write(env.name, best_player_version) else: print('MEMORY SIZE: ' + str(len(memory.ltmemory))) ``` ## The following panels are not involved in the learning process ### Play matches between versions (use -1 for human player) ``` from game import Game from funcs import playMatchesBetweenVersions import loggers as lg env = Game() playMatchesBetweenVersions(env, 1, 1, 1, 10, lg.logger_tourney, 0) ``` ### Pass a particular game state through the neural network (setup below for Connect4) ``` gs = GameState(np.array([ 0,0,0,0,0,0,0, 0,0,0,0,0,0,0, 0,0,0,0,0,0,0, 0,0,0,0,0,0,0, 0,0,0,0,0,0,0, 0,0,0,0,0,0,0 ]), 1) preds = current_player.get_preds(gs) print(preds) ``` ### See the layers of the current neural network ``` current_player.model.viewLayers() ``` ### Output a diagram of the neural network architecture ``` from keras.utils import plot_model plot_model(current_NN.model, to_file=run_folder + 'models/model.png', show_shapes = True) ```
github_jupyter
In this lesson, I'll be talking about **imports** in Python, giving some tips for working with unfamiliar libraries (and the objects they return), and digging into the guts of Python just a bit to talk about **operator overloading**. ## Imports So far we've talked about types and functions which are built-in to the language. But one of the best things about Python (especially if you're a data scientist) is the vast number of high-quality custom libraries that have been written for it. Some of these libraries are in the "standard library", meaning you can find them anywhere you run Python. Others libraries can be easily added, even if they aren't always shipped with Python. Either way, we'll access this code with **imports**. We'll start our example by importing `math` from the standard library. ``` import math print("It's math! It has type {}".format(type(math))) ``` `math` is a module. A module is just a collection of variables (a *namespace*, if you like) defined by someone else. We can see all the names in `math` using the built-in function `dir()`. ``` print(dir(math)) ``` We can access these variables using dot syntax. Some of them refer to simple values, like `math.pi`: ``` print("pi to 4 significant digits = {:.4}".format(math.pi)) ``` But most of what we'll find in the module are functions, like `math.log`: ``` math.log(32, 2) ``` Of course, if we don't know what `math.log` does, we can call `help()` on it: ``` help(math.log) ``` We can also call `help()` on the module itself. This will give us the combined documentation for *all* the functions and values in the module (as well as a high-level description of the module). Click the "output" button to see the whole `math` help page. ``` help(math) ``` ### Other import syntax If we know we'll be using functions in `math` frequently we can import it under a shorter alias to save some typing (though in this case "math" is already pretty short). 
``` import math as mt mt.pi ``` > You may have seen code that does this with certain popular libraries like Pandas, Numpy, Tensorflow, or Matplotlib. For example, it's a common convention to `import numpy as np` and `import pandas as pd`. The `as` simply renames the imported module. It's equivalent to doing something like: ``` import math mt = math ``` Wouldn't it be great if we could refer to all the variables in the `math` module by themselves? i.e. if we could just refer to `pi` instead of `math.pi` or `mt.pi`? Good news: we can do that. ``` from math import * print(pi, log(32, 2)) ``` `import *` makes all the module's variables directly accessible to you (without any dotted prefix). Bad news: some purists might grumble at you for doing this. Worse: they kind of have a point. ``` from math import * from numpy import * print(pi, log(32, 2)) ``` What the what? But it worked before! These kinds of "star imports" can occasionally lead to weird, difficult-to-debug situations. The problem in this case is that the `math` and `numpy` modules both have functions called `log`, but they have different semantics. Because we import from `numpy` second, its `log` overwrites (or "shadows") the `log` variable we imported from `math`. A good compromise is to import only the specific things we'll need from each module: ``` from math import log, pi from numpy import asarray ``` ### Submodules We've seen that modules contain variables which can refer to functions or values. Something to be aware of is that they can also have variables referring to *other modules*. ``` import numpy print("numpy.random is a", type(numpy.random)) print("it contains names such as...", dir(numpy.random)[-15:] ) ``` So if we import `numpy` as above, then calling a function in the `random` "submodule" will require *two* dots. 
``` # Roll 10 dice rolls = numpy.random.randint(low=1, high=6, size=10) rolls ``` # Oh the places you'll go, oh the objects you'll see So after 6 lessons, you're a pro with ints, floats, bools, lists, strings, and dicts (right?). Even if that were true, it doesn't end there. As you work with various libraries for specialized tasks, you'll find that they define their own types which you'll have to learn to work with. For example, if you work with the graphing library `matplotlib`, you'll be coming into contact with objects it defines which represent Subplots, Figures, TickMarks, and Annotations. `pandas` functions will give you DataFrames and Series. In this section, I want to share with you a quick survival guide for working with strange types. ## Three tools for understanding strange objects In the cell above, we saw that calling a `numpy` function gave us an "array". We've never seen anything like this before (not in this course anyways). But don't panic: we have three familiar builtin functions to help us here. **1: `type()`** (what is this thing?) ``` type(rolls) ``` **2: `dir()`** (what can I do with it?) ``` print(dir(rolls)) # What am I trying to do with this dice roll data? Maybe I want the average roll, in which case the "mean" # method looks promising... rolls.mean() # Or maybe I just want to get back on familiar ground, in which case I might want to check out "tolist" rolls.tolist() ``` **3: `help()`** (tell me more) ``` # That "ravel" attribute sounds interesting. I'm a big classical music fan. help(rolls.ravel) # Okay, just tell me everything there is to know about numpy.ndarray # (Click the "output" button to see the novel-length output) help(rolls) ``` (Of course, you might also prefer to check out [the online docs](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.ndarray.html)) ### Operator overloading What's the value of the below expression? ``` [3, 4, 1, 2, 2, 1] + 10 ``` What a silly question. Of course it's an error. 
But what about... ``` rolls + 10 ``` We might think that Python strictly polices how pieces of its core syntax behave such as `+`, `<`, `in`, `==`, or square brackets for indexing and slicing. But in fact, it takes a very hands-off approach. When you define a new type, you can choose how addition works for it, or what it means for an object of that type to be equal to something else. The designers of lists decided that adding them to numbers wasn't allowed. The designers of `numpy` arrays went a different way (adding the number to each element of the array). Here are a few more examples of how `numpy` arrays interact unexpectedly with Python operators (or at least differently from lists). ``` # At which indices are the dice less than or equal to 3? rolls <= 3 xlist = [[1,2,3],[2,4,6],] # Create a 2-dimensional array x = numpy.asarray(xlist) print("xlist = {}\nx =\n{}".format(xlist, x)) # Get the last element of the second row of our numpy array x[1,-1] # Get the last element of the second sublist of our nested list? xlist[1,-1] ``` numpy's `ndarray` type is specialized for working with multi-dimensional data, so it defines its own logic for indexing, allowing us to index by a tuple to specify the index at each dimension. ### When does 1 + 1 not equal 2? Things can get weirder than this. You may have heard of (or even used) tensorflow, a Python library popularly used for deep learning. It makes extensive use of operator overloading. ``` import tensorflow as tf # Create two constants, each with value 1 a = tf.constant(1) b = tf.constant(1) # Add them together to get... a + b ``` `a + b` isn't 2, it is (to quote tensorflow's documentation)... > a symbolic handle to one of the outputs of an `Operation`. It does not hold the values of that operation's output, but instead provides a means of computing those values in a TensorFlow `tf.Session`. 
It's important just to be aware of the fact that this sort of thing is possible and that libraries will often use operator overloading in non-obvious or magical-seeming ways. Understanding how Python's operators work when applied to ints, strings, and lists is no guarantee that you'll be able to immediately understand what they do when applied to a tensorflow `Tensor`, or a numpy `ndarray`, or a pandas `DataFrame`. Once you've had a little taste of DataFrames, for example, an expression like the one below starts to look appealingly intuitive: ```python # Get the rows with population over 1m in South America df[(df['population'] > 10**6) & (df['continent'] == 'South America')] ``` But why does it work? The example above features something like **5** different overloaded operators. What's each of those operations doing? It can help to know the answer when things start going wrong. #### Curious how it all works? Have you ever called `help()` or `dir()` on an object and wondered what the heck all those names with the double-underscores were? ``` print(dir(list)) ``` This turns out to be directly related to operator overloading. When Python programmers want to define how operators behave on their types, they do so by implementing methods with special names beginning and ending with 2 underscores such as `__lt__`, `__setattr__`, or `__contains__`. Generally, names that follow this double-underscore format have a special meaning to Python. So, for example, the expression `x in [1, 2, 3]` is actually calling the list method `__contains__` behind-the-scenes. It's equivalent to (the much uglier) `[1, 2, 3].__contains__(x)`. If you're curious to learn more, you can check out [Python's official documentation](https://docs.python.org/3.4/reference/datamodel.html#special-method-names), which describes many, many more of these special "underscores" methods. 
We won't be defining our own types in these lessons (if only there was time!), but I hope you'll get to experience the joys of defining your own wonderful, weird types later down the road. # Your turn! Head over to **[the final coding exercise](https://www.kaggle.com/kernels/fork/1275190)** for one more round of coding questions involving imports, working with unfamiliar objects, and, of course, more gambling. --- *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161283) to chat with other Learners.*
github_jupyter
**This notebook is an exercise in the [Data Cleaning](https://www.kaggle.com/learn/data-cleaning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/parsing-dates).** --- In this exercise, you'll apply what you learned in the **Parsing dates** tutorial. # Setup The questions below will give you feedback on your work. Run the following cell to set up the feedback system. ``` from learntools.core import binder binder.bind(globals()) from learntools.data_cleaning.ex3 import * print("Setup Complete") ``` # Get our environment set up The first thing we'll need to do is load in the libraries and dataset we'll be using. We'll be working with a dataset containing information on earthquakes that occured between 1965 and 2016. ``` # modules we'll use import pandas as pd import numpy as np import seaborn as sns import datetime # read in our data earthquakes = pd.read_csv("../input/earthquake-database/database.csv") # set seed for reproducibility np.random.seed(0) ``` # 1) Check the data type of our date column You'll be working with the "Date" column from the `earthquakes` dataframe. Investigate this column now: does it look like it contains dates? What is the dtype of the column? ``` # TODO: Your code here! earthquakes['Date'].head() ``` Once you have answered the question above, run the code cell below to get credit for your work. ``` # Check your answer (Run this code cell to receive credit!) q1.check() # Line below will give you a hint #q1.hint() ``` # 2) Convert our date columns to datetime Most of the entries in the "Date" column follow the same format: "month/day/four-digit year". However, the entry at index 3378 follows a completely different pattern. Run the code cell below to see this. ``` earthquakes[3378:3383] ``` This does appear to be an issue with data entry: ideally, all entries in the column have the same format. We can get an idea of how widespread this issue is by checking the length of each entry in the "Date" column. 
``` date_lengths = earthquakes.Date.str.len() date_lengths.value_counts() ``` Looks like there are two more rows that has a date in a different format. Run the code cell below to obtain the indices corresponding to those rows and print the data. ``` indices = np.where([date_lengths == 24])[1] print('Indices with corrupted data:', indices) earthquakes.loc[indices] ``` Given all of this information, it's your turn to create a new column "date_parsed" in the `earthquakes` dataset that has correctly parsed dates in it. **Note**: When completing this problem, you are allowed to (but are not required to) amend the entries in the "Date" and "Time" columns. Do not remove any rows from the dataset. ``` # TODO: Your code here earthquakes.loc[3378, "Date"] = "02/23/1975" earthquakes.loc[7512, "Date"] = "04/28/1985" earthquakes.loc[20650, "Date"] = "03/13/2011" earthquakes['date_parsed'] = pd.to_datetime(earthquakes['Date'], infer_datetime_format=True) # Check your answer q2.check() # Lines below will give you a hint or solution code q2.hint() q2.solution() ``` # 3) Select the day of the month Create a Pandas Series `day_of_month_earthquakes` containing the day of the month from the "date_parsed" column. ``` # try to get the day of the month from the date column day_of_month_earthquakes = earthquakes['date_parsed'].dt.day # Check your answer q3.check() # Lines below will give you a hint or solution code #q3.hint() #q3.solution() ``` # 4) Plot the day of the month to check the date parsing Plot the days of the month from your earthquake dataset. ``` # TODO: Your code here! day_of_month_earthquakes = day_of_month_earthquakes.dropna() # plot the day of the month sns.distplot(day_of_month_earthquakes, kde=False, bins=31) ``` Does the graph make sense to you? ``` # Check your answer (Run this code cell to receive credit!) 
q4.check() # Line below will give you a hint #q4.hint() ``` # (Optional) Bonus Challenge For an extra challenge, you'll work with a [Smithsonian dataset](https://www.kaggle.com/smithsonian/volcanic-eruptions) that documents Earth's volcanoes and their eruptive history over the past 10,000 years Run the next code cell to load the data. ``` volcanos = pd.read_csv("../input/volcanic-eruptions/database.csv") ``` Try parsing the column "Last Known Eruption" from the `volcanos` dataframe. This column contains a mixture of text ("Unknown") and years both before the common era (BCE, also known as BC) and in the common era (CE, also known as AD). ``` volcanos['Last Known Eruption'].sample(5) ``` # (Optional) More practice If you're interested in graphing time series, [check out this tutorial](https://www.kaggle.com/residentmario/time-series-plotting-optional). You can also look into passing columns that you know have dates in them the `parse_dates` argument in `read_csv`. (The documention [is here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html).) Do note that this method can be very slow, but depending on your needs it may sometimes be handy to use. # Keep going In the next lesson, learn how to [**work with character encodings**](https://www.kaggle.com/alexisbcook/character-encodings). --- *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/172650) to chat with other Learners.*
github_jupyter
# Data Preprocessing :label:`sec_pandas` So far we have introduced a variety of techniques for manipulating data that are already stored in tensors. To apply deep learning to solving real-world problems, we often begin with preprocessing raw data, rather than those nicely prepared data in the tensor format. Among popular data analytic tools in Python, the `pandas` package is commonly used. Like many other extension packages in the vast ecosystem of Python, `pandas` can work together with tensors. So, we will briefly walk through steps for preprocessing raw data with `pandas` and converting them into the tensor format. We will cover more data preprocessing techniques in later chapters. ## Reading the Dataset As an example, we begin by creating an artificial dataset that is stored in a csv (comma-separated values) file `../data/house_tiny.csv`. Data stored in other formats may be processed in similar ways. Below we write the dataset row by row into a csv file. ``` import os os.makedirs(os.path.join('..', 'data'), exist_ok=True) data_file = os.path.join('..', 'data', 'house_tiny.csv') with open(data_file, 'w') as f: f.write('NumRooms,Alley,Price\n') # Column names f.write('NA,Pave,127500\n') # Each row represents a data example f.write('2,NA,106000\n') f.write('4,NA,178100\n') f.write('NA,NA,140000\n') ``` To load the raw dataset from the created csv file, we import the `pandas` package and invoke the `read_csv` function. This dataset has four rows and three columns, where each row describes the number of rooms ("NumRooms"), the alley type ("Alley"), and the price ("Price") of a house. ``` # If pandas is not installed, just uncomment the following line: # !pip install pandas import pandas as pd data = pd.read_csv(data_file) print(data) ``` ## Handling Missing Data Note that "NaN" entries are missing values. 
To handle missing data, typical methods include *imputation* and *deletion*, where imputation replaces missing values with substituted ones, while deletion ignores missing values. Here we will consider imputation. By integer-location based indexing (`iloc`), we split `data` into `inputs` and `outputs`, where the former takes the first two columns while the latter only keeps the last column. For numerical values in `inputs` that are missing, we replace the "NaN" entries with the mean value of the same column. ``` inputs, outputs = data.iloc[:, 0:2], data.iloc[:, 2] inputs = inputs.fillna(inputs.mean()) print(inputs) ``` For categorical or discrete values in `inputs`, we consider "NaN" as a category. Since the "Alley" column only takes two types of categorical values "Pave" and "NaN", `pandas` can automatically convert this column to two columns "Alley_Pave" and "Alley_nan". A row whose alley type is "Pave" will set values of "Alley_Pave" and "Alley_nan" to 1 and 0. A row with a missing alley type will set their values to 0 and 1. ``` inputs = pd.get_dummies(inputs, dummy_na=True) print(inputs) ``` ## Conversion to the Tensor Format Now that all the entries in `inputs` and `outputs` are numerical, they can be converted to the tensor format. Once data are in this format, they can be further manipulated with those tensor functionalities that we have introduced in :numref:`sec_ndarray`. ``` import tensorflow as tf X, y = tf.constant(inputs.values), tf.constant(outputs.values) X, y ``` ## Summary * Like many other extension packages in the vast ecosystem of Python, `pandas` can work together with tensors. * Imputation and deletion can be used to handle missing data. ## Exercises Create a raw dataset with more rows and columns. 1. Delete the column with the most missing values. 2. Convert the preprocessed dataset to the tensor format. [Discussions](https://discuss.d2l.ai/t/195)
github_jupyter
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 # Gremlin examples using the air-routes data set ![image.png](attachment:image.png) ## Introduction In this Notebook we examine various Gremlin queries that explore a property graph that contains a model of the worldwide air route network. There are vertices for airports, countries and continents. There are edges between airports and between the countries,continents and airports. Each airport has a set of properties and the edges between airports have a property that represent the distance in miles. You can find the data set along with many other samples of code and data at the following GitHub repository: https://github.com/krlawrence/graph ### Let's check our configuration and Neptune cluster status Before we start running any queries let's first of all make sure we are connected to Neptune and that the cluster is ready. Note that `%graph_notebook_config` will return the current configuration of your notebook. If, for example, you wanted to connect to a different Amazon Neptune endpoint you could create a cell with `%%graph_notebook_config` at the top, paste the results from running `%graph_notebook_config` into it, make the required edits and run the cell. Commands that start with a single percent sign are known as **_line magics_** and commands that start with two percent signs are known as **_cell magics_**. ``` %graph_notebook_version %graph_notebook_config %status ``` ### Using a different endpoint You can change the endpoint that this notebook is connected to at any time using the `%graph_notebook_host` or `%%graph_notebook_config` commands. Edit and run the cell below whenever you need to change the endpoint you are connected to. ``` %graph_notebook_host your-hostname-here ``` ## Loading data ### Now let's load some property graph data. 
The cell below makes it easy to load the air-routes data into your Neptune cluster in a form compatible with Gremlin (property graph) queries. When you run the cell you will be prompted to select a `Data model` and a `Data set`. Select `Property_Graph` and `airports` respectively. **You only need to do this once**. If you have already loaded the data previously you do not need to load it again. ``` %seed ``` ## Seeing a visual representation of your query The results of any Gremlin query that returns a `path` can be explored visually. When such queries are run you will see a `Graph` tab in the query results area alongside the `Console` tab. Specific visualization features will be explained using examples throughout this notebook. As Gremlin queries allow for `by` modulators to be used to modify the representation of `path` results, there are some rules concerning how results will be rendered visually. These rules are worth remembering. The default behavior for vertex and edge `path` results that are not modified using `by` modulators is to use their labels to annotate the visualization. In some cases, the Neptune Notebook visualizer can figure out for itself whether an element in a `path` result represents a vertex or an edge and, in some cases, the direction the edge follows. Two simple examples of such queries are shown below. As the first query does not contain any edge information in the `path` result the edge direction cannot be determined automatically by the visualizer. `g.V().hasLabel('airport').out().path().limit(5)` In this case, the edge direction can be determined as there is an `outE` step included in the query. `g.V().hasLabel('airport').outE().inV().path().limit(5)` When no `by` modulators are provided, the visualizer will use the vertex and edge labels to annotate the elements of the diagram. 
However, when `by` modulators are used, it is not possible for the visualizer in all cases to decide on its own which path elements represent a vertex and which represent an edge. Here is an example of such a query: `g.V().hasLabel('airport').outE().inV().path().by('code').by('dist').limit(5)` It is also not always possible for the visualizer to decide which direction an edge follows. In this case and in the case where `by` modulators are used, the visualizer allows for some special hints to be provided to assist in producing the desired diagram. ### Query visualization hints You can specify query visualization hints using either `-p` or `--path-pattern` after the `%%gremin` cell "magic". The syntax in general is: `%%gremlin -p | --path-pattern <comma separated hints>` The names of the hints reflect the Gremlin steps most commonly used when traversing between vertices and behave accordingly. The hints used should match the corresponding Gremlin steps used in the query. The hints used can be any combination of those shown in the list below, separated by commas. The list must not contain any spaces between the commas. - v - inv - outv - e - ine - oute We can provide visualization hints for the query shown earlier as follows: `%%gremlin -p v,oute,inv` `g.V().hasLabel('airport').outE().inV().path().by('code').by('dist').limit(5)` You will find that query in the cell below. Try running it with and without the hints present and observe the differences to the visualization by selecting the `Graph` tab. Notice that without the hint the visualizer is unable to determine if the `dist` property relates to a vertex or an edge and therefore defaults to using a vertex. ### A note about `elementMap` Starting with the 2.1.5 release of the graph-notebook project, the `elementMap` step now receives special processing that reduces the need for path query hints. 
If you have a query of the form: `g.V().has('code','SAF').outE().inV().path().by(elementMap())` there is no need to provide additional `-p` hints as an `elementMap` result includes information about the direction of an edge and the vertices it is connected to. This notebook still contains examples that use `-p` as there remain cases where it is useful. However, updates have been made to include queries that take advantage of `elementMap`. There are also some examples that use `valueMap` as well. ### Controlling how vertex and edge labels are drawn All of these settings are optional. By default, vertices in the visualization display their label text if it is available and not overridden by the query and visualization options. The parameter `$my_mapping` refers to a Python variable that you create in a different cell that provides a mapping from a vertex label to the name of a property you prefer to be used to label that vertex in the visualization. For example `my_mapping = '{"airport":"city","country":"code"}'` This can then be used as follows: `%%gremlin -d $my_mapping` or `%%gremlin --display-property $my_mapping` Similarly, the `-de` or `--edge_display_property` options can be used to specify how to label edges. In cases where each vertex will be labelled using a single property name the -d option does not require a map. The property name is all that is needed. The same is true for edges and the -de option. `%%gremlin -d city -de dist` In order for the `-d` and `-de` options to take effect, the path results need to contain key/value pair maps. This can be achieved using Gremlin steps such as: - `path().by(ValueMap(true))` - `path().by(valueMap().with(WithOptions.tokens))` - `path().by(elementMap())` By default labels shown in the visualization are truncated at 10 characters and the last 3 of the ten are replaced by an ellipsis. You can override this setting using the `-l` or `--label-max-length` option. 
For example: `%%gremlin -l 20` Vertices in the visualization are colored (grouped) using their labels by default. You can change this using the `-g` option. For example, to group nodes by the `country` property: `%%gremlin -g country` Grouping can be completely disabled using the `--ignore-groups` option. ### Adjusting the visualization layout and other settings You can further adjust many of the visualization settings using the two commands - `%graph_notebook_vis_options` - `%%graph_notebook_vis_options` It is also possible to control the way that vertices are grouped with each group having a different color. This is done using the `-g` or `--group-by` option. There are examples of how to use these settings and links to additional documentation near [the bottom of this notebook](#Changing-the-visualization-settings). ## Capturing results and variable substitution The results from any query can be captured in a variable using the `--store-to` option. For example: ``` %%gremlin --store-to res g.V().has('code','AUS').values('city') ``` The variable `res` can then be passed to Python code running in another cell, for example: ``` print(res[0]) Austin ``` Results from other queries, or simply Python variables created in other cells can be substituted back into queries using the `${variable_name}` formulation. For example in one cell, run the following: ``` city = res[0] ``` and then in the next cell run this query: ``` %%gremlin g.V().has('city','${city}').values('code') AUS ``` ### Getting help You can get help at any time using the `--help` option as follows (the cell body needs at least one character in it for `--help` to work). ``` %%gremlin --help x ``` ### A simple example Run the two cells below to see some of the visualization hints at work. The hints are demonstrated throughout this notebook. 
``` my_node_labels = '{"airport":"city"}' my_edge_labels = '{"route":"dist"}' %%gremlin -p v,oute,inv -d $my_node_labels -l 20 -g country -de $my_edge_labels g.V().has('code','LHR').outE().inV().path().by(valueMap(true)).limit(5) ``` We can rewrite the query to generate a path populated by `elementMap` results. We no longer need to specify any `-p` hints in this case as the path contains all the information needed to produce a correct visualization. ``` %%gremlin -d $my_node_labels -l 20 -g country -de $my_edge_labels g.V().has('code','LHR').outE().inV().path().by(elementMap()).limit(5) ``` In cases where each vertex will be labelled using a single property name the `-d` option does not require a map. The property name is all that is needed. The same is true for edges and the `-de` option. ``` %%gremlin -d city -l 20 -g country -de dist g.V().has('code','LHR').outE().inV().path().by(elementMap()).limit(5) ``` ## Example Gremlin Queries The notebook cells that follow contain various queries that showcase both features of the Gremlin query language and features of the Amazon Neptune Notebooks. ### Let's find out a bit about the vertices and edges in the graph Each vertex has a label that essentially gives it a type. ``` %%gremlin g.V().groupCount().by(label).unfold() ``` As with vertices, every edge has a label. ``` %%gremlin g.E().groupCount().by(label).unfold() ``` ### Finding all routes from Cozumel (CZM) Try running the query below and clicking on the Graph tab to see the route map for flights from Cozumel. As `by` modulators are used to return the airport code, the resultant path that you can see under the Console tab, does not have any information in it that can be used to figure out which direction the path is in. To help with the visualization provided under the Graph tab, a hint can be provided to tell the notebook how you would like the edges to be drawn and where you want the arrows placed. In this case a hint of `-p v,inv` is used. 
This informs the rendering algorithm that the path has an initial vertex that is connected to other vertices by outgoing edges. The hint notation mirrors that used by the corresponding Gremlin steps hence `v,inv` means there is a starting vertex connected by outgoing edges to other vertices (in Gremlin terminology an incoming vertex is what you find at the end of an outgoing edge). If you remove the hint and run the query again you will see that the arrows disappear as there is not enough information in the path result to determine the direction of the edges. To specify this hint you can either use `-p` or `--path-pattern`. The hint is only applicable in cases where the result generated is a `path`. Only queries that return a `path` will cause a visual rendering to be offered. ``` %%gremlin -p v,inv g.V().hasLabel('airport').has('code','CZM').out('route').path().by('code') ``` We can also use a shorthand form of the `has` step to achieve the same results as the prior query. ``` %%gremlin -p v,inv g.V().has('airport','code','CZM').out('route').path().by('code') ``` Let's run the same query but this time return the city name ``` %%gremlin -p v,inv g.V().has('airport','code','CZM').out('route').path().by('city') ``` ### Using the details view ![image.png](attachment:image.png) Run the query below, select the Graph tab, select a node in the graph and click on the details button (the one with three dots, one above the other). As you select different vertices the details view window will update. This is useful when you want to explore lots of properties for multiple vertices using the Graph view. ``` my_node_labels = '{"airport":"code"}' %%gremlin -p v,inv -d $my_node_labels g.V().has('airport','code','CZM'). out('route'). path(). by(valueMap(true,'code','city','region','desc','lat','lon','country')) ``` ### Coloring Results by Group Run the query below, select the Graph tab, and you will see that the vertices in the graph are now grouped (colored) using the country code. 
``` my_node_labels = '{"airport":"code"}' %%gremlin -p v,inv -g country -d $my_node_labels g.V().has('airport','code','CZM'). out('route'). path(). by(valueMap(true, 'code','city','region','desc','lat','lon', 'country')) ``` To group vertices together by color we use the `-g` or `--group-by` switch for the `%%gremlin` command. This specifies the property of the vertex to use to group items together. If no property is specified then items will be grouped by the `T.label` property if present. If it is not present, or the property specified does not exist for a particular vertex then the vertex is added to a default group. Grouping of items is enabled by default but can be disabled using the `--ignore-groups` switch. ### How long are the routes from Cozumel? In order to find the distance of a route we need to look at the edge properties. This requires us to include an `outE` step in the query. You will see this used again later and explained in more detail near the end of this notebook. The results are ordered by ascending distance. This query has been spread out over several lines and indented to aid readability. In general, for longer Gremlin queries, spreading the steps out over multiple lines in this way helps both readers and maintainers of your work more quickly see what is going on. ``` %%gremlin -p v,oute,inv g.V(). has('airport','code','CZM'). outE('route').as('a'). inV(). order(). by(select('a').values('dist')). path(). by('code'). by('dist') ``` ### Exploring countries The query below finds the countries in the graph. You can use the Search box to find a country quickly after running the query. The results are ordered in ascending order by country name. ``` %%gremlin g.V().hasLabel('country'). order().by('desc'). local( values('code','desc'). fold()) ``` ### Exploring countries and airports The query below selects five countries at random and finds all the airports located in those countries. Each time you run the query you should see different results. 
Try looking at the graph by clicking on the Graph tab. If you want to see a few more countries in the results you can increase the value used by the `sample` step. You can also zoom the visualization in and out by clicking on the + and - icons. **Clicking on the fullscreen icon in the Graph view will increase the size of the canvas.** ![image.png](attachment:image.png) ``` %%gremlin -p v,inv -l20 g.V().hasLabel('country'). sample(5). out('contains'). path(). by('desc'). by('city') ``` We can rewrite the query to produce a result that has all the airports grouped with their country using a `project` step. This time the code rather than the full name for each airport is used. ``` %%gremlin g.V().hasLabel('country'). sample(5). project('country','airports'). by('desc'). by(out('contains').values('code').fold()) ``` ### Finding all airports in the United Kingdom (UK) The query below finds the vertex representing the United Kingdom in the graph and from there finds all of the airports connected to it by 'contains' edges. The results are sorted in ascending order using the airport code. ``` %%gremlin g.V().has('country','code','UK'). out('contains'). order(). by('code'). project('code','desc'). by('code'). by('desc') ``` ### Which airports have the most routes? In this query we find out how many outgoing routes each airport has and create a group where the key is the airport code and the value is the number of routes. We then sort that group in descending order and return the top 20 results. Try changing the value of the `limit` step to see more or less results. Once again, note how the query has been split over multiple lines and indented to aid readability. ``` %%gremlin g.V().hasLabel('airport'). group(). by('code'). by(out().count()). order(local). by(values,desc). unfold(). limit(20) ``` The query below shows a different way to find the airports with the most routes and uses a `project` step to produce the final results. 
This time the city name rather than the airport code is used in the output. ``` %%gremlin g.V().hasLabel('airport'). order(). by(out().count(),desc). limit(20). project("Airport","Routes"). by('city'). by(out().count()) ``` ### Let's find some routes from Austin (AUS) to Wellington (WLG) The query below uses a `repeat` step to find five routes from Austin in the United States to Wellington in New Zealand. ``` %%gremlin -p v,inv,inv,inv g.V().has('airport','code','AUS'). repeat(out().simplePath()). until(has('code','WLG')). limit(5). path(). by('code') ``` To also retrieve the distance between airports we need to inspect some edge properties. We can do that by replacing `out()` with `outE().inV()` which you can read as "look at outgoing edges and the vertices at the other end of the edge". When you run the query below you will see edges in the result. In the next query we will retrieve the distances from those edges and improve the way that vertices are labelled. If you click on the Graph tab you will see that the visualizer has used the vertex and edge labels to annotate the diagram as no `by` modulators were provided in the query. ``` %%gremlin -p v,oute,inv,oute,inv,oute,inv g.V().has('airport','code','AUS'). repeat(outE().inV().simplePath()). until(has('code','WLG')). limit(5). path() ``` ### Finding the total distance of selected routes Now we have the edge results in the query we can retrieve the distances. ``` %%gremlin -p v,oute,inv,oute,inv,oute,inv g.V().has('airport','code','AUS'). repeat(outE().inV().simplePath()). until(has('code','WLG')). limit(5). path(). by('code'). by('dist') ``` We can add a few additional hints so that the nodes drawn are grouped by country, the text length is increased to 20 and for each airport the city name is displayed in the visualization. ``` my_node_labels = '{"airport":"city"}' %%gremlin -p v,oute,inv,oute,inv,oute,inv -g country -d $my_node_labels -l 20 g.V().has('airport','code','AUS'). repeat(outE().inV().simplePath()). 
until(has('code','WLG')). limit(5). path(). by(valueMap(true)). by('dist') ``` We can also take advantage of `elementMap` to avoid the need to supply any `-p` hints. ``` my_node_labels = '{"airport":"city"}' my_edge_labels = '{"route":"dist"}' %%gremlin -g country -d $my_node_labels -de $my_edge_labels -l 20 g.V().has('airport','code','AUS'). repeat(outE().inV().simplePath()). until(has('code','WLG')). limit(5). path(). by(elementMap()) ``` Putting everything together we can introduce a `sack` step to track the total distance of each route found. ``` %%gremlin g.withSack(0). V(). has('airport','code','AUS'). repeat( outE().sack(sum).by('dist'). inV().simplePath()). until(has('code','WLG')). limit(5). order(). by(sack()). local( union( path(). by('code'). by('dist'). unfold(), sack()). fold()) ``` ### Where can I fly to from Santa Fe, New Mexico, nonstop? The query below finds the cities you can fly to nonstop from Santa Fe, New Mexico. ``` %%gremlin g.V(). has('code','SAF'). out('route'). values('code') ``` The cell below can be used to produce a simple visual representation of the results. As we have not specified additional grouping query hints and the path just contains airport code strings, default coloring is used. In this case a `-p` hint is needed in order for the nodes and edges to be drawn correctly. ``` %%gremlin -p v,oute,inv -l20 g.V(). has('code','SAF'). outE(). inV(). path(). by('city'). by() ``` We can rewrite the query to generate a path populated by `elementMap` results and remove the `-p` hints. ``` my_node_labels = '{"airport":"city"}' my_edge_labels = '{"route":"dist"}' %%gremlin -g region -d $my_node_labels -de $my_edge_labels -l 20 g.V(). has('code','SAF'). outE(). inV(). path(). by(elementMap()) ``` ### How many places can I fly to from Santa Fe, New Mexico, with one stop? 
We can modify the previous query to answer the question "How many places can you get to from Santa Fe, New Mexico, with one stop but excluding the places you can get to nonstop?". If you remove the line containing the `where` step you will see the result goes up by four which is the number of airports we found in the previous query. The `simplePath` step also guarantees that we do not include flights back to Santa Fe in the results. ``` %%gremlin g.V(). has('code','SAF'). out('route'). aggregate('one'). out(). simplePath(). where(without('one')). dedup(). count() ``` Rather than just counting the places, we can change the query to list all of the city names. ``` %%gremlin g.V(). has('code','SAF'). out('route'). aggregate('one'). out(). simplePath(). where(without('one')). dedup(). values('city'). order() ``` ### What is the minimum number of stops needed to get from Austin (AUS) to Agra (AGR) ? The query below looks to see if you can get from Austin to Agra with 1 stop. You could also use a `times` step to do this but the use of `loops` allows us to be a little more sophisticated. If you run the query as-is you will get no results as you cannot get from AUS to AGR with just one stop. Try changing the values used by the `is` step to 3 and then 4 and re-running the query each time. You will notice that we are able to answer the question "Can I get to Agra from Austin with 2 or 3 stops?". The query ends once 10 results have been found or the loop counter reaches the target specified by the `is` step. ``` my_node_labels = '{"airport":"code"}' %%gremlin -p v,inv,inv,inv,inv -g country -d $my_node_labels g.V(). has('airport','code','AUS'). repeat(out('route').simplePath()). until(has('code','AGR').or().loops().is(1)). has('code','AGR'). limit(10). path(). by(valueMap(true)) ``` ### Flights from New Zealand to Australia The query below looks for any flights originating in New Zealand and terminating in Australia. 
The `valueMap` is restricted to just contain the city name, the country code as well as the vertex ID and label. ``` my_node_labels = '{"airport":"city"}' %%gremlin -p v,inv -l 12 -d $my_node_labels -g country g.V().has('country','NZ'). out('route'). has('country','AU'). path(). by(valueMap('city','country').with(WithOptions.tokens)) ``` ### Examining a slightly larger result set visually The query below retrieves a selection of routes from the graph. If you click on the Graph tab you will see the vertices and edges. You may want to zoom in and out (using the scroll wheel on your mouse or the zoom gesture on your touchpad) and also pan the drawing by holding down the left mouse button while the pointer is not over a vertex or an edge and dragging. You can also move vertices around if they are overlapping by dragging them. Switching to the fullscreen view is also helpful when there are a lot of results to explore. You can also zoom the visualization in and out by clicking on the + and - icons. To see even more results try changing the value used by the `limit` step from 200 to 300 or 400. ``` %%gremlin -p v,oute,inv g.V().outE().inV().path().by('code').by().limit(200) ``` We can add some query hints to make the diagram a little more colorful. ``` my_node_labels = '{"airport":"code"}' my_edge_labels = '{"route":"dist"}' %%gremlin -p v,oute,inv -g country -d $my_node_labels -de $my_edge_labels g.V().outE().inV().path().by(valueMap(true)).limit(200) ``` The next query also produces a result that is fun to explore using the `Graph` tab. ``` my_node_labels = '{"airport":"city"}' %%gremlin -p v,oute,inv,oute,inv,oute,inv,oute,inv -g country -d $my_node_labels g.V(). has('code','AUS'). repeat(outE().inV().where(without('x')).store('x')). times(4). limit(50). path(). by(valueMap(true)). 
by('dist') ``` ## Changing the visualization settings The Amazon Neptune Notebooks use an open source library called [Vis.js](https://github.com/visjs) to assist with drawing the graph diagrams. Vis.js provides a rich set of customizable settings. The documentation for most of the visualization settings used in this notebook can be found [here](https://visjs.org/) and in particular the graph network drawing documentation can be found [here](https://visjs.github.io/vis-network/docs/network/). To see the current settings used by your notebook you can use the `%graph_notebook_vis_options` line magic command. Try running the cell below. To change any of these settings create a new cell and use `%%graph_notebook_vis_options` to change them (note the two percent signs indicating a cell magic). These settings give you a lot of flexibility to customize your visualizations in whichever way you find most pleasing. ``` %graph_notebook_vis_options ``` ### Producing a hierarchical diagram For some types of query using a hierarchical view is quite pleasing. Try running the cell below to change a few settings and then run the next cell that demonstrates the changes. There is another cell after that one you can use to restore the default settings. 
``` %%graph_notebook_vis_options { "nodes": { "borderWidthSelected": 0, "borderWidth": 0, "color": { "background": "rgba(210, 229, 255, 1)", "border": "transparent", "highlight": { "background": "rgba(9, 104, 178, 1)", "border": "rgba(8, 62, 100, 1)" } }, "shadow": { "enabled": false }, "shape": "circle", "widthConstraint": { "minimum": 70, "maximum": 70 }, "font": { "face": "courier new", "color": "black", "size": 12 } }, "edges": { "color": { "inherit": false }, "smooth": { "enabled": true, "type": "straightCross" }, "arrows": { "to": { "enabled": true, "type": "arrow" } }, "font": { "face": "courier new" } }, "interaction": { "hover": true, "hoverConnectedEdges": true, "selectConnectedEdges": false }, "physics": { "hierarchicalRepulsion": { "centralGravity": 0 }, "minVelocity": 0.75, "solver": "hierarchicalRepulsion" }, "layout": { "hierarchical": { "enabled": true, "direction": "LR", "sortMethod": "directed", "edgeMinimization":false } } } ``` Run the query in the cell below to see the results of changing to a hierarchical network view. ``` %%gremlin -p v,inv,inv,inv g.V(). has('code','AUS'). repeat(out().simplePath()). until(has('code','WLG')). path().by('code'). limit(5) ``` ### Changing Group Colors and Adding Icons One of the features that is also available is the ability to change the color, add an image, or associate a particular icon representation for a group. Run the two cells below and you will see that all airports in Mexico are shown with the Mexican flag, all airports in the US are shown as a blue flag, and all airports in Canada are shown in red. ``` %%graph_notebook_vis_options { "groups": { "['CA']": {"color": "red"}, "['MX']": {"shape": "image", "image":"https://cdn.countryflags.com/thumbs/mexico/flag-round-250.png"}, "['US']": { "shape": "icon", "icon": { "face": "FontAwesome", "code": "\uf024", "color": "blue" } } } } %%gremlin -g country g.V().has('airport','code','CZM'). out('route'). path(). 
by(valueMap('code','city','region','desc','lat','lon', 'country'). order(local). by(keys)) ``` ### Restoring default settings Running the cell below will return all visualization settings to their default values. ``` %graph_notebook_vis_options reset ``` ## Variable substitution and reuse The `--store-to` option can be used to capture query results in a Python variable. ``` %%gremlin --store-to res g.V().has('region','US-TX').count() ``` The results can then be accessed from a Python cell. Note that the results are returned in a list so have to be accessed accordingly. ``` print(f'There are {res[0]} airports in Texas') ``` A variable can also be substituted into a query. ``` city = 'Los Angeles' %%gremlin g.V().has('city','${city}').values('desc','code') ``` ## Seeing more data If you run the cell below, the notebook will use as much of the browser window (width wise) as it can ``` from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Word embeddings <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/text/word_embeddings"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/text/word_embeddings.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/word_embeddings.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/text/word_embeddings.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial introduces word embeddings. It contains complete code to train word embeddings from scratch on a small dataset, and to visualize these embeddings using the [Embedding Projector](http://projector.tensorflow.org) (shown in the image below). 
<img src="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/images/embedding.jpg?raw=1" alt="Screenshot of the embedding projector" width="400"/> ## Representing text as numbers Machine learning models take vectors (arrays of numbers) as input. When working with text, the first thing we must do is come up with a strategy to convert strings to numbers (or to "vectorize" the text) before feeding it to the model. In this section, we will look at three strategies for doing so. ### One-hot encodings As a first idea, we might "one-hot" encode each word in our vocabulary. Consider the sentence "The cat sat on the mat". The vocabulary (or unique words) in this sentence is (cat, mat, on, sat, the). To represent each word, we will create a zero vector with length equal to the vocabulary, then place a one in the index that corresponds to the word. This approach is shown in the following diagram. <img src="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/images/one-hot.png?raw=1" alt="Diagram of one-hot encodings" width="400" /> To create a vector that contains the encoding of the sentence, we could then concatenate the one-hot vectors for each word. Key point: This approach is inefficient. A one-hot encoded vector is sparse (meaning, most indices are zero). Imagine we have 10,000 words in the vocabulary. To one-hot encode each word, we would create a vector where 99.99% of the elements are zero. ### Encode each word with a unique number A second approach we might try is to encode each word using a unique number. Continuing the example above, we could assign 1 to "cat", 2 to "mat", and so on. We could then encode the sentence "The cat sat on the mat" as a dense vector like [5, 1, 4, 3, 5, 2]. This approach is efficient. Instead of a sparse vector, we now have a dense one (where all elements are full). There are two downsides to this approach, however: * The integer-encoding is arbitrary (it does not capture any relationship between words). 
* An integer-encoding can be challenging for a model to interpret. A linear classifier, for example, learns a single weight for each feature. Because there is no relationship between the similarity of any two words and the similarity of their encodings, this feature-weight combination is not meaningful. ### Word embeddings Word embeddings give us a way to use an efficient, dense representation in which similar words have a similar encoding. Importantly, we do not have to specify this encoding by hand. An embedding is a dense vector of floating point values (the length of the vector is a parameter you specify). Instead of specifying the values for the embedding manually, they are trainable parameters (weights learned by the model during training, in the same way a model learns weights for a dense layer). It is common to see word embeddings that are 8-dimensional (for small datasets), up to 1024-dimensions when working with large datasets. A higher dimensional embedding can capture fine-grained relationships between words, but takes more data to learn. <img src="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/images/embedding2.png?raw=1" alt="Diagram of an embedding" width="400"/> Above is a diagram for a word embedding. Each word is represented as a 4-dimensional vector of floating point values. Another way to think of an embedding is as "lookup table". After these weights have been learned, we can encode each word by looking up the dense vector it corresponds to in the table. ## Setup ``` import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import tensorflow_datasets as tfds tfds.disable_progress_bar() ``` ## Using the Embedding layer Keras makes it easy to use word embeddings. Let's take a look at the [Embedding](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding) layer. 
The Embedding layer can be understood as a lookup table that maps from integer indices (which stand for specific words) to dense vectors (their embeddings). The dimensionality (or width) of the embedding is a parameter you can experiment with to see what works well for your problem, much in the same way you would experiment with the number of neurons in a Dense layer. ``` embedding_layer = layers.Embedding(1000, 5) ``` When you create an Embedding layer, the weights for the embedding are randomly initialized (just like any other layer). During training, they are gradually adjusted via backpropagation. Once trained, the learned word embeddings will roughly encode similarities between words (as they were learned for the specific problem your model is trained on). If you pass an integer to an embedding layer, the result replaces each integer with the vector from the embedding table: ``` #Using the Embedding layer result = embedding_layer(tf.constant([1,2,3])) result.numpy() ``` For text or sequence problems, the Embedding layer takes a 2D tensor of integers, of shape `(samples, sequence_length)`, where each entry is a sequence of integers. It can embed sequences of variable lengths. You could feed into the embedding layer above batches with shapes `(32, 10)` (batch of 32 sequences of length 10) or `(64, 15)` (batch of 64 sequences of length 15). The returned tensor has one more axis than the input, the embedding vectors are aligned along the new last axis. Pass it a `(2, 3)` input batch and the output is `(2, 3, N)` ``` # 2D tensor of integers, of shape (samples, sequence_length) # embedding layer : (batch of 32 sequences of length 10) or (batch of 64 sequences of length 15) # output is (2, 3, N) result = embedding_layer(tf.constant([[0,1,2],[3,4,5]])) result.shape ``` When given a batch of sequences as input, an embedding layer returns a 3D floating point tensor, of shape `(samples, sequence_length, embedding_dimensionality)`. 
To convert from this sequence of variable length to a fixed representation there are a variety of standard approaches. You could use an RNN, Attention, or pooling layer before passing it to a Dense layer. This tutorial uses pooling because it's simplest. The [Text Classification with an RNN](text_classification_rnn.ipynb) tutorial is a good next step. ## Learning embeddings from scratch In this tutorial you will train a sentiment classifier on IMDB movie reviews. In the process, the model will learn embeddings from scratch. We will use a preprocessed dataset. To load a text dataset from scratch see the [Loading text tutorial](../load_data/text.ipynb). ``` (train_data, test_data), info = tfds.load( 'imdb_reviews/subwords8k', split = (tfds.Split.TRAIN, tfds.Split.TEST), with_info=True, as_supervised=True) ``` Get the encoder (`tfds.features.text.SubwordTextEncoder`), and have a quick look at the vocabulary. The "\_" in the vocabulary represent spaces. Note how the vocabulary includes whole words (ending with "\_") and partial words which it can use to build larger words: ``` encoder = info.features['text'].encoder encoder.subwords[:20] ``` Movie reviews can be different lengths. We will use the `padded_batch` method to standardize the lengths of the reviews. ``` # Movie reviews can be different lengths. # We will use the padded_batch method to standardize the lengths of the reviews. train_batches = train_data.shuffle(1000).padded_batch(10) test_batches = test_data.shuffle(1000).padded_batch(10) ``` As imported, the text of reviews is integer-encoded (each integer represents a specific word or word-part in the vocabulary). Note the trailing zeros, because the batch is padded to the longest example. ``` #the text of reviews is integer-encoded #(each integer represents a specific word or word-part in the vocabulary). 
train_batch, train_labels = next(iter(train_batches)) train_batch.numpy() ``` ### Create a simple model We will use the [Keras Sequential API](../../guide/keras) to define our model. In this case it is a "Continuous bag of words" style model. * Next the Embedding layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`. * Next, a GlobalAveragePooling1D layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible. * This fixed-length output vector is piped through a fully-connected (Dense) layer with 16 hidden units. * The last layer is densely connected with a single output node. Using the sigmoid activation function, this value is a float between 0 and 1, representing a probability (or confidence level) that the review is positive. Caution: This model doesn't use masking, so the zero-padding is used as part of the input, so the padding length may affect the output. To fix this, see the [masking and padding guide](../../guide/keras/masking_and_padding). ``` #Create a simple model using KERAS embedding_dim=16 model = keras.Sequential([ layers.Embedding(encoder.vocab_size, embedding_dim), layers.GlobalAveragePooling1D(), layers.Dense(16, activation='relu'), layers.Dense(1) ]) model.summary() ``` ### Compile and train the model ``` # Compile and train the model model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=['accuracy']) history = model.fit( train_batches, epochs=10, validation_data=test_batches, validation_steps=20) ``` With this approach our model reaches a validation accuracy of around 88% (note the model is overfitting, training accuracy is significantly higher). 
``` import matplotlib.pyplot as plt history_dict = history.history acc = history_dict['accuracy'] val_acc = history_dict['val_accuracy'] loss=history_dict['loss'] val_loss=history_dict['val_loss'] epochs = range(1, len(acc) + 1) plt.figure(figsize=(12,9)) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() plt.figure(figsize=(12,9)) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(loc='lower right') plt.ylim((0.5,1)) plt.show() ``` ## Retrieve the learned embeddings Next, let's retrieve the word embeddings learned during training. This will be a matrix of shape `(vocab_size, embedding-dimension)`. ``` e = model.layers[0] weights = e.get_weights()[0] print(weights.shape) # shape: (vocab_size, embedding_dim) ``` We will now write the weights to disk. To use the [Embedding Projector](http://projector.tensorflow.org), we will upload two files in tab separated format: a file of vectors (containing the embedding), and a file of meta data (containing the words). ``` import io encoder = info.features['text'].encoder out_v = io.open('vecs.tsv', 'w', encoding='utf-8') out_m = io.open('meta.tsv', 'w', encoding='utf-8') for num, word in enumerate(encoder.subwords): vec = weights[num+1] # skip 0, it's padding. out_m.write(word + "\n") out_v.write('\t'.join([str(x) for x in vec]) + "\n") out_v.close() out_m.close() ``` If you are running this tutorial in [Colaboratory](https://colab.research.google.com), you can use the following snippet to download these files to your local machine (or use the file browser, *View -> Table of contents -> File browser*). 
``` try: from google.colab import files except ImportError: pass else: files.download('vecs.tsv') files.download('meta.tsv') ``` ## Visualize the embeddings To visualize our embeddings we will upload them to the embedding projector. Open the [Embedding Projector](http://projector.tensorflow.org/) (this can also run in a local TensorBoard instance). * Click on "Load data". * Upload the two files we created above: `vecs.tsv` and `meta.tsv`. The embeddings you have trained will now be displayed. You can search for words to find their closest neighbors. For example, try searching for "beautiful". You may see neighbors like "wonderful". Note: your results may be a bit different, depending on how weights were randomly initialized before training the embedding layer. Note: experimentally, you may be able to produce more interpretable embeddings by using a simpler model. Try deleting the `Dense(16)` layer, retraining the model, and visualizing the embeddings again. <img src="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/images/embedding.jpg?raw=1" alt="Screenshot of the embedding projector" width="400"/> ## Next steps This tutorial has shown you how to train and visualize word embeddings from scratch on a small dataset. * To learn about recurrent networks see the [Keras RNN Guide](../../guide/keras/rnn.ipynb). * To learn more about text classification (including the overall workflow, and if you're curious about when to use embeddings vs one-hot encodings) we recommend this practical text classification [guide](https://developers.google.com/machine-learning/guides/text-classification/step-2-5). ``` ```
github_jupyter
``` import os from dotenv import load_dotenv load_dotenv() import sys sys.path.insert(1, '../src/') import dhs_data as dd import matplotlib.pyplot as plt %matplotlib qt dhs_geo_file = os.environ.get("DHS_DATA_DIR") + "/IAGE71FL_geographic_data/IAGE71FL.shp" dhs_geo_data = dd.DHSGeographicData(dhs_geo_file) dhs_geo_data.clean() dhs_geo_data.country_gdf.shape, dhs_geo_data.country_cleaned_gdf.shape dhs_geo_data.extract_dhs() dhs_geo_data.country_extracted_gdf.head() dhs_geo_data.country_extracted_gdf.plot() (sites, weights) = dhs_geo_data.get_sites_and_radii() dhs_geo_data.country_extracted_gdf.head() import laguerre_voronoi_2d as lv2d # Compute the power triangulation of the circles tri_list, vor_vert = lv2d.get_power_triangulation(sites, weights) # Compute the Voronoi cells voronoi_cell_map = lv2d.get_voronoi_cells(sites, vor_vert, tri_list) len(voronoi_cell_map) poly_lst = dhs_geo_data.get_shapely_polygons(voronoi_cell_map) len(poly_lst) ctry_dhs_weighted_voronoi = dhs_geo_data.country_extracted_gdf p = ctry_dhs_weighted_voronoi.shape[0] ctry_dhs_weighted_voronoi.head() dhs_geo_data.combine_dhs_voronoi(poly_lst) dhs_geo_data.ctry_dhs_weighted_voronoi.head() gadm_file = os.environ.get("GADM_DATA_DIR") + "/gadm36_IND_0.shp" gadm_simplified_file = os.environ.get("OUT_DIR") + "/gadm36_IND_0_simplified.shp" india_voronoi_clipped_file = os.environ.get("OUT_DIR") + "/IAGE71FL_Voronoi_Clipped.shp" import geopandas as gpd import fiona fiona.supported_drivers final_gdf = dhs_geo_data.ctry_dhs_weighted_voronoi.copy() final_gdf.crs = "EPSG:4326" final_gdf.to_file(os.environ.get("OUT_DIR") + "/IAGE71FL_Voronoi.shp", driver = 'ESRI Shapefile') india_bdry = gpd.read_file(gadm_file) type(india_bdry.geometry), type(final_gdf.geometry) simplified_india_bdry = india_bdry.simplify(0.01, preserve_topology=True) import matplotlib.pyplot as plt %matplotlib qt simplified_india_bdry.plot() simplified_india_bdry.crs = "EPSG:4326" simplified_india_bdry.to_file(os.environ.get("OUT_DIR") + 
"/gadm36_IND_0_simplified.shp", driver = 'ESRI Shapefile') india_weighted_voronoi_clipped = gpd.clip(final_gdf, simplified_india_bdry.geometry) india_weighted_voronoi_clipped.crs = "EPSG:4326" india_weighted_voronoi_clipped.to_file(india_voronoi_clipped_file, driver = 'ESRI Shapefile') # Plot two geodataframes together fig, ax = plt.subplots() dhs_geo_data.country_extracted_gdf.plot(ax=ax, color='purple', markersize=15, alpha=0.5) india_weighted_voronoi_clipped.boundary.plot(ax=ax, color='green', facecolor=None, alpha=0.5) plt.show() ```
github_jupyter
# Decorators 101 ``` def deco(func): def inner(): print('running inner()') return inner @deco def target(): print('running target()') target() ``` # When Python Executes Decorators ``` registry= [] def register(func): print('running register(%s)' % func) registry.append(func) return func @register def f1(): print('running f1()') @register def f2(): print('running f2()') def f3(): print('running f3()') def main(): print('running main()') print('registry ->', registry) f1() f2() f3() main() ``` # Decorator-Enhanced Strategy Pattern ``` promos = [] def promotion(promo_func): promos.append(promo_func) return promo_func @promotion def fidelity(order): """5% discount for customers with 1000 or more fidelity points""" return order.total() * .05 if order.customer.fidelity >= 1000 else 0 @promotion def bulk_item(order): """10% discount for each LineItem with 20 or more units""" discount = 0 for item in order.cart: if item.quantity >= 20: discount += item.total() * .1 return discount @promotion def large_order(order): """7% discount for oders with 10 or more distinct items""" distinct_items = {item.product for item in order.cart} if len(distinct_items) >= 10: return order.total() * .07 return 0 def best_promo(order): """Select best discount available""" return max(promo(order) for promo in promos) ``` # Variable Scope Rules ``` def f1(a): print(a) print(b) f1(3) b=6 f1(3) def f2(a): print(a) print(b) b=9 f2(3) def f3(a): global b print(a) print(b) b=9 f3(3) ``` # Closures ``` class Averager(): def __init__(self): self.series = [] def __call__(self, new_value): self.series.append(new_value) total = sum(self.series) return total/len(self.series) avg = Averager() avg(10) avg(11) avg(12) def make_averager(): series = [] def averager(new_value): series.append(new_value) total = sum(series) return total/len(series) return averager avg = make_averager() avg(10) avg(11) avg(12) avg.__code__.co_varnames avg.__code__.co_freevars avg.__closure__ avg.__closure__[0].cell_contents ``` # 
The nonlocal Declaration ``` def make_averager(): count = 0 total = 0 def averager(new_value): nonlocal count, total count += 1 total += new_value return total / count return averager avg = make_averager() avg(10) ``` # Implementing a Simple Decorator ``` import time def clock(func): def clocked(*args): t0 = time.perf_counter() result = func(*args) elapsed = time.perf_counter() - t0 name = func.__name__ arg_str = ', '.join(repr(arg) for arg in args) print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result)) return result return clocked @clock def snooze(seconds): time.sleep(seconds) @clock def factorial(n): return 1 if n < 2 else n*factorial(n-1) print('*' * 40, 'Calling snooze(.123)') snooze(.123) print('*' * 40, 'Calling factorial(6)') print('6! =', factorial(6)) factorial.__name__ import functools def clock(func): @functools.wraps(func) def clocked(*args, **kwargs): t0 = time.time() result = func(*args, **kwargs) elapsed = time.time() - t0 name = func.__name__ arg_lst = [] if args: arg_lst.append(', '.join(repr(arg) for arg in args)) if kwargs: pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())] arg_lst.append(', '.join(pairs)) arg_str = ', '.join(arg_lst) print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result)) return result return clocked @clock def snooze(seconds): time.sleep(seconds) @clock def factorial(n): return 1 if n < 2 else n*factorial(n-1) print('*' * 40, 'Calling snooze(.123)') snooze(.123) print('*' * 40, 'Calling factorial(6)') print('6! 
=', factorial(6)) factorial.__name__ ``` # Decorators in the Standard Library ## Memoization with functools.lru_cache ``` from clockdeco import clock @clock def fibonacci(n): if n < 2: return n return fibonacci(n-2) + fibonacci(n-1) print(fibonacci(6)) import functools @functools.lru_cache() @clock def fibonacci(n): if n < 2: return n return fibonacci(n-2) + fibonacci(n-1) print(fibonacci(6)) ``` ## Generic Functions with Single Dispatch ``` import html def htmlize(obj): content = html.escape(repr(obj)) return '<pre>{}</pre>'.format(content) htmlize({1, 2, 3}) htmlize(abs) htmlize('Heimlich & co.\n- a game') htmlize(42) print(htmlize(['alpha', 66, {3, 2, 1}])) from functools import singledispatch from collections import abc import numbers import html @singledispatch def htmlize(obj): content = html.escape(repr(obj)) return '<pre>{}</pre>'.format(content) @htmlize.register(str) def _(text): content = html.escape(text).replace('\n', '<br>\n') return '<p>{0}</p>'.format(content) @htmlize.register(numbers.Integral) def _(n): return '<pre>{0} (0x{0:x})</pre>'.format(n) @htmlize.register(tuple) @htmlize.register(abc.MutableSequence) def _(seq): inner = '</li>\n<li>'.join(htmlize(item) for item in seq) return '<ul>\n<li>' + inner + '</li>\n</ul>' htmlize({1, 2, 3}) htmlize(abs) htmlize('Heimlich & co.\n- a game') htmlize(42) print(htmlize(['alpha', 66, {3, 2, 1}])) ``` # Parameterized Decorators ``` registry = [] def register(func): print('running register(%s)' % func) registry.append(func) return func @register def f1(): print('running f1()') print('running main()') print('registry ->', registry) f1() ``` ## A parameterized Registration Decorator ``` registry = set() def register(active=True): def decorate(func): print('running register(active=%s)->decorate(%s)' % (active, func)) if active: registry.add(func) else: registry.discard(func) return func return decorate @register(active=False) def f1(): print('running f1()') @register() def f2(): print('running f2()') def 
f3(): print('running f3()') f1() repr(registry) from registration_param import * registry register()(f3) registry register(active=False)(f2) registry ``` ## The parameterized Clock Decorator ``` import time DEFAULT_FMT = '[{elapsed:0.8f}s] {name}({args}) -> {result}' def clock(fmt=DEFAULT_FMT): def decorate(func): def clocked(*_args): t0 = time.time() _result = func(*_args) elapsed = time.time() - t0 name = func.__name__ args = ', '.join(repr(arg) for arg in _args) result = repr(_result) print(fmt.format(**locals())) return result return clocked return decorate @clock() def snooze(seconds): time.sleep(seconds) for i in range(3): snooze(.123) import time from clockdeco_param import clock @clock('{name}: {elapsed}s') def snooze(seconds): time.sleep(seconds) for i in range(3): snooze(.123) @clock('{name}({args}) dt={elapsed:0.3f}s') def snooze(seconds): time.sleep(seconds) for i in range(3): snooze(.123) ```
github_jupyter
``` #hide #skip ! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab #hide #all_slow ``` To use `fastai.medical.imaging` you'll need to: ```bash conda install pyarrow pip install pydicom kornia opencv-python scikit-image ``` To run this tutorial on Google Colab, you'll need to uncomment the following two lines and run the cell: ``` #!conda install pyarrow #!pip install pydicom kornia opencv-python scikit-image nbdev from fastai.basics import * from fastai.callback.all import * from fastai.vision.all import * from fastai.medical.imaging import * import pydicom import pandas as pd #hide from nbdev.showdoc import * ``` # Tutorial - Binary classification of chest X-rays > In this tutorial we will build a classifier that distinguishes between chest X-rays with pneumothorax and chest X-rays without pneumothorax. The image data is loaded directly from the DICOM source files, so no prior DICOM data handling is needed. This tutorial also goes through what DICOM images are and review at a high level how to evaluate the results of the classifier. ## Download and import of X-ray DICOM files First, we will use the `untar_data` function to download the _siim_small_ folder containing a subset (250 DICOM files, \~30MB) of the [SIIM-ACR Pneumothorax Segmentation](https://doi.org/10.1007/s10278-019-00299-9) \[1\] dataset. The downloaded _siim_small_ folder will be stored in your _\~/.fastai/data/_ directory. The variable `pneumothorax-source` will store the absolute path to the _siim_small_ folder as soon as the download is complete. ``` pneumothorax_source = untar_data(URLs.SIIM_SMALL) ``` The _siim_small_ folder has the following directory/file structure: ![siim_folder_structure.jpg](images/siim_folder_structure.jpeg) ## What are DICOMs? 
**DICOM**(**D**igital **I**maging and **CO**mmunications in **M**edicine) is the de-facto standard that establishes rules that allow medical images(X-Ray, MRI, CT) and associated information to be exchanged between imaging equipment from different vendors, computers, and hospitals. The DICOM format provides a suitable means that meets health infomation exchange (HIE) standards for transmision of health related data among facilites and HL7 standards which is the messaging standard that enables clinical applications to exchange data DICOM files typically have a `.dcm` extension and provides a means of storing data in separate ‘tags’ such as patient information as well as image/pixel data. A DICOM file consists of a header and image data sets packed into a single file. By extracting data from these tags one can access important information regarding the patient demographics, study parameters, etc. 16 bit DICOM images have values ranging from `-32768` to `32768` while 8-bit greyscale images store values from `0` to `255`. The value ranges in DICOM images are useful as they correlate with the [Hounsfield Scale](https://en.wikipedia.org/wiki/Hounsfield_scale) which is a quantitative scale for describing radiodensity ### Plotting the DICOM data To analyze our dataset, we load the paths to the DICOM files with the `get_dicom_files` function. When calling the function, we append _train/_ to the `pneumothorax_source` path to choose the folder where the DICOM files are located. We store the path to each DICOM file in the `items` list. ``` items = get_dicom_files(pneumothorax_source/f"train/") ``` Next, we split the `items` list into a train `trn` and validation `val` list using the `RandomSplitter` function: ``` trn,val = RandomSplitter()(items) ``` Pydicom is a python package for parsing DICOM files, making it easier to access the `header` of the DICOM as well as coverting the raw `pixel_data` into pythonic structures for easier manipulation. 
`fastai.medical.imaging` uses `pydicom.dcmread` to load the DICOM file. To plot an X-ray, we can select an entry in the `items` list and load the DICOM file with `dcmread`. ``` patient = 7 xray_sample = items[patient].dcmread() ``` To view the `header` ``` xray_sample ``` Explanation of each element is beyond the scope of this tutorial but [this](http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.3.html#sect_C.7.6.3.1.4) site has some excellent information about each of the entries Some key pointers on the tag information above: - **Pixel Data** (7fe0 0010) - This is where the raw pixel data is stored. The order of pixels encoded for each image plane is left to right, top to bottom, i.e., the upper left pixel (labeled 1,1) is encoded first - **Photometric Interpretation** (0028, 0004) - also known as color space. In this case it is `MONOCHROME2` where pixel data is represented as a single monochrome image plane where low values=dark, high values=bright. If the colorspace was `MONOCHROME` then the low values=bright and high values=dark info. - **Samples per Pixel** (0028, 0002) - This should be 1 as this image is monochrome. This value would be 3 if the color space was RGB for example - **Bits Stored** (0028 0101) - Number of bits stored for each pixel sample. Typical 8 bit images have a pixel range between `0` and `255` - **Pixel Represenation**(0028 0103) - can either be unsigned(0) or signed(1) - **Lossy Image Compression** (0028 2110) - `00` image has not been subjected to lossy compression. `01` image has been subjected to lossy compression. - **Lossy Image Compression Method** (0028 2114) - states the type of lossy compression used (in this case `ISO_10918_1` represents JPEG Lossy Compression) - **Pixel Data** (7fe0, 0010) - Array of 161452 elements represents the image pixel data that pydicom uses to convert the pixel data into an image. What does `PixelData` look like? 
``` xray_sample.PixelData[:200] ``` Because of the complexity in interpreting `PixelData`, pydicom provides an easy way to get it in a convenient form: `pixel_array` which returns a `numpy.ndarray` containing the pixel data: ``` xray_sample.pixel_array, xray_sample.pixel_array.shape ``` You can then use the `show` function to view the image ``` xray_sample.show() ``` You can also conveniently create a dataframe with all the `tag` information as columns for all the images in a dataset by using `from_dicoms` ``` dicom_dataframe = pd.DataFrame.from_dicoms(items) dicom_dataframe[:5] ``` Next, we need to load the labels for the dataset. We import the _labels.csv_ file using pandas and print the first five entries. The **file** column shows the relative path to the _.dcm_ file and the **label** column indicates whether the chest x-ray has a pneumothorax or not. ``` df = pd.read_csv(pneumothorax_source/f"labels.csv") df.head() ``` Now, we use the `DataBlock` class to prepare the DICOM data for training. As we are dealing with DICOM images, we need to use `PILDicom` as the `ImageBlock` category. This is so the `DataBlock` will know how to open the DICOM images. As this is a binary classification task we will use `CategoryBlock` ``` pneumothorax = DataBlock(blocks=(ImageBlock(cls=PILDicom), CategoryBlock), get_x=lambda x:pneumothorax_source/f"{x[0]}", get_y=lambda x:x[1], batch_tfms=aug_transforms(size=224)) dls = pneumothorax.dataloaders(df.values, num_workers=0) ``` Additionally, we plot a first batch with the specified transformations: ``` dls = pneumothorax.dataloaders(df.values) dls.show_batch(max_n=16) ``` ## Training We can then use the `cnn_learner` function and initiate the training. ``` learn = cnn_learner(dls, resnet34, metrics=accuracy) ``` Note that if you do not select a loss or optimizer function, fastai will try to choose the best selection for the task. 
You can check the loss function by calling `loss_func` ``` learn.loss_func ``` And you can do the same for the optimizer by calling `opt_func` ``` learn.opt_func ``` Use `lr_find` to try to find the best learning rate ``` learn.lr_find() learn.fit_one_cycle(1) learn.predict(pneumothorax_source/f"train/Pneumothorax/000004.dcm") ``` When predicting on an image `learn.predict` returns a tuple (class, class tensor and [probabilities of each class]).In this dataset there are only 2 classes `No Pneumothorax` and `Pneumothorax` hence the reason why each probability has 2 values, the first value is the probability whether the image belongs to `class 0` or `No Pneumothorax` and the second value is the probability whether the image belongs to `class 1` or `Pneumothorax` ``` tta = learn.tta(use_max=True) learn.show_results(max_n=16) interp = Interpretation.from_learner(learn) interp.plot_top_losses(2) ``` ## Result Evaluation Medical models are predominantly high impact so it is important to know how good a model is at detecting a certain condition. This model has an accuracy of 56%. Accuracy can be defined as the number of correctly predicted data points out of all the data points. 
However in this context we can define accuracy as the probability that the model is correct and the patient has the condition **PLUS** the probability that the model is correct and the patient does not have the condition There are some other key terms that need to be used when evaluating medical models: **False Positive & False Negative** - **False Positive** is an error in which a test result improperly indicates presence of a condition, such as a disease (the result is positive), when in reality it is not present - **False Negative** is an error in which a test result improperly indicates no presence of a condition (the result is negative), when in reality it is present **Sensitivity & Specificity** - **Sensitivity or True Positive Rate** is where the model classifies a patient has the disease given the patient actually does have the disease. Sensitivity quantifies the avoidance of false negatives Example: A new test was tested on 10,000 patients, if the new test has a sensitivity of 90% the test will correctly detect 9,000 (True Positive) patients but will miss 1000 (False Negative) patients that have the condition but were tested as not having the condition - **Specificity or True Negative Rate** is where the model classifies a patient as not having the disease given the patient actually does not have the disease. Specificity quantifies the avoidance of false positives [Understanding and using sensitivity, specificity and predictive values](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2636062/) is a great paper if you are interested in learning more about understanding sensitivity, specificity and predictive values. **PPV and NPV** Most medical testing is evaluated via **PPV** (Positive Predictive Value) or **NPV** (Negative Predictive Value). 
**PPV** - if the model predicts a patient has a condition what is the probability that the patient actually has the condition **NPV** - if the model predicts a patient does not have a condition what is the probability that the patient actually does not have the condition The ideal value of the PPV, with a perfect test, is 1 (100%), and the worst possible value would be zero The ideal value of the NPV, with a perfect test, is 1 (100%), and the worst possible value would be zero **Confusion Matrix** The confusion matrix is plotted against the `valid` dataset ``` interp = ClassificationInterpretation.from_learner(learn) losses,idxs = interp.top_losses() len(dls.valid_ds)==len(losses)==len(idxs) interp.plot_confusion_matrix(figsize=(7,7)) ``` You can also reproduce the results interpreted from plot_confusion_matrix like so: ``` upp, low = interp.confusion_matrix() tn, fp = upp[0], upp[1] fn, tp = low[0], low[1] print(tn, fp, fn, tp) ``` Note that **Sensitivity = True Positive/(True Positive + False Negative)** ``` sensitivity = tp/(tp + fn) sensitivity ``` In this case the model has a sensitivity of 40% and hence is only capable of correctly detecting 40% True Positives (i.e. who have Pneumothorax) but will miss 60% of False Negatives (patients that actually have Pneumothorax but were told they did not! Not a good situation to be in). This is also know as a **Type II error** **Specificity = True Negative/(False Positive + True Negative)** ``` specificity = tn/(fp + tn) specificity ``` The model has a specificity of 63% and hence can correctly detect 63% of the time that a patient does **not** have Pneumothorax but will incorrectly classify that 37% of the patients have Pneumothorax (False Postive) but actually do not. 
This is also known as a **Type I error** **Positive Predictive Value (PPV)** ``` ppv = tp/(tp+fp) ppv ``` In this case the model performs poorly in correctly predicting patients with Pneumothorax **Negative Predictive Value (NPV)** ``` npv = tn/(tn+fn) npv ``` This model is better at predicting patients with No Pneumothorax **Calculating Accuracy** The accuracy of this model as mentioned before was 56% but how was this calculated? We can consider accuracy as: **accuracy = sensitivity x prevalence + specificity * (1 - prevalence)** Where **prevalence** is a statistical concept referring to the number of cases of a disease that are present in a particular population at a given time. The prevalence in this case is how many patients in the valid dataset have the condition compared to the total number. To view the files in the valid dataset you call `dls.valid_ds.cat` ``` val = dls.valid_ds.cat #val[0] ``` There are 15 Pneumothorax images in the valid set (which has a total of 50 images and can be checked by using `len(dls.valid_ds)`) so the prevalence here is 15/50 = 0.3 ``` prevalence = 15/50 prevalence accuracy = (sensitivity * prevalence) + (specificity * (1 - prevalence)) accuracy ``` _**Citations:**_ \[1\] _Filice R et al. Crowdsourcing pneumothorax annotations using machine learning annotations on the NIH chest X-ray dataset. J Digit Imaging (2019). https://doi.org/10.1007/s10278-019-00299-9_
github_jupyter
``` import requests from bs4 import BeautifulSoup import re import os import time # URL url = 'http://www.slmpd.org/CrimeReport.aspx' # Path to save location path = 'raw_data/' def get_filename(headers): """Parses out the filename from a response header.""" return headers['content-disposition'].split('=')[1] ``` ###### Get all the dataset eventtargets There five hidden form parameters on the page.<br><br> \__EVENTARGUMENT<br> \__EVENTVALIDATION<br> \__VIEWSTATE<br> \__EVENTTARGET<br> \__VIEWSTATEGENERATOR<br> The first page can be requested by a normal get request to the url. The other page requests require 'Argument' and 'Target' arguments. Target, Validation, State, and Generator are required for requests for the files. Some combination of them uniquely identify which file should be returned. The values for Target are reused, so at least one of those other parameters is necesary. ``` payload = {} # The first page request is a get to the url. r = requests.get(url) soup = BeautifulSoup(r.content, "html.parser") # Get the three hidden parameter values. payload_raw = soup.find_all('input') payload = {x['name']:x['value'] for x in payload_raw} # List to hold eventtargets. datasets_eventtargets_raw = [] # Get the data for this page and store it. links = soup.find_all(href=re.compile("javascript:__doPostBack\('.*D',''\)")) datasets_eventtargets_raw.append((1, dict(payload), links)) # Set EventTarget for page requesting. payload['__EVENTTARGET'] = 'GridView1' # Loop through all pages. for i in range(2,7): # Set the eventargument value in the payload. payload['__EVENTARGUMENT'] = 'Page$' + str(i) # Request the page, make a soup object, get all relevant tags. r = requests.post(url, data=payload) soup = BeautifulSoup(r.content, "html.parser") # Get the three hidden parameter values. 
inputs_raw = soup.find_all('input') inputs = {x['name']:x['value'] for x in inputs_raw} links = soup.find_all(href=re.compile("javascript:__doPostBack\('.*D',''\)")) datasets_eventtargets_raw.append((i, inputs, links)) ``` datasets_eventtargets_raw now contains a list of tuples 1. page number 2. dict of inputs for three of the five parameters to the js function 3. list of js function calls that contain the argument values for requesting the files ``` # Get list of files that have already been downloaded. file_list = set(os.listdir('raw_data/')) # Loop through the list of tuples and use the payload dict from each tuple to call all the files from the list in that tuple. pat = re.compile(r"\(\'(.+?)\'\)?") for tup in datasets_eventtargets_raw: # Parse out the argument value and filename (for validating responses). datasets_eventtargets = [(pat.findall(x['href'])[0], x.text) for x in tup[2]] # Get the three common arguments for all the files on this page. payload = tup[1] # Add a blank fourth. payload['__EVENTARGUMENT'] = '' # Loop through the parsed file arguments and request the files. for t in datasets_eventtargets: if t[1] not in file_list: # Check if this file has already been downloaded. payload['__EVENTTARGET'] = t[0] r = requests.post(url, data=payload) if get_filename(r.headers) == t[1]: # Save the file. # TODO: Should rename the files so year is first so they sort correctly. with open(os.path.join(path, get_filename(r.headers)), 'wb') as f: f.write(r.content) else: print('Error with page: ' + str(tup[0]) + ', argument: ' + t[0]) time.sleep(5) #to avoid connection issues with the server else: print(t[1] + ' has already been downloaded.') ```
github_jupyter
## Load libraries ``` !pip install -q -r requirements.txt !git clone https://github.com/davidtvs/pytorch-lr-finder.git && mv pytorch-lr-finder lrfinder !pip install -q -r lrfinder/requirements.txt import sys import os import numpy as np import pandas as pd from PIL import Image import torch import torch.nn as nn import torch.utils.data as D from torch.optim.lr_scheduler import ExponentialLR import torch.nn.functional as F from torch.autograd import Variable from torchvision import models, transforms from ignite.engine import Events from scripts.ignite import create_supervised_evaluator, create_supervised_trainer from ignite.metrics import Loss, Accuracy from ignite.contrib.handlers.tqdm_logger import ProgressBar from ignite.handlers import EarlyStopping, ModelCheckpoint from ignite.contrib.handlers import LinearCyclicalScheduler, CosineAnnealingScheduler from tqdm import tqdm_notebook from sklearn.model_selection import train_test_split from scripts.evaluate import eval_model from lrfinder.lr_finder import LRFinder import warnings warnings.filterwarnings('ignore') ``` ## Define dataset and model ``` img_dir = '../input/rxrxairgb' path_data = '../input/rxrxaicsv' device = 'cuda' batch_size = 256 learning_rate=1e-10 torch.manual_seed(0) model_name = 'resnet18' jitter = (0.6, 1.4) class ImagesDS(D.Dataset): # taken textbook from https://arxiv.org/pdf/1812.01187.pdf transform_train = transforms.Compose([ transforms.RandomResizedCrop(224), transforms.ColorJitter(brightness=jitter, contrast=jitter, saturation=jitter, hue=.1), transforms.RandomHorizontalFlip(p=0.5), # PCA Noise should go here, transforms.ToTensor(), transforms.Normalize(mean=(123.68, 116.779, 103.939), std=(58.393, 57.12, 57.375)) ]) transform_validation = transforms.Compose([ transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=(123.68, 116.779, 103.939), std=(58.393, 57.12, 57.375)) ]) def __init__(self, df, img_dir=img_dir, mode='train', validation=False, site=1): self.records = 
df.to_records(index=False) self.site = site self.mode = mode self.img_dir = img_dir self.len = df.shape[0] self.validation = validation @staticmethod def _load_img_as_tensor(file_name, validation): with Image.open(file_name) as img: if not validation: return ImagesDS.transform_train(img) else: return ImagesDS.transform_validation(img) def _get_img_path(self, index, site=1): experiment, well, plate = self.records[index].experiment, self.records[index].well, self.records[index].plate return f'{self.img_dir}/{self.mode}/{experiment}_{plate}_{well}_s{site}.jpeg' def __getitem__(self, index): img1 = self._load_img_as_tensor(self._get_img_path(index), self.validation) if self.mode == 'train': return img1, int(self.records[index].sirna) else: return img1, self.records[index].id_code def __len__(self): return self.len # dataframes for training, cross-validation, and testing df = pd.read_csv(path_data+'/train.csv') df_train, df_val = train_test_split(df, test_size = 0.1, random_state=42) df_test = pd.read_csv(path_data+'/test.csv') # pytorch training dataset & loader ds = ImagesDS(df_train, mode='train', validation=False) loader = D.DataLoader(ds, batch_size=batch_size, shuffle=True, num_workers=4) # pytorch cross-validation dataset & loader ds_val = ImagesDS(df_val, mode='train', validation=True) val_loader = D.DataLoader(ds_val, batch_size=batch_size, shuffle=True, num_workers=4) # pytorch test dataset & loader ds_test = ImagesDS(df_test, mode='test', validation=True) tloader = D.DataLoader(ds_test, batch_size=batch_size, shuffle=False, num_workers=4) classes = 1108 model = getattr(models, model_name)(pretrained=True) num_ftrs = model.fc.in_features model.fc = nn.Linear(num_ftrs, classes) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) lr_finder = LRFinder(model, optimizer, criterion, device=device) lr_finder.range_test(loader, end_lr=10, num_iter=200, step_mode="exp") lr_finder.plot() ```
github_jupyter
# collections module ``` # some utilities import pprint from data_utils import get_article_items pp = pprint.PrettyPrinter(indent=4) ``` # OrderedDict ``` # A type of dictionary that keeps track of the order of the keys as they are added. # dictionaries in python <=3.5, in python 3.6 and up order is kept by default using just `dict` data = {'b': 2, 'a': 1, 'c':3} for key in data: print(key) from collections import OrderedDict # now with an OrderedDict o_dict = OrderedDict(sorted(data.items())) for key in o_dict: print(key) ``` # defaultdict ``` from collections import defaultdict # The defaultdict is a subclass of Python’s dict that accepts a default_factory as its primary argument. abstract = get_article_items()[0]['abstract']['content'][0]['text'] words = abstract.split(' ') # with normal dict reg_dict = {} for word in words: if word in reg_dict: reg_dict[word] += 1 else: reg_dict[word] = 1 print(reg_dict) # now with a defaultdict d_dict = defaultdict(int) for word in words: d_dict[word] += 1 print(d_dict) ``` # namedtuple ``` from collections import namedtuple # factory function for creating tuple subclasses with named fields data = get_article_items()[0] Article = namedtuple('Article', ['title', 'doi']) article = Article(title=data['title'], doi=data['doi']) print(Article) print('title: ', article.title) print('doi: ', article.doi) ``` # ChainMap ``` from collections import ChainMap # A ChainMap is a class that provides the ability to link multiple mappings together # such that they end up being a single unit. 
base_config = {'ip': '127.0.0.1', 'port': 8080, 'proc_name': 'base', 'threading': False} custom_config = {'proc_name': 'custom', 'port': 8081} os_config = {'threading': True} # overrides values from left >>> right CONFIG = ChainMap(custom_config, os_config, base_config) for k, v in CONFIG.items(): print('{0}: {1}'.format(k, v)) ``` # Counter ``` # convenient and fast tallies from collections import Counter """ Small task to grab some article abstracts, find the 20 most common words used. """ # get a list of abstract texts abstracts = [a['abstract']['content'][0]['text'] for a in get_article_items()] # join them all up into one string and then seperate each word by space into a list # e.g. ['The', 'brain', 'needs', 'to', 'predict', 'how', 'the', 'body', 'reacts', ...] all_words = ' '.join(abstracts).split(' ') # a possible way to get the count using just dict result = {} for word in all_words: if result.get(word): result[word] += 1 else: result[word] = 1 print(result) # now you have to find the 20 most common # ??... # an easier way using Counter() result = Counter(all_words) print(result.most_common(20)) ```
github_jupyter
## In the last chapter, you learned how to graphically explore data. In this chapter, you will compute useful summary statistics, which serve to concisely describe salient features of a data set with a few numbers. ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.datasets import load_iris import warnings warnings.filterwarnings('ignore') data = load_iris() df = pd.DataFrame(data['data']) df.columns = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)'] df['species'] = '' df['species'][:50] = 'setosa' df['species'][50:100] = 'versicolor' df['species'][100:150] = 'virginica' df.head() versicolor_petal_length = df[df['species']=='versicolor']['petal length (cm)'].values setosa_petal_length = df[df['species'] == 'setosa']['petal length (cm)'].values virginica_petal_length = df[df['species'] == 'virginica']['petal length (cm)'].values ``` ## Computing means The mean of all measurements gives an indication of the typical magnitude of a measurement. It is computed using np.mean(). ``` # Compute the mean: mean_length_vers mean_length_vers = np.mean(versicolor_petal_length) mean_length_vers ``` ## Computing percentiles In this exercise, you will compute the percentiles of petal length of Iris versicolor ``` # Specify array of percentiles: percentiles percentiles = np.array([2.5, 25, 50, 75, 97.5]) # Compute percentiles: ptiles_vers ptiles_vers = np.percentile(versicolor_petal_length, percentiles) ptiles_vers ``` ## Comparing percentiles to ECDF To see how the percentiles relate to the ECDF, you will plot the percentiles of Iris versicolor petal lengths you calculated in the last exercise on the ECDF plot you generated in chapter 1. The percentile variables from the previous exercise are available in the workspace as ptiles_vers and percentiles. 
Note that to ensure the Y-axis of the ECDF plot remains between 0 and 1, you will need to rescale the percentiles array accordingly - in this case, dividing it by 100. ``` #!pip install ipynb from ipynb.fs.full.Module1_Graphical_exploratory_data_analysis import ecdf; x_vers, y_vers = ecdf(versicolor_petal_length) # Plot the ECDF plt.figure(figsize = (8,6)) plt.plot(x_vers, y_vers, '.') plt.xlabel('petal length (cm)') plt.ylabel('ECDF') # Overlay percentiles as red x's plt.plot(ptiles_vers, percentiles/100, marker='D', color='red', linestyle='none') ``` ## Box-and-whisker plot Making a box plot for the petal lengths is unnecessary because the iris data set is not too large and the bee swarm plot works fine. However, it is always good to get some practice. Make a box plot of the iris petal lengths. You have a pandas DataFrame, df, which contains the petal length data, in your namespace. Inspect the data frame df in the IPython shell using df.head() to make sure you know what the pertinent columns are. ``` # Create box plot with Seaborn's default settings sns.boxplot(x = 'species', y = 'petal length (cm)' ,data = df) # Label the axes plt.xlabel('species') plt.ylabel('petal length (cm)') ``` ## Computing the variance It is important to have some understanding of what commonly-used functions are doing under the hood. Though you may already know how to compute variances, this is a beginner course that does not assume so. In this exercise, we will explicitly compute the variance of the petal length of Iris veriscolor using the equations. We will then use np.var() to compute it. __Instructions__ - Create an array called differences that is the difference between the petal lengths (versicolor_petal_length) and the mean petal length. The variable versicolor_petal_length is already in your namespace as a NumPy array so you can take advantage of NumPy's vectorized operations. - Square each element in this array. For example, x**2 squares each element in the array x. 
Store the result as diff_sq. - Compute the mean of the elements in diff_sq using np.mean(). Store the result as variance_explicit. - Compute the variance of versicolor_petal_length using np.var(). Store the result as variance_np. - Print both variance_explicit and variance_np in one print call to make sure they are consistent. ``` # Array of differences to mean: differences differences = np.array(versicolor_petal_length - np.mean(versicolor_petal_length)) # Square the differences: diff_sq diff_sq = differences**2 # Compute the mean square difference: variance_explicit variance_explicit = np.mean(diff_sq) # Compute the variance using NumPy: variance_np variance_np = np.var(versicolor_petal_length) # Print the results variance_explicit ,variance_np ``` ## The standard deviation and the variance The standard deviation is the square root of the variance. You will see this for yourself by computing the standard deviation using np.std() and comparing it to what you get by computing the variance with np.var() and then computing the square root. ``` # Compute the variance: variance variance = np.var(versicolor_petal_length) # Print the square root of the variance print(np.sqrt(variance)) # Print the standard deviation print(np.std(versicolor_petal_length)) ``` ## Scatter plots When you made bee swarm plots, box plots, and ECDF plots in previous exercises, you compared the petal lengths of different species of iris. But what if you want to compare two properties of a single species? This is exactly what we will do in this exercise. We will make a scatter plot of the petal length and width measurements of Anderson's Iris versicolor flowers. If the flower scales (that is, it preserves its proportion as it grows), we would expect the length and width to be correlated. 
``` versicolor_petal_width = df[df['species']=='versicolor']['petal width (cm)'].values setosa_petal_width = df[df['species'] == 'setosa']['petal width (cm)'].values virginica_petal_width = df[df['species'] == 'virginica']['petal width (cm)'].values # Make a scatter plot plt.plot(versicolor_petal_length, versicolor_petal_width, marker = '.', linestyle = 'none') # Label the axes plt.xlabel('petal length(cm)') plt.ylabel('petal width (cm)') ``` ## Computing the covariance The covariance may be computed using the Numpy function np.cov(). For example, we have two sets of data x and y, np.cov(x, y) returns a 2D array where entries [0,1] and [1,0] are the covariances. Entry [0,0] is the variance of the data in x, and entry [1,1] is the variance of the data in y. This 2D output array is called the covariance matrix, since it organizes the self- and covariance. __Instructions__ - Use np.cov() to compute the covariance matrix for the petal length (versicolor_petal_length) and width (versicolor_petal_width) of I. versicolor. - Print the covariance matrix. - Extract the covariance from entry [0,1] of the covariance matrix. Note that by symmetry, entry [1,0] is the same as entry [0,1]. ``` # Compute the covariance matrix: covariance_matrix covariance_matrix = np.cov(versicolor_petal_length, versicolor_petal_width) # Print covariance matrix print(covariance_matrix) # Extract covariance of length and width of petals: petal_cov petal_cov = covariance_matrix[0,1] # Print the length/width covariance print(petal_cov) ``` ## Computing the Pearson correlation coefficient the Pearson correlation coefficient, also called the Pearson r, is often easier to interpret than the covariance. It is computed using the np.corrcoef() function. Like np.cov(), it takes two arrays as arguments and returns a 2D array. Entries [0,0] and [1,1] are necessarily equal to 1 (can you think about why?), and the value we are after is entry [0,1]. 
In this exercise, you will write a function, pearson_r(x, y) that takes in two arrays and returns the Pearson correlation coefficient. You will then use this function to compute it for the petal lengths and widths of I. versicolor. ``` def pearson_r(x, y): """Compute Pearson correlation coefficient between two arrays.""" # Compute correlation matrix: corr_mat corr_mat = np.corrcoef(x, y) # Return entry [0,1] return corr_mat[0,1] # Compute Pearson correlation coefficient for I. versicolor r = pearson_r(versicolor_petal_width, versicolor_petal_length) # Print the result r ```
github_jupyter
⊕ [Tutorial: Python in 10 Minutes - ArangoDB database](https://www.arangodb.com/tutorials/tutorial-python/) ``` from pyArango.connection import * conn = Connection(username="root", password="") ``` When this code executes, it initializes the server connection on the conn variable. By default, pyArango attempts to establish a connection to http://127.0.0.1:8529. That is, it wants to initialize a remote connection to your local host on port 8529. If you are hosting ArangoDB on a different server or use a different port, you need to set these options when you instantiate the Connection class. ``` # db = conn.createDatabase(name="school") db = conn["school"] db >>> studentsCollection = db.createCollection(name="Students") >>> db["Students"] ``` ## 创建集合 ArangoDB将文档和边缘分组到集合中。这类似于关系数据库中表的概念,但关键的区别是集合是无模式的。 在pyArango中,您可以通过调用createCollection()给定数据库上的方法来创建集合。例如,在上一节中,您创建了一个school数据库。您可能希望为该学生提供该数据库的集合。 ``` # >>> studentsCollection = db.createCollection(name="Students") >>> db["Students"] ``` ## 创建文档 设置数据库和集合后,您可以开始向它们添加数据。继续与Relational数据库进行比较,其中集合是表,文档是该表上的一行。但是,与行不同,文档是无模式的。您可以包含应用程序所需的任何值排列。 例如,将学生添加到集合中: ``` >>> doc1 = studentsCollection.createDocument() >>> doc1["name"] = "John Smith" >>> doc1 >>> doc2 = studentsCollection.createDocument() >>> doc2["firstname"] = "Emily" >>> doc2["lastname"] = "Bronte" >>> doc2 ``` 该文档显示_id为“无”,因为您尚未将其保存到ArangoDB。这意味着变量存在于Python代码中,但不存在于数据库中。ArangoDB _id通过将集合名称与值配对来构造值_key。它还为您处理分配,您只需设置密钥并保存文档。 ``` >>> doc1._key = "johnsmith" >>> doc1.save() >>> doc1 studentsCollection["johnsmith"] ``` 您可能希望从数据库中删除文档。这可以通过该delete()方法完成。 ``` >>> tom = studentsCollection["johnsmith"] >>> tom.delete() # >>> studentsCollection["johnsmith"] students = [('Oscar', 'Wilde', 3.5), ('Thomas', 'Hobbes', 3.2), ('Mark', 'Twain', 3.0), ('Kate', 'Chopin', 3.8), ('Fyodor', 'Dostoevsky', 3.1), ('Jane', 'Austen',3.4), ('Mary', 'Wollstonecraft', 3.7), ('Percy', 'Shelley', 3.5), ('William', 'Faulkner', 3.8), ('Charlotte', 'Bronte', 3.0)] for (first, last, gpa) in 
students: doc = studentsCollection.createDocument() doc['name'] = "%s %s" % (first, last) doc['gpa'] = gpa doc['year'] = 2017 doc._key = ''.join([first, last]).lower() doc.save() # 访问数据库中的文档。最简单的方法是使用_key值。 def report_gpa(document): print("Student: %s" % document['name']) print("GPA: %s" % document['gpa']) kate = studentsCollection['katechopin'] report_gpa(kate) # 当您从ArangoDB中读取文档到您的应用程序时,您将创建该文档的本地副本。然后,您可以对文档进行操作,对其进行任何更改,然后使用该save()方法将结果推送到数据库。 def update_gpa(key, new_gpa): doc = studentsCollection[key] doc['gpa'] = new_gpa doc.save() # 您可能希望对给定集合中的所有文档进行操作。使用该fetchAll()方法,您可以检索并迭代文档列表。例如,假设这是学期结束,你想知道哪些学生的平均成绩高于3.5: def top_scores(col, gpa): print("Top Soring Students:") for student in col.fetchAll(): if student['gpa'] >= gpa: print("- %s" % student['name']) top_scores(studentsCollection, 3.5) ``` ## AQL用法 除了上面显示的Python方法之外,ArangoDB还提供了一种查询语言(称为AQL),用于检索和修改数据库上的文档。在pyArango中,您可以使用该AQLQuery()方法发出这些查询。 例如,假设您要检索ArangoDB中所有文档的密钥: ``` aql = "FOR x IN Students RETURN x._key" queryResult = db.AQLQuery(aql, rawResults=True, batchSize=100) for key in queryResult: print(key) ``` 在上面的示例中,该AQLQuery()方法将AQL查询作为参数,并带有两个附加选项: * rawResults 定义是否需要查询返回的实际结果。 * batchSize 当查询返回的结果多于给定值时,pyArango驱动程序会自动请求新批次。 请记住,文件的顺序不能保证。如果您需要按特定顺序排列结果,请在AQL查询中添加sort子句。 ⊕ [Graph | Example AQL Queries for Graphs | Cookbook | ArangoDB Documentation](https://www.arangodb.com/docs/stable/cookbook/graph-example-actors-and-movies.html) During this solution we will be using arangosh to create and query the data. All the AQL queries are strings and can simply be copied over to your favorite driver instead of arangosh. Create a Test Dataset in arangosh: ... 示例是使用nodejs编写的. 
## 使用AQL插入文档 与上面的文档创建类似,您也可以使用AQL将文档插入ArangoDB。这是通过INSERT使用bindVars该AQLQuery()方法选项的语句完成的。 ``` >>> doc = {'_key': 'denisdiderot', 'name': 'Denis Diderot', 'gpa': 3.7} >>> bind = {"doc": doc} >>> aql = "INSERT @doc INTO Students LET newDoc = NEW RETURN newDoc" >>> queryResult = db.AQLQuery(aql, bindVars=bind) queryResult[0] ``` ## 使用AQL更新文档 如果数据库中已存在文档并且您想要修改该文档中的数据,则可以使用该UPDATE语句。例如,假设您在CSV文件中收到学生的更新成绩点平均值。 首先,检查其中一个学生的GPA以查看旧值: ``` db["Students"]["katechopin"] # 然后遍历文件,更新每个学生的GPA: with open("grades.csv", "r") as f: grades = f.read().split(',') for key,gpa in grades.items(): doc = {"gpa": float(gpa)} bind = {"doc": doc, "key": key} aql = "UPDATE @key WITH @doc IN Stdents LET pdated NEW RETRN updated" db.AQLQuery(aql, bindVars=bind) # 最后,再次检查学生的GPA。 db["Students"]["katechopin"] ```
github_jupyter
# Machine Learning artifacts management This notebook contains steps and code to demonstrate how to manage and clean up Watson Machine Learning instance. This notebook contains steps and code to work with [ibm-watson-machine-learning](https://pypi.python.org/pypi/ibm-watson-machine-learning) library available in PyPI repository. This notebook introduces commands for listing artifacts, getting artifacts details and deleting them. Some familiarity with Python is helpful. This notebook uses Python 3.8. ## Learning goals The learning goals of this notebook are: - List Watson Machine Learning artifacts. - Get artifacts details. - Delete artifacts. ## Contents This notebook contains the following parts: 1. [Setup](#setup) 2. [Manage pipelines](#pipelines) 3. [Manage model definitions](#model_definitions) 4. [Manage models](#models) 5. [Manage functions](#functions) 6. [Manage experiments](#experiments) 7. [Manage trainings](#trainings) 8. [Manage deployments](#deployments) 9. [Summary and next steps](#summary) <a id="setup"></a> ## 1. Set up the environment Before you use the sample code in this notebook, you must perform the following setup tasks: - Contact with your Cloud Pack for Data administrator and ask him for your account credentials ### Connection to WML Authenticate the Watson Machine Learning service on IBM Cloud Pack for Data. You need to provide platform `url`, your `username` and `api_key`. ``` username = 'PASTE YOUR USERNAME HERE' api_key = 'PASTE YOUR API_KEY HERE' url = 'PASTE THE PLATFORM URL HERE' wml_credentials = { "username": username, "apikey": api_key, "url": url, "instance_id": 'openshift', "version": '4.0' } ``` Alternatively you can use `username` and `password` to authenticate WML services. 
``` wml_credentials = { "username": ***, "password": ***, "url": ***, "instance_id": 'openshift', "version": '4.0' } ``` ### Install and import the `ibm-watson-machine-learning` package **Note:** `ibm-watson-machine-learning` documentation can be found <a href="http://ibm-wml-api-pyclient.mybluemix.net/" target="_blank" rel="noopener no referrer">here</a>. ``` !pip install -U ibm-watson-machine-learning from ibm_watson_machine_learning import APIClient client = APIClient(wml_credentials) ``` ### Working with spaces First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use `{PLATFORM_URL}/ml-runtime/spaces?context=icp4data` to create one. - Click New Deployment Space - Create an empty space - Go to space `Settings` tab - Copy `space_id` and paste it below **Tip**: You can also use SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cpd4.0/notebooks/python_sdk/instance-management/Space%20management.ipynb). **Action**: Assign space ID below ``` space_id = 'PASTE YOUR SPACE ID HERE' ``` You can use `list` method to print all existing spaces. ``` client.spaces.list(limit=10) ``` To be able to interact with all resources available in Watson Machine Learning, you need to set **space** which you will be using. ``` client.set.default_space(space_id) ``` <a id="pipelines"></a> ## 2. Manage pipelines List existing pipelines. If you want to list only part of pipelines use `client.pipelines.list(limit=n_pipelines)`. ``` client.pipelines.list(limit=10) ``` Get pipelines details. If you want to get only part of pipelines details use `client.pipelines.get_details(limit=n_pipelines)`. You can get each pipeline details by calling `client.pipelines.get_details()` and providing pipeline id from listed pipelines. ``` pipelines_details = client.pipelines.get_details(limit=10) print(pipelines_details) ``` Delete all pipelines. 
You can delete one pipeline by calling `client.pipelines.delete()` and providing pipeline id from listed pipelines.

```
for pipeline in pipelines_details['resources']:
    client.pipelines.delete(pipeline['metadata']['id'])
```

<a id="model_definitions"></a>
## 3. Manage model definitions

List existing model definitions. If you want to list only part of model definitions use `client.model_definitions.list(limit=n_model_definitions)`.

```
client.model_definitions.list(limit=10)
```

Get model definition details by copying model definition uid from above cell and running `client.model_definitions.get_details(model_definition_guid)`.

```
model_definition_guid = "PUT_YOUR_MODEL_DEFINITION_GUID"
model_definitions_details = client.model_definitions.get_details(model_definition_guid)
print(model_definitions_details)
```

Delete model definitions by calling `client.model_definitions.delete(model_definition_guid)`.

```
client.model_definitions.delete(model_definition_guid)
```

<a id="models"></a>
## 4. Manage models

List existing models. If you want to list only part of models use `client.repository.list_models(limit=n_models)`.

```
client.repository.list_models(limit=10)
```

Get model details by copying model uid from above cell and running `client.repository.get_details(model_guid)`.
``` model_guid = "PUT_YOUR_MODEL_GUID" model_details = client.repository.get_details(model_guid) print(model_details) ``` To download selected model from repository use: ``` client.repository.download(model_guid, <path_to_model>) # To obtain serialized model first decompress it !tar xzvf <path_to_model> ``` ``` client.repository.download(model_guid) ``` Instead of downloading model can be also loaded directly to runtime using: ``` model = client.repository.load(model_guid) # Loaded model can be used to perform prediction locally # If loaded model was a scikit-learn pipeline we can use 'predict' method model.predict(<test_data>) ``` ``` client.repository.load(model_guid) ``` Delete model from repository by calling `client.repository.delete(model_guid)`. ``` client.repository.delete(model_guid) ``` <a id="functions"></a> ## 5. Manage functions List existing functions. If you want to list only part of functions use `client.repository.list_functions(limit=n_functions)`. ``` client.repository.list_functions(limit=10) ``` Get function details by copying function uid from above cell and running `client.repository.get_details(function_guid)`. ``` function_guid = "PUT_YOUR_FUNCTION_GUID" function_details = client.repository.get_details(function_guid) print(function_details) ``` Delete function from repository by calling `client.repository.delete(function_guid)`. ``` client.repository.delete(function_guid) ``` <a id="experiments"></a> ## 6. Manage experiments List existing experiments. If you want to list only part of experiments use `client.pipelines.list(limit=n_experiments)`. ``` client.experiments.list(limit=10) ``` Get experiments details. If you want to get only part of experiments details use `client.experiments.get_details(limit=n_experiments)`. You can get each experiment details by calling `client.experiments.get_details()` and providing experiment id from listed experiments. 
``` experiments_details = client.experiments.get_details() print(experiments_details) ``` Delete all experiments. You can delete one experiment by calling `client.experiments.delete()` and providing experiment id from listed experiments. ``` for experiment in experiments_details['resources']: client.experiments.delete(experiment['metadata']['id']) ``` <a id="trainings"></a> ## 7. Manage trainings List existing trainings. If you want to list only part of trainings use `client.training.list(limit=n_trainings)`. ``` client.training.list(limit=10) ``` Get trainings details. If you want to get only part of trainings details use `client.training.get_details(limit=n_trainings)`. You can get each training details by calling `client.training.get_details()` and providing training id from listed trainings. ``` trainings_details = client.training.get_details(limit=10) print(trainings_details) ``` Delete all trainings. You can delete one training by calling `client.training.cancel()` and providing training id from listed trainings. **Note** The `client.training.cancel()` method has `hard_delete` parameter. Please change it to: - True - to delete the completed or canceled training runs. - False - to cancel the currently running training run. Default value is `False`. ``` for training in trainings_details['resources']: client.training.cancel(training['metadata']['id']) ``` <a id="deployments"></a> ## 8. Manage deployments List existing deployments. If you want to list only part of deployments use `client.deployments.list(limit=n_deployments)`. ``` client.deployments.list(limit=10) ``` Get deployments details. If you want to get only part of deployments details use `client.deployments.get_details(limit=n_deployments)`. You can get each deployment details by calling `client.deployments.get_details()` and providing deployment id from listed deployments. ``` deployments_details = client.deployments.get_details() print(deployments_details) ``` Delete all deployments. 
You can delete one deployment by calling `client.deployments.delete()` and providing deployment id from listed deployments. ``` for deployment in deployments_details['resources']: client.deployments.delete(deployment['metadata']['id']) ``` <a id="summary"></a> ## 9. Summary and next steps You successfully completed this notebook! You learned how to use ibm-watson-machine-learning client for Watson Machine Learning instance management and clean up. Check out our <a href="https://dataplatform.cloud.ibm.com/docs/content/analyze-data/wml-setup.html" target="_blank" rel="noopener noreferrer">Online Documentation</a> for more samples, tutorials, documentation, how-tos, and blog posts. ### Authors **Szymon Kucharczyk**, Software Engineer at IBM. Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
github_jupyter
# Multiple linear regression model ### Let's mimic the process of building our trading model of SPY, base on the historical data of different stock markets ``` import pandas as pd import statsmodels.formula.api as smf import numpy as np import matplotlib.pyplot as plt %matplotlib inline import warnings warnings.filterwarnings("ignore") # import all stock market data into DataFrame aord = pd.DataFrame.from_csv('../data/indice/ALLOrdinary.csv') nikkei = pd.DataFrame.from_csv('../data/indice/Nikkei225.csv') hsi = pd.DataFrame.from_csv('../data/indice/HSI.csv') daxi = pd.DataFrame.from_csv('../data/indice/DAXI.csv') cac40 = pd.DataFrame.from_csv('../data/indice/CAC40.csv') sp500 = pd.DataFrame.from_csv('../data/indice/SP500.csv') dji = pd.DataFrame.from_csv('../data/indice/DJI.csv') nasdaq = pd.DataFrame.from_csv('../data/indice/nasdaq_composite.csv') spy = pd.DataFrame.from_csv('../data/indice/SPY.csv') nasdaq.head() ``` ## Step 1: Data Munging ``` # Due to the timezone issues, we extract and calculate appropriate stock market data for analysis # Indicepanel is the DataFrame of our trading model indicepanel=pd.DataFrame(index=spy.index) indicepanel['spy']=spy['Open'].shift(-1)-spy['Open'] indicepanel['spy_lag1']=indicepanel['spy'].shift(1) indicepanel['sp500']=sp500["Open"]-sp500['Open'].shift(1) indicepanel['nasdaq']=nasdaq['Open']-nasdaq['Open'].shift(1) indicepanel['dji']=dji['Open']-dji['Open'].shift(1) indicepanel['cac40']=cac40['Open']-cac40['Open'].shift(1) indicepanel['daxi']=daxi['Open']-daxi['Open'].shift(1) indicepanel['aord']=aord['Close']-aord['Open'] indicepanel['hsi']=hsi['Close']-hsi['Open'] indicepanel['nikkei']=nikkei['Close']-nikkei['Open'] indicepanel['Price']=spy['Open'] indicepanel.head() # Lets check whether do we have NaN values in indicepanel indicepanel.isnull().sum() # We can use method 'fillna()' from dataframe to forward filling the Nan values # Then we can drop the reminding Nan values indicepanel = indicepanel.fillna(method='ffill') 
indicepanel = indicepanel.dropna() # Lets check whether do we have Nan values in indicepanel now indicepanel.isnull().sum() # save this indicepanel for part 4.5 path_save = '../data/indice/indicepanel.csv' indicepanel.to_csv(path_save) print(indicepanel.shape) ``` ## Step 2: Data Spliting ``` #split the data into (1)train set and (2)test set Train = indicepanel.iloc[-2000:-1000, :] Test = indicepanel.iloc[-1000:, :] print(Train.shape, Test.shape) ``` ## Step 3: Explore the train data set ``` # Generate scatter matrix among all stock markets (and the price of SPY) to observe the association from pandas.tools.plotting import scatter_matrix sm = scatter_matrix(Train, figsize=(10, 10)) ``` ## Step 4: Check the correlation of each index between spy ``` # Find the indice with largest correlation corr_array = Train.iloc[:, :-1].corr()['spy'] print(corr_array) formula = 'spy~spy_lag1+sp500+nasdaq+dji+cac40+aord+daxi+nikkei+hsi' lm = smf.ols(formula=formula, data=Train).fit() lm.summary() ``` ## Step 5: Make prediction ``` Train['PredictedY'] = lm.predict(Train) Test['PredictedY'] = lm.predict(Test) plt.scatter(Train['spy'], Train['PredictedY']) ``` ## Step 6: Model evaluation - Statistical standard We can measure the performance of our model using some statistical metrics - **RMSE**, **Adjusted $R^2$ ** ``` # RMSE - Root Mean Squared Error, Adjusted R^2 def adjustedMetric(data, model, model_k, yname): data['yhat'] = model.predict(data) SST = ((data[yname] - data[yname].mean())**2).sum() SSR = ((data['yhat'] - data[yname].mean())**2).sum() SSE = ((data[yname] - data['yhat'])**2).sum() r2 = SSR/SST adjustR2 = 1 - (1-r2)*(data.shape[0] - 1)/(data.shape[0] -model_k -1) RMSE = (SSE/(data.shape[0] -model_k -1))**0.5 return adjustR2, RMSE def assessTable(test, train, model, model_k, yname): r2test, RMSEtest = adjustedMetric(test, model, model_k, yname) r2train, RMSEtrain = adjustedMetric(train, model, model_k, yname) assessment = pd.DataFrame(index=['R2', 'RMSE'], 
columns=['Train', 'Test']) assessment['Train'] = [r2train, RMSEtrain] assessment['Test'] = [r2test, RMSEtest] return assessment # Get the assement table fo our model assessTable(Test, Train, lm, 9, 'spy') ```
github_jupyter
``` import tensorflow as tf class Alexnet: def __init__(self, input_size, output_dimension, learning_rate): self.X = tf.placeholder(tf.float32, (None, input_size, input_size, 3)) self.Y = tf.placeholder(tf.float32, (None, output_dimension)) kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], stddev=1e-1)) bias = tf.Variable(tf.constant(0.0, shape=[64]),trainable=True) conv1 = tf.nn.relu(tf.nn.conv2d(self.X, kernel, [1, 4, 4, 1], padding='SAME') + bias) lrn1 = tf.nn.local_response_normalization(conv1, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0) pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID') kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], stddev=1e-1)) bias = tf.Variable(tf.constant(0.0, shape=[192]), trainable=True) conv2 = tf.nn.relu(tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME') + bias) lrn2 = tf.nn.local_response_normalization(conv2, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0) pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID') kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], stddev=1e-1)) bias = tf.Variable(tf.constant(0.0, shape=[384]), trainable=True) conv3 = tf.nn.relu(tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME') + bias) kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256], stddev=1e-1)) bias = tf.Variable(tf.constant(0.0, shape=[256]), trainable=True) conv4 = tf.nn.relu(tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME') + bias) kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], stddev=1e-1)) bias = tf.Variable(tf.constant(0.0, shape=[256]), trainable=True) conv5 = tf.nn.relu(tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME') + bias) pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID') pulled_shape = int(pool5.shape[1]) * int(pool5.shape[2]) * int(pool5.shape[3]) pulled_pool = tf.reshape(pool5, (-1, pulled_shape)) w = 
tf.Variable(tf.truncated_normal([pulled_shape, 4096], stddev=1e-1)) b = tf.Variable(tf.constant(0.0, shape=[4096]), trainable=True) fully1 = tf.nn.relu(tf.matmul(pulled_pool, w) + b) w = tf.Variable(tf.truncated_normal([4096, 4096], stddev=1e-1)) b = tf.Variable(tf.constant(0.0, shape=[4096]), trainable=True) fully2 = tf.nn.relu(tf.matmul(fully1, w) + b) w = tf.Variable(tf.truncated_normal([4096, output_dimension], stddev=1e-1)) b = tf.Variable(tf.constant(0.0, shape=[output_dimension]), trainable=True) self.logits = tf.matmul(fully2, w) + b self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self.logits, labels = self.Y)) self.optimizer = tf.train.AdagradOptimizer(learning_rate = learning_rate).minimize(self.cost) self.correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float")) import time import numpy as np import matplotlib.pyplot as plt import _pickle as cPickle from scipy.misc import imresize from sklearn.cross_validation import train_test_split from train import train def unpickle(file): with open(file, 'rb') as fo: dict = cPickle.load(fo,encoding='latin1') return dict unique_name = unpickle('cifar-10-batches-py/batches.meta')['label_names'] batches = unpickle('cifar-10-batches-py/data_batch_1') train_X, test_X, train_Y, test_Y = train_test_split(batches['data'], batches['labels'], test_size =0.2) BATCH_SIZE = 5 # alexnet original IMG_SIZE = 224 LEARNING_RATE = 0.0001 sess = tf.InteractiveSession() model = Alexnet(IMG_SIZE, len(unique_name), LEARNING_RATE) sess.run(tf.global_variables_initializer()) RESULTS = train(sess, model, 20, BATCH_SIZE, len(unique_name), IMG_SIZE, train_X, test_X, train_Y, test_Y) import matplotlib.pyplot as plt import seaborn as sns sns.set() plt.figure(figsize=(15,5)) plt.subplot(1, 2, 1) plt.plot(np.arange(len(RESULTS[0])), RESULTS[0],label='entropy cost') plt.legend() plt.subplot(1, 2, 2) 
plt.plot(np.arange(len(RESULTS[0])), RESULTS[1],label='accuracy training') plt.plot(np.arange(len(RESULTS[0])), RESULTS[2],label='accuracy testing') plt.legend() plt.show() ```
github_jupyter
# Generation of exome coverage projection model from FFPE samples --- ## Background: DNA extracted from limited amounts of formalin-fixed and paraffin-embedded (FFPE) archived tissue is variable in quality and quantity, resulting in low exome library complexity and leads to reduced coverage depth and experimental failure. Existing depth prediction strategies require prior shallow sequencing, wasteful of time and labor when the failure rate is high. ## Purpose: The overall goal of this model is to predict exome depth (or coverage), from limited parameters obtained during the DNA library preparation of samples for suboptimal FFPE DNA samples in order to help guide the decisions whether a sample should be sequenced or not. Currently (an older version of) this model is being implemented in a QC tool called [PROCEED](https://share.streamlit.io/danielanach/proceed/main/PROCEED.py) (PROjeCt ExomE Depth) for use by experimental scientists. --- # Table of contents 1. [Process input data](#process-data) 2. [Train multiple linear regression model](#model-data) 3. [Evaluate model](#evaluate-model) --- ``` import json import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy import seaborn as sns import sklearn from sklearn.model_selection import train_test_split from sklearn import metrics import statsmodels.formula.api as smf import warnings warnings.filterwarnings(action='ignore') %matplotlib inline sns.set_context('paper') # Set the font to be serif, rather than sans sns.set(font='arial', font_scale=1.5) # Make the background white, and specify the # specific font family sns.set_style('white', {'font.family': 'arial', 'axes.grid': False, 'font.size':12, 'axes.labelsize':10}) ``` # 1. Process input data. 
<a class="anchor" id="process-data"></a> This data is a compilation of both published and un-published FFPE DNA libraries which have been exome sequenced and includes bulk FFPE samples and laser-capture micro-dissected samples from oral premalignant lesions [1] and breast pre-cancer [2-3]. For which we have both some pre-sequencing, as well as post-sequencing statistics. **References:** > 1. Gutkind,J.S. et al. (2021) Inhibition of mTOR signaling and clinical activity of metformin in oral premalignant lesions. *JCI Insight*, 6. > 2. Nachmanson,D. et al. The breast pre-cancer atlas illustrates the molecular and micro-environmental diversity of ductal carcinoma in situ. npj Breast Cancer, *In Press*. > 3. Nachmanson,D. et al. (2020) Mutational profiling of micro-dissected pre-malignant lesions from archived specimens. *BMC Med. Genomics*, 13, 173. ## 1.1 Reading in data frame ``` data = pd.read_csv('ffpe_DNA_seq_training_data_clean.csv') print('Data dimensions: {}'.format(data.shape)) ``` ## 1.2 Choosing representative sample from DNA replicates In the case that multiple libraries were prepared from the same FFPE DNA extraction, we are only keeping data from one of these libraries. Since decisions about which samples to continue on through exome sequencing typically tend to be those that have little input DNA, we chose to keep the lowest DNA input amount. 
``` data = data.sort_values(by='input_DNA_ng').drop_duplicates(subset='dna', keep='first') print('Data dimensions: {}'.format(data.shape)) ``` ## 1.3 Statistics of select data features ``` num_samples = len(data['case'].unique()) print('Number of unique samples: {}'.format(num_samples)) ``` Number of libraries represented in each kit ``` data.groupby(by='kit').size() ``` Average DNA input as well as the range of represented input values ``` median_input = np.median(data['input_DNA_ng']) print('Median DNA input = {} ng'.format(median_input)) print('Range DNA input = ({}, {})'.format(min(data['input_DNA_ng']), max(data['input_DNA_ng']))) ``` ## 1.4 Explore and visualize all continuous pre-sequencing metrics How are the continuous features correlated with one another? ``` data[['pcr_cycles','total_reads_M','post_pcr1_dna','average_coverage']].corr() sns.pairplot(data[['pcr_cycles','total_reads_M','post_pcr1_dna','average_coverage']]) plt.show() ``` All of the variables are correlated with average coverage, so should be useful in a multiple-linear regression model. ### Plot all the features together, and also include sequencing kit type, to visualize the relationships between them. 
``` # Note, I am capping the post-pcr1 DNA at 3ug for visualization purposes data['post_pcr1_dna_clip'] = np.clip(data['post_pcr1_dna'], a_min=0, a_max=3000) plt.figure(figsize=(3,3), dpi=150) sns.scatterplot(data=data, x='post_pcr1_dna_clip', y='average_coverage', palette='mako_r', edgecolor='k', alpha=0.85, size='total_reads_M', hue='pcr_cycles', style='kit', style_order=['Swift 2S', 'XTHS', 'Ultra_II_NEB_FS'], markers=['o', 'D', 's'], s=200) plt.xlim(-50, 3200) plt.ylim(-5, 150) sns.despine(right=True) plt.xlabel('Post-PCR1 yield (ng)', fontweight='bold') plt.ylabel('Mean exome coverage', fontweight='bold') plt.tick_params(axis='both', which='major', size=2, width=2, direction='out', bottom='on') plt.legend(bbox_to_anchor=(1.05, 1), frameon=False) plt.savefig('figures/PCR1_yield_and_exome_coverage.pdf', dpi=150, bbox_inches='tight') plt.show() ``` ## 1.5 Identifying optimal normalization transformation Looking at different ways of normalizing the features and which reduce skew in each feature the most: 1. Raw (no normalization) 2. Log transformation 3. Square root transformation 4. Box-cox transformation This is a bit overkill but normalizing the features did improve the overall model fit, and some of the features were indeed quite skewed. Note this will make the model coefficients unintuitive. 
``` cont_features = ['pcr_cycles', 'total_reads_M', 'post_pcr1_dna', 'average_coverage'] fontsize = 8 for i in cont_features: fig, axes = plt.subplots(nrows=1, ncols=4, dpi=150, figsize=(6, 1)) plt.subplots_adjust(wspace=0.5) # Raw data sns.distplot(data[i], bins=10, ax=axes[0]) skew = data[i].skew() axes[0].set_xlabel('Raw\nSkew = {:.2f}'.format(skew), fontsize=fontsize) # Log transform sns.distplot(np.log(data[i]), bins=10, ax=axes[1]) skew = np.log(data[i]).skew() axes[1].set_xlabel('Log transform\nSkew = {:.2f}'.format(skew), fontsize=fontsize) # Square root transform sns.distplot(np.sqrt(data[i]), bins=10, ax=axes[2]) skew = np.sqrt(data[i]).skew() axes[2].set_xlabel('Square root\nSkew = {:.2f}'.format(skew), fontsize=fontsize) # Box-cox sns.distplot(scipy.stats.boxcox(data[i])[0], bins=10, ax=axes[3]) skew = pd.Series(scipy.stats.boxcox(data[i])[0]).skew() axes[3].set_xlabel('Box cox\nSkew = {:.2f}'.format(skew), fontsize=fontsize) # Adjusting x-tick fontsize and ylabels for all axes for ax in axes: ax.tick_params(axis='both', which='major', labelsize=8, pad=0) ax.set_ylabel(None) axes[0].set_ylabel('Density', fontsize=12, fontweight='bold') plt.suptitle(i, y=1.1, fontsize=12, fontweight='bold') plt.show() ``` ## 1.6 Normalizing the data: ``` # For box-cox normalization saving the lambda so that can apply to new input data as needed lambda_dct = {} data['pcr_cycles_norm'], lambda_dct['pcr_cycles'] = scipy.stats.boxcox(data['pcr_cycles']) data['total_reads_norm'], lambda_dct['total_reads_M'] = scipy.stats.boxcox(data['total_reads_M']) data['post_pcr1_dna_norm'], lambda_dct['post_pcr1_dna'] = scipy.stats.boxcox(data['post_pcr1_dna']) data['average_coverage_norm'] = np.sqrt(data['average_coverage']) with open('../boxcox_lambda_exponents.json', 'w') as convert_file: convert_file.write(json.dumps(lambda_dct)) data['kit'].unique() ``` For the categorical variable kit, since there are three options and they need to be be transformed into a dummy variable, one of the 
kit types will need to be dropped. ``` data['kit_encoded'] = [1 if k == 'XTHS' else 0 for k in data['kit']] ``` --- # 2. Construct the multiple linear regression. <a class="anchor" id="model-data"></a> **Modeling the average exome coverage based on:** * Total reads * PCR 1 cycles * PCR 1 DNA yield * Kit type used ``` formula = 'average_coverage_norm ~ total_reads_norm + pcr_cycles_norm + post_pcr1_dna_norm + kit_encoded' # Using 70% of data to train and 30% to evaluate train, test = sklearn.model_selection.train_test_split(data, test_size=0.2, random_state=4) # Fit model to training data train_model = smf.ols(formula=formula, data=train).fit() print(train_model.summary()) ``` Overall, the above model is predictive of mean coverage, all of the pre-sequencing metrics we have are useful in the prediction of average coverage. We are getting a multicollinearity warning, this was explored in another notebook but essentially adding in interaction terms for other correlated features (i.e. pcr_cycles and total reads), barely changed the model and still resulted in multi-collinearity warning. Additionally the variance inflation factor (VIF) was <= 2 for each of the features, so this is quite a minor violation of no multi-collinearity though something to be mindful of. ## Writing parameters of model to dictionary ``` model_params_dct = dict(train_model.params) with open('../model_params.json', 'w') as convert_file: convert_file.write(json.dumps(model_params_dct)) ``` --- # 3. Evaluate model. 
<a class="anchor" id="evaluate-model"></a> ## 3.1 Estimate overall goodness of fit Using root mean squared error (RMSE) of coverage predictions by splitting the data 20:80 over 100 iterations ``` formula = 'average_coverage_norm ~ total_reads_norm + pcr_cycles_norm + post_pcr1_dna_norm + kit_encoded' rmse_lst = [] predicted_all = [] observed_all = [] for i in range(0,100): train, test = sklearn.model_selection.train_test_split(data, test_size=0.2) # Fit model to training data test_model = smf.ols(formula=formula, data=train).fit() # Predict average coverage prediction = test_model.predict(test) # Both the prediction and the actual values of coverage will be squared # since in the normalization the square root was taken predicted_values = np.square(prediction) observed_values = np.square(test['average_coverage_norm']) # Measuring the root mean squared error RMSE rmse = np.sqrt(sklearn.metrics.mean_squared_error(observed_values, predicted_values)) rmse_lst.append(rmse) predicted_all.extend(predicted_values) observed_all.extend(observed_values) ``` Plotting histogram of RMSE over all iterations ``` plt.figure(figsize=(2,2), dpi=150) sns.distplot(rmse_lst, bins=10) plt.xlabel('Root mean squared error', fontweight='bold', fontsize=12) plt.ylabel('Density', fontweight='bold', fontsize=12) sns.despine(right=True) plt.tick_params(axis='both', which='major', size=2, width=2, direction='out', bottom='on') plt.axvline(x=np.average(rmse_lst), linestyle='--', color='k') plt.text(18, 0.18, 'Mean RMSE = {:.3}'.format(np.average(rmse_lst)), fontsize=10) plt.savefig('figures/RMSE_average_coverage.pdf', dpi=150, bbox_inches='tight') plt.show() ``` Over 100 different iterations of cross validation, we estimated the average RMSE of the predicted average exome coverage to be 13.7. For the application of deciding which libraries to sequence, this is a fairly reasonable error. 
To get a more intuitive understanding of the relationship between predicted and observed over cross-validation, this is a visualization of predicted coverage versus observed. ``` plt.figure(figsize=(2,2), dpi=150) sns.scatterplot(y=observed_all, x=predicted_all, s=10, alpha=0.8, edgecolor='k') plt.ylabel('Observed mean coverage', fontweight='bold', fontsize=12) plt.xlabel('Predicted mean coverage', fontweight='bold', fontsize=12) x = predicted_all y = observed_all plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), color='k', linestyle='--') sns.despine(right=True) plt.tick_params(axis='both', which='major', size=2, width=2, direction='out', bottom='on') plt.savefig('figures/Cross_validation_scatter.pdf', dpi=150, bbox_inches='tight') plt.show() ``` ## 3.2 Measure the "importance" of each feature Quantifying the contribution of each variable to the predictive performance by removing that variable and evaluating the partial model. ``` model = smf.ols(formula=formula, data=data).fit() total_rsquared_adj = model.rsquared_adj rsquared_adj_lst = [] rsquared_adj_params = ['Reads', 'PCR cycles', 'PCR yield', 'Kit' , 'PCR cycles and yield'] for f in ['average_coverage_norm ~ pcr_cycles_norm + post_pcr1_dna_norm + kit_encoded', 'average_coverage_norm ~ total_reads_norm + post_pcr1_dna_norm + kit_encoded', 'average_coverage_norm ~ total_reads_norm + pcr_cycles_norm + kit_encoded', 'average_coverage_norm ~ total_reads_norm + pcr_cycles_norm + post_pcr1_dna_norm', 'average_coverage_norm ~ total_reads_norm + kit_encoded']: model = smf.ols(formula=f, data=data).fit() rsquared_adj = model.rsquared_adj rsquared_adj_lst.append(rsquared_adj) model_eval_df = pd.DataFrame(zip(rsquared_adj_params, rsquared_adj_lst), columns=['variable', 'rsquared_adj']) model_eval_df['delta_rsquared_adj'] = total_rsquared_adj - model_eval_df['rsquared_adj'] model_eval_df['total_rsquared_adj'] = total_rsquared_adj model_eval_df = model_eval_df.sort_values(by='rsquared_adj') ``` Plotting 
the results each of the variable drops. ``` plt.figure(figsize=(2,2), dpi=150) sns.barplot(data=model_eval_df, y='variable', x='rsquared_adj', color='grey', edgecolor='k') plt.xlabel('R-squared adjusted', fontweight='bold') plt.ylabel('Model excluding', fontweight='bold') sns.despine(right = True) plt.tick_params(axis='both', which='major', size=2, width=2, direction='out', bottom='on') plt.xlim(0, 1) plt.axvline(x=total_rsquared_adj, color='r', linestyle='--') plt.savefig('figures/Rsquared_adj_variable_Eval.pdf', dpi=150, bbox_inches='tight') ``` Overall, models excluding PCR yield or excluding PCR cycles, had the largest decrease in accuracy, $R^{2}$=0.61 and 0.43 respectively, with the removal of both resulting in $R^{2}$=0.16, suggesting these had the greatest contribution in the prediction. --- ## Conclusion: Overall here I developed and trained an exome sequencing depth prediction model trained on pre-sequencing metrics from 166 library preparation conditions, varying sample (*N=124*), input DNA (2-309ng) or library kit (*N=3*). A limited set of parameters (kit, reads, PCR cycles, and PCR yield) were predictive of mean exome depth ($R^{2}$=0.85). Despite the extreme variability of FFPE derived DNA quantity and quality, as well as many other factors that contribute to sequencing performance excluded from this model, there is reasonable coverage predictions from very limited measurements that do not require sample sequencing. This model will be used as the basis of a shared web-application to assist experimental users in quality assessment prior to sequencing.
github_jupyter
# Brython-Radiant

A Brython framework for web app development.

![GitHub top language](https://img.shields.io/github/languages/top/un-gcpds/brython-radiant?) ![PyPI - License](https://img.shields.io/pypi/l/radiant?) ![PyPI](https://img.shields.io/pypi/v/radiant?) ![PyPI - Status](https://img.shields.io/pypi/status/radiant?) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/radiant?) ![GitHub last commit](https://img.shields.io/github/last-commit/un-gcpds/brython-radiant?) ![CodeFactor Grade](https://img.shields.io/codefactor/grade/github/UN-GCPDS/brython-radiant?) [![Documentation Status](https://readthedocs.org/projects/radiant/badge/?version=latest)](https://radiant-framework.readthedocs.io/en/latest/?badge=latest)

Radiant is a [Brython](https://brython.info/) framework for the quick development of web apps with pure Python/Brython syntax, which means that there is no need to care about (if you don't want to) HTML, CSS, or JavaScript. It runs on [Tornado](https://www.tornadoweb.org/) servers and includes support for [Websockets](notebooks/02-additional_features.ipynb#WebSockets), [Python Scripts](notebooks/02-additional_features.ipynb#Python-scripting) and [MDC](notebooks/02-additional_features.ipynb#Custom-themes).
## Installation

```
pip install radiant
```

## Usage

```
# Radiant modules
from radiant.server import RadiantAPI

# Brython modules
from browser import document, html  # These modules are faked after the `radiant` import

# Main class inheriting RadiantAPI
class BareMinimum(RadiantAPI):

    # Constructor
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        #-----------------------------------------------------------
        # Brython code (finally)
        document.select_one('body') <= html.H1('Hello World')
        #
        # ...all your brython code
        #-----------------------------------------------------------

# Run server
if __name__ == '__main__':
    BareMinimum()


# Radiant modules
from radiant.server import RadiantAPI, RadiantServer  # import RadiantServer for advanced options
from browser import document, html

# Main class inheriting RadiantAPI
class BareMinimum(RadiantAPI):

    def __init__(self, *args, **kwargs):
        """"""
        super().__init__(*args, **kwargs)

        #-----------------------------------------------------------
        # Brython code
        document.select_one('body') <= html.H1('Hello World')
        #
        # ...all your brython code
        #-----------------------------------------------------------

if __name__ == '__main__':
    # Advanced options
    RadiantServer('BareMinimum',
                  host='localhost',
                  port=5000,
                  brython_version='3.9.1',
                  debug_level=0,
                  )
```

## How it works

This is basically a set of scripts that allows the same file to run under both _Python_ and _Brython_. When it runs under _Python_, a [Tornado](https://www.tornadoweb.org/) server is created and configured to serve the local path as static files, and a custom HTML template is configured at runtime to import the same script, this time under _Brython_ — it is very simple.
github_jupyter
ref: https://www.kaggle.com/nischaydnk/inference-best-lb <br/> ref: https://www.kaggle.com/markpeng/final-best-lb-cleaned ``` #!/usr/bin/env python # coding: utf-8 import sys sys.path.append('../input/iterative-stratification/iterative-stratification-master') sys.path.append('../input/umaplearn/umap') import os os.makedirs('model', exist_ok=True) os.makedirs('interim', exist_ok=True) BATCH_SIZE = 2048 from scipy.sparse.csgraph import connected_components from umap import UMAP from iterstrat.ml_stratifiers import MultilabelStratifiedKFold import numpy as np import random import pandas as pd import matplotlib.pyplot as plt import os import copy import seaborn as sns import time from sklearn import preprocessing from sklearn.metrics import log_loss from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA,FactorAnalysis from sklearn.manifold import TSNE import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim print(torch.cuda.is_available()) import warnings NB = '25' IS_TRAIN = False MODEL_DIR = "../input/kibuna-nn-hs-1024-last-train/model" # "../model" INT_DIR = "interim" # "../interim" NSEEDS = 5 # 5 DEVICE = ('cuda' if torch.cuda.is_available() else 'cpu') EPOCHS = 15 LEARNING_RATE = 5e-3 WEIGHT_DECAY = 1e-5 EARLY_STOPPING_STEPS = 10 EARLY_STOP = False NFOLDS = 5 # 5 PMIN = 0.0005 PMAX = 0.9995 SMIN = 0.0 SMAX = 1.0 train_features = pd.read_csv('../input/lish-moa/train_features.csv') train_targets_scored = pd.read_csv('../input/lish-moa/train_targets_scored.csv') train_targets_nonscored = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv') test_features = pd.read_csv('../input/lish-moa/test_features.csv') sample_submission = pd.read_csv('../input/lish-moa/sample_submission.csv') train_targets_nonscored = train_targets_nonscored.loc[:, train_targets_nonscored.sum() != 0] print(train_targets_nonscored.shape) for c in train_targets_nonscored.columns: if c != "sig_id": 
train_targets_nonscored[c] = np.maximum(PMIN, np.minimum(PMAX, train_targets_nonscored[c])) print("(nsamples, nfeatures)") print(train_features.shape) print(train_targets_scored.shape) print(train_targets_nonscored.shape) print(test_features.shape) print(sample_submission.shape) GENES = [col for col in train_features.columns if col.startswith('g-')] CELLS = [col for col in train_features.columns if col.startswith('c-')] def seed_everything(seed=1903): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(seed=1903) # GENES n_comp = 90 n_dim = 45 data = pd.concat([pd.DataFrame(train_features[GENES]), pd.DataFrame(test_features[GENES])]) if IS_TRAIN: fa = FactorAnalysis(n_components=n_comp, random_state=1903).fit(data[GENES]) pd.to_pickle(fa, f'{MODEL_DIR}/{NB}_factor_analysis_g.pkl') umap = UMAP(n_components=n_dim, random_state=1903).fit(data[GENES]) pd.to_pickle(umap, f'{MODEL_DIR}/{NB}_umap_g.pkl') else: fa = pd.read_pickle(f'{MODEL_DIR}/{NB}_factor_analysis_g.pkl') umap = pd.read_pickle(f'{MODEL_DIR}/{NB}_umap_g.pkl') data2 = (fa.transform(data[GENES])) data3 = (umap.transform(data[GENES])) train2 = data2[:train_features.shape[0]] test2 = data2[-test_features.shape[0]:] train3 = data3[:train_features.shape[0]] test3 = data3[-test_features.shape[0]:] train2 = pd.DataFrame(train2, columns=[f'fa_G-{i}' for i in range(n_comp)]) train3 = pd.DataFrame(train3, columns=[f'umap_G-{i}' for i in range(n_dim)]) test2 = pd.DataFrame(test2, columns=[f'fa_G-{i}' for i in range(n_comp)]) test3 = pd.DataFrame(test3, columns=[f'umap_G-{i}' for i in range(n_dim)]) train_features = pd.concat((train_features, train2, train3), axis=1) test_features = pd.concat((test_features, test2, test3), axis=1) #CELLS n_comp = 50 n_dim = 25 data = pd.concat([pd.DataFrame(train_features[CELLS]), pd.DataFrame(test_features[CELLS])]) if IS_TRAIN: fa = 
FactorAnalysis(n_components=n_comp, random_state=1903).fit(data[CELLS]) pd.to_pickle(fa, f'{MODEL_DIR}/{NB}_factor_analysis_c.pkl') umap = UMAP(n_components=n_dim, random_state=1903).fit(data[CELLS]) pd.to_pickle(umap, f'{MODEL_DIR}/{NB}_umap_c.pkl') else: fa = pd.read_pickle(f'{MODEL_DIR}/{NB}_factor_analysis_c.pkl') umap = pd.read_pickle(f'{MODEL_DIR}/{NB}_umap_c.pkl') data2 = (fa.transform(data[CELLS])) data3 = (umap.transform(data[CELLS])) train2 = data2[:train_features.shape[0]] test2 = data2[-test_features.shape[0]:] train3 = data3[:train_features.shape[0]] test3 = data3[-test_features.shape[0]:] train2 = pd.DataFrame(train2, columns=[f'fa_C-{i}' for i in range(n_comp)]) train3 = pd.DataFrame(train3, columns=[f'umap_C-{i}' for i in range(n_dim)]) test2 = pd.DataFrame(test2, columns=[f'fa_C-{i}' for i in range(n_comp)]) test3 = pd.DataFrame(test3, columns=[f'umap_C-{i}' for i in range(n_dim)]) train_features = pd.concat((train_features, train2, train3), axis=1) test_features = pd.concat((test_features, test2, test3), axis=1) from sklearn.preprocessing import QuantileTransformer for col in (GENES + CELLS): vec_len = len(train_features[col].values) vec_len_test = len(test_features[col].values) raw_vec = pd.concat([train_features, test_features])[col].values.reshape(vec_len+vec_len_test, 1) if IS_TRAIN: transformer = QuantileTransformer(n_quantiles=100, random_state=123, output_distribution="normal") transformer.fit(raw_vec) pd.to_pickle(transformer, f'{MODEL_DIR}/{NB}_{col}_quantile_transformer.pkl') else: transformer = pd.read_pickle(f'{MODEL_DIR}/{NB}_{col}_quantile_transformer.pkl') train_features[col] = transformer.transform(train_features[col].values.reshape(vec_len, 1)).reshape(1, vec_len)[0] test_features[col] = transformer.transform(test_features[col].values.reshape(vec_len_test, 1)).reshape(1, vec_len_test)[0] print(train_features.shape) print(test_features.shape) train = train_features.merge(train_targets_nonscored, on='sig_id') train = 
train[train['cp_type']!='ctl_vehicle'].reset_index(drop=True) test = test_features[test_features['cp_type']!='ctl_vehicle'].reset_index(drop=True) target = train[train_targets_nonscored.columns] train = train.drop('cp_type', axis=1) test = test.drop('cp_type', axis=1) print(target.shape) print(train_features.shape) print(test_features.shape) print(train.shape) print(test.shape) target_cols = target.drop('sig_id', axis=1).columns.values.tolist() folds = train.copy() mskf = MultilabelStratifiedKFold(n_splits=NFOLDS) for f, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)): folds.loc[v_idx, 'kfold'] = int(f) folds['kfold'] = folds['kfold'].astype(int) folds print(train.shape) print(folds.shape) print(test.shape) print(target.shape) print(sample_submission.shape) class MoADataset: def __init__(self, features, targets): self.features = features self.targets = targets def __len__(self): return (self.features.shape[0]) def __getitem__(self, idx): dct = { 'x' : torch.tensor(self.features[idx, :], dtype=torch.float), 'y' : torch.tensor(self.targets[idx, :], dtype=torch.float) } return dct class TestDataset: def __init__(self, features): self.features = features def __len__(self): return (self.features.shape[0]) def __getitem__(self, idx): dct = { 'x' : torch.tensor(self.features[idx, :], dtype=torch.float) } return dct def train_fn(model, optimizer, scheduler, loss_fn, dataloader, device): model.train() final_loss = 0 for data in dataloader: optimizer.zero_grad() inputs, targets = data['x'].to(device), data['y'].to(device) outputs = model(inputs) loss = loss_fn(outputs, targets) loss.backward() optimizer.step() scheduler.step() final_loss += loss.item() final_loss /= len(dataloader) return final_loss def valid_fn(model, loss_fn, dataloader, device): model.eval() final_loss = 0 valid_preds = [] for data in dataloader: inputs, targets = data['x'].to(device), data['y'].to(device) outputs = model(inputs) loss = loss_fn(outputs, targets) final_loss += loss.item() 
valid_preds.append(outputs.sigmoid().detach().cpu().numpy()) final_loss /= len(dataloader) valid_preds = np.concatenate(valid_preds) return final_loss, valid_preds def inference_fn(model, dataloader, device): model.eval() preds = [] for data in dataloader: inputs = data['x'].to(device) with torch.no_grad(): outputs = model(inputs) preds.append(outputs.sigmoid().detach().cpu().numpy()) preds = np.concatenate(preds) return preds class Model(nn.Module): def __init__(self, num_features, num_targets, hidden_size): super(Model, self).__init__() self.batch_norm1 = nn.BatchNorm1d(num_features) self.dropout1 = nn.Dropout(0.15) self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size)) self.batch_norm2 = nn.BatchNorm1d(hidden_size) self.dropout2 = nn.Dropout(0.3) self.dense2 = nn.Linear(hidden_size, hidden_size) self.batch_norm3 = nn.BatchNorm1d(hidden_size) self.dropout3 = nn.Dropout(0.25) self.dense3 = nn.utils.weight_norm(nn.Linear(hidden_size, num_targets)) def forward(self, x): x = self.batch_norm1(x) x = self.dropout1(x) x = F.leaky_relu(self.dense1(x)) x = self.batch_norm2(x) x = self.dropout2(x) x = F.leaky_relu(self.dense2(x)) x = self.batch_norm3(x) x = self.dropout3(x) x = self.dense3(x) return x def process_data(data): data = pd.get_dummies(data, columns=['cp_time','cp_dose']) return data feature_cols = [c for c in process_data(folds).columns if c not in target_cols] feature_cols = [c for c in feature_cols if c not in ['kfold','sig_id']] len(feature_cols) num_features=len(feature_cols) num_targets=len(target_cols) hidden_size=2048 def run_training(fold, seed): seed_everything(seed) train = process_data(folds) test_ = process_data(test) trn_idx = train[train['kfold'] != fold].index val_idx = train[train['kfold'] == fold].index train_df = train[train['kfold'] != fold].reset_index(drop=True) valid_df = train[train['kfold'] == fold].reset_index(drop=True) x_train, y_train = train_df[feature_cols].values, train_df[target_cols].values x_valid, y_valid = 
valid_df[feature_cols].values, valid_df[target_cols].values train_dataset = MoADataset(x_train, y_train) valid_dataset = MoADataset(x_valid, y_valid) trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True) validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False) model = Model( num_features=num_features, num_targets=num_targets, hidden_size=hidden_size, ) model.to(DEVICE) optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY) scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.2, div_factor=1e3, max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader)) loss_fn = nn.BCEWithLogitsLoss() early_stopping_steps = EARLY_STOPPING_STEPS early_step = 0 oof = np.zeros((len(train), target.iloc[:, 1:].shape[1])) best_loss = np.inf best_loss_epoch = -1 if IS_TRAIN: for epoch in range(EPOCHS): train_loss = train_fn(model, optimizer, scheduler, loss_fn, trainloader, DEVICE) valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE) if valid_loss < best_loss: best_loss = valid_loss best_loss_epoch = epoch oof[val_idx] = valid_preds torch.save(model.state_dict(), f"{MODEL_DIR}/{NB}-nonscored-SEED{seed}-FOLD{fold}_.pth") elif(EARLY_STOP == True): early_step += 1 if (early_step >= early_stopping_steps): break if epoch % 10 == 0 or epoch == EPOCHS-1: print(f"seed: {seed}, FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss:.6f}, valid_loss: {valid_loss:.6f}, best_loss: {best_loss:.6f}, best_loss_epoch: {best_loss_epoch}") #--------------------- PREDICTION--------------------- x_test = test_[feature_cols].values testdataset = TestDataset(x_test) testloader = torch.utils.data.DataLoader(testdataset, batch_size=BATCH_SIZE, shuffle=False) model = Model( num_features=num_features, num_targets=num_targets, hidden_size=hidden_size, ) model.load_state_dict(torch.load(f"{MODEL_DIR}/{NB}-nonscored-SEED{seed}-FOLD{fold}_.pth")) 
model.to(DEVICE) if not IS_TRAIN: valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE) oof[val_idx] = valid_preds predictions = np.zeros((len(test_), target.iloc[:, 1:].shape[1])) predictions = inference_fn(model, testloader, DEVICE) return oof, predictions def run_k_fold(NFOLDS, seed): oof = np.zeros((len(train), len(target_cols))) predictions = np.zeros((len(test), len(target_cols))) for fold in range(NFOLDS): oof_, pred_ = run_training(fold, seed) predictions += pred_ / NFOLDS oof += oof_ return oof, predictions SEED = range(NSEEDS) oof = np.zeros((len(train), len(target_cols))) predictions = np.zeros((len(test), len(target_cols))) time_start = time.time() for seed in SEED: oof_, predictions_ = run_k_fold(NFOLDS, seed) oof += oof_ / len(SEED) predictions += predictions_ / len(SEED) print(f"elapsed time: {time.time() - time_start}") train[target_cols] = oof test[target_cols] = predictions print(oof.shape) print(predictions.shape) train.to_pickle(f"{INT_DIR}/{NB}-train_nonscore_pred.pkl") test.to_pickle(f"{INT_DIR}/{NB}-test_nonscore_pred.pkl") train[target_cols] = np.maximum(PMIN, np.minimum(PMAX, train[target_cols])) valid_results = train_targets_nonscored.drop(columns=target_cols).merge(train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0) y_true = train_targets_nonscored[target_cols].values y_true = y_true > 0.5 y_pred = valid_results[target_cols].values score = 0 for i in range(len(target_cols)): score_ = log_loss(y_true[:, i], y_pred[:, i]) score += score_ / target.shape[1] print("CV log_loss: ", score) ``` # Stage-2 ``` EPOCHS = 25 nonscored_target = [c for c in train[train_targets_nonscored.columns] if c != "sig_id"] nonscored_target train = pd.read_pickle(f"{INT_DIR}/{NB}-train_nonscore_pred.pkl") test = pd.read_pickle(f"{INT_DIR}/{NB}-test_nonscore_pred.pkl") train = train.merge(train_targets_scored, on='sig_id') target = train[train_targets_scored.columns] for col in (nonscored_target): vec_len = len(train[col].values) 
vec_len_test = len(test[col].values) raw_vec = train[col].values.reshape(vec_len, 1) if IS_TRAIN: transformer = QuantileTransformer(n_quantiles=100, random_state=0, output_distribution="normal") transformer.fit(raw_vec) pd.to_pickle(transformer, f"{MODEL_DIR}/{NB}_{col}_quantile_nonscored.pkl") else: transformer = pd.read_pickle(f"{MODEL_DIR}/{NB}_{col}_quantile_nonscored.pkl") train[col] = transformer.transform(raw_vec).reshape(1, vec_len)[0] test[col] = transformer.transform(test[col].values.reshape(vec_len_test, 1)).reshape(1, vec_len_test)[0] target_cols = target.drop('sig_id', axis=1).columns.values.tolist() train folds = train.copy() mskf = MultilabelStratifiedKFold(n_splits=NFOLDS) for f, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)): folds.loc[v_idx, 'kfold'] = int(f) folds['kfold'] = folds['kfold'].astype(int) folds print(train.shape) print(folds.shape) print(test.shape) print(target.shape) print(sample_submission.shape) def process_data(data): data = pd.get_dummies(data, columns=['cp_time','cp_dose']) return data feature_cols = [c for c in process_data(folds).columns if c not in target_cols] feature_cols = [c for c in feature_cols if c not in ['kfold','sig_id']] len(feature_cols) num_features=len(feature_cols) num_targets=len(target_cols) hidden_size=2048 def run_training(fold, seed): seed_everything(seed) train = process_data(folds) test_ = process_data(test) trn_idx = train[train['kfold'] != fold].index val_idx = train[train['kfold'] == fold].index train_df = train[train['kfold'] != fold].reset_index(drop=True) valid_df = train[train['kfold'] == fold].reset_index(drop=True) x_train, y_train = train_df[feature_cols].values, train_df[target_cols].values x_valid, y_valid = valid_df[feature_cols].values, valid_df[target_cols].values train_dataset = MoADataset(x_train, y_train) valid_dataset = MoADataset(x_valid, y_valid) trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True) validloader = 
torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False) model = Model( num_features=num_features, num_targets=num_targets, hidden_size=hidden_size, ) model.to(DEVICE) optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY) # scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.3, div_factor=1000, # max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader)) scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.2, div_factor=1e3, max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader)) loss_fn = nn.BCEWithLogitsLoss() early_stopping_steps = EARLY_STOPPING_STEPS early_step = 0 oof = np.zeros((len(train), target.iloc[:, 1:].shape[1])) best_loss = np.inf best_loss_epoch = -1 if IS_TRAIN: for epoch in range(EPOCHS): train_loss = train_fn(model, optimizer, scheduler, loss_fn, trainloader, DEVICE) valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE) if valid_loss < best_loss: best_loss = valid_loss best_loss_epoch = epoch oof[val_idx] = valid_preds torch.save(model.state_dict(), f"{MODEL_DIR}/{NB}-scored-SEED{seed}-FOLD{fold}_.pth") elif(EARLY_STOP == True): early_step += 1 if (early_step >= early_stopping_steps): break if epoch % 10 == 0 or epoch == EPOCHS-1: print(f"seed: {seed}, FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss:.6f}, valid_loss: {valid_loss:.6f}, best_loss: {best_loss:.6f}, best_loss_epoch: {best_loss_epoch}") #--------------------- PREDICTION--------------------- x_test = test_[feature_cols].values testdataset = TestDataset(x_test) testloader = torch.utils.data.DataLoader(testdataset, batch_size=BATCH_SIZE, shuffle=False) model = Model( num_features=num_features, num_targets=num_targets, hidden_size=hidden_size, ) model.load_state_dict(torch.load(f"{MODEL_DIR}/{NB}-scored-SEED{seed}-FOLD{fold}_.pth")) model.to(DEVICE) if not IS_TRAIN: valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE) oof[val_idx] 
= valid_preds predictions = np.zeros((len(test_), target.iloc[:, 1:].shape[1])) predictions = inference_fn(model, testloader, DEVICE) return oof, predictions def run_k_fold(NFOLDS, seed): oof = np.zeros((len(train), len(target_cols))) predictions = np.zeros((len(test), len(target_cols))) for fold in range(NFOLDS): oof_, pred_ = run_training(fold, seed) predictions += pred_ / NFOLDS oof += oof_ return oof, predictions SEED = range(NSEEDS) #[0, 1, 2, 3 ,4]#, 5, 6, 7, 8, 9, 10] oof = np.zeros((len(train), len(target_cols))) predictions = np.zeros((len(test), len(target_cols))) time_start = time.time() for seed in SEED: oof_, predictions_ = run_k_fold(NFOLDS, seed) oof += oof_ / len(SEED) predictions += predictions_ / len(SEED) print(f"elapsed time: {time.time() - time_start}") train[target_cols] = oof test[target_cols] = predictions train.to_pickle(f"{INT_DIR}/{NB}-train-score-pred.pkl") test.to_pickle(f"{INT_DIR}/{NB}-test-score-pred.pkl") train[target_cols] = np.maximum(PMIN, np.minimum(PMAX, train[target_cols])) valid_results = train_targets_scored.drop(columns=target_cols).merge(train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0) y_true = train_targets_scored[target_cols].values y_true = y_true > 0.5 y_pred = valid_results[target_cols].values score = 0 for i in range(len(target_cols)): score_ = log_loss(y_true[:, i], y_pred[:, i]) score += score_ / target.shape[1] print("CV log_loss: ", score) ``` # Stage-3 ``` train = pd.read_pickle(f"{INT_DIR}/{NB}-train-score-pred.pkl") test = pd.read_pickle(f"{INT_DIR}/{NB}-test-score-pred.pkl") EPOCHS = 25 PMIN = 0.0005 PMAX = 0.9995 for c in train_targets_scored.columns: if c != "sig_id": train_targets_scored[c] = np.maximum(PMIN, np.minimum(PMAX, train_targets_scored[c])) train_targets_scored.columns train = train[train_targets_scored.columns] train.columns = [c + "_pred" if (c != 'sig_id' and c in train_targets_scored.columns) else c for c in train.columns] test = test[train_targets_scored.columns] 
test.columns = [c + "_pred" if (c != 'sig_id' and c in train_targets_scored.columns) else c for c in test.columns] train train = train.merge(train_targets_scored, on='sig_id') target = train[train_targets_scored.columns] from sklearn.preprocessing import QuantileTransformer scored_target_pred = [c + "_pred" for c in train_targets_scored.columns if c != 'sig_id'] for col in (scored_target_pred): vec_len = len(train[col].values) vec_len_test = len(test[col].values) raw_vec = train[col].values.reshape(vec_len, 1) # transformer.fit(raw_vec) if IS_TRAIN: transformer = QuantileTransformer(n_quantiles=100, random_state=0, output_distribution="normal") transformer.fit(raw_vec) pd.to_pickle(transformer, f"{MODEL_DIR}/{NB}_{col}_quantile_scored.pkl") else: transformer = pd.read_pickle(f"{MODEL_DIR}/{NB}_{col}_quantile_scored.pkl") train[col] = transformer.transform(raw_vec).reshape(1, vec_len)[0] test[col] = transformer.transform(test[col].values.reshape(vec_len_test, 1)).reshape(1, vec_len_test)[0] target_cols = target.drop('sig_id', axis=1).columns.values.tolist() train folds = train.copy() mskf = MultilabelStratifiedKFold(n_splits=NFOLDS) for f, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)): folds.loc[v_idx, 'kfold'] = int(f) folds['kfold'] = folds['kfold'].astype(int) folds print(train.shape) print(folds.shape) print(test.shape) print(target.shape) print(sample_submission.shape) folds def process_data(data): return data feature_cols = [c for c in folds.columns if c not in target_cols] feature_cols = [c for c in feature_cols if c not in ['kfold','sig_id']] len(feature_cols) feature_cols folds EPOCHS = 25 num_features=len(feature_cols) num_targets=len(target_cols) hidden_size=1024 def run_training(fold, seed): seed_everything(seed) train = process_data(folds) test_ = process_data(test) trn_idx = train[train['kfold'] != fold].index val_idx = train[train['kfold'] == fold].index train_df = train[train['kfold'] != fold].reset_index(drop=True) valid_df = 
train[train['kfold'] == fold].reset_index(drop=True) x_train, y_train = train_df[feature_cols].values, train_df[target_cols].values x_valid, y_valid = valid_df[feature_cols].values, valid_df[target_cols].values train_dataset = MoADataset(x_train, y_train) valid_dataset = MoADataset(x_valid, y_valid) trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True) validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False) model = Model( num_features=num_features, num_targets=num_targets, hidden_size=hidden_size, ) model.to(DEVICE) optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY) scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.2, div_factor=1e3, max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader)) loss_fn = nn.BCEWithLogitsLoss() early_stopping_steps = EARLY_STOPPING_STEPS early_step = 0 oof = np.zeros((len(train), target.iloc[:, 1:].shape[1])) best_loss = np.inf best_loss_epoch = -1 if IS_TRAIN: for epoch in range(EPOCHS): train_loss = train_fn(model, optimizer, scheduler, loss_fn, trainloader, DEVICE) valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE) if valid_loss < best_loss: best_loss = valid_loss best_loss_epoch = epoch oof[val_idx] = valid_preds torch.save(model.state_dict(), f"{MODEL_DIR}/{NB}-scored2-SEED{seed}-FOLD{fold}_.pth") elif(EARLY_STOP == True): early_step += 1 if (early_step >= early_stopping_steps): break if epoch % 10 == 0 or epoch == EPOCHS-1: print(f"seed: {seed}, FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss:.6f}, valid_loss: {valid_loss:.6f}, best_loss: {best_loss:.6f}, best_loss_epoch: {best_loss_epoch}") #--------------------- PREDICTION--------------------- x_test = test_[feature_cols].values testdataset = TestDataset(x_test) testloader = torch.utils.data.DataLoader(testdataset, batch_size=BATCH_SIZE, shuffle=False) model = Model( num_features=num_features, 
num_targets=num_targets, hidden_size=hidden_size, ) model.load_state_dict(torch.load(f"{MODEL_DIR}/{NB}-scored2-SEED{seed}-FOLD{fold}_.pth")) model.to(DEVICE) if not IS_TRAIN: valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE) oof[val_idx] = valid_preds predictions = np.zeros((len(test_), target.iloc[:, 1:].shape[1])) predictions = inference_fn(model, testloader, DEVICE) return oof, predictions def run_k_fold(NFOLDS, seed): oof = np.zeros((len(train), len(target_cols))) predictions = np.zeros((len(test), len(target_cols))) for fold in range(NFOLDS): oof_, pred_ = run_training(fold, seed) predictions += pred_ / NFOLDS oof += oof_ return oof, predictions SEED = range(NSEEDS) # [0, 1, 2, 3 ,4]#, 5, 6, 7, 8, 9, 10] oof = np.zeros((len(train), len(target_cols))) predictions = np.zeros((len(test), len(target_cols))) time_start = time.time() for seed in SEED: oof_, predictions_ = run_k_fold(NFOLDS, seed) oof += oof_ / len(SEED) predictions += predictions_ / len(SEED) print(f"elapsed time: {time.time() - time_start}") train[target_cols] = oof test[target_cols] = predictions train.to_pickle(f"{INT_DIR}/{NB}-train-score-stack-pred.pkl") test.to_pickle(f"{INT_DIR}/{NB}-test-score-stack-pred.pkl") train[target_cols] = np.maximum(PMIN, np.minimum(PMAX, train[target_cols])) valid_results = train_targets_scored.drop(columns=target_cols).merge(train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0) y_true = train_targets_scored[target_cols].values y_true = y_true > 0.5 y_pred = valid_results[target_cols].values y_pred = np.minimum(SMAX, np.maximum(SMIN, y_pred)) score = 0 for i in range(len(target_cols)): score_ = log_loss(y_true[:, i], y_pred[:, i]) score += score_ / target.shape[1] print("CV log_loss: ", score) sub = sample_submission.drop(columns=target_cols).merge(test[['sig_id']+target_cols], on='sig_id', how='left').fillna(0) sub.to_csv('submission_2stageNN_with_ns_oldcv_0.01822.csv', index=False) sub ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Goal" data-toc-modified-id="Goal-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Goal</a></span></li><li><span><a href="#Var" data-toc-modified-id="Var-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Var</a></span></li><li><span><a href="#Init" data-toc-modified-id="Init-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Init</a></span></li><li><span><a href="#Load" data-toc-modified-id="Load-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Load</a></span></li><li><span><a href="#Summary" data-toc-modified-id="Summary-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Summary</a></span><ul class="toc-item"><li><span><a href="#contig-lengths" data-toc-modified-id="contig-lengths-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>contig lengths</a></span></li><li><span><a href="#Extensive-misassemblies" data-toc-modified-id="Extensive-misassemblies-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>Extensive misassemblies</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Notes" data-toc-modified-id="Notes-5.2.0.1"><span class="toc-item-num">5.2.0.1&nbsp;&nbsp;</span>Notes</a></span></li></ul></li></ul></li><li><span><a href="#Chimeras" data-toc-modified-id="Chimeras-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>Chimeras</a></span></li><li><span><a href="#Edit-distance" data-toc-modified-id="Edit-distance-5.4"><span class="toc-item-num">5.4&nbsp;&nbsp;</span>Edit distance</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Notes" data-toc-modified-id="Notes-5.4.0.1"><span class="toc-item-num">5.4.0.1&nbsp;&nbsp;</span>Notes</a></span></li></ul></li></ul></li><li><span><a href="#True-error-label-overlap" data-toc-modified-id="True-error-label-overlap-5.5"><span class="toc-item-num">5.5&nbsp;&nbsp;</span>True error label overlap</a></span><ul class="toc-item"><li><span><a 
href="#extensive-misassemblies-&amp;-chimeras" data-toc-modified-id="extensive-misassemblies-&amp;-chimeras-5.5.1"><span class="toc-item-num">5.5.1&nbsp;&nbsp;</span>extensive misassemblies &amp; chimeras</a></span></li><li><span><a href="#edit-distance,--extensive-misassemblies,-&amp;-chimeras" data-toc-modified-id="edit-distance,--extensive-misassemblies,-&amp;-chimeras-5.5.2"><span class="toc-item-num">5.5.2&nbsp;&nbsp;</span>edit distance, extensive misassemblies, &amp; chimeras</a></span><ul class="toc-item"><li><span><a href="#Notes" data-toc-modified-id="Notes-5.5.2.1"><span class="toc-item-num">5.5.2.1&nbsp;&nbsp;</span>Notes</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#Predictors-of-Extensive-misassemblies" data-toc-modified-id="Predictors-of-Extensive-misassemblies-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Predictors of Extensive misassemblies</a></span></li><li><span><a href="#Predictors-of-high-edit-dist" data-toc-modified-id="Predictors-of-high-edit-dist-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Predictors of high edit dist</a></span><ul class="toc-item"><li><span><a href="#Coverage" data-toc-modified-id="Coverage-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>Coverage</a></span></li><li><span><a href="#Number-of-SNPs" data-toc-modified-id="Number-of-SNPs-7.2"><span class="toc-item-num">7.2&nbsp;&nbsp;</span>Number of SNPs</a></span></li></ul></li><li><span><a href="#sessionInfo" data-toc-modified-id="sessionInfo-8"><span class="toc-item-num">8&nbsp;&nbsp;</span>sessionInfo</a></span></li></ul></div> # Goal * Assess the results of `DeepMAsED` run on 10 genomes # Var ``` work_dir = '/ebio/abt3_projects/software/dev/DeepMAsED/tests/output_n10/' mega_feats_file = file.path(work_dir, 'map', '1', 'megahit', 'features.tsv.gz') meta_feats_file = file.path(work_dir, 'map', '1', 'metaspades', 'features.tsv.gz') ``` # Init ``` library(dplyr) library(tidyr) library(ggplot2) library(data.table) ``` # Load ``` cmd = 
paste(c('gunzip -c', mega_feats_file), collapse=' ') mega_feats = fread(cmd, sep='\t') mega_feats %>% nrow %>% print mega_feats %>% head cmd = paste(c('gunzip -c', meta_feats_file), collapse=' ') meta_feats = fread(cmd, sep='\t') meta_feats %>% nrow %>% print meta_feats %>% head feats = rbind(mega_feats, meta_feats) mega_feats = meta_feats = NULL feats %>% nrow %>% print ``` # Summary ## contig lengths ``` # contig lengths contig_lens = feats$contig %>% table %>% as.data.frame colnames(contig_lens) = c('contig', 'length') contig_lens %>% nrow %>% print contig_lens$length %>% summary %>% print # contig lengths p = feats %>% group_by(contig, assembler) %>% summarize(contig_len = max(position) + 1) %>% ungroup() %>% ggplot(aes(contig_len)) + geom_histogram(bins=30) + scale_x_log10() + labs(x='Contig length', y='Number of contigs') + facet_grid(assembler ~ .) + theme_bw() options(repr.plot.width=4, repr.plot.height=4) plot(p) ``` ## Extensive misassemblies * as determined by metaQUAST ``` feats_f = feats %>% filter(Extensive_misassembly != '') %>% distinct(contig, Extensive_misassembly, assembler) feats_f %>% nrow %>% print p = feats_f %>% group_by(contig, assembler) %>% summarize(n = n()) %>% ungroup() %>% ggplot(aes(assembler, n)) + geom_bar(stat='identity') + labs(y='No. 
of\n"Extensive misassembly"\ncontigs') + theme_bw() + theme( axis.text.x = element_text(angle=55, hjust=1) ) options(repr.plot.width=3, repr.plot.height=3) plot(p) p = feats_f %>% ggplot(aes(Extensive_misassembly, fill=assembler)) + geom_bar(position='dodge') + theme_bw() + theme( axis.text.x = element_text(angle=55, hjust=1) ) options(repr.plot.width=5, repr.plot.height=4) plot(p) ``` #### Notes * 'relocation;relocation' = 2 relocations for the same contig ## Chimeras ``` # true errors: chimeras ## all observations listed as chimeric feats$chimeric %>% table %>% print # number of chimeric contigs p = feats %>% mutate(chimeric = ifelse(is.na(chimeric), TRUE, chimeric)) %>% group_by(contig, assembler) %>% summarize(chimeric = any(chimeric)) %>% group_by(assembler) %>% summarize(n_chimeras = sum(chimeric)) %>% ungroup() %>% ggplot(aes(assembler, n_chimeras)) + geom_bar(stat='identity') + labs(y='No. of chimeric contigs') + theme_bw() options(repr.plot.width=3, repr.plot.height=3) plot(p) ``` ## Edit distance ``` # true errors: edit distance feats_s = feats %>% group_by(contig, assembler) %>% summarize(edit_dist_norm = first(edit_dist_norm)) %>% ungroup() feats_s$edit_dist_norm %>% summary %>% print ``` #### Notes * An 'NA' for edit distance means that the contig didn't align to any reference! 
* ie., a really 'bad' contig ``` # true errors: edit distance feats_s = feats %>% group_by(contig, assembler) %>% summarize(edit_dist_norm = first(edit_dist_norm)) %>% ungroup() %>% mutate(edit_dist_norm = ifelse(is.na(edit_dist_norm), 1, edit_dist_norm)) feats_s$edit_dist_norm %>% summary %>% print # pseudo count for 0 edit dist (for log-trans) pseudo_low_edit_dist = min(feats_s$edit_dist_norm[feats_s$edit_dist_norm > 0]) / 2 pseudo_low_edit_dist # edit distances p = feats_s %>% mutate(edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) %>% ggplot(aes(assembler, edit_dist_norm)) + geom_boxplot() + scale_y_log10() + labs(y='Normalized\nedit distance') + theme_bw() options(repr.plot.width=3, repr.plot.height=3) plot(p) # edit distances p = feats_s %>% mutate(edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) %>% ggplot(aes(edit_dist_norm)) + geom_histogram(bins=50) + scale_x_log10() + facet_grid(assembler ~ .) + labs(y='Normalized\nedit distance') + theme_bw() options(repr.plot.width=5, repr.plot.height=4) plot(p) ``` ## True error label overlap * amount of overlap among 'true error' columns ### extensive misassemblies & chimeras ``` feats_f = feats %>% filter(Extensive_misassembly != '' | chimeric == 'True') %>% distinct(contig, assembler, Extensive_misassembly, chimeric) feats_f %>% nrow %>% print p = feats_f %>% group_by(assembler, chimeric, Extensive_misassembly) %>% summarize(n = n()) %>% ungroup() %>% ggplot(aes(Extensive_misassembly, n, fill=chimeric)) + geom_bar(stat='identity', position='dodge') + facet_grid(assembler ~ .) + labs(y='No. 
of contigs') + theme_bw() + theme( axis.text.x = element_text(angle=55, hjust=1) ) options(repr.plot.width=5, repr.plot.height=4) plot(p) ``` ### edit distance, extensive misassemblies, & chimeras ``` feats_f = feats %>% distinct(contig, assembler, Extensive_misassembly, chimeric, edit_dist_norm) %>% mutate(Extensive_misassembly = ifelse(Extensive_misassembly == '', 'None', Extensive_misassembly), edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) feats_f %>% nrow %>% print p = feats_f %>% ggplot(aes(Extensive_misassembly, edit_dist_norm, fill=chimeric)) + geom_boxplot() + geom_hline(yintercept=0.001, linetype='dashed', color='red', alpha=0.4) + scale_y_log10() + labs(x='Extensive misassembly', y='Normalized\nedit distance') + facet_grid(assembler ~ .) + theme_bw() + theme( axis.text.x = element_text(angle=55, hjust=1) ) options(repr.plot.width=6.5, repr.plot.height=4.5) plot(p) ``` #### Notes * The normalized edit distance cutoff of 0.001 (dashed red line) is arbitrary # Predictors of Extensive misassemblies ``` # summarizing continuous variable features feats_s = feats %>% mutate(edit_dist_norm = ifelse(is.na(edit_dist_norm), 1, edit_dist_norm), chimeric = ifelse(is.na(chimeric), TRUE, chimeric), Extensive_misassembly = ifelse(Extensive_misassembly == '', 'No Ext. Mis-asmbl', 'Ext. Mis-asmbl')) %>% group_by(contig, assembler) %>% summarize(chimeric = any(chimeric), Extensive_misassembly = first(Extensive_misassembly), edit_dist_norm = first(edit_dist_norm), min_coverage = min(coverage), median_coverage = median(coverage), max_coverage = max(coverage), min_num_SNPs = min(num_SNPs), median_num_SNPs = median(num_SNPs), max_num_SNPs = max(num_SNPs)) %>% ungroup() feats_s %>% summary p = feats_s %>% ggplot(aes(max_num_SNPs, max_coverage, shape=chimeric)) + geom_point(alpha=0.4) + labs(y='Max coverage', x='Max no. 
SNPs') + facet_grid(assembler ~ Extensive_misassembly) + theme_bw() options(repr.plot.width=6.5, repr.plot.height=4.5) plot(p) ``` # Predictors of high edit dist ``` # summarizing continuous variable features feats_s = feats %>% mutate(edit_dist_norm = ifelse(is.na(edit_dist_norm), 1, edit_dist_norm), chimeric = ifelse(is.na(chimeric), TRUE, chimeric)) %>% group_by(contig, assembler) %>% summarize(chimeric = any(chimeric), edit_dist_norm = first(edit_dist_norm), min_coverage = min(coverage), median_coverage = median(coverage), max_coverage = max(coverage), min_num_SNPs = min(num_SNPs), median_num_SNPs = median(num_SNPs), max_num_SNPs = max(num_SNPs)) %>% ungroup() feats_s %>% summary ``` ## Coverage ``` p = feats_s %>% mutate(edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) %>% ggplot(aes(min_coverage, edit_dist_norm)) + geom_point(alpha=0.3) + scale_y_log10() + labs(x='Min coverage', y='Normalized\nedit distance') + facet_grid(assembler ~ chimeric) + theme_bw() options(repr.plot.width=6.5, repr.plot.height=4.5) plot(p) p = feats_s %>% mutate(edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) %>% ggplot(aes(median_coverage, edit_dist_norm)) + geom_point(alpha=0.3) + scale_y_log10() + labs(x='Median coverage', y='Normalized\nedit distance') + facet_grid(assembler ~ chimeric) + theme_bw() options(repr.plot.width=6.5, repr.plot.height=4.5) plot(p) p = feats_s %>% mutate(edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) %>% ggplot(aes(max_coverage, edit_dist_norm)) + geom_point(alpha=0.3) + scale_y_log10() + labs(x='Max coverage', y='Normalized\nedit distance') + facet_grid(assembler ~ chimeric) + theme_bw() options(repr.plot.width=6.5, repr.plot.height=4.5) plot(p) ``` ## Number of SNPs ``` p = feats_s %>% mutate(edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) %>% ggplot(aes(min_num_SNPs, edit_dist_norm)) + geom_point(alpha=0.3) + 
scale_y_log10() + labs(x='Min No. of SNPs', y='Normalized\nedit distance') + facet_grid(assembler ~ chimeric) + theme_bw() options(repr.plot.width=6.5, repr.plot.height=4.5) plot(p) p = feats_s %>% mutate(edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) %>% ggplot(aes(median_num_SNPs, edit_dist_norm)) + geom_point(alpha=0.3) + scale_y_log10() + labs(x='Median No. of SNPs', y='Normalized\nedit distance') + facet_grid(assembler ~ chimeric) + theme_bw() options(repr.plot.width=6.5, repr.plot.height=4.5) plot(p) p = feats_s %>% mutate(edit_dist_norm = ifelse(edit_dist_norm == 0, pseudo_low_edit_dist, edit_dist_norm)) %>% ggplot(aes(max_num_SNPs, edit_dist_norm)) + geom_point(alpha=0.3) + scale_y_log10() + labs(x='Max No. of SNPs', y='Normalized\nedit distance') + facet_grid(assembler ~ chimeric) + theme_bw() options(repr.plot.width=6.5, repr.plot.height=4.5) plot(p) ``` # sessionInfo ``` sessionInfo() ```
github_jupyter
# Adversarial Robustness - CIFAR-10 - Robustified Classifier via the Convex Outer Adversarial Polytope --- In this notebook, two state-of-the-art CNNs of 4 convolutional layers prior to the fully-connected part (trained on the CIFAR-10 normally and robustly using the "Convex Outer Adversarial Polytope" described in [Provable Defenses against Adversarial Examples via the Convex Outer Adversarial Polytope](https://arxiv.org/pdf/1711.00851.pdf) by Wong and Kolter) and their corresponding best accuracy distillated models undergo the same series of adversarial attacks to compare their "out-of-the-box" adversarial robustness. The adversarial attacks are generated using the cleverhans library [Technical Report on the cleverhans v2.1.0 Adversarial Examples Library](https://arxiv.org/pdf/1610.00768.pdf) by Papernot et al. The adversarial attacks tested: * Projected Gradient Descent [Towards deep learning models resistant to adversarial attacks](https://arxiv.org/pdf/1706.06083.pdf) by Madry et al.
The distillated models have been generated using the following technique: * "Matching the Logits" as described by Ba & Caruana in [Do Deep Nets Really Need to be Deep?](https://arxiv.org/pdf/1312.6184.pdf) ## Load the Necessary Packages ``` from google.colab import drive drive.mount('/content/drive') !pip install tensorflow==1.15.0 !pip install cleverhans import keras from keras.datasets import mnist from keras.models import Model from keras.layers import Dense, Dropout, Flatten ,Input from keras.layers import Conv2D, MaxPooling2D, Reshape, Add from keras.metrics import categorical_accuracy from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.layers import Activation from keras.utils.generic_utils import get_custom_objects from tensorflow.python.keras import backend as K from keras.preprocessing.image import array_to_img,img_to_array import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import keras from keras import optimizers from keras.callbacks import ReduceLROnPlateau, EarlyStopping from keras.models import Model from keras.layers import Lambda, concatenate, Activation from keras.losses import categorical_crossentropy as logloss from keras.metrics import categorical_accuracy, top_k_categorical_accuracy from keras import backend as K from keras.applications.mobilenet import preprocess_input import matplotlib.pyplot as plt %matplotlib inline from scipy.special import softmax from sklearn.model_selection import train_test_split from keras import models from keras import layers from scipy.io import loadmat session = tf.Session() session = tf.compat.v1.Session() keras.backend.set_session(session) ``` ## Attack Models ### Load Model and Data ``` def get_CNN_model(): #CNN network for classification cifar_model = models.Sequential(name='CNN') cifar_model.add(layers.Conv2D(32, 4, data_format='channels_first', activation='relu', strides = 2, padding='same', input_shape=(3, 32, 32), name='0')) cifar_model.add(layers.Conv2D(32, 
4, data_format='channels_first', activation='relu', strides = 2, padding='same', name='2')) cifar_model.add(layers.Conv2D(64, 4, data_format='channels_first',activation='relu', strides = 2, padding='same', name='4')) cifar_model.add(layers.Conv2D(64, 4, data_format='channels_first',activation='relu', strides = 2, padding='same', name='6')) cifar_model.add(Flatten(name='8')) cifar_model.add(layers.Dense(512, activation='relu', name='9')) cifar_model.add(layers.Dense(10, name='11')) return cifar_model def get_SCNN_model_layer2(channels_l1, channels_l2): #CNN network for classification svhn_model = models.Sequential(name='SCNN') svhn_model.add(layers.Conv2D(channels_l1, 4, data_format='channels_first', activation='relu', strides = 2, padding='same', input_shape=(3, 32, 32), name='0')) svhn_model.add(layers.Conv2D(channels_l2, 4, data_format='channels_first', activation='relu', strides = 2, padding='same', name='2')) svhn_model.add(Flatten(name='8')) svhn_model.add(layers.Dense(100, activation='relu', name='9')) svhn_model.add(layers.Dense(10, name="logit")) return svhn_model # Restore the data x_acc_test_old=np.load('/content/drive/My Drive/Colab Notebooks/SVHN/cifar_data/x_acc_test_cnn.npy') y_acc_test=np.load('/content/drive/My Drive/Colab Notebooks/SVHN/cifar_data/y_labels_acc_test.npy') x_acc_test=[] for i in range(10000): test = x_acc_test_old[i,:,:,:].T test_new = [] for j in range(3): test_new.append(test[j].T) x_acc_test.append(np.array(test_new)) x_acc_test_cnn = np.array(x_acc_test) ``` ### Define the PGD Attacks ``` from cleverhans.utils_keras import KerasModelWrapper from cleverhans.attacks import ProjectedGradientDescent def pgd_attack(svhn_model, epsilon): wrap = KerasModelWrapper(svhn_model) pgd = ProjectedGradientDescent(wrap, sess=session) pgd_params = {'eps': epsilon, 'eps_iter': 0.5*epsilon, 'nb_iter': 100, 'clip_min': 0., 'clip_max': 1.} adv_x = pgd.generate_np(x_acc_test_cnn, **pgd_params) score, adv_acc = svhn_model.evaluate(adv_x, 
keras.utils.to_categorical(y_acc_test, num_classes=10), batch_size=128) print("The PGD validation accuracy is: {}".format(adv_acc)) return score, adv_acc ``` ### Projected Gradient Descent #### Baseline & Robust (Teacher) ``` epsilon_accuracies_teacher_pgd = [] epsilon_values_teacher_pgd = [] # Load the model and wrap the network svhn_model = get_CNN_model() svhn_model = keras.models.load_model('/content/Baseline_Keras_CIFAR.h5') output = svhn_model.layers[-1].output output = Activation('softmax')(output) svhn_model = Model(svhn_model.input, output, name='SCNN') svhn_model.compile(optimizer='adam', loss='mse', metrics=['accuracy']) score, acc = svhn_model.evaluate(x_acc_test_cnn, keras.utils.to_categorical(y_acc_test, num_classes=10), batch_size=128) print ("Test Accuracy: %.5f" % acc) score, adv_acc = pgd_attack(svhn_model, 0.01) print ("0.01 acc:" + str(adv_acc)) for epsilon in np.arange(0.0, 0.1, 0.001): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) for epsilon in np.arange(0.1, 0.3, 0.1): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) plt.xlabel('Epsilon'); plt.ylabel('Accuracy After Attack'); plt.plot(epsilon_values_teacher_pgd, epsilon_accuracies_teacher_pgd) print ('Baseline:') print (epsilon_accuracies_teacher_pgd) print (epsilon_values_teacher_pgd) ########################################### epsilon_accuracies_teacher_pgd = [] epsilon_values_teacher_pgd = [] # Load the model and wrap the network svhn_model = get_CNN_model() svhn_model = keras.models.load_model('/content/Robust_Keras_CIFAR.h5') output = svhn_model.layers[-1].output output = Activation('softmax')(output) svhn_model = Model(svhn_model.input, output, name='SCNN') svhn_model.compile(optimizer='adam', loss='mse', metrics=['accuracy']) score, acc = svhn_model.evaluate(x_acc_test_cnn, 
keras.utils.to_categorical(y_acc_test, num_classes=10), batch_size=128) print ("Test Accuracy: %.5f" % acc) score, adv_acc = pgd_attack(svhn_model, 0.01) print ("0.01 acc:" + str(adv_acc)) for epsilon in np.arange(0.0, 0.1, 0.001): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) for epsilon in np.arange(0.1, 0.3, 0.1): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) plt.xlabel('Epsilon'); plt.ylabel('Accuracy After Attack'); plt.plot(epsilon_values_teacher_pgd, epsilon_accuracies_teacher_pgd) print ('Robust at 0.01:') print (epsilon_accuracies_teacher_pgd) print (epsilon_values_teacher_pgd) ``` #### Robust Retrained ``` epsilon_accuracies_teacher_pgd = [] epsilon_values_teacher_pgd = [] svhn_model = get_SCNN_model_layer2(16, 64) svhn_model = keras.models.load_model('/content/ReTrain_Keras_CIFAR.h5') output = svhn_model.layers[-1].output output = Activation('softmax')(output) svhn_model = Model(svhn_model.input, output, name='SCNN') svhn_model.compile(optimizer='adam', loss='mse', metrics=['accuracy']) score, acc = svhn_model.evaluate(x_acc_test_cnn, keras.utils.to_categorical(y_acc_test, num_classes=10), batch_size=128) print ("Test Accuracy: %.5f" % acc) score, adv_acc = pgd_attack(svhn_model, 0.01) print ("0.01 acc:" + str(adv_acc)) for epsilon in np.arange(0.0, 0.1, 0.001): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) for epsilon in np.arange(0.1, 0.3, 0.1): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) plt.xlabel('Epsilon'); plt.ylabel('Accuracy After Attack'); plt.plot(epsilon_values_teacher_pgd, epsilon_accuracies_teacher_pgd) print ('Robust Temperature ReTrained at 0.01:') print 
(epsilon_accuracies_teacher_pgd) print (epsilon_values_teacher_pgd) ``` #### Baseline & Robust (Student - Pruned) ``` epsilon_accuracies_teacher_pgd = [] epsilon_values_teacher_pgd = [] # Load the model and wrap the network svhn_model = get_SCNN_model_layer2(16, 32) svhn_model = keras.models.load_model('/content/SCNN_MIMIC_SVHN_h16_32.h5') output = svhn_model.layers[-1].output output = Activation('softmax')(output) svhn_model = Model(svhn_model.input, output, name='SCNN') svhn_model.compile(optimizer='adam', loss='mse', metrics=['accuracy']) score, acc = svhn_model.evaluate(x_acc_test_cnn, keras.utils.to_categorical(y_acc_test, num_classes=10), batch_size=128) print ("Test Accuracy: %.5f" % acc) score, adv_acc = pgd_attack(svhn_model, 0.01) print ("0.01 acc:" + str(adv_acc)) for epsilon in np.arange(0.0, 0.1, 0.001): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) for epsilon in np.arange(0.1, 0.3, 0.1): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) plt.xlabel('Epsilon'); plt.ylabel('Accuracy After Attack'); plt.plot(epsilon_values_teacher_pgd, epsilon_accuracies_teacher_pgd) print ('Baseline Distillated:') print (epsilon_accuracies_teacher_pgd) print (epsilon_values_teacher_pgd) ########################################### epsilon_accuracies_teacher_pgd = [] epsilon_values_teacher_pgd = [] # Load the model and wrap the network svhn_model = get_SCNN_model_layer2(16, 64) svhn_model = keras.models.load_model('/content/SCNN_MIMIC_SVHN_h16_64.h5') output = svhn_model.layers[-1].output output = Activation('softmax')(output) svhn_model = Model(svhn_model.input, output, name='SCNN') svhn_model.compile(optimizer='adam', loss='mse', metrics=['accuracy']) score, acc = svhn_model.evaluate(x_acc_test_cnn, keras.utils.to_categorical(y_acc_test, num_classes=10), batch_size=128) print ("Test 
Accuracy: %.5f" % acc) score, adv_acc = pgd_attack(svhn_model, 0.01) print ("0.01 acc:" + str(adv_acc)) for epsilon in np.arange(0.0, 0.1, 0.001): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) for epsilon in np.arange(0.1, 0.3, 0.1): score, adv_acc = pgd_attack(svhn_model, epsilon) epsilon_accuracies_teacher_pgd.append(adv_acc) epsilon_values_teacher_pgd.append(epsilon) plt.xlabel('Epsilon'); plt.ylabel('Accuracy After Attack'); plt.plot(epsilon_values_teacher_pgd, epsilon_accuracies_teacher_pgd) print ('Robust Distillated:') print (epsilon_accuracies_teacher_pgd) print (epsilon_values_teacher_pgd) ``` ## Results ``` epsilon_values = [0.0, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009000000000000001, 0.01, 0.011, 0.012, 0.013000000000000001, 0.014, 0.015, 0.016, 0.017, 0.018000000000000002, 0.019, 0.02, 0.021, 0.022, 0.023, 0.024, 0.025, 0.026000000000000002, 0.027, 0.028, 0.029, 0.03, 0.031, 0.032, 0.033, 0.034, 0.035, 0.036000000000000004, 0.037, 0.038, 0.039, 0.04, 0.041, 0.042, 0.043000000000000003, 0.044, 0.045, 0.046, 0.047, 0.048, 0.049, 0.05, 0.051000000000000004, 0.052000000000000005, 0.053, 0.054, 0.055, 0.056, 0.057, 0.058, 0.059000000000000004, 0.06, 0.061, 0.062, 0.063, 0.064, 0.065, 0.066, 0.067, 0.068, 0.069, 0.07, 0.07100000000000001, 0.07200000000000001, 0.073, 0.074, 0.075, 0.076, 0.077, 0.078, 0.079, 0.08, 0.081, 0.082, 0.083, 0.084, 0.085, 0.08600000000000001, 0.08700000000000001, 0.088, 0.089, 0.09, 0.091, 0.092, 0.093, 0.094, 0.095, 0.096, 0.097, 0.098, 0.099, 0.1, 0.2] #Baseline: baseline=[0.5932999849319458, 0.5867999792098999, 0.5723000168800354, 0.5496000051498413, 0.5264999866485596, 0.49790000915527344, 0.4657000005245209, 0.43320000171661377, 0.4032999873161316, 0.3720000088214874, 0.3465000092983246, 0.3188000023365021, 0.2928999960422516, 0.27320000529289246, 0.2538999915122986, 0.23489999771118164, 0.2184000015258789, 
0.20839999616146088, 0.19550000131130219, 0.18619999289512634, 0.17569999396800995, 0.16979999840259552, 0.16200000047683716, 0.15839999914169312, 0.15360000729560852, 0.14630000293254852, 0.14399999380111694, 0.1412000060081482, 0.1378999948501587, 0.13539999723434448, 0.13230000436306, 0.12970000505447388, 0.12919999659061432, 0.12880000472068787, 0.1281999945640564, 0.12720000743865967, 0.12710000574588776, 0.12800000607967377, 0.12620000541210175, 0.12690000236034393, 0.12389999628067017, 0.12530000507831573, 0.12219999730587006, 0.12250000238418579, 0.12269999831914902, 0.12300000339746475, 0.12240000069141388, 0.12150000035762787, 0.11860000342130661, 0.12300000339746475, 0.11940000206232071, 0.11949999630451202, 0.11990000307559967, 0.11980000138282776, 0.11990000307559967, 0.12099999934434891, 0.11949999630451202, 0.11819999665021896, 0.11779999732971191, 0.11749999970197678, 0.11800000071525574, 0.11779999732971191, 0.11729999631643295, 0.1152999997138977, 0.11909999698400497, 0.11670000106096268, 0.11680000275373459, 0.11640000343322754, 0.11580000072717667, 0.1151999980211258, 0.1137000024318695, 0.1151999980211258, 0.11339999735355377, 0.11630000174045563, 0.11400000005960464, 0.11599999666213989, 0.11389999836683273, 0.11299999803304672, 0.1136000007390976, 0.11389999836683273, 0.11469999700784683, 0.11309999972581863, 0.11330000311136246, 0.11299999803304672, 0.1103999987244606, 0.1137000024318695, 0.11389999836683273, 0.11410000175237656, 0.11129999905824661, 0.11050000041723251, 0.11150000244379044, 0.11209999769926071, 0.10840000212192535, 0.11129999905824661, 0.11089999973773956, 0.11100000143051147, 0.11050000041723251, 0.11110000312328339, 0.10890000313520432, 0.11259999871253967, 0.11060000211000443, 0.0868000015616417] #Robust at 0.01: robust=[0.5317999720573425, 0.529699981212616, 0.5303000211715698, 0.5302000045776367, 0.5313000082969666, 0.527999997138977, 0.5230000019073486, 0.5185999870300293, 0.5145000219345093, 0.511900007724762, 
0.5070000290870667, 0.5034999847412109, 0.49939998984336853, 0.49540001153945923, 0.49070000648498535, 0.4851999878883362, 0.47999998927116394, 0.4731999933719635, 0.4659000039100647, 0.460999995470047, 0.454800009727478, 0.4496999979019165, 0.4424999952316284, 0.438400000333786, 0.4307999908924103, 0.42260000109672546, 0.41589999198913574, 0.40799999237060547, 0.39910000562667847, 0.3921999931335449, 0.38659998774528503, 0.37860000133514404, 0.37130001187324524, 0.36320000886917114, 0.35339999198913574, 0.3449000120162964, 0.33739998936653137, 0.3301999866962433, 0.32269999384880066, 0.31610000133514404, 0.30880001187324524, 0.30250000953674316, 0.2939999997615814, 0.288100004196167, 0.28130000829696655, 0.2752000093460083, 0.2703999876976013, 0.26330000162124634, 0.2574000060558319, 0.250900000333786, 0.24539999663829803, 0.24070000648498535, 0.23420000076293945, 0.2313999980688095, 0.2281000018119812, 0.2215999960899353, 0.21709999442100525, 0.21230000257492065, 0.20679999887943268, 0.20430000126361847, 0.1981000006198883, 0.1964000016450882, 0.1899999976158142, 0.18700000643730164, 0.18400000035762787, 0.1800999939441681, 0.17829999327659607, 0.1737000048160553, 0.1728000044822693, 0.16769999265670776, 0.16459999978542328, 0.1607999950647354, 0.16120000183582306, 0.15710000693798065, 0.15530000627040863, 0.15209999680519104, 0.14949999749660492, 0.15049999952316284, 0.14650000631809235, 0.14560000598430634, 0.1444000005722046, 0.1420000046491623, 0.14149999618530273, 0.13750000298023224, 0.13680000603199005, 0.13619999587535858, 0.1347000002861023, 0.1316000074148178, 0.13210000097751617, 0.12860000133514404, 0.1290999948978424, 0.1266999989748001, 0.12489999830722809, 0.12439999729394913, 0.12349999696016312, 0.12240000069141388, 0.12020000070333481, 0.1200999990105629, 0.11819999665021896, 0.11729999631643295, 0.11620000004768372, 0.08309999853372574] #Baseline Distillated: baseline_distill=[0.5924999713897705, 0.5859000086784363, 0.5724999904632568, 
0.5582000017166138, 0.5360000133514404, 0.5098000168800354, 0.4862000048160553, 0.4609000086784363, 0.429500013589859, 0.4034999907016754, 0.37599998712539673, 0.35040000081062317, 0.3264000117778778, 0.3043999969959259, 0.28299999237060547, 0.26409998536109924, 0.2475000023841858, 0.23090000450611115, 0.21780000627040863, 0.20730000734329224, 0.19550000131130219, 0.18520000576972961, 0.1754000037908554, 0.16699999570846558, 0.16249999403953552, 0.15559999644756317, 0.15129999816417694, 0.1477999985218048, 0.14429999887943268, 0.14229999482631683, 0.13950000703334808, 0.13910000026226044, 0.1362999975681305, 0.133200004696846, 0.13369999825954437, 0.13169999420642853, 0.1315000057220459, 0.13050000369548798, 0.12759999930858612, 0.12720000743865967, 0.12559999525547028, 0.125900000333786, 0.12470000237226486, 0.12370000034570694, 0.12319999933242798, 0.12349999696016312, 0.12380000203847885, 0.12160000205039978, 0.12110000103712082, 0.12139999866485596, 0.120899997651577, 0.11969999969005585, 0.11919999867677689, 0.12160000205039978, 0.11829999834299088, 0.11789999902248383, 0.1160999983549118, 0.11819999665021896, 0.11720000207424164, 0.11620000004768372, 0.11760000139474869, 0.11900000274181366, 0.11710000038146973, 0.11620000004768372, 0.11460000276565552, 0.11389999836683273, 0.11469999700784683, 0.11219999939203262, 0.11410000175237656, 0.11289999634027481, 0.1120000034570694, 0.11309999972581863, 0.11339999735355377, 0.11230000108480453, 0.11240000277757645, 0.1120000034570694, 0.11339999735355377, 0.1111999973654747, 0.1120000034570694, 0.11140000075101852, 0.11140000075101852, 0.11079999804496765, 0.11020000278949738, 0.11100000143051147, 0.11010000109672546, 0.10689999908208847, 0.1080000028014183, 0.10909999907016754, 0.10740000009536743, 0.10700000077486038, 0.10719999670982361, 0.10920000076293945, 0.10909999907016754, 0.10540000349283218, 0.1054999977350235, 0.10530000180006027, 0.10610000044107437, 0.10540000349283218, 0.10450000315904617, 
0.10409999638795853, 0.10350000113248825, 0.08429999649524689] #Robust at 0.01 Distillated: robust_distill=[0.5288000106811523, 0.5266000032424927, 0.5231999754905701, 0.5188999772071838, 0.5156999826431274, 0.5080000162124634, 0.5023999810218811, 0.4925999939441681, 0.48330000042915344, 0.4749000072479248, 0.4643999934196472, 0.4537000060081482, 0.4408000111579895, 0.42579999566078186, 0.41280001401901245, 0.4016000032424927, 0.38940000534057617, 0.37709999084472656, 0.36640000343322754, 0.35359999537467957, 0.34049999713897705, 0.3278000056743622, 0.31700000166893005, 0.304500013589859, 0.2928999960422516, 0.28200000524520874, 0.27149999141693115, 0.2621000111103058, 0.2540000081062317, 0.2451000064611435, 0.23330000042915344, 0.22609999775886536, 0.21719999611377716, 0.21040000021457672, 0.20229999721050262, 0.19460000097751617, 0.1881999969482422, 0.18060000240802765, 0.17560000717639923, 0.1704999953508377, 0.16609999537467957, 0.16140000522136688, 0.15809999406337738, 0.15639999508857727, 0.15139999985694885, 0.1485999971628189, 0.14569999277591705, 0.14339999854564667, 0.14069999754428864, 0.13819999992847443, 0.13760000467300415, 0.13439999520778656, 0.1324000060558319, 0.131400004029274, 0.13089999556541443, 0.12950000166893005, 0.12800000607967377, 0.12639999389648438, 0.12620000541210175, 0.12520000338554382, 0.12430000305175781, 0.12250000238418579, 0.12160000205039978, 0.12110000103712082, 0.11800000071525574, 0.11789999902248383, 0.11829999834299088, 0.11819999665021896, 0.11649999767541885, 0.1168999969959259, 0.11670000106096268, 0.11420000344514847, 0.11810000240802765, 0.1152999997138977, 0.11339999735355377, 0.11429999768733978, 0.1136000007390976, 0.11400000005960464, 0.11330000311136246, 0.11309999972581863, 0.1137000024318695, 0.11330000311136246, 0.11249999701976776, 0.11129999905824661, 0.1128000020980835, 0.11180000007152557, 0.11089999973773956, 0.11150000244379044, 0.10930000245571136, 0.11100000143051147, 0.10899999737739563, 
0.10830000042915344, 0.10920000076293945, 0.10899999737739563, 0.11060000211000443, 0.1071000024676323, 0.10790000110864639, 0.10750000178813934, 0.10890000313520432, 0.10830000042915344, 0.10610000044107437, 0.09080000221729279] #Robust ReTrained: robust_retrain=[0.5566999912261963, 0.557200014591217, 0.5541999936103821, 0.5478000044822693, 0.5435000061988831, 0.5406000018119812, 0.532800018787384, 0.5242999792098999, 0.5181000232696533, 0.5105999708175659, 0.5026000142097473, 0.49320000410079956, 0.48350000381469727, 0.46959999203681946, 0.45820000767707825, 0.4490000009536743, 0.43790000677108765, 0.4278999865055084, 0.4185999929904938, 0.4068000018596649, 0.3978999853134155, 0.3873000144958496, 0.37549999356269836, 0.36629998683929443, 0.3571999967098236, 0.3488999903202057, 0.33899998664855957, 0.3287000060081482, 0.3188999891281128, 0.3082999885082245, 0.30219998955726624, 0.2946999967098236, 0.28780001401901245, 0.27889999747276306, 0.273499995470047, 0.2669000029563904, 0.26019999384880066, 0.2535000145435333, 0.24799999594688416, 0.2434999942779541, 0.23649999499320984, 0.23280000686645508, 0.2290000021457672, 0.22450000047683716, 0.22020000219345093, 0.21619999408721924, 0.21209999918937683, 0.20810000598430634, 0.2046000063419342, 0.2021999955177307, 0.19820000231266022, 0.19850000739097595, 0.193900004029274, 0.19050000607967377, 0.18850000202655792, 0.18799999356269836, 0.1859000027179718, 0.18240000307559967, 0.179299995303154, 0.17829999327659607, 0.17649999260902405, 0.17470000684261322, 0.1736000031232834, 0.17419999837875366, 0.17159999907016754, 0.17069999873638153, 0.1688999980688095, 0.16750000417232513, 0.1679999977350235, 0.1647000014781952, 0.1648000031709671, 0.16539999842643738, 0.1647000014781952, 0.16220000386238098, 0.1624000072479248, 0.16130000352859497, 0.1599999964237213, 0.16030000150203705, 0.16089999675750732, 0.1590999960899353, 0.1574999988079071, 0.15809999406337738, 0.15919999778270721, 0.1559000015258789, 0.1573999971151352, 
0.15800000727176666, 0.15459999442100525, 0.15610000491142273, 0.156700000166893, 0.1543000042438507, 0.15629999339580536, 0.15479999780654907, 0.15440000593662262, 0.15230000019073486, 0.1534000039100647, 0.15399999916553497, 0.15289999544620514, 0.15330000221729279, 0.1550000011920929, 0.15320000052452087, 0.1535000056028366, 0.14509999752044678] import matplotlib.path as mpath import numpy as np star = mpath.Path.unit_regular_star(6) circle = mpath.Path.unit_circle() # concatenate the circle with an internal cutout of the star verts = np.concatenate([circle.vertices, star.vertices[::-1, ...]]) codes = np.concatenate([circle.codes, star.codes]) cut_star = mpath.Path(verts, codes) fig = plt.figure() fig.set_size_inches(25.5, 15.5) plt.plot(epsilon_values , baseline , label='Baseline', marker=cut_star, markersize=15) plt.plot(epsilon_values , robust , label='Robust', marker=cut_star, markersize=15) plt.title('Projected Gradient Descent', fontsize=18) plt.xlabel('Epsilon', fontsize=18); plt.ylabel('Accuracy in the Test Set', fontsize=18); plt.legend() plt.show() import matplotlib.path as mpath import numpy as np star = mpath.Path.unit_regular_star(6) circle = mpath.Path.unit_circle() # concatenate the circle with an internal cutout of the star verts = np.concatenate([circle.vertices, star.vertices[::-1, ...]]) codes = np.concatenate([circle.codes, star.codes]) cut_star = mpath.Path(verts, codes) fig = plt.figure() fig.set_size_inches(25.5, 15.5) plt.plot(epsilon_values , baseline_distill , label='Baseline - Distillated', marker=cut_star, markersize=15) plt.plot(epsilon_values , robust_distill , label='Robust - Distillated', marker=cut_star, markersize=15) plt.title('Projected Gradient Descent', fontsize=18) plt.xlabel('Epsilon', fontsize=18); plt.ylabel('Accuracy in the Test Set', fontsize=18); plt.legend() plt.show() import matplotlib.path as mpath import numpy as np star = mpath.Path.unit_regular_star(6) circle = mpath.Path.unit_circle() # concatenate the circle 
with an internal cutout of the star verts = np.concatenate([circle.vertices, star.vertices[::-1, ...]]) codes = np.concatenate([circle.codes, star.codes]) cut_star = mpath.Path(verts, codes) fig = plt.figure() fig.set_size_inches(25.5, 15.5) plt.plot(epsilon_values , baseline_distill , label='Baseline - Distillated', marker=cut_star, markersize=15) plt.plot(epsilon_values , robust_distill , label='Robust - Distillated', marker=cut_star, markersize=15) plt.plot(epsilon_values , robust , label='Robust', marker=cut_star, markersize=15) plt.title('Projected Gradient Descent', fontsize=18) plt.xlabel('Epsilon', fontsize=18); plt.ylabel('Accuracy in the Test Set', fontsize=18); plt.legend() plt.show() import matplotlib.path as mpath import numpy as np star = mpath.Path.unit_regular_star(6) circle = mpath.Path.unit_circle() # concatenate the circle with an internal cutout of the star verts = np.concatenate([circle.vertices, star.vertices[::-1, ...]]) codes = np.concatenate([circle.codes, star.codes]) cut_star = mpath.Path(verts, codes) fig = plt.figure() fig.set_size_inches(25.5, 15.5) plt.plot(epsilon_values[:40] , baseline[:40] , label='Baseline', marker=cut_star, markersize=15) plt.plot(epsilon_values[:40] , baseline_distill[:40] , label='Baseline-Distill', marker=cut_star, markersize=15) plt.title('Projected Gradient Descent', fontsize=18) plt.xlabel('Epsilon', fontsize=18); plt.ylabel('Accuracy in the Test Set', fontsize=18); plt.legend() plt.show() import matplotlib.path as mpath import numpy as np star = mpath.Path.unit_regular_star(6) circle = mpath.Path.unit_circle() # concatenate the circle with an internal cutout of the star verts = np.concatenate([circle.vertices, star.vertices[::-1, ...]]) codes = np.concatenate([circle.codes, star.codes]) cut_star = mpath.Path(verts, codes) fig = plt.figure() fig.set_size_inches(25.5, 15.5) plt.plot(epsilon_values , baseline_distill , label='Baseline - Distillated (MSE)', marker=cut_star, markersize=15) 
plt.plot(epsilon_values , robust_distill , label='Robust - Distillated (MSE)', marker=cut_star, markersize=15) plt.plot(epsilon_values , robust , label='Robust', marker=cut_star, markersize=15) plt.plot(epsilon_values , robust_retrain , label='Robust - ReTrained', marker=cut_star, markersize=15) plt.title('Projected Gradient Descent', fontsize=18) plt.xlabel('Epsilon', fontsize=18); plt.ylabel('Accuracy in the Test Set', fontsize=18); plt.legend() plt.show() ```
github_jupyter
# Installation - Run these commands - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git - cd Monk_Object_Detection/3_mxrcnn/installation - Select the right requirements file and run - cat requirements_cuda9.0.txt | xargs -n 1 -L 1 pip install # About the Network 1. Blog 1 on Resnet Network - https://medium.com/@14prakash/understanding-and-implementing-architectures-of-resnet-and-resnext-for-state-of-the-art-image-cf51669e1624 2. Blog 2 on Resnet Network - https://neurohive.io/en/popular-networks/resnet/ 3. Blog 3 on Resnet Network - https://cv-tricks.com/keras/understand-implement-resnets/ 4. Blog 4 on Residual blocks - https://d2l.ai/chapter_convolutional-modern/resnet.html 5. Blog 1 on rcnn - https://medium.com/coinmonks/review-r-cnn-object-detection-b476aba290d1 6. Blog 2 on rcnn - https://towardsdatascience.com/r-cnn-3a9beddfd55a 7. Blog 1 on fast-rcnn - https://medium.com/coinmonks/review-fast-r-cnn-object-detection-a82e172e87ba 8. Blog 2 on fast-rcnn - https://towardsdatascience.com/part-2-fast-r-cnn-object-detection-7303e1988464 9. Blog 1 on faster rcnn - https://towardsdatascience.com/review-faster-r-cnn-object-detection-f5685cb30202 10. 
Blog 2 on faster rcnn - https://www.analyticsvidhya.com/blog/2018/11/implementation-faster-r-cnn-python-object-detection/ # COCO Format ## Dataset Directory Structure ../sample_dataset (root_dir) | |------kangaroo (coco_dir) | | | |---Images (img_dir) | |----| | |-------------------img1.jpg | |-------------------img2.jpg | |-------------------.........(and so on) | | | |---annotations (anno_dir) | |----| | |--------------------instances_Train.json | |--------------------classes.txt - instances_Train.json -> In proper COCO format - classes.txt -> A list of classes in alphabetical order ``` import os import sys sys.path.append("../../3_mxrcnn/lib/") sys.path.append("../../3_mxrcnn/lib/mx-rcnn") from train_base import * # Dataset params root_dir = "../sample_dataset/"; coco_dir = "kangaroo"; img_dir = "Images"; set_dataset_params(root_dir=root_dir, coco_dir=coco_dir, imageset=img_dir); # Model Type set_model_params(model_name="resnet101"); # Hyper Params set_hyper_params(gpus="0", lr=0.001, lr_decay_epoch="1", epochs=5, batch_size=1); set_output_params(log_interval=100, save_prefix="model_resnet101"); # Preprocessing params set_img_preproc_params(img_short_side=600, img_long_side=1000, mean=(123.68, 116.779, 103.939), std=(1.0, 1.0, 1.0)); # Initialize params initialize_rpn_params(); initialize_rcnn_params(); # Remove caches if any if os.path.isdir("../sample_dataset/cache/"): os.system("rm -r ../sample_dataset/cache/") # Invoke Dataloader roidb = set_dataset(); #Network sym = set_network(); # Train train(sym, roidb); ``` # Running Inference ``` import os import sys sys.path.append("../../3_mxrcnn/lib/") sys.path.append("../../3_mxrcnn/lib/mx-rcnn") from infer_base import * class_file = set_class_list("../sample_dataset/kangaroo/annotations/classes.txt"); #Model - Select the model as per number of iterations it has been trained for set_model_params(model_name="resnet101", model_path="trained_model/model_resnet101-0004.params"); #Hyper Params set_hyper_params(gpus="0", 
batch_size=1); # Preprocessing set_img_preproc_params(img_short_side=600, img_long_side=1000, mean=(123.68, 116.779, 103.939), std=(1.0, 1.0, 1.0)); #Initalization initialize_rpn_params(); initialize_rcnn_params(); #Network sym = set_network(); mod = load_model(sym); #Load Image and infer set_output_params(vis_thresh=0.8, vis=True) Infer("../sample_dataset/kangaroo/test/kg1.jpeg", mod); set_output_params(vis_thresh=0.8, vis=True) Infer("../sample_dataset/kangaroo/test/kg2.jpeg", mod); set_output_params(vis_thresh=0.8, vis=True) Infer("../sample_dataset/kangaroo/test/kg3.jpeg", mod); set_output_params(vis_thresh=0.8, vis=True) Infer("../sample_dataset/kangaroo/test/kg4.jpeg", mod); set_output_params(vis_thresh=0.7, vis=True) Infer("../sample_dataset/kangaroo/test/kg5.jpeg", mod); ```
github_jupyter
# Imports and Simulation Parameters ``` import numpy as np import math import cmath import scipy import scipy.integrate import sys import matplotlib import matplotlib.pyplot as plt %matplotlib inline hbar = 1.0 / (2.0 * np.pi) ZERO_TOLERANCE = 10**-6 MAX_VIBRATIONAL_STATES = 200 STARTING_GROUND_STATES = 5 STARTING_EXCITED_STATES = 5 time_scale_set = 10 #will divide the highest energy to give us the time step low_frequency_cycles = 25.0 #will multiply the frequency of the lowest frequency mode to get the max time #See if a factorial_Storage dictionary exists already and if not, create one try: a = factorial_storage except: factorial_storage = {} ``` # Defining Parameters of the System ``` energy_g = 0 energy_gamma = .1 energy_e = 0 energy_epsilon = .31 Huang_Rhys_Parameter = .80 S = Huang_Rhys_Parameter #bookkeeping overlap_storage = {} electronic_energy_gap = energy_e + .5*energy_epsilon - (energy_g + .5 * energy_gamma) min_energy = energy_g + energy_gamma * .5 mu_0 = 1.0 ``` If we set the central frequency of a pulse at the 0->0 transition, and we decide we care about the ratio of the 0->1 transition to the 0->0 transition and set that to be $\tau$ then the desired pulse width will be \begin{align} \sigma &= \sqrt{-\frac{2 \ln (\tau)}{\omega_{\epsilon}^2}} \end{align} ``` def blank_wavefunction(number_ground_states, number_excited_states): return np.zeros((number_ground_states + number_excited_states)) def perturbing_function(time): # stdev = 30000.0 * dt #very specific to 0->0 transition! 
stdev = 3000.0 * dt #clearly has a small amount of amplitude on 0->1 transition center = 6 * stdev return np.cos(electronic_energy_gap*(time - center) / hbar)*np.exp( - (time - center)**2 / (2 * stdev**2)) / stdev def time_function_handle_from_tau(tau_proportion): stdev = np.sqrt( -2.0 * np.log(tau_proportion) / (energy_epsilon/hbar)**2) center = 6 * stdev return center, stdev, lambda t: np.cos(electronic_energy_gap*(t - center) / hbar)*np.exp( - (t - center)**2 / (2 * stdev**2)) / stdev def time_function_handle_from_tau_and_kappa(tau_proportion, kappa_proportion): stdev = np.sqrt( -2.0 * np.log(tau_proportion) / (energy_epsilon/hbar)**2) center = 6 * stdev return center, stdev, lambda t: kappa_proportion * energy_gamma * np.cos(electronic_energy_gap*(t - center) / hbar)*np.exp( - (t - center)**2 / (2 * stdev**2)) / stdev def perturbing_function_define_tau(time, tau_proportion): center, stdev, f = time_function_handle_from_tau(tau_proportion) return f(time) ``` # Defining Useful functions $ O_{m}^{n} = \left(-1\right)^{n} \sqrt{\frac{e^{-S}S^{m+n}}{m!n!}} \sum_{j=0}^{\min \left( m,n \right)} \frac{m!n!}{j!(m-j)!(n-j)!}(-1)^j S^{-j} $ ``` def factorial(i): if i in factorial_storage: return factorial_storage[i] if i <= 1: return 1.0 else: out = factorial(i - 1) * i factorial_storage[i] = out return out def ndarray_factorial(i_array): return np.array([factorial(i) for i in i_array]) def overlap_function(ground_quantum_number, excited_quantum_number): m = ground_quantum_number n = excited_quantum_number if (m,n) in overlap_storage: return overlap_storage[(m,n)] output = (-1)**n output *= math.sqrt(math.exp(-S) * S**(m + n) /(factorial(m) * factorial(n)) ) j_indeces = np.array(range(0, min(m,n) + 1)) j_summation = factorial(m) * factorial(n) * np.power(-1.0, j_indeces) * np.power(S, -j_indeces) j_summation = j_summation / (ndarray_factorial(j_indeces) * ndarray_factorial( m - j_indeces) * ndarray_factorial(n - j_indeces) ) output *= np.sum(j_summation) 
overlap_storage[(m,n)] = output return output ``` # Solving the Differential Equation \begin{align*} \left(\frac{d G_a(t)}{dt} + \frac{i}{\hbar}\Omega_{(a)}\right) &=-E(t)\frac{i}{\hbar} \sum_{b} E_b(t) \mu_{a}^{b}\\ \left(\frac{d E_b(t)}{dt} + \frac{i}{\hbar} \Omega^{(b)} \right) &=-E(t)\frac{i}{\hbar} \sum_{a} G_a(t) \mu_{a}^{b} \end{align*} Or in a more compact form: \begin{align*} \frac{d}{dt}\begin{bmatrix} G_a(t) \\ E_b(t) \end{bmatrix} = -\frac{i}{\hbar} \begin{bmatrix} \Omega_{(a)} & E(t) \mu_{a}^{b} \\ E(t) \mu_{a}^{b} & \Omega^{b} \end{bmatrix} \cdot \begin{bmatrix} G_a(t) \\ E_b(t) \end{bmatrix} \end{align*} ``` def ode_diagonal_matrix(number_ground_states, number_excited_states): #Define the Matrix on the RHS of the above equation ODE_DIAGONAL_MATRIX = np.zeros((number_ground_states + number_excited_states, number_ground_states + number_excited_states), dtype=np.complex) #set the diagonals for ground_i in range(number_ground_states): ODE_DIAGONAL_MATRIX[ground_i, ground_i] = -1.0j * (energy_g + energy_gamma * (ground_i + .5)) / hbar for excited_i in range(number_excited_states): excited_index = excited_i + number_ground_states #the offset since the excited states comes later ODE_DIAGONAL_MATRIX[excited_index, excited_index] = -1.0j * (energy_e + energy_epsilon * (excited_i + .5)) / hbar return ODE_DIAGONAL_MATRIX #now for the off-diagonals def mu_matrix(c, number_ground_states, number_excited_states): MU_MATRIX = np.zeros((number_ground_states, number_excited_states), dtype = np.complex) for ground_a in range(number_ground_states): for excited_b in range(number_excited_states): new_mu_entry = overlap_function(ground_a, excited_b) if ground_a >0: new_mu_entry += c * math.sqrt(ground_a) * overlap_function(ground_a - 1, excited_b) new_mu_entry += c * math.sqrt(ground_a+1) * overlap_function(ground_a + 1, excited_b) MU_MATRIX[ground_a, excited_b] = new_mu_entry return MU_MATRIX def ode_off_diagonal_matrix(c_value, number_ground_states, 
number_excited_states): output = np.zeros((number_ground_states + number_excited_states, number_ground_states + number_excited_states), dtype=np.complex) MU_MATRIX = mu_matrix(c_value, number_ground_states, number_excited_states) output[0:number_ground_states, number_ground_states:] = -1.0j * mu_0 * MU_MATRIX / hbar output[number_ground_states:, 0:number_ground_states] = -1.0j * mu_0 * MU_MATRIX.T / hbar return output def IR_transition_dipoles(number_ground_states, number_excited_states): "outputs matrices to calculate ground and excited state IR emission spectra. Can be combined for total" output_g = np.zeros((number_ground_states + number_excited_states, number_ground_states + number_excited_states), dtype=np.complex) output_e = np.zeros((number_ground_states + number_excited_states, number_ground_states + number_excited_states), dtype=np.complex) for ground_a in range(number_ground_states): try: output_g[ground_a, ground_a + 1] = math.sqrt(ground_a + 1) output_g[ground_a + 1, ground_a] = math.sqrt(ground_a + 1) except: pass try: output_g[ground_a, ground_a - 1] = math.sqrt(ground_a) output_g[ground_a - 1, ground_a] = math.sqrt(ground_a) except: pass for excited_a in range(number_excited_states): matrix_index_e = number_ground_states + excited_a -1 #because of how 'number_ground_states' is defined try: output_e[matrix_index_e, matrix_index_e + 1] = math.sqrt(excited_a + 1) output_e[matrix_index_e + 1, matrix_index_e] = math.sqrt(excited_a + 1) except: pass try: output_e[matrix_index_e, matrix_index_e - 1] = math.sqrt(excited_a) output_e[matrix_index_e - 1, matrix_index_e] = math.sqrt(excited_a) except: pass return output_g, output_e ``` \begin{align*} \mu(x) &= \mu_0 \left(1 + \lambda x \right) \\ &= \mu_0 \left(1 + c\left(a + a^{\dagger} \right) \right) \\ \mu_{a}^{b} &= \mu_0\left(O_{a}^{b} + c\left(\sqrt{a}O_{a-1}^{b} + \sqrt{a+1}O_{a+1}^{b}\right) \right) \end{align*} ``` class VibrationalStateOverFlowException(Exception): def __init__(self): pass def 
propagate_amplitude_to_end_of_perturbation(c_value, ratio_01_00, kappa=1, starting_ground_states=STARTING_GROUND_STATES, starting_excited_states=STARTING_EXCITED_STATES): center_time, stdev, time_function = time_function_handle_from_tau_and_kappa(ratio_01_00, kappa) ending_time = center_time + 8.0 * stdev number_ground_states = starting_ground_states number_excited_states = starting_excited_states while number_excited_states + number_ground_states < MAX_VIBRATIONAL_STATES: #define time scales max_energy = energy_e + energy_epsilon * (.5 + number_excited_states) + kappa * energy_gamma * mu_0 dt = 1.0 / (time_scale_set * max_energy) ODE_DIAGONAL = ode_diagonal_matrix(number_ground_states, number_excited_states) ODE_OFF_DIAGONAL = ode_off_diagonal_matrix(c_value, number_ground_states, number_excited_states) def ODE_integrable_function(time, coefficient_vector): ODE_TOTAL_MATRIX = ODE_OFF_DIAGONAL * time_function(time) + ODE_DIAGONAL return np.dot(ODE_TOTAL_MATRIX, coefficient_vector) #define the starting wavefuntion initial_conditions = blank_wavefunction(number_ground_states, number_excited_states) initial_conditions[0] = 1 #create ode solver current_time = 0.0 ode_solver = scipy.integrate.complex_ode(ODE_integrable_function) ode_solver.set_initial_value(initial_conditions, current_time) #Run it results = [] try: #this block catches an overflow into the highest ground or excited vibrational state while current_time < ending_time: # print(current_time, ZERO_TOLERANCE) #update time, perform solution current_time = ode_solver.t+dt new_result = ode_solver.integrate(current_time) results.append(new_result) #make sure solver was successful if not ode_solver.successful(): raise Exception("ODE Solve Failed!") #make sure that there hasn't been substantial leakage to the highest excited states re_start_calculation = False if abs(new_result[number_ground_states - 1])**2 >= ZERO_TOLERANCE: number_ground_states +=1 # print("Increasing Number of Ground vibrational states to %i " % 
number_ground_states) re_start_calculation = True if abs(new_result[-1])**2 >= ZERO_TOLERANCE: number_excited_states +=1 # print("Increasing Number of excited vibrational states to %i " % number_excited_states) re_start_calculation = True if re_start_calculation: raise VibrationalStateOverFlowException() except VibrationalStateOverFlowException: #Move on and re-start the calculation continue #Finish calculating results = np.array(results) return results, number_ground_states, number_excited_states raise Exception("NEEDED TOO MANY VIBRATIONAL STATES! RE-RUN WITH DIFFERENT PARAMETERS!") def get_average_quantum_number_time_series(c_value, ratio_01_00, kappa=1, starting_ground_states=STARTING_GROUND_STATES, starting_excited_states=STARTING_EXCITED_STATES): results, number_ground_states, number_excited_states = propagate_amplitude_to_end_of_perturbation(c_value, ratio_01_00, kappa, starting_ground_states, starting_excited_states) probabilities = np.abs(results)**2 #calculate the average_vibrational_quantum_number series average_ground_quantum_number = probabilities[:,0:number_ground_states].dot(np.array(range(number_ground_states)) ) average_excited_quantum_number = probabilities[:,number_ground_states:].dot(np.array(range(number_excited_states))) return average_ground_quantum_number, average_excited_quantum_number, results, number_ground_states, number_excited_states def IR_emission_spectrum_after_excitation(c_value, ratio_01_00, kappa=1, starting_ground_states=STARTING_GROUND_STATES, starting_excited_states=STARTING_EXCITED_STATES): center_time, stdev, time_function = time_function_handle_from_tau_and_kappa(ratio_01_00, kappa) perturbation_ending_time = center_time + 8.0 * stdev simulation_ending_time = perturbation_ending_time + low_frequency_cycles * hbar/min_energy number_ground_states = starting_ground_states number_excited_states = starting_excited_states while number_excited_states + number_ground_states < MAX_VIBRATIONAL_STATES: ir_transDipole_g, 
ir_transDipole_e = IR_transition_dipoles(number_ground_states, number_excited_states) time_emission_g = [0] time_emission_e = [0] #define time scales e = energy_e + energy_epsilon * (.5 + number_excited_states) g = energy_g + energy_gamma* (.5 + number_ground_states) plus = e + g minus = e - g J = kappa * energy_gamma * mu_0 max_split_energy = plus + math.sqrt(minus**2 + 4 * J**2) max_energy = max_split_energy * .5 dt = 1.0 / (time_scale_set * max_energy) time_values = np.arange(0, simulation_ending_time, dt) ODE_DIAGONAL = ode_diagonal_matrix(number_ground_states, number_excited_states) ODE_OFF_DIAGONAL = ode_off_diagonal_matrix(c_value, number_ground_states, number_excited_states) def ODE_integrable_function(time, coefficient_vector): ODE_TOTAL_MATRIX = ODE_OFF_DIAGONAL * time_function(time) + ODE_DIAGONAL return np.dot(ODE_TOTAL_MATRIX, coefficient_vector) def ODE_jacobean(time, coefficient_vector): ODE_TOTAL_MATRIX = ODE_OFF_DIAGONAL * time_function(time) + ODE_DIAGONAL return ODE_TOTAL_MATRIX #define the starting wavefuntion initial_conditions = blank_wavefunction(number_ground_states, number_excited_states) initial_conditions[0] = 1 #create ode solver current_time = 0.0 try: del ode_solver except: pass # ode_solver = scipy.integrate.complex_ode(ODE_integrable_function) ode_solver = scipy.integrate.complex_ode(ODE_integrable_function, jac = ODE_jacobean) # ode_solver.set_integrator("lsoda") ode_solver.set_integrator("vode", with_jacobian=True) ode_solver.set_initial_value(initial_conditions, current_time) #Run it results = [] try: #this block catches an overflow into the highest ground or excited vibrational state while current_time < simulation_ending_time: # print(current_time, ZERO_TOLERANCE) #update time, perform solution current_time = ode_solver.t+dt new_result = ode_solver.integrate(current_time) results.append(new_result) #make sure solver was successful if not ode_solver.successful(): raise Exception("ODE Solve Failed!") if current_time < 
perturbation_ending_time: #make sure that there hasn't been substantial leakage to the highest excited states re_start_calculation = False if abs(new_result[number_ground_states - 1])**2 >= ZERO_TOLERANCE: number_ground_states +=1 # print("Increasing Number of Ground vibrational states to %i " % number_ground_states) re_start_calculation = True if abs(new_result[-1])**2 >= ZERO_TOLERANCE: number_excited_states +=1 # print("Increasing Number of excited vibrational states to %i " % number_excited_states) re_start_calculation = True if re_start_calculation: raise VibrationalStateOverFlowException() #calculate IR emission time_emission_g.append(np.conj(new_result).T.dot(ir_transDipole_g.dot(new_result))) time_emission_e.append(np.conj(new_result).T.dot(ir_transDipole_e.dot(new_result))) #on to next time value... except VibrationalStateOverFlowException: #Move on and re-start the calculation continue #Finish calculating results = np.array(results) n_t = len(time_emission_e) time_emission_g = np.array(time_emission_g) time_emission_e = np.array(time_emission_e) filter_x = np.array(range(n_t)) filter_center = n_t / 2.0 filter_sigma = n_t / 10.0 filter_values = np.exp(-(filter_x - filter_center)**2 / (2 * filter_sigma**2)) frequencies = np.fft.fftshift(np.fft.fftfreq(time_emission_g.shape[0], d= dt)) frequency_emission_g = dt * np.fft.fftshift(np.fft.fft(time_emission_g * filter_values)) frequency_emission_e = dt * np.fft.fftshift(np.fft.fft(time_emission_e * filter_values)) return results, frequencies, frequency_emission_g, frequency_emission_e, number_ground_states, number_excited_states raise Exception("NEEDED TOO MANY VIBRATIONAL STATES! 
RE-RUN WITH DIFFERENT PARAMETERS!") c_values = np.logspace(-4, np.log10(1), 5) tau_values = np.logspace(-4, np.log10(.95), 10) kappa_values = np.logspace(-2, np.log10(5), 20) number_calcs = c_values.shape[0] * tau_values.shape[0] * kappa_values.shape[0] heating_results_ground = np.zeros((kappa_values.shape[0], tau_values.shape[0], c_values.shape[0])) ir_amplitudes = np.zeros(heating_results_ground.shape) heating_results_excited = np.zeros(heating_results_ground.shape) # Keep track of the IR Spectrum n_g = STARTING_GROUND_STATES n_e = STARTING_EXCITED_STATES counter = 1 # we will use the value of c as a bellweather for how many starting states to work with. c_to_ng = {} c_to_ne = {} for i_kappa, kappa in enumerate(kappa_values): # as we increase in both tau and for i_tau, tau in enumerate(tau_values): for i_c, c in enumerate(c_values): try: n_g = c_to_ng[c] n_e = c_to_ne[c] except: n_g = STARTING_GROUND_STATES n_e = STARTING_EXCITED_STATES c_to_ng[c] = n_g c_to_ne[c] = n_e sys.stdout.flush() sys.stdout.write("\r%i / %i Calculating kappa=%f, c=%f, tau=%f at n_g = %i and n_e=%i..." %(counter, number_calcs, kappa, c, tau, n_g, n_e)) # print("\r%i / %i Calculating kappa=%f, c=%f, tau=%f at n_g = %i and n_e=%i..." 
%(counter, number_calcs, kappa, c, tau, n_g, n_e)) # n_bar_g, n_bar_e, results, num_g, num_e = get_average_quantum_number_time_series(c, # tau, # kappa, # starting_ground_states = n_g, # starting_excited_states = n_e) # heating_results_ground[i_kappa, i_tau, i_c] = n_bar_g[-1] # heating_results_excited[i_kappa, i_tau, i_c] = n_bar_e[-1] _, frequencies, emission_g, emission_e, num_g, num_e = IR_emission_spectrum_after_excitation(c, tau, kappa, starting_ground_states = n_g, starting_excited_states = n_e) if num_g > c_to_ng[c]: c_to_ng[c] = num_g if num_e > c_to_ne[c]: c_to_ne[c] = num_e vibrational_frequency_index = np.argmin(np.abs(energy_gamma - frequencies)) ir_power = np.abs(emission_g[vibrational_frequency_index])**2 ir_amplitudes[i_kappa, i_tau, i_c] = ir_power counter +=1 # plt.figure() # plt.title(r"$\kappa{}, \tau={}, c={}".format(kappa, tau, c)) # plt.plot(frequencies, emission_g) # plt.xlim(0, 2) # decreased dt, does that keep these parameters from failing? last one is 600 / 800 Calculating kappa=8.858668, c=0.750000, tau=0.850000 for i_kappa, kappa in enumerate(kappa_values): for i_tau, tau in enumerate(tau_values): ir_power = ir_amplitudes[i_kappa, i_tau, :] plt.loglog(c_values, ir_power, "*-") plt.xlabel(r"$c$") plt.figure() for i_kappa, kappa in enumerate(kappa_values): for i_c, c in enumerate(c_values): ir_power = ir_amplitudes[i_kappa, :, i_c] plt.loglog(tau_values, ir_power, "*-") plt.xlabel(r"$\tau$") plt.figure() for i_tau, tau in enumerate(tau_values): for i_c, c in enumerate(c_values): # for i_c in [0,-1]: ir_power = ir_amplitudes[:, i_tau, i_c] # plt.loglog(kappa_values, ir_power, ["blue", "red"][i_c]) plt.loglog(kappa_values, ir_power) plt.xlabel(r"$\kappa$") # plt.xlim(-.1, 1.1) # plt.ylim(-10, 500) c_log = np.log10(c_values) tau_log = np.log10(tau_values) kappa_log = np.log10(kappa_values) log_ir_amplitudes = np.log(ir_amplitudes) num_levels = 100 contours = np.linspace(np.min(log_ir_amplitudes), np.max(log_ir_amplitudes), num_levels) # for 
i_kappa, kappa in enumerate(kappa_values): # ir_power = log_ir_amplitudes[i_kappa, :, :] # plt.figure() # plt.contourf(c_log, tau_log, ir_power, contours) # plt.title(r"$\kappa = {}$".format(kappa)) # plt.ylabel(r"$c$") # plt.xlabel(r"$\tau$") # plt.colorbar() for i_tau, tau in enumerate(tau_values): ir_power = log_ir_amplitudes[:, i_tau, :] plt.figure() plt.contourf(c_log, kappa_log, ir_power, contours) plt.title(r"$\tau = {}$".format(tau)) plt.ylabel(r"$\kappa$") plt.xlabel(r"$c$") plt.colorbar() for i_c, c in enumerate(c_values): ir_power = log_ir_amplitudes[:, :, i_c] plt.figure() plt.contourf(tau_log, kappa_log, ir_power, contours) plt.title(r"$c = {}$".format(c)) plt.ylabel(r"$\kappa$") plt.xlabel(r"$\tau$") plt.colorbar() np.linspace() ```
github_jupyter
# Chunking strategies for a Wide-ResNet This tutorial shows how to utilize a hypernet container [HContainer](../hnets/hnet_container.py) and class [StructuredHMLP](../hnets/structured_mlp_hnet.py) (a certain kind of hypernetwork that allows *smart* chunking) in combination with a Wide-ResNet [WRN](../mnets/wide_resnet.py). ``` # Ensure code of repository is visible to this tutorial. import sys sys.path.insert(0, '..') import numpy as np import torch from hypnettorch.hnets.structured_hmlp_examples import wrn_chunking from hypnettorch.hnets import HContainer, StructuredHMLP from hypnettorch.mnets import WRN ``` ## Instantiate a WRN-28-10-B(3,3) First, we instantiate a WRN-28-10 (i.e., a WRN containing $28$ convolutional layers (and an additional fully-connected output layer) and a widening factor $k=10$) with no internal weights (`no_weights=True`). Thus, it's weights are expected to originate externally (in our case from a hypernetwork) and to be passed to its `forward` method. In particular, we are interested in instantiating a network that matches the one used in the study [Sacramento et al., "Economical ensembles with hypernetworks", 2020](https://arxiv.org/abs/2007.12927) (accessed August 18th, 2020). Therefore, the convolutional layers won't have bias terms (but the final fully-connected layer will). ``` net = WRN(in_shape=(32, 32, 3), num_classes=10, n=4, k=10, num_feature_maps=(16, 16, 32, 64), use_bias=False, use_fc_bias=True, no_weights=False, use_batch_norm=True, dropout_rate=-1) ``` ## Reproduce the chunking strategy from Sacramento et al. We first design a hypernetwork that matches the chunking strategy described in [Sacramento et al.](https://arxiv.org/abs/2007.12927). Thus, not all parameters are produced by a hypernetwork. Batchnorm weights will be shared among conditions (in their case, each condition represents one ensemble member), while the output layer weights will be condition-specific (ensemble-member-specific). 
The remaining weight are produced via linear hypernetworks (no bias terms in the hypernets) using a specific chunking strategy, which is described in the paper and in the docstring of function [wrn_chunking](../hnets/structured_hmlp_examples.py). To realize the mixture between shared weights (batchnorm), condition-specific weights (output weights) and hypernetwork-produced weights, we employ the special hypernetwork class [HContainer](../hnets/hnet_container.py). We first create an instance of class [StructuredHMLP](../hnets/structured_mlp_hnet.py) for all hypernetwork-produced weights. ``` # Number of conditions (ensemble members). Arbitrarily chosen! num_conds = 10 # Split the network's parameter shapes into shapes corresponding to batchnorm-weights, # hypernet-produced weights and output weights. # Here, we make use of implementation specific knowledge, which could also be retrieved # via the network's "param_shapes_meta" attribute, which contains meta information # about all parameters. bn_shapes = net.param_shapes[:2*len(net.batchnorm_layers)] # Batchnorm weight shapes hnet_shapes = net.param_shapes[2*len(net.batchnorm_layers):-2] # Conv layer weight shapes out_shapes = net.param_shapes[-2:] # Output layer weight shapes # This function already defines the network chunking in the same way the paper # specifies it. chunk_shapes, num_per_chunk, assembly_fct = wrn_chunking(net, ignore_bn_weights=True, ignore_out_weights=True, gcd_chunking=False) # Taken from table S1 in the paper. chunk_emb_sizes = [10, 7, 14, 14, 14, 7, 7, 7] # Important, the underlying hypernetworks should be linear, i.e., no hidden layers: # ``layers': []`` # They also should not use bias vectors -> hence, weights are simply generated via a # matrix vector product (chunk embedding input times hypernet, which is a weight matrix). 
# Note, we make the chunk embeddings conditional and tell the hypernetwork, that # it doesn't have to expect any other input except those learned condition-specific # embeddings. shnet = StructuredHMLP(hnet_shapes, chunk_shapes, num_per_chunk, chunk_emb_sizes, {'layers': [], 'use_bias': False}, assembly_fct, cond_chunk_embs=True, uncond_in_size=0, cond_in_size=0, num_cond_embs=num_conds) ``` Now, we combine the above produce `shnet` with shared batchnorm weights and condition-specific output weights in an instance of class [HContainer](../hnets/hnet_container.py), which will represent the final hypernetwork. ``` # We first have to create a simple function handle that tells the `HContainer` how to # recombine the batchnorm-weights, hypernet-produced weights and output weights. def simple_assembly_func(list_of_hnet_tensors, uncond_tensors, cond_tensors): # `list_of_hnet_tensors`: Contains outputs of all linear hypernets (conv # layer weights). # `uncond_tensors`: Contains the single set of shared batchnorm weights. # `cond_tensors`: Contains the condition-specific output weights. return uncond_tensors + list_of_hnet_tensors[0] + cond_tensors hnet = HContainer(net.param_shapes, simple_assembly_func, hnets=[shnet], uncond_param_shapes=bn_shapes, cond_param_shapes=out_shapes, num_cond_embs=num_conds) ``` Create sample predictions for 3 different ensemble members. ``` # Batch of inputs. batch_size = 1 x = torch.rand((batch_size, 32*32*3)) # Which ensemble members to consider? cond_ids = [2,3,7] # Generate weights for ensemble members defined above. weights = hnet.forward(cond_id=cond_ids) # Compute prediction for each ensemble member. for i in range(len(cond_ids)): pred = net.forward(x, weights=weights[i]) # Apply softmax. 
pred = torch.nn.functional.softmax(pred, dim=1).cpu().detach().numpy() print('Prediction of ensemble member %d: %s' \ % (cond_ids[i], np.array2string(pred, precision=3, separator=', '))) ``` ## Create a batch-ensemble network Now, we consider the special case where all parameters are shared except for batchnorm weights and output weights. Thus, no "hypernetwork" is required. Though, we use the class [HContainer](../hnets/hnet_container.py) for convenience. ``` def simple_assembly_func2(list_of_hnet_tensors, uncond_tensors, cond_tensors): # `list_of_hnet_tensors`: None # `uncond_tensors`: Contains all conv layer weights. # `cond_tensors`: Contains the condition-specific batchnorm and output weights. return cond_tensors[:-2] + uncond_tensors + cond_tensors[-2:] hnet2 = HContainer(net.param_shapes, simple_assembly_func2, hnets=None, uncond_param_shapes=hnet_shapes, cond_param_shapes=bn_shapes+out_shapes, num_cond_embs=num_conds) # Batch of inputs. batch_size = 1 x = torch.rand((batch_size, 32*32*3)) # Which ensemble members to consider? cond_ids = [2,3,7] # Generate weights for ensemble members defined above. weights = hnet2.forward(cond_id=cond_ids) # Compute prediction for each ensemble member. for i in range(len(cond_ids)): pred = net.forward(x, weights=weights[i]) # Apply softmax. pred = torch.nn.functional.softmax(pred, dim=1).cpu().detach().numpy() print('Prediction of ensemble member %d: %s' \ % (cond_ids[i], np.array2string(pred, precision=3, separator=', '))) ```
github_jupyter
``` %matplotlib inline from NewsFlow import * from VisualTools import * %load_ext autoreload %autoreload 2 from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" from matplotlib.patches import Patch from matplotlib.lines import Line2D from matplotlib.colors import LinearSegmentedColormap ``` # Basic use The `NewsFlow` can be simulated in many different ways using various input parameters. Nevertheless, at the very least, its basic use requires specifying the following input parameters: - population layer $\mathcal{G}_R = (\mathcal{R}, E_R)$ in the form of an (unweighted undirected) `igraph.Graph` graph, - the initial number $R^F(0) \in [1, R-1]$ of consumers of false news in the form of an `int` variable `F0`, - the rate $\eta$ of per-capita news-alignment events (these rewire the inter-layer coupling) in the form of a `double` variable `eta`, - the basic spreading rate $\lambda^F$ regulating edge-balancing events towards the false state in the form of a `double` variable `lambda_F`. For example: ``` g = ig.Graph.Barabasi(n=1000, m=20, power=-2.5) # generates a Barabási-Albert graph on 1000 vertices time_range, list_polarised, list_rhoF, list_FT_edges = newsFlow(g=g, F0=500, eta=10, lambda_F=0.5) ``` Notice that the execution terminated at $t_n$ due to the sample variance in either the number of unbalanced (active) edges $\lvert E^{FT}(t_n) \rvert$ or the number of polarised individuals $P(t_n)$ over the sliding window of length `window_size` dropping below the permitted tolerance `tol`. 
One could instead specify an upper limit on the simulation time by providing an `int` variable `t_max`, e.g., ``` time_range, list_polarised, list_rhoF, list_FT_edges = newsFlow(g=g, F0=500, eta=10, lambda_F=0.5, t_max=100) ``` Or, one could use *both* termination conditions by feeding in the upper limit on the simulation time `t_max` as well as the arguments `window_size` and/or `tol` customising the computation of the sliding-window variances (note that one can always tweak either or both of these parameters). ``` time_range, list_polarised, list_rhoF, list_FT_edges = newsFlow(g=g, F0=500, eta=10, lambda_F=0.5, t_max=100, window_size=200, tol=0.0001) ``` However, in order to make sense of the simulation outcomes, one might want to make use of the visualisation tools provided in the script `VisualTools`. In the [following section](#sfreq0) of the tutorial we focus solely on the default value of the sampling frequency `sfreq`, i.e., `sfreq == 0`. This comes in handy when one is interested in the detailed time evolution of the system at the macro level. Next, we take a peek at the case of `sfreq < 0`. This choice is especially helpful when only the final simulation state (at the macro level) is required. [Section](#sfreql0) provides related visual aids. Finally, the [last section](#sfreqg0) is dedicated to examples where the particular choice of `sfreq > 0` matters even more and at last justifies its name. Here, one can find a selection of functions which illustrate the time evolution (with a sampling frequency of `sfreq`) at the micro scale, that is, at the level of individual news providers and individual news consumers. 
## Default value of the sampling frequency `sfreq`: `sfreq == 0` <a id='sfreq0'></a> ``` _ = plot_newsFlow(N=1000, F0=500, eta=10, lambda_F=0.5) data = time_evolution_F0(N=300, list_F0=[3, 30, 75, 150], lambda_F=2, eta=20, t_max=100) colours = ['limegreen', 'darkgray', 'gray', 'crimson'] draw_time_evolution_F0(data=data, colours=colours, inset_lims=[0.05, 1, 0.1, 0.2], inset_width=2) data = time_evolution_lambda_F_eta(N=500, F0=150, list_lambdaF=[0.2, 1, 5], list_eta=[10, 20, 50]) legend_elements = [Patch(color='limegreen', label=r'$\lambda^F = 0.2$'), Patch(color='gray', label=r'$\lambda^F = 1$'), Patch(color='crimson', label=r'$\lambda^F = 5$'), Line2D([], [], color='dimgray', label='$\eta = 10$', linestyle='dotted', markerfacecolor='dimgray'), Line2D([], [], color='dimgray', label='$\eta = 20$', linestyle='dashed', markerfacecolor='dimgray'), Line2D([], [], color='dimgray', label='$\eta = 50$', linestyle='solid', markerfacecolor='dimgray')] colours = {0.2: 'limegreen', 1: 'gray', 5: 'crimson'} linestyles = {10: 'dotted', 20: 'dashed', 50: 'solid'} alphas = {10: 0.3, 20: 0.6, 50: 0.9} draw_time_evolution_lambda_F_eta(data=data, colours=colours, linestyles=linestyles, alphas=alphas, legend_elements=legend_elements) ``` ## Sampling frequency `sfreq < 0` <a id='sfreql0'></a> ``` final_polarised, final_rhoF, final_FT_edges = rho_heatmaps(N=300, F0=150, lambdaF_list=[0.25, 0.5, 1, 2, 4], eta_list=[10, 20, 30, 40, 50], rep=10, tol=0.02) draw_heatmap(data=final_polarised, title=r'Final density $\rho^P$ of polarised individuals', cmap=sns.light_palette((210, 90, 60), input="husl"), markCells=[[1,0], [0,2]]) cmap = LinearSegmentedColormap.from_list(name='rhoF', colors=['limegreen', 'lightgreen', 'whitesmoke', 'lightcoral', 'crimson'], N=300) draw_heatmap(data=final_rhoF, title=r'Final density $\rho^F$ of individuals in the false state', cmap=cmap, markCells=[[1,1], [2,2]], vmin=0.00, vmax=1.00, center=0.50) draw_heatmap(data=final_FT_edges, title=r'Final density 
$\rho^{FT}$ of unbalanced edges', cmap=sns.light_palette((210, 90, 60), input="husl"), markCells=[[3,2], [2,1], [1,4]]) data = rho_lambdaF(list_N=[200, 400, 600], list_rhoF0=[0.01, 0.10, 0.25, 0.5], rep=1, list_lambdaF=[0.25, 0.5, 1, 2, 4], eta=5, t_max=100) colours = {0.01: 'limegreen', 0.1: 'darkgray', 0.25: 'gray', 0.5: 'crimson'} markers = {200: 'v', 400: 's', 600: '^'} legend_elements = [Patch(color='limegreen', label=r'$\rho^F(0) = 0.01$'), Patch(color='darkgray', label=r'$\rho^F(0) = 0.10$'), Patch(color='gray', label=r'$\rho^F(0) = 0.25$'), Patch(color='crimson', label=r'$\rho^F(0) = 0.50$'), Line2D([], [], marker='v', color='dimgray', label='R = 200', linestyle='None', markerfacecolor='dimgray', markersize=8), Line2D([], [], marker='s', color='dimgray', label='R = 400', linestyle='None', markerfacecolor='dimgray', markersize=8), Line2D([], [], marker='^', color='dimgray', label='R = 600', linestyle='None', markerfacecolor='dimgray', markersize=8)] plot_densities_lambdaF(data=data, colours=colours, markers=markers, legend_elements=legend_elements) data = rho_eta(list_N=[200, 400, 600], list_rhoF0=[0.01, 0.10, 0.25, 0.5], rep=1, list_eta=[1, 30, 50], lambda_F=2, t_max=50) plot_densities_eta(data=data, colours=colours, markers=markers, legend_elements=legend_elements) ``` ## Sampling frequency `sfreq > 0` <a id='sfreqg0'></a> ``` data = get_media_repertoire(N=500, lambda_F=1, eta=50, sfreq=10, t_max=50) media_repertoire(data=data) public_scatter(data=data) subscription(data=data) data = get_media_subscriptions(N=500, lambda_F=1, eta=50, sfreq=10, t_max=100) media_subscriptions(data=data) media_scatter(data=data) readership(data=data) ```
github_jupyter
[exercises](recording.ipynb)

```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np

def ab_delays(beta, d, c=343):
    """Calculate delays for AB stereophony.

    beta: angle of incidence in degree, can be a scalar or a sequence
    d: distance between microphones in metres
    c: speed of sound in m/s
    """
    # Inter-microphone path-length difference d*sin(beta), divided by the
    # speed of sound -> arrival-time difference in seconds.
    return d / c * np.sin(np.deg2rad(beta))

def plot_ab_delays(max_beta, d, c=343, **kwargs):
    """Plot delays for AB stereophony.

    See ab_delays().
    """
    betas = np.linspace(-max_beta, max_beta, 100)
    delays = ab_delays(betas, d, c);
    # Delays are converted from seconds to milliseconds for plotting.
    plt.plot(betas, delays * 1000, **kwargs)
    plt.title("d = {} m".format(d))
    plt.xlabel("beta (degree)")
    plt.ylabel("delay (ms)")

plot_ab_delays(90, 0.4, label="40 cm")
plot_ab_delays(90, 0.6, label="60 cm")
plot_ab_delays(90, 0.8, label="80 cm")
# The final title overwrites the per-call titles set inside plot_ab_delays().
plt.title("Delays for different mic distances")
plt.legend(loc='lower right');
```

TODO: how to get a smaller recording angle (maximum useful delay: 1.2 ms, see Weinzierl, "Handbuch der Audiotechnik" (in German), p. 573)

```
def xy_weights(beta, epsilon, a, b):
    """Calculate weighting factors for XY stereophony.

    beta: angle of incidence in degree, can be a scalar or a sequence
    epsilon: opening angle of microphones in degree
    """
    # First-order polar pattern a + b*cos(theta) evaluated for each of the two
    # mics, which are rotated by +epsilon and -epsilon relative to the front.
    # In the example calls below a + b == 1 for every pattern.
    Ax = a + b * np.cos(np.deg2rad(epsilon + beta))
    Ay = a + b * np.cos(np.deg2rad(epsilon - beta))
    return Ax, Ay

import tools

def plot_xy_weights(epsilon, a, b, pattern):
    """Create 3 plots showing the XY factors."""
    betas = np.arange(-180, 181)
    weights = np.column_stack(xy_weights(betas, epsilon, a, b))
    # Split into positive and negative lobes so the negative lobe can be
    # drawn dashed (same colors, via set_prop_cycle(None)) in the polar plot.
    pos_weights = np.clip(weights, 0, None)
    neg_weights = -np.clip(weights, None, 0)
    title = "{}, ϵ = {} degree".format(pattern, epsilon)
    plt.figure()
    plt.polar(np.radians(betas), pos_weights)
    plt.gca().set_prop_cycle(None)
    plt.polar(np.radians(betas), neg_weights, linewidth=2, linestyle='dashed')
    plt.gca().set_theta_zero_location('N')
    plt.title(title)
    plt.figure()
    plt.plot(betas, tools.db(weights))
    plt.title(title)
    plt.xlim(-180, 180)
    plt.ylim(-45, 3)
    plt.xlabel("beta / degree")
    plt.ylabel("$L_X$ and $L_Y$ / dB")
    plt.figure()
    # Inter-channel level difference (second column minus first) in dB.
    differences = np.diff(tools.db(weights), axis=1)
    plt.plot(betas, differences)
    plt.title(title)
    plt.xlim(-180, 180)
    plt.ylim(-50, 50)
    plt.xlabel("beta / degree")
    plt.ylabel("level difference / dB")
```

Maximum meaningful level difference: between 15 and 18 dB (DIN EN 60268-4, section 12.5.4.1)

```
plot_xy_weights(45, 0.5, 0.5, "cardioid")
plot_xy_weights(45, 0.366, 0.634, "super-cardioid")
plot_xy_weights(45, 0.25, 0.75, "hyper-cardioid")
plot_xy_weights(45, 0, 1, "figure-of-eight")
```

<p xmlns:dct="http://purl.org/dc/terms/"> <a rel="license" href="http://creativecommons.org/publicdomain/zero/1.0/"> <img src="http://i.creativecommons.org/p/zero/1.0/88x31.png" style="border-style: none;" alt="CC0" /> </a> <br /> To the extent possible under law, <span rel="dct:publisher" resource="[_:publisher]">the person who associated CC0</span> with this work has waived all copyright and related or neighboring rights to this work. </p>
github_jupyter
``` # 코드로 형식 지정됨 ``` #StyleGAN3 Reactive Audio By Derrick Schultz for the StyleGAN2 Deep Dive class. This notebook shows one basic example of how to alter your StyleGAN2 vectors with audio. There are lots of different techniques to explore in this, but this is one simple way. Big thanks to Robert Luxemburg who provided the basis for a lot of this code with [this gist](https://gist.github.com/rolux/48f1da6cf2bc6ca5833dbacbf852b348). ##Installation First let’s install the repos and dependencies needed. ``` !nvidia-smi !wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip !sudo unzip ninja-linux.zip -d /usr/local/bin/ !sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force !git clone https://github.com/dvschultz/stylegan3.git #use this fork to get interpolation functions !pip install opensimplex # needed for noise interpolation !pwd !wget https://raw.github.com/circulosmeos/gdown.pl/master/gdown.pl !chmod u+x gdown.pl !pip install gdown==4.3 !gdown --fuzzy https://drive.google.com/file/d/1_Cneq6wuh2f8_rKES1rbuFT5wYTqpXwD/view?usp=sharing !gdown --fuzzy https://drive.google.com/file/d/1wHjX4oFzwbvWYsKzeC0GsVd3jrFnnpfA/view?usp=sharing !gdown https://drive.google.com/file/d/1ea8UuF3X22ikDjSKC7pB2VPhCAtWUZH3/view?usp=sharing !pip install torch==1.9.0 import sys sys.path.append('/content/stylegan3') import os import re import numpy as np import scipy.interpolate from scipy.io import wavfile from scipy.signal import savgol_filter import matplotlib.pyplot as plt import PIL.Image import moviepy.editor import torch import pickle import random from tqdm import tqdm import copy from typing import List, Optional, Tuple, Union import click import dnnlib import imageio import legacy from scipy.interpolate import interp1d import math device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') ``` ##Process audio The next step process our audio files. 
Edit the first line of the next cell to point to your audio file. The run the cell after that to process the audio and generate a graph of the volume data. ``` wav_filename = "/content/forest10s.wav" audio = {} fps = 24 # 파형 민감도 설정 / window_length must be an odd number / polyorder must be smaller than window_length window_length = 75 polyorder = 3 activeness = 1 if not os.path.exists(wav_filename): audio_clip = moviepy.editor.AudioFileClip(wav_filename) audio_clip.write_audiofile(wav_filename, fps=44100, nbytes=2, codec='pcm_s16le') track_name = os.path.basename(wav_filename)[:-4] rate, signal = wavfile.read(wav_filename) signal = np.mean(signal, axis=1) # to mono signal = np.abs(signal) # seed = signal.shape[0] duration = signal.shape[0] / rate frames = int(np.ceil(duration * fps)) samples_per_frame = signal.shape[0] / frames audio[track_name] = np.zeros(frames, dtype=signal.dtype) for frame in range(frames): start = int(round(frame * samples_per_frame)) stop = int(round((frame + 1) * samples_per_frame)) audio[track_name][frame] = np.mean(signal[start:stop], axis=0) audio[track_name] = savgol_filter(audio[track_name], window_length, polyorder) audio[track_name] = audio[track_name] / max(audio[track_name]) audio[track_name] = audio[track_name] ** activeness print("Total frames : ", frames) for track in sorted(audio.keys()): plt.figure(figsize=(8, 3)) plt.title(track) plt.plot(audio[track]) plt.savefig(f'../{track}.png') def load_networks(path): with open(path, 'rb') as stream: G = pickle.load(stream)['G_ema'].to(device) G.eval() return G #---------------------------------------------------------------------------- def audio_reactive_linear(v0, v1, f): return (v0*(1.0-f)+v1*f) #---------------------------------------------------------------------------- def seed_generator(size): result = [] for v in range(size): result.append(random.randint(0, 1000)) return result #---------------------------------------------------------------------------- def layout_grid(img, 
grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True): batch_size, channels, img_h, img_w = img.shape if grid_w is None: grid_w = batch_size // grid_h assert batch_size == grid_w * grid_h if float_to_uint8: img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8) img = img.reshape(grid_h, grid_w, channels, img_h, img_w) img = img.permute(2, 0, 3, 1, 4) img = img.reshape(channels, grid_h * img_h, grid_w * img_w) if chw_to_hwc: img = img.permute(1, 2, 0) if to_numpy: img = img.cpu().numpy() return img network_pkl = '/content/awesome_beach.pkl' G = load_networks(network_pkl) ``` ### StyleGAN3 gen_video 튜닝 ``` # StyleGAN3의 영상 생성 모델 간소화 및 Auio Reactive 기능 적용 시도 def gen_audio_reactive_video(G, mp4: str, seeds_bottom: List[int], seeds_top: List[int], shuffle_seed: Optional[int], num_keyframes_bottom: Optional[int], num_keyframes_top: Optional[int], psi=1, kind='cubic', **video_kwargs): # the output video length will be 'num_keyframes*w_frames' frames. # num_keyframes : Number of seeds to interpolate through. 
# w_frames : Number of frames to interpolate between latents if frames % num_keyframes_bottom != 0: print("Error") w_frames = int(frames / num_keyframes_bottom) print(w_frames) all_seeds_top = np.zeros(num_keyframes_top, dtype=np.int64) all_seeds_bottom = np.zeros(num_keyframes_bottom, dtype=np.int64) for idx in range(num_keyframes_top): all_seeds_top[idx] = seeds_top[idx % len(seeds_top)] for idx in range(num_keyframes_bottom): all_seeds_bottom[idx] = seeds_bottom[idx % len(seeds_bottom)] if shuffle_seed is not None: rng = np.random.RandomState(seed=shuffle_seed) rng.shuffle(all_seeds_top) rng.shuffle(all_seeds_bottom) zs_top = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds_top])).to(device) print(zs_top.shape) zs_bottom = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds_bottom])).to(device) print(zs_bottom.shape) ws_top = G.mapping(z=zs_top, c=None, truncation_psi=psi) print(ws_top.shape) ws_bottom = G.mapping(z=zs_bottom, c=None, truncation_psi=psi) print(ws_bottom.shape) ws =[] for f in range(frames): ws.append(audio_reactive_linear(ws_bottom[f%num_keyframes_bottom],ws_top[f%num_keyframes_top],audio[track_name][f])) # Render video. 
video_out = imageio.get_writer(mp4, mode='I', fps=fps, codec='libx264', **video_kwargs) for frame_idx in tqdm(range(frames)): w = ws[frame_idx].to(device) img = G.synthesis(ws=w.unsqueeze(0), noise_mode='const')[0].cpu() img = (img.permute(1,2,0)*127.5+128).clamp(0,255).to(torch.uint8).numpy() video_out.append_data(img) video_out.close() seeds_bottom = seed_generator(2) seeds_top = seed_generator(2) gen_audio_reactive_video(G, mp4='/content/test7.mp4', bitrate='12M', seeds_bottom=seeds_bottom, seeds_top=seeds_top, shuffle_seed=2, num_keyframes_bottom=2, num_keyframes_top=4, psi=1) # StyleGAN3 영상 생성 모델 grid 삭제 및 간소화 시도 def gen_video_audio(G, mp4: str, seeds: List[int], shuffle_seed: Optional[int], num_keyframes: Optional[int], psi=1, kind='cubic', **video_kwargs): # the output video length will be 'num_keyframes*w_frames' frames. # num_keyframes : Number of seeds to interpolate through. # w_frames : Number of frames to interpolate between latents if frames % num_keyframes != 0: print("Error") w_frames = int(frames / num_keyframes) all_seeds = np.zeros(num_keyframes, dtype=np.int64) for idx in range(num_keyframes): all_seeds[idx] = seeds[idx % len(seeds)] if shuffle_seed is not None: rng = np.random.RandomState(seed=shuffle_seed) rng.shuffle(all_seeds) zs = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])).to(device) ws = G.mapping(z=zs, c=None, truncation_psi=psi) print(ws.shape) # Render video. 
video_out = imageio.get_writer(mp4, mode='I', fps=fps, codec='libx264', **video_kwargs) for frame_idx in tqdm(range(frames)): w = ws.to(device) img = G.synthesis(ws=w, noise_mode='const')[0].cpu() img = (img.permute(1,2,0)*127.5+128).clamp(0,255).to(torch.uint8).numpy() video_out.append_data(img) video_out.close() seeds = seed_generator(2) gen_video_audio(G, mp4='/content/test8.mp4', bitrate='12M', seeds=seeds, shuffle_seed=2, num_keyframes=4, psi=1) # StyleGAN3의 영상 생성 모델 Auio Reactive 기능 적용 시도 (간소화 X) def gen_audio_reactive_video_grid(G, mp4: str, seeds_bottom: List[int], seeds_top: List[int], shuffle_seed: Optional[int], num_keyframes_bottom: Optional[int], num_keyframes_top: Optional[int], wraps=2, psi=1, grid_dims=(1,1), kind='cubic', **video_kwargs): # the output video length will be 'num_keyframes*w_frames' frames. # num_keyframes : Number of seeds to interpolate through. # w_frames : Number of frames to interpolate between latents grid_w = grid_dims[0] grid_h = grid_dims[1] # audio 분석으로 도출된 frames에 맞춰 영상이 생성되어야 함으로 frames와 num_keyframes가 나눠떨어져야합니다. # 이하 상단(top)은 audio reactive에서 파형의 상단에 해당하는 부분을 말하며, 하단(bottom)은 audio reactive에서 파형의 하단에 해당하는 부분을 말합니다. if frames % num_keyframes_top != 0: print("num_keyframes_top Error") if frames % num_keyframes_bottom != 0: print("num_keyframes_bottom Error") w_frames_top = int(frames / num_keyframes_top) w_frames_bottom = int(frames / num_keyframes_bottom) # 그리드 관련 수식입니다. 사용할 시드 개수도 그리드 수 만큼 나눠 떨어져야함으로 해당 수식이 들어가 있습니다. 저희는 기본적으로 1x1로 진행하는 걸 염두하고 있지만 혹시 모르므로 살려두겠습니다. if num_keyframes_top is None: if len(seeds) % (grid_w*grid_h) != 0: raise ValueError('Number of input seeds must be divisible by grid W*H') num_keyframes_top = len(seeds) // (grid_w*grid_h) if num_keyframes_bottom is None: if len(seeds) % (grid_w*grid_h) != 0: raise ValueError('Number of input seeds must be divisible by grid W*H') num_keyframes_bottom = len(seeds) // (grid_w*grid_h) # audio_reactive의 상단과 하단에 해당되는 시드를 생성하고 num_keyframes에 맞춰서 index를 부여합니다. 
# Build the seed arrays for the top/bottom waveform halves, repeating the
# given seed lists to fill num_keyframes * grid cells.
all_seeds_top = np.zeros(num_keyframes_top*grid_h*grid_w, dtype=np.int64)
all_seeds_bottom = np.zeros(num_keyframes_bottom*grid_h*grid_w, dtype=np.int64)
for idx in range(num_keyframes_top*grid_h*grid_w):
    all_seeds_top[idx] = seeds_top[idx % len(seeds_top)]
for idx in range(num_keyframes_bottom*grid_h*grid_w):
    all_seeds_bottom[idx] = seeds_bottom[idx % len(seeds_bottom)]

# Shuffle the generation seeds once, based on the supplied random seed.
if shuffle_seed is not None:
    rng = np.random.RandomState(seed=shuffle_seed)
    rng.shuffle(all_seeds_top)
    rng.shuffle(all_seeds_bottom)

# Draw the z noise for the top/bottom halves of the waveform and map it into
# W space, reshaped to (grid_h, grid_w, num_keyframes, ...).
zs_top = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds_top])).to(device)
zs_bottom = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds_bottom])).to(device)
ws_top = G.mapping(z=zs_top, c=None, truncation_psi=psi)
ws_top = ws_top.reshape(grid_h, grid_w, num_keyframes_top, *ws_top.shape[1:])
ws_bottom = G.mapping(z=zs_bottom, c=None, truncation_psi=psi)
ws_bottom = ws_bottom.reshape(grid_h, grid_w, num_keyframes_bottom, *ws_bottom.shape[1:])

# Lay out the top/bottom w's on the grid and build one interpolator per cell;
# the keyframes are tiled `wraps` times on each side so the spline wraps around.
grid_top = []
for yi in range(grid_h):
    row = []
    for xi in range(grid_w):
        x = np.arange(-num_keyframes_top * wraps, num_keyframes_top * (wraps + 1))
        y = np.tile(ws_top[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1])
        interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0)
        row.append(interp)
    grid_top.append(row)
grid_bottom = []
for yi in range(grid_h):
    row = []
    for xi in range(grid_w):
        x = np.arange(-num_keyframes_bottom * wraps, num_keyframes_bottom * (wraps + 1))
        y = np.tile(ws_bottom[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1])
        interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0)
        row.append(interp)
    # BUG FIX: these rows were appended to grid_top, leaving grid_bottom empty
    # (IndexError at render time) and corrupting the top interpolators;
    # append to grid_bottom instead.
    grid_bottom.append(row)
print(len(grid_top))
print(len(grid_bottom))

# Render video.
# Blend the top and bottom w's per frame via the audio-reactive weight
# (continued below).
video_out = imageio.get_writer(mp4, mode='I', fps=60, codec='libx264', **video_kwargs)  # NOTE(review): fps hard-coded to 60 here, unlike the other generators which use the global `fps` -- confirm
for frame_idx in tqdm(range(frames)):
    imgs = []
    for yi in range(grid_h):
        for xi in range(grid_w):
            # evaluate this cell's top/bottom splines at the frame's keyframe position
            interp_top = grid_top[yi][xi]
            interp_bottom = grid_bottom[yi][xi]
            w_top = torch.from_numpy(interp_top(frame_idx / w_frames_top)).to(device)
            w_bottom = torch.from_numpy(interp_bottom(frame_idx / w_frames_bottom)).to(device)
            # blend bottom -> top by the audio envelope value for this frame
            w = audio_reactive_linear(w_bottom,w_top,audio[track_name][frame_idx])
            img = G.synthesis(ws=w.unsqueeze(0), noise_mode='const')[0]
            imgs.append(img)
    video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h))
video_out.close()

seeds_bottom = seed_generator(10)
seeds_top = seed_generator(10)
# NOTE(review): output path reuses /content/test7.mp4 and overwrites the earlier render
gen_audio_reactive_video_grid(G, mp4='/content/test7.mp4', bitrate='12M', seeds_bottom=seeds_bottom, seeds_top=seeds_top, shuffle_seed=2, num_keyframes_bottom=4, num_keyframes_top=4, psi=1)
```

Run the next cell to define some functions we’ll need to use to generate our inference images.
```
import random

def generate_zs_from_seeds(seeds,G):
    """Return one (1, z_dim) numpy z vector per seed, drawn from a
    per-seed RandomState so the result is reproducible."""
    zs = []
    for seed_idx, seed in enumerate(seeds):
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, G.mapping.z_dim) # [minibatch, component]
        zs.append(z)
    return zs

def convertZtoW(latent, truncation_psi=0.7, truncation_cutoff=9):
    """Map a numpy z latent into W space and apply the truncation trick to
    the first `truncation_cutoff` layers (mutates the mapped tensor in place).

    NOTE(review): uses the notebook-global `G` and `device`; the second
    positional argument of G.mapping is presumably the class label c -- confirm.
    """
    latent = torch.from_numpy(latent).to(device)
    dlatent = G.mapping(latent, 0) # [seed, layer, component]
    dlatent_avg = G.mapping.w_avg # [component]
    for i in range(truncation_cutoff):
        dlatent[0][i] = (dlatent[0][i]-dlatent_avg)*truncation_psi + dlatent_avg
    return dlatent

def generate_images_in_w_space(dlatents, truncation_psi,folder='random'):
    """Synthesize one PNG frame per w latent into `folder` (or a randomly
    named frames%06d directory), applying the truncation trick per latent."""
    # Gs_kwargs = dnnlib.EasyDict()
    # Gs_kwargs.output_transform = dict(func=convert_images_to_uint8, nchw_to_nhwc=True)
    # Gs_kwargs.randomize_noise = False
    # Gs_kwargs.truncation_psi = truncation_psi
    dlatent_avg = G.mapping.w_avg # [component]
    if folder == 'random':
        temp_dir = 'frames%06d'%int(1000000*random.random())
    else:
        temp_dir = folder
    os.system('mkdir %s'%temp_dir)  # NOTE(review): os.makedirs(temp_dir, exist_ok=True) would be more portable
    for row, dlatent in enumerate(dlatents):
        print('Generating image for step %d/%d ...' % (row, len(dlatents)))
        #row_dlatents = (dlatent[np.newaxis] - dlatent_avg) * np.reshape(truncation_psi, [-1, 1, 1]) + dlatent_avg
        dl = (dlatent-dlatent_avg)*truncation_psi + dlatent_avg
        row_images = G.synthesis(dl)
        # convert NCHW float output to a HWC uint8 image and save it
        row_image = (row_images.permute(0,2,3,1)*127.5+128).clamp(0,255).to(torch.uint8)
        row_image = row_image.squeeze(0).cpu().numpy()
        PIL.Image.fromarray(row_image, 'RGB').save('%s/frame%05d.png' % (temp_dir, row))
```

## Generate Images

### Use Volume to interpolate between two seeds

The next cell will take two seed values and do a linear interpolation of them using the volume from your audio. When the audio is silent, it will be the first seed you list. When it is at its loudest it will be the second. Everything in between will be an interpolated value.
```
# z noise blend by linear/trigonometric function
network_pkl = '/content/awesome_beach.pkl'

def seed_generator(size):
    """Return `size` random integer seeds in [0, 1000]."""
    result = []
    for v in range(size):
        result.append(random.randint(0, 1000))
    return result

seeds_num = 10
seeds_top = seed_generator(seeds_num)
seeds_bottom = seed_generator(seeds_num)
blend_num_top = seeds_num
blend_num_bottom = seeds_num
blend_weight_parameter = 1
truncation_value = 0.8
G = load_networks(network_pkl)
w_avg = G.mapping.w_avg

def get_ws(n, frames, seed):
    """Build (or load from cache) a smooth (frames, 512) w trajectory by
    quadratic interpolation through `n` random 512-d latents, tiled 3x so the
    middle section has no boundary artifacts; cached as an .npy file."""
    filename = f'../ws_{n}_{frames}_{seed}.npy'
    if not os.path.exists(filename):
        src_ws = np.random.RandomState(seed).randn(n, 512)
        ws = np.empty((frames, 512))
        for i in range(512):
            x = np.linspace(0, 3*frames, 3*len(src_ws), endpoint=False)
            y = np.tile(src_ws[:, i], 3)
            x_ = np.linspace(0, 3*frames, 3*frames, endpoint=False)
            y_ = interp1d(x, y, kind='quadratic', fill_value='extrapolate')(x_)
            # keep only the middle copy of the tiled signal
            ws[:, i] = y_[frames:2*frames]
        np.save(filename, ws)
    else:
        ws = np.load(filename)
    return ws

def relu(x):
    """Elementwise max(0, x)."""
    return np.maximum(0, x)

def blend_linear(z_list, seeds_list, blend_num):
    """Blend the z vectors with per-frame linear weights peaking at each
    seed's position; weights are normalized to sum to 1 per frame.
    NOTE(review): relies on the notebook globals `frames` and
    `blend_weight_parameter`."""
    blend_list = []
    for f in range(frames):
        y_list = []
        temp_list = []
        for i in range(len(seeds_list)):
            y = relu(1 - (1/frames*blend_num/len(seeds_list))*abs(i/len(seeds_list)*frames-f))
            y = y ** blend_weight_parameter
            y_list.append(y)
        y_list = np.divide(y_list, sum(y_list))
        for i in range(len(seeds_list)):
            temp = y_list[i]*z_list[i]
            temp_list.append(temp)
        blend_list.append(sum(temp_list))
    return blend_list

def blend_tri(z_list, seeds_list):
    """Cosine-weighted variant of blend_linear; same normalization scheme."""
    blend_list = []
    for f in range(frames):
        y_list = []
        temp_list = []
        for i in range(len(seeds_list)):
            y = (math.cos(((i/len(seeds_list)*frames) - f) * math.pi / 2 / frames))
            y_list.append(y)
        y_list = np.divide(y_list, sum(y_list))
        for i in range(len(seeds_list)):
            temp = y_list[i]*z_list[i]
            temp_list.append(temp)
        blend_list.append(sum(temp_list))
    return blend_list

def lerp_linear(v0, v1, f, t):
    # linear interpolation v0 -> v1 by factor f; NOTE(review): `t` is unused
    return (v0*(1.0-f)+v1*f)

def lerp_tri(v0, v1, f, t): return v0*(1.0-(math.sin(f * math.pi * 90 /
180))) + v1*(math.sin(f * math.pi * 90 / 180)) ends_top = generate_zs_from_seeds(seeds_top,Gs) ends_bottom = generate_zs_from_seeds(seeds_bottom,Gs) ends_b = [] ends_top_b = blend_linear(ends_top, seeds_top, blend_num_top) ends_bottom_b = blend_linear(ends_bottom, seeds_bottom, blend_num_bottom) for f in range(frames): ends_b.append(lerp_linear(ends_bottom_b[f],ends_top_b[f],audio[track_name][f],f)) ends_w_b = [] for e in range(len(ends_b)): ends_w_b.append(convertZtoW(ends_b[e],1)) vectors = ends_w_b generate_images_in_w_space(vectors,truncation_value,'frames_test_20220216_13') # StyleGAN3 gen_video의 interpolation 함수 기존 코드에 적용하기 seeds_num = 10 seeds_top = seed_generator(seeds_num) seeds_bottom = seed_generator(seeds_num) wraps = 2 blend_weight_parameter = 1 truncation_value = 1 # gen_video에서 갖고온 함수의 일부를 수정하다 만 것 -------------------------------------- # 이를 수정해서 리스트로 받은 시드를 frames 수 만큼의 요소를 가진 하나의 연속적인 리스트로 만들어야함. x = np.arange(-num_keyframes_bottom * wraps, num_keyframes_bottom * (wraps + 1)) y = np.tile(ws_bottom[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1]) interp = scipy.interpolate.interp1d(x, ws_bottom, kind=kind, axis=0) # 핵심 함수 # ------------------------------------------------------------------------------ ends_top = generate_zs_from_seeds(seeds_top,Gs) ends_bottom = generate_zs_from_seeds(seeds_bottom,Gs) ends_b = [] ends_top_b = blend_linear(ends_top, seeds_top, blend_num_top) ends_bottom_b = blend_linear(ends_bottom, seeds_bottom, blend_num_bottom) for f in range(frames): ends_b.append(audio_reactive_linear(ends_bottom_b[f],ends_top_b[f],audio[track_name][f]) ends_w_b = [] for e in range(len(ends_b)): ends_w_b.append(convertZtoW(ends_b[e],1)) vectors = ends_w_b generate_images_in_w_space(vectors,truncation_value,'frames_test_20220216_13') # linear blend test network_pkl = '/content/awesome_beach.pkl' Gs = load_networks(network_pkl) seeds = [100] seeds_t = [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000] ends = generate_zs_from_seeds(seeds,Gs) 
ends_t = generate_zs_from_seeds(seeds_t,Gs)

def blend_linear(ends_t):
    """Debug version of blend_linear with prints.
    NOTE(review): shadows the 3-argument blend_linear defined in an earlier
    cell and relies on the notebook globals `frames` and `seeds_t`."""
    ends_t_lerp = []
    for f in range(frames):
        y_list = []
        temp_list = []
        for i in range(len(seeds_t)):
            print("i : ", i/len(seeds_t))
            y = 1 - (1/frames)*abs(i/len(seeds_t)*frames-f)
            print(y)
            y_list.append(y)
        # normalize the weights so they sum to 1 for this frame
        y_list = np.divide(y_list, sum(y_list))
        print("y_list : ", y_list)
        print("y_list sum : ", sum(y_list))
        for i in range(len(seeds_t)):
            temp = y_list[i]*ends_t[i]
            temp_list.append(temp)
        print("ends_t_lerp : ", sum(temp_list))
        ends_t_lerp.append(sum(temp_list))
    return ends_t_lerp

def lerp(v0, v1, f):
    # plain linear interpolation between v0 and v1 by factor f
    return (v0*(1.0-f)+v1*f)

ends_t_lerp = []
ends_w = []
ends_w_t = []
ends_t_lerp = blend_linear(ends_t)

# tri blend test
frames = 10  # NOTE(review): overwrites the audio-derived global `frames`
flow_energy = 2
network_pkl = '/content/awesome_beach.pkl'
Gs = load_networks(network_pkl)
seeds = [100]
seeds_t = [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]
ends = generate_zs_from_seeds(seeds,Gs)
ends_t = generate_zs_from_seeds(seeds_t,Gs)

def blend_tri(ends_t):
    """Cosine-weighted variant of the debug blend_linear above (with prints)."""
    ends_t_lerp = []
    for f in range(frames):
        y_list = []
        temp_list = []
        for i in range(len(seeds_t)):
            print("i : ", i/len(seeds_t))
            y = (math.cos(((i/len(seeds_t)*frames) - f) * math.pi / frames) + 1)
            print(y)
            y_list.append(y)
        y_list = np.divide(y_list, sum(y_list))
        print("y_list : ", y_list)
        print("y_list sum : ", sum(y_list))
        for i in range(len(seeds_t)):
            temp = y_list[i]*ends_t[i]
            temp_list.append(temp)
        print("ends_t_lerp : ", sum(temp_list))
        ends_t_lerp.append(sum(temp_list))
    return ends_t_lerp

def lerp(v0, v1, f):
    # NOTE(review): duplicate of the lerp defined earlier in this cell group
    return (v0*(1.0-f)+v1*f)

ends_t_lerp = []
ends_w = []
ends_w_t = []
ends_t_lerp = blend_tri(ends_t)
```

### Combine the frames into a video and add the audio track back to it

There’s probably a cleaner way to do this all in moviepy but I’m being lazy.
```
# Encode the rendered PNG frames into an mp4 with ffmpeg.
# NOTE(review): frames are encoded at 24 fps here but the moviepy write below
# uses the global `fps` -- confirm they match, or the audio will drift.
!ffmpeg -r 24 -i /content/frames_test_20220216_13/frame%05d.png -vcodec libx264 -pix_fmt yuv420p /content/temp_test_20220216_13.mp4

# output file name
mp4_filename = '/content/audio_reactive_video_sample_20220216_13.mp4'

# video_clip = moviepy.editor.VideoClip(render_frame, duration=duration)
# Attach the original audio track to the silent video and re-encode.
video_clip = moviepy.editor.VideoFileClip('/content/temp_test_20220216_13.mp4')
audio_clip_i = moviepy.editor.AudioFileClip('/content/forest10s.wav')
video_clip = video_clip.set_audio(audio_clip_i)
video_clip.write_videofile(mp4_filename, fps=fps, codec='libx264', audio_codec='aac', bitrate='8M')
```
github_jupyter
# Jeu de dés Dans ce notebook, nous reprenons l'exemple vu en cours d'un jeu de $ndes$ dés dont on contraint la somme totale sur les faces. En cours nous avions pris 3 dés et contraint la somme à être égale à 8 ce qui amenait à 21 cas possibles, parmi lesquels 6 ont la face "1" pour le dé 1, 5 ont la face "2" pour le dé 1 etc. On aboutit alors à une probabilité de trouver la face $i$ pour le dé 1 qui décroit en fonction de $i$. Cette décroissance est linéaire dans ce cas. Le but de ce notebook est de montrer que si on augmente le nombre de dés, on aboutit à une décroissance exponentielle, comme pour la loi de Boltzmann. Les dés qui ne sont pas le dé observé jouent le rôle de thermostat. La loi exponentielle est obtenu pour un thermostat grand devant le système observé. C'est ce que nous allons retrouver et on comprend alors que si le thermostat ne contient que 2 dés la loi obtenue soit différente. ``` import numpy as np import matplotlib.pyplot as plt import tools.des as ds ``` ## Tirages d'un jeu aléatoire de configurations des dés avec somme fixe Ici, on fixe le nombre de dés, $ndes$, la contrainte sur la somme, $somme$ et le nombre de tirages, $ntirages$. Il est aussi possible de choisir des dés autres qu'à six faces en donnant le nombre de faces de chaque dé, $nfaces$. ``` ndes=100 somme=200 nfaces=6 ntirages=10000 ``` La fonction $ds.TiragesDes$ génère alors $ntirages$ configurations des dés avec la contrainte sur la somme des faces et avec une distribution équiprobable sur toutes les configurations compatibles avec cette contrainte. $somme$ doit vérifier $ndes \le somme \le nfaces\times ndes$, sinon $ds.TiragesDes$ renvoie un message d'erreur. Le paramètre $npas$ sert à la génération des tirages, la fonction génère en réalité $npas \times ntirages$ configurations à la suite et en extrait une configuration tous les $npas$. Ceci sert à avoir des configurations avec plus de différences entre elles. 
``` tirages=ds.TiragesDes(ndes=ndes,somme=somme,nfaces=nfaces,ntirages=ntirages,npas=100) ``` On peut faire un histogramme des configurations générées. Chaque barre est indexée par "i-j-k" où $i$, $j$ et $k$ sont les faces des dés 1, 2 et 3 respectivement. On peut tracer cet histogramme avec 4, voire 5 dés, mais s'il y a trop de possibilités, l'histogramme n'est pas tracé. ``` hist=ds.HistogrammeDes(tirages) if (len(hist) < 50): plt.bar(hist.keys(),hist.values()) plt.xticks(rotation='vertical') plt.show() else: print("Trop de valeurs pour tracer l'histogramme") ``` ## Calcul des probabilités de chaque face pour un dé donné ``` ds.Proba(tirages,ides=1,nfaces=nfaces) x=np.arange(nfaces)+1 for ides in range(min(10,ndes)): plt.plot(x,ds.Proba(tirages,ides,nfaces=nfaces)) plt.show() n0,q=ds.BoltzmannParam(somme/ndes,nfaces=nfaces) ProbaBoltzmann=1/q*np.exp(-x/n0) plt.plot(x,ProbaBoltzmann,'o') plt.show() x=np.arange(nfaces)+1 for ides in range(min(10,ndes)): plt.plot(x,ds.Proba(tirages,ides,nfaces=nfaces)) n0,q=ds.BoltzmannParam(somme/ndes,nfaces=nfaces) ProbaBoltzmann=1/q*np.exp(-x/n0) plt.plot(x,ProbaBoltzmann,'o') plt.show() ```
github_jupyter
# Enrichment Analysis on LCI Predictions Analyzes the enrichment of proteins predicted by LCI ``` %load_ext autoreload %autoreload 2 import sys, os import json import pickle import numpy as np import matplotlib.pyplot as plt import torch import seaborn as sns import numpy as np from scipy.stats import pearsonr import pandas as pd import networkx as nx from tqdm import tqdm_notebook as tqdm import goatools from goatools.base import download_go_basic_obo, download_ncbi_associations from goatools.obo_parser import GODag from goatools.associations import read_ncbi_gene2go from goatools.go_enrichment import GOEnrichmentStudy from milieu.paper.methods.lci.lci_method import LCIModule from milieu.paper.experiments.go_enrichment import GOEnrichment from milieu.data.network import Network from milieu.data.go import load_go_annotations from milieu.util.util import Params, prepare_sns, load_mapping from milieu.data.associations import load_diseases os.chdir("/Users/sabrieyuboglu/Documents/School/SNAP/projects/disease-protein-prediction") #os.chdir("/dfs/scratch0/sabri/disease-protein-prediction") prepare_sns(sns, {}) %matplotlib inline %config InlineBackend.figure_format = 'retina' ``` ## Loading Data Load disease associations and protein-protein interaction network. ``` # load diseases diseases_dict = load_diseases("data/associations/disgenet-associations.csv", exclude_splits=['none']) # load network network = Network("data/networks/species_9606/bio-pathways/network.txt") n = len(network) # load entrez mapping symbol_to_entrez = load_mapping("data/protein/symbol_to_entrez.txt", "\t") entrez_to_symbol = {int(entrez): symbol for symbol, entrez in symbol_to_entrez.items() if entrez!=""} ``` ### Load Predictions Load predictions from a disease protein prediction method. 
``` preds_df = pd.read_csv("experiments/dpp_predict/lci/predictions.csv", index_col=0) ``` ### Load Evaluation ``` eval_df = pd.read_csv("experiments/aggregation/results.csv", header=[0, 1], index_col=0) ``` ### Load Enrichment Analysis Prepare an enrichment study ``` # load gene ontology obodag = GODag("data/go/go-basic.obo") geneid2go = read_ncbi_gene2go("data/go/gene2go.txt", taxids=[9606]) goeaobj = GOEnrichmentStudy(network.get_names(), # List of mouse protein-coding genes geneid2go, # geneid/GO associations obodag, # Ontologies propagate_counts = True, alpha = 0.05, # default significance cut-off methods = ['fdr_bh']) # defult multipletest correction method ``` ## Perform Enrichment Analysis Perform an enrichment analysis on one disease. ``` disease_id = "C0001627" disease_proteins = set(diseases_dict[disease_id].proteins) disease_symbols = [entrez_to_symbol[entrez] for entrez in disease_proteins if entrez in entrez_to_symbol] symbols_str=str(disease_symbols).replace('\'', '') print(f"{diseases_dict[disease_id].name}: {symbols_str}") sorted_predictions = preds_df.loc[disease_id].sort_values(ascending=False).index print_list([entrez_to_symbol[int(entrez)] for entrez in sorted_predictions[:7] if int(entrez) in entrez_to_symbol]) # run disease enrichment disease_results = goeaobj.run_study(disease_proteins) # run prediction enrichment pred_proteins = set(map(int, sorted_predictions[:len(disease_proteins)])) pred_results = goeaobj.run_study(pred_proteins) def print_list(term_list): string = "" for i, term in enumerate(term_list): string += f"({i+1}) {term}, " print(string[:-2]) k = 5 disease_top_k = sorted(disease_results, key=lambda x: x.p_fdr_bh)[:k] disease_significant = [r for r in disease_results if r.p_fdr_bh < 0.05] print_list([result.goterm.name for result in disease_top_k]) pred_top_k = sorted(pred_results, key=lambda x: x.p_fdr_bh)[:k] pred_significant = [r for r in pred_results if r.p_fdr_bh < 0.05] print_list([result.goterm.name for result in 
pred_top_k])

# Jaccard similarity between the disease's enriched GO terms and the
# prediction's enriched GO terms, for the top-k terms and then for all
# significant terms.
intersection = set([result.goterm.name for result in disease_top_k]) & set([result.goterm.name for result in pred_top_k])
union = set([result.goterm.name for result in disease_top_k]) | set([result.goterm.name for result in pred_top_k])
print("Jaccard Similarity: {}".format(1.0*len(intersection)/len(union)))

intersection = set([result.goterm.name for result in pred_significant]) & set([result.goterm.name for result in disease_significant])
union = set([result.goterm.name for result in pred_significant]) | set([result.goterm.name for result in disease_significant])
print("Jaccard Similarity: {}".format(1.0*len(intersection)/len(union)))
```

## Analyze Enrichment Results
Load enrichment study results
### Load Enrichment Results
```
experiment_dir = 'experiments/go_enrichment/precompute'
outputs_path = os.path.join(experiment_dir, "outputs.pkl")
if os.path.exists(outputs_path):
    with open(outputs_path, 'rb') as f:
        outputs = pickle.load(f)
```
### Jaccard
```
def get_significant_terms(output, threshold=0.05):
    """Return the terms in `output` (a term -> p-value mapping) whose
    p-value is below `threshold`, as a numpy array."""
    terms, pvalues = list(zip(*output.items()))
    significant_idxs = np.where(np.array(pvalues) < threshold)
    return np.array(terms)[significant_idxs]

def jaccard(a_terms, b_terms):
    """Jaccard similarity |A & B| / |A | B| of two term collections;
    returns 0 when both are empty."""
    a_terms = set(a_terms)
    b_terms = set(b_terms)
    return (len(a_terms & b_terms) / len(a_terms | b_terms)
            if len(a_terms | b_terms) != 0 else 0)

output = outputs["C0013182"]  # NOTE(review): unused -- immediately overwritten by the loop variable below

# per-disease Jaccard similarity of significant terms vs. the disease gold set
jaccard_results = {}
for disease_id, output in tqdm(outputs.items()):
    disease_terms = get_significant_terms(output["disease"], threshold=0.01)
    lci_terms = get_significant_terms(output["lci"], threshold=0.01)
    rw_terms = get_significant_terms(output["random_walk"], threshold=0.01)
    lci_jaccard = jaccard(disease_terms, lci_terms)
    rw_jaccard = jaccard(disease_terms, rw_terms)
    jaccard_results[disease_id] = {
        "lci": lci_jaccard,
        "rw": rw_jaccard
    }
jaccard_df = pd.DataFrame(list(jaccard_results.values()), index=list(jaccard_results.keys()))
sns.distplot(jaccard_df["lci"], bins=22, kde=False, label="LCI",
hist_kws={"range": (0, 1), "color": "#E03C3F", "alpha":0.8}) sns.distplot(jaccard_df["rw"], bins=22, kde=False, label="Random Walk", hist_kws={"range": (0, 1), "color": "lightgrey", "alpha": 0.8}) sns.despine() plt.yscale('linear') plt.xlabel("Jaccard similarity of enriched functional classes") plt.ylabel("# of diseases [linear]") plt.legend() ``` ### Spearman ``` from scipy.stats import spearmanr def compute_spearman_correlation(a_term_to_pval, b_term_to_pval, terms=None): """ """ if terms is None: terms = list(a_term_to_pval.keys()) print(len([a_term_to_pval[term] for term in terms])) print(len([b_term_to_pval[term] for term in terms])) print(np.max([a_term_to_pval[term] for term in terms])) sp_corr, sp_pval = spearmanr([a_term_to_pval[term] for term in terms], [b_term_to_pval[term] for term in terms]) return sp_corr, sp_pval output = outputs["C1849295"] lci_spearman, _ = compute_spearman_correlation(output["disease"], output["lci"]) a_term_to_pval = output["lci"] terms = list(a_term_to_pval.keys()) np.max(np.isnan([a_term_to_pval[term] for term in terms])) lci_spearman spearman_results = {} for disease_id, output in tqdm(outputs.items()): lci_spearman, _ = compute_spearman_correlation(output["disease"], output["lci"]) print(disease_id, ":", lci_spearman) rw_spearman, _ = compute_spearman_correlation(output["disease"], output["random_walk"]) spearman_results[disease_id] = { "lci": lci_spearman, "rw": rw_spearman } spearman_df = pd.DataFrame(list(spearman_results.values()), index=list(spearman_results.keys())) spearman_df.to_csv("experiments/go_enrichment/spearman.csv") spearman_df.median() prepare_sns(sns, kwargs={"font_scale": 2, "rc": {'figure.figsize':(10, 5)}}) sns.distplot(spearman_df["lci"], bins=25, kde=False, hist=True, label="LCI", hist_kws={"range": (0, 1), "color": "#E03C3F", "alpha":0.8}) #sns.distplot(spearman_df["rw"], bins=25, kde=False, hist=True, label="Random Walk", hist_kws={"range": (0, 1), # "color": "lightgrey", # "alpha": 0.8}) 
plt.yscale('linear') plt.xlabel("Spearman correlation of \n functional enrichment p-values") plt.ylabel("# of diseases") #plt.legend() sns.despine() plt.tight_layout() plt.savefig("experiments/go_enrichment/lci/leci_go.pdf") ``` #### Top by Mean Correlation ``` spearman_mean_df = spearman_df[['lci', 'rw']].mean(axis=1) spearman_df.insert(0, column="mean", value=spearman_mean_df) top_k = 500 sns.distplot(spearman_df.nlargest(top_k, "mean")["lci"], bins=30, kde=False, hist=True, label="LCI", hist_kws={ "range": (0.1, 0.8), "color": "#E03C3F", "alpha":0.8}) sns.distplot(spearman_df.nlargest(top_k, "mean")["rw"], bins=30, kde=False, hist=True, label="Random Walk", hist_kws={"range": (0.1, 0.8), "color": "lightgrey", "alpha": 0.8}) sns.despine() plt.yscale('linear') plt.xlabel(f"Spearman correlation of functional enrichment pvalues [top {top_k}]") plt.ylabel("# of diseases") plt.legend() plt.savefig("experiments/go_enrichment/lci/lci_rw_go.pdf") ``` #### Top by LCI ``` eval_df["lci_dpp[DP]", "Recall-at-100"] spearman_df.insert(0, column="lci_recall_100", value=eval_df["lci_dpp[DP]", "Recall-at-100"]) top_k = 500 sns.distplot(spearman_df.nlargest(top_k, "lci_recall_100")["lci"], bins=25, kde=False, hist=True, label="LCI", hist_kws={ "range": (0.1, 0.8), "color": "#E03C3F", "alpha":0.8}) sns.distplot(spearman_df.nlargest(top_k, "lci_recall_100")["rw"], bins=25, kde=False, hist=True, label="Random Walk", hist_kws={"range": (0.1, 0.8), "color": "lightgrey", "alpha": 0.8}) sns.despine() plt.yscale('linear') plt.xlabel(f"Spearman correlation of functional enrichment pvalues [top {top_k}]") plt.ylabel("# of diseases [linear]") plt.legend() #plt.savefig("../experiments/go_enrichment/lci/lci_rw_go.pdf") ``` #### Top by RW ``` eval_df["lci_dpp[DP]", "Recall-at-100"] spearman_df.insert(0, column="rw_recall_100", value=eval_df["rw_dpp[DP]", "Recall-at-100"]) top_k = 500 sns.distplot(spearman_df.nlargest(top_k, "rw_recall_100")["lci"], bins=25, kde=False, hist=True, label="LCI", 
hist_kws={ "range": (0.1, 0.8), "color": "#E03C3F", "alpha":0.8}) sns.distplot(spearman_df.nlargest(top_k, "rw_recall_100")["rw"], bins=25, kde=False, hist=True, label="Random Walk", hist_kws={"range": (0.1, 0.8), "color": "lightgrey", "alpha": 0.8}) sns.despine() plt.yscale('linear') plt.xlabel(f"Spearman correlation of functional enrichment pvalues [top {top_k}]") plt.ylabel("# of diseases [linear]") plt.legend() #plt.savefig("../experiments/go_enrichment/lci/lci_rw_go.pdf") ``` #### Filter Terms ``` term_to_proteins = load_go_annotations(network.get_names(), level=None) def threshold_terms(term_to_proteins, lower, upper): """ Filter terms where the number of proteins does not fall within the lower and upper bounds. """ return {term: proteins for term, proteins in term_to_proteins.items() if lower <= len(term) <= upper} params = { "lower": 20, "upper": 200 } threshold_terms = threshold_terms(term_to_proteins, **params).keys() spearman_results = {} for disease_id, output in tqdm(outputs.items()): lci_spearman, _ = compute_spearman_correlation(output["disease"], output["lci"], threshold_terms) rw_spearman, _ = compute_spearman_correlation(output["disease"], output["random_walk"], threshold_terms) spearman_results[disease_id] = { "lci": lci_spearman, "rw": rw_spearman } threshold_spearman_df = pd.DataFrame(list(spearman_results.values()), index=list(spearman_results.keys())) threshold_spearman_df.median() threshold_spearman_mean_df = threshold_spearman_df[['lci', 'rw']].mean(axis=1) threshold_spearman_df.insert(0, column="mean", value=threshold_spearman_mean_df) threshold_spearman_df top_k = 500 sns.distplot(threshold_spearman_df.nlargest(top_k, "mean")["lci"], bins=30, kde=False, hist=True, label="LCI", hist_kws={ "range": (0.1, 0.8), "color": "#E03C3F", "alpha":0.8}) sns.distplot(threshold_spearman_df.nlargest(top_k, "mean")["rw"], bins=30, kde=False, hist=True, label="Random Walk", hist_kws={"range": (0.1, 0.8), "color": "lightgrey", "alpha": 0.8}) sns.despine() 
plt.yscale('linear') plt.xlabel(f"Spearman correlation of functional enrichment pvalues [top {top_k}]") plt.ylabel("# of diseases [linear]") plt.legend() #plt.savefig("../experiments/go_enrichment/lci/lci_rw_go.pdf") ``` ### Pearson ``` from scipy.stats import pearsonr def compute_pearson_correlation(a_term_to_pval, b_term_to_pval): """ """ terms = list(a_term_to_pval.keys()) sp_corr, sp_pval = pearsonr([a_term_to_pval[term] for term in terms], [b_term_to_pval[term] for term in terms]) return sp_corr, sp_pval pearson_results = {} for disease_id, output in tqdm(outputs.items()): lci_pearson, _ = compute_pearson_correlation(output["disease"], output["lci"]) rw_pearson, _ = compute_pearson_correlation(output["disease"], output["random_walk"]) pearson_results[disease_id] = { "lci": lci_pearson, "rw": rw_pearson } pearson_df = pd.DataFrame(list(spearman_results.values()), index=list(spearman_results.keys())).join(size_df) sns.distplot(pearson_df.sort_values('size').head(-500)["lci"], bins=25, kde=False, hist=True, label="LCI", hist_kws={ "color": "#E03C3F", "alpha":0.8}) sns.distplot(pearson_df.sort_values('size').head(-500)["rw"], bins=25, kde=False, hist=True, label="Random Walk", hist_kws={ "color": "lightgrey", "alpha": 0.8}) sns.despine() plt.yscale('linear') plt.xlabel("SpearmanR of Functional Terms (top 250)") plt.ylabel("# of diseases [linear]") plt.legend() #plt.savefig("../experiments/go_enrichment/lci/lci_rw_go.pdf") np.mean(pearson_df["lci"]) np.mean(pearson_df["rw"]) ``` ### Disease Size Analysis ``` size_dict = {disease.id: len(disease) for disease in diseases_dict.values()} size_df = pd.DataFrame(size_dict.values(), index=size_dict.keys(), columns=["size"]) sns.scatterplot(data=spearman_df.join(size_df), x="size", y="lci", alpha=0.1) plt.xscale('log') ```
github_jupyter
# Imports ``` import re import nltk import os import logging import torch import torch.nn as nn import torch.nn.functional as F import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline from sklearn.model_selection import train_test_split from torchtext.data import Field, BucketIterator, TabularDataset from nltk.stem import PorterStemmer from nltk.corpus import stopwords nltk.download('stopwords') from preprocessing import * from dataset import * from metrics import * from train import * from plots import * ``` # Hyperparameters of the process ``` batch_size = 32 max_vocab_size = 25_000 epochs = 5 learning_rate = 0.0075 clip = 10 # glove = 'glove.6B.100d' glove = 'glove.twitter.27B.100d' ``` # Preprocessing and preparing the dataset ``` # EDIT THE PATH OF THE CSV HERE root_dir = os.path.join('.', 'drive', 'My Drive', 'Colab Notebooks', 'AI2', 'Project1', 'dataset') filename = 'SentimentTweets.csv' split_dir_name = os.path.join(root_dir, 'split_dataset') # split the full dataset into 3 files for train-val-test, in the split_dir filenames = split_dataset(root_dir, filename, split_dir_name, train_size=0.90) # filenames = ('train_dataset.csv', 'val_dataset.csv', 'test_dataset.csv') # get the datasets and the Bucket Iterators device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') TEXT, LABEL, datasets, iterators = parse_datasets(split_dir_name, filenames, device, batch_size=batch_size, glove=glove, max_vocab_size=max_vocab_size) device # unpack the datasets and the iterators train_dataset, val_dataset, test_dataset = datasets train_iterator, val_iterator, test_iterator = iterators ``` # Model ``` class RNN(nn.Module): """ Recurrent Neural Network Class """ def __init__(self, TEXT, config): """ :param torchtext.data.Field TEXT: A Field object representing the text data of the model. :param dict config: Dictionary containing key-values pairs that define the architecture of the model. 
""" super(RNN, self).__init__() cells = { 'RNN': nn.RNN, 'GRU': nn.GRU, 'LSTM': nn.LSTM } # get the different hyperparameters self.cell_type = config['cell_type'] self.emb_dim = config['emb_dim'] self.vocab_size = config['vocab_size'] self.hidden_dim = config['hidden_dim'] self.num_layers = config['num_layers'] self.rnn_dropout = config['rnn_dropout'] self.dropout_rate = config['dropout_rate'] self.bidirectional = config['bidirectional'] self.classes = config['classes'] # create the embedding layer glove = TEXT.vocab.vectors self.embedding = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.emb_dim).from_pretrained(glove) # make sure that the embeddings will not be updated self.embedding.weight.requires_grad = False # create a set of RNN layers self.rnn = cells[self.cell_type](self.emb_dim, self.hidden_dim, num_layers=self.num_layers, dropout=self.rnn_dropout, bidirectional=self.bidirectional) # dropout layer self.dropout = nn.Dropout(self.dropout_rate) # create the fully connected layer rnn_output_dim = self.hidden_dim if self.bidirectional: rnn_output_dim *= 2 classes = self.classes if self.classes == 2: classes = 1 self.fc = nn.Linear(in_features=rnn_output_dim, out_features=classes) self.sigmoid = nn.Sigmoid() def forward(self, X, lengths): """ :param Tensor X: Input to be fed to the RNN. [maxlen x batch_size] :param Tensor lengths: Actual length of each training example in the batch. [1 x batch_size] :return: The output of the model. 
[1 x batch_size] :rtype: Tensor """ # take the embeddings each word in the sentence embedded = self.embedding(X) # unpad the sequence, that is, remove the embeddings of the padding token unpadded = nn.utils.rnn.pack_padded_sequence(embedded, lengths.to('cpu')) # run through the RNN if self.cell_type == 'LSTM': rnn_outputs, (last_hidden_state, last_cell_state) = self.rnn(unpadded) else: rnn_outputs, last_hidden_state = self.rnn(unpadded) # get the data of the last hidden state for each layer batch_size = X.shape[1] num_directions = 2 if self.bidirectional else 1 data = last_hidden_state.data.view(self.num_layers, num_directions, batch_size, self.hidden_dim) # if bidirectional if self.bidirectional: # concatenate the last hiddens states (-> and <-) of the topmost layer hidden_states_concatenated = torch.cat((data[-1, 0, :, :], data[-1, 1, :, :]), dim=1) else: # else, just get the last hidden state of the topmost layer hidden_states_concatenated = data[-1, 0, :, :] # run through the fully connected layer and a sigmoid to get probabilities if self.dropout_rate > 0.0: hidden_states_concatenated = self.dropout(hidden_states_concatenated) output_fc = self.fc(hidden_states_concatenated) output = self.sigmoid(output_fc) return output def unfreeze_embedding(self): """ This method makes the embedding layer trainable. """ self.embedding.weight.requires_grad = True ``` ## Create the model ``` config = { 'cell_type': 'LSTM', 'emb_dim': 100, 'vocab_size': len(TEXT.vocab), 'hidden_dim': 512, 'num_layers': 2, 'rnn_dropout': 0.1, 'dropout_rate': 0.1, 'bidirectional': True, 'classes': 2 } classifier = RNN(TEXT, config) classifier.to(device) ``` ## Define here the loss function, the optimizer and the scheduler ``` optimizer = torch.optim.Adam(classifier.parameters(), lr=learning_rate) criterion = nn.BCELoss().to(device) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=2, eps=1e-5, verbose=True) # scheduler = None ``` # Train! 
``` # method found in train.py, should take ~3.5 minutes/epoch on GPU metrics = train(classifier, train_iterator, val_iterator, epochs, optimizer, criterion, scheduler=scheduler, clip=clip, unfreeze_on_epoch=0, verbose=True) ``` ## Let's plot the different metrics for the model ``` # plot the Training and Validation Losses plot_metrics(metrics['train_losses'], metrics['val_losses'], 'Train Loss', 'Val Loss', 'Loss\n', '\nLosses\n') # plot the Training and Validation Accuracies plot_metrics(metrics['train_accuracies'], metrics['val_accuracies'], 'Train Accuracy', 'Val Accuracy', 'Accuracy\n', '\n\nAccuracies\n') # plot the Training and Validation F1 scores plot_metrics(metrics['train_f1'], metrics['val_f1'], 'Train F1', 'Val F1', 'F1 Score\n', '\n\nF1 scores\n') ``` ## Get the ground truths and the predictions for the whole Test Dataset ``` # function located in dataset.py y_test, y_test_pred = get_truths_and_predictions(classifier, test_iterator) y_test_pred ``` ## Classification report and Confusion Matrix ``` from sklearn.metrics import classification_report, confusion_matrix threshold = 0.5 predicted_test_labels = torch.where(y_test_pred > threshold, 1, 0) print("Classification Report:") print(f"{classification_report(y_test.cpu(), predicted_test_labels.cpu())}") print("\nConfusion Matrix:\n") print(confusion_matrix(y_test.cpu(), predicted_test_labels.cpu())) ``` ## Let's also plot the ROC Curve ``` plot_roc_curve(y_test, y_test_pred) ``` # Custom Grid Search function ``` def grid_search(train_iterator, val_iterator, epochs, clip, TEXT, device, cell_types, hidden_dims, layers, learning_rates): """ Grid search to find the best model """ for cell_type in cell_types: for hidden_dim in hidden_dims: for num_layers in layers: for lr in learning_rates: print('=' * 100) print('Config: cell type = {}, hidden_dim = {}, num_layers = {} and ' 'lr = {}'.format(cell_type, hidden_dim, num_layers, lr)) print('=' * 100) config = { 'cell_type': cell_type, 'emb_dim': 100, 
'vocab_size': len(TEXT.vocab), 'hidden_dim': hidden_dim, 'num_layers': num_layers, 'rnn_dropout': 0.15, 'dropout_rate': 0.3, 'bidirectional': True, 'classes': 2 } classifier = RNN(TEXT, config) classifier.to(device) optimizer = torch.optim.Adam(classifier.parameters(), lr=lr) criterion = nn.BCELoss().to(device) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=4, eps=1e-5, verbose=True) metrics = train(classifier, train_iterator, val_iterator, epochs, optimizer, criterion, scheduler=scheduler, clip=clip, unfreeze_on_epoch=0, verbose=True) train_loss = metrics['train_losses'] val_loss = metrics['val_losses'] plot_metrics(train_loss, val_loss, 'Train Loss', 'Val Loss', 'Loss\n', '\nLosses with config: cell type ' '= {}, hidden_dim = {}, num_layers = {} and lr = ' '{}\n'.format(cell_type, hidden_dim, num_layers, lr)) train_acc = metrics['train_accuracies'] val_acc = metrics['val_accuracies'] plot_metrics(train_acc, val_acc, 'Train Accuracy', 'Val Accuracy', 'Accuracy\n', '\n\nAccuracies with ' 'config: cell type = {}, hidden_dim = {}, num_layers = ' '{} and lr = {}\n'.format(cell_type, hidden_dim, num_layers, lr)) train_f1 = metrics['train_f1'] val_f1 = metrics['val_f1'] plot_metrics(train_f1, val_f1, 'Train F1', 'Val F1', 'F1 Score\n', '\n\nF1 scores with config: cell type = {}' ', hidden_dim = {}, num_layers = {} and lr = ' '{}\n'.format(cell_type, hidden_dim, num_layers, lr)) print('\n\n\n\n') """ cell_types = ['LSTM', 'GRU'] hidden_dims = [64, 128, 256, 512] layers = [2, 3] learning_rates = [0.01, 0.001] grid_search(train_iterator, val_iterator, epochs, clip, TEXT, device, cell_types=cell_types, hidden_dims=hidden_dims, layers=layers, learning_rates=learning_rates) """ ``` ## Predict your own sentences ``` def predict(classifier, sentence, TEXT, device, threshold=0.5): """ predict the sentiment of a given sentence """ sentence = main_pipeline(sentence) split_sentence = sentence.split() words = len(split_sentence) 
tokenized_sentence = list(map(lambda word: TEXT.vocab.stoi.get(word, 0), split_sentence)) tokenized_sentence = torch.LongTensor(tokenized_sentence).reshape(-1, 1).to(device) length = torch.LongTensor([words]).to(device) with torch.no_grad(): classifier.eval() value = classifier(tokenized_sentence, length) # print(value.item()) if value >= threshold: return 1 else: return 0 sentence = "ALFZ FYGE" sentiment = predict(classifier, sentence, TEXT, device) print("Sentiment predicted: ", sep='', end='') if sentiment == 1: print("Positive") else: print("Negative") ```
github_jupyter
## Approximate Inference over a BN using Belief Propagation

Consider the following joint probability distribution:

\begin{align}
p(x_1,x_2,x_3,x_4)=p(x_1)p(x_2|x_1)p(x_3|x_1,x_2)p(x_4|x_2)
\end{align}

where $x_1\in\{0,1\}$, $x_2\in\{0,1\}$, $x_3\in\{0,1,2\}$ and $x_4\in\{0,1,2,3\}$

The values of the CPD factors are known. Here we simply worry about inference. The goal is to learn how to use and interpret the provided library. Note now that the associated factor graph contains cycles!!

```
import numpy as np
from lib import Message_passing_BN as mp
import warnings
warnings.filterwarnings('ignore')
from IPython.display import Image
Image(filename='files/BN2.png')

#p(x1)
CPD_1=np.array([0.5, 0.5])

#p(x2|x1)
CPD_2=np.array([0.75, 0.25, 0.3, 0.7])

#p(x3|x1,x2)
CPD_3=np.array([0.1, 0.67,0.23,0.1,0.3,0.6,0.1, 0.6,0.3,0.2,0.3,0.5])

#p(x4|x2)
CPD_4=np.array([0.05,0.8,0.05,0.1,0.2,0.2,0.5,0.1])
```

### Defining the factor graph associated to the BN

Before running inference using BP, we will define the variable nodes and the graph.

```
v_card=[2,2,3,4] #Vector of cardinalities

node_x1=mp.create_var_node(ID=1,cardinality=v_card[0],neighbor_order=[1,2,3],observed_value_index=-1)
node_x2=mp.create_var_node(ID=2,cardinality=v_card[1],neighbor_order=[2,3,4],observed_value_index=-1)
node_x3=mp.create_var_node(ID=3,cardinality=v_card[2],neighbor_order=[3],observed_value_index=-1)
node_x4=mp.create_var_node(ID=4,cardinality=v_card[3],neighbor_order=[4],observed_value_index=-1)

list_var_nodes=[node_x1,node_x2,node_x3,node_x4]

factor_1=mp.create_factor_node(ID=1,neighbors=[node_x1],CPD=CPD_1)
factor_2=mp.create_factor_node(ID=2,neighbors=[node_x1,node_x2],CPD=CPD_2)
factor_3=mp.create_factor_node(ID=3,neighbors=[node_x1,node_x2,node_x3],CPD=CPD_3)
factor_4=mp.create_factor_node(ID=4,neighbors=[node_x2,node_x4],CPD=CPD_4)

list_factor_nodes=[factor_1,factor_2,factor_3,factor_4]
```

### Running BP message passing

This is straightforward given the functions provided.
The factor graph is not a tree, so the algorithm may not converge (in this case it will because we have a single loop, but in general we lose convergence and have to stop after a few iterations)

```
L=5 #BP iterations

for l in range(L):
    #Factor update
    for factor_node in list_factor_nodes:
        mp.update_factor_to_var(factor_node)
    #Variable update
    for var_node in list_var_nodes:
        mp.update_var_to_factor(var_node)

list_marg=[]

for var_node in list_var_nodes:
    marg=mp.compute_var_marginal(var_node)
    list_marg.append(marg)
    print("The marginal pmf of node x_%d is=" %(var_node['ID']))
    print(marg)
```

### Inference with partial observations

Note that if we observe either x1 or x2, the FG becomes a tree and BP converges

```
# The function mp.initialize_variable set the value to their current observed state

mp.initialize_variable(var_node=node_x1,observed_value_index=0)

#IMPORTANT, we have to re-initialize all factor nodes!! Intermediate messages were not erased from the previous BP

mp.initialize_factor_msgs(factor_1,neighbors=[node_x1])
mp.initialize_factor_msgs(factor_2,neighbors=[node_x1,node_x2])
mp.initialize_factor_msgs(factor_3,neighbors=[node_x1,node_x2,node_x3])
mp.initialize_factor_msgs(factor_4,neighbors=[node_x2,node_x4])
```

And now we can run BP again ...

```
L=20 #BP iterations

for l in range(L):
    #Factor update
    for factor_node in list_factor_nodes:
        mp.update_factor_to_var(factor_node)
    #Variable update
    for var_node in list_var_nodes:
        mp.update_var_to_factor(var_node)

list_marg=[]

for var_node in list_var_nodes:
    marg=mp.compute_var_marginal(var_node)
    list_marg.append(marg)
    print("The marginal pmf of node x_%d is=" %(var_node['ID']))
    print(marg)
```

### Exact Inference via node clustering

The optimal inference algorithm for **any** BN, no matter if the factor graph contains cycles or not, is the *Junction Tree Algorithm (JTA)*.
In a nutshell, it is based on creating cluster of nodes so that the resulting fg with the cluster nodes is cycle-free and BP con be run over it. JTA finds the most efficient way to achieve such graph. **However its complexity is in general prohibitive. Unless the dimension is very small or the graph has a sparse structure**. Lets show that we can run exact inference in the current BP **by merging nodes x1 and x2** Again, we make use of a provided function to create the cluster-node. ** Some CPDs have to be re-defined! Now we have a cluster node that can take up to 4 values!** ``` Image(filename='files/BN22.png') #p(x1,x2) CPD_12_joint=CPD_2 CPD_12_joint[0:2]*=CPD_1[0] #This exploits the Tree-structure CPD_12_joint[2:4]*=CPD_1[1] node_x1=mp.create_var_node(ID=1,cardinality=v_card[0],neighbor_order=[1,2,3],observed_value_index=-1) node_x2=mp.create_var_node(ID=2,cardinality=v_card[1],neighbor_order=[2,3,4],observed_value_index=-1) node_x3=mp.create_var_node(ID=3,cardinality=v_card[2],neighbor_order=[3],observed_value_index=-1) node_x4=mp.create_var_node(ID=4,cardinality=v_card[3],neighbor_order=[4],observed_value_index=-1) node_x12=mp.create_joint_node(5,node_members=[node_x1,node_x2],neighbor_order=[5],observed_values_indexes=-1) list_var_nodes=[node_x12,node_x3,node_x4] factor_1=mp.create_factor_node(ID=1,neighbors=[node_x1],CPD=CPD_1) factor_2=mp.create_factor_node(ID=2,neighbors=[node_x1,node_x2],CPD=CPD_2) factor_3=mp.create_factor_node(ID=3,neighbors=[node_x1,node_x2,node_x3],CPD=CPD_3) factor_4=mp.create_factor_node(ID=4,neighbors=[node_x2,node_x4],CPD=CPD_4) factor_12=mp.create_factor_node(5,neighbors=[node_x12],CPD=CPD_12_joint) list_factor_nodes=[factor_12,factor_3,factor_4] L=3 #BP iterations for l in range(L): #Factor update for factor_node in list_factor_nodes: mp.update_factor_to_var(factor_node) #Variable update for var_node in list_var_nodes: mp.update_var_to_factor(var_node) list_marg=[] for var_node in list_var_nodes: 
marg=mp.compute_var_marginal(var_node) list_marg.append(marg) print("The marginal pmf of node x_%d is=" %(var_node['ID'])) print(marg) ``` BP marginals are close to the exact ones, but they are not equal!
github_jupyter
# Extract time series from a published figure Scott Cole 29 July 2016 ## Summary Sometimes we might be interested in obtaining a precise estimate of the results published in a figure. Instead of zooming in a ton on the figure and manually taking notes, here we use some simple image processing to extract the data that we're interested in. ## Example Case We're looking at [a recent Neuron paper](http://www.sciencedirect.com/science/article/pii/S0896627315004134) that highlighted a potential top-down projection from motor cortex (M2) to primary somatosensory cortex (S1). This interaction is summarized in the firing rate curves below: <img src="files/figure_raw.PNG"> If we were interested in modeling this interaction, then we may want to closely replicate the firing rate dynamics of S1. So in this notebook, we extract this time series from the figure above so that we can use it for future model fitting. ## Step 1 In our favorite image editting software (or simply MS Paint), we can isolate the curve we are interested in as well as the scale bars (separated by whitespace) <img src="files/figure_processed.png"> ## Step 2: Convert image to binary ``` # Load image and libraries %matplotlib inline from matplotlib import cm import matplotlib.pyplot as plt import numpy as np from scipy import misc input_image = misc.imread('figure_processed.png') # Convert input image from RGBA to binary input_image = input_image - 255 input_image = np.mean(input_image,2) binary_image = input_image[::-1,:] binary_image[binary_image>0] = 1 Npixels_rate,Npixels_time = np.shape(binary_image) # Visualize binary image plt.figure(figsize=(8,5)) plt.pcolor(np.arange(Npixels_time),np.arange(Npixels_rate),binary_image, cmap=cm.bone) plt.xlim((0,Npixels_time)) plt.ylim((0,Npixels_rate)) plt.xlabel('Time (pixels)',size=20) plt.ylabel('Firing rate (pixels)',size=20) ``` ## Step 3. 
Project 2-D binary image to 1-D time series ``` # Extract the time series (not the scale bars) by starting in the first column col_in_time_series = True s1rate_pixels = [] col = 0 while col_in_time_series == True: if len(np.where(binary_image[:,col]==1)[0]): s1rate_pixels.append(np.mean(np.where(binary_image[:,col]==1)[0])) else: col_in_time_series = False col += 1 s1rate_pixels = np.array(s1rate_pixels) # Subtract baseline s1rate_pixels = s1rate_pixels - np.min(s1rate_pixels) # Visualize time series plt.figure(figsize=(5,5)) plt.plot(s1rate_pixels,'k',linewidth=3) plt.xlabel('Time (pixels)',size=20) plt.ylabel('Firing rate (pixels)',size=20) ``` ## Step 4. Rescale in x- and y- variables ``` # Convert rate from pixels to Hz ratescale_col = 395 # Column in image containing containing rate scale rate_scale = 50 # Hz, scale in image ratescale_Npixels = np.sum(binary_image[:,ratescale_col]) pixels_to_rate = rate_scale/ratescale_Npixels s1rate = s1rate_pixels*pixels_to_rate # Convert time from pixels to ms timescale_row = np.argmax(np.mean(binary_image[:,400:],1)) # Row in image containing time scale time_scale = 100 # ms, scale in image timescale_Npixels = np.sum(binary_image[timescale_row,400:]) pixels_to_time = time_scale/timescale_Npixels pixels = np.arange(len(s1rate_pixels)) t = pixels*pixels_to_time # Visualize re-scaled time series plt.figure(figsize=(5,5)) plt.plot(t, s1rate,'k',linewidth=3) plt.xlabel('Time (ms)',size=20) plt.ylabel('Firing rate (Hz)',size=20) ``` ## Step 5. 
Resample at desired sampling rate

```
# Interpolate time series to sample every 1ms
# NOTE(review): relies on `t` (time axis) and `s1rate` (firing rate) defined
# in the previous cells; `t` is assumed to be in milliseconds — TODO confirm
from scipy import interpolate
f = interpolate.interp1d(t, s1rate) # Set up interpolation (linear by default)
tmax = np.floor(t[-1]) # last whole millisecond covered by the data; interp1d would raise outside [t[0], t[-1]]
t_ms = np.arange(tmax) # Desired time series, in ms (one sample per millisecond)
s1rate_ms = f(t_ms) # Perform interpolation

# Visualize re-scaled time series
plt.figure(figsize=(5,5))
plt.plot(t_ms, s1rate_ms,'k',linewidth=3)
plt.xlabel('Time (ms)',size=20)
plt.ylabel('Firing rate (Hz)',size=20)

# Save final time series (written as extracted_timeseries.npy in the working directory)
np.save('extracted_timeseries',s1rate_ms)
```
github_jupyter
![](../images/logos.jpg "MiCMOR, KIT Campus Alpin") **[MiCMOR](https://micmor.kit.edu) [SummerSchool "Environmental Data Science: From Data Exploration to Deep Learning"](https://micmor.kit.edu/sites/default/files/MICMoR%20Summer%20School%202019%20Flyer.pdf)** IMK-IFU KIT Campus Alpin, Sept. 4 - 13 2019, Garmisch-Partenkirchen, Germany. --- # Basic plotting in Python: Matplotlib **Note:** Parts of this notebook are inspired by the Notebook of Chapter 9 of the excellent [Python for Data Analysis Book](https://www.oreilly.com/library/view/python-for-data/9781491957653/) by Wes McKinney (the creator of pandas). The original notebook is located [here](https://nbviewer.jupyter.org/github/pydata/pydata-book/blob/2nd-edition/ch09.ipynb). The core plotting layer for most python plotting libs is [matplotlib](https://matplotlib.org). Initially, it was modeled after the Matlab plotting routines. It's been around for a long time and is very solid but may seem a bit clunky at first. Some of the more modern packages have nicer visuals by default, but you can tweak matplotlib a lot and can also make it look more modern. Furthermore, a lot of the other packages will use matplotlib as a back-end. This is nice since you then can alter them as you would with pure matplotlib plots. What also makes it a bit confusing for newcomers is a) that it's pretty low level and that b) it can be used with two paradigms: the matlab-style state-based model and the object-oriented (OO) model. ``` %matplotlib inline %load_ext autoreload %autoreload 2 import matplotlib.pylab as plt import numpy as np ``` ## The two Matplotlib Plotting APIs Just a brief comparison... ### MATLAB-style stateful plotting flavor This interface is stateful (it keeps track of the "current" figure and axes, you can get a reference to them with `plt.gcf()` (get current figure) and `plt.gca()` (get current axes). The interface is fast and convenient for simple plots. However, it can be nasty for more complex pots (i.e. 
multiple panels). IMHO, do not use this for your work. The OO-style is almost as easy to use and much more powerful... However, you often might want to mix it in so it's good to know.

```
# some data
x = np.linspace(0, 10, 100)

# create a plot figure
plt.figure()

# create the first of two panels and set current axis
plt.subplot(2, 1, 1) # (rows, columns, panel number)
plt.plot(x, np.sin(x))

# create the second panel and set current axis
plt.subplot(2, 1, 2)
plt.plot(x, np.cos(x));
```

### Object-oriented flavor

```
# First create a grid of plots
# ax will be an array of two Axes objects
fig, ax = plt.subplots(2)

# Call plot() method on the appropriate object
ax[0].plot(x, np.sin(x))
ax[1].plot(x, np.cos(x));
```

## The Basics

**Note:** It's quite common to mix and match the two interface styles.

### Figures and subplots

As you can see, we always start with `figure`, `subplot` and `axes` objects. You can think of figure as the canvas and axes as an individual plot with its x/y-axis, ticks, labels and the actual plots. A figure can be composed of multiple subplots, each of which is addressed by its own axis.

```
# the manual way to compose a figure with three subplots
fig = plt.figure()

# the numbers are: row, column, number of plot
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
```

Matplotlib (MPL) uses a code to represent linestyle, symbols or color. Here, we want a black (`k`) dashed (`--`) line.

```
plt.plot(np.random.randn(50).cumsum(), 'k--');
```

Now we do a more complicated plot. It is composed of two subplots, with a histogram on the left and a scatter plot on the right. We specify a plot size of 14x4 inches. We also use 20 bins for the histogram and fill the bars with black (30% opacity).
```
fig = plt.figure(figsize=(14,4))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)

ax1.hist(np.random.randn(100), bins=20, color='k', alpha=0.3)
ax2.scatter(np.arange(30), np.arange(30) + 3 * np.random.randn(30));

plt.close('all')
```

A more common way to create a figure with subplots is:

```python
fig, axes = plt.subplots(2, 3)
```

This would create a figure with 6 subplots (2 rows, 3 columns).

Now, let's compose a 2x2 plot with shared axes and histograms. We also adjust the space between the plots for a more compact plot.

```
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
for i in range(2):
    for j in range(2):
        axes[i, j].hist(np.random.randn(500), bins=50, color='k', alpha=0.5)
plt.subplots_adjust(wspace=0, hspace=0)
```

## Colors, Markers and line styles

As mentioned above, you can adjust markers and styles with a special syntax. `ko--` would create a line plot (dashed) with marker circles in black.

```
from numpy.random import randn
plt.plot(randn(30).cumsum(), 'ko--');

plt.close('all')
```

You can combine multiple plots into an axis and create a legend that's composed from labels attached to the plot commands...

```
data = np.random.randn(30).cumsum()
plt.plot(data, 'k--', label='Default')
plt.plot(data, 'k-', drawstyle='steps-post', label='steps-post')
plt.legend(loc='best');
```

## Ticks, labels and legends

We can customize all parts of the plots. I.e., we can set custom ticks and labels.
``` fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(np.random.randn(1000).cumsum()) # now we define custom tick positions and tick labels ticks = ax.set_xticks([0, 250, 500, 750, 1000]) labels = ax.set_xticklabels(['one', 'two', 'three', 'four', 'five'], rotation=30, fontsize='small') # set a title and an axis label ax.set_title('My first matplotlib plot') ax.set_xlabel('Stages'); ``` As previously shown above, it's easy to add legends (see the guide [[here]](https://matplotlib.org/3.1.1/tutorials/intermediate/legend_guide.html) for more details... ``` from numpy.random import randn fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(randn(1000).cumsum(), 'k', label='one') ax.plot(randn(1000).cumsum(), 'k--', label='two') ax.plot(randn(1000).cumsum(), 'k.', label='three'); # add legend to the best location (repart the plot by # running the cell multiple times to see it in action) ax.legend(loc='best'); ``` ## Saving files to disk It's easy to save the figure to disc. You can specify the file type to `pdf` or `png`: ```python # plot my stuff, then... plt.savefig('figure.png', dpi=400, bbox_inches='tight') # and as a pdf plt.savefig('figure.pdf', bbox_inches='tight') ``` `bbox_inches='tight'` is required to avoid unnecessary whitespace around figures... ### Change the plot style As this is a common complaint about matplotlib: you can actually style your plots in matplotlib. ```python plt.style.use('seaborn-whitegrid') ``` Below are some examples (source - also click for more examples: https://matplotlib.org/3.1.1/gallery/style_sheets/style_sheets_reference.html): ![](https://matplotlib.org/3.1.1/_images/sphx_glr_style_sheets_reference_001.png) ![](https://matplotlib.org/3.1.1/_images/sphx_glr_style_sheets_reference_026.png) ![](https://matplotlib.org/3.1.1/_images/sphx_glr_style_sheets_reference_006.png) And a note on confusing differences in the API. 
Consider this simple plot: ``` x = np.linspace(0, 10, 30) y = np.sin(x) plt.plot(x, y, 'o', color='black'); ``` Matplotlib can be used with a bit of an archaic syntax for specifying linstyles, symbols and colors in very short argument form. Take a look at this example: ``` plt.plot(x, y, 'o-k'); ``` This could also be written as: ``` plt.plot(x, y, 'o', ls='-', c='k'); ``` Or even more verbose (but actually readable): ``` plt.plot(x, y, 'o', linestyle='-', color='black'); ``` Use scatter instead of plot if you want to control aspects of the points with data... ``` rng = np.random.RandomState(0) x = rng.randn(100) y = rng.randn(100) colors = rng.rand(100) sizes = 1000 * rng.rand(100) plt.scatter(x, y, c=colors, s=sizes, alpha=0.3, cmap='viridis') plt.colorbar(); # show color scale ``` This is much more versatile and already halfway towards the more modern plottings libraries. ## Closing early... We have to leave it at that since we cannot dive well into `matplotlib` in this course. We will use mostly other libraries to get results fast, but some matplotlib might crop up occasionally. However, you can always - and should - consult the excellent matplotlib documentation/ gallery with its vast number of examples. [Matplotlib gallery](https://matplotlib.org/gallery/index.html)
github_jupyter
[View in Colaboratory](https://colab.research.google.com/github/whongyi/openrec/blob/master/tutorials/Youtube_Recommender_example.ipynb) <p align="center"> <img src ="https://recsys.acm.org/wp-content/uploads/2017/07/recsys-18-small.png" height="40" /> <font size="4">Recsys 2018 Tutorial</font> </p> <p align="center"> <font size="4"><b>Modularizing Deep Neural Network-Inspired Recommendation Algorithms</b></font> </p> <p align="center"> <font size="4">Hands on: Customizing Deep YouTube Video Recommendation. Youtube example</font> </p> # the Youtube Recommender The training graph of YouTube-Rec can be decomposed as follows. <p align="center"> <img src ="https://s3.amazonaws.com/cornell-tech-sdl-openrec/tutorials/youtube_rec_module.png" height="600" /> </p> * **inputgraph**: user demographis, item consumption history and the groundtruth label. * **usergraph**: extract user-specific latent factor. * **itemgraph**: extract latent factors for items. * **interactiongraph**: uses MLP and softmax to model user-item interactions. After defining subgraphs, their interfaces and connections need to be specified. A sample specification of YouTube-Rec can be as follows. <p align="center"> <img src ="https://s3.amazonaws.com/cornell-tech-sdl-openrec/tutorials/youtube_rec.png" height="300" /> </p> # Install OpenRec and download dataset ``` !pip install openrec import urllib.request dataset_prefix = 'http://s3.amazonaws.com/cornell-tech-sdl-openrec' urllib.request.urlretrieve('%s/lastfm/lastfm_test.npy' % dataset_prefix, 'lastfm_test.npy') urllib.request.urlretrieve('%s/lastfm/lastfm_train.npy' % dataset_prefix, 'lastfm_train.npy') urllib.request.urlretrieve('%s/lastfm/user_feature.npy' % dataset_prefix, 'user_feature.npy') ``` # Your task - understand reuse and extend an exsiting recommender - fill in the placeholders in the implementation of the `YouTubeRec` function - successfully run the experimental code with the recommender you just built. 
``` from openrec.recommenders import VanillaYouTubeRec # load the vanilla version and extend it with user demographic informaton from openrec.modules.extractions import LatentFactor from openrec.modules.interactions import MLPSoftmax import tensorflow as tf def Tutorial_YouTubeRec(batch_size, user_dict, item_dict, dim_user_embed, dim_item_embed, max_seq_len, l2_reg_embed=None, l2_reg_mlp=None, dropout=None, init_model_dir=None, save_model_dir='Youtube/', train=True, serve=False): rec = VanillaYouTubeRec(batch_size=batch_size, dim_item_embed=dim_item_embed['id'], max_seq_len=max_seq_len, total_items=item_dict['id'], l2_reg_embed=l2_reg_embed, l2_reg_mlp=l2_reg_embed, dropout=dropout, init_model_dir=init_model_dir, save_model_dir=save_model_dir, train=train, serve=serve) @rec.traingraph.inputgraph.extend(outs=['user_gender', 'user_geo']) def add_train_feature(subgraph): subgraph['user_gender'] = tf.placeholder(tf.int32, shape=[batch_size], name='user_gender') subgraph['user_geo'] = tf.placeholder(tf.int32, shape=[batch_size], name='user_geo') subgraph.update_global_input_mapping({'user_gender': subgraph['user_gender'], 'user_geo': subgraph['user_geo']}) @rec.servegraph.inputgraph.extend(outs=['user_gender', 'user_geo']) def add_serve_feature(subgraph): subgraph['user_gender'] = tf.placeholder(tf.int32, shape=[None], name='user_gender') subgraph['user_geo'] = tf.placeholder(tf.int32, shape=[None], name='user_geo') subgraph.update_global_input_mapping({'user_gender': subgraph['user_gender'], 'user_geo': subgraph['user_geo']}) @rec.traingraph.usergraph(ins=['user_gender', 'user_geo'], outs=['user_vec']) @rec.servegraph.usergraph(ins=['user_gender', 'user_geo'], outs=['user_vec']) def user_graph(subgraph): _, user_gender = LatentFactor(l2_reg=l2_reg_embed, shape=[user_dict['gender'], dim_user_embed['gender']], id_=subgraph['user_gender'], subgraph=subgraph, init='normal', scope='user_gender') _, user_geo = LatentFactor(l2_reg=l2_reg_embed, shape=[user_dict['geo'], 
dim_user_embed['geo']], id_=subgraph['user_geo'], subgraph=subgraph, init='normal', scope='user_geo') subgraph['user_vec'] = tf.concat([user_gender, user_geo], axis=1) @rec.traingraph.interactiongraph(ins=['user_vec', 'seq_item_vec', 'seq_len', 'label']) def train_interaction_graph(subgraph): MLPSoftmax(user=subgraph['user_vec'], item=subgraph['seq_item_vec'], seq_len=subgraph['seq_len'], max_seq_len=max_seq_len, dims=[dim_user_embed['total'] + dim_item_embed['total'], item_dict['id']], l2_reg=l2_reg_mlp, labels=subgraph['label'], dropout=dropout, train=True, subgraph=subgraph, scope='MLPSoftmax') @rec.servegraph.interactiongraph(ins=['user_vec', 'seq_item_vec', 'seq_len']) def serve_interaction_graph(subgraph): MLPSoftmax(user=subgraph['user_vec'], item=subgraph['seq_item_vec'], seq_len=subgraph['seq_len'], max_seq_len=max_seq_len, dims=[dim_user_embed['total'] + dim_item_embed['total'], item_dict['id']], l2_reg=l2_reg_mlp, train=False, subgraph=subgraph, scope='MLPSoftmax') @rec.traingraph.connector.extend @rec.servegraph.connector.extend def connect(graph): graph.usergraph['user_gender'] = graph.inputgraph['user_gender'] graph.usergraph['user_geo'] = graph.inputgraph['user_geo'] graph.interactiongraph['user_vec'] = graph.usergraph['user_vec'] return rec ``` # Experiement We will use the recommender you implemented to run a toy experiement on the LastFM dataset. 
## load lastfm dataset ``` import numpy as np train_data = np.load('lastfm_train.npy') test_data = np.load('lastfm_test.npy') user_feature = np.load('user_feature.npy') total_users = 992 total_items = 14598 user_dict = {'gender': 3, 'geo': 67} item_dict = {'id': total_items} user_feature[:10], test_data[:10] ``` ## preprocessing dataset ``` from openrec.utils import Dataset train_dataset = Dataset(train_data, total_users, total_items, sortby='ts', name='Train') test_dataset = Dataset(test_data, total_users, total_items, sortby='ts', name='Test') ``` ## hyperparameters and training parameters ``` dim_user_embed = {'geo': 40, # dimension of user geographic embedding 'gender': 10, # dimension of user gender embedding 'total': 50} dim_item_embed = {'id': 50, 'total': 50} # dimension of item embedding max_seq_len = 100 # the maxium length of user's listen history total_iter = int(1e3) # iterations for training batch_size = 100 # training batch size eval_iter = 100 # iteration of evaluation save_iter = eval_iter # iteration of saving model ``` ## define sampler We use `YouTubeSampler` and `YouTubeEvaluationSampler` to sample sequences of training and testing samples. ``` from openrec.utils.samplers import YouTubeSampler, YouTubeEvaluationSampler train_sampler = YouTubeSampler(user_feature=user_feature, batch_size=batch_size, max_seq_len=max_seq_len, dataset=train_dataset, num_process=1) test_sampler = YouTubeEvaluationSampler(user_feature=user_feature, dataset=test_dataset, max_seq_len=max_seq_len) ``` ## define evaluator ``` from openrec.utils.evaluators import AUC, Recall auc_evaluator = AUC() recall_evaluator = Recall(recall_at=[100, 200, 300, 400, 500]) ``` ## define model trainer we used the Vanilla version of the Youtube recommender to train our model. 
``` from openrec import ModelTrainer model = Tutorial_YouTubeRec(batch_size=batch_size, user_dict=user_dict, item_dict=item_dict, max_seq_len=max_seq_len, dim_item_embed=dim_item_embed, dim_user_embed=dim_user_embed, save_model_dir='youtube_recommender/', train=True, serve=True) model_trainer = ModelTrainer(model=model) ``` ## training and testing ``` model_trainer.train(total_iter=total_iter, eval_iter=eval_iter, save_iter=save_iter, train_sampler=train_sampler, eval_samplers=[test_sampler], evaluators=[auc_evaluator, recall_evaluator]) ```
github_jupyter
# Welcome to the Interactive User Guide for KALMUS (GUI)! In this notebook, I will introduce: 1. **Installation of KALMUS package** 2. **What is KALMUS for** - Extract color information from film frames or brightness information from monochrome film frames using different color metrics and image sampling methods. - Generate a barcode representation of the color/brightness information of a film. 3D information in 2D representation. - Compare different barcodes globally through similarity measures on images. Interpret the difference through similarity scores. - Compare segments of barcodes locally using functions embedded in GUI. Interpret the difference using domain knowledge and contextual information extracted by KALMUS. 3. **How to interact with KALMUS using its Graphic user interface** - Visualize barcodes - Generate barcodes - Change barcodes - Save barcodes - Load barcodes - Compare barcodes ## 1. Installation There are two ways that you could install KALMUS on your local machine: 1. (**Recommended**) Get the latest distribution of KALMUS from PyPI ([KALMUS Project Page on PyPI](https://pypi.org/project/kalmus/)). Use command `$ pip install kalmus` or `$ pip install --upgrade kalmus` (if kalmus has been installed) to install the latest version of the KALMUS package. All dependencies should be automatically installed during this process. 2. Alternatively, you could install the KALMUS locally by first cloning the GitHub repo of Kalmus ([GitHub page](https://github.com/KALMUS-Color-Toolkit/KALMUS)). 
Then, move to the top directory of cloned KALMUS project and install using the command `pip install .` **See our [Installation Guide](https://kalmus-color-toolkit.github.io/KALMUS/install.html) for more details.** Once the package is installed, you could verify the version of KALMUS package using the command `$ pip show kalmus` <img src="notebook_figures/kalmus_version.png" alt="drawing" width="800 px"/> In version 1.3.7 or above, you could also verify the version of KALMUS through the module's `__version__` attribute. ``` # Uncommented the code below, only if your installed KALMUS version >= 1.3.7 # import kalmus # print(kalmus.__version__) ``` ## 2. What is KALMUS for? KALMUS is a Python package for the computational analysis of colors in films. It addresses how to best describe a film's color. This package is optimized for two purposes: **(1) various ways to measure, calculate and compare a film's color and (2) various ways to visualize a film's color.** KALMUS utilizes the movie barcode as a visualization of the film's color. It has a modularized pipeline for the generation of barcodes using different measures of color and region of interest in each film frame. It also provides a set of measures that allow users to compare different films' colors directly through this visualization. ### 2.1 Barcode Generation Barcode supports __7 color metrics__ that measure the color of a frame and __5 frame types__ that specify which part of the frame will be used in the color measures. Below is a table of available combinations of color metric and frame type in barcode generation. 
| frame_type \ color_metric | Average | Median | Mode | Top-dominant | Weighted-dominant | Brightest | Bright | | --------------------------| :-----: | :----: | :----: | :----------: | :---------------: | :-------: | :----: | | **Whole_frame** | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | | **High_contrast_region** | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#x2612; | | **Low_contrast_region** | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#x2612; | | **Foreground** | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#x2612; | | **Background** | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#9745; | &#x2612; | ### 2.2 Examples of the color of a frame using a selected color metric and frame type. Here, we show some example frames with their color extracted using the selected color metric and frame type In the figures below, - On the left of each figure, we show the original frame (with letterboxing if applicable). - On the right of each figure, we show the extracted region using the selected frame type with __the color of extracted region on the rightmost__. 
**Casino Royale (2006) using Average Color with Whole frame or only Region with High (brightness) contrast** ![casino_whole_avg](notebook_figures/casino_2_whole_average.png) ![casino_high_avg](notebook_figures/casino_2_high_average.png) --- **Casino Royale (2006) using Average Color with Whole frame or only Foreground of frame** ![casino_2_whole_avg](notebook_figures/casino_1_whole_average.png) ![casino_2_fore_avg](notebook_figures/casino_1_fore_average.png) --- **Incredibles (2004) using Whole frame with Mode color, Top-dominant color, or Brightest color** ![incre_whole_avg](notebook_figures/incredible_1_whole_mode.png) ![incre_whole_top](notebook_figures/incredible_1_whole_dominant.png) ![incre_whole_bri](notebook_figures/incredible_1_whole_brightest.png) --- **Mission: Impossible (1996) using Whole frame and Foreground with Mode or Average color** ![mission_whole_mode](notebook_figures/mission_1_whole_mode.png) ![mission_fore_avg](notebook_figures/mission_1_fore_avg.png) ![mission_fore_mode](notebook_figures/mission_1_fore_mode.png) --- **I, Robot (2004) using Median color with Whole, Foreground, or Background of frame** ![robot_whole_med](notebook_figures/robot_1_whole_median.png) ![robot_fore_med](notebook_figures/robot_1_fore_median.png) ![robot_back_med](notebook_figures/robot_1_back_median.png) ### 2.3 Examples of barcode generated from a whole film using selected color metric and frame type Below, we show two barcodes generated from a whole film (Mission: Impossible (2006)) using two different frame types. **Barcode generated using Average color and Whole_frame of each frame** ![whole_barcode](notebook_figures/mission_barcode_whole_frame_avg.png) **Barcode generated using Average color but only Foreground of each frame** ![fore_barcode](notebook_figures/mission_barcode_Foreground_avg.png) **Available options for comparing different barcode visualization** We provide a set of six comparison metrics for users to assess the similarity between two barcodes. 
| Comparison metric | Range | Tag | | :---------------- | ----: | :---: | | Normalized root mean square error | 0 least similar, 1 most similar | Image Similarity | | Structural similarity index | 0 least similar, 1 most similar | Image Similarity | | Cross correlation | -1 anti-similar, 1 most similar | Signal Correlation | | Local cross correlation | -1 anti-similar, 1 most similar | Signal Correlation | | Needleman-Wunsch | 0 least similar, 1 most similar | Sequence Matching | | Smith-Waterman | 0 least similar, 1 most similar | Sequence Matching | For more details, please see our paper [KALMUS: tools for color analysis of films](../paper/joss-paper.md) ## Get Started... KALMUS has a low-level API, high-level command-line, and **Graphic user interface** for audiences from all backgrounds to take advantage of its functionality. In this notebook Guide, we will focus on the **Graphic user interface** of KALMUS. ## 3. How to interact with KALMUS through Graphic User Interface If you have installed the KALMUS package on your machine with version 1.3.0 and onward, you can start the GUI using the command: ``` $ kalmus-gui ``` Alternatively, you could import the main function of the GUI from `kalmus.command_line_gui` module. ``` from kalmus.command_line_gui import main main(); ``` ### 3.1 Main window of KALMUS ![kalmus_gui](notebook_figures/kalmus_gui_main_display.png) - (1) The display 1 of Barcode (barcode image of Barcode Object) - (2) The display 2 of Barcode (barcode image of Barcode Object) - (3) A histogram plot of the [hue](https://en.wikipedia.org/wiki/HSL_and_HSV) (0 - 360 degree on the color wheel) distribution of the Barcode image in display 1. - (4) A histogram plot of the [hue](https://en.wikipedia.org/wiki/HSL_and_HSV) distribution of the Barcode image in display 2. - (5) Matplotlib's [interactive navigation toolbar](https://matplotlib.org/3.2.2/users/navigation_toolbar.html). 
Notice that we wish the users to use the **Save Image** button on the left instead of the save button on the toolbar if they only want to save the barcode image (not the whole figure). The **display (1)** and **display(2)** are clickable plots. - You can click on any point of the barcode image to get the RGB (Brightness for Brightness barcode) values, (x, y) position, frame index, and time of video at that point. - You can also check the frames around that point **if you saved the frames** during the barcode generation (see section 3.2 (10) for how to save frames during the generation) ![clickable_plot](notebook_figures/kalmus_gui_main_2.png) --- ![main_2](notebook_figures/kalmus_gui_main_buttons.png) ### 3.2 (6) Generate Barcode Window ![gene](notebook_figures/kalmus_gui_generate_barcode.png) - (1) Barcode Color/Brightness metric - (2) Barcode Frame type - (3) Barcode type (Color or Brightness) - (4) Start collecting colors from frames at **Start at** (type: int) (**Optional**: No specified or specify start==0, no frames will be skipped) - (5) Frame sampled rate: Collect color from one frame every **sampled rate** frame (type: int) - (6) How many frames included in the generated barcode (type: int) (**Optional**: No specified or specify end. Collect color/brightness till the end of input video) - (7) Alternatively, you could use the more intuitive time unit. ![gene2](notebook_figures/kalmus_gui_generate_barcode_2.png) - (time unit) (4) Start at minutes:seconds of input video (minutes and seconds are all type: int) (**Optional**: No specified or specify start==0, no frames will be skipped) - (time unit) (5) Period in seconds for one sampled frame (type: float) - (time unit) (6) End at minutes:seconds of input video (minutes and seconds are all type: int) (**Optional**: No specified or specify end. Collect color/brightness till the end of input video) - (8) The path to the input video. Users may use the Browse button to locate the media file directly. 
- (9) Whether to automatically detect and remove the letterbox. **Auto** is recommended; use manual only if you know the exact location (in pixels) of the letterbox, or the input video's letterboxing does not follow the convention (not black or in dark color).
- (10) Whether to save frames during the generation, and how often to save one frame, in seconds (seconds type: float).
- (11) Whether to rescale the frames during the generation. Resizing frames is highly recommended if you are using a frame type other than whole_frame, or the input video is in high resolution.
- (12) Whether to multi-thread the generation process. Highly recommended if your processor supports multi-threading.
- (13) Start the generation of the barcode
check section 3.6 below) - Specify the type of barcode saved in JSON - Specify which barcode display on the Main window that you will load the barcode into - Press the Load button to load the JSON barcode ### 3.4 (8) Load Barcode from Memory Window ![load_mem](notebook_figures/kalmus_gui_load_memory.png) - Every barcode generated from the current running GUI or Loaded from JSON barcode will be stored on the memory - User can load them onto the main display by selecting the name of barcode on the list - Specify which display on the main window that new barcode will be loaded into - Press the Load Selected Barcode button ### 3.5 (9) Reshape Barcode Window ![reshape](notebook_figures/kalmus_gui_reshape_barcode.png) **There are three options available for users to change the barcode on the display** - Reshape how many frames==pixels in each column of frames (similar to numpy.ndarray.reshape) - Scale the barcode image by enlarging or shrinking the barcode image by a factor - Resize the barcode image to a specific size in pixels In the window: - (1) Show the current spatial size of the selected barcode in the main display (Barcode 1 in this case) - (2) Select which options to use - (3) Select which Barcode to change - Press Process to change the Barcode using the given option and parameters ### 3.6 (10) Save JSON Barcode Window ![save](notebook_figures/kalmus_gui_save_json.png) Similar to the load memory window - Select the barcode on memory (list) that you wish to save locally as a JSON file - Give the path to the saved JSON file in JSON file path textbox - Press the Save Barcode button The attributes of Barcode Object will be stored in a JSON file that can be used to rebuild the Barcode Object (in GUI, you simply reload the JSON barcode through Load JSON Window **section 3.3**) ### 3.7 (11) Save Barcode Image Window ![save](notebook_figures/kalmus_gui_save_image.png) - Select which barcode on the main display that you wish to save locally as an image. 
- The Saved width and height are automatically filled with the current width and height of barcodes. You could change to your desirable spatial size. - Specify the path to the saved image file in the Image file path textbox - Press the Save Barcode button ### 3.8 (12) Inspect Barcode Window ![inspect](notebook_figures/kalmus_gui_inspect.png) You will first be asked which barcode on the main display that you wish to inspect in further details. ![inspect](notebook_figures/kalmus_gui_inspect_2.png) In the inspect window there are three options to explore - (1) Output the color/brightness data of the Color/Brightness barcode into a csv file - (2) Show the histogram distribution of hue values of the Color barcode or brightness value of Brightness barcode (similar to those in the main display) - (3) (Only available for Color barcode) Show the distribution of RGB color of the Color barcode in RGB cube. ![cube](notebook_figures/kalmus_gui_inspect_3.png) ### 3.9 (13) Statistics Information Window ![stats](notebook_figures/kalmus_gui_stats.png) The similarity comparison between the displayed barcodes using a set of six comparison metrics. **Warning:** The initiation of this window may take tens of seconds. For more references about these six comparison metrics, please check section 2.3 above. ### 3.10 (14) Check Meta Information Window Similarly to the **Inspect Barcode Window** ![inspect](notebook_figures/kalmus_gui_inspect.png) You will first be asked which barcode on the main display that you wish to check for meta information. ![meta](notebook_figures/kalmus_gui_check_meta.png) - A list of meta-information will be shown here - To update the meta information, similarly to Specify Meta Info in the barcode generation, use the Update Meta Info button ![specify](notebook_figures/kalmus_gui_generate_barcode_3.png) - Hit the Update Meta Info button in the Specify Meta Data window after you update the entries. 
- Hit Refresh in Check Barcode Meta Information Window to see the updates - To reflect the updates on the title of plots in the main display, find the barcode with updated meta information in the memory using the Load Memory button and load the updated barcode back to the main display. ### 3.11 (15) Quit Quit the KALMUS's GUI. **Be sure to save all the barcodes you like before you quit the program, and make sure the Generate Barcode Window is closed before quitting**. ## 4. Thank you! Thank you so much for reading through this notebook! If you have any problems in running this notebook, please check the README.md file in this folder for troubleshooting. If you find any errors in the instructions, please feel free to email the notebook author, Yida Chen, <yc015@bucknell.edu>
github_jupyter
``` import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras import utils, regularizers, callbacks, backend from tensorflow.keras.layers import Input, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Reshape, Conv1D, MaxPooling1D, Dropout, Add, LSTM, Embedding from tensorflow.keras.initializers import glorot_normal, glorot_uniform from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Model, load_model from astropy.io import fits from IPython.display import YouTubeVideo from tools.resample_flux import trapz_rebin from tensorflow.keras.utils import to_categorical from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix ``` # <span style="color:#3E2D80">Creando un cerebro artifical para encontrar la más temperamental de las bestias astronómicas</span> Si crees que Nueva York es un lugar ajetreado, ¡piénsalo de nuevo! El espacio está lleno de cosas que chocan continuamente en la noche. Durante su estudio de cinco años, DESI tendrá la oportunidad de atrapar a miles de las bestias astronómicas más temperamentales del Universo, desde [estrellas en explosión](https://es.wikipedia.org/wiki/Supernova) a [fusionando estrellas de neutrones y estrellas negras agujeros](https://es.wikipedia.org/wiki/Kilonova) y [estrellas destrozadas por agujeros negros](https://es.wikipedia.org/wiki/Evento_de_disrupci%C3%B3n_de_marea), sin mencionar una larga lista de otros eventos fantásticos. Los científicos de DESI son inteligentes, ¡pero ni siquiera ellos pueden identificar todos estos casi > 30 millones de espectros! Afortunadamente, tenemos expertos que saben cómo crear cerebros artificiales, o redes neuronales, construidas para especializarse en tareas como esta. 
En este cuaderno, [Segev Ben Zvi](https://www.pas.rochester.edu/~sybenzvi/), <img src="../desihigh/images/Segev.jpg" alt="Drawing" style="width: 400px;"/> nos pondrá al día con el aprendizaje automático de vanguardia y cómo se usa en esta búsqueda. Las supernovas (de tipo Ia) son las más críticas para la cosmología, así que primero nos centraremos en cómo identificarlas (a simple vista), ¡luego enseñaremos a trozos de silicio a hacerlo! ### Supernovas de colapso del núcleo En casi todos los casos, las supernovas son la etapa final del ciclo de vida de las estrellas más masivas (> 10 $ M_{\odot} $, 10 veces mayor que la masa del Sol). Finalmente, el combustible disponible que sostiene la fusión nuclear en el núcleo se agota y la energía utilizada para soportar la capa más externa desaparece. Entonces, inevitablemente, la estrella colapsa por su propio peso en una violenta explosión que dura solo una fracción de segundo. Una estrella de neutrones quedará atrás o, si la estrella progenitora es lo suficientemente masiva, incluso un [agujero negro](https://es.wikipedia.org/wiki/Agujero_negro). ``` YouTubeVideo('o-Psuz7u5OI', width=800, height=400) ``` ### Supernovas binarias No todas las supernovas surgen debido al colapso del núcleo. En sistemas binarios con una enana blanca compacta y una estrella envejecida (secuencia principal), la enana blanca puede atraer material gravitacionalmente desde la capa de su compañera. Si se acumula suficiente material en la superficie de la enana blanca para que su masa total exceda el [límite de Chandrasekhar](https://es.wikipedia.org/wiki/L%C3%ADmite_de_Chandrasekhar), 1.44 $ M_\odot $, entonces ocurre una reacción nuclear descontrolada que hace explotar la Enana Blanca en una explosión intensamente brillante. ¡Estas llamadas supernovas de tipo Ia son la razón por la que descubrimos la Energía Oscura en primer lugar! 
<img src="../desihigh/images/type1a.jpg" alt="Drawing" style="width: 400px;"/> Estos diferentes tipos de supernova se distinguen por su brillo cambiante con el tiempo, su _'curva de luz'_, pero si tenemos el espectro de la supernova, es mucho menos probable que cometamos un error. Cada espectro de supernova tiene líneas muy amplias distintas tanto en emisión como en absorción (Si II, H $\alpha$, Ca II, etc.), del gas caliente que escapa de la explosión central a alta velocidad. Estas "huellas" permiten distinguir los distintos tipos. <img src="../desihigh/images/sne_filippenko.png" width="600px"/> ¿Puedes decir qué es diferente entre estos cuatro ejemplos? ¿Tienen líneas en común y diferencias? Durante muchas décadas, los astrónomos han seguido esta lógica y la han dividido en una clasificación de supernovas, lo que nos dice el mecanismo detrás de su origen. <img src="../desihigh/images/SupernovaeClassification.png" width="600px"/> Por ejemplo, una supernovas de tipo Ib no muestra líneas de hidrógeno (H), pero sí las de helio. ## Cátalogo de Galaxias Brillantes de DESI (Bright Galaxy Survey) Para ser lo suficientemente brillantes como para ser detectadas, las supernovas deben vivir en una galaxia relativamente cercana, como Messier 101, <img src="../desihigh/images/type1aBGS.jpg" alt="Drawing" style="width: 400px;"/> Con su estudio de diez millones de galaxias en el Universo _local_, el DESI Bright Galaxy Survey será perfecto para encontrarlas, ya que la luz recolectada por cualquier fibra dada será el total de la galaxia y la luz de una supernova potencial, <img src="../desihigh/images/Messier101.png" alt="Drawing" style="width: 400px;"/> donde vemos la misma galaxia Messier 101 en las imágenes Legacy de DESI y el área sobre la cual la luz es recolectada por las fibras (círculos). 
No sabremos de antemano si hay supernovas allí, por supuesto, así que esto solo funciona ya que DESI observará muchas galaxias y la tasa de supernovas en el Universo es relativamente alta, ¡así que tenemos buenas probabilidades!

Veamos cómo se ve esto en la práctica.

### ¿Qué ve DESI?

```
# Open the FITS file and inspect its basic contents (HDU names and shapes).
hdus = fits.open('../desihigh/dat/bgs-supernovae.fits')
hdus.info()
```

Here _WAVE_ is simply an array of the observed wavelengths,

```
# Wavelength grid shared by all the spectra below.
# NOTE(review): units appear to be Angstroms, per the axis labels -- confirm.
hdus['WAVE'].data
```

_FLUX_ es la intensidad de luz observada y _IVAR_ es $ (1\ / \ \rm{varianza \ esperada }) $ del flujo, es decir, de esto podemos derivar el error en este flujo observado. ¿Puedes pensar cómo? Dada la forma,

```
# Shape is (n_targets, n_wavelengths): one flux row per target on the
# common wavelength grid.
hdus['FLUX'].data.shape
```

vemos que tenemos el flujo medido a las mismas longitudes de onda que las de arriba, para seis objetivos.

```
# Plot all the spectra on the same wavelength grid.
fig, axes = plt.subplots(2,3, figsize=(8,5), sharex=True, sharey=True, tight_layout=True)

for i, ax in enumerate(axes.flatten()):
    # Common wavelength grid and this target's flux, inverse variance and
    # spectral type.  ivar is read but not plotted here; it is the
    # (1 / expected variance) discussed above.
    wave = hdus['WAVE'].data
    flux = hdus['FLUX'].data[i]
    ivar = hdus['IVAR'].data[i]
    spec = hdus['SPECTYPE'].data[i][0]

    ax.plot(wave, flux, alpha=0.5)
    # Fixed limits so the six panels are directly comparable.
    ax.set(xlim=(3600, 9800), ylim=(-10,50), title=spec)

axes[0,0].set(ylabel=r'Flux [$10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\AA^{-1}$]');
axes[1,0].set(ylabel=r'Flux [$10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\AA^{-1}$]');

# Label the x axis only on the bottom row.
for i in range(0,3):
    axes[1,i].set(xlabel=r'Observed wavelength [$\AA$]')
```

Estos son los flujos observados por una fibra determinada de DESI, después de haber removido la luz conocida de la atmósfera. En la parte superior izquierda, tenemos la luz emitida por una galaxia BGS típica. Los otros paneles contienen la luz de la galaxia de un tipo específico de supernovas en cada caso.

¡Lo primero que hay que notar es que esto es _mucho_ menos claro de lo que esperábamos del espectro de ejemplo anterior!
Entonces, ¿cómo podemos confirmar que la luz de las supernovas de cada tipo (Ib, Ic, IIn, IIP) está realmente allí, como se confirma?

## Menos es más

Nuestro "problema" es que DESI mide la cantidad de luz en intervalos muy pequeños en longitud de onda (o color), debido a su alta resolución. Los espectros de objetivos específicos, en este caso supernovas, pueden no cambiar mucho en un intervalo de longitud de onda pequeño, por lo que terminamos con muchas mediciones _ruidosas_ de la misma cosa: la intensidad de la luz en un color o longitud de onda promedio. Esto es _genial_, ya que podemos ser inteligentes y simplemente combinar estas medidas para obtener un valor promedio que es mucho más _preciso_.

```
# Plot all the spectra on the same wavelength grid.
fig, axes = plt.subplots(2,3, figsize=(15,8), sharex=True, sharey=True, tight_layout=True)

# Resample the flux onto a new wavelength grid with only 100 bins.
# np.linspace(..., 101) produces 101 bin *edges*, i.e. 100 bins.
coarse_wave = np.linspace(3600, 9800, 101)

for i, ax in enumerate(axes.flatten()):
    wave = hdus['WAVE'].data
    flux = hdus['FLUX'].data[i]
    ivar = hdus['IVAR'].data[i]   # not used in this plot
    spec = hdus['SPECTYPE'].data[i][0]

    # Original high-resolution spectrum (faint line)...
    ax.plot(wave, flux, alpha=0.5)

    # ...overlaid with the trapezoidal rebinning onto the coarse grid
    # (dark line).  Only the left edge of each bin is used as its x position.
    fl = trapz_rebin(wave, flux, edges=coarse_wave)
    ax.plot(coarse_wave[:-1], fl, 'k-', alpha=0.2)

    ax.set(xlim=(3600, 9800), ylim=(-10,50), title=spec)

axes[0,0].set(ylabel=r'flux [$10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\AA^{-1}$]');
axes[1,0].set(ylabel=r'flux [$10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\AA^{-1}$]');

for i in range(0,3):
    axes[1,i].set(xlabel=r'Observed wavelength [$\AA$]')
```

¡Y ahora vemos mucho mejor los deeps característicos, o líneas de absorción, de las supernovas! Por supuesto, este no es el único enfoque. Podríamos haber usado estadísticas y los datos originales 'no agrupados', pero las cosas siempre son mejores cuando son más transparentes.
Sin embargo, sigue siendo complicado, ya que la velocidad de la estrella significa que las líneas de absorción no estarán donde las esperaríamos normalmente. En este sentido, hay _dos corrimientos al rojo_ en el espectro. Así que hemos identificado con éxito una (¡simulada, por ahora!) Supernovas DESI, pero nunca pudimos inspeccionar visualmente el estudio completo de esta manera. Necesitamos un método más eficiente... ## Un cerebro artificial: donde la anatomía se encuentra con la astronomía <img src="../desihigh/images/AIBrain.jpg" alt="Drawing" style="width: 600px;"/> Dada la increíble capacidad del cerebro humano, ¿a quién no se le ocurriría intentar comprenderlo y reproducirlo? Se logró un gran progreso en la década de 1940 con modelos simples que llevaron a los modelos de vanguardia de _aprendizaje profundo_ que han sido fundamentales para Google, Amazon y otras tecnologías hoy en día gigantes. En las neuronas biológicas, las dendritas reciben señales de entrada de _miles_ de neuronas vecinas. Estas señales se _combinan_ en el núcleo y pasan a un axón. Solo si la señal combinada es _mayor que un valor de umbral_, entonces el axón liberará una señal prescrita. <img src="../desihigh/images/Dendrite.jpg" alt="Drawing" style="width: 600px;"/> Este simple paso final es donde ocurre la magia. Antes de esto, la señal de salida es una simple suma ponderada de las señales de entrada, p. Ej. $ y = a \ cdot x + b $. Las posibilidades de esta configuración son relativamente limitadas. El umbral hace que la señal sea _no lineal_, de modo que cuando la señal de entrada cambia en una _cantidad pequeña, dx_, la señal de salida puede cambiar en _mucho_. Esto, junto con el _gran número_ de neuronas (relativamente simples) en el cerebro permite un potencial casi ilimitado, representado por el rango de posibilidades de salida o ideas que pueden formarse. Mientras lees esto, hay _100 mil millones_ de neuronas que pueden disparar su imaginación. ¡Quién sabe lo que se te ocurra! 
### Potencial ilimitado El aprendizaje automático 'profundo' es donde estas ideas relativamente simples se encuentran con la potencia informática bruta que nos otorgó [John von Neumann](https://es.wikipedia.org/wiki/John_von_Neumann), [John Bardeen](https://es.wikipedia.org/wiki/John_Bardeen), ... Las cosas realmente despegaron cuando salió a la luz el [martillo](https://es.wikipedia.org/wiki/Unidad_de_procesamiento_gr%C3%A1fico) adecuado para el problema. En un equipo, pero en esencia, la misma estructura, podemos tener una serie de valores de entrada, $ x_1, x_2, ... $, como lo que sería recogido por las dendritas, que se ponderan y se suma, $ a_1 \ cdot x 1 + a2 \ cdot x_2 + ... + b_1 \ cdot _ x2 + ... $ como en el axón. Esto puede suceder varias veces, en una serie de 'capas ocultas', donde la salida de la capa anterior actúa como entrada de la siguiente. En cualquier etapa, aunque normalmente es la última, la "magia" podría ser introducida, o de forma similar, en una capa determinada. Por lo general, esto se conoce como "activación", en analogía con el disparo de un axón. <img src="../desihigh/images/network.jpg" width="600px"/> De antemano, o _a priori_, no tenemos idea de cuál debería ser el peso de nuestra 'red neuronal' artificial para un problema dado. La capacidad de la configuración está representada por el número de salidas diferentes que se pueden generar cuando cualquier peso determinado produce un nuevo número aleatorio, el resultado de lo que normalmente Será algo sin sentido para una entrada dada. Pero si podemos definir el "éxito", como la satisfacción después de una buena comida o una broma, entonces podemos inculcar el "buen comportamiento" en los pesos. Introducimos repetidamente la red en un rango de escenarios diferentes y permitimos que los pesos, $ a_1, ..., b_1, ... $, o entidad, se refinen a aquellos que logran el _mayor éxito_, según se define en comparación con el 'verdad'. 
De esta manera, podemos replicar cientos de generaciones de [selección natural](https://es.wikipedia.org/wiki/Selecci%C3%B3n_natural) en una fracción de segundo. ### Si la vida fuera tan simple El atractivo del aprendizaje profundo es claro, al igual que su potencial: ¿cuántas veces te has equivocado en tu vida? Claro, siempre estamos aprendiendo, pero la ciencia generalmente se basa en lo que sabemos y lo que no, dentro de lo razonable. No sobrevivirás demasiado tiempo en el campo si predices una respuesta diferente a la que tenías seis meses antes, ¡a pesar de lo mucho que hayas aprendido! En otras palabras, lo más importante es saber lo que puedes decir _con confianza_. Normalmente, las redes neuronales harán una predicción definitiva, sin tener idea de su confianza y crecimiento potencial. Como tal, podríamos revisar nuestra ambición ... ### Potencial Limitado Habiendo admitido nuestro potencial mundano, podríamos sacrificar algo de nuestra capacidad de ser perfectos, a favor de limitar el _potencial de equivocarnos_. Esto puede ser práctico, ya que una red con una gran cantidad de pesos puede ser infalible dada una potencia informática infinita, pero de lo contrario sería un desafío enorme. Uno de los medios más efectivos para limitar el número de pesos en nuestra red y, por lo tanto, la posibilidad de estar equivocados, es reemplazar capas ocultas 'completamente conectadas' de pesos arbitrarios con capas 'convolucionales'. Una convolución simplemente reemplaza una cantidad de píxeles vecinos con un promedio ponderado, como en este caso. <img src="../desihigh/images/CMB-lensing.jpg" width="600px"/> Así, no es algo muy diferente a una capa completamente conectada. Sin embargo, difiere en que los pesos aplicados dentro de la imagen son _constantes en toda la imagen_. En otras palabras, una capa "completamente conectada" permite un tamaño, forma y tipo de vidrio diferentes en cualquier posición dada de una imagen. 
Al restringirnos a 'un vaso', reducimos en gran medida el potencial de la red, tanto en su capacidad para tener razón como para estar equivocado. De esta manera, hemos recurrido a [filtros adaptados](https://es.wikipedia.org/wiki/Filtro_adaptado) arbitrarios, conocidos pronto por la invención del radar y el sonar.

Eso no quiere decir que no tengamos algunas capas completamente conectadas, lo que podría detectar anomalías que no son consistentes en una imagen determinada.

## El Evento Principal

Primero, simplemente abramos nuestro archivo de datos y enumeremos el resultado.

```
# Load the bundle of simulated, preconditioned spectra.
spectra = np.load('../desihigh/dat/sn_simspectra.npz')

# List the array names stored in the .npz archive.
spectra.files
```

y descomprímamoslo en algo más explícito,

```
# Common wavelength grid for every spectrum in the file.
wave = spectra['wave']

# Host galaxy.
host_fluxes = spectra['hosts']

# Host + supernova spectra, one array per supernova class
# (Ia, Ib, Ic, IIP respectively).
snia_fluxes = spectra['snia']
snib_fluxes = spectra['snib']
snic_fluxes = spectra['snic']
snii_fluxes = spectra['sniip']
```

Como anteriormente, esto es simplemente espectros de galaxias (hospedaje de nuestras supernovas) de ejemplo, junto con ejemplos que contienen clases específicas de supernovas. Para ser concretos, tracemos un espectro de cada uno.

```
fig, axes = plt.subplots(2,3, figsize = (12,7), tight_layout=True, sharex=True, sharey=True)

# One example spectrum (index 0) per class; the sixth panel stays empty.
ax = axes[0,0]
ax.plot(wave, host_fluxes[0])
ax.set(ylabel='normalized flux', title='BGS host galaxy')

ax = axes[0,1]
ax.plot(wave, snia_fluxes[0])
ax.set(title='Host + SN Ia')

ax = axes[0,2]
ax.plot(wave, snib_fluxes[0])
ax.set(title='Host + SN Ib')

ax = axes[1,0]
ax.plot(wave, snic_fluxes[0])
ax.set(xlabel=r'Rest frame wavelength [$\AA$]', ylabel='Normalized flux', title='Host + SN Ic')

ax = axes[1,1]
ax.plot(wave, snii_fluxes[0])
ax.set(xlabel=r'rest frame wavelength [$\AA$]', title='Host + SN IIP')

# Hide the unused sixth panel.
axes[1,2].axis('off');
```

Hay muchas posibilidades entre las que le estamos pidiendo a la red que busque. Para facilitar las cosas, estos espectros se han _precondicionado_.
Para lograr esto, hemos: eliminado el efecto del corrimiento al rojo del espectro observado; promediado en los contenedores vecinos, como anteriormente; renormalizado los flujos para que se encuentren entre [0, 1]; por lo tanto, ignoramos el brillo de las supernovas al determinar su tipo.

## Creando un cerebro artificial para encontrar las bestias astronómicas más temperamentales

Primero crearemos nuestro cerebro artificial y luego diseñaremos desafíos que pueda intentar repetidamente y _aprender_. Si usamos todos nuestros datos para crear los desafíos, entonces no tenemos datos externos para probar qué tan bien está funcionando la red; este sería un ejemplo clásico de [sobreajuste](https://es.wikipedia.org/wiki/Sobreajuste). Para evitar esto, dividiremos nuestros datos en conjuntos de *entrenamiento* (train) y *validación* (test).

Finalmente, para ser amigable con la computadora, asignaremos _etiquetas enteras_ simples a los tipos que esperamos que la red pueda adivinar: (host, Ia, Ib, Ic, IIp) = (0,1,2,3,4) respectivamente.

```
# Split the data into x and y variables, where x = spectra and y = our labels.
nbins = len(wave)

# Stack the five classes into one array and add a trailing channel axis,
# yielding the (n_samples, nbins, 1) shape expected by Conv1D layers.
x = np.concatenate([host_fluxes,
                    snia_fluxes,
                    snib_fluxes,
                    snic_fluxes,
                    snii_fluxes
                   ]).reshape(-1, nbins, 1)

# Integer class labels (host, Ia, Ib, Ic, IIP) = (0, 1, 2, 3, 4),
# one-hot encoded for a categorical classification loss.
y = to_categorical(
      np.concatenate([np.full(len(host_fluxes), 0),
                      np.full(len(snia_fluxes), 1),
                      np.full(len(snib_fluxes), 2),
                      np.full(len(snic_fluxes), 3),
                      np.full(len(snii_fluxes), 4)
                     ]))

x.shape, y.shape

# Splitting into *training* (train) and *validation* (test) sets.
# NOTE(review): no random_state is fixed, so the split differs on every run --
# confirm whether reproducibility matters here.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.333)
```

### La red

Inicializaremos una red con un potencial limitado por tres capas convolucionales. Podría tener menos o más, según lo determinado por los pros y los contras anteriores. También vamos a optimizar la red en función de su _exactitud_, una métrica que maximiza el número de predicciones de la red que coinciden con la verdad.
También podríamos entrenar en otras métricas como _precision_, que minimiza la cantidad de predicciones falsas; parece que debería ser lo mismo, pero [no](https://es.wikipedia.org/wiki/Precisi%C3%B3n_y_exhaustividad).

```
def network(input_shape, ncat, learning_rate=0.0005, reg=0.003, dropout=0.7, seed=None):
    """Define the structure of the convolutional network.

    Three Conv1D -> BatchNorm -> ReLU -> MaxPool stages of increasing width
    (8, 16, 32 filters), followed by a dense layer with dropout and an
    ncat-wide output layer.

    Parameters
    ----------
    input_shape : int
        Shape of the input spectrum.
    ncat : int
        Number of categories or classes.
    learning_rate : float
        Learning rate for the Adam optimizer.
    reg : float
        L2 regularization factor applied to every kernel.
    dropout : float
        Dropout rate for the fully connected layer.
    seed : int
        Initialization seed (passed to the Glorot initializers and Dropout).

    Returns
    -------
    model : tensorflow.keras.Model
        A compiled model instance of the network.
    """
    X_input = Input(input_shape, name='Input_Spec')

    # First convolutional layer
    with backend.name_scope('Conv_1'):
        X = Conv1D(filters=8, kernel_size=5, strides=1, padding='same',
                   kernel_regularizer=regularizers.l2(reg),
                   bias_initializer='zeros',
                   kernel_initializer=glorot_normal(seed))(X_input)
        X = BatchNormalization(axis=2)(X)
        X = Activation('relu')(X)
        X = MaxPooling1D(pool_size= 2)(X)

    # Second convolutional layer
    with backend.name_scope('Conv_2'):
        X = Conv1D(filters=16, kernel_size=5, strides=1, padding='same',
                   kernel_regularizer=regularizers.l2(reg),
                   bias_initializer='zeros',
                   kernel_initializer=glorot_normal(seed))(X)
        X = BatchNormalization(axis=2)(X)
        X = Activation('relu')(X)
        X = MaxPooling1D(2)(X)

    # Third convolutional layer
    with backend.name_scope('Conv_3'):
        X = Conv1D(filters=32, kernel_size=5, strides=1, padding='same',
                   kernel_regularizer=regularizers.l2(reg),
                   bias_initializer='zeros',
                   kernel_initializer=glorot_normal(seed))(X)
        X = BatchNormalization(axis=2)(X)
        X = Activation('relu')(X)
        X = MaxPooling1D(2)(X)

    # Flattened, fully connected layer
    with backend.name_scope('Dense_Layer'):
        X = Flatten()(X)
        X = Dense(256, kernel_regularizer=regularizers.l2(reg), activation='relu')(X)
        X = Dropout(rate=dropout, seed=seed)(X)

    # Output layer with sigmoid activation
    # NOTE(review): a 'sigmoid' output combined with 'categorical_crossentropy'
    # is unusual for mutually exclusive classes; 'softmax' is the conventional
    # choice and would make the outputs a proper probability distribution.
    # Confirm whether sigmoid was intentional before changing it.
    with backend.name_scope('Output_Layer'):
        X = Dense(ncat, kernel_regularizer=regularizers.l2(reg),
                  activation='sigmoid',name='Output_Classes')(X)

    model = Model(inputs=X_input, outputs=X, name='SNnet')

    # Set up the optimizer, loss function, and optimization metrics.
    # NOTE(review): Adam(lr=...) uses the legacy keyword; newer TF versions
    # spell it learning_rate. Kept as-is for compatibility with this notebook.
    model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model

# We will define the network here.
# The input shape will have nbins = 150 wavelength bins.
# The number of categories is defined by the shape of our y vector.
# We should have five categories (hosts, SN Ia, Ib, Ic, and IIP).
cnn_model = network((nbins, 1), ncat=y.shape[1])
cnn_model.summary()
```

Considere esto por un minuto. En total, hay 152,405 valores separados que se pueden entrenar en esta red. Eso es un poco menos de los ~10 mil millones en el suyo, ¡pero aún es mucho! Esto significa que existe un potencial considerable para que la red obtenga muy buenos espectros distintivos de candidatos a supernovas, pero que se necesitará mucho aprendizaje para obtener resultados sensibles.

### De pie sobre tus propios pies

Ahora, deja que suceda la magia. Dejaremos que la red aborde los desafíos y diseñe sus pesos, o neuronas, para maximizar la satisfacción o su capacidad para tener éxito.

```
history = cnn_model.fit(x_train, y_train, batch_size=50, epochs=125, validation_data=(x_test, y_test), shuffle=True)
```

### ¿Pro o no pro?

Cada vez que la red intenta aprender, el desafío se conoce como una "época". A medida que aprende, deberíamos preocuparnos por la pérdida: una medida de qué tan mal la red está identificando nuestros espectros, que queremos minimizar. Considerando que, la precisión mide qué tan bien lo hace la red para identificar las cinco clases diferentes simuladas, siendo esto lo que queremos maximizar.
``` fig, axes = plt.subplots(1,2, figsize=(12,5), sharex=True) nepoch = len(history.history['loss']) epochs = np.arange(1, nepoch+1) ax = axes[0] ax.plot(epochs, history.history['accuracy'], label='acc') ax.plot(epochs, history.history['val_accuracy'], label='val_acc') ax.set(xlabel='Training epoch', ylabel='Accuracy', xlim=(0, nepoch), ylim=(0.,1.0) ) ax.legend(fontsize=12, loc='best') ax.grid(ls=':') ax = axes[1] ax.plot(epochs, history.history['loss'], label='loss') ax.plot(epochs, history.history['val_loss'], label='val_loss') ax.set(xlabel='Training epoch', ylabel='Loss', xlim=(0, nepoch), # ylim=(0.,2.0) ) ax.legend(fontsize=12, loc='best') ax.grid(ls=':') fig.tight_layout(); ``` Podemos ver que la red aprendió inicialmente muy rápido, pero la mejora se ralentizó alrededor de 50 épocas más o menos. En este punto, la precisión alcanzada fue de alrededor del 70%, ¡así que no está mal! ### Matriz de confusión En este tipo de clasificación, otra forma de medir el éxito es mediante la _matriz de confusión_, ``` # Vamos a utilizar la muestra de prueba para probar las predicciones de nuestra # CNN entrenada ahora que ha "aprendido" a distinguir los diferentes tipos de # supernovas. Esto no es ideal ... deberíamos tener una tercer # muestra independiente de las muestras de entrenamiento y prueba. y_pred = cnn_model.predict(x_test) # En el clasificador de clases múltiples, evalue el máximo de # coincidencias en la matriz de salida de valores. 
cm = confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))
cmnorm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

fig, ax = plt.subplots(1,1, figsize=(8,7))
im = ax.imshow(cmnorm, cmap='Blues', vmin=0, vmax=1)
cb = ax.figure.colorbar(im, ax=ax, fraction=0.046, pad=0.04)
cb.set_label('Fracción Correcta')

labels = ['Hosts', 'SN Ia', 'SN Ib', 'SN Ic', 'SN IIP']

ax.set(aspect='equal',
       xlabel='Clase Predicha', xticks=np.arange(cm.shape[1]), xticklabels=labels,
       ylabel='Clase Verdadera', yticks=np.arange(cm.shape[1]), yticklabels=labels)

thresh = 0.5*cm.max()

for i in range(cm.shape[0]):
    for j in range(cm.shape[1]):
        ax.text(j, i, '{:.3f}\n({:d})'.format(cmnorm[i,j], cm[i,j]),
                ha='center', va='center',
                color='black' if cm[i,j] < thresh else 'white')

fig.tight_layout()
```

Si la CNN es perfecta para clasificar todas las categorías, entonces la entrada 'Etiqueta verdadera' y la salida de 'Etiqueta predicha' de la red coincidirían siempre. En este caso, solo los cuadrados diagonales aparecerían rellenos en la matriz; el resto estaría en blanco.

En nuestro caso, vemos que la CNN es realmente buena para clasificar el tipo Ia; promedio en la clasificación de tipo IIP; y mediocre en la clasificación de las supernovae Ib y Ic. Parece estar mezclando el tipo Ib y el Ic, y afirma incorrectamente que el 10% de las galaxias anfitrionas contienen en realidad una supernova de tipo Ib. ¡Ups!

Finalmente, es notable que hemos completado el círculo, por supuesto, ya que el silicio fundamental para los transistores en el que se realizará este cálculo una vez se originó en las mismas supernovas que estamos buscando.

## ¿Puedes hacerlo mejor?

Una de las cosas fascinantes del aprendizaje automático es que se trata de un número ilimitado de cosas que se pueden modificar y que podrían mejorar el rendimiento. El arte está en saber qué cambios pueden mejorar los resultados para un problema dado.
_Aumentar el tamaño del conjunto de entrenamiento_: podríamos creer que hemos proporcionado datos insuficientes para que la red aprenda. Tenemos más disponibles en el _conjunto de validación_ que podríamos agregar, pero ¿a qué costo? Si la red mejora, ¿es real o simplemente está sobreajustada? Sin embargo, puede intentar cambiar a una 'división 80-20'. _Personalizar la tasa de aprendizaje y abandono_: la rapidez con la que la CNN reacciona a los nuevos datos está determinada por una _tasa de aprendizaje preestablecida_: hay un equilibrio que hay que mantener para resolver mejor los problemas nuevos, sin dejar de hacerlo bien en los viejos; Esto a menudo se conoce como "histéresis" dentro de la física, en analogía con efectos similares en el magnetismo. Puede intentar cambiar la tasa de aprendizaje en la $\tt{red}$ anterior. _Cambio de la 'arquitectura'_: hemos limitado la red a tres capas, podríamos agregar más o menos. ¿Encuentra alguna diferencia significativa? ## Entonces, te gusta el aprendizaje automático ... Bueno, ¡estás muy bien preparado para los probables éxitos y desafíos del próximo siglo! Existe una gran cantidad de material, pero lo siguiente es muy recomendable: [Neural Networks and Deep Learning](http://neuralnetworksanddeeplearning.com/), [3blue1brown: Neural Networks](https://youtu.be/aircAruvnKk) y [Aprendizaje profundo con Python](https://www.amazon.com/Deep-Learning-Python-Francois-Chollet/dp/1617294438). Realmente el aprendizaje automático tiene el potencial de cambiar todo, como se ha demostrado asombrosamente [recientemente](https://www.nature.com/articles/d41586-020-03348-4).
github_jupyter
Filter pipelines ================ This example shows how to use the `pymia.filtering` package to set up image filter pipeline and apply it to an image. The pipeline consists of a gradient anisotropic diffusion filter followed by a histogram matching. This pipeline will be applied to a T1-weighted MR image and a T2-weighted MR image will be used as a reference for the histogram matching. <div class="alert alert-warning"> Tip This example is available as Jupyter notebook at [./examples/filtering/basic.ipynb](https://github.com/rundherum/pymia/blob/master/examples/filtering/basic.ipynb) and Python script at [./examples/filtering/basic.py](https://github.com/rundherum/pymia/blob/master/examples/filtering/basic.py). </div> <div class="alert alert-info"> Note To be able to run this example: - Get the example data by executing [./examples/example-data/pull_example_data.py](https://github.com/rundherum/pymia/blob/master/examples/example-data/pull_example_data.py). - Install matplotlib (`pip install matplotlib`). </div> Import the required modules. ``` import glob import os import matplotlib.pyplot as plt import pymia.filtering.filter as flt import pymia.filtering.preprocessing as prep import SimpleITK as sitk ``` Define the path to the data. ``` data_dir = '../example-data' ``` Let us create a list with the two filters, a gradient anisotropic diffusion filter followed by a histogram matching. ``` filters = [ prep.GradientAnisotropicDiffusion(time_step=0.0625), prep.HistogramMatcher() ] histogram_matching_filter_idx = 1 # we need the index later to update the HistogramMatcher's parameters ``` Now, we can initialize the filter pipeline. ``` pipeline = flt.FilterPipeline(filters) ``` We can now loop over the subjects of the example data. We will both load the T1-weighted and T2-weighted MR images and execute the pipeline on the T1-weighted MR image. 
Note that for each subject, we update the parameters for the histogram matching filter to be the corresponding T2-weighted image.

```
# get subjects to evaluate
subject_dirs = [subject for subject in glob.glob(os.path.join(data_dir, '*')) if os.path.isdir(subject) and os.path.basename(subject).startswith('Subject')]

for subject_dir in subject_dirs:
    subject_id = os.path.basename(subject_dir)
    print(f'Filtering {subject_id}...')

    # load the T1- and T2-weighted MR images
    t1_image = sitk.ReadImage(os.path.join(subject_dir, f'{subject_id}_T1.mha'))
    t2_image = sitk.ReadImage(os.path.join(subject_dir, f'{subject_id}_T2.mha'))

    # set the T2-weighted MR image as reference for the histogram matching
    pipeline.set_param(prep.HistogramMatcherParams(t2_image), histogram_matching_filter_idx)

    # execute filtering pipeline on the T1-weighted image
    filtered_t1_image = pipeline.execute(t1_image)

    # plot filtering result
    slice_no_for_plot = t1_image.GetSize()[2] // 2
    fig, axs = plt.subplots(1, 2)
    axs[0].imshow(sitk.GetArrayFromImage(t1_image[:, :, slice_no_for_plot]), cmap='gray')
    axs[0].set_title('Original image')
    axs[1].imshow(sitk.GetArrayFromImage(filtered_t1_image[:, :, slice_no_for_plot]), cmap='gray')
    axs[1].set_title('Filtered image')
    fig.suptitle(f'{subject_id}', fontsize=16)
    plt.show()
```

Visually, we can clearly see the smoothing of the filtered image due to the anisotropic filtering. Also, the image intensities are brighter due to the histogram matching.
github_jupyter
``` import pandas as pd import numpy as np from matplotlib import pyplot as plt from tqdm import tqdm_notebook as tqdm file_path = 'Mendelian.train.tsv' df=pd.read_csv(file_path, sep='\t') metrics = df.columns.tolist() POSITIVES_NUMBER = 356 from scipy.stats import norm, beta, gamma, expon, exponweib distributions = { "CpGobsExp": beta, "CpGperCpG": beta, "CpGperGC": norm, "DGVCount": gamma, "DnaseClusteredHyp": gamma, "EncH3K27Ac": gamma, "GCContent": norm, "EncH3K4Me3": gamma, "ISCApath": gamma, "DnaseClusteredScore": beta, "EncH3K4Me1": gamma, "GerpRS": gamma, "GerpRSpv": gamma, "commonVar": exponweib, "dbVARCount": gamma, "fantom5Perm": gamma, "fantom5Robust": gamma, "mamPhastCons46way": gamma, "priPhastCons46way": gamma, "rareVar": beta, "verPhastCons46way": gamma, "numTFBSConserved": expon, "fracRareCommon": beta, "priPhyloP46way": beta, "verPhyloP46way": norm, "mamPhyloP46way": norm } def get_frequencies(my_list): n = len(my_list) unique, counts = np.unique(my_list, return_counts=True) counts = counts.astype(float)/n return dict(zip(unique, counts)) def plot_frequencies(name, my_list): """Plot given list by frequencies and list values.""" plt.title(name) arr = np.array(my_list) trimmed = arr[arr != 0] mean = np.mean(trimmed) min_val, max_val = np.min(trimmed), np.max(trimmed) normal_arr = (trimmed - mean) / (max_val - min_val) frequencies = get_frequencies(normal_arr) try: y = [frequencies[i] for i in normal_arr] except Exception: return plt.xlabel(name) plt.ylabel("frequency") plt.plot(normal_arr, y, '.', markersize=2) def plot_estimated_distribution(name, metric, my_list): """Plot given list by frequencies and list values.""" arr = np.array(my_list) trimmed = arr[arr != 0] mean = np.mean(trimmed) min_val, max_val = np.min(trimmed), np.max(trimmed) normal_arr = (trimmed - mean) / (max_val - min_val) distribution = distributions[metric] parameters = distribution.fit(normal_arr) print("Distribution parameters: %s"%str(parameters)) x = 
np.sort(distribution.rvs(*parameters, size=1000)) y = distribution.pdf(x, *parameters) plt.title("Estimating %s density with %s"%(metric, distribution.name)) plt.xlabel(name) plt.ylabel("density") plt.plot(x, y, '.', markersize=2) from collections import Counter for i, metric in tqdm(enumerate(metrics), total=len(metrics)): data = list(df[metric]) positives, negatives = data[:POSITIVES_NUMBER], data[POSITIVES_NUMBER:] plt.rcParams['figure.figsize'] = [20, 5] plt.suptitle("%s) Plotting metric %s"%(i+1, metric), fontsize=20) plt.subplot(141) plot_frequencies("Positives", positives) plt.subplot(142) plot_frequencies("Negatives", negatives) plt.subplot(143) plot_frequencies("Data", data) plt.subplot(144) plot_estimated_distribution("Data", metric, data) plt.savefig("stats/%s"%metric) plt.show() ```
github_jupyter
# Amazon SageMaker Multi-Model Endpoints using Scikit Learn *이 노트북은 [Amazon SageMaker Multi-Model Endpoints using Scikit Learn (영문 원본)](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/multi_model_sklearn_home_value/sklearn_multi_model_endpoint_home_value.ipynb) 의 한국어 번역입니다.* 고객들은 [Amazon SageMaker 멀티 모델 엔드포인트(multi-model endpoints)](https://docs.aws.amazon.com/sagemaker/latest/dg/multi-model-endpoints.html)를 사용하여 최대 수천 개의 모델을 완벽하게 호스팅하는 엔드포인트를 생성할 수 있습니다. 이러한 엔드포인트는 공통 추론 컨테이너(common inference container)에서 제공할 수 있는 많은 모델 중 하나를 온디맨드(on demand)로 호출할 수 있어야 하고 자주 호출되지 않는 모델이 약간의 추가 대기 시간(latency) 허용이 가능한 사례들에 적합합니다. 지속적으로 낮은 추론 대기 시간이 필요한 애플리케이션의 경우 기존의 엔드포인트가 여전히 최선의 선택입니다. High level에서 Amazon SageMaker는 필요에 따라 멀티 모델 엔드포인트에 대한 모델 로딩 및 언로딩을 관리합니다. 특정 모델에 대한 호출 요청이 발생하면 Amazon SageMaker는 해당 모델에 할당된 인스턴스로 요청을 라우팅하고 S3에서 모델 아티팩트(model artifacts)를 해당 인스턴스로 다운로드한 다음 컨테이너의 메모리에 모델 로드를 시작합니다. 로딩이 완료되면 Amazon SageMaker는 요청된 호출을 수행하고 결과를 반환합니다. 모델이 선택된 인스턴스의 메모리에 이미 로드되어 있으면 다운로드 및 로딩 단계들을 건너 뛰고 즉시 호출이 수행됩니다. 멀티 모델 엔드포인트 작성 및 사용 방법을 보여주기 위해, 이 노트북은 단일 위치의 주택 가격을 예측하는 Scikit Learn 모델을 사용하는 예시를 제공합니다. 이 도메인은 멀티 모델 엔드포인트를 쉽게 실험하기 위한 간단한 예제입니다. Amazon SageMaker 멀티 모델 엔드포인트 기능은 컨테이너를 가져 오는 프레임워크를 포함한 모든 머신 러닝 프레임워크 및 알고리즘에서 작동하도록 설계되었습니다. ### Contents 1. [Build and register a Scikit Learn container that can serve multiple models](#Build-and-register-a-Scikit-Learn-container-that-can-serve-multiple-models) 1. [Generate synthetic data for housing models](#Generate-synthetic-data-for-housing-models) 1. [Train multiple house value prediction models](#Train-multiple-house-value-prediction-models) 1. [Import models into hosting](#Import-models-into-hosting) 1. [Deploy model artifacts to be found by the endpoint](#Deploy-model-artifacts-to-be-found-by-the-endpoint) 1. [Create the Amazon SageMaker model entity](#Create-the-Amazon-SageMaker-model-entity) 1. [Create the multi-model endpoint](#Create-the-multi-model-endpoint) 1. 
[Exercise the multi-model endpoint](#Exercise-the-multi-model-endpoint) 1. [Dynamically deploy another model](#Dynamically-deploy-another-model) 1. [Invoke the newly deployed model](#Invoke-the-newly-deployed-model) 1. [Updating a model](#Updating-a-model) 1. [Clean up](#Clean-up) ## Build and register a Scikit Learn container that can serve multiple models ``` !pip install -qU awscli boto3 sagemaker ``` 추론 컨테이너가 멀티 모델 엔드 포인트에서 여러 모델을 제공하려면 특정 모델의 로드(load), 나열(list), 가져오기(get), 언로드(unload) 및 호출(invoke)을 위한 [추가 API](https://docs.aws.amazon.com/sagemaker/latest/dg/build-multi-model-build-container.html)를 구현해야 합니다. [SageMaker Scikit Learn 컨테이너 저장소의 'mme' branch](https://github.com/aws/sagemaker-scikit-learn-container/tree/mme)는 멀티 모델 엔드포인트에 필요한 추가 컨테이너 API를 구현하는 HTTP 프론트엔드를 제공하는 프레임워크인 [Multi Model Server](https://github.com/awslabs/multi-model-server)를 사용하도록 SageMaker의 Scikit Learn 프레임워크 컨테이너를 조정하는 방법에 대한 예제 구현입니다. 또한 사용자 정의 프레임워크 (본 예시에서는 Scikit Learn 프레임워크)를 사용하여 모델을 제공하기 위한 플러그 가능한 백엔드 핸들러(pluggable backend handler)를 제공합니다. 이 branch를 사용하여 모든 멀티 모델 엔드 포인트 컨테이너 요구 사항을 충족하는 Scikit Learn 컨테이너를 구축한 다음 해당 이미지를 Amazon Elastic Container Registry(ECR)에 업로드합니다. 이미지를 ECR에 업로드하면 새로운 ECR 저장소가 생성될 수 있으므로 이 노트북에는 일반 `SageMakerFullAccess` 권한 외에 권한이 필요합니다. 이러한 권한을 추가하는 가장 쉬운 방법은 관리형 정책 `AmazonEC2ContainerRegistryFullAccess`를 노트북 인스턴스를 시작하는 데 사용한 역할(role)에 추가하는 것입니다. 이 작업을 수행할 때 노트북 인스턴스를 다시 시작할 필요가 없으며 새 권한을 즉시 사용할 수 있습니다. ``` ALGORITHM_NAME = 'multi-model-sklearn' %%sh -s $ALGORITHM_NAME algorithm_name=$1 account=$(aws sts get-caller-identity --query Account --output text) # Get the region defined in the current configuration (default to us-west-2 if none defined) region=$(aws configure get region) ecr_image="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest" # If the repository doesn't exist in ECR, create it. aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1 if [ $? 
-ne 0 ] then aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null fi # Get the login command from ECR and execute it directly $(aws ecr get-login --region ${region} --no-include-email --registry-ids ${account}) # Build the docker image locally with the image name and then push it to ECR # with the full image name. # First clear out any prior version of the cloned repo rm -rf sagemaker-scikit-learn-container/ # Clone the sklearn container repo git clone --single-branch --branch mme https://github.com/aws/sagemaker-scikit-learn-container.git cd sagemaker-scikit-learn-container/ # Build the "base" container image that encompasses the installation of the # scikit-learn framework and all of the dependencies needed. docker build -q -t sklearn-base:0.20-2-cpu-py3 -f docker/0.20-2/base/Dockerfile.cpu --build-arg py_version=3 . # Create the SageMaker Scikit-learn Container Python package. python setup.py bdist_wheel --universal # Build the "final" container image that encompasses the installation of the # code that implements the SageMaker multi-model container requirements. docker build -q -t ${algorithm_name} -f docker/0.20-2/final/Dockerfile.cpu . 
docker tag ${algorithm_name} ${ecr_image}
docker push ${ecr_image}
```

## Generate synthetic data for housing models

```
import numpy as np
import pandas as pd
import json
import datetime
import time
from time import gmtime, strftime
import matplotlib.pyplot as plt

NUM_HOUSES_PER_LOCATION = 1000
LOCATIONS = ['NewYork_NY', 'LosAngeles_CA', 'Chicago_IL', 'Houston_TX', 'Dallas_TX',
             'Phoenix_AZ', 'Philadelphia_PA', 'SanAntonio_TX', 'SanDiego_CA', 'SanFrancisco_CA']
PARALLEL_TRAINING_JOBS = 4 # len(LOCATIONS) if your account limits can handle it
MAX_YEAR = 2019

def gen_price(house):
    """Return a deterministic synthetic price for *house*.

    Price = $150/sqft base, plus fixed premiums per bedroom, bathroom,
    lot acre and garage space, minus $5,000 per year of age relative
    to MAX_YEAR.
    """
    _base_price = int(house['SQUARE_FEET'] * 150)
    _price = int(_base_price + (10000 * house['NUM_BEDROOMS']) + \
                 (15000 * house['NUM_BATHROOMS']) + \
                 (15000 * house['LOT_ACRES']) + \
                 (15000 * house['GARAGE_SPACES']) - \
                 (5000 * (MAX_YEAR - house['YEAR_BUILT'])))
    return _price

def gen_random_house():
    """Generate one random house as a row [price, features...] matching COLUMNS.

    Feature values are drawn from normal/uniform distributions; bathrooms
    come in half-bath increments (1.0-3.0) and YEAR_BUILT is capped at
    MAX_YEAR. The price is then computed deterministically by gen_price.
    """
    _house = {'SQUARE_FEET': int(np.random.normal(3000, 750)),
              'NUM_BEDROOMS': np.random.randint(2, 7),
              'NUM_BATHROOMS': np.random.randint(2, 7) / 2,
              'LOT_ACRES': round(np.random.normal(1.0, 0.25), 2),
              'GARAGE_SPACES': np.random.randint(0, 4),
              'YEAR_BUILT': min(MAX_YEAR, int(np.random.normal(1995, 10)))}
    _price = gen_price(_house)
    return [_price, _house['YEAR_BUILT'], _house['SQUARE_FEET'],
            _house['NUM_BEDROOMS'], _house['NUM_BATHROOMS'], _house['LOT_ACRES'],
            _house['GARAGE_SPACES']]

COLUMNS = ['PRICE', 'YEAR_BUILT', 'SQUARE_FEET', 'NUM_BEDROOMS', 'NUM_BATHROOMS',
           'LOT_ACRES', 'GARAGE_SPACES']

def gen_houses(num_houses):
    """Build a DataFrame of *num_houses* random houses with COLUMNS ordering."""
    _house_list = []
    for i in range(num_houses):
        _house_list.append(gen_random_house())
    _df = pd.DataFrame(_house_list, columns=COLUMNS)
    return _df
```

## Train multiple house value prediction models

```
import sagemaker
from sagemaker import get_execution_role
from sagemaker.predictor import csv_serializer
import boto3

sm_client = boto3.client(service_name='sagemaker')
runtime_sm_client = boto3.client(service_name='sagemaker-runtime')
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
sagemaker_session = sagemaker.Session() role = get_execution_role() ACCOUNT_ID = boto3.client('sts').get_caller_identity()['Account'] REGION = boto3.Session().region_name BUCKET = sagemaker_session.default_bucket() SCRIPT_FILENAME = 'script.py' USER_CODE_ARTIFACTS = 'user_code.tar.gz' MULTI_MODEL_SKLEARN_IMAGE = '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(ACCOUNT_ID, REGION, ALGORITHM_NAME) DATA_PREFIX = 'DEMO_MME_SCIKIT' HOUSING_MODEL_NAME = 'housing' MULTI_MODEL_ARTIFACTS = 'multi_model_artifacts' TRAIN_INSTANCE_TYPE = 'ml.m4.xlarge' ENDPOINT_INSTANCE_TYPE = 'ml.m4.xlarge' ``` ### Split a given dataset into train, validation, and test ``` from sklearn.model_selection import train_test_split SEED = 7 SPLIT_RATIOS = [0.6, 0.3, 0.1] def split_data(df): # split data into train and test sets seed = SEED val_size = SPLIT_RATIOS[1] test_size = SPLIT_RATIOS[2] num_samples = df.shape[0] X1 = df.values[:num_samples, 1:] # keep only the features, skip the target, all rows Y1 = df.values[:num_samples, :1] # keep only the target, all rows # Use split ratios to divide up into train/val/test X_train, X_val, y_train, y_val = \ train_test_split(X1, Y1, test_size=(test_size + val_size), random_state=seed) # Of the remaining non-training samples, give proper ratio to validation and to test X_test, X_test, y_test, y_test = \ train_test_split(X_val, y_val, test_size=(test_size / (test_size + val_size)), random_state=seed) # reassemble the datasets with target in first column and features after that _train = np.concatenate([y_train, X_train], axis=1) _val = np.concatenate([y_val, X_val], axis=1) _test = np.concatenate([y_test, X_test], axis=1) return _train, _val, _test ``` ### Launch a single training job for a given housing location 모델 학습 시, 기존 SageMaker 모델과 동일한 방식으로 학습하기 때문에 멀티 모델 엔트 포인트에 특화된 기능을 따로 구현하실 필요가 없습니다. 
``` %%writefile $SCRIPT_FILENAME import argparse import os import glob import numpy as np import pandas as pd from sklearn.ensemble import RandomForestRegressor from sklearn.externals import joblib # inference functions --------------- def model_fn(model_dir): print('loading model.joblib from: {}'.format(model_dir)) _loaded_model = joblib.load(os.path.join(model_dir, 'model.joblib')) return _loaded_model if __name__ =='__main__': print('extracting arguments') parser = argparse.ArgumentParser() # hyperparameters sent by the client are passed as command-line arguments to the script. # to simplify the demo we don't use all sklearn RandomForest hyperparameters parser.add_argument('--n-estimators', type=int, default=10) parser.add_argument('--min-samples-leaf', type=int, default=3) # Data, model, and output directories parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR')) parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN')) parser.add_argument('--validation', type=str, default=os.environ.get('SM_CHANNEL_VALIDATION')) parser.add_argument('--model-name', type=str) args, _ = parser.parse_known_args() print('reading data') print('model_name: {}'.format(args.model_name)) train_file = os.path.join(args.train, args.model_name + '_train.csv') train_df = pd.read_csv(train_file) val_file = os.path.join(args.validation, args.model_name + '_val.csv') test_df = pd.read_csv(os.path.join(val_file)) print('building training and testing datasets') X_train = train_df[train_df.columns[1:train_df.shape[1]]] X_test = test_df[test_df.columns[1:test_df.shape[1]]] y_train = train_df[train_df.columns[0]] y_test = test_df[test_df.columns[0]] # train print('training model') model = RandomForestRegressor( n_estimators=args.n_estimators, min_samples_leaf=args.min_samples_leaf, n_jobs=-1) model.fit(X_train, y_train) # print abs error print('validating model') abs_err = np.abs(model.predict(X_test) - y_test) # print couple perf metrics 
for q in [10, 50, 90]: print('AE-at-' + str(q) + 'th-percentile: ' + str(np.percentile(a=abs_err, q=q))) # persist model path = os.path.join(args.model_dir, 'model.joblib') joblib.dump(model, path) print('model persisted at ' + path) # can test the model locally # ! python script.py --n-estimators 100 \ # --min-samples-leaf 2 \ # --model-dir ./ \ # --model-name 'NewYork_NY' \ # --train ./data/NewYork_NY/train/ \ # --validation ./data/NewYork_NY/val/ # from sklearn.externals import joblib # regr = joblib.load('./model.joblib') # _start_time = time.time() # regr.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) # _duration = time.time() - _start_time # print('took {:,d} ms'.format(int(_duration * 1000))) from sagemaker.sklearn.estimator import SKLearn def launch_training_job(location): # clear out old versions of the data _s3_bucket = s3.Bucket(BUCKET) _full_input_prefix = '{}/model_prep/{}'.format(DATA_PREFIX, location) _s3_bucket.objects.filter(Prefix=_full_input_prefix + '/').delete() # upload the entire set of data for all three channels _local_folder = 'data/{}'.format(location) _inputs = sagemaker_session.upload_data(path=_local_folder, key_prefix=_full_input_prefix) print('Training data uploaded: {}'.format(_inputs)) _job = 'mme-{}'.format(location.replace('_', '-')) _full_output_prefix = '{}/model_artifacts/{}'.format(DATA_PREFIX, location) _s3_output_path = 's3://{}/{}'.format(BUCKET, _full_output_prefix) _estimator = SKLearn( entry_point=SCRIPT_FILENAME, role=role, train_instance_count=1, train_instance_type=TRAIN_INSTANCE_TYPE, framework_version='0.20.0', output_path=_s3_output_path, base_job_name=_job, metric_definitions=[ {'Name' : 'median-AE', 'Regex': 'AE-at-50th-percentile: ([0-9.]+).*$'}], hyperparameters = {'n-estimators' : 100, 'min-samples-leaf': 3, 'model-name' : location}) DISTRIBUTION_MODE = 'FullyReplicated' _train_input = sagemaker.s3_input(s3_data=_inputs+'/train', distribution=DISTRIBUTION_MODE, content_type='csv') _val_input = 
sagemaker.s3_input(s3_data=_inputs+'/val', distribution=DISTRIBUTION_MODE, content_type='csv') _remote_inputs = {'train': _train_input, 'validation': _val_input} _estimator.fit(_remote_inputs, wait=False) return _estimator.latest_training_job.name ``` ### Kick off a model training job for each housing location ``` def save_data_locally(location, train, val, test): _header = ','.join(COLUMNS) os.makedirs('data/{}/train'.format(location)) np.savetxt( 'data/{0}/train/{0}_train.csv'.format(location), train, delimiter=',', fmt='%.2f') os.makedirs('data/{}/val'.format(location)) np.savetxt( 'data/{0}/val/{0}_val.csv'.format(location), val, delimiter=',', fmt='%.2f') os.makedirs('data/{}/test'.format(location)) np.savetxt( 'data/{0}/test/{0}_test.csv'.format(location), test, delimiter=',', fmt='%.2f') import shutil import os training_jobs = [] shutil.rmtree('data', ignore_errors=True) for loc in LOCATIONS[:PARALLEL_TRAINING_JOBS]: _houses = gen_houses(NUM_HOUSES_PER_LOCATION) _train, _val, _test = split_data(_houses) save_data_locally(loc, _train, _val, _test) _job = launch_training_job(loc) training_jobs.append(_job) print('{} training jobs launched: {}'.format(len(training_jobs), training_jobs)) ``` ### Wait for all model training to finish ``` def wait_for_training_job_to_complete(job_name): print('Waiting for job {} to complete...'.format(job_name)) _resp = sm_client.describe_training_job(TrainingJobName=job_name) _status = _resp['TrainingJobStatus'] while _status=='InProgress': time.sleep(60) _resp = sm_client.describe_training_job(TrainingJobName=job_name) _status = _resp['TrainingJobStatus'] if _status == 'InProgress': print('{} job status: {}'.format(job_name, _status)) print('DONE. 
Status for {} is {}\n'.format(job_name, _status)) # wait for the jobs to finish for j in training_jobs: wait_for_training_job_to_complete(j) ``` ## Import models into hosting 멀티 모델 엔드포인트의 가장 큰 차이점은 모델 엔티티(Model entity)를 작성할 때 컨테이너의 `MultiModel`은 엔드포인트에서 호출할 수 있는 모델 아티팩트가 있는 S3 접두부(prefix)입니다. 나머지 S3 경로는 실제로 모델을 호출할 때 지정됩니다. 슬래시로 위치를 닫아야 하는 점을 기억해 주세요. 컨테이너의 `Mode`는 컨테이너가 여러 모델을 호스팅함을 나타내기 위해 `MultiModel`로 지정됩니다. ### Deploy model artifacts to be found by the endpoint 상술한 바와 같이, 멀티 모델 엔드 포인트는 S3의 특정 위치에서 모델 아티팩트를 찾도록 구성됩니다. 학습된 각 모델에 대해 모델 아티팩트를 해당 위치에 복사합니다. 이 예에서는 모든 모델들을 단일 폴더에 저장합니다. 멀티 모델 엔드 포인트의 구현은 임의의 폴더 구조를 허용할 만큼 유연합니다. 예를 들어 일련의 하우징 모델의 경우 각 지역마다 최상위 폴더가 있을 수 있으며 모델 아티팩트는 해당 지역 폴더로 복사됩니다. 이러한 모델을 호출할 때 참조되는 대상 모델에는 폴더 경로가 포함됩니다. 예를 들어 `northeast/Boston_MA.tar.gz`입니다. ``` import re def parse_model_artifacts(model_data_url): # extract the s3 key from the full url to the model artifacts _s3_key = model_data_url.split('s3://{}/'.format(BUCKET))[1] # get the part of the key that identifies the model within the model artifacts folder _model_name_plus = _s3_key[_s3_key.find('model_artifacts') + len('model_artifacts') + 1:] # finally, get the unique model name (e.g., "NewYork_NY") _model_name = re.findall('^(.*?)/', _model_name_plus)[0] return _s3_key, _model_name # make a copy of the model artifacts from the original output of the training job to the place in # s3 where the multi model endpoint will dynamically load individual models def deploy_artifacts_to_mme(job_name): _resp = sm_client.describe_training_job(TrainingJobName=job_name) _source_s3_key, _model_name = parse_model_artifacts(_resp['ModelArtifacts']['S3ModelArtifacts']) _copy_source = {'Bucket': BUCKET, 'Key': _source_s3_key} _key = '{}/{}/{}.tar.gz'.format(DATA_PREFIX, MULTI_MODEL_ARTIFACTS, _model_name) print('Copying {} model\n from: {}\n to: {}...'.format(_model_name, _source_s3_key, _key)) s3_client.copy_object(Bucket=BUCKET, CopySource=_copy_source, Key=_key) return _key ``` *의도적으로 첫 번째 모델을 복사하지 
않는다는 점을 유의해 주세요.*. 첫 번째 모델은 향후 실습 과정에서 복사하여 이미 실행 중인 엔드포인트에 새 모델을 동적으로 추가하는 방법을 보여주기 위함입니다. ``` # First, clear out old versions of the model artifacts from previous runs of this notebook s3 = boto3.resource('s3') s3_bucket = s3.Bucket(BUCKET) full_input_prefix = '{}/multi_model_artifacts'.format(DATA_PREFIX) print('Removing old model artifacts from {}'.format(full_input_prefix)) filter_resp = s3_bucket.objects.filter(Prefix=full_input_prefix + '/').delete() # copy every model except the first one for job in training_jobs[1:]: deploy_artifacts_to_mme(job) ``` ### Create the Amazon SageMaker model entity `boto3`을 사용하여 모델 엔터티를 만듭니다. 단일 모델을 설명하는 대신 멀티 모델 시맨틱(semantics)의 사용을 나타내며 모든 특정 모델 아티팩트의 소스 위치를 식별합니다. ``` # When using multi-model endpoints with the Scikit Learn container, we need to provide an entry point for # inference that will at least load the saved model. This function uploads a model artifact containing such a # script. This tar.gz file will be fed to the SageMaker multi-model creation and pointed to by the # SAGEMAKER_SUBMIT_DIRECTORY environment variable. 
def upload_inference_code(script_file_name, prefix): _tmp_folder = 'inference-code' if not os.path.exists(_tmp_folder): os.makedirs(_tmp_folder) !tar -czvf $_tmp_folder/$USER_CODE_ARTIFACTS $script_file_name > /dev/null _loc = sagemaker_session.upload_data(_tmp_folder, key_prefix='{}/{}'.format(prefix, _tmp_folder)) return _loc + '/' + USER_CODE_ARTIFACTS def create_multi_model_entity(multi_model_name, role): # establish the place in S3 from which the endpoint will pull individual models _model_url = 's3://{}/{}/{}/'.format(BUCKET, DATA_PREFIX, MULTI_MODEL_ARTIFACTS) _container = { 'Image': MULTI_MODEL_SKLEARN_IMAGE, 'ModelDataUrl': _model_url, 'Mode': 'MultiModel', 'Environment': { 'SAGEMAKER_PROGRAM' : SCRIPT_FILENAME, 'SAGEMAKER_SUBMIT_DIRECTORY' : upload_inference_code(SCRIPT_FILENAME, DATA_PREFIX) } } create_model_response = sm_client.create_model( ModelName = multi_model_name, ExecutionRoleArn = role, Containers = [_container]) return _model_url multi_model_name = '{}-{}'.format(HOUSING_MODEL_NAME, strftime('%Y-%m-%d-%H-%M-%S', gmtime())) model_url = create_multi_model_entity(multi_model_name, role) print('Multi model name: {}'.format(multi_model_name)) print('Here are the models that the endpoint has at its disposal:') !aws s3 ls --human-readable --summarize $model_url ``` ### Create the multi-model endpoint 멀티 모델 엔드포인트에 대한 SageMaker 엔드포인트 설정(config)에는 특별한 것이 없습니다. 예상 예측 워크로드에 적합한 인스턴스 유형과 인스턴스 수를 고려해야 합니다. 개별 모델의 수와 크기에 따라 메모리 요구 사항이 변동합니다. 엔드포인트 설정이 완료되면 엔드포인트 생성(creation)은 간단합니다. 
``` endpoint_config_name = multi_model_name print('Endpoint config name: ' + endpoint_config_name) create_endpoint_config_response = sm_client.create_endpoint_config( EndpointConfigName = endpoint_config_name, ProductionVariants=[{ 'InstanceType': ENDPOINT_INSTANCE_TYPE, 'InitialInstanceCount': 1, 'InitialVariantWeight': 1, 'ModelName' : multi_model_name, 'VariantName' : 'AllTraffic'}]) endpoint_name = multi_model_name print('Endpoint name: ' + endpoint_name) create_endpoint_response = sm_client.create_endpoint( EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name) print('Endpoint Arn: ' + create_endpoint_response['EndpointArn']) print('Waiting for {} endpoint to be in service...'.format(endpoint_name)) waiter = sm_client.get_waiter('endpoint_in_service') waiter.wait(EndpointName=endpoint_name) ``` ## Exercise the multi-model endpoint ### Invoke multiple individual models hosted behind a single endpoint 여기서 여러분은 특정 위치 기반 주택 모델을 무작위로 선택하는 것을 반복합니다. 주어진 모델의 첫번째 호출에 대해 지불된 콜드 스타트(cold start) 비용이 과금된다는 점을 알아 두세요. 동일한 모델의 후속 호출은 이미 메모리에 로드된 모델을 활용합니다. 
def predict_one_house_value(features, model_name):
    """Invoke one specific model behind the multi-model endpoint.

    Parameters
    ----------
    features : sequence
        Numeric house features; sent as a single CSV line.
    model_name : str
        The TargetModel artifact name (e.g. "NewYork_NY.tar.gz") relative to
        the endpoint's model-artifact S3 prefix.

    Prints the predicted price and the round-trip latency.
    """
    # BUGFIX: the original printed/invoked the *global* `full_model_name`
    # instead of the `model_name` parameter, so the argument was ignored.
    print('Using model {} to predict price of this house: {}'.format(model_name, features))
    float_features = [float(i) for i in features]
    body = ','.join(map(str, float_features)) + '\n'
    start_time = time.time()
    response = runtime_sm_client.invoke_endpoint(
        EndpointName=endpoint_name,
        ContentType='text/csv',
        TargetModel=model_name,
        Body=body)
    predicted_value = json.loads(response['Body'].read())[0]
    duration = time.time() - start_time
    print('${:,.2f}, took {:,d} ms\n'.format(predicted_value, int(duration * 1000)))


# iterate through invocations with random inputs against a random model
# showing results and latency; index 0 is skipped because the first model
# is deliberately not deployed yet (see the dynamic-deployment section)
for i in range(10):
    model_name = LOCATIONS[np.random.randint(1, len(LOCATIONS[:PARALLEL_TRAINING_JOBS]))]
    full_model_name = '{}.tar.gz'.format(model_name)
    predict_one_house_value(gen_random_house()[1:], full_model_name)
그러면 새 모델 호출 시 이전 버전의 모델을 호출할 수 있습니다. 또는, 엔드포인트를 중지하고 새로운 모델 셋을 재배포할 수 있습니다. ## Clean up 더 이상 사용하지 않는 엔드포인트에 대한 요금이 청구되지 않도록 리소스를 정리합니다. ``` # shut down the endpoint sm_client.delete_endpoint(EndpointName=endpoint_name) # and the endpoint config sm_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name) # delete model too sm_client.delete_model(ModelName=multi_model_name) ```
github_jupyter
### Disclaimer Importante
y='Valor Investido', legend=False, title='Distribuição de Ativos da Carteira', figsize=(15, 5), autopct="%.1f%%") # .set_ylabel('') -> tira a legenda do eixo y grafico1.set_ylabel('') # vai pegar as linhas da coluna 'Tipo' e vai somar grafico2 = carteira.groupby('Tipo').sum().plot.pie(ax=ax2, y='Valor Investido', legend=False, title='Distribuição de Classe de Ativos da Carteira', figsize=(15, 5), autopct="%.1f%%") grafico2.set_ylabel('') ``` #### Pegando as Cotações ao Longo de 2020 - IBOV ``` # permite pegar cotações do yahoo finances import pandas_datareader.data as web # web.DataReader -> comando para pegar as cotações # ticker -> exemplo: ^BVSP # local de onde pega as informações -> exemplo: yahoo # início -> start # fim -> end ibov_df = web.DataReader('^BVSP', data_source='yahoo', start='2020-01-01', end='2020-12-10') display(ibov_df) #print(ibov_df.info()) # mostrando o gráfico ibov_df['Adj Close'].plot(figsize=(15, 5)) ``` - Da nossa carteira ``` carteira_df = pd.DataFrame() # para cada ativo da tabela 'Ativos' vai pegar os valores que não tem 'Tesouro' no ativo # criou um dataframe chamado carteira_df, vai adicionar cada ativo nessa carteira for ativo in carteira['Ativos']: if 'Tesouro' not in ativo: carteira_df[ativo] = web.DataReader('{}.SA'.format(ativo), data_source='yahoo', start='2020-01-01', end='2020-12-10')['Adj Close'] # para consertar o problema abaixo usou o método ffill que preenche o valor vazio com o valor anterior carteira_df = carteira_df.ffill() # analisou para ver se tem valores faltando no non-null (os valores não estão iguais) print(carteira_df.info()) # mostrando a tabela display(carteira_df) ``` - Do Tesouro Selic: ``` # podemos ler um arquivo csv da internet link = 'https://www.tesourotransparente.gov.br/ckan/dataset/df56aa42-484a-4a59-8184-7676580c81e3/resource/796d2059-14e9-44e3-80c9-2d9e30b405c1/download/PrecoTaxaTesouroDireto.csv' # passando o link e tratando a tabela # decimal -> troca as vírgulas pelo ponto nos números 
tesouro_df = pd.read_csv(link, sep=';', decimal=',') # transformando a coluna em datetime tesouro_df['Data Base'] = pd.to_datetime(tesouro_df['Data Base'], format='%d/%m/%Y') # pegando a linha a coluna 'Tipo Titulo' for igual a 'Tesouro Selic' e todas as colunas tesouro_df = tesouro_df.loc[tesouro_df['Tipo Titulo']=='Tesouro Selic', :] # mostrando a tabela display(tesouro_df) ``` - Juntar o tesouro selic na nossa carteira ``` # renomeando a coluna 'Data Base' para 'Date' porque essa coluna precisa ter o nome igual da coluna do ibov_df tesouro_df = tesouro_df.rename(columns={'Data Base': 'Date'}) # juntando as colunas 'Data' e 'Pu Base Manha' do tesouro_df com a carteira_df # on -> passa a coluna que é igual nas duas tabelas # how -> diz que a tabela da carteira vai ficar na esquerda carteira_df = carteira_df.merge(tesouro_df[['Date', 'PU Base Manha']], on='Date', how='left') # mostrando a tabela display(carteira_df) # renomeando o nome da coluna carteira_df = carteira_df.rename(columns={'PU Base Manha': 'Tesouro Selic'}) # ffill vai preencher o valor vazio com o valor anterior carteira_df = carteira_df.ffill() # mostrando a tabela display(carteira_df) ``` - Calcular o valor investido ``` valor_investido = carteira_df.copy() for ativo in carteira['Ativos']: # vai multiplicar o preço investido pela quantidade valor_investido[ativo] = valor_investido[ativo] * carteira.loc[carteira['Ativos']==ativo, 'Qtde'].values[0] # transformando a coluna 'Date' em índice porque não queremos somar ela valor_investido = valor_investido.set_index('Date') # criando uma coluna 'Total' e somando as linhas (axis=1) valor_investido['Total'] = valor_investido.sum(axis=1) # mostrando a tabela display(valor_investido) # é necessário normalizar os dados para começarem do mesmo ponto no gráfico # o valor investido normalizado é o valor investido / primeira linha valor investido valor_investido_norm = valor_investido / valor_investido.iloc[0] # o ibov normalizado é o ibov / primeira linha do 
ibov ibov_df_norm = ibov_df / ibov_df.iloc[0] # mostrando os gráficos valor_investido_norm['Total'].plot(figsize=(15, 5), label='Carteira') ibov_df_norm['Adj Close'].plot(label='IBOV') # mostrando a legenda plt.legend() # rentabilidade é o último valor investido normalizado divido pelo primeiro valor investido normalizado -1 rentabilidade_carteira = valor_investido_norm['Total'].iloc[-1] - 1 rentabilidade_ibov = ibov_df_norm['Adj Close'].iloc[-1] - 1 print('Rentabilidade da Carteira {:.1%}'.format(rentabilidade_carteira)) print('Rentabilidade do Ibovespa {:.1%}'.format(rentabilidade_ibov)) ``` ### Parte 2 - Comparativo entre Ativos Créditos: https://simply-python.com/2019/01/16/retrieving-stock-statistics-from-yahoo-finance-using-python/ ``` tgt_website = r'https://sg.finance.yahoo.com/quote/PETR4.SA/key-statistics?p=PETR4.SA' def get_key_stats(tgt_website, ticker): # lendo todas as tabelas desse página html df_list = pd.read_html(tgt_website) result_df = df_list[0] # para cada tabela adiciona uma tabela na outra for df in df_list[1:]: result_df = result_df.append(df) # o nome da coluna 1 é renomeado para 'ticker' result_df = result_df.rename(columns={1: ticker}) # vai transpor a tabela com o 'T' (vai deixar a tabela mais organizada) return result_df.set_index(0).T df_petr4 = get_key_stats(tgt_website, 'PETR4') display(df_petr4) ``` - Comparando as ações de: 1. Magazine Luiza (MGLU3) 2. Lojas Americanas (LAME4) 3. 
Via Varejo (VVAR3) ``` acoes = ['MGLU3', 'LAME4', 'VVAR3'] # criando um dataframe estatiscas_empresas = pd.DataFrame() # para cada ação na lista de ações vai passar um link onde vai ter o nome de cada ação no link for acao in acoes: link = f'https://sg.finance.yahoo.com/quote/{acao}.SA/key-statistics?p={acao}.SA' # vai pegar as key stats df = get_key_stats(link, acao) estatiscas_empresas = estatiscas_empresas.append(df) display(estatiscas_empresas) print(list(estatiscas_empresas.columns)) ``` #### Price/sales (ttm) ``` import seaborn as sns # mostra um gráfico de barras # é necessário passar o valor de 'x' e o valor de 'y' sns.barplot(x=estatiscas_empresas.index, y=estatiscas_empresas['Price/sales (ttm)']) ``` #### Enterprise value / EBITDA ``` sns.barplot(x=estatiscas_empresas.index, y=estatiscas_empresas['Enterprise value/EBITDA 6']) ```
github_jupyter
import numpy as np
import matplotlib.pyplot as plt


def brownian_motion_simulation(m=2, n=1001, d=10.0, t=1.0):
    """Simulate Brownian motion in m dimensions.

    Adapted from John Burkardt's brownian_motion_simulation (GNU LGPL).

    Parameters
    ----------
    m : int
        Spatial dimension (default 2).
    n : int
        Number of time steps to take, plus 1 (default 1001).
    d : float
        Diffusion coefficient (default 10.0).
    t : float
        Total time (default 1.0).

    Returns
    -------
    numpy.ndarray of shape (m, n)
        Column 0 is the origin at time 0; columns 1..n-1 are the successive
        positions of the particle.
    """
    dt = t / float(n - 1)
    x = np.zeros([m, n])
    # Pre-draw step magnitudes and (to-be-normalized) step directions.
    step_size = np.sqrt(2.0 * m * d * dt) * np.random.randn(n)
    dx = np.random.randn(m, n)
    for j in range(1, n):
        if m == 1:
            # BUGFIX: the original wrote dx[j] on a (1, n) array, which
            # raises IndexError for every j >= 1; index row 0 explicitly.
            dx[0, j] = step_size[j]
        else:
            # Scale the random direction vector to the drawn step size.
            norm_dx = np.sqrt(np.sum(dx[:, j] ** 2))
            for i in range(0, m):
                dx[i, j] = step_size[j] * dx[i, j] / norm_dx
        # Each position is the sum of the previous steps.
        x[0:m, j] = x[0:m, j - 1] + dx[0:m, j]
    return x


def diffusion_const_conversion(D_angle):
    """Convert a diffusion constant from arcmin^2/s to pixel^2/s.

    k is the photoreceptor/pixel diameter in arcmin; k = 1 means one foveal
    photoreceptor spans 1 arcmin (Intoy & Rucci, 2020).
    """
    k = 1
    return k ** 2 * D_angle


def gen_drift_traj(D_arcmin=10.998, duration=0.3, N=5):
    """Generate a 2-D drift trajectory plus an N-point subsampled version.

    Parameters
    ----------
    D_arcmin : float
        Diffusion constant in arcmin^2/s.
    duration : float
        Drift duration in seconds.
    N : int
        Number of evenly spaced points in the subsampled trajectory.

    Returns
    -------
    (traj, subsamp_traj) : full 1 kHz trajectory and its subsampling.
    """
    D = diffusion_const_conversion(D_arcmin)  # pixel^2/s
    m = 2
    n = int(duration * 1000 + 1)  # original trajectory sampled at 1 kHz
    traj = brownian_motion_simulation(m=m, n=n, d=D, t=duration)
    # subsample to N points evenly distributed in time
    subsamp_traj = traj[:, range(0, n, n // (N - 1))]
    return traj, subsamp_traj


def gen_drift_traj_condition(duration=0.3, N=5, snellen=True):
    """Generate a drift trajectory for one experimental condition.

    snellen=True uses the Snellen-task diffusion constant, snellen=False the
    fixation-task constant (both from the Intoy & Rucci, 2020 source data).
    """
    D_IR = 10.998 if snellen else 30.155
    return gen_drift_traj(D_arcmin=D_IR, duration=duration, N=N)
github_jupyter
from PIL import Image, ImageOps
import math
from scipy.spatial import ConvexHull
from skimage.morphology.convex_hull import grid_points_in_poly
from feat import Detector
from feat.tests.utils import get_test_data_path
import os
import matplotlib.pyplot as plt
from skimage import data, exposure
from skimage.feature import hog
from tqdm import tqdm
import cv2
import pandas as pd
import csv
import numpy as np


def align_face_68pts(img, img_land, box_enlarge, img_size=112):
    """Rotate, scale, and crop a face to a canonical img_size x img_size view.

    Adapted from https://github.com/ZhiwenShao/PyTorch-JAANet by Zhiwen Shao,
    modified by Tiankang Xie.

    Parameters
    ----------
    img : numpy.ndarray
        Input image (as loaded by cv2).
    img_land : array-like
        Flattened 68-point landmarks [x0, y0, x1, y1, ...].
    box_enlarge : float
        Relative size of the face within the crop.
    img_size : int
        Output side length in pixels (default 112).

    Returns
    -------
    (aligned_img, new_land) : warped image and the transformed landmarks.
    """
    # Eye centers: mean of landmarks 36-41 (left) and 42-47 (right).
    leftEye0 = (img_land[2 * 36] + img_land[2 * 37] + img_land[2 * 38] + img_land[2 * 39] +
                img_land[2 * 40] + img_land[2 * 41]) / 6.0
    leftEye1 = (img_land[2 * 36 + 1] + img_land[2 * 37 + 1] + img_land[2 * 38 + 1] +
                img_land[2 * 39 + 1] + img_land[2 * 40 + 1] + img_land[2 * 41 + 1]) / 6.0
    rightEye0 = (img_land[2 * 42] + img_land[2 * 43] + img_land[2 * 44] + img_land[2 * 45] +
                 img_land[2 * 46] + img_land[2 * 47]) / 6.0
    rightEye1 = (img_land[2 * 42 + 1] + img_land[2 * 43 + 1] + img_land[2 * 44 + 1] +
                 img_land[2 * 45 + 1] + img_land[2 * 46 + 1] + img_land[2 * 47 + 1]) / 6.0
    # Rotation that makes the eye line horizontal.
    deltaX = (rightEye0 - leftEye0)
    deltaY = (rightEye1 - leftEye1)
    l = math.sqrt(deltaX * deltaX + deltaY * deltaY)
    sinVal = deltaY / l
    cosVal = deltaX / l
    mat1 = np.mat([[cosVal, sinVal, 0], [-sinVal, cosVal, 0], [0, 0, 1]])
    # Anchor points: both eye centers, nose tip (30), mouth corners (48, 54).
    mat2 = np.mat([[leftEye0, leftEye1, 1], [rightEye0, rightEye1, 1],
                   [img_land[2 * 30], img_land[2 * 30 + 1], 1],
                   [img_land[2 * 48], img_land[2 * 48 + 1], 1],
                   [img_land[2 * 54], img_land[2 * 54 + 1], 1]])
    mat2 = (mat1 * mat2.T).T
    # Center and half-size of the rotated anchor bounding box.
    cx = float((max(mat2[:, 0]) + min(mat2[:, 0]))) * 0.5
    cy = float((max(mat2[:, 1]) + min(mat2[:, 1]))) * 0.5
    if (float(max(mat2[:, 0]) - min(mat2[:, 0])) > float(max(mat2[:, 1]) - min(mat2[:, 1]))):
        halfSize = 0.5 * box_enlarge * float((max(mat2[:, 0]) - min(mat2[:, 0])))
    else:
        halfSize = 0.5 * box_enlarge * float((max(mat2[:, 1]) - min(mat2[:, 1])))
    scale = (img_size - 1) / 2.0 / halfSize
    mat3 = np.mat([[scale, 0, scale * (halfSize - cx)], [0, scale, scale * (halfSize - cy)], [0, 0, 1]])
    mat = mat3 * mat1
    # Warp; out-of-frame pixels become neutral gray.
    aligned_img = cv2.warpAffine(img, mat[0:2, :], (img_size, img_size), cv2.INTER_LINEAR,
                                 borderValue=(128, 128, 128))
    # Apply the same affine transform to the landmarks (homogeneous coords).
    land_3d = np.ones((int(len(img_land) / 2), 3))
    land_3d[:, 0:2] = np.reshape(np.array(img_land), (int(len(img_land) / 2), 2))
    mat_land_3d = np.mat(land_3d)
    new_land = np.array((mat * mat_land_3d.T).T)
    new_land = np.array(list(zip(new_land[:, 0], new_land[:, 1]))).astype(int)
    return aligned_img, new_land


def extract_hog(image, detector):
    """Detect, align, mask, and HOG-encode the face in an image file.

    Parameters
    ----------
    image : str
        Path to the image on disk.
    detector : feat.Detector
        Initialized py-feat detector for face and landmark detection.

    Returns
    -------
    tuple
        (HOG feature vector, flattened aligned landmarks,
         masked aligned face image, HOG visualization image)
    """
    im = cv2.imread(image)
    im = np.asarray(im)
    # BUGFIX: the original ran detector.detect_faces twice on the same image
    # and discarded the first result; one detection pass is sufficient.
    detected_faces = np.array(detector.detect_faces(np.array(im))[0]).astype(int)
    points = detector.detect_landmarks(np.array(im), [detected_faces])[0].astype(int)
    # Canonical 112x112 alignment based on the 68 landmarks.
    aligned_img, points = align_face_68pts(im, points.flatten(), 2.5)
    # Zero out everything outside the convex hull of the landmarks...
    hull = ConvexHull(points)
    mask = grid_points_in_poly(
        shape=np.array(aligned_img).shape,
        # verts expects (row, col) order, hence the flipped coordinates
        verts=list(zip(points[hull.vertices][:, 1], points[hull.vertices][:, 0])),
    )
    # ...but keep the forehead band above the jawline endpoints visible.
    mask[0:np.min([points[0][1], points[16][1]]), points[0][0]:points[16][0]] = True
    aligned_img[~mask] = 0
    resized_face_np = aligned_img
    # 8-orientation HOG over 8x8 cells, as used by the trained AU models.
    fd, hog_image = hog(
        resized_face_np,
        orientations=8,
        pixels_per_cell=(8, 8),
        cells_per_block=(2, 2),
        visualize=True,
        multichannel=True,
    )
    return fd, points.flatten(), resized_face_np, hog_image
def balanced_sample_maker(X, y, sample_size, random_seed=None):
    """Return a class-balanced (over)sample of (X, y).

    Draws `sample_size` observations with replacement from every class level
    present in `y`, then shuffles the combined index order.

    Parameters
    ----------
    X : numpy.ndarray
        Feature matrix (rows are observations).
    y : numpy.ndarray
        Class labels, one per row of X.
    sample_size : int
        Number of samples drawn per class.
    random_seed : int or None
        Seeds numpy's RNG for reproducibility.

    Returns
    -------
    (X_balanced, y_balanced, sampled_indices)
    """
    uniq_levels = np.unique(y)
    if random_seed is not None:
        np.random.seed(random_seed)
    # observation indices of each class level, in level order
    groupby_levels = {level: np.flatnonzero(y == level).tolist() for level in uniq_levels}
    # oversample each class to exactly sample_size observations
    balanced_copy_idx = []
    for gb_level, gb_idx in groupby_levels.items():
        over_sample_idx = np.random.choice(gb_idx, size=sample_size, replace=True).tolist()
        balanced_copy_idx += over_sample_idx
    np.random.shuffle(balanced_copy_idx)
    return (X[balanced_copy_idx, :], y[balanced_copy_idx], balanced_copy_idx)


def _return_SVM(hog_feature_cbd, y):
    """Fit a linear SVM on a class-balanced sample and print its training report."""
    (balX, baly, idx) = balanced_sample_maker(hog_feature_cbd, y,
                                              sample_size=np.sum(y == 1), random_seed=1)
    clf = LinearSVC(C=1e-6, max_iter=1200)  # change hyperparameters here
    clf.fit(X=balX, y=baly)
    print("Training report:", classification_report(baly, clf.predict(balX)))
    return clf


def _return_RF(hog_feature_cbd, y):
    """Fit a random forest on a class-balanced sample and print its training report."""
    (balX, baly, idx) = balanced_sample_maker(hog_feature_cbd, y,
                                              sample_size=np.sum(y == 1), random_seed=1)
    clf = RandomForestClassifier(random_state=0, n_estimators=140, max_depth=15,
                                 min_samples_split=20, min_samples_leaf=20)  # change hyperparameters here
    clf.fit(X=balX, y=baly)
    print("Training report:", classification_report(baly, clf.predict(balX)))
    return clf


def _return_Logistic(hog_feature_cbd, y):
    """Fit a logistic regression on a class-balanced sample and print its training report."""
    (balX, baly, idx) = balanced_sample_maker(hog_feature_cbd, y,
                                              sample_size=np.sum(y == 1), random_seed=1)
    clf = LogisticRegression(random_state=0)  # change hyperparameters here
    clf.fit(X=balX, y=baly)
    print("Training report:", classification_report(baly, clf.predict(balX)))
    return clf


def fit_transform_test(hog_features, au_matrix, AuNum_list, method='SVM'):
    """Train one classifier per AU label and return them keyed by AU name.

    Parameters
    ----------
    hog_features : numpy.ndarray
        PCA-reduced HOG features concatenated with landmarks.
    au_matrix : pandas.DataFrame
        Binary AU labels, one column per AU in AuNum_list.
    AuNum_list : list of str
        AU column names to train on (e.g. ['AU1', 'AU2', ...]).
    method : str
        'SVM', 'RF' or 'Logistic'.

    Raises
    ------
    ValueError
        For an unknown method (the original would hit an UnboundLocalError).
    """
    all_svm_params = {}
    for AuNum in AuNum_list:
        print("==========", "processing AU:", AuNum, '====================')
        y = au_matrix[AuNum].values.ravel()
        if method == 'SVM':
            new_clf = _return_SVM(hog_feature_cbd=hog_features, y=y)
        elif method == 'RF':
            new_clf = _return_RF(hog_feature_cbd=hog_features, y=y)
        elif method == 'Logistic':
            new_clf = _return_Logistic(hog_feature_cbd=hog_features, y=y)
        else:
            raise ValueError("unknown method: {}".format(method))
        all_svm_params[AuNum] = new_clf
    return all_svm_params
with open(write_path+'hogs_file_test.csv', "w", newline='') as csvfile: writer = csv.writer(csvfile, delimiter=',') writer.writerow(list(range(5408))) with open(write_path+'new_landmarks_files_test.csv', "w", newline='') as csvfile: writer = csv.writer(csvfile, delimiter=',') writer.writerow(list(range(136))) for ix in tqdm(range(master_file.shape[0])): try: imageURL = master_file['original_path'][ix] fd, landpts, cropped_img, hogs = extract_hog(imageURL, detector=A01) with open(write_path+'hogs_file_test.csv', "a+", newline='') as csvfile: writer = csv.writer(csvfile, delimiter=',') writer.writerow(fd) with open(write_path+'new_landmarks_files_test.csv', "a+", newline='') as csvfile: writer = csv.writer(csvfile, delimiter=',') writer.writerow(landpts.flatten()) except: master_file['Marked'][ix] = False print("failed to load",imageURL) continue; # Save the master file master_file.to_csv(write_path+"HOG_master_file_test.csv") # Load in y labels from the stored master file master_file_hog_test = pd.read_csv(write_path+"HOG_master_file_test.csv", index_col=0) master_file_hog_test = master_file_hog_test[master_file_hog_test['Marked']==True] master_file_hog_test.reset_index(drop=True,inplace=True) print(master_file_hog_test.head()) # We see that there are 9 possible AUs AU_nums = [1,2,4,6,9,12,17,25,26] AU_LABELS = ['AU'+str(i) for i in AU_nums] master_ylabels_test = master_file_hog_test[AU_LABELS] print(master_ylabels_test) # Load in hogs and landmarks hogs_test = pd.read_csv(write_path+"hogs_file_test.csv") landmarks_test = pd.read_csv(write_path+"new_landmarks_files_test.csv") print(hogs_test.shape) print(landmarks_test.shape) # Perform PCA on the hogs and concatenate it with landmarks # Use the previous PCA and scalar hogs_test_std = scaler.fit_transform(hogs_test) hogs_test_transformed = pca.fit_transform(hogs_test_std) x_features_test = np.concatenate((hogs_test_transformed,landmarks_test),1) print(x_features_test.shape) for AU_m in trained_svm: clf = 
trained_svm[AU_m] pred_y = clf.predict(X=x_features_test) print("prediction result for AU:", AU_m) print("Training report:", classification_report(master_ylabels_test[AU_m], pred_y)) print("f1_score:",f1_score(master_ylabels_test[AU_m], pred_y)) ```
github_jupyter
def slack_message(message, channel):
    """Post `message` to the given Slack `channel` via the Web API.

    Uses a hard-coded bot token; replace 'your_token' with a real token.
    """
    api_token = 'your_token'
    client = SlackClient(api_token)
    client.api_call(
        'chat.postMessage',
        channel=channel,
        text=message,
        username='My Sweet Bot',
        icon_emoji=':upside_down_face:',
    )
These can be inconsistent if a particular level of a categorical variable is present in one data set but not the other * `cat_cols` are categorical columns that will be used in model training * `index_cols` are columns that are used for indexing purposes and will not be fit in the model * `pred_cols` are the response variable columns * `num_cols` are the numeric columns that will be used in model training ``` cat_cols = ['ATTRIBUTE_1','ATTRIBUTE_2','ATTRIBUTE_3'] index_cols = ['FACTOR_1','FACTOR_2','FACTOR_3'] pred_cols = ['RESPONSE'] num_cols = [x for x in list(data.columns.values) if x not in cat_cols if x not in fac_cols if x not in pred_cols] ``` # Convert categorial variables to dense vectors ``` data_cat = pd.DataFrame(data[cat_cols]) for feature in cat_cols: # Loop through all columns in the dataframe if data_cat[feature].dtype == 'object': # Only apply for columns with categorical strings data_cat[feature] = pd.Categorical(data[feature]).codes # Replace strings with an integer ``` # Prepare final dataframe before resplitting into train and test sets Importantly, we want to ensure that `train_final` and `test_final` are the same rows of data as `train` and `test`. 
`DATE_SPLIT` is the date we want to use to split our train and test sets ``` data_num = data[num_cols] data_final = pd.concat([data_cat, data_num], axis=1) data_final['DATE'] = data['DATE'] data_final['RESPONSE'] = data['RESPONSE'] print data_final.shape train_final = data_final[data_final['DATE'] <= 'DATE_SPLIT'] test_final = data_final[data_final['DATE'] >= 'DATE_SPLIT' ] print(train_final.shape) print(test_final.shape) train = data[data['DATE'] <= 'DATE_SPLIT'] test = data[data['DATE'] >= 'DATE_SPLIT' ] print(train.shape) print(test.shape) ``` # Create design matrix and response vector ``` y_train = train_final['RESPONSE'] y_test = test_final['RESPONSE'] x_train = train_final.drop(['RESPONSE','DATE'], axis=1) x_test = test_final.drop(['RESPONSE','DATE'], axis=1) print x_train.columns.values ``` # Create Dataset objects for LightGBM ``` lgb_train = lgb.Dataset(data = x_train, label = y_train, free_raw_data = False) lgb_test = lgb.Dataset(data = x_test, label = y_test, reference = lgb_train, free_raw_data = False) ``` # Set hyperparameters for LightGBM Set hyperparameters for training GBM. LightGBM grows each tree in a leaf-wise fashion, compared to other algorithms like XGBoost which grows each tree level-wise. LightGBM will choose the leaf with the highest delta loss to grow, leading to greater loss reductions compared to level-wise algorithms. We need to specify the `num_leaves` parameter, which controls the maximum number of leaves a base learner can grow. `max_depth` can also be used to control for maximum tree depth, since leaf-wise growth may cause over-fitting when the dataset is small. A separate `max_depth` parameter has not been set here, but can be implemented by simply changing the `max_depth` value in `params`. We implement the DART algorithm here, instead of a traditional Gradient Boosting Decision Tree. 
More information about DART can be found here: https://arxiv.org/pdf/1505.01866.pdf

```
# With leaf-wise growth, 2**depth - 1 leaves corresponds to a fully
# grown binary tree of the given depth.
depth = 8
num_leaves = 2**depth - 1

params = {'boosting_type': 'dart',      # DART: dropout-regularized boosting
          'objective': 'regression',
          'metric': 'l2',
          'num_leaves': num_leaves,
          'max_depth': -1,              # -1 = no explicit depth limit (leaf count governs size)
          'learning_rate': 0.02,
          'n_estimators': 1000,
          'min_split_gain': 0.05,
          'min_child_weight': 0.5,
          'subsample': 0.8,             # row subsampling per iteration
          'colsample_bytree': 0.8,      # feature subsampling per tree
          'reg_alpha': 0.2,             # L1 regularization
          'reg_lambda': 0.2,            # L2 regularization
          'drop_rate': 0.2,             # DART-specific dropout parameters
          'skip_drop': 0.8,
          'max_drop': 200,
          'seed': 100,
          'silent': False
         }
```

# Run cross-validation with set hyperparameters

Early stopping rounds have also been implemented, so we can be ambitious and increase `n_estimators` to `1000`. We will use the best tree to fit the final GBM

```
num_boost_round = 1000
early_stopping_rounds = 10
nfold = 5
evals_result = {}   # filled in later by lgb.train with per-iteration eval metrics

gbmCV = lgb.cv(params,
               train_set = lgb_train,
               num_boost_round = num_boost_round,
               nfold = nfold,
               early_stopping_rounds = early_stopping_rounds,
               verbose_eval = True
               )
slack_message("Cross validation completed!", 'channel')
```

# Train GBM

Train model using the best tree found from cross-validation. Here, we record the test results at each boosting iteration

```
# CV stopped early, so the length of the mean-l2 history is the
# best number of boosting rounds to use for the final model.
num_boost_round = len(gbmCV['l2-mean'])

gbm = lgb.train(params,
                train_set = lgb_train,
                num_boost_round = num_boost_round,
                valid_sets = [lgb_test],
                valid_names = ['eval'],
                evals_result = evals_result,
                verbose_eval = True
                )
slack_message("Booster object completed!", 'channel')
```

# Plot feature importance and print values

Plot the top 30 features by `split` importance.
Create dataframe that records the `split` and `gain` of each feature ``` lgb.plot_importance(gbm, max_num_features = 30, importance_type='split') importance = pd.DataFrame() importance['Feature'] = x_train.columns.values importance['ImportanceWeight'] = gbm.feature_importance(importance_type = 'split') importance['ImportanceGain'] = gbm.feature_importance(importance_type = 'gain') importance.sort_values(by = 'ImportanceWeight', ascending = False, inplace = True) importance.head() ``` # Plot L2 during training Plot the test results at each boosting iteration ``` lgb.plot_metric(evals_result, metric='l2') ``` # Produce predictions for train and test sets before measuring accuracy Calculate predictions for both train and test sets, and then calculate MSE and RMSE for both datasets ``` gbm_train_preds = gbm.predict(x_train, num_iteration = gbm.best_iteration) gbm_test_preds = gbm.predict(x_test, num_iteration = gbm.best_iteration) print gbm_train_preds.shape print gbm_test_preds.shape print "\nModel Report" print "MSE Train : %f" % mean_squared_error(y_train, gbm_train_preds) print "MSE Test: %f" % mean_squared_error(y_test, gbm_test_preds) print "RMSE Train: %f" % mean_squared_error(y_train, gbm_train_preds)**0.5 print "RMSE Test: %f" % mean_squared_error(y_test, gbm_test_preds)**0.5 ``` # Save model file and write .csv files to working directory Save LightGBM model file for future reference. Similar function to load previously saved files is commented out below. 
Then, write all files to the working directory ``` pickle.dump(gbm, open("gbm.pickle.dat", "wb")) # gbm = pickle.load(open("gbm.pickle.dat", "rb")) # gbm_train_preds = gbm.predict(x_train) # gbm_test_preds = gbm.predict(x_test) # print "\nModel Report" # print "MSE Train : %f" % mean_squared_error(y_train, gbm_train_preds) # print "MSE Test: %f" % mean_squared_error(y_test, gbm_test_preds) # print "RMSE Train: %f" % mean_squared_error(y_train, gbm_train_preds)**0.5 # print "RMSE Test: %f" % mean_squared_error(y_test, gbm_test_preds)**0.5 train_preds = pd.DataFrame(gbm_train_preds) test_preds = pd.DataFrame(gbm_test_preds) train_preds.columns = ['RESPONSE'] test_preds.column = ['RESPONSE'] train.to_csv('LGBM Train.csv', sep=',') train_preds.to_csv('LGBM Train Preds.csv', sep=',') test.to_csv('LGBM Test.csv', sep=',') test_preds.to_csv('LGBM Test Preds.csv', sep=',') importance.to_csv('LGBM Feature Importance.csv', index = False) slack_message("Files saved!", 'channel') ```
github_jupyter
``` import numpy as np import scipy as sp import pandas as pd from scipy.stats import norm from scipy.stats import bernoulli from scipy.stats import dirichlet import seaborn as sns import matplotlib.pyplot as plt from statsmodels.nonparametric.kernel_density import KDEMultivariate from tqdm.notebook import tqdm %reload_ext autoreload %autoreload 2 ``` # Density Estimation ## 1: GMM ``` #GMM datasets def gen_data(seed,n): np.random.seed(seed) p = bernoulli.rvs(0.2,size = n) y = np.random.randn(n) y[p==0] +=-2 y[p==1]+=2 return y.reshape(-1,1) #Generate data (change n for both plots) #n = 50 n = 200 seed = 102 n_test = 5000 d = 1 y = gen_data(seed,n) mean_y = np.mean(y) std_y = np.std(y) y = (y-mean_y)/std_y y_test = gen_data(seed+1,n_test).reshape(-1,1) y_test = (y-mean_y)/std_y y_plot = np.arange(-4,4,0.05).reshape(-1,1)*std_y + mean_y dy = y_plot[1]-y_plot[0] true_pdf = (0.8*norm.pdf(y_plot,loc = -2) + 0.2*norm.pdf(y_plot,loc = 2)) #load random densities logpdf_pr = np.load('plot_files/copula_gmm_logpdf_pr_n{}.npy'.format(n)) logcdf_pr = np.load('plot_files/copula_gmm_logcdf_pr_n{}.npy'.format(n)) #Extract joint pdf_pr = np.exp(logpdf_pr)[:,:,-1] cdf_pr = np.exp(logcdf_pr)[:,:,-1] #Compute mean and quantiles mean_pdf_pr = np.mean(pdf_pr,axis = 0) bot25_pdf_pr = np.percentile(pdf_pr,2.5,axis = 0) top25_pdf_pr = np.percentile(pdf_pr,97.5,axis = 0) #GMM ground truth comparison f =plt.figure(figsize=(14,4)) ylim = (-0.02, 0.5) #ylim = (-0.03988344073295537, 1.1) plt.subplot(1,2,1) plt.title('Copula',fontsize = 13) sns.lineplot(y_plot[:,0],mean_pdf_pr/std_y,color = 'k') plt.fill_between(y_plot[:,0], top25_pdf_pr/std_y,bot25_pdf_pr/std_y,alpha = 0.2, label = '95% CI',color ='k') plt.plot(y_plot,true_pdf,color = 'k',linestyle = '--',alpha = 0.7, label = 'Truth') plt.scatter(y*std_y + mean_y,np.zeros(n),color = "k",s =4,label = 'Data') #plt.legend(loc = 1) ylim = plt.ylim(ylim) xlim = plt.xlim() plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlabel('$y$ \n 
\n(a)',fontsize = 12) plt.ylabel('Density',fontsize = 12) plt.subplot(1,2,2) gmm_plot = pd.read_csv("plot_files/dpmm_gmm_plot_n{}.csv".format(n)) mean_pdf_MCMC = gmm_plot['mean_pdf'] post_pdf_low_MCMC = gmm_plot['top25_pdf'] post_pdf_high_MCMC = gmm_plot['bot25_pdf'] plt.plot(y_plot,true_pdf,color = 'k',linestyle = '--',alpha = 0.7, label = 'Truth') sns.lineplot(y_plot[:,0],mean_pdf_MCMC/std_y,label = 'Posterior mean',color = 'k') plt.fill_between(y_plot[:,0], post_pdf_high_MCMC/std_y,post_pdf_low_MCMC/std_y,alpha = 0.2, label = '95% credible interval',color = 'k') plt.scatter(y*std_y + mean_y,np.zeros(n),color = "k",s =4,label = 'Data') #plt.legend(loc = 1) plt.legend("",frameon=False) plt.title('DPMM',fontsize = 13) plt.ylim(ylim) plt.xlim(xlim) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlabel('$y$ \n \n(b)',fontsize = 12) plt.ylabel('Density',fontsize = 12) f.savefig("plots/gmm_example_{}.pdf".format(n),bbox_inches='tight') #Convergence plot pdiff = np.load('plot_files/copula_gmm_pr_pdiff_n{}.npy'.format(n)) cdiff = np.load('plot_files/copula_gmm_pr_cdiff_n{}.npy'.format(n)) plt.figure() f = plt.figure(figsize = (14,3)) plt.subplot(1,2,1) dy = y_plot[1]- y_plot[0] plt.plot(np.arange(10000)+n,pdiff[0,0:10000]*np.size(y_plot)*dy,color = 'black',alpha = 0.8) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlabel('Forward samples $N$ \n \n(a)',fontsize = 12) plt.ylabel('Distance',fontsize = 12) plt.subplot(1,2,2) plt.plot(np.arange(10000)+n,cdiff[0,0:10000]*np.size(y_plot)*dy,color = 'black',alpha = 0.8) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlabel('Forward samples $N$ \n \n(b)',fontsize = 12) plt.ylabel('Distance',fontsize = 12) f.savefig('plots/convergence_gmm_{}.pdf'.format(n), bbox_inches='tight') ``` ## 1: Galaxy ``` #Load data and normalize from pydataset import data y_gal = data('galaxies').values y_gal = y_gal /1000 n_gal = np.shape(y_gal)[0] mean_y = np.mean(y_gal) std_y = np.std(y_gal) n_plot = 200 y_plot_gal = 
np.linspace(5,40,n_plot) dy = y_plot_gal[1]-y_plot_gal[0] #Find bandwidth and fit KDE kde_sm = KDEMultivariate(y_gal,var_type = 'c',bw = 'cv_ml') ``` ### Plots ``` ##Load posterior samples pdf_joints_pr = np.exp(np.load('plot_files/copula_galaxy_logpdf_pr.npy')[:,:,-1]) cdf_conditionals_pr = np.exp(np.load('plot_files/copula_galaxy_logcdf_pr.npy')) ylim = (-0.017714758231901252, 0.37) f = plt.figure(figsize = (14,4 )) plt.subplot(1,2,1) #rescaled mean_pdf = np.mean(pdf_joints_pr,axis = 0) bot10_pdf = np.percentile(pdf_joints_pr,2.5,axis = 0) top10_pdf = np.percentile(pdf_joints_pr,97.5,axis = 0) sns.lineplot(y_plot_gal,mean_pdf/std_y,color = 'k') plt.fill_between(y_plot_gal, top10_pdf/std_y,bot10_pdf/std_y,alpha = 0.2,color = 'k') plt.plot(y_plot_gal,kde_sm.pdf(y_plot_gal),color = 'k',linestyle = ':',alpha = 0.8) plt.scatter(y_gal,np.zeros(n_gal),color = "k",s =4,label = 'Data') #plt.legend() plt.title("Copula",fontsize = 13) plt.ylim(ylim) xlim = plt.xlim() plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlabel('$y$\n \n(a)',fontsize = 12) plt.ylabel('Density',fontsize=12) #DPMM plt.subplot(1,2,2) galaxy_plot = pd.read_csv("plot_files/dpmm_galaxy_plot.csv") mean_pdf = galaxy_plot['mean_pdf'] post_pdf_low = galaxy_plot['top25_pdf'] post_pdf_high = galaxy_plot['bot25_pdf'] plt.plot(y_plot_gal,kde_sm.pdf(y_plot_gal),color = 'k',linestyle = ':',alpha = 0.8,label = 'KDE') sns.lineplot(y_plot_gal,mean_pdf,label = 'Posterior mean',color = 'k') plt.fill_between(y_plot_gal, post_pdf_high,post_pdf_low,alpha = 0.2, label = '95% credible interval',color = 'k') plt.scatter(y_gal,np.zeros(n_gal),color = "k",s =4,label = 'Data') #plt.legend() plt.ylim(ylim) plt.xlim(xlim) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlabel('$y$\n \n(b)',fontsize = 12) plt.ylabel('Density',fontsize=12) plt.legend("",frameon=False) plt.title("DPMM",fontsize = 13) f.savefig('plots/copula_DPMM_galaxy.pdf', bbox_inches='tight') f = plt.figure(figsize = (14,4)) plt.subplot(1,2,1) 
#Compute mode count for copula B_postsamples = np.shape(pdf_joints_pr)[0] modes_cop = np.zeros(B_postsamples,dtype = 'int') for i in tqdm(range(B_postsamples)): modes_cop[i] = (np.diff(np.sign(np.diff(pdf_joints_pr[i])*dy)) < 0).sum() #look for when second derivative is negative #compute mode count for dpmm pdf_dpmm = pd.read_csv("plot_files/dpmm_galaxy_pdf_samples.csv").values[:,1:] B_dpmm = np.shape(pdf_dpmm)[0] modes_dpmm = np.zeros(B_dpmm,dtype = 'int') for i in tqdm(range(B_dpmm)): modes_dpmm[i] = (np.diff(np.sign(np.diff(pdf_dpmm[i])*dy)) < 0).sum() #look for when second derivative is negative #Setup plot colors = ["k", "grey"] modes_data = pd.concat([pd.DataFrame({'method': "Copula",'modes': modes_cop}),pd.DataFrame({'method': "DPMM",'modes':modes_dpmm})]) g = sns.histplot(modes_data,x = 'modes', hue = 'method',common_norm=False,\ multiple = 'dodge',discrete = True,stat = "probability",shrink = 0.7,palette = colors) #g.legend_.set_title(None) plt.legend("",frameon=False) plt.xlabel('Number of modes \n \n(a)',fontsize = 12) plt.ylabel('Probability',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) #Quantiles plt.subplot(1,2,2) m =10 #DPMM cdf_dpmm = pd.read_csv("plot_files/dpmm_galaxy_cdf_samples.csv").values[:,1:] B = np.shape(cdf_dpmm)[0] quantiles_dpmm = np.zeros(B) for i in range(B): quantiles_dpmm[i]= y_plot_gal[np.argmin(np.abs(cdf_dpmm[i] - m/100))] #Copula B = np.shape(cdf_conditionals_pr)[0] quantiles_cop = np.zeros(B) for i in range(B): quantiles_cop[i]= y_plot_gal[np.argmin(np.abs(cdf_conditionals_pr[i,:,0] - m/100))] plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlabel('$y$ \n \n(b)',fontsize = 12) plt.ylabel('Density',fontsize = 12) sns.kdeplot(quantiles_cop,label = 'Copula'.format(m),color = 'k',ls = '-') sns.kdeplot(quantiles_dpmm,label = 'DPMM'.format(m),color = 'grey',ls = '--') #plt.legend(loc = 1) plt.legend("",frameon=False) plt.ylim(0,0.25) f.savefig('plots/modes_galaxy.pdf', bbox_inches='tight') #Convergence plot 
# Convergence of the galaxy predictive-resampling estimates.
# NOTE(review): '...npy'.format(n) is a no-op — the filenames contain no '{}'
# placeholder, so the .format(n) calls can be dropped.
pdiff = np.load('plot_files/copula_galaxy_pr_pdiff.npy'.format(n))
cdiff = np.load('plot_files/copula_galaxy_pr_cdiff.npy'.format(n))

plt.figure()
f = plt.figure(figsize = (14,3))
plt.subplot(1,2,1)
# NOTE(review): `y_plot` and `n` here still refer to the GMM section's grid and
# sample size; the galaxy section's grid/sample are `y_plot_gal` and `n_gal`.
# Confirm whether this reuse is intentional — it looks like a stale-variable bug.
dy = y_plot[1]- y_plot[0]
#normalize by pdf to get correct weighting?
plt.plot(np.arange(10000)+n,pdiff[0,0:10000]*np.size(y_plot)*dy,color = 'black',alpha = 0.8)
plt.xlabel('Forward samples $N$ \n \n(a)',fontsize = 12)
plt.ylabel('Distance',fontsize = 12)
plt.xticks(fontsize=12)
plt.yticks(fontsize = 12)
plt.subplot(1,2,2)
plt.plot(np.arange(10000)+n,cdiff[0,0:10000]*np.size(y_plot)*dy,color = 'black',alpha = 0.8)
plt.xlabel('Forward samples $N$ \n \n(b)',fontsize = 12)
plt.ylabel('Distance',fontsize = 12)
plt.xticks(fontsize=12)
plt.yticks(fontsize = 12)
f.savefig('plots/convergence_galaxy.pdf', bbox_inches='tight')
```

## 2: Ozone

```
#Ozone
from matplotlib import cm

#load function
def load_ozone(n_plot_marg):
    """Load airquality Ozone/Solar.R pairs, cube-root the ozone column,
    standardize both columns, and build an n_plot_marg x n_plot_marg plotting grid.

    Returns (y, y_plot, y_plot_grid, n, d, ylim, xlim, y_plot_marg).
    """
    from pydataset import data
    data = data('airquality')   # note: rebinds the imported `data` function locally
    var = ['Ozone','Solar.R']
    y = data[var].dropna().values
    y[:,0] = y[:,0]**(1/3)      # cube-root transform of the ozone column
    y = (y-np.mean(y,axis =0))/np.std(y,axis = 0)
    n = np.shape(y)[0]
    d = np.shape(y)[1]
    ylim = (-2.5,2.25)
    xlim = (-2.75,2.75)
    # Marginal grids per coordinate, then the full mesh for contour plotting.
    y_plot_marg = np.zeros((n_plot_marg,d))
    y_plot_marg[:,0] = np.linspace(-2.75,2.75,num = n_plot_marg)
    y_plot_marg[:,1] = np.linspace(-2.5,2.25,num = n_plot_marg)
    dy1 = y_plot_marg[1,0]- y_plot_marg[0,0]
    dy2 = y_plot_marg[1,1]- y_plot_marg[0,1]
    y_plot_grid = np.meshgrid(y_plot_marg[:,0],y_plot_marg[:,1])
    y_plot = np.vstack((y_plot_grid[0].ravel(), y_plot_grid[1].ravel())).transpose()
    n_plot_y = np.shape(y_plot)[0]
    return y,y_plot,y_plot_grid,n,d,ylim,xlim,y_plot_marg

n_plot_marg = 25
# NOTE: this rebinds y, y_plot, n, d, ... for the ozone section.
y,y_plot,y_plot_grid,n,d,ylim,xlim,y_plot_marg =load_ozone(n_plot_marg)

# Quick look at the data: KDE contour plus raw scatter.
sns.kdeplot(y[:,0],y[:,1],cbar = True,shade = True)
plt.xlabel('$y_1$')
plt.ylabel('$y_2$')
plt.scatter(y[:,0], y[:,1],s = 5)
d = np.shape(y)[1]
```

### Copula

```
logpdf_pr = np.load('plot_files/copula_ozone_logpdf_pr.npy')
logcdf_pr = 
np.load('plot_files/copula_ozone_logcdf_pr.npy') #Extract joint pdf_pr = np.exp(logpdf_pr)[:,:,-1] cdf_pr = np.exp(logcdf_pr)[:,:,-1] pdf_pr_mean = np.mean(pdf_pr,axis = 0) pdf_pr_std = np.std(pdf_pr,axis = 0) fig = plt.figure(figsize = (14,4)) plt.subplot(1,2,1) ax = fig.gca() cont = ax.contourf(y_plot_grid[0], y_plot_grid[1],pdf_pr_mean.reshape\ (n_plot_marg,n_plot_marg),cmap=cm.Greys) CB = fig.colorbar(cont, shrink=0.8, extend='both') CB.ax.tick_params(labelsize=12) plt.scatter(y[:,0], y[:,1],s =2,color = 'black',alpha = 0.7,label= 'Data') plt.ylim(ylim) plt.xlim(xlim) plt.xlabel('$y_1$\n \n(a)',fontsize = 12) plt.ylabel('$y_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) #plt.legend() plt.subplot(1,2,2) ax = fig.gca() cont = ax.contourf(y_plot_grid[0], y_plot_grid[1], pdf_pr_std.reshape\ (n_plot_marg,n_plot_marg),cmap=cm.Greys) CB = fig.colorbar(cont, shrink=0.8, extend='both') CB.ax.tick_params(labelsize=12) plt.xlabel('$y_1$\n \n(b)',fontsize = 12) plt.ylabel('$y_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.ylim(ylim) plt.xlim(xlim) plt.scatter(y[:,0], y[:,1],s =2,color = 'black',alpha = 0.7,label= 'Data') plt.show() fig.savefig('plots/mv_copula_ozone.pdf', bbox_inches='tight') #Convergence plots pdiff = np.load('plot_files/copula_ozone_pr_pdiff.npy'.format(n)) cdiff = np.load('plot_files/copula_ozone_pr_cdiff.npy'.format(n)) pdf_pr_conv = np.load('plot_files/copula_ozone_pr_pdf_samp.npy') T_fwdsamples = np.shape(pdiff)[1] f = plt.figure(figsize = (14,4)) plt.subplot(1,2,1) ax = f.gca() cont = ax.contourf(y_plot_grid[0], y_plot_grid[1],pdf_pr_conv.reshape\ (n_plot_marg,n_plot_marg),cmap=cm.Greys) CB = f.colorbar(cont, shrink=0.8, extend='both') CB.ax.tick_params(labelsize=12) plt.ylim(ylim) plt.xlim(xlim) plt.xlabel('$y_1$ \n \n(a)',fontsize = 12) plt.ylabel('$y_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.subplot(1,2,2) #normalize by pdf to get correct weighting? 
dy1 = np.abs(y_plot_grid[0][0,1] - y_plot_grid[0][0,0]) dy2 = np.abs(y_plot_grid[1][1,0] - y_plot_grid[1][0,0]) plt.plot(np.arange(T_fwdsamples)+n,pdiff[0]*(n_plot_marg**2*dy1*dy2),color = 'black',alpha = 0.8) plt.xlabel('Forward samples $N$ \n \n(b)',fontsize = 12) plt.ylabel('Distance',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) f.savefig('plots/convergence_ozone.pdf', bbox_inches='tight') ``` ### DPMM with MCMC ``` from matplotlib import cm import matplotlib.colors as mc #Load in R data ozone_plot = pd.read_csv("plot_files/dpmm_ozone_plot.csv") y_plot = ozone_plot.iloc[:,1:3].values n_plot_marg = 25 mean_pdf = ozone_plot["mean_pdf"].values std_pdf = ozone_plot["std_pdf"].values bot25_pdf = ozone_plot["bot25_pdf"].values top25_pdf = ozone_plot["top25_pdf"].values fig = plt.figure(figsize = (14,4)) plt.subplot(1,2,1) ax = fig.gca() cont = ax.contourf(y_plot[:,0].reshape(n_plot_marg,n_plot_marg), y_plot[:,1].reshape(n_plot_marg,n_plot_marg), mean_pdf.reshape(n_plot_marg,n_plot_marg),cmap=cm.Greys) CB = fig.colorbar(cont, shrink=0.8, extend='both') CB.ax.tick_params(labelsize=12) plt.xlabel('$y_1$\n \n(a)',fontsize = 12) plt.ylabel('$y_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) #plt.title(r'DPMM posterior mean density') plt.scatter(y[:,0], y[:,1],s =2,color = 'black',alpha = 0.7,label= 'Data') #plt.legend() plt.subplot(1,2,2) #plt.title('DPMM posterior standard deviation of density') ax = fig.gca() cont = ax.contourf(y_plot[:,0].reshape(n_plot_marg,n_plot_marg), y_plot[:,1].reshape(n_plot_marg,n_plot_marg), \ std_pdf.reshape(n_plot_marg,n_plot_marg),cmap=cm.Greys) CB = fig.colorbar(cont, shrink=0.8, extend='min') CB.ax.tick_params(labelsize=12) plt.xlabel('$y_1$\n \n(b)',fontsize = 12) plt.ylabel('$y_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.scatter(y[:,0], y[:,1],s =2,color = 'black',alpha = 0.7,label= 'Data') fig.savefig('plots/mv_dpmm_fullcov_ozone.pdf', bbox_inches='tight') ``` ## 3: 
UCI Datasets ``` #Pick dataset for dataset in ["Breast","Ionosphere","Parkinsons","Wine"]: #Load test log likelihoods test_loglik_cop = np.load('plot_files/copula_mv_loglik{}.npy'.format(dataset)) test_loglik_kde = np.load('plot_files/kde_mv_loglik{}.npy'.format(dataset)) test_loglik_dpmm = np.load('plot_files/dpmm_mv_loglik{}.npy'.format(dataset)) test_loglik_gaussian= np.load('plot_files/gaussian_mv_loglik{}.npy'.format(dataset)) #Print average print('Dataset: {}'.format(dataset)) print('Avg test loglik for Gaussian = {} +- {}'.format(np.mean(test_loglik_gaussian),np.std(test_loglik_gaussian)/np.sqrt(10))) print('Avg test loglik for KDE = {} +- {}'.format(np.mean(test_loglik_kde),np.std(test_loglik_kde)/np.sqrt(10))) print('Avg test loglik for DPMM = {} +- {}'.format(np.mean(test_loglik_dpmm), np.std(test_loglik_dpmm)/np.sqrt(10))) print('Avg test loglik for Copula = {} +- {} \n'.format(np.mean(test_loglik_cop), np.std(test_loglik_cop)/np.sqrt(10))) ``` ### Performance with d ``` #Load test loglik test_loglik_cop = np.load('plot_files/copula_mv_loglik{}.npy'.format("gmm")) test_loglik_kde = np.load('plot_files/kde_mv_loglik{}.npy'.format("gmm")) test_loglik_dpmm = np.load('plot_files/dpmm_mv_loglik{}.npy'.format("gmm")) d_range = np.array([1,2,10,20,40,60,80,100]) f =plt.figure(figsize=(6,4)) plt.plot(d_range,test_loglik_cop,linestyle = '-',label = 'Copula',color = 'k') plt.plot(d_range,test_loglik_dpmm,linestyle = ':',label = 'DPMM (VI)',color = 'k') plt.plot(d_range,test_loglik_kde,linestyle= '-.',label = 'KDE',color = 'k') plt.xlabel('d',fontsize = 12) plt.ylabel('Test log-likelihood',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) #plt.title('GMM with dimensionality') #plt.legend() f.savefig('plots/highd_GMM.pdf', bbox_inches='tight') ``` # Regression ## 4: Lidar ### Joint Method ``` #Lidar data DATA_URI = 'http://www.stat.cmu.edu/~larry/all-of-nonpar/=data/lidar.dat' df = pd.read_csv(DATA_URI,delim_whitespace = True) y = 
df['logratio'].values x = df['range'].values.reshape(-1,1) y =(y-np.mean(y))/np.std(y) x = (x - np.mean(x,axis = 0))/np.std(x,axis = 0) plt.scatter(x,y, s= 2) plt.xlabel('Standardized range') plt.ylabel('Standardized log ratio') y_plot = np.linspace(-3.0,2.0,num = 200) n_plot_y = np.shape(y_plot)[0] dy = y_plot[1] - y_plot[0] x_plot = np.linspace(np.min(x),np.max(x),num = 40) n_plot_x = np.shape(x_plot)[0] dx = x_plot[1] - x_plot[0] n = np.shape(y)[0] n_plot_marg = np.array([np.shape(x_plot)[0],np.shape(y_plot)[0]]) xlim = (-1.8,1.8) ylim = (-2.5,1.7) z_plot_grid = np.meshgrid(x_plot,y_plot) x_plot_ravel = z_plot_grid[0].ravel().reshape(-1,1) y_plot_ravel = z_plot_grid[1].ravel() #Load predictive densities pdf_cop_condj = np.load('plot_files/jcopula_lidar_pdf_plot.npy') cdf_cop_condj = np.load('plot_files/jcopula_lidar_cdf_plot.npy') #preprocess cdf_condj_plot = cdf_cop_condj.reshape(n_plot_marg[1],n_plot_marg[0]) pdf_condj_plot = pdf_cop_condj.reshape(n_plot_marg[1],n_plot_marg[0]) n_plot_x = np.shape(x_plot)[0] bot25_joint = np.zeros(n_plot_x) top25_joint = np.zeros(n_plot_x) mean_cop_joint = np.zeros(n_plot_x) for j in range(n_plot_x): bot25_joint[j] =y_plot[np.searchsorted(cdf_condj_plot[:,j],0.025)-1] top25_joint[j] =y_plot[np.searchsorted(cdf_condj_plot[:,j],0.975)-1] mean_cop_joint[j] = np.sum(pdf_condj_plot[:,j]*y_plot*dy) #plot f = plt.figure(figsize = (14,4)) plt.subplot(1,2,1) plt.plot(x_plot,mean_cop_joint,color = 'k',label = 'Posterior mean') plt.fill_between(x_plot, bot25_joint, top25_joint, alpha = 0.2, label = '95% predictive interval',color = 'grey') plt.scatter(x,y,label = 'Data',s = 5,color = 'grey') plt.xlabel('$x$\n \n(a)',fontsize = 12) plt.ylabel('$y$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.ylim(ylim) plt.xlim(xlim) plt.title('Copula',fontsize = 13) #plt.legend(loc = 3) plt.subplot(1,2,2) lidar_plot_xrange = pd.read_csv("plot_files/dpmm_lidar_plot_xrange.csv") plt.title('DPMM',fontsize = 13) 
plt.plot(x_plot,lidar_plot_xrange["mean"],label = 'Posterior mean',color = 'k') plt.fill_between(x_plot, lidar_plot_xrange["top25"], lidar_plot_xrange["bot25"], alpha = 0.2, label = '95% predictive interval',color = 'grey') plt.scatter(x,y,label = 'Data',s = 5,color = 'grey') plt.xlabel('$x$\n \n(b)',fontsize = 12) plt.ylabel('$y$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.ylim(ylim) plt.xlim(xlim) f.savefig('plots/lidar_copula_jdpmm.pdf', bbox_inches='tight') #pick x #x_pr = [-3] x_pr = [0] ylim2 = (-0.102216080353868, 3.0) #Load samples and process logpdf_joints_pr = np.load('plot_files/jcopula_lidar_logpdf_pr{}.npy'.format(x_pr[0])) pdf_condj_pr = np.exp(logpdf_joints_pr[:,:,-1] - logpdf_joints_pr[:,:,-2]) mean_pdf_condj = np.mean(pdf_condj_pr,axis = 0) bot25_pdf_condj = np.percentile(pdf_condj_pr,2.5,axis = 0) top25_pdf_condj = np.percentile(pdf_condj_pr,97.5,axis = 0) f = plt.figure(figsize = (14,4 )) plt.subplot(1,2,1) #rescaled sns.lineplot(y_plot,mean_pdf_condj,color = 'k',label = 'Posterior mean') plt.fill_between(y_plot, top25_pdf_condj,bot25_pdf_condj,alpha = 0.2,color = 'k',label = '95% credible interval') plt.xlabel('$y$ \n \n(a)',fontsize = 12) plt.ylabel('Density',fontsize = 12) #plt.legend(loc = 2) plt.title("Joint Copula".format(x_pr[0]),fontsize = 13) plt.ylim(ylim2) xlim2 = plt.xlim() plt.xticks(fontsize=12) plt.yticks(fontsize = 12) #plt.legend(loc = 2) plt.legend("",frameon=False) #plot plt.subplot(1,2,2) lidar_plot = pd.read_csv("plot_files/dpmm_lidar_plot{}.csv".format(x_pr[0])) #rescaled sns.lineplot(y_plot,lidar_plot['mean_pdf'],color = 'k') plt.fill_between(y_plot, lidar_plot['top25_pdf'],lidar_plot['bot25_pdf'],alpha = 0.2,color = 'k',label = '95% credible interval') plt.xlabel('$y$ \n \n(b)',fontsize = 12) plt.ylabel('Density',fontsize = 12) plt.title("Joint DPMM".format(x_pr[0]),fontsize = 13) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.ylim(ylim2) plt.xlim(xlim2) 
f.savefig(f"plots/lidar_jdpmm_x{x_pr[0]}.pdf", bbox_inches='tight') #Load samples cdf_condj_pr = np.load("plot_files/jcopula_lidar_cdf_median.npy") #Took 13 seconds for 40 x 40. #Setup plot n_grid_x = np.shape(x_plot)[0] n_grid_y = 40 y_plot_median = np.linspace(bot25_joint,top25_joint,n_grid_y,axis = -1) xlim = (-1.8,1.8) ylim = (-2.6,1.5) cdf_condj_pr = cdf_condj_pr.reshape(B_postsamples,n_grid_x,n_grid_y) median_samp = np.zeros((B_postsamples,n_grid_x)) for j in range(n_grid_x): median_samp[:,j] = y_plot_median[j,np.argmin(np.abs(cdf_condj_pr[:,j]- 0.5),axis = 1)] f = plt.figure(figsize = (14,4)) mean_median_pr = np.mean(median_samp,axis = 0) bot25_median_pr = np.percentile(median_samp,2.5,axis = 0) top25_median_pr = np.percentile(median_samp,97.5,axis = 0) plt.subplot(1,2,1) plt.plot(x_plot,mean_median_pr,color = 'k',label = 'Posterior mean') plt.fill_between(x_plot, top25_median_pr,bot25_median_pr,alpha = 0.2, label = '95% credible interval',color = 'k') plt.scatter(x,y,label = 'Data',s = 5,color = 'grey') plt.xlabel('$x$\n \n(a)',fontsize = 12) plt.ylabel('$y$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.ylim(ylim) plt.xlim(xlim) #plt.title('Joint copula median of $P_\infty(y \mid x)$') #plt.legend(loc = 3) plt.subplot(1,2,2) #x_pr = [-3] x_pr = [0] pdiff = np.load("plot_files/jcopula_lidar_pr_pdiff{}.npy".format(x_pr[0])) dy = y_plot[1]- y_plot[0] #normalize by pdf to get correct weighting? 
plt.plot(np.arange(10000)+n,pdiff[0,0:10000]*np.size(y_plot)*dy,color = 'black',alpha = 0.8) plt.xlabel('Forward samples $N$\n \n(b)',fontsize = 12) plt.ylabel('Distance',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) #plt.title('$L_1$-distance between $p_N$ and initial $p_n$ at $x = {}$'.format(x_pr[0])) f.savefig('plots/lidar_median_convergence.pdf', bbox_inches='tight') ``` ### Conditional Method ``` #Load samples #Load predictive densities pdf_cop_condc = np.load('plot_files/ccopula_lidar_pdf_plot.npy') cdf_cop_condc = np.load('plot_files/ccopula_lidar_cdf_plot.npy') cdf_condc_plot = cdf_cop_condc.reshape(n_plot_marg[1],n_plot_marg[0]) pdf_condc_plot = pdf_cop_condc.reshape(n_plot_marg[1],n_plot_marg[0]) n_plot_x = np.shape(x_plot)[0] bot25_cond = np.zeros(n_plot_x) top25_cond = np.zeros(n_plot_x) mean_cop_cond = np.zeros(n_plot_x) for j in range(n_plot_x): bot25_cond[j] =y_plot[np.searchsorted(cdf_condc_plot[:,j],0.025)-1] top25_cond[j] =y_plot[np.searchsorted(cdf_condc_plot[:,j],0.975)-1] mean_cop_cond[j] = np.sum(pdf_condc_plot[:,j]*y_plot*dy) xlim = (-1.8,1.8) ylim = (-2.6,1.5) #plot f = plt.figure(figsize = (14,4)) plt.subplot(1,2,1) plt.plot(x_plot,mean_cop_cond,color = 'k',label = 'Mean') plt.fill_between(x_plot, bot25_cond, top25_cond, alpha = 0.2, label = '95% predictive interval',color = 'grey') plt.scatter(x,y,label = 'Data',s = 5,color = 'k',alpha = 0.3) plt.xlabel('$x$\n \n(a)',fontsize = 12) plt.ylabel('$y$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.ylim(ylim) plt.xlim(xlim) plt.title('Conditional Copula',fontsize = 13) plt.subplot(1,2,2) mean_gp = np.load('plot_files/gp_lidar_mean.npy') std_gp = np.load('plot_files/gp_lidar_std.npy') plt.title('GP',fontsize = 13) plt.plot(x_plot,mean_gp,label = 'Predictive mean',color = 'k') plt.fill_between(x_plot, mean_gp + 2*std_gp, mean_gp - 2*std_gp, alpha = 0.2, label = '95% predictive interval',color = 'grey') plt.scatter(x,y,label = 'Data',s = 5,color = 'grey') 
plt.xlabel('$x$\n \n(b)',fontsize = 12) plt.ylabel('$y$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) #plt.legend(loc = 3) plt.ylim(ylim) plt.xlim(xlim) f.savefig('plots/lidar_conditcopula_gp.pdf', bbox_inches='tight') #x_pr = [-3] x_pr = [0] ylim2 = (-0.102216080353868, 3.0) f = plt.figure(figsize = (14,4 )) plt.subplot(1,2,1) #Load samples and Process logpdf_joints_pr = np.load('plot_files/jcopula_lidar_logpdf_pr{}.npy'.format(x_pr[0])) pdf_condj_pr = np.exp(logpdf_joints_pr[:,:,-1] - logpdf_joints_pr[:,:,-2]) mean_pdf_condj = np.mean(pdf_condj_pr,axis = 0) bot25_pdf_condj = np.percentile(pdf_condj_pr,2.5,axis = 0) top25_pdf_condj = np.percentile(pdf_condj_pr,97.5,axis = 0) #rescaled sns.lineplot(y_plot,mean_pdf_condj,color = 'k',label = 'Posterior mean') plt.fill_between(y_plot, top25_pdf_condj,bot25_pdf_condj,alpha = 0.2,color = 'k',label = '95% credible interval') plt.xlabel('$y$\n \n(a)',fontsize = 12) plt.ylabel('Density',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.title("Joint Copula".format(x_pr[0]),fontsize = 13) plt.ylim(ylim2) xlim2 = plt.xlim() #plt.legend(loc = 2) plt.legend("",frameon=False) #plot plt.subplot(1,2,2) #Load samples and process logpdf_pr = np.load('plot_files/ccopula_lidar_logpdf_pr{}.npy'.format(x_pr[0])) pdf_condc_pr = np.exp(logpdf_pr[:,:,0]) mean_pdf_condc = np.mean(pdf_condc_pr,axis = 0) bot25_pdf_condc = np.percentile(pdf_condc_pr,2.5,axis = 0) top25_pdf_condc = np.percentile(pdf_condc_pr,97.5,axis = 0) #rescaled sns.lineplot(y_plot,mean_pdf_condc,color = 'k') plt.fill_between(y_plot, top25_pdf_condc,bot25_pdf_condc,alpha = 0.2,color = 'k',label = '95% credible interval') plt.xlabel('$y$\n \n(b)',fontsize = 12) plt.ylabel('Density',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) #plt.legend(loc = 2) plt.title("Conditional Copula".format(x_pr[0]),fontsize = 13) plt.ylim(ylim2) plt.xlim(xlim2) f.savefig(f"plots/lidar_jc_x{x_pr[0]}.pdf", bbox_inches='tight') ``` ## 5: Moon 
``` from sklearn.datasets import make_moons seed = 52 n = 100 n_test = 5000 noise = 0.3 x_temp,y_temp= make_moons(n+n_test,noise = noise,random_state = seed) y = y_temp[0:n] x = x_temp[0:n] y_test = y_temp[n:n+n_test] x_test = x_temp[n:n+n_test] d = np.shape(x)[1] d_gp = d #normalize mean_norm = np.mean(x,axis = 0) std_norm = np.std(x,axis = 0) x = (x - mean_norm)/std_norm x_test = (x_test - mean_norm)/std_norm #for plot noise = 0 x_temp,y_temp= make_moons(n+n_test,noise = noise,random_state = seed) y_test2 = y_temp[n:n+n_test] x_test2 = x_temp[n:n+n_test] types = np.array([0.,0.]) n_cats = np.array([]) #normalize x_test2 = (x_test2 - np.mean(x_test2,axis = 0))/np.std(x_test2,axis = 0) #Setup plot y_plot = np.array([0]) x_plot = np.linspace(-4,4.1,25) x_meshgrid = np.meshgrid(x_plot,x_plot) n_grid1 = np.shape(x_meshgrid)[1] n_grid2 = np.shape(x_meshgrid)[2] fig = plt.figure(figsize= (14,4)) plt.subplot(1,2,1) xlim = (-4,4) ylim = (-4,4) plt.xlim(xlim) plt.ylim(ylim) #test data pos_ind = y_test==1 neg_ind = y_test==0 plt.xlabel('$x_1$ \n \n(a)',fontsize = 12) plt.ylabel(r'$x_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.scatter(x_test2[neg_ind,0],x_test2[neg_ind,1],label = r'$y=0$',s = 20, color = 'k',marker = 'o') plt.scatter(x_test2[pos_ind,0],x_test2[pos_ind,1],label = r'$y=1$',s =20, color = 'darkgrey',marker = 'x') #plt.legend() plt.subplot(1,2,2) gp_pred = np.load('plot_files/gp_moon_pred.npy')[:,1].reshape(np.size(x_plot),np.size(x_plot)) ax = fig.gca() n_grid1 = np.shape(x_meshgrid)[1] n_grid2 = np.shape(x_meshgrid)[2] cont = ax.contourf(x_meshgrid[0], x_meshgrid[1], gp_pred,cmap = 'binary') CB = fig.colorbar(cont, shrink=0.8) CB.ax.tick_params(labelsize=12) plt.xlim(xlim) plt.ylim(ylim) pos_ind = y==1 neg_ind = y==0 plt.xlabel('$x_1$ \n \n(b)',fontsize = 12) plt.ylabel(r'$x_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.scatter(x[neg_ind,0],x[neg_ind,1],label = r'$y=0$',s = 10, color = 'k',marker = 'o') 
plt.scatter(x[pos_ind,0],x[pos_ind,1],label = r'$y=1$',s =20, color = 'darkgrey',marker = 'x') #plt.legend() fig.savefig("plots/moon_data.pdf", bbox_inches='tight') #Load samples pmf_ytest_samp = np.exp(np.load("plot_files/ccopula_moon_logpmf_ytest_pr.npy")) mean_pmf = np.mean(pmf_ytest_samp,axis = 0).reshape(np.shape(x_meshgrid[0])[0],np.shape(x_meshgrid[1])[0]) std_pmf = np.std(pmf_ytest_samp,axis = 0).reshape(np.shape(x_meshgrid[0])[0],np.shape(x_meshgrid[1])[0]) fig = plt.figure(figsize= (14,4)) plt.subplot(1,2,1) ax = fig.gca() cont = ax.contourf(x_meshgrid[0], x_meshgrid[1], mean_pmf.reshape(np.shape(x_meshgrid[0])[0],np.shape(x_meshgrid[1])[0]),cmap = 'binary') CB = fig.colorbar(cont, shrink=0.8) CB.ax.tick_params(labelsize=12) plt.xlabel('$x_1$\n \n(a)',fontsize = 12) plt.ylabel(r'$x_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlim(xlim) plt.ylim(ylim) plt.scatter(x[neg_ind,0],x[neg_ind,1],label = r'$y=0$',s = 10, color = 'k',marker = 'o') plt.scatter(x[pos_ind,0],x[pos_ind,1],label = r'$y=1$',s =20, color = 'darkgrey',marker = 'x') #plt.legend() plt.subplot(1,2,2) ax = fig.gca() cont = ax.contourf(x_meshgrid[0], x_meshgrid[1], std_pmf.reshape(np.shape(x_meshgrid[0])[0],np.shape(x_meshgrid[1])[0]),cmap = 'binary') CB = fig.colorbar(cont, shrink=0.8) CB.ax.tick_params(labelsize=12) plt.xlabel('$x_1$ \n \n(b)',fontsize = 12) plt.ylabel(r'$x_2$',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.xlim(xlim) plt.ylim(ylim) plt.scatter(x[neg_ind,0],x[neg_ind,1],label = r'$y=0$',s = 10, color = 'k',marker = 'o') plt.scatter(x[pos_ind,0],x[pos_ind,1],label = r'$y=1$',s =20, color = 'darkgrey',marker = 'x') fig.savefig("plots/moon_fit.pdf", bbox_inches='tight') #Load sample and pdiff pmf_yn_samp = np.exp(np.load('plot_files/ccopula_moon_logpmf_yn_pr.npy')) pdiff = np.load('plot_files/ccopula_moon_pdiff.npy') T = 10000 plt.figure() f = plt.figure(figsize = (14,4)) plt.subplot(1,2,1) ax = f.gca() i = 30 print(x[i]) 
sns.distplot(pmf_yn_samp[:,i,0],color = 'k') plt.xlabel('$p_N(y=1 \mid x)$ \n \n(a)',fontsize = 12) plt.ylabel('Density',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.subplot(1,2,2) plt.plot(np.arange(n,n+T),pdiff[0,n:,i],color = 'black',alpha = 0.8) plt.xlabel('Forward samples $N$\n \n(b)',fontsize = 12) plt.ylabel('Distance',fontsize = 12) plt.xticks(fontsize=12) plt.yticks(fontsize = 12) plt.ylim((-0.004801830649375916, 0.14)) f.savefig('plots/convergence_moon.pdf', bbox_inches='tight') ``` ## 6: UCI Datasets ### Regression ``` #Output Test loglik for dataset in ["Boston","Concrete","Diabetes","Wine"]: #Load test log likelihoods test_loglik_cop = np.load('plot_files/copula_regression_loglik{}.npy'.format(dataset)) test_loglik_bayeslin = np.load('plot_files/bayeslin_regression_loglik{}.npy'.format(dataset)) test_loglik_gp= np.load('plot_files/gp_regression_loglik{}.npy'.format(dataset)) #Print average print('Dataset: {}'.format(dataset)) print('Avg test loglik for Bayes Linear = {} +- {}'.format(np.mean(test_loglik_bayeslin),np.std(test_loglik_bayeslin)/np.sqrt(10))) print('Avg test loglik for GP = {} +- {}'.format(np.mean(test_loglik_gp),np.std(test_loglik_gp)/np.sqrt(10))) print('Avg test loglik for Copula = {} +- {}\n'.format(np.mean(test_loglik_cop),np.std(test_loglik_cop)/np.sqrt(10))) ``` ### Classification ``` #Output Test loglik for dataset in ["Breast","Ionosphere","Parkinsons","Statlog"]: #Load test log likelihoods test_loglik_cop = np.load('plot_files/copula_classification_loglik{}.npy'.format(dataset)) test_loglik_logistic = np.load('plot_files/logistic_classification_loglik{}.npy'.format(dataset)) test_loglik_gp = np.load('plot_files/gp_classification_loglik{}.npy'.format(dataset)) #Print average print('Dataset: {}'.format(dataset)) print('Avg test loglik for Logistic = {} +- {}'.format(np.mean(test_loglik_logistic), np.std(test_loglik_logistic)/np.sqrt(10))) print('Avg test loglik for GP = {} +- 
{}'.format(np.mean(test_loglik_gp), np.std(test_loglik_gp)/np.sqrt(10))) print('Avg test loglik for Copula = {} +- {} \n'.format(np.mean(test_loglik_cop), np.std(test_loglik_cop)/np.sqrt(10))) ```
github_jupyter
# Project 3: Smart Beta Portfolio and Portfolio Optimization ## Overview Smart beta has a broad meaning, but we can say in practice that when we use the universe of stocks from an index, and then apply some weighting scheme other than market cap weighting, it can be considered a type of smart beta fund. A Smart Beta portfolio generally gives investors exposure or "beta" to one or more types of market characteristics (or factors) that are believed to predict prices while giving investors a diversified broad exposure to a particular market. Smart Beta portfolios generally target momentum, earnings quality, low volatility, and dividends or some combination. Smart Beta Portfolios are generally rebalanced infrequently and follow relatively simple rules or algorithms that are passively managed. Model changes to these types of funds are also rare, requiring prospectus filings with the US Securities and Exchange Commission in the case of US focused mutual funds or ETFs. Smart Beta portfolios are generally long-only; they do not short stocks. In contrast, a purely alpha-focused quantitative fund may use multiple models or algorithms to create a portfolio. The portfolio manager retains discretion in upgrading or changing the types of models and how often to rebalance the portfolio in an attempt to maximize performance in comparison to a stock benchmark. Managers may have discretion to short stocks in portfolios. Imagine you're a portfolio manager, and wish to try out some different portfolio weighting methods. One way to design a portfolio is to look at certain accounting measures (fundamentals) that, based on past trends, indicate stocks that produce better results. For instance, you may start with a hypothesis that dividend-issuing stocks tend to perform better than stocks that do not. This may not always be true of all companies; for instance, Apple does not issue dividends, but has had good historical performance. 
The hypothesis about dividend-paying stocks may go something like this: Companies that regularly issue dividends may also be more prudent in allocating their available cash, and may indicate that they are more conscious of prioritizing shareholder interests. For example, a CEO may decide to reinvest cash into pet projects that produce low returns. Or, the CEO may do some analysis, identify that reinvesting within the company produces lower returns compared to a diversified portfolio, and so decide that shareholders would be better served if they were given the cash (in the form of dividends). So according to this hypothesis, dividends may be both a proxy for how the company is doing (in terms of earnings and cash flow), but also a signal that the company acts in the best interest of its shareholders. Of course, it's important to test whether this works in practice. You may also have another hypothesis, with which you wish to design a portfolio that can then be made into an ETF. You may find that investors may wish to invest in passive beta funds, but wish to have less risk exposure (less volatility) in their investments. The goal of having a low volatility fund that still produces returns similar to an index may be appealing to investors who have a shorter investment time horizon, and so are more risk averse. So the objective of your proposed portfolio is to design a portfolio that closely tracks an index, while also minimizing the portfolio variance. Also, if this portfolio can match the returns of the index with less volatility, then it has a higher risk-adjusted return (same return, lower volatility). Smart Beta ETFs can be designed with both of these two general methods (among others): alternative weighting and minimum volatility ETF. ## Instructions Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. 
After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity. ## Packages When you implement the functions, you'll only need to use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code. The other packages that we're importing are `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` modules contain utility functions and graph functions. The `project_tests` package contains the unit tests for all the problems. ### Install Packages ``` import sys !{sys.executable} -m pip install -r requirements.txt ``` ### Load Packages ``` import pandas as pd import numpy as np import helper import project_helper import project_tests ``` ## Market Data ### Load Data For this universe of stocks, we'll be selecting large dollar volume stocks. We're using this universe since it is highly liquid. 
``` df = pd.read_csv('../../data/project_3/eod-quotemedia.csv') percent_top_dollar = 0.2 high_volume_symbols = project_helper.large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', percent_top_dollar) df = df[df['ticker'].isin(high_volume_symbols)] close = df.reset_index().pivot(index='date', columns='ticker', values='adj_close') volume = df.reset_index().pivot(index='date', columns='ticker', values='adj_volume') dividends = df.reset_index().pivot(index='date', columns='ticker', values='dividends') ``` ### View Data To see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix. ``` project_helper.print_dataframe(close) ``` # Part 1: Smart Beta Portfolio In Part 1 of this project, you'll build a portfolio using dividend yield to choose the portfolio weights. A portfolio such as this could be incorporated into a smart beta ETF. You'll compare this portfolio to a market cap weighted index to see how well it performs. Note that in practice, you'll probably get the index weights from a data vendor (such as companies that create indices, like MSCI, FTSE, Standard and Poor's), but for this exercise we will simulate a market cap weighted index. ## Index Weights The index we'll be using is based on large dollar volume stocks. Implement `generate_dollar_volume_weights` to generate the weights for this index. For each date, generate the weights based on dollar volume traded for that date. For example, assume the following is close prices and volume data: ``` Prices A B ... 2013-07-08 2 2 ... 2013-07-09 5 6 ... 2013-07-10 1 2 ... 2013-07-11 6 5 ... ... ... ... ... Volume A B ... 2013-07-08 100 340 ... 2013-07-09 240 220 ... 2013-07-10 120 500 ... 2013-07-11 10 100 ... ... ... ... ... ``` The weights created from the function `generate_dollar_volume_weights` should be the following: ``` A B ... 2013-07-08 0.126.. 0.194.. ... 2013-07-09 0.759.. 0.377.. ... 2013-07-10 0.075.. 0.285.. ... 2013-07-11 0.037.. 0.142.. ... ... ... ... ... 
``` ``` def generate_dollar_volume_weights(close, volume): """ Generate dollar volume weights. Parameters ---------- close : DataFrame Close price for each ticker and date volume : str Volume for each ticker and date Returns ------- dollar_volume_weights : DataFrame The dollar volume weights for each ticker and date """ assert close.index.equals(volume.index) assert close.columns.equals(volume.columns) total_vol = close * volume return total_vol.div(total_vol.sum(axis=1), axis=0) project_tests.test_generate_dollar_volume_weights(generate_dollar_volume_weights) ``` ### View Data Let's generate the index weights using `generate_dollar_volume_weights` and view them using a heatmap. ``` index_weights = generate_dollar_volume_weights(close, volume) project_helper.plot_weights(index_weights, 'Index Weights') ``` ## Portfolio Weights Now that we have the index weights, let's choose the portfolio weights based on dividend. You would normally calculate the weights based on trailing dividend yield, but we'll simplify this by just calculating the total dividend yield over time. Implement `calculate_dividend_weights` to return the weights for each stock based on its total dividend yield over time. This is similar to generating the weight for the index, but it's using dividend data instead. For example, assume the following is `dividends` data: ``` Prices A B 2013-07-08 0 0 2013-07-09 0 1 2013-07-10 0.5 0 2013-07-11 0 0 2013-07-12 2 0 ... ... ... ``` The weights created from the function `calculate_dividend_weights` should be the following: ``` A B 2013-07-08 NaN NaN 2013-07-09 0 1 2013-07-10 0.333.. 0.666.. 2013-07-11 0.333.. 0.666.. 2013-07-12 0.714.. 0.285.. ... ... ... ``` ``` def calculate_dividend_weights(dividends): """ Calculate dividend weights. 
Parameters ---------- dividends : DataFrame Dividend for each stock and date Returns ------- dividend_weights : DataFrame Weights for each stock and date """ #TODO: Implement function dividends = dividends.cumsum() return dividends.div(dividends.sum(axis=1), axis=0) project_tests.test_calculate_dividend_weights(calculate_dividend_weights) ``` ### View Data Just like the index weights, let's generate the ETF weights and view them using a heatmap. ``` etf_weights = calculate_dividend_weights(dividends) project_helper.plot_weights(etf_weights, 'ETF Weights') ``` ## Returns Implement `generate_returns` to generate returns data for all the stocks and dates from price data. You might notice we're implementing returns and not log returns. Since we're not dealing with volatility, we don't have to use log returns. ``` def generate_returns(prices): """ Generate returns for ticker and date. Parameters ---------- prices : DataFrame Price for each ticker and date Returns ------- returns : Dataframe The returns for each ticker and date """ return prices / prices.shift(1) - 1 project_tests.test_generate_returns(generate_returns) ``` ### View Data Let's generate the closing returns using `generate_returns` and view them using a heatmap. ``` returns = generate_returns(close) project_helper.plot_returns(returns, 'Close Returns') ``` ## Weighted Returns With the returns of each stock computed, we can use it to compute the returns for an index or ETF. Implement `generate_weighted_returns` to create weighted returns using the returns and weights. ``` def generate_weighted_returns(returns, weights): """ Generate weighted returns. 
Parameters ---------- returns : DataFrame Returns for each ticker and date weights : DataFrame Weights for each ticker and date Returns ------- weighted_returns : DataFrame Weighted returns for each ticker and date """ assert returns.index.equals(weights.index) assert returns.columns.equals(weights.columns) return returns * weights project_tests.test_generate_weighted_returns(generate_weighted_returns) ``` ### View Data Let's generate the ETF and index returns using `generate_weighted_returns` and view them using a heatmap. ``` index_weighted_returns = generate_weighted_returns(returns, index_weights) etf_weighted_returns = generate_weighted_returns(returns, etf_weights) project_helper.plot_returns(index_weighted_returns, 'Index Returns') project_helper.plot_returns(etf_weighted_returns, 'ETF Returns') ``` ## Cumulative Returns To compare performance between the ETF and Index, we're going to calculate the tracking error. Before we do that, we first need to calculate the index and ETF comulative returns. Implement `calculate_cumulative_returns` to calculate the cumulative returns over time given the returns. ``` def calculate_cumulative_returns(returns): """ Calculate cumulative returns. Parameters ---------- returns : DataFrame Returns for each ticker and date Returns ------- cumulative_returns : Pandas Series Cumulative returns for each date """ return (returns.sum(axis=1) + 1).cumprod() project_tests.test_calculate_cumulative_returns(calculate_cumulative_returns) ``` ### View Data Let's generate the ETF and index cumulative returns using `calculate_cumulative_returns` and compare the two. 
``` index_weighted_cumulative_returns = calculate_cumulative_returns(index_weighted_returns) etf_weighted_cumulative_returns = calculate_cumulative_returns(etf_weighted_returns) project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, etf_weighted_cumulative_returns, 'Smart Beta ETF vs Index') ``` ## Tracking Error In order to check the performance of the smart beta portfolio, we can calculate the annualized tracking error against the index. Implement `tracking_error` to return the tracking error between the ETF and benchmark. For reference, we'll be using the following annualized tracking error function: $$ TE = \sqrt{252} * SampleStdev(r_p - r_b) $$ Where $ r_p $ is the portfolio/ETF returns and $ r_b $ is the benchmark returns. _Note: When calculating the sample standard deviation, the delta degrees of freedom is 1, which is the also the default value._ ``` def tracking_error(benchmark_returns_by_date, etf_returns_by_date): """ Calculate the tracking error. Parameters ---------- benchmark_returns_by_date : Pandas Series The benchmark returns for each date etf_returns_by_date : Pandas Series The ETF returns for each date Returns ------- tracking_error : float The tracking error """ assert benchmark_returns_by_date.index.equals(etf_returns_by_date.index) return np.sqrt(252) * np.std(benchmark_returns_by_date - etf_returns_by_date, ddof=1) project_tests.test_tracking_error(tracking_error) ``` ### View Data Let's generate the tracking error using `tracking_error`. ``` smart_beta_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(etf_weighted_returns, 1)) print('Smart Beta Tracking Error: {}'.format(smart_beta_tracking_error)) ``` # Part 2: Portfolio Optimization Now, let's create a second portfolio. We'll still reuse the market cap weighted index, but this will be independent of the dividend-weighted portfolio that we created in part 1. 
We want to both minimize the portfolio variance and also want to closely track a market cap weighted index. In other words, we're trying to minimize the distance between the weights of our portfolio and the weights of the index. $Minimize \left [ \sigma^2_p + \lambda \sqrt{\sum_{1}^{m}(weight_i - indexWeight_i)^2} \right ]$ where $m$ is the number of stocks in the portfolio, and $\lambda$ is a scaling factor that you can choose. Why are we doing this? One way that investors evaluate a fund is by how well it tracks its index. The fund is still expected to deviate from the index within a certain range in order to improve fund performance. A way for a fund to track the performance of its benchmark is by keeping its asset weights similar to the weights of the index. We’d expect that if the fund has the same stocks as the benchmark, and also the same weights for each stock as the benchmark, the fund would yield about the same returns as the benchmark. By minimizing a linear combination of both the portfolio risk and distance between portfolio and benchmark weights, we attempt to balance the desire to minimize portfolio variance with the goal of tracking the index. ## Covariance Implement `get_covariance_returns` to calculate the covariance of the `returns`. We'll use this to calculate the portfolio variance. If we have $m$ stock series, the covariance matrix is an $m \times m$ matrix containing the covariance between each pair of stocks. We can use [`Numpy.cov`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html) to get the covariance. We give it a 2D array in which each row is a stock series, and each column is an observation at the same period of time. For any `NaN` values, you can replace them with zeros using the [`DataFrame.fillna`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html) function. The covariance matrix $\mathbf{P} = \begin{bmatrix} \sigma^2_{1,1} & ... & \sigma^2_{1,m} \\ ... & ... 
& ...\\ \sigma_{m,1} & ... & \sigma^2_{m,m} \\ \end{bmatrix}$ ``` def get_covariance_returns(returns): """ Calculate covariance matrices. Parameters ---------- returns : DataFrame Returns for each ticker and date Returns ------- returns_covariance : 2 dimensional Ndarray The covariance of the returns """ #TODO: Implement function return np.cov(returns.fillna(value=0).transpose()) project_tests.test_get_covariance_returns(get_covariance_returns) ``` ### View Data Let's look at the covariance generated from `get_covariance_returns`. ``` covariance_returns = get_covariance_returns(returns) covariance_returns = pd.DataFrame(covariance_returns, returns.columns, returns.columns) covariance_returns_correlation = np.linalg.inv(np.diag(np.sqrt(np.diag(covariance_returns)))) covariance_returns_correlation = pd.DataFrame( covariance_returns_correlation.dot(covariance_returns).dot(covariance_returns_correlation), covariance_returns.index, covariance_returns.columns) project_helper.plot_covariance_returns_correlation( covariance_returns_correlation, 'Covariance Returns Correlation Matrix') ``` ### portfolio variance We can write the portfolio variance $\sigma^2_p = \mathbf{x^T} \mathbf{P} \mathbf{x}$ Recall that the $\mathbf{x^T} \mathbf{P} \mathbf{x}$ is called the quadratic form. We can use the cvxpy function `quad_form(x,P)` to get the quadratic form. ### Distance from index weights We want portfolio weights that track the index closely. So we want to minimize the distance between them. Recall from the Pythagorean theorem that you can get the distance between two points in an x,y plane by adding the square of the x and y distances and taking the square root. Extending this to any number of dimensions is called the L2 norm. So: $\sqrt{\sum_{1}^{n}(weight_i - indexWeight_i)^2}$ Can also be written as $\left \| \mathbf{x} - \mathbf{index} \right \|_2$. 
There's a cvxpy function called [norm()](https://www.cvxpy.org/api_reference/cvxpy.atoms.other_atoms.html#norm) `norm(x, p=2, axis=None)`. The default is already set to find an L2 norm, so you would pass in one argument, which is the difference between your portfolio weights and the index weights. ### objective function We want to minimize both the portfolio variance and the distance of the portfolio weights from the index weights. We also want to choose a `scale` constant, which is $\lambda$ in the expression. $\mathbf{x^T} \mathbf{P} \mathbf{x} + \lambda \left \| \mathbf{x} - \mathbf{index} \right \|_2$ This lets us choose how much priority we give to minimizing the difference from the index, relative to minimizing the variance of the portfolio. If you choose a higher value for `scale` ($\lambda$). We can find the objective function using cvxpy `objective = cvx.Minimize()`. Can you guess what to pass into this function? ### constraints We can also define our constraints in a list. For example, you'd want the weights to sum to one. So $\sum_{1}^{n}x = 1$. You may also need to go long only, which means no shorting, so no negative weights. So $x_i >0 $ for all $i$. you could save a variable as `[x >= 0, sum(x) == 1]`, where x was created using `cvx.Variable()`. ### optimization So now that we have our objective function and constraints, we can solve for the values of $\mathbf{x}$. cvxpy has the constructor `Problem(objective, constraints)`, which returns a `Problem` object. The `Problem` object has a function solve(), which returns the minimum of the solution. In this case, this is the minimum variance of the portfolio. It also updates the vector $\mathbf{x}$. We can check out the values of $x_A$ and $x_B$ that gave the minimum portfolio variance by using `x.value` ``` import cvxpy as cvx def get_optimal_weights(covariance_returns, index_weights, scale=2.0): """ Find the optimal weights. 
Parameters ---------- covariance_returns : 2 dimensional Ndarray The covariance of the returns index_weights : Pandas Series Index weights for all tickers at a period in time scale : int The penalty factor for weights the deviate from the index Returns ------- x : 1 dimensional Ndarray The solution for x """ assert len(covariance_returns.shape) == 2 assert len(index_weights.shape) == 1 assert covariance_returns.shape[0] == covariance_returns.shape[1] == index_weights.shape[0] # number of stocks m is number of rows of returns, and also number of index weights m = covariance_returns.shape[1] # x variables (to be found with optimization) x = cvx.Variable(m) #portfolio variance, in quadratic form portfolio_variance = cvx.quad_form(x,covariance_returns) # euclidean distance (L2 norm) between portfolio and index weights distance_to_index = cvx.norm(x-index_weights,p=2,axis=0) #objective function objective = cvx.Minimize(portfolio_variance + scale * distance_to_index) #constraints constraints = [x >= 0, sum(x) == 1] #use cvxpy to solve the objective problem = cvx.Problem(objective=objective, constraints=constraints) problem.solve() #retrieve the weights of the optimized portfolio x_values = x.value return x_values project_tests.test_get_optimal_weights(get_optimal_weights) ``` ## Optimized Portfolio Using the `get_optimal_weights` function, let's generate the optimal ETF weights without rebalanceing. We can do this by feeding in the covariance of the entire history of data. We also need to feed in a set of index weights. We'll go with the average weights of the index over time. ``` raw_optimal_single_rebalance_etf_weights = get_optimal_weights(covariance_returns.values, index_weights.iloc[-1]) optimal_single_rebalance_etf_weights = pd.DataFrame( np.tile(raw_optimal_single_rebalance_etf_weights, (len(returns.index), 1)), returns.index, returns.columns) ``` With our ETF weights built, let's compare it to the index. 
Run the next cell to calculate the ETF returns and compare them to the index returns. ``` optim_etf_returns = generate_weighted_returns(returns, optimal_single_rebalance_etf_weights) optim_etf_cumulative_returns = calculate_cumulative_returns(optim_etf_returns) project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, optim_etf_cumulative_returns, 'Optimized ETF vs Index') optim_etf_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(optim_etf_returns, 1)) print('Optimized ETF Tracking Error: {}'.format(optim_etf_tracking_error)) ``` ## Rebalance Portfolio Over Time The single optimized ETF portfolio used the same weights for the entire history. These might not be the optimal weights for the entire period. Let's rebalance the portfolio over the same period instead of using the same weights. Implement `rebalance_portfolio` to rebalance a portfolio. Rebalance the portfolio every n days, where n is given as `shift_size`. When rebalancing, you should look back a certain number of days of data in the past, denoted as `chunk_size`. Using this data, compute the optimal weights using `get_optimal_weights` and `get_covariance_returns`. ``` def rebalance_portfolio(returns, index_weights, shift_size, chunk_size): """ Get weights for each rebalancing of the portfolio. 
Parameters ---------- returns : DataFrame Returns for each ticker and date index_weights : DataFrame Index weight for each ticker and date shift_size : int The number of days between each rebalance chunk_size : int The number of days to look in the past for rebalancing Returns ------- all_rebalance_weights : list of Ndarrays The ETF weights for each point they are rebalanced """ assert returns.index.equals(index_weights.index) assert returns.columns.equals(index_weights.columns) assert shift_size > 0 assert chunk_size >= 0 #TODO: Implement function weights_return = [] for i in range(chunk_size, len(returns), shift_size): start_date = i - chunk_size if returns.iloc[start_date:i,].shape[0] - chunk_size < 0: continue optimal_weights = get_optimal_weights(get_covariance_returns(returns.iloc[start_date:i,]), index_weights.iloc[i-1,]) weights_return.append(optimal_weights) return weights_return project_tests.test_rebalance_portfolio(rebalance_portfolio) ``` Run the following cell to rebalance the portfolio using `rebalance_portfolio`. ``` chunk_size = 250 shift_size = 5 all_rebalance_weights = rebalance_portfolio(returns, index_weights, shift_size, chunk_size) ``` ## Portfolio Turnover With the portfolio rebalanced, we need to use a metric to measure the cost of rebalancing the portfolio. Implement `get_portfolio_turnover` to calculate the annual portfolio turnover. We'll be using the formulas used in the classroom: $ AnnualizedTurnover =\frac{SumTotalTurnover}{NumberOfRebalanceEvents} * NumberofRebalanceEventsPerYear $ $ SumTotalTurnover =\sum_{t,n}{\left | x_{t,n} - x_{t+1,n} \right |} $ Where $ x_{t,n} $ are the weights at time $ t $ for equity $ n $. $ SumTotalTurnover $ is just a different way of writing $ \sum \left | x_{t_1,n} - x_{t_2,n} \right | $ ``` def get_portfolio_turnover(all_rebalance_weights, shift_size, rebalance_count, n_trading_days_in_year=252): """ Calculage portfolio turnover. 
Parameters ---------- all_rebalance_weights : list of Ndarrays The ETF weights for each point they are rebalanced shift_size : int The number of days between each rebalance rebalance_count : int Number of times the portfolio was rebalanced n_trading_days_in_year: int Number of trading days in a year Returns ------- portfolio_turnover : float The portfolio turnover """ assert shift_size > 0 assert rebalance_count > 0 total_turnover = 0 for i in range(len(all_rebalance_weights) - 1): total_turnover = total_turnover + np.cumsum(abs(all_rebalance_weights[i]-all_rebalance_weights[i+1]))[-1] numberRebalanceYear = n_trading_days_in_year/shift_size return total_turnover/rebalance_count * numberRebalanceYear project_tests.test_get_portfolio_turnover(get_portfolio_turnover) ``` Run the following cell to get the portfolio turnover from `get_portfolio turnover`. ``` print(get_portfolio_turnover(all_rebalance_weights, shift_size, len(all_rebalance_weights) - 1)) ``` That's it! You've built a smart beta portfolio in part 1 and did portfolio optimization in part 2. You can now submit your project. ## Submission Now that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.
github_jupyter
# Boltzmann Wealth Model ``` from mesa import Agent, Model from mesa.time import RandomActivation #activation order of agents matters but #there are 3 implementations - Random, Simultaneous %matplotlib inline import matplotlib.pyplot as plt import random #import seaborn as sns from mesa.space import MultiGrid from mesa.datacollection import DataCollector ``` ## Establishes Agent Class 1. Attributes (wealth) whe agent is initialized 2. move - move through multigrid 3. give money - identify neighbors, give money 4. Step which is action agent takes each time iteration ``` # Set up initial agents this is where you typically assign your attirbutes for each agent object class MoneyAgent(Agent): """ An agent with fixed initial wealth.""" def __init__(self, unique_id, model): super().__init__(unique_id, model) self.wealth = 1 def move(self): possible_steps = self.model.grid.get_neighborhood( self.pos, moore=True, include_center=False) new_position = random.choice(possible_steps) self.model.grid.move_agent(self, new_position) def give_money(self): cellmates = self.model.grid.get_cell_list_contents([self.pos]) if len(cellmates) > 1: other = random.choice(cellmates) other.wealth += 1 self.wealth -= 1 def step(self): self.move() if self.wealth > 0: self.give_money() ``` # Model class retains all agents and runs each step of the model # Contains 1. activation object - randomly shuffle agent order each step 2. world (i.e. multigrid) -allows more than one agent on a spot 3. data collector - collects data on agents 4. agent population 5. gini coefficient function - kept outside class for batch runner/ model class interaction 6. 
step function for model ``` def compute_gini(model): agent_wealths = [agent.wealth for agent in model.schedule.agents] x = sorted(agent_wealths) N = model.num_agents B = sum( xi * (N-i) for i,xi in enumerate(x) ) / (N*sum(x)) return (1 + (1/N) - 2*B) # Your model class which stores all your agents class MoneyModel(Model): """A model with some number of agents.""" def __init__(self, N, width, height): self.num_agents = N self.grid = MultiGrid(width, height, True) self.schedule = RandomActivation(self) # create a datacollector to caputre key metric self.datacollector = DataCollector( model_reporters={"Gini": compute_gini}, # A function to call agent_reporters={"Wealth": "wealth"}) # An agent attribute # Create population of agents for i in range(self.num_agents): a = MoneyAgent(i, self) self.schedule.add(a) # Add the agent to a random grid cell x = random.randrange(self.grid.width) y = random.randrange(self.grid.height) self.grid.place_agent(a, (x, y)) def step(self): self.datacollector.collect(self) self.schedule.step() ``` ## Run the model and plot the results ## Runs 50 Agents in a 10 X 10 grid ``` model = MoneyModel(100, 10, 10) for i in range(100): model.step() ``` # Plot Location and Wealth ``` import numpy as np plt.figure(figsize=(10,10)) agent_counts = np.zeros((model.grid.width, model.grid.height)) for cell in model.grid.coord_iter(): cell_content, x, y = cell agent_count = len(cell_content) agent_counts[x][y] = agent_count plt.imshow(agent_counts, interpolation='nearest') plt.colorbar() # If running from a text editor or IDE, remember you'll need the following: ``` # Plot Gini Coefficient - g coefficient by step ``` plt.figure(figsize=(10,10)) gini = model.datacollector.get_model_vars_dataframe() plt.plot(gini) ``` # See Dataframe of Plot ``` agent_wealth = model.datacollector.get_agent_vars_dataframe() agent_wealth ``` # Plot Histogram of Step ``` plt.figure(figsize=(10,10)) end_wealth = agent_wealth.xs(19, level="Step")["Wealth"] # .xs is a pandas 
dataframe to get a cross section of the frame plt.hist(end_wealth, bins=range(agent_wealth.Wealth.max()+1)) ``` # Plot Path of One Agent ``` plt.figure(figsize=(10,10)) one_agent_wealth = agent_wealth.xs(20, level="AgentID") plt.plot(one_agent_wealth) ``` ## BATCH RUNNER Adds one element - self.running = True ``` from mesa.batchrunner import BatchRunner class MoneyModel(Model): """A model with some number of agents.""" def __init__(self, N, width, height): self.num_agents = N self.grid = MultiGrid(width, height, True) self.schedule = RandomActivation(self) self.running = True # create a datacollector to caputre key metric self.datacollector = DataCollector( model_reporters={"Gini": compute_gini}, # A function to call agent_reporters={"Wealth": "wealth"}) # An agent attribute # Create population of agents for i in range(self.num_agents): a = MoneyAgent(i, self) self.schedule.add(a) # Add the agent to a random grid cell x = random.randrange(self.grid.width) y = random.randrange(self.grid.height) self.grid.place_agent(a, (x, y)) def step(self): self.datacollector.collect(self) self.schedule.step() fixed_params = {"width": 10, "height": 10} variable_params = {"N": range(10, 500, 10)} batch_run = BatchRunner(MoneyModel, fixed_parameters=fixed_params, variable_parameters=variable_params, iterations=5, max_steps=100, model_reporters={"Gini": compute_gini}) batch_run.run_all() plt.figure(figsize=(10,10)) run_data = batch_run.get_model_vars_dataframe() run_data.head() plt.scatter(run_data.N, run_data.Gini) ```
github_jupyter
# Behringer X-Touch Mini This notebook can be downloaded [here](https://github.com/jupyter-widgets/midicontrols/blob/master/examples/Example.ipynb). Because Chrome is the only browser that implements the [Web MIDI API](https://developer.mozilla.org/en-US/docs/Web/API/MIDIAccess), this package only works in Chrome. Firefox has [recent discussion](https://bugzilla.mozilla.org/show_bug.cgi?id=836897) on how to move forward with implementing this standard. Each midi controller needs a custom implementation exposing the interface for that specific midi controller as buttons, knobs, faders, etc. Currently we support the [Behringer X-Touch Mini](https://www.behringer.com/Categories/Behringer/Computer-Audio/Desktop-Controllers/X-TOUCH-MINI/p/P0B3M#googtrans(en|en)) controller, which is currently available for around \$60. ``` from ipymidicontrols import XTouchMini, xtouchmini_ui x = XTouchMini() ``` We can work directly with the controls to assign values, listen for value changes, etc., just like a normal widget. Run the cell below, then turn the first knob or press the upper left button. You should see the values below update. Note that the button value toggles when held down, and the light on the physical button reflects this state, where true means light on, false means light off. ``` left_knob = x.rotary_encoders[0] upper_left_button = x.buttons[0] display(left_knob) display(upper_left_button) ``` You can also adjust the values from Python and the changes are reflected in the kernel. ``` left_knob.value = 50 ``` ## Rotary encoders (knobs) Rotary encoders (i.e., knobs) have a min and max that can be set. ``` left_knob.min=0 left_knob.max=10 ``` Knobs have a variety of ways to display the value in the lights around the knob. If your value represents a deviation from some reference, you might use the `'trim'` light mode. If your value represents the width of a symmetric range around some reference, you might use the `'spread'` light mode. 
``` # light_mode can be 'single', 'wrap', 'trim', 'spread' left_knob.light_mode = 'spread' ``` We'll set the min/max back to the default (0, 100) range for the rest of the example for consistency with other knobs. ``` left_knob.min = 0 left_knob.max = 100 ``` ## Buttons Since the button has a True/False state, and holding down the button momentarily toggles the state, if we set the button to True when it is not held down, we reverse the toggling (i.e., it is now True by default, and pressing it toggles it to False). ``` upper_left_button.value = True # Now press the button to see it toggle to false. ``` We can change this toggling behavior in the button by setting the button `mode`. It defaults to `'momentary'`, which means the button state toggles only when the button is held down. Setting `mode` to `'toggle'` makes the button toggle its value each time it is pressed. Run the following cell and press the button several times. Notice how the toggle behavior is different. ``` upper_left_button.mode = 'toggle' ``` Each rotary encoder can also be pressed as a button and the toggle mode can be set as well. Run the cell below and press the left knob. ``` left_knob_button = x.rotary_buttons[0] left_knob_button.mode = 'toggle' display(left_knob_button) ``` ## Faders The fader can send its value to Python and has `min`, `max`, and `value` properties. ``` fader = x.faders[0] display(fader) ``` Because the X-Touch Mini does not have motorized faders, the fader cannot be moved to represent a value set from Python. Any value set from Python is overridden by the next fader movement. ## Listening to changes As with any widget, we can observe changes from any control to run a function. ``` from ipywidgets import Output out = Output() @out.capture() def f(change): print('upper left button is %s'%(change.new)) upper_left_button.observe(f, 'value') display(out) ``` ## Linking to other widgets You can synchronize these widgets up to other widgets using `link()` to give a nicer GUI. 
Run the cell below and then try turning the left knob or pressing the upper left button. Also try adjusting the slider and checkbox below to see that the values are synchronized both ways. ``` from ipywidgets import link, IntSlider, Checkbox, VBox slider = IntSlider(description="Left knob", min=left_knob.min, max=left_knob.max) checkbox = Checkbox(description="Upper left button") link((left_knob, 'value'), (slider, 'value')) link((upper_left_button, 'value'), (checkbox, 'value')) display(VBox([slider, checkbox])) ``` This package includes a convenience function, `xtouchmini_ux()`, to link each control up to a slider or checkbox widget in a GUI that roughly approximates the physical layout. ``` xtouchmini_ui(x) ``` ## Experimenting with options Let's set various controls to explore the available button and knob light modes, as well as some random values to see what they look like on the controller. ``` for b in x.buttons: b.mode='toggle' for b in x.rotary_buttons[:4]: b.mode='toggle' for b in x.rotary_buttons[4:]: b.mode='momentary' for b in x.side_buttons: b.mode='momentary' for b, mode in zip(x.rotary_encoders, ['single', 'single', 'trim', 'trim', 'wrap', 'wrap', 'spread', 'spread']): b.light_mode = mode # Set some random values import secrets for b in x.buttons: b.value=secrets.choice([False, True]) for b in x.rotary_encoders: b.value = secrets.randbelow(101) ``` ## Clearing values Finally, let's clear all of the values. ``` # Clear all values for b in x.buttons: b.value = False for b in x.rotary_buttons: b.value = False for b in x.rotary_encoders: b.value = 0 ```
github_jupyter
# PyTorch Image Captioning model for time series This notebook demonstrates how to use an image-captioning model for time-series prediction. The data set is based on the NASDAQ 100 data provided in "[A Dual-Stage Attention-Based Recurrent Neural Network for Time Series Prediction](https://arxiv.org/pdf/1704.02971.pdf)". The code reuses the same sampling code from Chandler Zuo's [blog](http://chandlerzuo.github.io/blog/2017/11/darnn). The image captioning model was based off of the following PyTorch [tutorial](https://github.com/yunjey/pytorch-tutorial/tree/master/tutorials/03-advanced/image_captioning) on image captioning. ``` import torch from torch import nn from torch.autograd import Variable from torch import optim import torch.nn.functional as F import matplotlib # matplotlib.use('Agg') %matplotlib notebook import os import datetime as dt import itertools import pandas as pd import matplotlib.pyplot as plt import numpy as np use_cuda = torch.cuda.is_available() # use_cuda = False print("Is CUDA available? 
", use_cuda) ``` ### Logger set up ``` import logging modelName = "cnn_lstm" # check if results directory exists if not os.path.isdir("./results/%s" %(modelName)): os.makedirs("./results/%s" %(modelName)) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) dlogger = logging.getLogger(__name__) dlogger.setLevel(logging.DEBUG) # create file handler log = logging.FileHandler("./results/%s/%s_results.log" %(modelName, modelName)) log.setLevel(logging.INFO) debug_log = logging.FileHandler("./results/%s/%s_debug.log" %(modelName, modelName)) debug_log.setLevel(logging.DEBUG) logger.addHandler(log) dlogger.addHandler(debug_log) ``` ### Encoder: CNN ``` class EncoderCNN(nn.Module): def __init__(self, input_dim=1, channel_size=64, batch_size=10, T=100, feature_size=81): super(EncoderCNN, self).__init__() self.input_dim = input_dim # num channels self.batch_size = batch_size self.channel_size = channel_size self.T = T self.feature_size = feature_size # (N, C, H, W) = (num_batch, features, history, stocks) # Conv2d - out:(N, 64, 100, 81), kernel(3, 5) stride:1 # added a linear layer to shrink the num stocks lower due to memory self.small_feature_size = 10 self.first_linear = nn.Linear(feature_size, self.small_feature_size) self.first_cnn_layer = nn.Sequential( nn.Conv2d(input_dim, channel_size, kernel_size=3, stride=1, padding=1), nn.ReLU(), nn.Dropout(0.2)) # Conv2d - out:(N, 64, 100, 81), kernel(3,5) stride:1 self.second_cnn_layer = nn.Sequential( nn.Conv2d(channel_size, channel_size, kernel_size=3, stride=1, padding=1), nn.ReLU(), nn.Dropout(0.2)) # dense layer - in: (N, 100*64*81), out: (N, 100*81) self.first_dense_layer = nn.Sequential( nn.Linear(T*self.small_feature_size*channel_size, T*self.feature_size), nn.ReLU(), nn.Dropout(0.2)) def forward(self, xt): # conv2d input (N, 1, H, W) expects (N, C, H, W) # print("x: ", xt.size()) N = xt.size(0) # lin: in (N, 1, H, W) out: (N, 1, H, 10) out = self.first_linear(xt) # print("cnn: linear: ", out.size()) # 
cnn: in (N, 1, H, 10) out: (N, C, H, 10) out = self.first_cnn_layer(out) # print("cnn: first_layer output: ", out.size()) # cnn: in (N, C, H, 10) out: (N, C, H, 10) out = self.second_cnn_layer(out) # print("cnn: second_layer output: ", out.size()) # reshape for linear layer out = out.view(N, self.T*self.small_feature_size*self.channel_size) # print("flatten: ", out.size()) # first dense layer in: (N, C*H*W) out: (N, H*W) out = self.first_dense_layer(out) # print("first dense layer output: ", out.size()) # reshape output for (N, T, W) out = out.reshape(out.size(0), self.T, self.feature_size) # print("reshape out: ", out.size()) return out ``` ### Decoder: LSTM ``` class DecoderLSTM(nn.Module): def __init__(self, feature_size, decoder_hidden_size, T=100, num_layers=2): super(DecoderLSTM, self).__init__() self.T = T self.feature_size = feature_size self.decoder_hidden_size = decoder_hidden_size # print("decoder: decoder_hidden_size: ", decoder_hidden_size) # lstm - in: (N, T, W) out: (N, T, H) self.lstm_layer = nn.LSTM(feature_size, decoder_hidden_size, num_layers=num_layers, dropout=0.2, batch_first=True) # dense layer - in: (N, T*H), out: (N, T*H) self.dense_layer = nn.Sequential( nn.Linear(T*(decoder_hidden_size+1), T*(decoder_hidden_size+1)), nn.ReLU(), nn.Dropout(0.2)) # final layer - in: (N, T*H) out:(N, 1) self.final_layer = nn.Linear(T*(decoder_hidden_size+1), 1) # log info logger.info("decoder - feature_size: %s hidden_size: %s T: %s" \ %(feature_size, decoder_hidden_size, T)) def forward(self, features, y_history): # x (N, T, W) y_history (N, T, 1) # print("features: ", features.size()) # print("y_history: ", y_history.size()) # lstm layer in: (N, T, W) out: (N, T, H) out, lstm_out = self.lstm_layer(features) # print("lstm layer: ", out.size()) # clipping to eliminate nan's from lstm out.register_hook(lambda x: x.clamp(min=-100, max=100)) # combine with y_history out = torch.cat((out, y_history), dim=2) # print("out cat: ", out.size()) # flatten in: (N, T, 
H) out: (N, T*(H=1)) out = out.contiguous().view(-1, out.size(1)*out.size(2)) # print("out flatten: ", out.size()) # final layer in: (N, T*(H+1)), out: (N, 1) out = self.final_layer(out) # print("final layer: ", out.size()) return out def init_hidden(self, x): return Variable(x.data.new(1, x.size(0), self.decoder_hidden_size).zero_()) ``` ### Train & Test ``` # Train the model class cnn_lstm: def __init__(self, file_data, decoder_hidden_size = 64, T = 10, input_dim= 1, channel_size = 64, feature_size = 81, learning_rate = 0.01, batch_size = 128, parallel = False, debug = False): self.T = T dat = pd.read_csv(file_data, nrows = 100 if debug else None) print("Shape of data: %s.\nMissing in data: %s" %(dat.shape, dat.isnull().sum().sum())) self.X = dat[[x for x in dat.columns if x != 'NDX']][:-1].as_matrix() # drop last row since using forward y y = dat.NDX.shift(-1).values self.y = y[:-1].reshape((-1, 1)) print("X shape", self.X.shape) print("y shape", self.y.shape) self.batch_size = batch_size if use_cuda: #input_dim=1, channel_size=64, batch_size=10, T=100, feature_size=81) self.encoder = EncoderCNN(input_dim=input_dim, channel_size=channel_size, batch_size=batch_size, T=T, feature_size=feature_size).cuda() # feature_size, decoder_hidden_size, T=100, num_layers=2) self.decoder = DecoderLSTM(feature_size, decoder_hidden_size, T=T, num_layers=2).cuda() else: self.encoder = EncoderCNN(input_dim=input_dim, channel_size=channel_size, batch_size=batch_size, T=T, feature_size=feature_size).cpu() self.decoder = DecoderLSTM(feature_size, decoder_hidden_size, T=T, num_layers=2).cpu() if parallel: self.encoder = nn.DataParallel(self.encoder) self.decoder = nn.DataParallel(self.decoder) self.encoder_optimizer = optim.Adam(params = filter(lambda p: p.requires_grad, self.encoder.parameters()), lr = learning_rate) self.decoder_optimizer = optim.Adam(params = filter(lambda p: p.requires_grad, self.decoder.parameters()), lr = learning_rate) self.train_size = int(self.X.shape[0] * 
0.7) self.train_mean = np.mean(self.y[:self.train_size]) print("train mean: %s" %(self.train_mean)) self.y = self.y - self.train_mean # Question: why Adam requires data to be normalized? print("Training size: %s" %(self.train_size)) logger.info("Training size: %s" %(self.train_size)) def train(self, n_epochs = 10): iter_per_epoch = int(np.ceil(self.train_size * 1. / self.batch_size)) print("Iterations per epoch: %s ~ %s" %(self.train_size * 1. / self.batch_size, iter_per_epoch)) logger.info("Iterations per epoch: %s ~ %s" %(self.train_size * 1. / self.batch_size, iter_per_epoch)) self.iter_losses = np.zeros(n_epochs * iter_per_epoch) self.epoch_losses = np.zeros(n_epochs) self.loss_func = nn.MSELoss() n_iter = 0 learning_rate = 1. for i in range(n_epochs): print("\n-------------------------------------------") print("Epoch: ", i) logger.info("\n-------------------------------------------") logger.info("Epoch: %s" %(i)) perm_idx = np.random.permutation(self.train_size - self.T) j = 0 while j < self.train_size - self.T: batch_idx = perm_idx[j:(j + self.batch_size)] X = np.zeros((len(batch_idx), self.T, self.X.shape[1])) y_history = np.zeros((len(batch_idx), self.T)) y_target = self.y[batch_idx + self.T] for k in range(len(batch_idx)): X[k, :, :] = self.X[batch_idx[k] : (batch_idx[k] + self.T), :] # y_history[k, :] (T-1,) y_history[k, :] = self.y[batch_idx[k] : (batch_idx[k] + self.T)].flatten() # train loss = self.train_iteration(X, y_history, y_target) # print("loss: ", loss.item()) self.iter_losses[int(i * iter_per_epoch + j / self.batch_size)] = loss if (j / self.batch_size) % 50 == 0: print("\tbatch: %s loss: %s" %(j / self.batch_size, loss)) logger.info("\tbatch: %s loss: %s" %(j / self.batch_size, loss)) j += self.batch_size n_iter += 1 # decrease learplt.savefig("./results/%s/predi")plt.savefig("./results/%s/predi")ning rate if n_iter % 10000 == 0 and n_iter > 0: for param_group in self.encoder_optimizer.param_groups: param_group['lr'] = param_group['lr'] * 
0.9 logger.info("encoder learning rate: ", param_group["lr"]) for param_group in self.decoder_optimizer.param_groups: param_group['lr'] = param_group['lr'] * 0.9 logger.info("decoder learning rate: ", param_group["lr"]) self.epoch_losses[i] = np.mean(self.iter_losses[range(i * iter_per_epoch, (i + 1) * iter_per_epoch)]) if i % 10 == 0: print("Epoch %s, loss: %s" %(i, self.epoch_losses[i])) logger.info("Epoch %s, loss: %s" %(i, self.epoch_losses[i])) if i % 10 == 0: print("\n Predict") y_train_pred = self.predict(on_train = True) y_test_pred = self.predict(on_train = False) y_pred = np.concatenate((y_train_pred, y_test_pred)) y_pred = y_train_pred plt.figure() plt.plot(range(1, 1 + len(self.y)), self.y, label = "True") plt.plot(range(self.T , len(y_train_pred) + self.T), y_train_pred, label = 'Predicted - Train') plt.plot(range(self.T + len(y_train_pred) , len(self.y) + 1), y_test_pred, label = 'Predicted - Test') plt.legend(loc = 'upper left') plt.show() plt.savefig("./results/%s/predict_%s_epoch%s.png" %(modelName, modelName, i), bbox_inches="tight") def train_iteration(self, X, y_history, y_target): # zero gradient - original code placemenet self.encoder_optimizer.zero_grad() self.decoder_optimizer.zero_grad() # define variables if use_cuda: Xt = Variable(torch.from_numpy(X).type(torch.FloatTensor).cuda()) yht = Variable(torch.from_numpy(y_history).type(torch.FloatTensor).cuda()) y_true = Variable(torch.from_numpy(y_target).type(torch.FloatTensor).cuda()) else: Xt = Variable(torch.from_numpy(X).type(torch.FloatTensor).cpu()) yht = Variable(torch.from_numpy(y_history).type(torch.FloatTensor).cpu()) y_true = Variable(torch.from_numpy(y_target).type(torch.FloatTensor).cpu()) # run models get prediction # Xt (N, C, H, W) Xt = Xt.view(Xt.size(0), 1, Xt.size(1), Xt.size(2)) # yht (N, T, 1) yht = yht.unsqueeze(2) features = self.encoder(Xt) y_pred = self.decoder(features, yht) # loss loss = self.loss_func(y_pred, y_true) loss.backward() # optimizer 
    def predict(self, on_train = False):
        """Run the trained encoder/decoder over the train or test split.

        Parameters
        ----------
        on_train : bool
            If True, predict over the training windows; otherwise over the
            held-out tail of the series.

        Returns
        -------
        y_pred : ndarray, shape (num_predictions,)
            One-step-ahead predictions.  Values are mean-centered, since
            self.y had the training mean subtracted in __init__.
        """
        if on_train:
            # One prediction per complete T-length window inside the train split.
            y_pred = np.zeros(self.train_size - self.T + 1)
            print("PREDICT train")
        else:
            y_pred = np.zeros(self.X.shape[0] - self.train_size)
            print("PREDICT test")
        i = 0
        # Process the prediction targets in mini-batches of self.batch_size.
        while i < len(y_pred):
            batch_idx = np.array(range(len(y_pred)))[i : (i + self.batch_size)]
            X = np.zeros((len(batch_idx), self.T, self.X.shape[1]))
            y_history = np.zeros((len(batch_idx), self.T))
            for j in range(len(batch_idx)):
                if on_train:
                    # Training windows start at the sample index itself.
                    X[j, :, :] = self.X[range(batch_idx[j], batch_idx[j] + self.T), :]
                    y_history[j, :] = self.y[range(batch_idx[j], batch_idx[j]+ self.T)].flatten()
                else:
                    # Test windows end at train_size + offset and reach back T steps,
                    # so the first test predictions reuse the train-split tail as history.
                    X[j, :, :] = self.X[range(batch_idx[j] + self.train_size - self.T, batch_idx[j] + self.train_size), :]
                    y_history[j, :] = self.y[range(batch_idx[j] + self.train_size - self.T, batch_idx[j]+ self.train_size)].flatten()
            if use_cuda:
                Xt = Variable(torch.from_numpy(X).type(torch.FloatTensor).cuda())
                yht = Variable(torch.from_numpy(y_history).type(torch.FloatTensor).cuda())
                # Reshape to the (N, C, H, W) layout the encoder expects.
                Xt = Xt.view(Xt.size(0), 1, Xt.size(1), Xt.size(2))
                # yht (N, T, 1)
                yht = yht.unsqueeze(2)
                features = self.encoder(Xt)
                pred_cuda = self.decoder(features, yht)
                # Move predictions back to host memory before storing.
                y_pred[i:(i+self.batch_size)] = pred_cuda.cpu().data.numpy()[:, 0]
            else:
                Xt = Variable(torch.from_numpy(X).type(torch.FloatTensor).cpu())
                yht = Variable(torch.from_numpy(y_history).type(torch.FloatTensor).cpu())
                # Reshape to the (N, C, H, W) layout the encoder expects.
                Xt = Xt.view(Xt.size(0), 1, Xt.size(1), Xt.size(2))
                # yht (N, T, 1)
                yht = yht.unsqueeze(2)
                features = self.encoder(Xt)
                y_pred[i:(i + self.batch_size)] = self.decoder(features, yht).data.numpy()[:, 0]
            i += self.batch_size
        return y_pred
ydf.pred.loc[9259:]) print("pre_mse: %s" %(pre_mse)) print("post mse: %s" %(post_mse)) ```
github_jupyter
``` # -*- coding: utf-8 -*- """ This program makes learning ev-gmm. """ # __future__ module make compatible python2 and python3 from __future__ import division, print_function # basic modules import os import os.path import time # for warning ignore import warnings #warning.filterwarnings('ignore') # for file system manupulation from shutil import rmtree import glob import argparse # for save object import pickle # for make glaph %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") plt.rcParams['figure.figsize'] = (16, 5) import librosa.display # for scientific computing import numpy as np from numpy.linalg import norm from sklearn.decomposition import PCA from sklearn.mixture import GMM # GMM class cannot use after sklearn 0.20.0 import sklearn.mixture #from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky from sklearn.preprocessing import StandardScaler import scipy.sparse from scipy.signal import firwin, lfilter # for display audio controler from IPython.display import Audio # for manuplate audio data import soundfile as sf import pyworld as pw import pysptk from dtw import dtw from fastdtw import fastdtw class WORLD(object): """ WORLD based speech analyzer and synthezer. Ref : https://github.com/k2kobayashi/sprocket/ """ def __init__(self, fs=16000, fftl=1024, shiftms=5.0, minf0=40.0, maxf0=500.0): """ Parameters ---------- fs : int Sampling frequency fftl : int FFT length shiftms : float Shift length [ms] minf0 : float Floor in F0 estimation maxf0 : float Seli in F0 estimation """ self.fs = fs self.fftl = fftl self.shiftms = shiftms self.minf0 = minf0 self.maxf0 = maxf0 def analyze(self, x): """ Analyze acoustic featueres. 
Parameters ---------- x : array, shape(`T`) monoral speech signal in time domain Returns ---------- f0 : array, shape(`T`) F0 sequence sp : array, shape(`T`, `fftl / 2 + 1`) Spectral envelope sequence ap : array, shape(`T`, `fftl / 2 + 1`) aperiodicity sequence """ f0, time_axis = pw.harvest(x, self.fs, f0_floor=self.minf0, f0_ceil=self.maxf0, frame_period=self.shiftms) sp = pw.cheaptrick(x, f0, time_axis, self.fs, fft_size=self.fftl) ap = pw.d4c(x, f0, time_axis, self.fs, fft_size=self.fftl) assert sp.shape == ap.shape return f0, sp, ap def analyze_f0(self, x): """ Analyze f0. Parameters ---------- x : array, shape(`T`) monoral speech signal in time domain Returns ---------- f0 : array, shape(`T`) F0 sequence """ f0, time_axis = pw.harvest(x, self.fs, f0_floor=self.minf0, f0_ceil=self.maxf0, frame_period=self.shiftms) assert f0.shape == x.shape() return f0 def synthesis(self, f0, sp, ap): """ Re-synthesizes a speech waveform from acoustic featueres. Parameters ---------- f0 : array, shape(`T`) F0 sequence sp : array, shape(`T`, `fftl / 2 + 1`) Spectral envelope sequence ap : array, shape(`T`, `fftl / 2 + 1`) aperiodicity sequence """ return pw.synthesize(f0, sp, ap, self.fs, frame_period=self.shiftms) class FeatureExtractor(object): """ Analyze acoustic features from a waveform. This class may have several types of estimeter like WORLD or STRAIGHT. Default type is WORLD. 
Ref : https://github.com/k2kobayashi/sprocket/ """ def __init__(self, analyzer='world', fs=16000, fftl=1024, shiftms=5.0, minf0=50.0, maxf0=500.0): """ Parameters ---------- analyzer : str Analyzer fs : int Sampling frequency fftl : int FFT length shiftms : float Shift length [ms] minf0 : float Floor in F0 estimation maxf0 : float Seli in F0 estimation """ self.analyzer = analyzer self.fs = fs self.fftl = fftl self.shiftms = shiftms self.minf0 = minf0 self.maxf0 = maxf0 if self.analyzer == 'world': self.analyzer = WORLD(fs=self.fs, fftl=self.fftl, minf0=self.minf0, maxf0=self.maxf0, shiftms=self.shiftms) else: raise('Analyzer Error : not support type, see FeatureExtractor class.') self._f0 = None self._sp = None self._ap = None def analyze(self, x): """ Analyze acoustic featueres. Parameters ---------- x : array, shape(`T`) monoral speech signal in time domain Returns ---------- f0 : array, shape(`T`) F0 sequence sp : array, shape(`T`, `fftl / 2 + 1`) Spectral envelope sequence ap : array, shape(`T`, `fftl / 2 + 1`) aperiodicity sequence """ self.x = np.array(x, dtype=np.float) self._f0, self._sp, self._ap = self.analyzer.analyze(self.x) # check f0 < 0 self._f0[self._f0 < 0] = 0 if np.sum(self._f0) == 0.0: print("Warning : F0 values are all zero.") return self._f0, self._sp, self._ap def analyze_f0(self, x): """ Analyze f0. Parameters ---------- x : array, shape(`T`) monoral speech signal in time domain Returns ---------- f0 : array, shape(`T`) F0 sequence """ self.x = np.array(x, dtype=np.float) self._f0 = self.analyzer.analyze_f0(self.x) # check f0 < 0 self._f0[self._f0 < 0] = 0 if np.sum(self._f0) == 0.0: print("Warning : F0 values are all zero.") return self._f0 def mcep(self, dim=24, alpha=0.42): """ Convert mel-cepstrum sequence from spectral envelope. 
Parameters ---------- dim : int mel-cepstrum dimension alpha : float parameter of all-path filter Returns ---------- mcep : array, shape(`T`, `dim + 1`) mel-cepstrum sequence """ self._analyzed_check() return pysptk.sp2mc(self._sp, dim, alpha) def codeap(self): """ """ self._analyzed_check() return pw.code_aperiodicity(self._ap, self.fs) def npow(self): """ Normalized power sequence from spectral envelope. Returns ---------- npow : vector, shape(`T`, `1`) Normalized power sequence of the given waveform """ self._analyzed_check() npow = np.apply_along_axis(self._spvec2pow, 1, self._sp) meanpow = np.mean(npow) npow = 10.0 * np.log10(npow / meanpow) return npow def _spvec2pow(self, specvec): """ """ fftl2 = len(specvec) - 1 fftl = fftl2 * 2 power = specvec[0] + specvec[fftl2] for k in range(1, fftl2): power += 2.0 * specvec[k] power /= fftl return power def _analyzed_check(self): if self._f0 is None and self._sp is None and self._ap is None: raise('Call FeatureExtractor.analyze() before this method.') class Synthesizer(object): """ Synthesize a waveform from acoustic features. Ref : https://github.com/k2kobayashi/sprocket/ """ def __init__(self, fs=16000, fftl=1024, shiftms=5.0): """ Parameters ---------- fs : int Sampling frequency fftl : int FFT length shiftms : float Shift length [ms] """ self.fs = fs self.fftl = fftl self.shiftms = shiftms def synthesis(self, f0, mcep, ap, rmcep=None, alpha=0.42): """ Re-synthesizes a speech waveform from acoustic featueres. 
Parameters ---------- f0 : array, shape(`T`) F0 sequence mcep : array, shape(`T`, `dim`) mel-cepstrum sequence ap : array, shape(`T`, `fftl / 2 + 1`) aperiodicity sequence rmcep : array, shape(`T`, `dim`) array of reference mel-cepstrum sequence alpha : float parameter of all-path filter Returns ---------- wav : array, syntesized waveform """ if rmcep is not None: # power modification mcep = mod_power(mcep, rmcep, alpha=alpha) sp = pysptk.mc2sp(mcep, alpha, self.fftl) wav = pw.synthesize(f0, sp, ap, self.fs, frame_period=self.shiftms) return wav def synthesis_diff(self, x, diffmcep, rmcep=None, alpha=0.42): """ Re-synthesizes a speech waveform from acoustic featueres. filtering with a differential mel-cepstrum. Parameters ---------- x : array, shape(`samples`) array of waveform sequence diffmcep : array, shape(`T`, `dim`) array of differential mel-cepstrum sequence rmcep : array, shape(`T`, `dim`) array of reference mel-cepstrum sequence alpha : float parameter of all-path filter Returns ---------- wav : array, syntesized waveform """ x = x.astype(np.float64) dim = diffmcep.shape[1] - 1 shiftl = int(self.fs / 1000 * self.shiftms) if rmcep is not None: # power modification diffmcep = mod_power(rmcep + diffmcep, rmcep, alpha=alpha) - rmcep # mc2b = transform mel-cepstrum to MLSA digital filter coefficients. b = np.apply_along_axis(pysptk.mc2b, 1, diffmcep, alpha) mlsa_fil = pysptk.synthesis.Synthesizer(pysptk.synthesis.MLSADF(dim, alpha=alpha), shiftl) wav = mlsa_fil.synthesis(x, b) return wav def synthesis_sp(self, f0, sp, ap): """ Re-synthesizes a speech waveform from acoustic featueres. 
Parameters ---------- f0 : array, shape(`T`) F0 sequence spc : array, shape(`T`, `dim`) mel-cepstrum sequence ap : array, shape(`T`, `fftl / 2 + 1`) aperiodicity sequence Returns ---------- wav : array, syntesized waveform """ wav = pw.synthesize(f0, sp, ap, self.fs, frame_period=self.shiftms) return wav def mod_power(cvmcep, rmcep, alpha=0.42, irlen=256): """ power modification based on inpuulse responce Parameters ---------- cvmcep : array, shape(`T`, `dim`) array of converted mel-cepstrum rmcep : arraym shape(`T`, `dim`) array of reference mel-cepstrum alpha : float parameter of all-path filter irlen : int Length for IIR filter Returns ---------- modified_cvmcep : array, shape(`T`, `dim`) array of power modified converted mel-cepstrum """ if rmcep.shape != cvmcep.shape: raise ValueError( "The shape of the converted and reference mel-cepstrum are different : {} / {}.format(cvmcep.shape, rmcep.shape)" ) # mc2e = Compute energy from mel-cepstrum. e-option cv_e = pysptk.mc2e(cvmcep, alpha=alpha, irlen=irlen) r_e = pysptk.mc2e(rmcep, alpha=alpha, irlen=irlen) dpow = np.log(r_e / cv_e) / 2 modified_cvmcep = np.copy(cvmcep) modified_cvmcep[:, 0] += dpow return modified_cvmcep # def util methods def melcd(array1, array2): """ calculate mel-cepstrum distortion Parameters ---------- array1, array2 : array, shape(`T`, `dim`) or shape(`dim`) Array of original and target. 
Returns ---------- mcd : scala, number > 0 Scala of mel-cepstrum distoriton """ if array1.shape != array2.shape: raise ValueError( "The shape of both array are different : {} / {}.format(array1.shape,array2.shape)" ) if array1.ndim == 2: diff = array1 - array2 mcd = 10.0 / np.log(10) * np.mean(np.sqrt(2.0 * np.sum(diff ** 2, axis=1))) elif array1.ndim == 1: diff = array1 - array2 mcd = 10.0 / np.log(10) * np.sqrt(2.0 * np.sum(diff ** 2)) else: raise ValueError("Dimension mismatch.") return mcd def delta(data, win=[-1.0, 1.0, 0]): """ calculate delta component Parameters ---------- data : array, shape(`T`, `dim`) Array of static matrix sequence. win : array, shape(`3`) The shape of window matrix. Returns ---------- delta : array, shape(`T`, `dim`) Array of delta matrix sequence. """ if data.ndim == 1: # change vector into 1d-array T = len(data) dim = data.ndim data = data.reshape(T, dim) else: T, dim = data.shape win = np.array(win, dtype=np.float64) delta = np.zeros((T, dim)) delta[0] = win[0] * data[0] + win[1] * data[1] delta[-1] = win[0] * data[-2] + win[1] * data[-1] for i in range(len(win)): delta[1:T - 1] += win[i] * delta[i:T - 2 + i] return delta def static_delta(data, win=[-1.0, 1.0, 0]): """ calculate static and delta component Parameters ---------- data : array, shape(`T`, `dim`) Array of static matrix sequence. win : array, shape(`3`) The shape of window matrix. Returns ---------- sddata : array, shape(`T`, `dim * 2`) Array of static and delta matrix sequence. """ sddata = np.c_[data, delta(data, win)] assert sddata.shape[1] == data.shape[1] * 2 return sddata def construct_static_and_delta_matrix(T, D, win=[-1.0, 1.0, 0]): """ calculate static and delta transformation matrix Parameters ---------- T : scala, `T` Scala of time length D : scala, `D` Scala of the number of dimension. win : array, shape(`3`) The shape of window matrix. Returns ---------- W : array, shape(`2 * D * T`, `D * T`) Array of static and delta transformation matrix. 
""" static = [0, 1, 0] delta = win assert len(static) == len(delta) # generate full W DT = D * T ones = np.ones(DT) row = np.arange(2 * DT).reshape(2 * T, D) # generate serial numbers static_row = row[::2] # [1,2,3,4,5] => [1,3,5] delta_row = row[1::2] # [1,2,3,4,5] => [2,4] col = np.arange(DT) data = np.array([ones * static[0], ones * static[1], ones * static[2], ones * delta[0], ones * delta[1], ones * delta[2]]).flatten() row = np.array([[static_row] * 3, [delta_row] * 3]).flatten() col = np.array([[col - D, col, col + D] * 2]).flatten() # remove component at first and end frame valid_idx = np.logical_not(np.logical_or(col < 0, col >= DT)) W = scipy.sparse.csr_matrix( (data[valid_idx], (row[valid_idx], col[valid_idx])), shape=(2 * DT, DT)) W.eliminate_zeros() return W def extfrm(data, npow, power_threshold=-20): """ Extract frame over the power threshold Parameters ---------- data : array, shape(`T`, `dim`) array of input data npow : array, shape(`T`) vector of normalized power sequence threshold : scala scala of power threshold [dB] Returns ---------- data : array, shape(`T_ext`, `dim`) remaining data after extracting frame `T_ext` <= `T` """ T = data.shape[0] if T != len(npow): raise("Length of two vectors is different.") valid_index = np.where(npow > power_threshold) extdata = data[valid_index] assert extdata.shape[0] <= T return extdata def estimate_twf(orgdata, tardata, distance='melcd', fast=True, otflag=None): """ time warping function estimator Parameters ---------- orgdata : array, shape(`T_org`, `dim`) array of source feature tardata : array, shape(`T_tar`, `dim`) array of target feature distance : str distance function fast : bool use fastdtw instead of dtw otflag : str Alignment into the length of specification 'org' : alignment into original length 'tar' : alignment into target length Returns ---------- twf : array, shape(`2`, `T`) time warping function between original and target """ if distance == 'melcd': def distance_func(x, y): return melcd(x, 
y) else: raise ValueError('this distance method is not support.') if fast: _, path = fastdtw(orgdata, tardata, dist=distance_func) twf = np.array(path).T else: _, _, _, twf = dtw(orgdata, tardata, distance_func) if otflag is not None: twf = modify_twf(twf, otflag=otflag) return twf def align_data(org_data, tar_data, twf): """ get aligned joint feature vector Parameters ---------- org_data : array, shape(`T_org`, `dim_org`) Acoustic feature vector of original speaker tar_data : array, shape(`T_tar`, `dim_tar`) Acoustic feature vector of target speaker twf : array, shape(`2`, `T`) time warping function between original and target Returns ---------- jdata : array, shape(`T_new`, `dim_org + dim_tar`) Joint feature vector between source and target """ jdata = np.c_[org_data[twf[0]], tar_data[twf[1]]] return jdata def modify_twf(twf, otflag=None): """ align specified length Parameters ---------- twf : array, shape(`2`, `T`) time warping function between original and target otflag : str Alignment into the length of specification 'org' : alignment into original length 'tar' : alignment into target length Returns ---------- mod_twf : array, shape(`2`, `T_new`) time warping function of modified alignment """ if otflag == 'org': of, indice = np.unique(twf[0], return_index=True) mod_twf = np.c_[of, twf[1][indice]].T elif otflag == 'tar': tf, indice = np.unique(twf[1], return_index=True) mod_twf = np.c_[twf[0][indice], tf].T return mod_twf def low_cut_filter(x, fs, cutoff=70): """ low cut filter Parameters ---------- x : array, shape('samples') waveform sequence fs : array, int Sampling frequency cutoff : float cutoff frequency of low cut filter Returns ---------- lct_x : array, shape('samples') Low cut filtered waveform sequence """ nyquist = fs // 2 norm_cutoff = cutoff / nyquist # low cut filter fil = firwin(255, norm_cutoff, pass_zero=False) lct_x = lfilter(fil, 1, x) return lct_x def extsddata(data, npow, power_threshold=-20): """ get power extract static and delta feature 
vector Parameters ---------- data : array, shape(`T`, `dim`) acoustic feature vector npow : array, shape(`T`) normalized power vector power_threshold : float power threshold Returns ---------- extsddata : array, shape(`T_new`, `dim * 2`) silence remove static and delta feature vector """ extsddata = extfrm(static_delta(data), npow, power_threshold=power_threshold) return extsddata def transform_jnt(array_list): num_files = len(array_list) for i in range(num_files): if i == 0: jnt = array_list[i] else: jnt = np.r_[jnt, array_list[i]] return jnt class F0statistics(object): """ Estimate F0 statistics and convert F0 """ def __init__(self): pass def estimate(self, f0list): """ estimate F0 statistics from list of f0 Parameters ---------- f0list : list, shape(`f0num`) List of several F0 sequence Returns ---------- f0stats : array, shape(`[mean, std]`) values of mean and standard deviation for log f0 """ n_files = len(f0list) assert n_files != 0 for i in range(n_files): f0 = f0list[i] nonzero_indices = np.nonzero(f0) if i == 0: f0s = np.log(f0[nonzero_indices]) else: f0s = np.r_[f0s, np.log(f0[nonzero_indices])] f0stats = np.array([np.mean(f0s), np.std(f0s)]) return f0stats def convert(self, f0, orgf0stats, tarf0stats): """ convert F0 based on F0 statistics Parameters ---------- f0 : array, shape(`T`, `1`) array of F0 sequence orgf0stats : array, shape(`[mean, std]`) vectors of mean and standard deviation of log f0 for original speaker tarf0stats : array, shape(`[mean, std]`) vectors of mean and standard deviation of log f0 for target speaker Returns ---------- cvf0 : array, shape(`T`, `1`) array of converted F0 sequence """ # get length and dimension T = len(f0) # perform f0 conversion cvf0 = np.zeros(T) nonzero_indices = f0 > 0 cvf0[nonzero_indices] = np.exp((tarf0stats[1] / orgf0stats[1]) * (np.log(f0[nonzero_indices]) - orgf0stats[0]) + tarf0stats[0]) return cvf0 class GV(object): """ Estimate statistics and perform postfilter based on the GV statistics. 
""" def __init__(self): pass def estimate(self, datalist): """ estimate GV statistics from list of data Parameters ---------- datalist : list, shape(`num_data`) List of several data ([T, dim]) sequence Returns ---------- gvstats : array, shape(`2`, `dim`) array of mean and standard deviation for GV """ n_files = len(datalist) assert n_files != 0 var = [] for i in range(n_files): data = datalist[i] var.append(np.var(data, axis=0)) # calculate vm and vv vm = np.mean(np.array(var), axis=0) vv = np.var(np.array(var), axis=0) gvstats = np.r_[vm, vv] gvstats = gvstats.reshape(2, len(vm)) return gvstats def postfilter(self, data, gvstats, cvgvstats=None, alpha=1.0, startdim=1): """ perform postfilter based on GV statistics into data Parameters ---------- data : array, shape(`T`, `dim`) array of data sequence gvstats : array, shape(`2`, `dim`) array of mean and variance for target GV cvgvstats : array, shape(`2`, `dim`) array of mean and variance for converted GV alpha : float morphing coefficient between GV transformed data and data. alpha * gvpf(data) + (1 - alpha) * data startdim : int start dimension to perform GV postfilter Returns ---------- filtered_data : array, shape(`T`, `data`) array of GV postfiltered data sequnece """ # get length and dimension T, dim = data.shape assert gvstats is not None assert dim == gvstats.shape[1] # calculate statics of input data datamean = np.mean(data, axis=0) if cvgvstats is None: # use variance of the given data datavar = np.var(data, axis=0) else: # use variance of trained gv stats datavar = cvgvstats[0] # perform GV postfilter filterd = np.sqrt(gvstats[0, startdim:] / datavar[startdim:]) * (data[:, startdim:] - datamean[startdim:]) + datamean[startdim:] filterd_data = np.c_[data[:, :startdim], filterd] return alpha * filterd_data + (1 - alpha) * data # 0. 
# config path
__versions = "pre-stored0.1.3"
__same_path = "./utterance/" + __versions + "/"
prepare_path = __same_path + "output/"

pre_stored_source_list = __same_path + 'pre-source/**/V01/T01/**/*.wav'
pre_stored_list = __same_path + "pre/**/V01/T01/**/*.wav"
output_path = "./utterance/tried/ej/ej-f/1/adapt1/"

# 1. estimate features
feat = FeatureExtractor()
synthesizer = Synthesizer()

org_f0list = None
org_splist = None
org_mceplist = None
org_aplist = None
org_npowlist = None
org_codeaplist = None

if os.path.exists(prepare_path + "_org_f0.pickle") \
        and os.path.exists(prepare_path + "_org_sp.pickle") \
        and os.path.exists(prepare_path + "_org_ap.pickle") \
        and os.path.exists(prepare_path + "_org_mcep.pickle") \
        and os.path.exists(prepare_path + "_org_npow.pickle") \
        and os.path.exists(prepare_path + "_org_codeap.pickle"):
    # cached features exist: load them instead of re-analyzing
    with open(prepare_path + "_org_f0.pickle", 'rb') as f:
        org_f0list = pickle.load(f)
    with open(prepare_path + "_org_sp.pickle", 'rb') as f:
        org_splist = pickle.load(f)
    with open(prepare_path + "_org_ap.pickle", 'rb') as f:
        org_aplist = pickle.load(f)
    with open(prepare_path + "_org_mcep.pickle", 'rb') as f:
        org_mceplist = pickle.load(f)
    with open(prepare_path + "_org_npow.pickle", 'rb') as f:
        org_npowlist = pickle.load(f)
    with open(prepare_path + "_org_codeap.pickle", 'rb') as f:
        org_codeaplist = pickle.load(f)
else:
    org_f0list = []
    org_splist = []
    org_mceplist = []
    org_aplist = []
    org_npowlist = []
    org_codeaplist = []

    ite = 0
    for files in sorted(glob.iglob(pre_stored_source_list, recursive=True)):
        wavf = files
        x, fs = sf.read(wavf)
        # bug fix: np.float was removed from NumPy (>= 1.24)
        x = np.array(x, dtype=np.float64)
        x = low_cut_filter(x, fs, cutoff=70)
        assert fs == 16000

        print("extract acoustic featuers: " + wavf)
        f0, sp, ap = feat.analyze(x)
        mcep = feat.mcep()
        npow = feat.npow()
        codeap = feat.codeap()

        # resynthesis from the raw spectral envelope (sanity-check output)
        wav = synthesizer.synthesis_sp(f0, sp, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(prepare_path + "src_ansys_{}_.wav".format(ite), wav, fs)

        org_f0list.append(f0)
        org_splist.append(sp)
        org_mceplist.append(mcep)
        org_aplist.append(ap)
        org_npowlist.append(npow)
        org_codeaplist.append(codeap)

        # resynthesis through the mel-cepstrum (sanity-check output)
        wav = synthesizer.synthesis(f0, mcep, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(prepare_path + "src_mcep_{}_.wav".format(ite), wav, fs)
        ite = ite + 1

    with open(prepare_path + "_org_f0.pickle", 'wb') as f:
        pickle.dump(org_f0list, f)
    with open(prepare_path + "_org_sp.pickle", 'wb') as f:
        pickle.dump(org_splist, f)
    with open(prepare_path + "_org_npow.pickle", 'wb') as f:
        pickle.dump(org_npowlist, f)
    with open(prepare_path + "_org_ap.pickle", 'wb') as f:
        pickle.dump(org_aplist, f)
    with open(prepare_path + "_org_mcep.pickle", 'wb') as f:
        pickle.dump(org_mceplist, f)
    with open(prepare_path + "_org_codeap.pickle", 'wb') as f:
        pickle.dump(org_codeaplist, f)

mid_f0list = None
mid_mceplist = None
mid_aplist = None
mid_npowlist = None
mid_splist = None
mid_codeaplist = None

if os.path.exists(prepare_path + "_mid_f0.pickle") \
        and os.path.exists(prepare_path + "_mid_sp_0_.pickle") \
        and os.path.exists(prepare_path + "_mid_ap_0_.pickle") \
        and os.path.exists(prepare_path + "_mid_mcep.pickle") \
        and os.path.exists(prepare_path + "_mid_npow.pickle") \
        and os.path.exists(prepare_path + "_mid_codeap.pickle"):
    with open(prepare_path + "_mid_f0.pickle", 'rb') as f:
        mid_f0list = pickle.load(f)

    # sp/ap are stored in chunks of len(org_*list) files to bound pickle size
    for i in range(0, len(org_splist) * 21, len(org_splist)):
        with open(prepare_path + "_mid_sp_{}_.pickle".format(i), 'rb') as f:
            temp_splist = pickle.load(f)
            if mid_splist is None:
                mid_splist = temp_splist
            else:
                mid_splist = mid_splist + temp_splist
    for i in range(0, len(org_aplist) * 21, len(org_aplist)):
        with open(prepare_path + "_mid_ap_{}_.pickle".format(i), 'rb') as f:
            temp_aplist = pickle.load(f)
            if mid_aplist is None:
                mid_aplist = temp_aplist
            else:
                mid_aplist = mid_aplist + temp_aplist

    with open(prepare_path + "_mid_mcep.pickle", 'rb') as f:
        mid_mceplist = pickle.load(f)
    with open(prepare_path + "_mid_npow.pickle", 'rb') as f:
        mid_npowlist = pickle.load(f)
    with open(prepare_path + "_mid_codeap.pickle", 'rb') as f:
        mid_codeaplist = pickle.load(f)
else:
    mid_f0list = []
    mid_mceplist = []
    mid_aplist = []
    mid_npowlist = []
    mid_splist = []
    mid_codeaplist = []

    ite = 0
    for files in sorted(glob.iglob(pre_stored_list, recursive=True)):
        wavf = files
        x, fs = sf.read(wavf)
        # bug fix: np.float was removed from NumPy (>= 1.24)
        x = np.array(x, dtype=np.float64)
        x = low_cut_filter(x, fs, cutoff=70)
        assert fs == 16000

        print("extract acoustic featuers: " + wavf)
        f0, sp, ap = feat.analyze(x)
        mcep = feat.mcep()
        npow = feat.npow()
        codeap = feat.codeap()

        wav = synthesizer.synthesis_sp(f0, sp, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(prepare_path + "mid_ansys_{}_.wav".format(ite), wav, fs)

        mid_f0list.append(f0)
        mid_splist.append(sp)
        mid_mceplist.append(mcep)
        mid_aplist.append(ap)
        mid_npowlist.append(npow)
        mid_codeaplist.append(codeap)

        wav = synthesizer.synthesis(f0, mcep, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(prepare_path + "mid_mcep_{}_.wav".format(ite), wav, fs)
        ite = ite + 1

    with open(prepare_path + "_mid_f0.pickle", 'wb') as f:
        print(f)
        pickle.dump(mid_f0list, f)
    with open(prepare_path + "_mid_npow.pickle", 'wb') as f:
        print(f)
        pickle.dump(mid_npowlist, f)
    # sp/ap chunked into groups of len(org_*list) to keep pickles small
    for i in range(0, len(mid_splist), len(org_splist)):
        with open(prepare_path + "_mid_sp_{}_.pickle".format(i), 'wb') as f:
            print(f)
            pickle.dump(mid_splist[i:i + len(org_splist)], f)
    for i in range(0, len(mid_aplist), len(org_aplist)):
        with open(prepare_path + "_mid_ap_{}_.pickle".format(i), 'wb') as f:
            print(f)
            pickle.dump(mid_aplist[i:i + len(org_aplist)], f)
    with open(prepare_path + "_mid_mcep.pickle", 'wb') as f:
        print(f)
        pickle.dump(mid_mceplist, f)
    with open(prepare_path + "_mid_codeap.pickle", 'wb') as f:
        print(f)
        pickle.dump(mid_codeaplist, f)


class GMMTrainer(object):
    """Train a GMM with several types of covariance matrix.

    Parameters
    ----------
    n_mix : int
        Number of mixture components of the GMM.
    n_iter : int
        Number of EM iterations.
    covtype : str
        Covariance type of the GMM ('full': full covariance matrix).
    params : str
        Parameters updated during training ('w': weights, 'm': means,
        'c': covariances).

    Attributes
    ----------
    param : sklearn-based model
        Parameters of the GMM.
    """

    def __init__(self, n_mix=64, n_iter=100, covtype='full', params='wmc'):
        self.n_mix = n_mix
        self.n_iter = n_iter
        self.covtype = covtype
        self.params = params

        # NOTE: sklearn.mixture.GMM is the legacy (pre-0.20) scikit-learn API
        self.param = sklearn.mixture.GMM(n_components=self.n_mix,
                                         covariance_type=self.covtype,
                                         n_iter=self.n_iter,
                                         params=self.params)

    def train(self, jnt):
        """Fit the GMM to the given joint feature vectors.

        Parameters
        ----------
        jnt : array, shape(`T`, `jnt.shape[0]`)
            Joint feature vector of original and target features,
            consisting of static and delta components.
        """
        if self.covtype == 'full':
            self.param.fit(jnt)

        return


class GMMConvertor(object):
    """Convert features using a trained joint-density GMM.

    Offers conversion techniques such as Maximum Likelihood Parameter
    Generation (MLPG).

    Parameters
    ----------
    n_mix : int
        Number of mixture components of the GMM.
    covtype : str
        Covariance type of the GMM ('full': full covariance matrix).
    gmmmode : str
        Type of the GMM for opening (`None`: normal JD-GMM).

    Attributes
    ----------
    param : sklearn-based model
        Parameters of the GMM.
    W : shape(`n_mix`)
        Mixture component weights of the GMM.
    jmean : shape(`n_mix`, `jnt.shape[0]`)
        Joint mean vectors of the GMM.
    jcov : shape(`n_mix`, `jnt.shape[0]`, `jnt.shape[0]`)
        Joint covariance matrices of the GMM.
    """

    def __init__(self, n_mix=64, covtype='full', gmmmode=None):
        self.n_mix = n_mix
        self.covtype = covtype
        self.gmmmode = gmmmode

    def open_from_param(self, param):
        """Open a GMM trained by GMMTrainer.

        Parameters
        ----------
        param : sklearn-based GMM
            Fitted model, taken from GMMTrainer.param.
        """
        self.param = param
        self._deploy_parameters()
        return

    def convert(self, data, cvtype='mlpg'):
        """Convert data based on the conditional probability density.

        Parameters
        ----------
        data : array, shape(`T`, `dim`)
            Original data to be converted.
        cvtype : str
            Conversion technique ('mlpg': maximum likelihood parameter
            generation).

        Returns
        -------
        odata : array, shape(`T`, `dim`)
            Converted data.
        """
        # estimate parameter sequence
        cseq, wseq, mseq, covseq = self._gmmmap(data)

        if cvtype == 'mlpg':
            odata = self._mlpg(mseq, covseq)
        else:
            raise ValueError('please choose conversion mode in `mlpg`.')

        return odata

    def _gmmmap(self, sddata):
        # parameters of the sequential data
        T, sddim = sddata.shape

        # posterior sequence
        wseq = self.pX.predict_proba(sddata)

        # maximum-likelihood mixture sequence
        cseq = np.argmax(wseq, axis=1)

        mseq = np.zeros((T, sddim))
        covseq = np.zeros((T, sddim, sddim))
        for t in range(T):
            # maximum-likelihood mixture component in frame t
            m = cseq[t]

            # conditional mean vector sequence
            mseq[t] = self.meanY[m] + self.A[m] @ (sddata[t] - self.meanX[m])

            # conditional covariance sequence
            covseq[t] = self.cond_cov_inv[m]

        return cseq, wseq, mseq, covseq

    def _mlpg(self, mseq, covseq):
        # parameters of the sequential data
        T, sddim = mseq.shape

        # prepare W
        W = construct_static_and_delta_matrix(T, sddim // 2)

        # prepare D
        D = get_diagonal_precision_matrix(T, sddim, covseq)

        # W'D
        WD = W.T @ D
        # W'DW
        WDW = WD @ W
        # W'Dm
        WDM = WD @ mseq.flatten()

        # estimate y = (W'DW)^-1 * W'Dm
        odata = scipy.sparse.linalg.spsolve(
            WDW, WDM, use_umfpack=False).reshape(T, sddim // 2)

        return odata

    def _deploy_parameters(self):
        # read JD-GMM parameters from self.param
        self.W = self.param.weights_
        self.jmean = self.param.means_
        self.jcov = self.param.covars_

        # divide GMM parameters into source and target parameters
        sddim = self.jmean.shape[1] // 2
        self.meanX = self.jmean[:, 0:sddim]
        self.meanY = self.jmean[:, sddim:]
        self.covXX = self.jcov[:, :sddim, :sddim]
        self.covXY = self.jcov[:, :sddim, sddim:]
        self.covYX = self.jcov[:, sddim:, :sddim]
        self.covYY = self.jcov[:, sddim:, sddim:]

        # change model parameters of the GMM into those of gmmmode
        if self.gmmmode is None:
            pass
        else:
            raise ValueError('please choose GMM mode in [None]')

        # estimate parameters for conversion
        self._set_Ab()
        self._set_pX()

        return

    def _set_Ab(self):
        # calculate A and b from self.jmean, self.jcov
        sddim = self.jmean.shape[1] // 2

        # inverse covariance for covariance XX in each mixture
        self.covXXinv = np.zeros((self.n_mix, sddim, sddim))
        for m in range(self.n_mix):
            self.covXXinv[m] = np.linalg.inv(self.covXX[m])

        # calculate A, b, and the conditional covariance given X
        self.A = np.zeros((self.n_mix, sddim, sddim))
        self.b = np.zeros((self.n_mix, sddim))
        self.cond_cov_inv = np.zeros((self.n_mix, sddim, sddim))
        for m in range(self.n_mix):
            # A = yxcov_m * xxcov_m^-1
            self.A[m] = self.covYX[m] @ self.covXXinv[m]

            # b = mean^Y - A * mean^X
            self.b[m] = self.meanY[m] - self.A[m] @ self.meanX[m]

            # cov^(Y|X)^-1 = (yycov - A * xycov)^-1
            self.cond_cov_inv[m] = np.linalg.inv(
                self.covYY[m] - self.A[m] @ self.covXY[m])

        return

    def _set_pX(self):
        # probability density function of X
        self.pX = sklearn.mixture.GMM(n_components=self.n_mix,
                                      covariance_type=self.covtype)
        self.pX.weights_ = self.W
        self.pX.means_ = self.meanX
        self.pX.covars_ = self.covXX

        # the following would be required to estimate the posterior
        # p(x | \lambda^(X)) with newer scikit-learn:
        # self.pX.precisions_cholesky_ = _compute_precision_cholesky(
        #     self.covXX, self.covtype)
        return


def get_diagonal_precision_matrix(T, D, covseq):
    """Block-diagonal sparse precision matrix built from covseq."""
    return scipy.sparse.block_diag(covseq, format='csr')


def get_alignment(odata, onpow, tdata, tnpow, opow=-20, tpow=-20,
                  sd=0, cvdata=None, given_twf=None, otflag=None,
                  distance='melcd'):
    """Get the alignment between original and target features.

    Parameters
    ----------
    odata : array, shape(`T`, `dim`)
        Acoustic feature vector of original.
    onpow : array, shape(`T`)
        Normalized power vector of original.
    tdata : array, shape(`T`, `dim`)
        Acoustic feature vector of target.
    tnpow : array, shape(`T`)
        Normalized power vector of target.
    opow : float
        Power threshold of original.
    tpow : float
        Power threshold of target.
    sd : int
        Start dimension used for alignment.
    cvdata : array, shape(`T`, `dim`), optional
        Converted original data.
    given_twf : array, shape(`T_new`, `dim * 2`), optional
        Use the given time warping function instead of estimating one.
    otflag : str
        Alignment into the length of specification
        ('org': original length, 'tar': target length).
    distance : str
        Distance function to be used.

    Returns
    -------
    jdata : array, shape(`T_new`, `dim * 2`)
        Joint static and delta feature vector.
    twf : array, shape(`2`, `T_new`)
        Time warping function.
    mcd : float
        Mel-cepstrum distortion between the aligned arrays.
    """
    # silence-removed static+delta features
    oexdata = extsddata(odata[:, sd:], onpow, power_threshold=opow)
    texdata = extsddata(tdata[:, sd:], tnpow, power_threshold=tpow)

    if cvdata is None:
        align_odata = oexdata
    else:
        # align using the converted features instead of the raw original
        cvexdata = extsddata(cvdata, onpow, power_threshold=opow)
        align_odata = cvexdata

    if given_twf is None:
        twf = estimate_twf(align_odata, texdata,
                           distance=distance, fast=False, otflag=otflag)
    else:
        twf = given_twf

    jdata = align_data(oexdata, texdata, twf)
    mcd = melcd(align_odata[twf[0]], texdata[twf[1]])

    return jdata, twf, mcd


def align_feature_vectors(odata, onpows, tdata, tnpows, opow=-100,
                          tpow=-100, itnum=3, sd=0, given_twfs=None,
                          otflag=None):
    """Iteratively align source and target features into joint vectors.

    Parameters
    ----------
    odata : list, (`num_files`)
        List of original feature vectors.
    onpows : list, (`num_files`)
        List of original npows.
    tdata : list, (`num_files`)
        List of target feature vectors.
    tnpows : list, (`num_files`)
        List of target npows.
    opow : float
        Power threshold of original.
    tpow : float
        Power threshold of target.
    itnum : int
        Number of alignment iterations.
    sd : int
        Start dimension of the feature vector used for alignment.
    given_twfs : array, shape(`T_new`, `dim * 2`), optional
        Use a given alignment during the first iteration.
    otflag : str
        Alignment into the length of specification
        ('org': original length, 'tar': target length).

    Returns
    -------
    jfvs : list
        Joint static and delta feature vectors.
    twfs : list
        Time warping functions.
    """
    num_files = len(odata)
    cvgmm, cvdata = None, None
    # cleanup: removed the dead manual counter (`it = 1` / `it += 1`) that
    # shadowed the for-loop variable in the original.
    for it in range(1, itnum + 1):
        print('{}-th joint feature extraction starts.'.format(it))

        # alignment
        twfs, jfvs = [], []
        for i in range(num_files):
            if it == 1 and given_twfs is not None:
                gtwf = given_twfs[i]
            else:
                gtwf = None

            if it > 1:
                # align against the converted features produced by the
                # previous iteration's GMM
                cvdata = cvgmm.convert(static_delta(odata[i][:, sd:]))

            jdata, twf, mcd = get_alignment(odata[i], onpows[i],
                                            tdata[i], tnpows[i],
                                            opow=opow, tpow=tpow, sd=sd,
                                            cvdata=cvdata, given_twf=gtwf,
                                            otflag=otflag)
            twfs.append(twf)
            jfvs.append(jdata)
            print('distortion [dB] for {}-th file: {}'.format(i + 1, mcd))
        jnt_data = transform_jnt(jfvs)

        if it != itnum:
            # train a GMM on the joint data, unless this is the final pass
            datagmm = GMMTrainer()
            datagmm.train(jnt_data)
            cvgmm = GMMConvertor()
            cvgmm.open_from_param(datagmm.param)

    return jfvs, twfs

# 2.
# estimate twf and jnt
if os.path.exists(prepare_path + "_jnt_mcep_0_.pickle"):
    # joint feature vectors already cached
    pass
else:
    # process the mid utterances in chunks of len(org_mceplist) files
    for i in range(0, len(mid_mceplist), len(org_mceplist)):
        org_mceps = org_mceplist
        org_npows = org_npowlist
        mid_mceps = mid_mceplist[i:i + len(org_mceps)]
        mid_npows = mid_npowlist[i:i + len(org_npows)]
        assert len(org_mceps) == len(mid_mceps)
        assert len(org_npows) == len(mid_npows)
        assert len(org_mceps) == len(org_npows)

        # dtw between original and target 0-th and silence
        print("## alignment mcep 0-th and silence ##")
        jmceps, twfs = align_feature_vectors(org_mceps, org_npows,
                                             mid_mceps, mid_npows,
                                             opow=-15, tpow=-15, sd=1)
        jnt_mcep = transform_jnt(jmceps)

        # save joint feature vectors
        with open(prepare_path + "_jnt_mcep_{}_.pickle".format(i), 'wb') as f:
            print(f)
            pickle.dump(jnt_mcep, f)

# 3. make EV-GMM
initgmm, initgmm_codeap = None, None
if os.path.exists(prepare_path + "initgmm.pickle"):
    # bug fix: the original opened "initgmm.pickle".format(i) -- a no-op
    # format on a placeholder-free string, with `i` possibly undefined here.
    with open(prepare_path + "initgmm.pickle", 'rb') as f:
        print(f)
        initgmm = pickle.load(f)
else:
    jnt, jnt_codeap = None, []
    for i in range(0, len(mid_mceplist), len(org_mceplist)):
        with open(prepare_path + "_jnt_mcep_{}_.pickle".format(i), 'rb') as f:
            temp_jnt = pickle.load(f)
            if jnt is None:
                jnt = temp_jnt
            else:
                jnt = np.r_[jnt, temp_jnt]

    # train initial gmm
    initgmm = GMMTrainer()
    initgmm.train(jnt)
    with open(prepare_path + "initgmm.pickle", 'wb') as f:
        print(f)
        pickle.dump(initgmm, f)

# get initial gmm params
init_W = initgmm.param.weights_
init_jmean = initgmm.param.means_
init_jcov = initgmm.param.covars_

# split the joint parameters into source (X) and target (Y) halves
sddim = init_jmean.shape[1] // 2
init_meanX = init_jmean[:, :sddim]
init_meanY = init_jmean[:, sddim:]
init_covXX = init_jcov[:, :sddim, :sddim]
init_covXY = init_jcov[:, :sddim, sddim:]
init_covYX = init_jcov[:, sddim:, :sddim]
init_covYY = init_jcov[:, sddim:, sddim:]

fitted_source = init_meanX
fitted_target = init_meanY

# supervectors of the speaker-dependent mean vectors
sv = None
if os.path.exists(prepare_path + "_sv.npy"):
    # cleanup: dropped the dead `sv = np.array(sv)` that preceded the load
    # (sv was still None at that point).
    sv = np.load(prepare_path + '_sv.npy')
else:
    depengmm, depenjnt = None, None
    sv = []
    for i in range(0, len(mid_mceplist), len(org_mceplist)):
        with open(prepare_path + "_jnt_mcep_{}_.pickle".format(i), 'rb') as f:
            depenjnt = pickle.load(f)
        # speaker-dependent GMM: re-estimate means only, starting from initgmm
        depengmm = GMMTrainer(params='m')
        depengmm.param.weights_ = init_W
        depengmm.param.means_ = init_jmean
        depengmm.param.covars_ = init_jcov
        depengmm.train(depenjnt)
        sv.append(depengmm.param.means_)
    sv = np.array(sv)
    np.save(prepare_path + "_sv", sv)

n_mix = 64
S = int(len(mid_mceplist) / len(org_mceplist))
assert S == 22

# PCA over the speaker-dependent mean supervectors (EV-GMM eigenvoices)
source_pca = sklearn.decomposition.PCA()
source_pca.fit(sv[:, :, :sddim].reshape((S, n_mix * sddim)))
target_pca = sklearn.decomposition.PCA()
target_pca.fit(sv[:, :, sddim:].reshape((S, n_mix * sddim)))

eigenvectors = source_pca.components_.reshape((n_mix, sddim, S)), \
    target_pca.components_.reshape((n_mix, sddim, S))
biasvectors = source_pca.mean_.reshape((n_mix, sddim)), \
    target_pca.mean_.reshape((n_mix, sddim))

# estimate statistic features
for_convert_source = __same_path + 'input/EJM10/V01/T01/TIMIT/000/*.wav'
for_convert_target = __same_path + 'adaptation/EJF01/V01/T01/ATR503/A/*.wav'

src_f0list = []
src_splist = []
src_mceplist = []
src_aplist = []
src_npowlist = []
src_codeaplist = []

if os.path.exists(__same_path + 'input/EJM10/V01/T01/TIMIT/000/A11.wav'):
    ite = 0
    for files in sorted(glob.iglob(for_convert_source, recursive=True)):
        wavf = files
        x, fs = sf.read(wavf)
        # bug fix: np.float was removed from NumPy (>= 1.24)
        x = np.array(x, dtype=np.float64)
        x = low_cut_filter(x, fs, cutoff=70)
        assert fs == 16000

        print("extract acoustic featuers: " + wavf)
        f0, sp, ap = feat.analyze(x)
        mcep = feat.mcep()
        npow = feat.npow()
        codeap = feat.codeap()

        wav = synthesizer.synthesis_sp(f0, sp, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(output_path + "input_ansys_{}_.wav".format(ite), wav, fs)

        src_f0list.append(f0)
        src_splist.append(sp)
        src_mceplist.append(mcep)
        src_aplist.append(ap)
        src_npowlist.append(npow)
        src_codeaplist.append(codeap)

        wav = synthesizer.synthesis(f0, mcep, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(output_path + "input_mcep_{}_.wav".format(ite), wav, fs)
        ite = ite + 1
else:
    raise ValueError("No such files.")

tar_f0list = []
tar_mceplist = []
tar_aplist = []
tar_npowlist = []
tar_splist = []
tar_codeaplist = []

if os.path.exists(__same_path + 'adaptation/EJF01/V01/T01/ATR503/A/A01.wav'):
    ite = 0
    for files in sorted(glob.iglob(for_convert_target, recursive=True)):
        wavf = files
        x, fs = sf.read(wavf)
        x = np.array(x, dtype=np.float64)
        x = low_cut_filter(x, fs, cutoff=70)
        assert fs == 16000

        print("extract acoustic featuers: " + wavf)
        f0, sp, ap = feat.analyze(x)
        mcep = feat.mcep()
        npow = feat.npow()
        codeap = feat.codeap()

        wav = synthesizer.synthesis_sp(f0, sp, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(output_path + "target_ansys_{}_.wav".format(ite), wav, fs)

        tar_f0list.append(f0)
        tar_splist.append(sp)
        tar_mceplist.append(mcep)
        tar_aplist.append(ap)
        tar_npowlist.append(npow)
        tar_codeaplist.append(codeap)

        wav = synthesizer.synthesis(f0, mcep, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(output_path + "target_mcep_{}_.wav".format(ite), wav, fs)
        ite = ite + 1
else:
    raise ValueError("No such files.")

# F0 and GV statistics used by the conversion stage
f0statis = F0statistics()
tarf0stats = f0statis.estimate(tar_f0list)
srcf0stats = f0statis.estimate(org_f0list)

gv = GV()
srcgvstats = gv.estimate(org_mceplist)
targvstats = gv.estimate(tar_mceplist)

# 5.
fitting target epoch = 100 fitgmm = sklearn.mixture.GMM(n_components=n_mix, covariance_type='full', n_iter=100) fitgmm.weights_ = init_W fitgmm.means_ = init_meanY fitgmm.covars_ = init_covYY for i in range(len(tar_mceplist)): print("adapt: ", i+1, "/", len(tar_mceplist)) target = tar_mceplist[i] target_pow = target[:, 0] target = target[:, 1:] for x in range(epoch): print("epoch = ", x) predict = fitgmm.predict_proba(np.atleast_2d(static_delta(target))) y = np.sum([predict[:, k:k+1] * (static_delta(target) - biasvectors[1][k]) for k in range(n_mix)], axis=1) gamma = np.sum(predict, axis=0) left = np.sum([gamma[k] * np.dot(eigenvectors[1][k].T, np.linalg.solve(fitgmm.covars_, eigenvectors[1])[k]) for k in range(n_mix)], axis=0) right = np.sum([np.dot(eigenvectors[1][k].T, np.linalg.solve(fitgmm.covars_, y)[k]) for k in range(n_mix)], axis=0) weight = np.linalg.solve(left, right) fitted_target = np.dot(eigenvectors[1], weight) + biasvectors[1] fitgmm.means_ = fitted_target def mcepconvert(source, weights, jmean, meanX, covarXX, covarXY, covarYX, covarYY, fitted_source, fitted_target): M = 64 # set pX px = sklearn.mixture.GMM(n_components=M, covariance_type='full', n_iter=100) px.weights_ = weights px.means_ = meanX px.covars_ = covarXX # set Ab sddim = jmean.shape[1] // 2 covXXinv = np.zeros((M, sddim, sddim)) for m in range(M): covXXinv[m] = np.linalg.inv(covarXX[m]) A = np.zeros((M, sddim, sddim)) b = np.zeros((M, sddim)) cond_cov_inv = np.zeros((M, sddim, sddim)) for m in range(M): A[m] = covarYX[m] @ covXXinv[m] b[m] = fitted_target[m] - A[m] @ meanX[m] cond_cov_inv[m] = np.linalg.inv(covarYY[m] - A[m] @ covarXY[m]) # _gmmmap T, sddim = source.shape wseq = px.predict_proba(source) cseq = np.argmax(wseq, axis=1) mseq = np.zeros((T, sddim)) covseq = np.zeros((T, sddim, sddim)) for t in range(T): m = cseq[t] mseq[t] = fitted_target[m] + A[m] @ (source[t] - meanX[m]) covseq[t] = cond_cov_inv[m] # _mlpg T, sddim = mseq.shape W = construct_static_and_delta_matrix(T, 
sddim // 2) D = get_diagonal_precision_matrix(T, sddim, covseq) WD = W.T @ D WDW = WD @ W WDM = WD @ mseq.flatten() output = scipy.sparse.linalg.spsolve(WDW, WDM, use_umfpack=False).reshape(T, sddim // 2) return output # learn cvgvstats cv_mceps = [] for i in range(len(src_mceplist)): temp_mcep = src_mceplist[i] temp_mcep_0th = temp_mcep[:, 0] temp_mcep = temp_mcep[:, 1:] sta_mcep = static_delta(temp_mcep) cvmcep_wopow = np.array(mcepconvert(sta_mcep, init_W, init_jmean, init_meanX, init_covXX, init_covXY, init_covYX, init_covYY, fitted_source, fitted_target)) cvmcep = np.c_[temp_mcep_0th, cvmcep_wopow] cv_mceps.append(cvmcep) cvgvstats = gv.estimate(cv_mceps) for i in range(len(src_mceplist)): cvmcep_wGV = gv.postfilter(cv_mceps[i], targvstats, cvgvstats=cvgvstats) cvf0 = f0statis.convert(src_f0list[i], srcf0stats, tarf0stats) wav = synthesizer.synthesis(cvf0, cvmcep_wGV, src_aplist[i], rmcep=src_mceplist[i]) wav = np.clip(wav, -32768, 32767) sf.write(output_path + "cv_{}_.wav".format(i), wav, 16000) for i in range(len(src_mceplist)): wav = synthesizer.synthesis(src_f0list[i], src_mceplist[i], src_aplist[i]) wav = np.clip(wav, -32768, 32767) sf.write(output_path + "mcep_{}_.wav".format(i), wav, 16000) wav = synthesizer.synthesis_sp(src_f0list[i], src_splist[i], src_aplist[i]) wav = np.clip(wav, -32768, 32767) sf.write(output_path + "ansys_{}_.wav".format(i), wav, 16000) cvf0 = f0statis.convert(src_f0list[0], srcf0stats, tarf0stats) plt.plot(cvf0) plt.plot(src_f0list[0]) cvmcep_wGV = gv.postfilter(cv_mceps[0], srcgvstats, cvgvstats=cvgvstats) cvf0 = f0statis.convert(src_f0list[0], srcf0stats, tarf0stats) wav = synthesizer.synthesis(cvf0, cvmcep_wGV, src_aplist[0], rmcep=src_mceplist[0]) wav = np.clip(wav, -32768, 32767) sf.write(output_path + "te.wav", wav, 16000) # org-cv distance with open(output_path + "melcd_org-cv.txt", "w") as outfile: outfile.write("adapt1 org-cv mcd.\n") for i in range(len(src_mceplist)): temp_mcep = src_mceplist[i] temp_mcep_0th = 
temp_mcep[:, 0] temp_mcep = temp_mcep[:, 1:] temp_cv = cv_mceps[i] temp_cv_0th = temp_cv[:, 0] temp_cv = temp_cv[:, 1:] _, _, mcd = get_alignment(temp_mcep, temp_mcep_0th, temp_cv, temp_cv_0th, opow=-15, tpow=-15, sd=1) outfile.write("{0},{1}\n".format(i, mcd)) # cv-target distance # read target files and analyze mceps targets_mceplist = [] targets_list = __same_path + 'adaptation/EJF01/V01/T01/TIMIT/000/*.wav' for files in sorted(glob.iglob(targets_list, recursive=True)): wavf = files x, fs = sf.read(wavf) x = np.array(x, dtype=np.float) x = low_cut_filter(x, fs, cutoff=70) assert fs == 16000 print("extract acoustic featuers: " + wavf) f0, sp, ap = feat.analyze(x) mcep = feat.mcep() targets_mceplist.append(mcep) with open(output_path + "melcd_cv-target.txt", "w") as outfile: outfile.write("adapt1 cv-target mcd.\n") for i in range(len(src_mceplist)): temp_mcep = targets_mceplist[i] temp_mcep_0th = temp_mcep[:, 0] temp_mcep = temp_mcep[:, 1:] temp_cv = cv_mceps[i] temp_cv_0th = temp_cv[:, 0] temp_cv = temp_cv[:, 1:] _, _, mcd = get_alignment(temp_cv, temp_cv_0th, temp_mcep, temp_mcep_0th, opow=-15, tpow=-15, sd=1) outfile.write("{0},{1}\n".format(i, mcd)) # org-target distance with open(output_path + "melcd_org-target.txt", "w") as outfile: outfile.write("adapt1 org-target mcd.\n") for i in range(len(src_mceplist)): temp_mcep = src_mceplist[i] temp_mcep_0th = temp_mcep[:, 0] temp_mcep = temp_mcep[:, 1:] temp_mcep2 = targets_mceplist[i] temp_mcep2_0th = temp_mcep2[:, 0] temp_mcep2 = temp_mcep2[:, 1:] _, _, mcd = get_alignment(temp_mcep, temp_mcep_0th, temp_mcep2, temp_mcep2_0th, opow=-15, tpow=-15, sd=1) outfile.write("{0},{1}\n".format(i, mcd)) ```
github_jupyter
# My First Neural Network, Part 2. Bias and CE Loss > Bias and cross-entropy loss - toc: true - branch: master - badges: true - comments: true - metadata_key1: metadata_value1 - metadata_key2: metadata_value2 - image: https://i.imgur.com/5CbsjVW.png - description: Bias and cross-entropy loss - redirect_to: https://drscotthawley.github.io/blog/2019/02/04/My-First-NN-Part-2.html Links to lessons: [Part 0](https://drscotthawley.github.io/Following-Gravity/), [Part 1](https://colab.research.google.com/drive/1CPueDKDYOC33U_0qHxBhsDPpzYMpD8c8), [Part 2](https://colab.research.google.com/drive/1O9xcdAQUxo7KdVhbggqLCvabc77Rj-yH), [Part 3](https://colab.research.google.com/drive/1UZDEK-3v-SWxpDYBfamBoD4xhy7H2DEZ#scrollTo=14tOCcvT_a0I) Moving on from our [our previous notebook](https://colab.research.google.com/drive/1CPueDKDYOC33U_0qHxBhsDPpzYMpD8c8), we will investigate three things we could do to improve the models developed previously: 1. Add a bias term 2. Use a different loss function 3. Add more layers to the network *(postponed to next lesson)* ## 1. Add a bias term Our weighted sums did not include any constant offset or "bias" term. This may be fine for some data, but not for many others. For example, in a simple linear model $y = mx+b$, the choice of $b=0$ limits the model's ability to accurately fit some data. ![image of bias vs no biast](https://i.imgur.com/5CbsjVW.png) That is effectively what we did with our weighted sum $\sum_j X_{ij}w_j$: there was no constant offset. To correct this lack, we could add a new variable $b$ and make our weighted sum $b + \sum_j X_{ij}w_j$. Equivalently, and more conveniently for the purposes of coding, we could put an additional column of 1's in the input $X$, and a new row to our weight matrix $w$. By convention, this is usually done with the zeroth element, so that $X_{i0}=1$ and the columns of $X$ are moved to the right, and $w_0 = b$ will be the new constant offset (because $1*w_0 = w_0$.) 
For the first problem (Trask's first problem), this change makes our new matrix equation look like (with new bias terms in red) $$ f\left( \overbrace{ \left[ {\begin{array}{ccc} \color{red}1 & 0 & 0 & 1 \\ \color{red}1 & 0 & 1 & 1\\ \color{red}1 & 1 & 0 & 1\\ \color{red}1 & 1 & 1 & 1\\ \end{array} } \right] }^\text{X} \overbrace{ \left[ {\begin{array}{c} \color{red}{w_0} \\ w_1\\ w_2\\ w_3 \end{array} } \right] }^{w} \right) = \overbrace{ \left[ {\begin{array}{c} 0 \\ 0 \\ 1 \\ 1 \\ \end{array} } \right] }^{\tilde{Y}} $$ ***Foreshadowing***: *Note that in this problem, the rightmost column of $X$ already was a column of all 1's, and so already has something akin to a bias. Thus, adding a new column of all 1's will not add any information, and so for this problem we expect that adding the bias won't improve the model performance.)* With this change, we can still write our weighted sum as $\sum_j X_{ij}w_j$, it's just that $j$ now runs over 0..3 instead of 0..2. To emphasize: We can leave the rest of our code the same as before, provided we change $X$ by adding a column of 1's. In terms of coding the change to $X$, we can either rewrite it by hand, or pull a numpy trick: ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline # old data X = np.array([ [0,0,1], [0,1,1], [1,0,1], [1,1,1] ]) # add a column of 1's new_col = np.ones((X.shape[0],1)) # array of 1s, w/ same # of rows as X, 1 col wide X_bias = np.hstack((new_col,X)) # stack them horizontally print(X_bias) ``` Let's compare our the use of the bias term without. We'll define functions for the gradient descent and for the plotting of the loss history, so we can call these again later in this lesson. 
``` Y = np.array([[0,0,1,1]]).T # target output dataset def sigmoid(x,deriv=False): if(deriv==True): return x*(1-x) return 1/(1+np.exp(-x)) def calc_loss(Y_pred, Y, X, w, activ, loss_type="mse"): # MSE loss diff = Y_pred - Y loss = (diff**2).mean() gradient = np.dot( X.T, diff*activ(Y_pred, deriv=True)) # for weight update return loss, gradient def fit(X, Y, activ, use_bias=True, alpha=3.0, maxiter=10000, loss_type='mse'): """ fit: Generic routine for doing our gradient decent Required arguments: X: input matrix Y: target output activ: reference to an activation function Keywork arguments (optional): use_bias: Flag for whether to use bias in the model alpha: learning rate. Tip: Use the largest alpha 'you can get away with' maxiter: maximum number of iterations loss_type: Set to MSE for now but we'll extend this later. """ if use_bias: # add a column of 1's to X new_col = np.ones((X.shape[0],1)) X = np.hstack((new_col,X)) # Define weights np.random.seed(1) # for reproducibility if activ == sigmoid: w = 2*np.random.random((X.shape[1],Y.shape[1])) - 1 # -1..1 else: w = np.random.random((X.shape[1],Y.shape[1]))/10 # only positive weights (for later) loss_hist = [] # start with an empty list for iter in range(maxiter): Y_pred = activ(np.dot(X,w)) # compute prediction, i.e. tilde{Y} loss, gradient = calc_loss(Y_pred, Y, X, w, activ, loss_type) loss_hist.append(loss) # add to the history of the loss w -= alpha * gradient # weight update return w, Y_pred, loss_hist # send these values back # Now call the fit function twice, to compare with and without bias: w_old, Y_pred_old, loss_hist_old = fit(X, Y, sigmoid, use_bias=False) w_new, Y_pred_new, loss_hist_new = fit(X, Y, sigmoid) # Plot the results. 
Make a function so we can call this again later def plot_new_old(loss_hist_old, loss_hist_new, labels=["no bias", "with bias"]): plt.loglog(loss_hist_old, label=labels[0]) plt.loglog(loss_hist_new, label=labels[1]) plt.xlabel("Iteration") plt.ylabel("MSE Loss (monitoring)") plt.legend() plt.show() plot_new_old(loss_hist_old, loss_hist_new) # And print the final answers: print("No bias: Y_pred =",Y_pred_old) print("With bias: Y_pred =",Y_pred_new) ``` As expected, *for this problem*, the inclusion of bias didn't make any significant difference. Let's try the same thing for the 7-segment display problem from Part 1. And let's try two different runs, one with sigmoid activation, and another with ReLU: ``` X_7seg = np.array([ [1,1,1,1,1,1,0,1], # 0 [0,1,1,0,0,0,0,1], # 1 [1,1,0,1,1,0,1,1], # 2 [1,1,1,1,0,0,1,1], # 3 [0,1,1,0,0,1,1,1], # 4 [1,0,1,1,0,1,1,1], # 5 [1,0,1,1,1,1,1,1], # 6 [1,1,1,0,0,0,0,1], # 7 [1,1,1,1,1,1,1,1], # 8 [1,1,1,1,0,1,1,1] # 9 ]) Y_7seg = np.eye(10) X, Y = X_7seg, Y_7seg def relu(x,deriv=False): # relu activation if(deriv==True): return 1*(x>0) return x*(x>0) # Call the fit routine twice, once for sigmoid activation, once for relu for activ in [sigmoid, relu]: print("\n\n--------- activ = ",activ) alpha = 0.5 if activ == sigmoid else 0.005 # assign learning rate w_old, Y_pred_old, loss_hist_old = fit(X, Y, activ, alpha=alpha, use_bias=False) w_new, Y_pred_new, loss_hist_new = fit(X, Y, activ, alpha=alpha) # Report results plot_new_old(loss_hist_old, loss_hist_new) np.set_printoptions(formatter={'float': lambda x: "{0:0.2f}".format(x)}) # 2 sig figs print("No bias: Y_pred =\n",Y_pred_old) print("With bias: Y_pred =\n",Y_pred_new) ``` ...So for this problem, it seems that adding the bias gave us a bit more accuracy, both for the sigmoid and relu activations. 
*Note: in this example, the learning rates were chosen by experiment; you should get in the habit of going back and experimenting with different learning rates.* ## Video Interlude: Logistic Regression What we've been doing up until now has been a "classification" problem, with "yes"/"no" answers represented by 1's and 0's. This sort of operation is closely associated with the statistical method of Logistic Regression. It is akin to linear regression but with a sigmoid activation function. When doing Logistic Regression, one optimizes the fit by finding the maximum "likelihood" of a given model being correct. To gain some insight on Logistic Regression, watch [this StatQuest video](https://www.youtube.com/watch?v=yIYKR4sgzI8). (You can ignore his remarks about his preceding video, "R squared" and "p-value", etc.) In what follows, we will be *minimizing* the *negative* of the *logarithm* of the likelihood, a quantity typically known as the Cross-Entropy loss. (This same quantity is also the non-constant part of the "Kullback-Leibler Divergence" or "K-L divergence," so you may hear it called that sometimes.) ## 2. Use a different loss function: Cross-Entropy loss Let's return to Trask's first problem for which there is only one target per data point (row) of input, namely a target of 0 or 1. In this case, using the sigmoid function for this classification problem is a case of [logistic regression](https://www.youtube.com/watch?v=yIYKR4sgzI8), even though we hadn't identified it as such. We've been using mean squared error (MSE) loss, but other loss functions exist. In particular, for outputs which are either "yes" or "no" such as the *classification problem* we are solving, a function called "[cross entropy](https://ml-cheatsheet.readthedocs.io/en/latest/loss_functions.html#cross-entropy)" is typically preferred.
The cross-entropy loss is written like this: $$L_{CE} = -\sum_i \left[ Y_i\log(\tilde{Y}_i) + (1-Y_i)\log(1-\tilde{Y}_i) \right]$$ Note that since the function $\log(x)$ is undefined for $x\le0$, we need to make sure $0<\tilde{Y}_i<1$ for all $i$. One way to ensure this is to use sigmoid activation! Thus, for classification problems, it is very common to see sigmoid activation (or its multi-class relative "[softmax](http://dataaspirant.com/2017/03/07/difference-between-softmax-function-and-sigmoid-function/)") immediately before the output, even for many-layer neural networks with all kinds of other activations in other places. To use the CE loss with gradient descent, we need its derivative with respect to the weights. First let's write the CE loss in terms of the inputs $X$, weights $w$ and activation function $f$: ...wait, for compactness, let's write the weighted sum as $S_i = \sum_j X_{ij}w_j$. Ok, now going forward.... $$L_{CE} = -\sum_i\left[ Y_i\log\left(f\left(S_i \right)\right) + (1-Y_i)\log\left(1- f\left(S_{i}\right)\right) \right]$$ For any function $g(x)$, the derivative of $\log(g(x))$ with respect to $x$ is just $(1/g)(dg/dx)$, so our partial derivatives with respect to weights look like $${\partial L_{CE}\over \partial w_j} = -\sum_i\left[ {Y_i\over\tilde{Y_i}}{\partial f(S_i)\over \partial w_j} - {1-Y_i\over 1-\tilde{Y}_i} {\partial f(S_i)\over \partial w_j} \right]\\ = -\sum_i {\partial f(S_i) \over \partial S_i}{\partial S_i\over\partial w_j} \left[ {Y_i\over\tilde{Y_i}} - {1-Y_i\over 1-\tilde{Y}_i} \right] $$ And if we multiply by $2/N$, we can write this as $$ {\partial L_{CE}\over \partial w_j} = {2\over N} \sum_{i=0}^{N-1} {\partial f(S_i) \over \partial S_i}X_{ij} \left[ {\tilde{Y_i}-Y_i\over \tilde{Y_i}(1-\tilde{Y_i}) }\right]$$ This is similar to the partial derivatives for our MSE loss, except the term in the denominator is new.
To see this more clearly, recall that the weight update for MSE (from Part 1) was $$ w := w - \alpha X^T \cdot \left( [\tilde{Y}-Y]*\tilde{Y}*(1-\tilde{Y})\right) $$ whereas for CE we actually get a bit of a simplification because the term in the denominator cancels with a similar term in the numerator: $$ w := w - \alpha X^T \cdot \left( [\tilde{Y}-Y]*\tilde{Y}*(1-\tilde{Y})\right) / (\tilde{Y}*(1-\tilde{Y})) \\ w := w - \alpha X^T \cdot [\tilde{Y}-Y]. $$ Thus despite all this seeming complication, our CE weight update is actually simpler than what it was before as MSE! Let's try this out with code now: ``` # we'll "overwrite" the earlier calc_loss function def calc_loss(Y_pred, Y, X, w, activ, loss_type='ce'): diff = Y_pred - Y loss = (diff**2).mean() # MSE loss if 'ce' == loss_type: diff = diff / (Y_pred*(1-Y_pred)) # use this for gradient #loss = -(Y*np.log(Y_tilde) + (1-Y)*np.log1p(-Y_tilde)).mean() # CE Loss # Actually we don't care what the loss itself is. # Let's use MSE loss for 'monitoring' regardless, so we can compare the # effects of using different gradients-of-loss functions gradient = np.dot( X.T, diff*activ(Y_pred, deriv=True)) # same as before return loss, gradient #---- X = X_bias Y = np.array([[0,0,1,1]]).T # target output dataset # Compare old and new w_mse, Y_pred_mse, loss_hist_mse = fit(X, Y, sigmoid, alpha=0.5, loss_type='mse') w_ce, Y_pred_ce, loss_hist_ce = fit(X, Y, sigmoid, alpha=0.5, loss_type='ce') # fit plot_new_old(loss_hist_mse, loss_hist_ce, ["MSE, with bias", "CE, with bias"]) # And print the final answers: print("MSE _loss: Y_pred =\n",Y_pred_mse) print("CE loss: Y_pred =\n",Y_pred_ce) ``` This works a lot better! To understand why, note that the gradients for MSE loss scale like $$[\tilde{Y}-Y]*\tilde{Y}*(1-\tilde{Y})$$ and thus **these gradients go to zero** as $\tilde{Y}\rightarrow 0$, and/or $\tilde{Y}\rightarrow 1$, which makes training **very slow**! 
In contrast, the extra denominator in the CE gradients effectively cancels out this behavior, leaving the remaining term of $$[\tilde{Y}-Y]$$ which varies *linearly* with the difference from the target value. This makes training much more efficient. ``` # Aside: What happens if we try ReLU activation with CE loss? Bad things, probably. # Recall that ReLU maps negative numbers to 0, and isn't bounded from above. # Thus the "denominator" in the 'diff term' in our earlier code will tend to 'explode'. # Put differently, note that log(x) is undefined for x=0, as is log(1-x) for x=1. w_relu_ce, Y_pred_relu_ce, loss_hist_relu_ce = fit(X, Y, relu, alpha=0.001, loss_type='ce') plot_new_old(loss_hist_ce, loss_hist_relu_ce, ["CE, sigmoid", "CE, ReLU"]) ``` # Exercise: Do the same comparison for the 7-segment display problem: Make a plot showing a comparison of the loss history using MSE loss vs. using CE loss. And print out the final values of `Y_pred` for each. Use a learning rate of 0.5 and sigmoid activation, with bias. Take a screenshot of the output and upload it to your instructor. (*Note:* for the 7-segment display, since the target $Y$ has multiple columns, we should "normalize" the output in order to be able to properly interpret the output values $\tilde{Y}$ as probabilities. To do so, we would use a `softmax` activation. For now, we haven't bothered with this because it would add a bit more math, and is not actually necessary to solve this problem.) Next time: [Part 3: Multi-Layer Networks and Backpropagation](https://colab.research.google.com/drive/1UZDEK-3v-SWxpDYBfamBoD4xhy7H2DEZ#scrollTo=14tOCcvT_a0I)
github_jupyter
# Instaquery In this notebook, we'll define an `instaquery()` function that lets you: 1. define a renderer (plot, table, print, etc.) 2. filter the data using `pandas.query` syntax 3. specify a column to group-by This tiny function can be handy for quick, throwaway exploration that you do not want captured permanently in the notebook (e.g., exploration off the primary track). ``` %matplotlib inline from IPython.display import display, Image from IPython.html.widgets import interact_manual def instaquery(df, renderer=lambda df, by: display(df)): ''' Creates an interactive query widget with an optional custom renderer. df: DataFrame to query renderer: Render function of the form lambda df, by where df is the subset of the DataFrame rows matching the query and by is the column selected for a group-by option. The default render function simply displays the rows matching the query and ignores the group-by. ''' by_vals = tuple(['---'] + list(df.columns)) @interact_manual(query='', by=by_vals) def instaquery(query, by): '''Inner function that gets called when the user interacts with the widgets.''' try: sub_df = df.query(query) except Exception: sub_df = df # replace sentinel with None by = None if by == '---' else by renderer(sub_df, by) ``` It doesn't look like much, but hers's a screenshot of just one thing it can do. ``` Image('./instaquery.png', retina=True, ) ``` ## Example #1: Pair Plot of Iris Data Let's use it to render seaborn pairplots for the class iris dataset first. ``` import warnings warnings.filterwarnings('ignore') import pandas as pd import seaborn as sns df = sns.load_dataset('iris') df.head() ``` We initialize the instaquery with the iris DataFrame and a custom render function. In our function, we render plots of all pairwise column combinations. We color points / bars by a selected column. 1. Run the cell below. Then hit *Run instaquery*. You should see the pairwise plots for the full dataset. 2. 
Next, select `species` in the *by* dropdown and click *Run instaquery* again. You still see the full dataset, but with each feature vector color coded according to its `species` category. 3. Finally, enter the query string `petal_width > 0.5 and petal_length < 5` and click *Run instaquery*. Now you only see the data that fall within the query parameters colorized according to species. ``` instaquery(df, lambda df, by: sns.pairplot(df, size=2.5, hue=by)) ``` ## Example #2: Pair Plot of Select Tip Columns Now let's switch to another dataset and customize the pairwise plots a bit. ``` df = sns.load_dataset('tips') ``` In this dataset, not all columns have numeric values. Since we're interested in using `pairplot`, we need to filter out some columns from our pairings. But, at the same time, we gain more categorical columns to use for grouping. ``` df.head() ``` In this invocation of `instaquery`, we pass a fixed list of columns of interest for plotting. We also specify the plot fill opacity since our data is denser than in the iris dataset. 1. Try coloring by `sex`, `day`, `smoker`, etc. 2. When coloring by `day`, try the query `total_bill * 0.15 < tip`. This plots customer tips greater than 15% of the total bill amount colored by day. ``` instaquery(df, lambda df, by: sns.pairplot(df, vars=['tip', 'total_bill', 'size'], size=3.5, hue=by, plot_kws={'alpha' : 0.7})) ``` ## Example #3: Description of Tip Groups As a final example, we cease plotting and use a different renderer: a pandas table giving the basic summary stats of all numeric columns, optionally grouped by a column. 1. Try *Run instaquery* with no values first. 2. Try grouping by `sex`. 3. Try grouping by `sex` with the query `time == 'Lunch'`. ``` instaquery(df, lambda df, by: display(df.groupby(by).describe()) if by else display(df.describe())) ``` ## Homework: Violin Plot Renderer Try to implement a violin plot renderer that compares the distribution of tips grouped by a column of choice.
Here's a hint in the form of a static violin plot. ``` sns.violinplot(df.tip, df.sex) ``` <div class="alert" style="border: 1px solid #aaa; background: radial-gradient(ellipse at center, #ffffff 50%, #eee 100%);"> <div class="row"> <div class="col-sm-1"><img src="https://knowledgeanyhow.org/static/images/favicon_32x32.png" style="margin-top: -6px"/></div> <div class="col-sm-11">This notebook was created using <a href="https://knowledgeanyhow.org">IBM Knowledge Anyhow Workbench</a>. To learn more, visit us at <a href="https://knowledgeanyhow.org">https://knowledgeanyhow.org</a>.</div> </div> </div>
github_jupyter
``` # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Vertex AI client library: AutoML tabular classification model for batch prediction <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/community/gapic/automl/showcase_automl_tabular_classification_batch.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/community/gapic/automl/showcase_automl_tabular_classification_batch.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> <br/><br/><br/> ## Overview This tutorial demonstrates how to use the Vertex AI Python client library to create tabular classification models and do batch prediction using Google Cloud's [AutoML](https://cloud.google.com/ai-platform-unified/docs/start/automl-users). ### Dataset The dataset used for this tutorial is the [Iris dataset](https://www.tensorflow.org/datasets/catalog/iris) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. 
The trained model predicts the type of Iris flower species from a class of three species: setosa, virginica, or versicolor. ### Objective In this tutorial, you create an AutoML tabular classification model from a Python script, and then do a batch prediction using the Vertex AI client library. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console. The steps performed include: - Create a Vertex AI `Dataset` resource. - Train the model. - View the model evaluation. - Make a batch prediction. There is one key difference between using batch prediction and using online prediction: * Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time. * Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready. ### Costs This tutorial uses billable components of Google Cloud (GCP): * Vertex AI * Cloud Storage Learn about [Vertex AI pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. ## Installation Install the latest version of Vertex AI client library. ``` import sys if "google.colab" in sys.modules: USER_FLAG = "" else: USER_FLAG = "--user" ! pip3 install -U google-cloud-aiplatform $USER_FLAG ``` Install the latest GA version of *google-cloud-storage* library as well. ``` ! pip3 install -U google-cloud-storage $USER_FLAG ``` ### Restart the kernel Once you've installed the Vertex AI client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages. 
``` import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### GPU runtime *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the Vertex AI APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component) 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Vertex AI Notebooks. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. ``` PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. 
- Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. For the latest support per region, see the [Vertex AI locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations) ``` REGION = "us-central1" # @param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. ``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Authenticate your Google Cloud account **If you are using Vertex AI Notebooks**, your environment is already authenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. **Otherwise**, follow these steps: In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page. **Click Create service account**. In the **Service account name** field, enter a name, and click **Create**. In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. ``` import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. 
This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on AI Platform, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ``` ### Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. ``` BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ``` **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ``` ! gsutil mb -l $REGION $BUCKET_NAME ``` Finally, validate access to your Cloud Storage bucket by examining its contents: ``` ! gsutil ls -al $BUCKET_NAME ``` ### Set up variables Next, set up some variables used throughout the tutorial. ### Import libraries and define constants #### Import Vertex AI client library Import the Vertex AI client library into our Python environment. 
``` import os import sys import time import google.cloud.aiplatform_v1 as aip from google.protobuf import json_format from google.protobuf.json_format import MessageToJson, ParseDict from google.protobuf.struct_pb2 import Struct, Value ``` #### Vertex AI constants Setup up the following constants for Vertex AI: - `API_ENDPOINT`: The Vertex AI API service endpoint for dataset, model, job, pipeline and endpoint services. - `PARENT`: The Vertex AI location root path for dataset, model, job, pipeline and endpoint resources. ``` # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex AI location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION ``` #### AutoML constants Set constants unique to AutoML datasets and training: - Dataset Schemas: Tells the `Dataset` resource service which type of dataset it is. - Data Labeling (Annotations) Schemas: Tells the `Dataset` resource service how the data is labeled (annotated). - Dataset Training Schemas: Tells the `Pipeline` resource service the task (e.g., classification) to train the model for. ``` # Tabular Dataset type DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/tables_1.0.0.yaml" # Tabular Labeling type LABEL_SCHEMA = ( "gs://google-cloud-aiplatform/schema/dataset/ioformat/table_io_format_1.0.0.yaml" ) # Tabular Training task TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tables_1.0.0.yaml" ``` #### Hardware Accelerators Set the hardware accelerators (e.g., GPU), if any, for prediction. Set the variable `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. 
For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) For GPU, available accelerators include: - aip.AcceleratorType.NVIDIA_TESLA_K80 - aip.AcceleratorType.NVIDIA_TESLA_P100 - aip.AcceleratorType.NVIDIA_TESLA_P4 - aip.AcceleratorType.NVIDIA_TESLA_T4 - aip.AcceleratorType.NVIDIA_TESLA_V100 Otherwise specify `(None, None)` to use a container image to run on a CPU. ``` if os.getenv("IS_TESTING_DEPOLY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPOLY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1) ``` #### Container (Docker) image For AutoML batch prediction, the container image for the serving binary is pre-determined by the Vertex AI prediction service. More specifically, the service will pick the appropriate container for the model depending on the hardware accelerator you selected. #### Machine Type Next, set the machine type to use for prediction. - Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VM you will use for prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \] *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs* ``` if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) ``` # Tutorial Now you are ready to start creating your own AutoML tabular classification model. ## Set up clients The Vertex AI client library works as a client/server model. 
On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex AI server. You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. - Dataset Service for `Dataset` resources. - Model Service for `Model` resources. - Pipeline Service for training. - Job Service for batch prediction and custom training. ``` # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_dataset_client(): client = aip.DatasetServiceClient(client_options=client_options) return client def create_model_client(): client = aip.ModelServiceClient(client_options=client_options) return client def create_pipeline_client(): client = aip.PipelineServiceClient(client_options=client_options) return client def create_job_client(): client = aip.JobServiceClient(client_options=client_options) return client clients = {} clients["dataset"] = create_dataset_client() clients["model"] = create_model_client() clients["pipeline"] = create_pipeline_client() clients["job"] = create_job_client() for client in clients.items(): print(client) ``` ## Dataset Now that your clients are ready, your first step is to create a `Dataset` resource instance. This step differs from Vision, Video and Language. For those products, after the `Dataset` resource is created, one then separately imports the data, using the `import_data` method. For tabular, importing of the data is deferred until the training pipeline starts training the model. What do we do different? Well, first you won't be calling the `import_data` method. Instead, when you create the dataset instance you specify the Cloud Storage location of the CSV file or BigQuery location of the data table, which contains your tabular data as part of the `Dataset` resource's metadata. 
#### Cloud Storage `metadata = {"input_config": {"gcs_source": {"uri": [gcs_uri]}}}` The format for a Cloud Storage path is: gs://[bucket_name]/[folder(s)/[file] #### BigQuery `metadata = {"input_config": {"bigquery_source": {"uri": [gcs_uri]}}}` The format for a BigQuery path is: bq://[collection].[dataset].[table] Note that the `uri` field is a list, whereby you can input multiple CSV files or BigQuery tables when your data is split across files. ### Data preparation The Vertex AI `Dataset` resource for tabular has a couple of requirements for your tabular data. - Must be in a CSV file or a BigQuery query. #### CSV For tabular classification, the CSV file has a few requirements: - The first row must be the heading -- note how this is different from Vision, Video and Language where the requirement is no heading. - All but one column are features. - One column is the label, which you will specify when you subsequently create the training pipeline. #### Location of Cloud Storage training data. Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage. ``` IMPORT_FILE = "gs://cloud-samples-data/tables/iris_1000.csv" ``` #### Quick peek at your data You will use a version of the Iris dataset that is stored in a public Cloud Storage bucket, using a CSV index file. Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows. You also need for training to know the heading name of the label column, which is save as `label_column`. For this dataset, it is the last column in the CSV file. ``` count = ! gsutil cat $IMPORT_FILE | wc -l print("Number of Examples", int(count[0])) print("First 10 rows") ! gsutil cat $IMPORT_FILE | head heading = ! 
gsutil cat $IMPORT_FILE | head -n1 label_column = str(heading).split(",")[-1].split("'")[0] print("Label Column Name", label_column) if label_column is None: raise Exception("label column missing") ``` ## Dataset Now that your clients are ready, your first step in training a model is to create a managed dataset instance, and then upload your labeled data to it. ### Create `Dataset` resource instance Use the helper function `create_dataset` to create the instance of a `Dataset` resource. This function does the following: 1. Uses the dataset client service. 2. Creates an Vertex AI `Dataset` resource (`aip.Dataset`), with the following parameters: - `display_name`: The human-readable name you choose to give it. - `metadata_schema_uri`: The schema for the dataset type. - `metadata`: The Cloud Storage or BigQuery location of the tabular data. 3. Calls the client dataset service method `create_dataset`, with the following parameters: - `parent`: The Vertex AI location root path for your `Database`, `Model` and `Endpoint` resources. - `dataset`: The Vertex AI dataset object instance you created. 4. The method returns an `operation` object. An `operation` object is how Vertex AI handles asynchronous calls for long running operations. While this step usually goes fast, when you first use it in your project, there is a longer delay due to provisioning. You can use the `operation` object to get status on the operation (e.g., create `Dataset` resource) or to cancel the operation, by invoking an operation method: | Method | Description | | ----------- | ----------- | | result() | Waits for the operation to complete and returns a result object in JSON format. | | running() | Returns True/False on whether the operation is still running. | | done() | Returns True/False on whether the operation is completed. | | canceled() | Returns True/False on whether the operation was canceled. | | cancel() | Cancels the operation (this may take up to 30 seconds). 
| ``` TIMEOUT = 90 def create_dataset(name, schema, src_uri=None, labels=None, timeout=TIMEOUT): start_time = time.time() try: if src_uri.startswith("gs://"): metadata = {"input_config": {"gcs_source": {"uri": [src_uri]}}} elif src_uri.startswith("bq://"): metadata = {"input_config": {"bigquery_source": {"uri": [src_uri]}}} dataset = aip.Dataset( display_name=name, metadata_schema_uri=schema, labels=labels, metadata=json_format.ParseDict(metadata, Value()), ) operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset) print("Long running operation:", operation.operation.name) result = operation.result(timeout=TIMEOUT) print("time:", time.time() - start_time) print("response") print(" name:", result.name) print(" display_name:", result.display_name) print(" metadata_schema_uri:", result.metadata_schema_uri) print(" metadata:", dict(result.metadata)) print(" create_time:", result.create_time) print(" update_time:", result.update_time) print(" etag:", result.etag) print(" labels:", dict(result.labels)) return result except Exception as e: print("exception:", e) return None result = create_dataset("iris-" + TIMESTAMP, DATA_SCHEMA, src_uri=IMPORT_FILE) ``` Now save the unique dataset identifier for the `Dataset` resource instance you created. ``` # The full unique ID for the dataset dataset_id = result.name # The short numeric ID for the dataset dataset_short_id = dataset_id.split("/")[-1] print(dataset_id) ``` ## Train the model Now train an AutoML tabular classification model using your Vertex AI `Dataset` resource. To train the model, do the following steps: 1. Create an Vertex AI training pipeline for the `Dataset` resource. 2. Execute the pipeline to start the training. ### Create a training pipeline You may ask, what do we use a pipeline for? You typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of: 1. 
Being reusable for subsequent training jobs.
2. Can be containerized and run as a batch job.
3. Can be distributed.
4. All the steps are associated with the same pipeline job for tracking progress.

Use this helper function `create_pipeline`, which takes the following parameters:

- `pipeline_name`: A human readable name for the pipeline job.
- `model_name`: A human readable name for the model.
- `dataset`: The Vertex AI fully qualified dataset identifier.
- `schema`: The dataset labeling (annotation) training schema.
- `task`: A dictionary describing the requirements for the training job.

The helper function calls the `Pipeline` client service's method `create_pipeline`, which takes the following parameters:

- `parent`: The Vertex AI location root path for your `Dataset`, `Model` and `Endpoint` resources.
- `training_pipeline`: the full specification for the pipeline training job.

Let's look now deeper into the *minimal* requirements for constructing a `training_pipeline` specification:

- `display_name`: A human readable name for the pipeline job.
- `training_task_definition`: The dataset labeling (annotation) training schema.
- `training_task_inputs`: A dictionary describing the requirements for the training job.
- `model_to_upload`: A human readable name for the model.
- `input_data_config`: The dataset specification.
 - `dataset_id`: The Vertex AI dataset identifier only (non-fully qualified) -- this is the last part of the fully-qualified identifier.
 - `fraction_split`: If specified, the percentages of the dataset to use for training, test and validation. Otherwise, the percentages are automatically selected by AutoML.
``` def create_pipeline(pipeline_name, model_name, dataset, schema, task): dataset_id = dataset.split("/")[-1] input_config = { "dataset_id": dataset_id, "fraction_split": { "training_fraction": 0.8, "validation_fraction": 0.1, "test_fraction": 0.1, }, } training_pipeline = { "display_name": pipeline_name, "training_task_definition": schema, "training_task_inputs": task, "input_data_config": input_config, "model_to_upload": {"display_name": model_name}, } try: pipeline = clients["pipeline"].create_training_pipeline( parent=PARENT, training_pipeline=training_pipeline ) print(pipeline) except Exception as e: print("exception:", e) return None return pipeline ``` ### Construct the task requirements Next, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the `task` field takes a Google protobuf Struct, which is very similar to a Python dictionary. Use the `json_format.ParseDict` method for the conversion. The minimal fields you need to specify are: - `prediction_type`: Whether we are doing "classification" or "regression". - `target_column`: The CSV heading column name for the column we want to predict (i.e., the label). - `train_budget_milli_node_hours`: The maximum time to budget (billed) for training the model, where 1000 = 1 hour. - `disable_early_stopping`: Whether True/False to let AutoML use its judgement to stop training early or train for the entire budget. - `transformations`: Specifies the feature engineering for each feature column. For `transformations`, the list must have an entry for each column. The outer key field indicates the type of feature engineering for the corresponding column. In this tutorial, you set it to `"auto"` to tell AutoML to automatically determine it. Finally, create the pipeline by calling the helper function `create_pipeline`, which returns an instance of a training pipeline object. 
``` TRANSFORMATIONS = [ {"auto": {"column_name": "sepal_width"}}, {"auto": {"column_name": "sepal_length"}}, {"auto": {"column_name": "petal_length"}}, {"auto": {"column_name": "petal_width"}}, ] PIPE_NAME = "iris_pipe-" + TIMESTAMP MODEL_NAME = "iris_model-" + TIMESTAMP task = Value( struct_value=Struct( fields={ "target_column": Value(string_value=label_column), "prediction_type": Value(string_value="classification"), "train_budget_milli_node_hours": Value(number_value=1000), "disable_early_stopping": Value(bool_value=False), "transformations": json_format.ParseDict(TRANSFORMATIONS, Value()), } ) ) response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task) ``` Now save the unique identifier of the training pipeline you created. ``` # The full unique ID for the pipeline pipeline_id = response.name # The short numeric ID for the pipeline pipeline_short_id = pipeline_id.split("/")[-1] print(pipeline_id) ``` ### Get information on a training pipeline Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's `get_training_pipeline` method, with the following parameter: - `name`: The Vertex AI fully qualified pipeline identifier. When the model is done training, the pipeline state will be `PIPELINE_STATE_SUCCEEDED`. 
``` def get_training_pipeline(name, silent=False): response = clients["pipeline"].get_training_pipeline(name=name) if silent: return response print("pipeline") print(" name:", response.name) print(" display_name:", response.display_name) print(" state:", response.state) print(" training_task_definition:", response.training_task_definition) print(" training_task_inputs:", dict(response.training_task_inputs)) print(" create_time:", response.create_time) print(" start_time:", response.start_time) print(" end_time:", response.end_time) print(" update_time:", response.update_time) print(" labels:", dict(response.labels)) return response response = get_training_pipeline(pipeline_id) ``` # Deployment Training the above model may take upwards of 30 minutes time. Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, you will need to know the fully qualified Vertex AI Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field `model_to_deploy.name`. ``` while True: response = get_training_pipeline(pipeline_id, True) if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_to_deploy_id = None if response.state == aip.PipelineState.PIPELINE_STATE_FAILED: raise Exception("Training Job Failed") else: model_to_deploy = response.model_to_upload model_to_deploy_id = model_to_deploy.name print("Training Time:", response.end_time - response.start_time) break time.sleep(60) print("model to deploy:", model_to_deploy_id) ``` ## Model information Now that your model is trained, you can get some information on your model. ## Evaluate the Model resource Now find out how good the model service believes your model is. 
As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model. ### List evaluations for all slices Use this helper function `list_model_evaluations`, which takes the following parameter: - `name`: The Vertex AI fully qualified model identifier for the `Model` resource. This helper function uses the model client service's `list_model_evaluations` method, which takes the same parameter. The response object from the call is a list, where each element is an evaluation metric. For each evaluation (you probably only have one) we then print all the key names for each metric in the evaluation, and for a small set (`logLoss` and `auPrc`) you will print the result. ``` def list_model_evaluations(name): response = clients["model"].list_model_evaluations(parent=name) for evaluation in response: print("model_evaluation") print(" name:", evaluation.name) print(" metrics_schema_uri:", evaluation.metrics_schema_uri) metrics = json_format.MessageToDict(evaluation._pb.metrics) for metric in metrics.keys(): print(metric) print("logloss", metrics["logLoss"]) print("auPrc", metrics["auPrc"]) return evaluation.name last_evaluation = list_model_evaluations(model_to_deploy_id) ``` ## Model deployment for batch prediction Now deploy the trained Vertex AI `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for on-demand prediction. For online prediction, you: 1. Create an `Endpoint` resource for deploying the `Model` resource to. 2. Deploy the `Model` resource to the `Endpoint` resource. 3. Make online prediction requests to the `Endpoint` resource. For batch-prediction, you: 1. Create a batch prediction job. 2. The job service will provision resources for the batch prediction request. 3. The results of the batch prediction request are returned to the caller. 4. The job service will unprovision the resoures for the batch prediction request. 
## Make a batch prediction request Now do a batch prediction to your deployed model. ### Make test items You will use synthetic data as a test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction. ``` HEADING = "petal_length,petal_width,sepal_length,sepal_width" INSTANCE_1 = "1.4,1.3,5.1,2.8" INSTANCE_2 = "1.5,1.2,4.7,2.4" ``` ### Make the batch input file Now make a batch input file, which you will store in your local Cloud Storage bucket. Unlike image, video and text, the batch input file for tabular is only supported for CSV. For CSV file, you make: - The first line is the heading with the feature (fields) heading names. - Each remaining line is a separate prediction request with the corresponding feature values. For example: "feature_1", "feature_2". ... value_1, value_2, ... ``` import tensorflow as tf gcs_input_uri = BUCKET_NAME + "/test.csv" with tf.io.gfile.GFile(gcs_input_uri, "w") as f: f.write(HEADING + "\n") f.write(str(INSTANCE_1) + "\n") f.write(str(INSTANCE_2) + "\n") print(gcs_input_uri) ! gsutil cat $gcs_input_uri ``` ### Compute instance scaling You have several choices on scaling the compute instances for handling your batch prediction requests: - Single Instance: The batch prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one. - Manual Scaling: The batch prediction requests are split across a fixed number of compute instances that you manually specified. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and batch prediction requests are evenly distributed across them. - Auto Scaling: The batch prediction requests are split across a scaleable number of compute instances. 
- Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.

The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.

```
MIN_NODES = 1
MAX_NODES = 1
```

### Make batch prediction request

Now that your batch of two test items is ready, let's do the batch request. Use this helper function `create_batch_prediction_job`, with the following parameters:

- `display_name`: The human readable name for the prediction job.
- `model_name`: The Vertex AI fully qualified identifier for the `Model` resource.
- `gcs_source_uri`: The Cloud Storage path to the input file -- which you created above.
- `gcs_destination_output_uri_prefix`: The Cloud Storage path that the service will write the predictions to.
- `parameters`: Additional filtering parameters for serving prediction results.

The helper function calls the job client service's `create_batch_prediction_job` method, with the following parameters:

- `parent`: The Vertex AI location root path for Dataset, Model and Pipeline resources.
- `batch_prediction_job`: The specification for the batch prediction job.

Let's now dive into the specification for the `batch_prediction_job`:

- `display_name`: The human readable name for the prediction batch job.
- `model`: The Vertex AI fully qualified identifier for the `Model` resource.
- `dedicated_resources`: The compute resources to provision for the batch prediction job.
 - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
 - `starting_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.
- `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`. - `model_parameters`: Additional filtering parameters for serving prediction results. *Note*, image segmentation models do not support additional parameters. - `input_config`: The input source and format type for the instances to predict. - `instances_format`: The format of the batch prediction request file: `csv` only supported. - `gcs_source`: A list of one or more Cloud Storage paths to your batch prediction requests. - `output_config`: The output destination and format for the predictions. - `prediction_format`: The format of the batch prediction response file: `csv` only supported. - `gcs_destination`: The output destination for the predictions. This call is an asychronous operation. You will print from the response object a few select fields, including: - `name`: The Vertex AI fully qualified identifier assigned to the batch prediction job. - `display_name`: The human readable name for the prediction batch job. - `model`: The Vertex AI fully qualified identifier for the Model resource. - `generate_explanations`: Whether True/False explanations were provided with the predictions (explainability). - `state`: The state of the prediction job (pending, running, etc). Since this call will take a few moments to execute, you will likely get `JobState.JOB_STATE_PENDING` for `state`. 
``` BATCH_MODEL = "iris_batch-" + TIMESTAMP def create_batch_prediction_job( display_name, model_name, gcs_source_uri, gcs_destination_output_uri_prefix, parameters=None, ): if DEPLOY_GPU: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_type": DEPLOY_GPU, "accelerator_count": DEPLOY_NGPU, } else: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_count": 0, } batch_prediction_job = { "display_name": display_name, # Format: 'projects/{project}/locations/{location}/models/{model_id}' "model": model_name, "model_parameters": json_format.ParseDict(parameters, Value()), "input_config": { "instances_format": IN_FORMAT, "gcs_source": {"uris": [gcs_source_uri]}, }, "output_config": { "predictions_format": OUT_FORMAT, "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix}, }, "dedicated_resources": { "machine_spec": machine_spec, "starting_replica_count": MIN_NODES, "max_replica_count": MAX_NODES, }, } response = clients["job"].create_batch_prediction_job( parent=PARENT, batch_prediction_job=batch_prediction_job ) print("response") print(" name:", response.name) print(" display_name:", response.display_name) print(" model:", response.model) try: print(" generate_explanation:", response.generate_explanation) except: pass print(" state:", response.state) print(" create_time:", response.create_time) print(" start_time:", response.start_time) print(" end_time:", response.end_time) print(" update_time:", response.update_time) print(" labels:", response.labels) return response IN_FORMAT = "csv" OUT_FORMAT = "csv" # [csv] response = create_batch_prediction_job( BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME, None ) ``` Now get the unique identifier for the batch prediction job you created. 
``` # The full unique ID for the batch job batch_job_id = response.name # The short numeric ID for the batch job batch_job_short_id = batch_job_id.split("/")[-1] print(batch_job_id) ``` ### Get information on a batch prediction job Use this helper function `get_batch_prediction_job`, with the following paramter: - `job_name`: The Vertex AI fully qualified identifier for the batch prediction job. The helper function calls the job client service's `get_batch_prediction_job` method, with the following paramter: - `name`: The Vertex AI fully qualified identifier for the batch prediction job. In this tutorial, you will pass it the Vertex AI fully qualified identifier for your batch prediction job -- `batch_job_id` The helper function will return the Cloud Storage path to where the predictions are stored -- `gcs_destination`. ``` def get_batch_prediction_job(job_name, silent=False): response = clients["job"].get_batch_prediction_job(name=job_name) if silent: return response.output_config.gcs_destination.output_uri_prefix, response.state print("response") print(" name:", response.name) print(" display_name:", response.display_name) print(" model:", response.model) try: # not all data types support explanations print(" generate_explanation:", response.generate_explanation) except: pass print(" state:", response.state) print(" error:", response.error) gcs_destination = response.output_config.gcs_destination print(" gcs_destination") print(" output_uri_prefix:", gcs_destination.output_uri_prefix) return gcs_destination.output_uri_prefix, response.state predictions, state = get_batch_prediction_job(batch_job_id) ``` ### Get Predictions When the batch prediction is done processing, the job state will be `JOB_STATE_SUCCEEDED`. Finally you view the predictions stored at the Cloud Storage path you set as output. 
The predictions will be in a CSV format, which you indicated at the time you made the batch prediction job, under a subfolder starting with the name `prediction`, and under that folder will be a file called `predictions*.csv`. Now display (cat) the contents. You will see multiple rows, one for each prediction. For each prediction: - The first four fields are the values (features) you did the prediction on. - The remaining fields are the confidence values, between 0 and 1, for each prediction. ``` def get_latest_predictions(gcs_out_dir): """ Get the latest prediction subfolder using the timestamp in the subfolder name""" folders = !gsutil ls $gcs_out_dir latest = "" for folder in folders: subfolder = folder.split("/")[-2] if subfolder.startswith("prediction-"): if subfolder > latest: latest = folder[:-1] return latest while True: predictions, state = get_batch_prediction_job(batch_job_id, True) if state != aip.JobState.JOB_STATE_SUCCEEDED: print("The job has not completed:", state) if state == aip.JobState.JOB_STATE_FAILED: raise Exception("Batch Job Failed") else: folder = get_latest_predictions(predictions) ! gsutil ls $folder/prediction*.csv ! gsutil cat $folder/prediction*.csv break time.sleep(60) ``` # Cleaning up To clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. 
Otherwise, you can delete the individual resources you created in this tutorial: - Dataset - Pipeline - Model - Endpoint - Batch Job - Custom Job - Hyperparameter Tuning Job - Cloud Storage Bucket ``` delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True # Delete the dataset using the Vertex AI fully qualified identifier for the dataset try: if delete_dataset and "dataset_id" in globals(): clients["dataset"].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the training pipeline using the Vertex AI fully qualified identifier for the pipeline try: if delete_pipeline and "pipeline_id" in globals(): clients["pipeline"].delete_training_pipeline(name=pipeline_id) except Exception as e: print(e) # Delete the model using the Vertex AI fully qualified identifier for the model try: if delete_model and "model_to_deploy_id" in globals(): clients["model"].delete_model(name=model_to_deploy_id) except Exception as e: print(e) # Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint try: if delete_endpoint and "endpoint_id" in globals(): clients["endpoint"].delete_endpoint(name=endpoint_id) except Exception as e: print(e) # Delete the batch job using the Vertex AI fully qualified identifier for the batch job try: if delete_batchjob and "batch_job_id" in globals(): clients["job"].delete_batch_prediction_job(name=batch_job_id) except Exception as e: print(e) # Delete the custom job using the Vertex AI fully qualified identifier for the custom job try: if delete_customjob and "job_id" in globals(): clients["job"].delete_custom_job(name=job_id) except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex AI fully qualified identifier for the hyperparameter tuning job try: if delete_hptjob and "hpt_job_id" in globals(): clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id) 
except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME ```
github_jupyter
For this problem set, we'll be using the Jupyter notebook:

![](jupyter.png)

---

## Part A (2 points)

Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\leq i \leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`.

```
def squares(n):
    """Compute the squares of numbers from 1 to n, such that the
    ith element of the returned list equals i^2.
    """
    ### BEGIN SOLUTION
    # The sequence is only defined for n >= 1; reject anything smaller.
    if n < 1:
        raise ValueError("n must be greater than or equal to 1")
    return [i ** 2 for i in range(1, n + 1)]
    ### END SOLUTION
```

Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. Check that it does:

```
squares(10)

"""Check that squares returns the correct output for several inputs"""
assert squares(1) == [1]
assert squares(2) == [1, 4]
assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]

"""Check that squares raises an error for invalid inputs"""
try:
    squares(0)
except ValueError:
    pass
else:
    raise AssertionError("did not raise")

try:
    squares(-4)
except ValueError:
    pass
else:
    raise AssertionError("did not raise")
```

---

## Part B (1 point)

Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality.

```
def sum_of_squares(n):
    """Compute the sum of the squares of numbers from 1 to n."""
    ### BEGIN SOLUTION
    # Delegate to squares() so Part B reuses Part A, as required.
    return sum(squares(n))
    ### END SOLUTION
```

The sum of squares from 1 to 10 should be 385.
Verify that this is the answer you get:

```
sum_of_squares(10)

"""Check that sum_of_squares returns the correct answer for various inputs."""
assert sum_of_squares(1) == 1
assert sum_of_squares(2) == 5
assert sum_of_squares(10) == 385
assert sum_of_squares(11) == 506

"""Check that sum_of_squares relies on squares."""
# Temporarily remove squares from the namespace; a compliant
# sum_of_squares must then fail with NameError when called.
orig_squares = squares
del squares
try:
    sum_of_squares(1)
except NameError:
    pass
else:
    raise AssertionError("sum_of_squares does not use squares")
finally:
    squares = orig_squares
```

---

## Part C (1 point)

Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function.

$\sum_{i=1}^n i^2$

---

## Part D (2 points)

Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below.

```
def pyramidal_number(n):
    """Returns the n^th pyramidal number"""
    # The n-th square pyramidal number is 1^2 + 2^2 + ... + n^2.
    return sum_of_squares(n)
```
github_jupyter
## Data access for all data sets The Visualize and Analyze Data notebook utilized an Amazon s3 location for staged data that were only available during the live tutorial session. This notebook can be run as an alternative to the staged data location. After inputting your Earthdata login username and associated email address in the `Input Earthdata Login credentials` section, you can run this notebook in its entirety to return all data files in the Outputs folder. ### Import packages ``` import requests import getpass import socket import json import zipfile import io import math import os import shutil import pprint import re import time from statistics import mean from requests.auth import HTTPBasicAuth ``` ### Select data sets and determine version numbers ``` # Create dictionary of data set parameters we'll use in our access API command below. We'll start with data set IDs (e.g. ATL07) of interest here, also known as "short name". data_dict = { 'sea_ice_fb' : {'short_name' : 'ATL10'}, 'sea_ice_height' : {'short_name' : 'ATL07'}, 'ist' : {'short_name' : 'MOD29'}, } # Get json response from CMR collection metadata to grab version numbers and add to data_dict for i in range(len(data_dict)): cmr_collections_url = 'https://cmr.earthdata.nasa.gov/search/collections.json' response = requests.get(cmr_collections_url, params=list(data_dict.values())[i]) results = json.loads(response.content) # Find all instances of 'version_id' in metadata and print most recent version number versions = [el['version_id'] for el in results['feed']['entry']] versions = [i for i in versions if not any(c.isalpha() for c in i)] data_dict[list(data_dict.keys())[i]]['version'] = max(versions) ``` ### Select time and area of interest Data granules are returned based on a spatial bounding box and temporal range. 
``` # Bounding Box spatial parameter in 'W,S,E,N' decimal degrees format bounding_box = '140,72,153,80' #add bounding_box to each data set dictionary for k, v in data_dict.items(): data_dict[k]['bounding_box'] = bounding_box #Input temporal range in 'YYYY-MM-DDThh:mm:ssZ,YYYY-MM-DDThh:mm:ssZ' format temporal = '2019-03-23T00:00:00Z,2019-03-23T23:59:59Z' #add temporal to each data set dictionary for k, v in data_dict.items(): data_dict[k]['temporal'] = temporal ``` ### Determine how many granules exist over this time and area of interest, as well as the average size and total volume of those granules ``` # Query number of granules (paging over results) granule_search_url = 'https://cmr.earthdata.nasa.gov/search/granules' for i in range(len(data_dict)): params = { 'short_name': list(data_dict.values())[i]['short_name'], 'version': list(data_dict.values())[i]['version'], 'bounding_box': bounding_box, 'temporal': temporal, 'page_size': 100, 'page_num': 1 } granules = [] headers={'Accept': 'application/json'} while True: response = requests.get(granule_search_url, params=params, headers=headers) results = json.loads(response.content) if len(results['feed']['entry']) == 0: # Out of results, so break out of loop break # Collect results and increment page_num granules.extend(results['feed']['entry']) params['page_num'] += 1 print('There are', len(granules), 'granules of', list(data_dict.values())[i]['short_name'], 'version', list(data_dict.values())[i]['version'], 'over my area and time of interest.') for k, v in data_dict.items(): data_dict[k]['gran_num'] = len(granules) granule_sizes = [float(granule['granule_size']) for granule in granules] print(f'The average size of each granule is {mean(granule_sizes):.2f} MB and the total size of all {len(granules)} granules is {sum(granule_sizes):.2f} MB') print() ``` Note that subsetting, reformatting, or reprojecting can alter the size of the granules if those services are applied to your request. 
### Select subsetting, reformatting, and reprojection options for each dataset ### Input Earthdata Login credentials An Earthdata Login account is required to access data from the NSIDC DAAC. If you do not already have an Earthdata Login account, visit http://urs.earthdata.nasa.gov to register. ``` uid = '' # Enter Earthdata Login user name pswd = getpass.getpass('Earthdata Login password: ') # Input and store Earthdata Login password email = '' # Enter email associated with Earthata Login account ``` The NSIDC DAAC supports customization services on many of our NASA Earthdata mission collections. See the Customize and Access Data notebook to query the subsetting, reformatting, and reprojection service options available for your data set of interest. Since we already know these options, we'll add our subsetting requests directly into our data dictionary. ``` # Spatial and temporal subsetting for ATL10 data_dict['sea_ice_fb']['bbox'] = bounding_box data_dict['sea_ice_fb']['time'] = '2019-03-23T00:00:00,2019-03-23T23:59:59' # Spatial and temporal subsetting for ATL07 data_dict['sea_ice_height']['bbox'] = bounding_box data_dict['sea_ice_height']['time'] = '2019-03-23T00:00:00,2019-03-23T23:59:59' # Spatial subsetting and polar stereographic reprojection for MOD29 data_dict['ist']['bbox'] = bounding_box ``` Now let's select a subset of variables. 
We'll use these primary variables of interest for the ICESat-2 ATL07 product: ``` #ATL07 #Use only strong beams data_dict['sea_ice_height']['coverage'] = '/gt1l/sea_ice_segments/delta_time,\ /gt1l/sea_ice_segments/latitude,\ /gt1l/sea_ice_segments/longitude,\ /gt1l/sea_ice_segments/heights/height_segment_confidence,\ /gt1l/sea_ice_segments/heights/height_segment_height,\ /gt1l/sea_ice_segments/heights/height_segment_quality,\ /gt1l/sea_ice_segments/heights/height_segment_surface_error_est,\ /gt2l/sea_ice_segments/delta_time,\ /gt2l/sea_ice_segments/latitude,\ /gt2l/sea_ice_segments/longitude,\ /gt2l/sea_ice_segments/heights/height_segment_confidence,\ /gt2l/sea_ice_segments/heights/height_segment_height,\ /gt2l/sea_ice_segments/heights/height_segment_quality,\ /gt2l/sea_ice_segments/heights/height_segment_surface_error_est,\ /gt3l/sea_ice_segments/delta_time,\ /gt3l/sea_ice_segments/latitude,\ /gt3l/sea_ice_segments/longitude,\ /gt3l/sea_ice_segments/heights/height_segment_confidence,\ /gt3l/sea_ice_segments/heights/height_segment_height,\ /gt3l/sea_ice_segments/heights/height_segment_quality,\ /gt3l/sea_ice_segments/heights/height_segment_surface_error_est' ``` ### Select data access configurations The data request can be accessed asynchronously or synchronously. The asynchronous option will allow concurrent requests to be queued and processed without the need for a continuous connection. Those requested orders will be delivered to the specified email address, or they can be accessed programmatically as shown below. Synchronous requests will automatically download the data as soon as processing is complete. For this tutorial, we will be selecting the asynchronous method. 
``` #Set NSIDC data access base URL base_url = 'https://n5eil02u.ecs.nsidc.org/egi/request' for k, v in data_dict.items(): #Add email address data_dict[k]['email'] = email #Set the request mode to asynchronous data_dict[k]['request_mode'] = 'async' #Set the page size to the maximum for asynchronous requests page_size = 2000 data_dict[k]['page_size'] = page_size #Determine number of orders needed for requests over 2000 granules. page_num = math.ceil(data_dict[k]['gran_num']/page_size) data_dict[k]['page_num'] = page_num del data_dict[k]['gran_num'] ``` ### Create the API endpoint Programmatic API requests are formatted as HTTPS URLs that contain key-value-pairs specifying the service operations that we specified above. The following command can be executed via command line, a web browser, or in Python below. ``` endpoint_list = [] for k, v in data_dict.items(): param_string = '&'.join("{!s}={!r}".format(k,v) for (k,v) in v.items()) param_string = param_string.replace("'","") #Print API base URL + request parameters API_request = api_request = f'{base_url}?{param_string}' endpoint_list.append(API_request) if data_dict[k]['page_num'] > 1: for i in range(data_dict[k]['page_num']): page_val = i + 2 data_dict[k]['page_num'] = page_val API_request = api_request = f'{base_url}?{param_string}' endpoint_list.append(API_request) print("\n".join("\n"+s for s in endpoint_list)) ``` ### Request data We will now download data using the Python requests library. The data will be downloaded directly to this notebook directory in a new Outputs folder. The progress of each order will be reported. ``` from xml.etree import ElementTree as ET # Create an output folder if the folder does not already exist. 
path = str(os.getcwd() + '/Outputs') if not os.path.exists(path): os.mkdir(path) session = requests.session() # Request data service for each page number, and unzip outputs for k, v in data_dict.items(): for i in range(data_dict[k]['page_num']): page_val = i + 1 print(v['short_name'], 'Order: ', page_val) # For all requests other than spatial file upload, use get function request = session.get(base_url, params=v.items()) response = session.get(request.url,auth=(uid,pswd)) print('Request HTTP response: ', response.status_code) # Raise bad request: Loop will stop for bad response code. response.raise_for_status() #print('Order request URL: ', request.url) esir_root = ET.fromstring(response.content) #print('Order request response XML content: ', request.content) #Look up order ID orderlist = [] for order in esir_root.findall("./order/"): orderlist.append(order.text) orderID = orderlist[0] print('order ID: ', orderID) #Create status URL statusURL = base_url + '/' + orderID print('status URL: ', statusURL) #Find order status request_response = session.get(statusURL) print('HTTP response from order response URL: ', request_response.status_code) # Raise bad request: Loop will stop for bad response code. request_response.raise_for_status() request_root = ET.fromstring(request_response.content) statuslist = [] for status in request_root.findall("./requestStatus/"): statuslist.append(status.text) status = statuslist[0] print('Data request ', page_val, ' is submitting...') print('Initial request status is ', status) #Continue loop while request is still processing while status == 'pending' or status == 'processing': print('Status is not complete. Trying again.') time.sleep(10) loop_response = session.get(statusURL) # Raise bad request: Loop will stop for bad response code. 
loop_response.raise_for_status() loop_root = ET.fromstring(loop_response.content) #find status statuslist = [] for status in loop_root.findall("./requestStatus/"): statuslist.append(status.text) status = statuslist[0] print('Retry request status is: ', status) if status == 'pending' or status == 'processing': continue #Order can either complete, complete_with_errors, or fail: # Provide complete_with_errors error message: if status == 'complete_with_errors' or status == 'failed': messagelist = [] for message in loop_root.findall("./processInfo/"): messagelist.append(message.text) print('error messages:') pprint.pprint(messagelist) # Download zipped order if status is complete or complete_with_errors if status == 'complete' or status == 'complete_with_errors': downloadURL = 'https://n5eil02u.ecs.nsidc.org/esir/' + orderID + '.zip' print('Zip download URL: ', downloadURL) print('Beginning download of zipped output...') zip_response = session.get(downloadURL) # Raise bad request: Loop will stop for bad response code. zip_response.raise_for_status() with zipfile.ZipFile(io.BytesIO(zip_response.content)) as z: z.extractall(path) print('Data request', page_val, 'is complete.') else: print('Request failed.') print() ``` ### Finally, we will clean up the Output folder by removing individual order folders: ``` # Clean up Outputs folder by removing individual granule folders for root, dirs, files in os.walk(path, topdown=False): for file in files: try: shutil.move(os.path.join(root, file), path) except OSError: pass for name in dirs: os.rmdir(os.path.join(root, name)) ```
github_jupyter
# MAT281 - Laboratorio N°11 <a id='p1'></a> ## I.- Problema 01 Lista de actos delictivos registrados por el Service de police de la Ville de Montréal (SPVM). <img src="http://henriquecapriles.com/wp-content/uploads/2017/02/femina_detenida-1080x675.jpg" width="480" height="360" align="center"/> El conjunto de datos en estudio `interventionscitoyendo.csv` corresponde a todos los delitos entre 2015 y agosto de 2020en Montreal. Cada delito está asociado en grandes categorías, y hay información sobre la ubicación, el momento del día, etc. > **Nota**: Para más información seguir el siguiente el [link](https://donnees.montreal.ca/ville-de-montreal/actes-criminels). ``` # librerias import os import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import statsmodels.api as sm from statsmodels.tsa.statespace.sarimax import SARIMAX from metrics_regression import * # graficos incrustados plt.style.use('fivethirtyeight') %matplotlib inline import warnings warnings.filterwarnings('ignore') # parametros esteticos de seaborn sns.set_palette("deep", desat=.6) sns.set_context(rc={"figure.figsize": (12, 4)}) # read data validate_categorie = [ 'Introduction', 'Méfait','Vol dans / sur véhicule à moteur', 'Vol de véhicule à moteur', ] df = pd.read_csv(os.path.join("data","interventionscitoyendo.csv"), sep=",", encoding='latin-1') df.columns = df.columns.str.lower() df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d') df = df.loc[lambda x: x['categorie'].isin(validate_categorie)] df = df.sort_values(['categorie','date']) df.head() ``` Como tenemos muchos datos por categoría a nivel de día, agruparemos a nivel de **semanas** y separaremos cada serie temporal. 
``` cols = ['date','pdq'] y_s1 = df.loc[lambda x: x.categorie == validate_categorie[0] ][cols].set_index('date').resample('W').mean() y_s2 = df.loc[lambda x: x.categorie == validate_categorie[1] ][cols].set_index('date').resample('W').mean() y_s3 = df.loc[lambda x: x.categorie == validate_categorie[2] ][cols].set_index('date').resample('W').mean() y_s4 = df.loc[lambda x: x.categorie == validate_categorie[3] ][cols].set_index('date').resample('W').mean() ``` El objetivo de este laboratorio es poder realizar un análisis completo del conjunto de datos en estudio, para eso debe responder las siguientes preguntas: 1. Realizar un gráfico para cada serie temporal $y\_{si}, i =1,2,3,4$. ``` # graficar datos y_s1.plot(figsize=(15, 3),color = 'blue') y_s2.plot(figsize=(15, 3),color = 'red') y_s3.plot(figsize=(15, 3),color = 'green') y_s4.plot(figsize=(15, 3),color = 'pink') plt.show() ``` 2. Escoger alguna serie temporal $y\_{si}, i =1,2,3,4$. Luego: * Realice un análisis exploratorio de la serie temporal escogida * Aplicar el modelo de pronóstico $SARIMA(p,d,q)x(P,D,Q,S)$, probando varias configuraciones de los hiperparámetros. Encuentre la mejor configuración. Concluya. * Para el mejor modelo encontrado, verificar si el residuo corresponde a un ruido blanco. > **Hint**: Tome como `target_date` = '2021-01-01'. Recuerde considerar que su columna de valores se llama `pdq`. 
# EDA

```
y_s1.head()

y_s1.sort_values(['date'])

y_s1.describe().T

# box-and-whisker plot, one box per year
fig, ax = plt.subplots(figsize=(15,6))
sns.boxplot(y_s1.index.year, y_s1.pdq, ax=ax)
plt.show()

from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from matplotlib import pyplot

pyplot.figure(figsize=(12,9))
# acf
pyplot.subplot(211)
plot_acf(y_s1.pdq, ax=pyplot.gca(), lags = 30)
#pacf
pyplot.subplot(212)
plot_pacf(y_s1.pdq, ax=pyplot.gca(), lags = 30)
pyplot.show()

from pylab import rcParams
import statsmodels.api as sm
import matplotlib.pyplot as plt

rcParams['figure.figsize'] = 18, 8
decomposition = sm.tsa.seasonal_decompose(y_s1, model='multiplicative')
fig = decomposition.plot()
plt.show()
```

# Modelo SARIMA

```
# defining the SarimaModels class
class SarimaModels:
    """Thin wrapper around statsmodels' SARIMAX for train/test evaluation.

    params is expected to be a pair ((p, d, q), (P, D, Q, S)) -- TODO confirm
    with the caller that builds the hyperparameter grid.
    """

    def __init__(self,params):
        # (order, seasonal_order) hyperparameters for SARIMAX
        self.params = params

    @property
    def name_model(self):
        # Human-readable model id, e.g. "SARIMA_(1,0,1)X(1,0,1,12)"
        return f"SARIMA_{self.params[0]}X{self.params[1]}".replace(' ','')

    @staticmethod
    def test_train_model(y,date):
        # Chronological split: observations strictly before `date` are train,
        # the rest are test (no shuffling -- this is a time series).
        mask_ds = y.index < date
        y_train = y[mask_ds]
        y_test = y[~mask_ds]
        return y_train, y_test

    def fit_model(self,y,date):
        """Fit a SARIMAX model on the training portion of y and return it."""
        y_train, y_test = self.test_train_model(y,date )
        model = SARIMAX(y_train,
                        order=self.params[0],
                        seasonal_order=self.params[1],
                        enforce_stationarity=False,
                        enforce_invertibility=False)
        model_fit = model.fit(disp=0)
        return model_fit

    def df_testig(self,y,date):
        """Return a DataFrame with observed ('y') and predicted ('yhat')
        values over the test period.

        NOTE(review): the name looks like a typo for "df_testing"; kept
        as-is because external cells call it by this name. Also refits the
        model rather than reusing fit_model -- duplicated work.
        """
        y_train, y_test = self.test_train_model(y,date )
        model = SARIMAX(y_train,
                        order=self.params[0],
                        seasonal_order=self.params[1],
                        enforce_stationarity=False,
                        enforce_invertibility=False)
        model_fit = model.fit(disp=0)
        # One-step-ahead (non-dynamic) predictions over the full test window.
        start_index = y_test.index.min()
        end_index = y_test.index.max()
        preds = model_fit.get_prediction(start=start_index,end=end_index, dynamic=False)
        df_temp = pd.DataFrame(
            {
                'y':y_test['pdq'],
                'yhat': preds.predicted_mean
            }
        )
        return df_temp

    def metrics(self,y,date):
        """Compute error metrics (via the external summary_metrics helper)
        for this configuration, tagged with the model's name."""
        df_temp = self.df_testig(y,date)
        df_metrics = summary_metrics(df_temp)
        df_metrics['model'] = self.name_model
        return df_metrics

# 
definir parametros import itertools p = d = q = range(0, 2) pdq = list(itertools.product(p, d, q)) seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))] params = list(itertools.product(pdq,seasonal_pdq)) target_date = '2021-01-01' # iterar para los distintos escenarios frames = [] for param in params: try: sarima_model = SarimaModels(param) df_metrics = sarima_model.metrics(y_s1,target_date) frames.append(df_metrics) except: pass # juntar resultados de las métricas y comparar df_metrics_result = pd.concat(frames) df_metrics_result.sort_values(['mae','mape']) param = [(0,0,0),(1,0,1,12)] sarima_model = SarimaModels(param) model_fit = sarima_model.fit_model(y_s1,target_date) best_model = sarima_model.df_testig(y_s1,target_date) best_model.head() # graficar mejor modelo preds = best_model['yhat'] ax = y_s1['2020':].plot(label='observed') preds.plot(ax=ax, label='Forecast', alpha=.7, figsize=(14, 7)) ax.set_xlabel('Date') ax.set_ylabel('PDQ') plt.legend() plt.show() ``` **Conclusión**: La configuración la cual minimiza el modelo según mae y mape, a pesar de ser el mejor, no es un buen modelo de pronostico SARIMA. # Verificación de ruido blanco ``` # resultados del error model_fit.plot_diagnostics(figsize=(16, 8)) plt.show() ``` * **gráfico standarized residual**:En este caso se observa que esta nueva serie de tiempo corresponde a una serie estacionaria que oscila entorno al cero, por lo tanto es un ruido blanco. * **gráfico histogram plus estimated density**: En este caso, el histograma es muy similar al histograma de una variable N(0,1), por lo que es ruido blanco. * **gráfico correlogramse**: observa que no hay correlación entre ninguna de las variables, por lo que se puedan dar indicios de independencia entre las variables. **Conclusión:** Dados los graficos de los errores asociados al modelo, se concluye que el error asociado al modelo en estudio corresponde a un ruido blanco.
github_jupyter
# Vacinação Geral no Brasil ![](https://raw.githubusercontent.com/diascarolina/vacinacao-geral-no-brasil/main/other/banner.jpg?token=AH6WME4BOFRN6LIHPSJL6KDAZVPJW) # Sumário - [1 Introdução](#intro) - [2 Dados](#dados) - [3 Importação das Bibliotecas & Configurações](#import) - [4 Limpeza dos Dados](#clean) - [5 Panorama Geral da Vacinação no Brasil](#vacina) - [5.1 Cobertura Vacinal por Região do Brasil, de 1994 a 2019](#cv) - [5.1.2 Análise Gráfica](#analiseum) - [5.2 Cobertura Vacinal e Valor Absoluto por Imunizantes no Brasil em 2015](#cv2) - [5.2.2 Análise Gráfica](#analisedois) - [5.3 Linha do Tempo da Vacinação no Brasil, de 1994 a 2019](#linha) - [6 Conclusão](#conc) - [7 Referências](#ref) <a name="intro"></a> # 1 Introdução <a href="https://github.com/diascarolina/vacinacao-geral-no-brasil/blob/main/other/syringe.png"> <img align="right" src="https://raw.githubusercontent.com/diascarolina/vacinacao-geral-no-brasil/main/other/syringe.png?token=AH6WMEZBFIPTIFQTPBFZKFDAZQRI6"> </a> Já é de conhecimento de todos que o principal assunto dos últimos meses, nos quais estamos vivendo em uma pandemia de Covid-19, é a vacinação. Diariamente checamos a quantidade de pessoas já vacinadas e aguardamos ansiosamente o momento em que poderemos dizer "Estou imunizado contra a Covid (com a PIFAIZÊR)". Mas será que já enfrentamos algo parecido? Em uma escala mundial talvez não recentemente, mas em uma escala regional não é a primeira vez que ficamos à mercê de doenças que poderiam ser evitadas com uma vacinação efetiva. 
É nesse contexto que torna-se relevante olharmos para o passado e estudarmos casos anteriores de doenças que foram superadas com vacinas, as chamadas doenças imunopreveníveis, para que possamos traçar um paralelo com os dias de hoje, pois, como diria aquela famosa frase atribuída ao filósofo irlandês Edmund Burke, _"Aqueles que não conhecem a história estão fadados a repeti-la."_ Pelo surgimento e avanço de grupos anti-vacina e pelo comportamento de quem deveria estar liderando o país, parece que essa lição não foi aprendida. Assim, iniciaremos com uma análise geral sobre os imunizantes mais aplicados e utilizados no calendário vacinal brasileiro. Buscamos validar duas hipóteses propostas e que, à primeira vista, nos parecem óbvias: > **Hipótese 1:** A cobertura vacinal vem crescendo anualmente em todo o país. > **Hipótese 2:** Vacinas que devem ser aplicadas em bebês logo após o nascimento, como a BCG e da Hepatite B, principalmente, possuem maior cobertura vacinal e também maior valor absoluto de aplicações. 
<div class="alert alert-success"> <strong>Clique <a href='https://github.com/diascarolina/vacinacao-geral-no-brasil/blob/main/notebooks/sarampo.ipynb'>aqui</a> para acessar a Parte 2, na qual faço uma análise sobre o surto de Sarampo ocorrido entre 2013 a 2015 em Pernambuco e no Ceará.</strong> </div> <div class="alert alert-success"> <strong><a href='https://carodias.medium.com/vacina%C3%A7%C3%A3o-no-brasil-o-que-aprendemos-com-o-surto-de-sarampo-em-2014-56d8518c3ef0'>Análise Completa no Medium</a></strong> </div> <a name="dados"></a> # 2 Dados <a href="https://raw.githubusercontent.com/diascarolina/vacinacao-geral-no-brasil/main/other/pni.png?token=AH6WME7VQUE662N2BI6F6Q3AZQQRQ"> <img align="right" src="https://raw.githubusercontent.com/diascarolina/vacinacao-geral-no-brasil/main/other/pni.png?token=AH6WME7VQUE662N2BI6F6Q3AZQQRQ"> </a> Os dados utilizados nas análises foram obtidos através do dados abertos do Sistema Único de Saúde (SUS), mais especificamente os dados do [Programa Nacional de Imunização (PNI)](http://pni.datasus.gov.br/apresentacao.asp). Criado em 1973 pelo Ministério da Saúde, o PNI tem como objetivo coordenar as ações de imunizações em todo o território nacional, traçando diretrizes e prestando serviços integrais de saúde através de sua rede própria. É uma ferramenta essencial para a manutenção da saúde pública brasileira, tendo sido responsável por erradicar ou controlar diversas doenças imunopreveníveis, como o sarampo, a Hepatite B, e a Poliomielite. Com essa contextualização, vamos à parte técnica dos dados. 
**Caminho para a obtenção dos dados brutos:** [TABNET](http://www2.datasus.gov.br/DATASUS/index.php?area=02) ➔ [Assistência à Saúde](http://www2.datasus.gov.br/DATASUS/index.php?area=0202) ➔ [Imunizações - desde 1994](http://www2.datasus.gov.br/DATASUS/index.php?area=0202&id=11637) ➔ [Doses Aplicadas](http://tabnet.datasus.gov.br/cgi/tabcgi.exe?pni/cnv/cpniuf.def) Nesta última página foram feitas as seguintes seleções: - **Linha:** Imuno ou Ano, dependendo da análise feita; - **Coluna**: Região (do país); - **Medidas:** Coberturas Vacinais ou Doses Cálculos CV, a depender da análise; - **Períodos Disponíveis**: todos os anos de 1994 a 2019; Esses dados foram atualizados pela última vez em 04/09/2019. Uma versão mais recente desses dados pode ser encontrada nessa [página do DATASUS](http://tabnet.datasus.gov.br/cgi/tabcgi.exe?pni/cnv/cpniuf.def) que teve última atualização em 10/06/2021. Uma nova análise futura poderá utilizar esses dados mais recentes. Também há um documento com as [Notas Técnicas](http://tabnet.datasus.gov.br/cgi/pni/Imun_cobertura_desde_1994.pdf) para os dados. Nele encontramos a **origem** e a **descrição** de algumas variáveis presentes nos dados. Como mais relevante destaco a seguinte informação, que nos diz como é feito o cálculo da cobertura vacinal, que representa uma porcentagem da população alvo que recebeu o imunizante: > _"A fórmula de cálculo da cobertura é o número de doses aplicadas da dose indicada (1ª, 2ª, 3ª dose ou dose única, conforme a vacina) dividida pela população alvo, multiplicado por 100. Exemplo: para a Tetravalente (DTP/Hib), considera-se o número de terceiras doses aplicadas na faixa etária de menores de 1 ano. 
Para a vacina oral de rotavírus humano, pode-se avaliar cobertura de 1ª e 2ª doses."_ <a name="import"></a> # 3 Importação das Bibliotecas & Configurações ``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import matplotlib.ticker as ticker from wordcloud import WordCloud from PIL import Image # formatar os números para mostrarem duas casa decimais pd.options.display.float_format = '{:.2f}'.format # configurações de cores GRAY1, GRAY2, GRAY3 = '#231F20', '#414040', '#555655' GRAY4, GRAY5, GRAY6 = '#646369', '#76787B', '#828282' GRAY7, GRAY8, GRAY9 = '#929497', '#A6A6A5', '#BFBEBE' BLUE1, BLUE2, BLUE3, BLUE4 = '#174A7E', '#4A81BF', '#94B2D7', '#94AFC5' RED1, RED2 = '#C3514E', '#E6BAB7' GREEN1, GREEN2 = '#0C8040', '#9ABB59' ORANGE1 = '#F79747' # configurações da fonte utilizada nos gráficos plt.rcParams['font.family'] = 'Arial' plt.rcParams['mathtext.fontset'] = 'custom' plt.rcParams['mathtext.bf'] = 'Arial:bold' plt.rcParams['mathtext.it'] = 'Arial:italic' ``` <a name="clean"></a> # 4 Limpeza dos Dados Por questões de tamanho e organização, a parte de limpeza dos datasets foi realizada em um outro notebook linkado a seguir: <div class="alert alert-success"> <strong><a href='https://github.com/diascarolina/vacinacao-geral-no-brasil/blob/main/notebooks/limpeza_dados.ipynb'>Notebook com a limpeza dos dados.</a></strong> </div> Cada arquivo _.csv_ com os dados brutos utilizados foi tratado no notebook acima e salvo uma nova versão com os dados limpos que são os dados importados no notebook atual. <a name="vacina"></a> # 5 Panorama Geral da Vacinação no Brasil Aqui nessa primeira parte daremos um panorama geral de como andam as coberturas vacinais no país no período de 1994 a 2019, além de analisarmos quais as vacinas mais aplicadas em números absolutos. Para termos uma base para seguirmos nessa análise, levantamos duas hipóteses: - **Hipótese 1:** A cobertura vacinal vem crescendo anualmente em todo o país. 
- **Hipótese 2:** Vacinas que devem ser aplicadas em bebês logo após o nascimento, como a [BCG e da Hepatite B](https://www.unimedlondrina.com.br/noticias/tudo-saude/07/06/2018/vacinas-importantes-recem-nascidos/), principalmente, possuem maior cobertura vacinal e também maior valor absoluto de aplicações. <a name="cv"></a> ## 5.1 Cobertura Vacinal por Região do Brasil, de 1994 a 2019 Aqui iremos considerar a cobertura vacinal de todos os imunizantes presentes na base de dados. Esse total é a média de todos os 26 imunizantes (aqui considerando também a quantidade de doses que cada um necessita). Muitos dos imunizantes tiveram um determinado período em que foram utilizados no calendário vacinal brasileiro, como, por exemplo, a vacina Dupla Viral, que esteve presente de 2001 a 2004, vindo a ser substituída pela tríplice viral, como visto nas [Notas Técnicas](http://tabnet.datasus.gov.br/cgi/pni/Imun_cobertura_desde_1994.pdf). Como já fizemos a limpeza dos dados anteriormente, podemos já importar o dataset e analisá-lo. ``` # importação dos dados da cobertura vacinal por região cv_reg = pd.read_csv('../dados/clean/cv_reg.csv', encoding = 'ISO-8859-1', index_col = 'Unnamed: 0') cv_reg.head() ``` A coluna ```Total``` refere-se à média das 5 regiões, então podemos usá-la como métrica para avaliar a cobertura vacinal do Brasil levando em consideração todas as vacinas disponíveis na base de dados. 
<a name="analiseum"></a> ### 5.1.2 Análise Gráfica ``` ax = cv_reg['Total'].plot(figsize = (15, 10), linewidth = 4) plt.ylim(0, 100) ax.tick_params(color = 'darkgrey', bottom = 'off') ax.spines['bottom'].set_color('darkgrey') ax.spines['left'].set_color('darkgrey') sns.despine() for i in ax.get_yticklabels() + ax.get_xticklabels(): i.set_fontsize(15) i.set_color(GRAY5) plt.title('$\\bf{Cobertura\ Vacinal\ no\ Brasil\ de\ 1994\ a\ 2019}$\nMédia total de todos os imunizantes considerados', loc = 'left', fontsize = 25, color = GRAY4) plt.ylabel('Cobertura Vacinal (em %)', loc = 'top', fontsize = 20, color = GRAY5) plt.xlabel('Ano', loc = 'left', fontsize = 20, color = GRAY5) ax.set_xticks(range(1994, 2020, 2)) plt.grid(color = 'lightgrey', linewidth = 0.5) plt.show() ``` Algumas considerações e observações que obtemos não só desse gráfico, mas também do conhecimento que temos da base de dados: - Os dados foram atualizados pela última vez em **04/09/2019**, como visto no próprio site. Ou seja, as informações sobre 2019 estão incompletas. - O que aconteceu em **2016** que explique uma **queda tão brusca** na cobertura vacinal? Pelo nosso conhecimento dessa base de dados do Governo, sabemos que os dados sofreram diversas mudanças de infraestrutura ao longo dos anos, até mesmo com inserção duplicada de valores e corrigidas posteriormente. Isso tudo é explicado nas notas de rodapé do [site](http://tabnet.datasus.gov.br/cgi/tabcgi.exe?pni/cnv/cpniuf.def) de onde os dados brutos foram retirados. Então esse ano atípico de 2016 pode muito bem ser explicado por **inconsistências** na base de dados. Mas isso é só uma especulação, não podemos ter certeza sem uma pesquisa mais a fundo. De **1994 até 2015**, aproximadamente, parece que temos dados mais consistentes. Vamos então observar esse período de tempo. 
# Same coverage series, truncated to drop the incomplete/inconsistent 2016-2019 tail.
ax = cv_reg[:-4]['Total'].plot(figsize = (15, 10), linewidth = 4)
plt.ylim(0, 100)

# Soften the frame: grey ticks and spines, no top/right spines.
ax.tick_params(color = 'darkgrey', bottom = 'off')
for side in ('bottom', 'left'):
    ax.spines[side].set_color('darkgrey')
sns.despine()

# Enlarge and grey out the tick labels on both axes.
for label in ax.get_yticklabels() + ax.get_xticklabels():
    label.set_color(GRAY5)
    label.set_fontsize(15)

plt.title('$\\bf{Cobertura\ Vacinal\ no\ Brasil\ de\ 1994\ a\ 2015}$\nMédia total de todos os imunizantes considerados',
          loc = 'left', fontsize = 25, color = GRAY4)
plt.ylabel('Cobertura Vacinal (em %)', loc = 'top', fontsize = 20, color = GRAY5)
plt.xlabel('Ano', loc = 'left', fontsize = 20, color = GRAY5)

# One tick every other year over the plotted 1994-2015 window.
ax.set_xticks(range(1994, 2016, 2))
plt.grid(color = 'lightgrey', linewidth = 0.5)
plt.show()
``` cv_reg_ano = pd.read_csv('../dados/clean/cv_reg_ano.csv', encoding = 'ISO-8859-1') cv_reg_ano.head() ``` <a name="analisedois"></a> ### 5.2.2 Análise Gráfica ``` ax = cv_reg_ano.set_index('Imuno').sort_values('2015')['2015'].T.plot(figsize = (18, 10), kind = 'barh', color = BLUE2) ax.tick_params(color = 'darkgrey', bottom = 'off') ax.spines['bottom'].set_color('darkgrey') sns.despine(left = True) for i in ax.get_yticklabels() + ax.get_xticklabels(): i.set_fontsize(12) i.set_color(GRAY4) ax.yaxis.label.set_visible(False) ax.yaxis.set_tick_params(length = 0) ax.xaxis.set_tick_params(length = 0) ax.xaxis.set_major_formatter(ticker.PercentFormatter(100)) plt.grid(axis = 'x', zorder = 0, linewidth = 0.4, color = 'lightgray') plt.text(0, 27, '$\\bf{Cobertura\ Vacinal\ por\ Tipo\ de\ Imunizante}$', fontsize = 22, color = GRAY4) plt.text(0, 26, 'Porcentagem considerando o ano de 2015', fontsize = 20, color = GRAY5) plt.text(0, -2.5, 'Cobertura Vacinal', fontsize = 18, color = GRAY6) plt.show() ``` Esse gráfico já nos ajuda a validar a **Hipótese 2**, pois vemos que realmente as vacinas que possuem uma maior cobertura vacinal são aquelas aplicadas em recém-nascidos, como a vacina BCG e a da Hepatite B, entre outras. Vacinas com valores de 0% na cobertura vacinal tiveram sua aplicação finalizada antes do período analisado, a saber, 2015. Para completarmos a **Hipótese 2**, basta analisarmos os valores absolutos dos imunizantes aplicados. Vamos lá?! Para isso, usarei a soma total de todas as aplicações de imunizantes de 1994 a 2019. ``` tipo_regiao = pd.read_csv('../dados/clean/tipo_regiao.csv', encoding = 'ISO-8859-1') tipo_regiao.sort_values('Total', ascending = False).head() ``` Como seria uma uma boa forma de visualizar esse total de imunizantes aplicados? Um clássico gráfico de barras, claro! 
Mas antes, vamos tentar visualizar com uma "wordcloud", _just for fun!_ ``` tipo_regiao['Imuno'] = tipo_regiao['Imuno'].str.replace(' ', '_') tipo_regiao = tipo_regiao[['Imuno', 'Total']] d = {} for Imuno, Total in tipo_regiao.values: d[Imuno] = Total vaccine_mask = np.array(Image.open('../other/vacina.png')) wordcloud = WordCloud(background_color = 'white', width = 1000, height = 500, mask = vaccine_mask, contour_width = 3, contour_color = 'lightblue', colormap = 'winter') wordcloud.generate_from_frequencies(frequencies = d) plt.figure(figsize = (20, 15)) plt.text(100, -50, 'Imunizantes Mais Aplicados no Brasil', fontsize = 30, color = GRAY4) plt.text(100, -15, 'Total Absoluto de Doses de 1994 a 2019', fontsize = 25, color = GRAY5) plt.imshow(wordcloud, interpolation = 'bilinear') plt.axis('off') plt.show() ``` Se formos nos basear pelo tamanho da fonte, que é como é feita a leitura de uma _wordcloud_, vemos que a vacina BCG e a da Poliomielite são as mais aplicadas, em valores absolutos, seguidas pelo imunizante DPT. Vamos confirmar isso em uma visualização mais tradicional abaixo. 
``` tipo_regiao['Imuno'] = tipo_regiao['Imuno'].str.replace('_', ' ') ax = tipo_regiao.set_index('Imuno').sort_values('Total')['Total'].T.plot(figsize = (18, 10), kind = 'barh', color = BLUE2) ax.tick_params(color = 'darkgrey', bottom = 'off') ax.spines['bottom'].set_color('darkgrey') sns.despine(left = True) for i in ax.get_yticklabels() + ax.get_xticklabels(): i.set_fontsize(12) i.set_color(GRAY4) ax.yaxis.label.set_visible(False) ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}')) ax.yaxis.set_tick_params(length = 0) ax.xaxis.set_tick_params(length = 0) plt.grid(axis = 'x', zorder = 1, linewidth = 0.4, color = 'lightgray') plt.text(-30, 26, '$\\bf{Imunizantes\ Mais\ Aplicados\ no\ Brasil}$', fontsize = 22, color = GRAY4) plt.text(-30, 25, 'Total Absoluto de Doses de 1994 a 2019', fontsize = 20, color = GRAY5) plt.text(0, -2.5, 'Valores Absolutos', fontsize = 18, color = GRAY6) plt.show() ``` Finalmente podemos validar nossa **Hipótese 2** a partir dos dados desse gráfico e do dataframe ```tipo_regiao```: > Vacinas que devem ser aplicadas em bebês logo após o nascimento, como a [BCG e da Hepatite B](https://www.unimedlondrina.com.br/noticias/tudo-saude/07/06/2018/vacinas-importantes-recem-nascidos/), principalmente, possuem maior cobertura vacinal e também maior valor absoluto de aplicações. Observamos que os imunizantes BCG, Poliomielite e DTP foram os mais aplicados, em valores absolutos, no Brasil, de 1994 a 2019. Esses são, justamente, os aplicados em recém-nascidos. Isso se explica pois, ao serem aplicados já na maternidade, não há o risco de esquecimento ou falta de vontade para a aplicação dessas vacinas, como ocorre frequentemente com outras que são aplicadas em adultos. 
<a name="linha"></a> ## 5.3 Linha do Tempo da Vacinação no Brasil, de 1994 a 2019 Para finalizarmos nossa contextualização geral sobre a vacinação no Brasil de 1994 a 2019 (ano da última atualização dos dados), vamos observar uma linha do tempo de quando cada vacina foi introduzida do calendário vacinal. ``` # configuração das informações datas1 = ['1994\nBCG, FebrAmar, HepB,\nSarampo, Polio, TB', '2000\nInflu B\nTríp. Viral', '2004\nFim da Dupla Viral e Início\nda Campanha da Tríplice Viral', '2006\nContra o\nRotavírus Humano', '2019\nÚltima Atualizaçao\nDos Dados'] tl_x = [1, 2.7, 4.35, 5.6, 9] tl_sub_x = [2.3, 3.15, 3.56] tl_sub_times = ["1999", "2001", "2002"] tl_text = ["Campanha Influenza", "Dupla Viral", "Influenza B (Fim)\nSarampo (Fim)\nTetravalente"] # inicialização do gráfico fig, ax = plt.subplots(figsize = (22, 6), constrained_layout = True) ax.set_ylim(-2, 1.75) ax.set_xlim(0, 10) # desenha a linha ax.axhline(0, xmin = 0.1, xmax = 0.9, c = 'forestgreen', zorder = 1) # desenha os pontos ax.scatter(tl_x, np.zeros(len(tl_x)), s = 120, c = 'green', zorder = 2) ax.scatter(tl_x, np.zeros(len(tl_x)), s = 30, c = 'green', zorder = 3) ax.scatter(tl_sub_x, np.zeros(len(tl_sub_x)), s = 50, c = 'green', zorder = 4) # texto for x, date in zip(tl_x, datas1): ax.text(x, -0.5, date, ha = 'center', fontfamily = 'serif', fontweight = 'bold', color = 'royalblue', fontsize = 12) # linhas verticais levels = np.zeros(len(tl_sub_x)) levels[::-2] = 0.3 levels[1::2] = -0.3 markerline, stemline, baseline = ax.stem(tl_sub_x, levels, use_line_collection = True) plt.setp(baseline, zorder = 0) plt.setp(markerline, marker = ',', color = 'green') plt.setp(stemline, color = 'green') # texto for idx, x, time, txt in zip(range(1, len(tl_sub_x) + 1), tl_sub_x, tl_sub_times, tl_text): ax.text(x, 1.3*(idx%2)-0.5, time, ha = 'center', fontfamily = 'serif', fontweight = 'bold', color = 'royalblue', fontsize = 12) ax.text(x, 1.3*(idx%2)-0.6, txt, va = 'top', ha = 'center', fontweight = 
'bold', fontfamily = 'serif', color = 'royalblue', fontsize = 13) for spine in ['left', 'top', 'right', 'bottom']: ax.spines[spine].set_visible(False) ax.set_xticks([]) ax.set_yticks([]) ax.set_title('Introdução das Vacinas no Calendário Vacinal', fontweight = 'bold', fontfamily = 'serif', fontsize = 25, color = 'royalblue') ax.text(2.1, 1.57, 'Ano no qual cada vacina foi introduzida no calendário vacinal brasileiro a partir de 1994', fontfamily = 'serif', fontsize = 20, color = 'mediumblue') plt.show() ``` Essas informações podem ser encontradas nas [Notas Técnicas sobre as Imunizações](http://tabnet.datasus.gov.br/cgi/pni/Imun_cobertura_desde_1994.pdf). É relevante notar que grande parte dos imunizantes foram introduzidos antes de 1994 (início da contabilização dos dados nessa base) e continuam os mesmos até a data de atualização em 2019. Esses são, por exemplo: - **BCG** (Contra Tuberculose); - **FA** (Contra Febre Amarela); - **HB** (Contra Hepatite B); - **VOP** (Oral Contra Poliomielite); - **DTP** (Tríplice Bacteriana). <a name="conc"></a> # 6 Conclusão Após essa análise, conseguimos obter algumas conclusões relevantes e também relacionadas as nossas hipóteses. - **Hipótese 1:** A cobertura vacinal vem crescendo anualmente em todo o país. > Por mais que tenhamos saído de uma média de menos de 40% em 1994 e chegado a quase 100% em 2015, de um ano para o outro também ocorreram diversas quedas nesse número. No geral podemos afirmar que sim, houve um expressivo aumento na cobertura vacinal de 1994 a 2015, mas não anualmente, e sim como um todo. - **Hipótese 2:** Vacinas que devem ser aplicadas em bebês logo após o nascimento, como a [BCG e da Hepatite B](https://www.unimedlondrina.com.br/noticias/tudo-saude/07/06/2018/vacinas-importantes-recem-nascidos/), principalmente, possuem maior cobertura vacinal e também maior valor absoluto de aplicações. 
> Observamos que os imunizantes BCG, Poliomielite e DTP foram os mais aplicados, em valores absolutos, no Brasil, de 1994 a 2019. Esses são, justamente, os aplicados em recém-nascidos. Isso se explica pois, ao serem aplicados já na maternidade, não há o risco de esquecimento ou falta de vontade para a aplicação dessas vacinas, como ocorre frequentemente com outras que são aplicadas em adultos. Por último, em uma linha do tempo visualizamos quando determinados imunizantes foram introduzidos no calendário vacinal brasileiro e notamos que grande parte dos imunizantes foram introduzidos antes de 1994 (início da contabilização dos dados nessa base) e continuam os mesmos até a data de atualização em 2019. Esses são, por exemplo: - **BCG** (Contra Tuberculose); - **FA** (Contra Febre Amarela); - **HB** (Contra Hepatite B); - **VOP** (Oral Contra Poliomielite); - **DTP** (Tríplice Bacteriana). Essa foi a nossa visão geral sobre as vacinas no Brasil. <a name="ref"></a> # 7 Referências - [Programa Nacional de Imunização - Apresentação](http://pni.datasus.gov.br/apresentacao.asp) - [TABNET](http://www2.datasus.gov.br/DATASUS/index.php?area=02) - [DATASUS](https://datasus.saude.gov.br/) - [Origem dos Dados](http://tabnet.datasus.gov.br/cgi/tabcgi.exe?pni/cnv/cpniuf.def) - [Notas Técnicas](http://tabnet.datasus.gov.br/cgi/pni/Imun_cobertura_desde_1994.pdf) - [As Vacinas Importantes Para Recém-Nascidos](https://www.unimedlondrina.com.br/noticias/tudo-saude/07/06/2018/vacinas-importantes-recem-nascidos/) - [Alura](https://www.alura.com.br/) - [Bootcamp Data Science Aplicada](https://www.alura.com.br/bootcamp/data-science-aplicada/matriculas-abertas) - [Storytelling with Data](https://www.storytellingwithdata.com/) - [Storytelling with Data in Python](https://github.com/empathy87/storytelling-with-data) - [Numpy](https://numpy.org/](https://numpy.org/) - [Pandas](https://pandas.pydata.org/](https://pandas.pydata.org/) - 
[Matplotlib](https://matplotlib.org/)
github_jupyter
import pandas as pd
# BUG FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

# Keep only the complaints that actually have narrative text to classify.
df = pd.read_csv('Consumer_Complaints.csv', encoding='latin-1')
df = df[pd.notnull(df['Consumer complaint narrative'])]

# 80/20 train/validation split; fixed seed for reproducibility.
train, test = train_test_split(
    df, test_size=0.20, random_state=10)

train.shape
test.shape
train.head()
# Convert one_hot to category
def one_hot_to_category(cat_one_hot_list, cat_master):
    """Translate one-hot (or score) vectors back into category labels.

    The index of the largest entry in each vector selects the label from
    ``cat_master``.
    """
    labels = []
    for one_hot in cat_one_hot_list:
        winner = one_hot.argmax()
        labels.append(cat_master[winner])
    return labels
def get_val(numerator, divisor):
    """Return the ratio rounded to 3 decimal places, or NaN when the divisor is zero."""
    if divisor == 0:
        return float('nan')
    return np.round(numerator / divisor, 3)
acc') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', color='green',label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() import numpy as np np.random.seed(42) # for reproducibility from IPython.display import SVG from keras.utils.vis_utils import model_to_dot import matplotlib.pyplot as plt %matplotlib inline df_train,df_val,categories = read_data() #Load Glove 50-d embeddings embeddings,word_index = load_embeddings() #Prepare the data for the model tk_word_index,x_train,y_train,x_val,y_val = prepare_data_from_full_word_index(df_train,df_val,categories,word_index) # Get the embedding matrix for the model, build model, display model summary embedding_matrix = get_embedding_matrix_for_model(embeddings,word_index) model = build_model(embedding_matrix,categories) model.summary() # Train the model, record history history = model.fit(x_train, y_train, epochs=5, batch_size=24, shuffle=False, validation_data=(x_val, y_val)) make_history_plot(history) # Make and analyze training predictions train_predictions = one_hot_to_category(model.predict(x_train),categories) analyze_predictions(categories,df_train['Product'].values,train_predictions) # Make and analyze validation predictions val_predictions = one_hot_to_category(model.predict(x_val),categories) analyze_predictions(categories,df_val['Product'].values,val_predictions) ```
github_jupyter
# MUR SST Benchmark tests using consolidated metadata versus individual netcdf NASA JPL PODAAC has put the entire [MUR SST](https://podaac.jpl.nasa.gov/dataset/MUR-JPL-L4-GLOB-v4.1) dataset on AWS cloud as individual netCDF files, **but all ~7000 of them are netCDF files.**\ Accessing one file works well, but accessing multiple files is **very slow** because the metadata for each file has to be queried. Here, we create **fast access** by consolidating the metadata and accessing the entire dataset rapidly via zarr. More background on this project: [medium article](https://medium.com/pangeo/fake-it-until-you-make-it-reading-goes-netcdf4-data-on-aws-s3-as-zarr-for-rapid-data-access-61e33f8fe685) and in this [repo](https://github.com/lsterzinger/fsspec-reference-maker-tutorial). We need help developing documentation and more test datasets. If you want to help, we are working in the [Pangeo Gitter](https://gitter.im/pangeo-data/cloud-performant-netcdf4). To run this code: - you need to set your AWS credentials up using `aws configure --profile esip-qhub` - you need to set up your `.netrc` file in your home directory with your earthdata login info Authors: - [Chelle Gentemann](https://github.com/cgentemann) - [Rich Signell](https://github.com/rsignell-usgs) - [Lucas Steringzer](https://github.com/lsterzinger/) - [Martin Durant](https://github.com/martindurant) Credit: - Funding: Interagency Implementation and Advanced Concepts Team [IMPACT](https://earthdata.nasa.gov/esds/impact) for the Earth Science Data Systems (ESDS) program and AWS Public Dataset Program - AWS Credit Program - ESIP Hub ## Summary of results | Test | Consolidated-metadata | Single netCDF | Improvement | | :---- | :----: |:----: |:----:| | Access entire dataset | 1:41 min | 60 min* | 36x | | Plot 1 year at point | 14 sec | 4:12 min | 18x | | Plot 1 day | 50 sec | 1:07 min | 1.4x | *Extrapolated from 3:16 min for 1 year to 18 years of data Accessing the entire dataset is substantially faster because we 
def setup_earthdata_login_auth(endpoint):
    """
    Set up the request library so that it authenticates against the given
    Earthdata Login endpoint and is able to track cookies between requests.
    This looks in the .netrc file first and if no credentials are found,
    it prompts for them.

    Valid endpoints:
        urs.earthdata.nasa.gov - Earthdata Login production
    """
    try:
        username, _, password = netrc.netrc().authenticators(endpoint)
    except (FileNotFoundError, TypeError):
        # FileNotFound = There's no .netrc file
        # TypeError = The endpoint isn't in the netrc file, causing the above to try unpacking None
        # BUG FIX: the original only printed here and then crashed with a
        # NameError on `username`; actually prompt as the docstring promises.
        from getpass import getpass
        print("There's no .netrc file or the The endpoint isn't in the netrc file")
        username = input(f"Earthdata Login username for {endpoint}: ")
        password = getpass("Earthdata Login password: ")
    manager = request.HTTPPasswordMgrWithDefaultRealm()
    manager.add_password(None, endpoint, username, password)
    auth = request.HTTPBasicAuthHandler(manager)
    jar = CookieJar()
    processor = request.HTTPCookieProcessor(jar)
    opener = request.build_opener(auth, processor)
    request.install_opener(opener)
random value ds["analysed_sst"].sel(time="2005-12-20", lat=0, lon=0, method="nearest") %%time # test getting a random value ts = ds["analysed_sst"].sel( lat=0.01, lon=0.01, time=slice("2005-01-01T09", "2006-06-01T09") ) ts.plot() %%time now = datetime.now() dy = ds["analysed_sst"].sel(time="2005-01-01", method="nearest") dy.hvplot.quadmesh(x="lon", y="lat", geo=True, rasterize=True, cmap="turbo") then = datetime.now() print(then - now) ``` ## Benchmark tests for the PODAAC cloud MUR SST, individual netcdf files ``` %%time from os.path import dirname, join fs = begin_s3_direct_access() files = fs.glob( join("podaac-ops-cumulus-protected/", "MUR-JPL-L4-GLOB-v4.1", "2005*.nc") ) ds2 = xr.open_mfdataset( paths=[fs.open(f) for f in files], combine="by_coords", mask_and_scale=True, decode_cf=True, chunks={"time": 1}, # analysis. ) ds2.close() %%time # test getting a random value # ts = ds2['analysed_sst'].sel(lat=0.01,lon=0.01,time=slice('2005-01-01T09','2006-01-01T09')) #memory issues times out so break it up tem2 = [] for imon in range(12): tem = ( ds2["analysed_sst"] .sel(lat=0.01, lon=0.01, time="2005-" + str(imon + 1).zfill(2)) .load() ) tem2.append(tem) # print(imon+1) ts = xr.concat(tem2, dim="time") %%time ts.plot() %%time now = datetime.now() dy = ds2["analysed_sst"].sel(time="2005-01-01") dy.hvplot.quadmesh(x="lon", y="lat", geo=True, rasterize=True, cmap="turbo") then = datetime.now() print(then - now) ``` ## Benchmark tests for the AWS cloud [MUR SST](https://registry.opendata.aws/mur/), ~chunked for general use - MUR Level 4 SST dataset in Zarr format. The zarr-v1/ directory contains a zarr store chunked (5, 1799, 3600) along the dimensions (time, lat, lon). 
``` import warnings import fsspec import hvplot.xarray import numpy as np import pandas as pd import xarray as xr import hvplot.xarray warnings.simplefilter("ignore") # filter some warning messages xr.set_options(display_style="html") # display dataset nicely %%time ds_sst = xr.open_zarr( "https://mur-sst.s3.us-west-2.amazonaws.com/zarr-v1", consolidated=True ) ds_sst %%time ts = ds_sst["analysed_sst"].sel( lat=0.01, lon=0.01, time=slice("2005-01-01T09", "2006-01-01T09") ) ts.plot() %%time now = datetime.now() dy = ds_sst["analysed_sst"].sel(time="2005-01-01") dy.hvplot.quadmesh(x="lon", y="lat", geo=True, rasterize=True, cmap="turbo") then = datetime.now() print(then - now) ``` ## Benchmark tests for the AWS cloud [MUR SST](https://registry.opendata.aws/mur/), ~chunked for timeseries - MUR Level 4 SST dataset in Zarr format. The zarr/ directory contains a zarr store chunked (6443, 100, 100) along the dimensions (time, lat, lon). ``` %%time ds_sst = xr.open_zarr( "https://mur-sst.s3.us-west-2.amazonaws.com/zarr", consolidated=True ) ds_sst %%time ts = ds_sst["analysed_sst"].sel( lat=0.01, lon=0.01, time=slice("2005-01-01T09", "2006-01-01T09") ) ts.plot() #%%time # now = datetime.now() # dy = ds_sst["analysed_sst"].sel(time="2005-01-01") # dy.hvplot.quadmesh(x='lon', y='lat', geo=True, rasterize=True, cmap='turbo' ) # then = datetime.now() # print(then-now) ```
github_jupyter
class LinearNet(nn.Module):
    """A single bias-free linear layer (y = W x)."""

    def __init__(self, inp_dim, out_dim):
        """Create the weight matrix mapping `inp_dim` -> `out_dim` features."""
        super().__init__()
        self.fc1 = nn.Linear(inp_dim, out_dim, bias=False)

    def forward(self, x):
        """Apply the linear map to a batch of inputs."""
        return self.fc1(x)
features data.""" mse = torch.nn.MSELoss() return mse(model(decoder(obss_prevv)), decoder(obss)) def sparsity_loss(model): """Ensure that the model is sparse.""" regularization_loss = 0 for param in model.parameters(): regularization_loss += torch.sum(torch.abs(param)) return regularization_loss def losses(obss_torch, obss_prev_torch, decoder, reconstructor, model, rn_threshold=100): """Compute all losses ob observations.""" res = {} res['r'] = reconstruction_loss(obss_torch, decoder, reconstructor) res['f'] = fit_loss(obss_torch, obss_prev_torch, decoder, model) res['s'] = sparsity_loss(model) res['rn'] = reconstruction_loss_norm(reconstructor) if res['rn'] < rn_threshold: res['rn'] = torch.from_numpy(np.array(rn_threshold)) return res def total_loss(losses_, hypers): """Compute total loss.""" loss = 0.0 for key in losses_.keys(): loss += hypers[key] * losses_[key] return loss def lstdct2dctlst(lst): """List of dictionaries -> dict of lists.""" keys = lst[0].keys() result = {k: [] for k in keys} for item in lst: for k, v in item.items(): result[k].append(v) return result def epoch(obss_torch, obss_prev_torch, optimizer, decoder, reconstructor, model, hypers): """One optimization epoch.""" optimizer.zero_grad() L = losses(obss_torch, decoder, reconstructor, model) loss = total_loss(L, hypers) loss.backward() optimizer.step() L['total'] = loss return {x: y.item() for x, y in L.items()} def metrics(decoder, reconstructor, model, hypers): m = {} m['nnz'] = np.sum(np.abs(list(model.parameters())[0].detach().numpy().flatten()) > 1e-2) m['hyper_s'] = hypers['s'] return m ``` #### Optimizing via total loss over the whole data ``` observations = def fit_on_observations(observations, model, decoder, reconstructor, hypers): """Fit model/decoder/reconstructor on observations""" # converting observations to torch obss_torch = torch.from_numpy(np.array(observations, dtype=np.float32)) all_parameters = list(model.parameters()) + list(decoder.parameters()) + 
list(reconstructor.parameters()) optimizer = torch.optim.Adam(all_parameters, lr=hypers['lr']) # training last_hyper_adjustment = -1 suggested_hyper = None results = [] for i in tqdm(range(hypers['epochs'])): e = epoch(obss_torch, optimizer, decoder, reconstructor, model, hypers) e.update(metrics(decoder, reconstructor, model, hypers)) results.append(e) if hypers['hyper_do_update']: if e['r'] + e['f'] > hypers['rf_threshold']: if hypers['s'] > hypers['s_min']: suggested_hyper = hypers['s'] * hypers['s_decreaser'] else: if hypers['s'] < hypers['s_max']: suggested_hyper = hypers['s'] / hypers['s_decreaser'] if i - last_hyper_adjustment >= hypers['hyper_freeze_steps'] and suggested_hyper is not None: hypers['s'] = suggested_hyper last_hyper_adjustment = i return results def plot_losses_and_model(results, model): """Plot loss curves and the weight heatmap.""" # plotting plt.figure(figsize=(16, 5)) for i, (k_, v) in enumerate(lstdct2dctlst(results).items()): plt.subplot(1, len(results[0]) + 1, i + 1) plt.xlabel('epoch') plt.title(k_) plt.axhline(0) ax = plt.gca() plt.yscale('log') ax.tick_params(axis="y", direction="in", pad=-30) plt.plot(v) plt.subplot(1, len(results[0]) + 1, len(results[0]) + 1) plt.title("Weights heatmap") sns.heatmap(list(model.parameters())[0].detach().numpy()) # creating models decoder = LinearNet(inp_dim=k, out_dim=f) reconstructor = LinearNet(inp_dim=f, out_dim=k) model = LinearNet(inp_dim=f, out_dim=f) hypers_dry = {'r': 0.1, 'f': 0.2, 's': 0.0, 'rn': 0.0, 'lr': 1e-3, 'epochs': 5000, 'hyper_do_update': False, } k, f, len(observations) results_dry = fit_on_observations(observations, model, decoder, reconstructor, hypers_dry) plot_losses_and_model(results_dry, model) hypers = {'r': 0.1, 'f': 0.2, 's': 1e-10, 'rn': 0.0001, 'lr': 1e-3, 'rf_threshold': 100 * (results_dry[-1]['r'] + results_dry[-1]['f']), 'epochs': 10000, 's_min': 1e-10, 's_max': 10, 's_decreaser': 0.5, 'hyper_freeze_steps': 100, 'hyper_do_update': True } k, f, len(observations) # 
creating models decoder = LinearNet(inp_dim=k, out_dim=f) reconstructor = LinearNet(inp_dim=f, out_dim=k) model = LinearNet(inp_dim=f, out_dim=f) results = fit_on_observations(observations, model, decoder, reconstructor, hypers) plot_losses_and_model(results, model) results[-1] ```
github_jupyter
# Diseño de software para cómputo científico ---- ## Unidad 3: Persistencia de datos. ### Agenda de la Unidad 3 --- #### Clase 1 - Lectura y escritura de archivos. - Persistencia de binarios en Python (pickle). - Archivos INI/CFG, CSV, JSON, XML y YAML #### Clase 2 - Bases de datos relacionales y SQL. ### Clase 3 - Formato HDF5. - Breve repaso de bases de datos No relacionales. ## Lectura y escritura de archivos - Python ofrece los objetos para la interacción con archivos de texto dentro del módulo `io`. - Para crear abrir un archivo se utiliza la función `open()`. ``` try: fp = open("code/archivo.txt") fp.read() finally: if not fp.closed: fp.close() with open("code/archivo.txt") as fp: fp.read() ``` ## Lectura y escritura de archivos - Por defecto `open()`abre los archivos en modo lectura (`r`), y con encoding `utf-8` ``` open("code/archivo.txt") ``` <small> <code> ========= =============================================================== Character Meaning --------- --------------------------------------------------------------- 'r' open for reading (default) 'w' open for writing, truncating the file first 'x' create a new file and open it for writing 'a' open for writing, appending to the end of the file if it exists 'b' binary mode 't' text mode (default) '+' open a disk file for updating (reading and writing) 'U' universal newline mode (deprecated) ========= =============================================================== </code> </small> ### Pickle - El módulo pickle implementa protocolos binarios para serializar y deserializar una estructura de objeto Python. - Esencialmente lo que hace es guardar estado. - No puede serializar clases generadas dinámicamente ``` class Persona(): def __init__(self, name): self.name = name p = Persona("tito") obj = {"Hola": "Mundo", "Un-Objecto": p} import pickle pickle.dumps(obj, protocol=4) ``` ### Pickle Hay 4 APIS importantes 1. `load`: Lee de un archivo 2. `loads`: Lee de un string 3. `dump`: Escribe a un archivo 4. 
`dumps`: Retorna un string Los archivos se abren en formato binario (`open("...", mode="b")`) ### CSV - Los archivos CSV (del inglés comma-separated values) son un tipo de documento en formato abierto sencillo para representar datos en forma de tabla, en las que las columnas se separan por comas (o cualquier otro caracter) - El formato CSV es muy sencillo y no indica un juego de caracteres concreto. - **NO ES UN ESTANDAR Y DEBE SER EVITADO** Librerías utiles para tratar estos formatos son: - Pandas (`pd.from_csv`) - csv (en la biblioteca estandar) ### CSV - csv ``` import csv with open("code/archivo.csv", encoding="utf-16") as fp: for row in csv.reader(fp, delimiter=","): print(row) ``` ### CSV - Pandas ``` import pandas as pd pd.read_csv("code/archivo.csv", encoding="utf-16") ``` ## Archivos `ini`/`cfg` - .ini es una extensión de archivo para denotar ficheros de configuración utilizados por aplicaciones de los sistemas operativos Windows y ciertas aplicaciones en ambiente GNU/Linux. - El término proviene de "Windows Initialization file", es decir, archivo de inicialización de Windows. - Actualmente esta en desuso en Windows pero es útil en ambientes con mejores políticas de seguridad como Linux/OSX/Unix - Loa archivos `ini` se estructuran en: - **Secciones:** permiten agrupar parámetros relacionados. Por ejemplo: "Parámetros de red". - **Valores:** definen parámetros y su valor. Primero se define el nombre del parámetro y después su valor separado por el signo de igualdad (=). - **Comentarios:** permiten explicar el propósito de una sección o parámetro. Los comentarios comienzan con el carácter punto y coma (;). 
## Archivos `ini`/`cfg` ```ini [Red] ; Poner UsarProxy=1 si no hay cortafuegos UsarProxy=1 Proxy=192.168.0.1 [Preferencias] PaginaInicio=https://wikipedia.org Maximizar=1 ``` ## Archivos `ini`/`cfg` ``` import configparser cfg = configparser.ConfigParser() cfg.read_string("""[Red] ; Poner UsarProxy=1 si no hay cortafuegos UsarProxy=1 Proxy=192.168.0.1 [Preferencias] PaginaInicio=https://wikipedia.org Maximizar=1""") print(cfg.sections()) cfg["Red"].getint("UsarProxy") cfg.read_string("""[Otro] foo=23.9""") cfg["Otro"].getfloat("foo") ``` ## Extensible Markup Language - XML - Es un meta-lenguaje que permite definir lenguajes de marcas desarrollado por el World Wide Web Consortium (W3C) utilizado para almacenar datos en forma legible. - Establece su codificación en la cabecera. - Es un bicho grande y muy extensible. - Tiene un formato de validación llamado DTD. ### Ejemplo XML ```xml <?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE Edit_Mensaje SYSTEM "Edit_Mensaje.dtd"> <Edit_Mensaje> <Mensaje id="1"> <Remitente> <Nombre>Nombre del remitente</Nombre> <Mail format="email"> Correo del remitente </Mail> </Remitente> <Destinatario> <Nombre>Nombre del destinatario</Nombre> <Mail format="email">Correo del destinatario</Mail> </Destinatario> <Texto> <Asunto> Este es mi documento con una estructura muy sencilla no contiene atributos ni entidades... </Asunto> <Parrafo> Este es mi documento con una estructura muy sencilla no contiene atributos ni entidades... </Parrafo> </Texto> </Mensaje> </Edit_Mensaje> ``` ``` xml = """ <Edit_Mensaje> <Mensaje id="1"> <Remitente> <Nombre>Nombre del remitente</Nombre> <Mail format="email"> Correo del remitente </Mail> </Remitente> <Destinatario> <Nombre>Nombre del destinatario</Nombre> <Mail format="email">Correo del destinatario</Mail> </Destinatario> <Texto> <Asunto> Este es mi documento con una estructura muy sencilla no contiene atributos ni entidades... 
</Asunto> <Parrafo> Este es mi documento con una estructura muy sencilla no contiene atributos ni entidades... </Parrafo> </Texto> </Mensaje> </Edit_Mensaje>"""
```

### XML

Hay muchas formas y paradigmas totalmente diferentes para leer archivos XML. Vamos a ver tres simples

```
from xml.etree import ElementTree

tree = ElementTree.fromstring(xml)

for msg in tree.findall("Mensaje"):
    for part in msg:
        for children in part:
            print(children.tag)
            print(children.text)
            print(children.attrib)
            print("-" * 10)
```

### Pyquery

- Implementa un mecanismo de búsqueda tipo base de datos sobre XML.
- Es un port de la librería jQuery de JavaScript.
- Está implementado sobre un ElementTree muy eficiente llamado `lxml`

Se instala con `pip install pyquery`

```
import pyquery as pq

d = pq.PyQuery(xml, parser='xml')
d("Mensaje#1")
```

## JSON - JavaScript Object Notation

- Aprovecha el comportamiento común de todos los lenguajes de programación.
- Se trata de un subconjunto de la notación literal de objetos de JavaScript, aunque, debido a su amplia adopción como alternativa a XML, se considera (año 2019) un formato independiente del lenguaje.
- JSON es **UTF-8**

![image.png](attachment:image.png)

## Los tipos de datos disponibles con JSON son:

- Números: Se permiten números negativos y opcionalmente pueden contener parte fraccional separada por puntos. Ejemplo: `123.456`
- Cadenas: Representan secuencias de cero o más caracteres. Se ponen entre doble comilla y se permiten cadenas de escape. Ejemplo: `"Hola"`
- Booleanos: Representan valores booleanos y pueden tener dos valores: `true` y `false`
- `null`: Representa el valor nulo.
- Array: Representa una lista ordenada de cero o más valores los cuales pueden ser de cualquier tipo. Los valores se separan por comas y el vector se mete entre corchetes. Ejemplo `["juan","pedro","jacinto"]`
- Objetos: Son colecciones no ordenadas de pares de la forma <nombre>:<valor> separados por comas y puestas entre llaves.
Ejemplo: `{"hola": ["Mundo", 1]}` ### JSON - Ejemplo ![image.png](attachment:image.png) ### JSON - Ejemplo ``` src = """{ "menu": { "id": "file", "value": "File", "foo": true, "popup": { "menuitem": [ {"value": "New", "onclick": "CreateNewDoc()"}, {"value": "Open", "onclick": "OpenDoc()"}, {"value": "Close", "onclick": "CloseDoc()"}]}}}""" import json json.loads(src) json.dumps({"hola": {True}}) ``` ## YAML - YAML Ain't Markup Language - Es un superset de json (Osea un parser YAML lee JSON) - Es "menos" estandar. - Pero soporta comentarios! - Soporta texto multilinea - Persiste objetos arbitrarios Python (OJO). - `pip install PyYAML` ### Todo esto es YAML ![image.png](attachment:image.png) ### Parseando YAML - JSON ``` import yaml yaml.safe_load(""" { "menu": { "id": "file", "value": "File", "popup": { "menuitem": [ {"value": "New", "onclick": "CreateNewDoc()"}, {"value": "Open", "onclick": "OpenDoc()"}, {"value": "Close", "onclick": "CloseDoc()"}]}}}""") ``` ### Parseando YAML - YAML PURO ``` yaml.safe_load(""" menu: id: file value: File popup: menuitem: - value: New onClick: CreateNewDoc() - value: Open onClick: OpenDoc() - value: Close onClick: CloseDoc()""") ``` ### Parseando YAML - Hibrido y dump ``` yaml.safe_load(""" menu: id: "file" value: "File" popup: menuitem: - {value: "New", onclick: "CreateNewDoc()"} - {value: "Open", onclick: "OpenDoc()"} - {value: "Close", onclick: "CloseDoc()"} """) import attr @attr.s class Persona: name = attr.ib() password = attr.ib(repr=False) yaml.dump({"hola": Persona(name="Tito", password="kkk")}) ```
github_jupyter
# geotiff2csv Extraire une portion de GeoTiff en utilisant la librairie Gdal. ## Introduction Le **GeoTiff** est un format d'image fréquemment utilisé pour stocker des **données géo-référencées**. Le format GeoTiff est, par exemple, utilisé pour la mise à disposition de [Corine Land Cover](https://land.copernicus.eu/pan-european/corine-land-cover)(**CLC**). Dans le cas de CLC, à chaque **Pixel** de l'image correspond un **hectare** de terrain. A chaque Pixel est associé un code. Dans le cas de CLC, il s'agit d'un code d'occupation du sol (Land Use). CLC couvre 39 états. Les fichiers GeoTiff fournis par l'Agence Européenne de l'environnement sont très gros. Il y a un fichier par année. Chaque fichier fait environ 200 MB. Un GeoTiff est une image qui peut facilement être importée sous la forme d'une **matrice**. Dans le cas de CLC, la matrice est énorme puisqu'elle compte **65.000 lignes** pour **46.000 colonnes**, ce qui fait 65.000 * 46.000 = 2.99 milliards de pixels! Il s'agit vraiment d'une très grosse matrice! Nous ne nous intéressons, pour l'instant, qu'aux données belges. L'idée est d'extraire de cette grosse matrice, une sous-matrice qui comporte tous les pixels relatifs à la Belgique. Nous aurons, bien sûr, dans notre sous-matrice, quelques éléments (pixels) qui ne concernent pas la Belgique, puisque notre pays n'a pas une forme "rectangulaire". Pour extraire des informations d'un GeoTiff, nous pouvons utiliser la librairie [Gdal](https://en.wikipedia.org/wiki/GDAL). ## Tiff Le Tiff est un format pour le stockage d'images matricielles. On parle aussi de "raster" en anglais. Voici une image extraite de Wikipédia et qui montre différents éléments (ou pixels) qui composent une image matricielle. ![image.png](attachment:image.png) ## GeoTiff Un GeoTiff est une image au format Tiff auquel on a ajouté des **méta-données de géo-référencement**. 
De la documentation sur le *format GeoTiff* est disponible à l'adresse [https://www.gdal.org/frmt_gtiff.html](https://www.gdal.org/frmt_gtiff.html). Lorsque nous importons un GeoTiff en Python, nous obtenons une matrice. L'élément de la matrice situé à l'intersection de la $L^{ème}$ ligne et de la $P^{ème}$ colonne est identifié à l'aide du couple $(L,P)$. Les méta-données stockées dans les fichiers Corine Land Cover nous permettent de transformer un couple $(L,P)$ en coordonnées géographiques $(X,Y)$. Nous ferons en sorte que ces coordonnées correspondent au centroïde de l'hectare correspondant au pixel situé en $(L,P)$. Le système de projection utilisé par CLC est l'ETRS89 LAEA. **LAEA** est un acronyme pour **Lambert Azimuthal Equal Area**. ## Géoréférencement Le géoréférencement est l'action qui consiste à associer les coordonnées $(L,P)$ aux coordonnées $(X,Y)$. ## Gdal Pour extraire des informations d'un GeoTiff, nous pouvons utiliser la librairie [Gdal](https://en.wikipedia.org/wiki/GDAL). Il s'agit d'une librairie qui est utilisée par un très grand nombre de logiciels (ArcGis, QGis, ...). La documentation est disponible à l'adresse [https://www.gdal.org/classGDALDataset.html](https://www.gdal.org/classGDALDataset.html). Fonction intéressantes du package Gdal (pas toutes utilisées ic): * myRaster.GetProjectionRef() * myRaster.RasterCount * myBand=myRaster.GetRasterBand(1) * myBand.DataType * myBand.XSize,myBand.YSize * myRaster.GetGeoTransform() * myBand.GetNoDataValue() ## Bounding box Nous pouvons utiliser [BBox finder](http://bboxfinder.com/#49.425267,2.427979,51.553167,6.553345) pour identifier un rectangle qui entoure la Belgique (ou une autre unité administrative). 
Exemple de Bounding Boxes: - Belgium: **(3795050, 2939950)->(4067050, 3169950)** - Rixensart: **(3919063,3067640)->(3952705,3091820)** # Paramètres ``` # Input files CorineGeoTiffDirectory='/home/yoba/DataScience/data/Open-Data/clc' CorineGeoTiff={ 1990:'{}/1990/CLC1990_CLC1990_V2018_20b2.tif'.format(CorineGeoTiffDirectory), 2000:'{}/2000/CLC2006_CLC2000_V2018_20b2.tif'.format(CorineGeoTiffDirectory), 2006:'{}/2006/CLC2012_CLC2006_V2018_20b2.tif'.format(CorineGeoTiffDirectory), 2012:'{}/2012/CLC2018_CLC2012_V2018_20b2.tif'.format(CorineGeoTiffDirectory), 2018:'{}/2018/clc2018_clc2018_V2018.20b2.tif'.format(CorineGeoTiffDirectory) } # Bounding box x1,y1=3795050,2939950 x2,y2=4067050,3169950 # Output directory CorineCsvDirectory='/home/yoba/DataScience/data/StatisticalProducts/clc/bbox' ``` # Imports ``` import numpy as np import pandas as pd from osgeo import gdal from numpy.linalg import inv ``` # Fonctions ## Transformer des (X,Y) en (L,P) ``` def xy2pixel(padfTransform,Xp,Yp)-> '(pixel,line) coordinates': """ Transforming projection coordinates (Xp,Yp) to pixel/line (P,L) raster space. """ pixel2xy=np.array([[padfTransform[1], padfTransform[2]], [padfTransform[4], padfTransform[5]]]) xy2pixel=inv(pixel2xy) a = np.matmul(xy2pixel,np.array([[Xp-padfTransform[0]], [Yp-padfTransform[3]]])) result=(a-np.array([[0.5],[0.5]])).tolist() return {'pixel':int(result[0][0]),'line':int(result[1][0])} ``` ## Transformer des (L,P) en (X,Y) ``` def pixel2xy(padfTransform,P:'pixel (column)',L:'line')->'(x,y) coordinates': """ Transforming pixel/line (P,L) raster space to projection coordinates (Xp,Yp). In a north up image, padfTransform[1] is the pixel width, and padfTransform[5] is the pixel height. The upper left corner of the upper left pixel is at position (padfTransform[0],padfTransform[3]). The padTransform vector is stored in the metadata portion of the geoTiff. We can extract it using the GetGetTransform() method. 
""" Xp = padfTransform[0] + (P+0.5) * padfTransform[1] + (L+0.5) * padfTransform[2] Yp = padfTransform[3] + (P+0.5) * padfTransform[4] + (L+0.5) * padfTransform[5] return (Xp,Yp) def geotiff2csv(year): inputFile=CorineGeoTiff[year] outputFile=f'{CorineCsvDirectory}/clc-({x1},{y1})-({x2},{y2})-{year}.csv' # Opening a GeoTiff does not import it into memory. In the end, we have a "link" myRaster = gdal.Open(inputFile) # "Connect" to first band - There is only one band in CLC files myBand=myRaster.GetRasterBand(1) # Identify portion of geotiff to extract lowerLeftCorner =xy2pixel(myRaster.GetGeoTransform(),x1,y1) upperRightCorner=xy2pixel(myRaster.GetGeoTransform(),x2,y2) # Extract pixelOffset = lowerLeftCorner['pixel'] pixelWindowSize = upperRightCorner['pixel']-lowerLeftCorner['pixel'] lineOffset = upperRightCorner['line'] lineWindowSize = lowerLeftCorner['line']-upperRightCorner['line'] myBBox=myBand.ReadAsArray(xoff = pixelOffset , yoff = lineOffset , win_xsize = pixelWindowSize , win_ysize = lineWindowSize ) # Get rid of "No Data" values L,P = np.where( myBBox != myBand.GetNoDataValue() ) data_vals = np.extract(myBBox != myBand.GetNoDataValue(), myBBox) # Transform (P,L) to (X,Y) coordinates XY=pixel2xy(myRaster.GetGeoTransform(),pixelOffset+P,lineOffset+L) # Export results myDataFrame=pd.DataFrame({'X':XY[0],'Y':XY[1],'VALUE':data_vals}) myDataFrame.to_csv(outputFile,index=False,sep='|') ``` # Functions calls ``` for y in [1990,2000,2006,2012,2018]: geotiff2csv(y) ```
github_jupyter
# Code Katas Keep your skills sharp by implementing fundamental (and sometimes tricky) algorithms and data structures over and over again. ## White Belt *Easy peasy lemon squeezy* ### Palindrome String Check if input string is a palindrome. Try to only use constant extra space. ``` def palindrome(string): """Check if input string (all lower-case characters) is a palindrome.""" for i in range(len(string)//2): if string[i] != string[len(string)-i-1]: return False return True assert palindrome('') == True assert palindrome('a') == True assert palindrome('ab') == False assert palindrome('abba') == True assert palindrome('redivider') == True print('All passed!') ``` ### Merge Sorted Merge two lists sorted in descending order. Result should also be sorted in descending order. ``` def merge(a, b): """Merge two lists sorted in descending order.""" return [max(a, b).pop(0) for _ in a+b] assert merge([], []) == [] assert merge([1], [0]) == [1,0] assert merge([7,5,1], [2]) == [7,5,2,1] print('All passed!') ``` ### Length of Last Word Given a string of words separated by spaced, return length of last word. Think of an efficient way to do it for a string with millions of words in it. ``` def last_word_length(text): """Given a string of words separated by spaced, return length of last word.""" i = len(text)-1 while i >= 0 and text[i] == ' ': i -= 1 count = 0 while i >= 0 and text[i] != ' ': i -= 1 count += 1 return count assert last_word_length('') == 0 assert last_word_length('last ') == 4 assert last_word_length('string of words') == 5 print('All passed!') ``` ## Yellow Belt *Requires a certain level of problem solving and coding skills* ### Binary search Let's start with binary search. Implement find_in_sorted function that looks for number *target* in a sorted array *nums*. Remember, it has to run in O(logN) time. 
``` def find_in_sorted(nums, target): """Binary search.""" start, end = 0, len(nums)-1 while start < end: mid = (start+end)//2 if nums[mid] == target: return mid elif nums[mid] < target: start = mid+1 else: end = mid-1 return -1 assert find_in_sorted([], 0) == -1 assert find_in_sorted([1,2,3], 0) == -1 assert find_in_sorted([1,2,3], 2) == 1 assert find_in_sorted([1,2,2,2,2,2,3], 2) in range(1, 6) assert find_in_sorted([1,2,3,4,6,7,8,12,13,16], 12) == 7 print('All passed!') ``` ### Simplify Unix-style file path. ``` def simplify_path(path): """Simplify Unix-style file path.""" stack = [] tokens = [t for t in path.split('/') if t != '.' and t != ''] for token in tokens: if token != '..': stack.append(token) elif stack: stack.pop() return '/'+'/'.join(stack) assert simplify_path('/') == '/' assert simplify_path('/../') == '/' assert simplify_path('/...') == '/...' assert simplify_path('/.../') == '/...' assert simplify_path('/foo/..') == '/' assert simplify_path('/foo///.//bar//') == '/foo/bar' print('All passed!') ``` ### Create Maximum Number Given a number with n digits represented as a string, find maximum number with k digits, 0<k<n. Output result as a string. ``` def create_max(num, k): digits = list(num) drop = len(digits) - k stack = [] for digit in digits: while drop and stack and stack[-1] < digit: stack.pop() drop -= 1 stack.append(digit) return ''.join(stack[:k]) num = '912583' assert create_max(num, 1) == '9' assert create_max(num, 2) == '98' assert create_max(num, 3) == '983' assert create_max(num, 4) == '9583' assert create_max(num, 5) == '92583' print('All passed!') ``` ## Orange Belt *Advanced stuff, certain flexibility of thinking is required* ### Linked Lists These tend to be trickier than they seem. First, reverse a linked list iteratively, in place. 
``` class ListNode(object): def __init__(self, x): self.val = x self.next = None def __str__(self): # for your debugging purposes return str(self.val) + '->' + str(self.next) def __eq__(self, other): # for asserts to work return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def reverse_list(head): """Iterative solution.""" prev = None while head: nxt = head.next head.next = prev prev = head head = nxt return prev head = ListNode(1) rev = ListNode(1) assert reverse_list(head) == head head = ListNode(1) head.next = ListNode(2) rev = ListNode(2) rev.next = ListNode(1) assert reverse_list(head) == rev head = ListNode(1) head.next = ListNode(2) head.next.next = ListNode(3) rev = ListNode(3) rev.next = ListNode(2) rev.next.next = ListNode(1) assert reverse_list(head) == rev print('All passed!') ``` Now, let's do the same, only this time recursively. ``` class ListNode(object): def __init__(self, x): self.val = x self.next = None def __str__(self): # for your debugging purposes return str(self.val) + '->' + str(self.next) def __eq__(self, other): # for asserts to work return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def reverse_list(head, prev=None): """Recursive solution.""" if not head: return prev nxt = head.next head.next = prev return reverse_list(nxt, head) head = ListNode(1) rev = ListNode(1) assert reverse_list(head) == head head = ListNode(1) head.next = ListNode(2) rev = ListNode(2) rev.next = ListNode(1) assert reverse_list(head) == rev head = ListNode(1) head.next = ListNode(2) head.next.next = ListNode(3) rev = ListNode(3) rev.next = ListNode(2) rev.next.next = ListNode(1) assert reverse_list(head) == rev print('All passed!') ``` ### Maximum Profit Given stock prices list, find maximum profit possible. You can buy and sell multiple times, but you can't hold more than one share at a time. 
Hint: the solution is really simple, but it's not easy to figure it out - go over some custom test cases by hand and try to see a pattern. ``` def max_profit(prices): """Find maximum profit possible.""" profit = 0 for i in range(len(prices)-1): if prices[i+1] > prices[i]: profit += prices[i+1] - prices[i] return profit assert max_profit([]) == 0 assert max_profit([100]) == 0 assert max_profit([1,6,5,2,8,1,4,5]) == 15 assert max_profit(range(100, 0, -1)) == 0 print('All passed') ``` ## Green Belt *You'll be a black belt soon enough* ### List Subsets Find all possible subsets of a list (or all possible sets of characters contained in a string). ``` def subsets(s): """Find all possible subsets of a list.""" if not s: return [[]] res = [] for sub in subsets(s[1:]): res.append(sub) res.append([s[0]]+sub) return res assert subsets('') == [[]] # please note, inputs 'abc' and ['a', 'b', 'c'] should be equivalent for your function assert subsets('abc') == [[],['a'],['b'],['a','b'],['c'],['a','c'],['b','c'],['a','b','c']] assert subsets(['a','b','c']) == [[],['a'],['b'],['a','b'],['c'],['a','c'],['b','c'],['a','b','c']] print('All passed!') ``` ### String Permutations Find all possible permutations of a string. 
``` def string_permutations(s): """Find all possible permutations of a string.""" if not s: return [''] res = [] for perm in string_permutations(s[1:]): for i in range(len(perm)+1): res.append(perm[:i]+s[0]+perm[i:]) return sorted(res) assert string_permutations('') == [''] assert string_permutations('abc') == ['abc','acb','bac','bca','cab','cba'] print('All passed!') ``` ## Blue Belt *With great power comes great responsibility* ### Implement Quicksort ``` def quicksort(nums, start=0, end=None): """Quicksort using last element as pivot.""" def partition(nums, start, end): pindex = start pivot = end for i in range(start, end): if nums[i] <= nums[pivot]: nums[i], nums[pindex] = nums[pindex], nums[i] pindex += 1 nums[pindex], nums[pivot] = nums[pivot], nums[pindex] return pindex if not end: end = len(nums)-1 if start >= end: return None pivot = partition(nums, start, end) quicksort(nums, start, pivot-1) quicksort(nums, pivot+1, end) a = [2, 9, 2, 3, 5, 8, 1] quicksort(a) assert a == [1, 2, 2, 3, 5, 8, 9] print('All passed!') ``` ### Implement Mergesort ``` def mergesort(nums): """Mergesort.""" def merge(a, b): i, j, merged, m, n = 0, 0, [], len(a), len(b) while i < m and j < n: if a[i] < b[j]: merged.append(a[i]) i += 1 else: merged.append(b[j]) j += 1 return merged + a[i:] + b[j:] if len(nums) < 2: return nums a = mergesort(nums[:len(nums)//2]) b = mergesort(nums[len(nums)//2:]) return merge(a, b) a = [2, 9, 2, 3, 5, 8, 1] assert mergesort(a) == [1, 2, 2, 3, 5, 8, 9] print('All passed!') ``` ### Find shortest path in undirected graph Given a graph *g* represented as adjacency list and nodes *u* and *v*, find shortest path between *u* and *v*. If no such path, return -1. 
``` def shortest_path(g, u, v): """Find shortest path between u and v in g.""" visited = set() from queue import Queue q = Queue() q.put([u]) while not q.empty(): path = q.get() if path[-1] == v: return path visited.add(path[-1]) for neighbor in g[path[-1]]: if not neighbor in visited: q.put(path+[neighbor]) return -1 assert shortest_path({'a': ['a']}, 'a', 'a') == ['a'] assert shortest_path({'a': [], 'b': []}, 'a', 'b') == -1 graph = {'a': ['b'], 'b': ['a', 'c', 'd'], 'c': ['b', 'd', 'e'], 'd': ['b', 'c', 'f'], 'e': ['c', 'f', 'g'], 'f': ['d', 'e', 'g'], 'g': ['e', 'f']} start = 'a' end = 'g' assert len(shortest_path(graph, start, end)) == 5 print('All passed!') ``` ## Red and Black belts Implementing your own hash table, heap, caching algorithms and more fun coming soon...
github_jupyter
# T81-558: Applications of Deep Neural Networks **Module 7: Convolutional Neural Networks.** * Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # Module Video Material Main video lecture: * [Part 7.1: Data Sets for Computer Vision](https://www.youtube.com/watch?v=u8xn393mDPM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=21) * [Part 7.2: Convolution Neural Network](https://www.youtube.com/watch?v=cf6FDLFNWEk&index=22&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) * [Part 7.3: Using Convolutional Neural Networks (CNNs) in Keras and TensorFlo](https://www.youtube.com/watch?v=LSSH_NdXwhU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=23) Weekly video update: * [Weekly Update #7, Feb 25, 2018](https://youtu.be/KquKVOXU2bc) # Helpful Functions You will see these at the top of every module. These are simply a set of reusable functions that we will make use of. Each of them will be explained as the semester progresses. They are explained in greater detail as the course progresses. Class 4 contains a complete overview of these functions. ``` from sklearn import preprocessing import matplotlib.pyplot as plt import numpy as np import pandas as pd import shutil import os import requests import base64 # Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue) def encode_text_dummy(df, name): dummies = pd.get_dummies(df[name]) for x in dummies.columns: dummy_name = "{}-{}".format(name, x) df[dummy_name] = dummies[x] df.drop(name, axis=1, inplace=True) # Encode text values to a single dummy variable. The new columns (which do not replace the old) will have a 1 # at every location where the original column (name) matches each of the target_values. One column is added for # each target value. 
def encode_text_single_dummy(df, name, target_values):
    """Add one 0/1 indicator column per target value.

    The original column `name` is kept; a new column "name-tv" is added
    for each tv in target_values, 1 where df[name] matches tv.
    """
    for tv in target_values:
        vals = list(df[name].astype(str))
        vals = [1 if str(x) == str(tv) else 0 for x in vals]
        name2 = "{}-{}".format(name, tv)
        df[name2] = vals

# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df, name):
    """Replace df[name] with integer label codes; return the class names."""
    le = preprocessing.LabelEncoder()
    df[name] = le.fit_transform(df[name])
    return le.classes_

# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
    """Standardize df[name] in place; mean/sd default to the column's own."""
    if mean is None:
        mean = df[name].mean()
    if sd is None:
        sd = df[name].std()
    df[name] = (df[name] - mean) / sd

# Convert all missing values in the specified column to the median
def missing_median(df, name):
    med = df[name].median()
    df[name] = df[name].fillna(med)

# Convert all missing values in the specified column to the default
def missing_default(df, name, default_value):
    df[name] = df[name].fillna(default_value)

# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    """Split df into (x, y) float32 matrices for TensorFlow.

    Integer targets are one-hot encoded (classification); anything else
    is returned as a single regression column.

    BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0 and raised
    AttributeError on any modern pandas; replaced with `.values`.
    """
    result = []
    for x in df.columns:
        if x != target:
            result.append(x)
    # find out the type of the target column. Is it really this hard? :(
    target_type = df[target].dtypes
    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    if target_type in (np.int64, np.int32):
        # Classification
        dummies = pd.get_dummies(df[target])
        return df[result].values.astype(np.float32), dummies.values.astype(np.float32)
    else:
        # Regression
        return df[result].values.astype(np.float32), df[[target]].values.astype(np.float32)

# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format a duration in seconds as 'H:MM:SS.ss'."""
    h = int(sec_elapsed / (60 * 60))
    m = int((sec_elapsed % (60 * 60)) / 60)
    s = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(h, m, s)

# Regression chart.
def chart_regression(pred, y, sort=True):
    """Plot predicted vs. expected values for a regression model.

    pred - 1-D array-like of predictions.
    y    - numpy array of true values (flattened before plotting).
    sort - when True, sort both series by the true value for readability.
    """
    t = pd.DataFrame({'pred': pred, 'y': y.flatten()})
    if sort:
        t.sort_values(by=['y'], inplace=True)
    plt.plot(t['y'].tolist(), label='expected')
    plt.plot(t['pred'].tolist(), label='prediction')
    plt.ylabel('output')
    plt.legend()
    plt.show()


# Remove all rows where the specified column is +/- sd standard deviations
def remove_outliers(df, name, sd):
    """Drop (in place) every row whose df[name] is >= sd std-devs from the mean."""
    drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))]
    df.drop(drop_rows, axis=0, inplace=True)


# Encode a column to a range between normalized_low and normalized_high.
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,
                         data_low=None, data_high=None):
    """Linearly rescale df[name] from [data_low, data_high] to
    [normalized_low, normalized_high], in place.

    data_low / data_high default independently to the column's min / max.
    (Bug fix: previously data_high was only computed when data_low was None,
    so supplying data_low alone crashed on a None data_high.)
    """
    if data_low is None:
        data_low = min(df[name])
    if data_high is None:
        data_high = max(df[name])
    df[name] = ((df[name] - data_low) / (data_high - data_low)) \
        * (normalized_high - normalized_low) + normalized_low


# This function submits an assignment. You can submit an assignment as much as you like, only
# the final submission counts. The parameters are as follows:
# data - Pandas dataframe output.
# key - Your student key that was emailed to you.
# no - The assignment class number, should be 1 through 1.
# source_file - The full path to your Python or IPYNB file. This must have "_class1" as part
#   of its name. The number must match your assignment number. For example "_class2" for
#   class assignment #2.
def submit(data,key,no,source_file=None): if source_file is None and '__file__' not in globals(): raise Exception('Must specify a filename when a Jupyter notebook.') if source_file is None: source_file = __file__ suffix = '_class{}'.format(no) if suffix not in source_file: raise Exception('{} must be part of the filename.'.format(suffix)) with open(source_file, "rb") as image_file: encoded_python = base64.b64encode(image_file.read()).decode('ascii') ext = os.path.splitext(source_file)[-1].lower() if ext not in ['.ipynb','.py']: raise Exception("Source file is {} must be .py or .ipynb".format(ext)) r = requests.post("https://api.heatonresearch.com/assignment-submit", headers={'x-api-key':key}, json={'csv':base64.b64encode(data.to_csv(index=False).encode('ascii')).decode("ascii"), 'assignment': no, 'ext':ext, 'py':encoded_python}) if r.status_code == 200: print("Success: {}".format(r.text)) else: print("Failure: {}".format(r.text)) ``` # Computer Vision This class will focus on computer vision. There are some important differences and similarities with previous neural networks. * We will usually use classification, though regression is still an option. * The input to the neural network is now 3D (height, width, color) * Data are not transformed, no zscores or dummy variables. * Processing time is much longer. * We now have different layer times: dense layers (just like before), convolution layers and max pooling layers. * Data will no longer arrive as CSV files. TensorFlow provides some utilities for going directly from image to the input for a neural network. # Computer Vision Data Sets There are many data sets for computer vision. Two of the most popular are the MNIST digits data set and the CIFAR image data sets. ## MNIST Digits Data Set The [MNIST Digits Data Set](http://yann.lecun.com/exdb/mnist/) is very popular in the neural network research community. 
A sample of it can be seen here: ![MNIST Data Set](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_8_mnist.png "MNIST Data Set") This data set was generated from scanned forms. ![Exam](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_8_exam.png "Exam") ## CIFAR Data Set The [CIFAR-10 and CIFAR-100](https://www.cs.toronto.edu/~kriz/cifar.html) datasets are also frequently used by the neural network research community. ![cifar-10](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_8_cifar.png "cifar-10") The CIFAR-10 data set contains low-rez images that are divided into 10 classes. The CIFAR-100 data set contains 100 classes in a hierarchy. # Other Resources * [Imagenet:Large Scale Visual Recognition Challenge 2014](http://image-net.org/challenges/LSVRC/2014/index) * [Andrej Karpathy](http://cs.stanford.edu/people/karpathy/) - PhD student/instructor at Stanford. * [CS231n Convolutional Neural Networks for Visual Recognition](http://cs231n.stanford.edu/) - Stanford course on computer vision/CNN's. * [CS231n - GitHub](http://cs231n.github.io/) * [ConvNetJS](http://cs.stanford.edu/people/karpathy/convnetjs/) - Javascript library for deep learning. # Convolutional Neural Networks (CNNs) The convolutional neural network (CNN) is a neural network technology that has profoundly impacted the area of computer vision (CV). Fukushima (1980) introduced the original concept of a convolutional neural network, and LeCun, Bottou, Bengio & Haffner (1998) greatly improved this work. From this research, Yan LeCun introduced the famous LeNet-5 neural network architecture. This class follows the LeNet-5 style of convolutional neural network. **A LeNET-5 Network (LeCun, 1998)** ![LENET5](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_8_lenet5.png "LENET5") So far we have only seen one layer type (dense layers). 
By the end of this course we will have seen: * **Dense Layers** - Fully connected layers. (introduced previously) * **Convolution Layers** - Used to scan across images. (introduced this class) * **Max Pooling Layers** - Used to downsample images. (introduced this class) * **Dropout Layer** - Used to add regularization. (introduced next class) ## Convolution Layers The first layer that we will examine is the convolutional layer. We will begin by looking at the hyper-parameters that you must specify for a convolutional layer in most neural network frameworks that support the CNN: * Number of filters * Filter Size * Stride * Padding * Activation Function/Non-Linearity The primary purpose for a convolutional layer is to detect features such as edges, lines, blobs of color, and other visual elements. The filters can detect these features. The more filters that we give to a convolutional layer, the more features it can detect. A filter is a square-shaped object that scans over the image. A grid can represent the individual pixels of a grid. You can think of the convolutional layer as a smaller grid that sweeps left to right over each row of the image. There is also a hyper parameter that specifies both the width and height of the square-shaped filter. Figure 10.1 shows this configuration in which you see the six convolutional filters sweeping over the image grid: A convolutional layer has weights between it and the previous layer or image grid. Each pixel on each convolutional layer is a weight. Therefore, the number of weights between a convolutional layer and its predecessor layer or image field is the following: ``` [FilterSize] * [FilterSize] * [# of Filters] ``` For example, if the filter size were 5 (5x4) for 10 filters, there would be 250 weights. You need to understand how the convolutional filters sweep across the previous layer’s output or image grid. 
Figure 10.2 illustrates the sweep: ![CNN](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_8_cnn_grid.png "CNN") The above figure shows a convolutional filter with a size of 4 and a padding size of 1. The padding size is responsible for the boarder of zeros in the area that the filter sweeps. Even though the image is actually 8x7, the extra padding provides a virtual image size of 9x8 for the filter to sweep across. The stride specifies the number of positions at which the convolutional filters will stop. The convolutional filters move to the right, advancing by the number of cells specified in the stride. Once the far right is reached, the convolutional filter moves back to the far left, then it moves down by the stride amount and continues to the right again. Some constraints exist in relation to the size of the stride. Obviously, the stride cannot be 0. The convolutional filter would never move if the stride were set to 0. Furthermore, neither the stride, nor the convolutional filter size can be larger than the previous grid. There are additional constraints on the stride (s), padding (p) and the filter width (f) for an image of width (w). Specifically, the convolutional filter must be able to start at the far left or top boarder, move a certain number of strides, and land on the far right or bottom boarder. The following equation shows the number of steps a convolutional operator must take to cross the image: $ steps = \frac{w - f + 2p}{s+1} $ The number of steps must be an integer. In other words, it cannot have decimal places. The purpose of the padding (p) is to be adjusted to make this equation become an integer value. ## Max Pooling Layers Max-pool layers downsample a 3D box to a new one with smaller dimensions. Typically, you can always place a max-pool layer immediately following convolutional layer. The LENET shows the max-pool layer immediately after layers C1 and C3. 
These max-pool layers progressively decrease the size of the dimensions of the 3D boxes passing through them. This technique can avoid overfitting (Krizhevsky, Sutskever & Hinton, 2012). A pooling layer has the following hyper-parameters: * Spatial Extent (f ) * Stride (s) Unlike convolutional layers, max-pool layers do not use padding. Additionally, max-pool layers have no weights, so training does not affect them. These layers simply downsample their 3D box input. The 3D box output by a max-pool layer will have a width equal to this equation: $ w_2 = \frac{w_1 - f}{s + 1} $ The height of the 3D box produced by the max-pool layer is calculated similarly with this equation: $ h_2 = \frac{h_1 - f}{s + 1} $ The depth of the 3D box produced by the max-pool layer is equal to the depth the 3D box received as input. The most common setting for the hyper-parameters of a max-pool layer are f =2 and s=2. The spatial extent (f) specifies that boxes of 2x2 will be scaled down to single pixels. Of these four pixels, the pixel with the maximum value will represent the 2x2 pixel in the new grid. Because squares of size 4 are replaced with size 1, 75% of the pixel information is lost. The following figure shows this transformation as a 6x6 grid becomes a 3x3: ![MaxPool](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_8_conv_maxpool.png "MaxPool") Of course, the above diagram shows each pixel as a single number. A grayscale image would have this characteristic. For an RGB image, we usually take the average of the three numbers to determine which pixel has the maximum value. [More information on CNN's](http://cs231n.github.io/convolutional-networks/) # TensorFlow with CNNs The following sections describe how to use TensorFlow/Keras with CNNs. # Access to Data Sets Keras provides built in access classes for MNIST. 
It is important to note that MNIST data arrives already separated into two sets: * **train** - Neural network will be trained with this. * **test** - Used for validation. ``` import keras from keras.callbacks import EarlyStopping from keras.layers import Dense, Dropout from keras import regularizers from keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() print("Shape of x_train: {}".format(x_train.shape)) print("Shape of y_train: {}".format(y_train.shape)) print() print("Shape of x_test: {}".format(x_test.shape)) print("Shape of y_test: {}".format(y_test.shape)) ``` # Display the Digits The following code shows what the MNIST files contain. ``` # Display as text from IPython.display import display import pandas as pd print("Shape for dataset: {}".format(x_train.shape)) print("Labels: {}".format(y_train)) # Single MNIST digit single = x_train[0] print("Shape for single: {}".format(single.shape)) display(pd.DataFrame(single.reshape(28,28))) # Display as image %matplotlib inline import matplotlib.pyplot as plt import numpy as np digit = 105 # Change to choose new digit a = x_train[digit] plt.imshow(a, cmap='gray', interpolation='nearest') print("Image (#{}): Which is digit '{}'".format(digit,y_train[digit])) ``` # Define CNN ``` import keras from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras import backend as K batch_size = 128 num_classes = 10 epochs = 12 # input image dimensions img_rows, img_cols = 28, 28 if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = 
x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print("Training samples: {}".format(x_train.shape[0])) print("Test samples: {}".format(x_test.shape[0])) # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) ``` ## Training/Fitting CNN The following code will train the CNN for 20,000 steps. This can take awhile, you might want to scale the step count back. GPU training can help. My results: * CPU Training Time: Elapsed time: 1:50:13.10 * GPU Training Time: Elapsed time: 0:13:43.06 ``` import tensorflow as tf import time start_time = time.time() model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_data=(x_test, y_test)) score = model.evaluate(x_test, y_test, verbose=0) print('Test loss: {}'.format(score[0])) print('Test accuracy: {}'.format(score[1])) elapsed_time = time.time() - start_time print("Elapsed time: {}".format(hms_string(elapsed_time))) ``` ## Evaluate Accuracy Note, if you are using a GPU you might get the **ResourceExhaustedError**. This occurs because the GPU might not have enough ram to predict the entire data set at once. ``` # Predict using either GPU or CPU, send the entire dataset. This might not work on the GPU. 
# Set the desired TensorFlow output level for this example score = model.evaluate(x_test, y_test, verbose=0) print('Test loss: {}'.format(score[0])) print('Test accuracy: {}'.format(score[1])) ``` GPUs are most often used for training rather than prediction. For prediction either disable the GPU or just predict on a smaller sample. If your GPU has enough memory, the above prediction code may work just fine. If not, just prediction on a sample with the following code: ``` from sklearn import metrics # For GPU just grab the first 100 images small_x = x_test[1:100] small_y = y_test[1:100] small_y2 = np.argmax(small_y,axis=1) pred = model.predict(small_x) pred = np.argmax(pred,axis=1) score = metrics.accuracy_score(small_y2, pred) print('Accuracy: {}'.format(score)) ``` # Latest Advances in CNN's He, K., Zhang, X., Ren, S., & Sun, J. (2016). [Deep residual learning for image recognition](https://arxiv.org/abs/1512.03385). In *Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition* (pp. 770-778). * [Caffee ResNet GitHub](https://github.com/KaimingHe/deep-residual-networks) * [ResNet in TensorFlow](https://github.com/tensorflow/models/blob/master/resnet/resnet_model.py) Geoffrey Hinton is suggesting that we might be [doing computer vision wrong](https://www.wired.com/story/googles-ai-wizard-unveils-a-new-twist-on-neural-networks/) and has introduced two new papers about an old technique called Capsule Neural Networks: * Sabour, S., Frosst, N., & Hinton, G. E. (2017). [Dynamic Routing Between Capsules](https://arxiv.org/abs/1710.09829). arXiv preprint arXiv:1710.09829. * [Matrix capsules with EM routing](https://openreview.net/forum?id=HJWLfGWRb&noteId=HJWLfGWRb) - ICLR 2018 Blind Submission # Module 7 Assignment You can find the first assignmeht here: [assignment 7](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class7.ipynb)
github_jupyter
``` import numpy as np from keras.models import Model from keras.layers import Input from keras.layers.recurrent import SimpleRNN from keras import backend as K import json from collections import OrderedDict def format_decimal(arr, places=6): return [round(x * 10**places) / 10**places for x in arr] DATA = OrderedDict() ``` ### SimpleRNN **[recurrent.SimpleRNN.0] units=4, activation='tanh'** Note dropout_W and dropout_U are only applied during training phase ``` data_in_shape = (3, 6) rnn = SimpleRNN(4, activation='tanh') layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3400 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.0'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[recurrent.SimpleRNN.1] units=5, activation='sigmoid'** Note dropout_W and dropout_U are only applied during training phase ``` data_in_shape = (8, 5) rnn = SimpleRNN(5, activation='sigmoid') layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, 
outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3500 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.1'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[recurrent.SimpleRNN.2] units=4, activation='tanh', return_sequences=True** Note dropout_W and dropout_U are only applied during training phase ``` data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=True) layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3600 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted 
= format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.2'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[recurrent.SimpleRNN.3] units=4, activation='tanh', return_sequences=False, go_backwards=True** Note dropout_W and dropout_U are only applied during training phase ``` data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=False, go_backwards=True) layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3700 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.3'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[recurrent.SimpleRNN.4] 
units=4, activation='tanh', return_sequences=True, go_backwards=True** Note dropout_W and dropout_U are only applied during training phase ``` data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=True, go_backwards=True) layer_0 = Input(shape=data_in_shape) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3800 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.4'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[recurrent.SimpleRNN.5] units=4, activation='tanh', return_sequences=False, go_backwards=False, stateful=True** Note dropout_W and dropout_U are only applied during training phase **To test statefulness, model.predict is run twice** ``` data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=False, go_backwards=False, stateful=True) layer_0 = Input(batch_shape=(1, *data_in_shape)) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in 
enumerate(model.get_weights()): np.random.seed(3800 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.5'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[recurrent.SimpleRNN.6] units=4, activation='tanh', return_sequences=True, go_backwards=False, stateful=True** Note dropout_W and dropout_U are only applied during training phase **To test statefulness, model.predict is run twice** ``` data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=True, go_backwards=False, stateful=True) layer_0 = Input(batch_shape=(1, *data_in_shape)) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3810 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = 
model.predict(np.array([data_in])) result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.6'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[recurrent.SimpleRNN.7] units=4, activation='tanh', return_sequences=False, go_backwards=True, stateful=True** Note dropout_W and dropout_U are only applied during training phase **To test statefulness, model.predict is run twice** ``` data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', return_sequences=False, go_backwards=True, stateful=True) layer_0 = Input(batch_shape=(1, *data_in_shape)) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3820 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U', 'b'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) 
DATA['recurrent.SimpleRNN.7'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[recurrent.SimpleRNN.8] units=4, activation='tanh', use_bias=False, return_sequences=True, go_backwards=True, stateful=True** Note dropout_W and dropout_U are only applied during training phase **To test statefulness, model.predict is run twice** ``` data_in_shape = (7, 6) rnn = SimpleRNN(4, activation='tanh', use_bias=False, return_sequences=True, go_backwards=True, stateful=True) layer_0 = Input(batch_shape=(1, *data_in_shape)) layer_1 = rnn(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(3830 + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) weight_names = ['W', 'U'] for w_i, w_name in enumerate(weight_names): print('{} shape:'.format(w_name), weights[w_i].shape) print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist())) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['recurrent.SimpleRNN.8'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` ### export for Keras.js tests ``` print(json.dumps(DATA)) ```
github_jupyter
<h1>Model Evaluation and Selection</h1> **Agenda** - **Regression Evaluation** - What is the usage of **classification accuracy**? - How does a **confusion matrix** describe the performance of a classifier? - **Precision, Recall, F1-score** and **Fb-Score** - **ROC, AUC curve** - Evaluation measures for **multi-class** classification - **Overfitting** and **Underfitting** - **Cross-Validation** - **Learning Curves** - Searching for optimal tuning parameters using **Grid Search** and **Random Search** **Load Libs** ``` from sklearn.model_selection import train_test_split import numpy as np import pandas as pd from matplotlib import pyplot as plt import warnings; warnings.filterwarnings('ignore') ``` ## Regression Evaluation ``` from sklearn.linear_model import LinearRegression from sklearn.datasets import load_boston Dataset = load_boston() X,y = Dataset.data, Dataset.target X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=.3) clf = LinearRegression().fit(X_train, y_train) y_pred = clf.predict(X_test) ``` ### Mean Absolute Error <img src='img/eq.jpg' > <img src='img/mae.png' > ``` from sklearn.metrics import mean_absolute_error mean_absolute_error(y_test, y_pred) ``` ### Mean Squared Error <img src="img/eq2.jpg"> ``` from sklearn.metrics import mean_squared_error mean_squared_error(y_test, y_pred) ``` ### R2 Score <img src='img/r2.png' > ``` from sklearn.metrics import r2_score r2_score(y_test, y_pred) ``` ## Classification Evaluation ### Imbalanced-Data Classification Accuracy **Dataset Link:** https://www.kaggle.com/mlg-ulb/creditcardfraud <br> **Alternatively** unzip credit_card_fraud.rar ``` Dataset = pd.read_csv('credit_card_fraud.csv') print(Dataset.shape) Dataset.head(10) X = Dataset.iloc[:,:-1] target = Dataset.iloc[:,Dataset.shape[1]-1] target_count = target.value_counts() print('Not Fraud:', target_count[0]) print('Fraud:', target_count[1]) target_count.plot(kind='bar', title='Count (target)'); ``` <img src="img/f2.png"> 
**Logistic Regression** ``` X_train, X_test, y_train, y_test = train_test_split(X, target, random_state=42, test_size=.3) from sklearn.linear_model import LogisticRegression lr = LogisticRegression(random_state = 42).fit(X_train, y_train) y_pred = lr.fit(X_train, y_train).predict(X_test) from sklearn import metrics print("Accurcy Score: {0}".format(metrics.accuracy_score(y_test, y_pred))) ``` **Dummy Classifiers** <img src="img/f3.png"> ``` from sklearn.dummy import DummyClassifier dummy_majority = DummyClassifier(strategy = 'most_frequent').fit(X_train, y_train) y_dummy_predictions = dummy_majority.predict(X_test) print("Dummy Score: {0}".format(dummy_majority.score(X_test, y_test))) ``` **Accuracy** <img src="img/acc.png"> ### Confusion matrix **Example** <img src="img/conf_ex2.png"> <img src='img/conf_eq.png' > **Confusion Matrix of Dummy Classifer** ``` from sklearn.metrics import confusion_matrix confusion_dummy = confusion_matrix(y_true=y_test, y_pred=y_dummy_predictions) print(pd.DataFrame(data=confusion_dummy, index=["Actuall Not Fraud","Actuall Fraud"], columns =["Predicted Not Fraud","Predicted Fraud"])) labels = ['Not Fraud', 'Fraud'] fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(confusion_dummy, cmap=plt.cm.Blues) fig.colorbar(cax) ax.set_xticklabels([''] + labels) ax.set_yticklabels([''] + labels) plt.xlabel('Predicted') plt.ylabel('Actuall') plt.show() ``` **Confusion Matrix of Logistic regression** ``` confusion = confusion_matrix(y_true=y_test, y_pred=y_pred) print(pd.DataFrame(data=confusion, index=["Actuall Not Fraud","Actuall Fraud"], columns =["Predicted Not Fraud","Predicted Fraud"])) labels = ['Not Fraud', 'Fraud'] fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(confusion, cmap=plt.cm.Blues) fig.colorbar(cax) ax.set_xticklabels([''] + labels) ax.set_yticklabels([''] + labels) plt.xlabel('Predicted') plt.ylabel('Actuall') plt.show() ``` ### Precision How many of the predicted positive examples are really positive? 
<img src='img/pre.png' > ### Recall (Sensitivity) When the actual value is positive, how often is the prediction correct? <img src='img/re.png' > <br> **Trading Off Precision and Recall**<br> Can be achieved by e.g. varying decision **threshold** of a classifier. - Suppose we want to predict y=1 only if very **confident** - Higher threshold, higher precision, lower recall. - Suppose we want to **avoid missing** positive examples - Lower threshold, higher recall, lower precision. **Which metrics should you focus on?** - Choice of metric depends on your **business objective** - **Spam filter** (positive class is "spam"): Optimize for **precision or specificity** because false negatives (spam goes to the inbox) are more acceptable than false positives (non-spam is caught by the spam filter) - **Fraudulent transaction detector** (positive class is "fraud"): Optimize for **Recall (sensitivity)** because false positives (normal transactions that are flagged as possible fraud) are more acceptable than false negatives (fraudulent transactions that are not detected) <br> ### F1-Score **F1-Score** is the **Harmonic Mean** between precision and recall <img src='img/f1-score.png' > ### Fb-Score Gives a percentage more **importance/weight** to either **precision** or **recall** <img src='img/fb-score.png' > ``` from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score print("Score of Logistic Regression Classifer:") print(pd.DataFrame(data=[accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), recall_score(y_test, y_pred), f1_score(y_test, y_pred)], index=["Accuracy","Precision","Recall","F1-Score"], columns =["Value"])) print("Score of Dummy Regression Classifer:") print(pd.DataFrame(data=[accuracy_score(y_test, y_dummy_predictions), precision_score(y_test, y_dummy_predictions), recall_score(y_test, y_dummy_predictions), f1_score(y_test, y_dummy_predictions)], index=["Accuracy","Precision","Recall","F1-Score"], columns =["Value"])) from 
sklearn.metrics import classification_report print("Score of Logistic Regression Classifier:") print(classification_report(y_test, y_pred, target_names=['Not Fraud', 'Fraud'])) print("Score of Dummy Classifier:") print(classification_report(y_test, y_dummy_predictions, target_names=['Not Fraud', 'Fraud'])) from sklearn.metrics import fbeta_score print("Fb-Score: {:.2f}".format(fbeta_score(y_test, y_pred, average='macro', beta=2))) print("Fb-Score: {:.2f}".format(fbeta_score(y_test, y_pred, average='macro', beta=1))) for THRESHOLD in np.arange(0.1, 1.0, 0.1): preds = np.where(lr.predict_proba(X_test)[:,1] > THRESHOLD, 1, 0) print ("THRESHOLD= {:.1f}".format(THRESHOLD)) print(pd.DataFrame(data=[precision_score(y_test, preds), recall_score(y_test, preds),], index=["Precision", "Recall"], columns =["Value"])) print() print() ``` ### ROC curves, Area-Under-Curve (AUC) **Recall (Sensitivity)** **When the actual value is positive, how often is the prediction correct?** <img src='img/re.png' > **Specificity** **When the actual value is negative, how often is the prediction correct?** <img src='img/spec.png'> ``` from sklearn.metrics import roc_curve, auc y_score_lr = lr.fit(X_train, y_train).decision_function(X_test) fpr_lr, tpr_lr, _ = roc_curve(y_test, y_score_lr) plt.figure(num=None, figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k') plt.xlim([-0.01, 1.00]) plt.ylim([-0.01, 1.01]) plt.plot(fpr_lr, tpr_lr, lw=3, label='AUC: {:0.2f})'.format(metrics.roc_auc_score(y_test, y_score_lr))) plt.xlabel('False Positive Rate (Specifity)', fontsize=16) plt.ylabel('True Positive Rate (Sensitivity)', fontsize=16) plt.title('ROC curve', fontsize=16) plt.legend(loc='lower right', fontsize=13) plt.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--') plt.axes().set_aspect('equal') plt.show() ``` **AUC = 0 (worst) AUC = 1 (best)** **ROC/AUC advantages:** - Does not require you to **set a classification threshold** - Still useful when there is **high class imbalance** - Gives a 
**single number** for easy comparison. ## Evaluation measures for multi-class classification **Data Preparation** ``` from sklearn.ensemble import RandomForestClassifier import seaborn as sns from sklearn.datasets import fetch_olivetti_faces dataset = fetch_olivetti_faces() X, y = dataset.data, dataset.target X_train_mc, X_test_mc, y_train_mc, y_test_mc = train_test_split(X, y, random_state=0) ``` **Classification** ``` clf = RandomForestClassifier(n_estimators=200, max_depth=15, random_state=42).fit(X_train_mc, y_train_mc) clf_predicted_mc = clf.predict(X_test_mc) ``` **Confusion Matrix** ``` confusion_mc = confusion_matrix(y_test_mc, clf_predicted_mc) df_cm = pd.DataFrame(confusion_mc, index = [i for i in range(0,confusion_mc.shape[0])], columns = [i for i in range(0,confusion_mc.shape[1])]) plt.figure(num=None, figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k') sns.heatmap(df_cm, annot=True) plt.title('Random Forest Classifier \nAccuracy:{0:.3f}'.format(accuracy_score(y_test_mc, clf_predicted_mc))) plt.ylabel('Actuall label') plt.xlabel('Predicted label') print(classification_report(y_test_mc, clf_predicted_mc)) ``` ## Overfitting and Underfitting - Machine Learning models have one sole purpose; to generalize well.<br> - A model that **generalizes** well is a model that is neither **underfit** nor **overfit.** - **Generalization** is the model’s ability to give sensible outputs to sets of input that it has never seen before. 
``` from sklearn.tree import DecisionTreeRegressor # Create a random dataset rng = np.random.RandomState(1) X = np.sort(5 * rng.rand(80, 1), axis=0) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - rng.rand(16)) # Plot the results plt.figure(num=None, figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k') plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data") plt.xlabel("data") plt.ylabel("target") plt.title("Data") plt.legend() plt.show() ``` <br> <br> **Overfitting** is the case where the overall cost is really small, but the generalization of the model is unreliable. This is due to the model learning “too much” from the training data set. ``` # Fit regression model clf = DecisionTreeRegressor(max_depth=15) clf.fit(X, y) # Predict y_1 = clf.predict(X) # Plot the results plt.figure(num=None, figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k') plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data") plt.plot(X, y_1, color="cornflowerblue", label="max_depth=15", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Overfitting") plt.legend() plt.show() ``` **Overfitting** happens when the model is too complex relative to the amount and noisiness of the training data.<br><br> **Possible Solutions:** - Simplify the model by selecting one with fewer parameters. - Gather more training data. - Reduce the noise in the training data. - Apply Cross- Validation. <br> <br> **Underfitting** is the case where the model has “ not learned enough” from the training data, resulting in low generalization and unreliable predictions. 
``` X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.9) # Fit regression model clf = DecisionTreeRegressor(max_depth=1) clf.fit(X_train, y_train) # Predict X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis] y_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis] y_1 = clf.predict(X_test) # Plot the results plt.figure(num=None, figsize=(7, 7), dpi=80, facecolor='w', edgecolor='k') plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data") plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=1", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Underfitting") plt.legend() plt.show() ``` **Underfitting** occurs when your model is too simple to learn the underlying structure of the data.<br><br> **Possible Solutions:** - Selecting a more powerful model, with more parameters. - Feeding better features to the learning algorithm. - Reducing the constraints on the model, <br> <br> ## Model selection using evaluation metrics **Train/test on same data** - Typically overfits and likely won't generalize well to new data. - **Not recommended** ### Single train/test split (Holdout) - Fast and simple. - In most patterns we don't have the **luxury** of large data set. - Generally, this split will be close to 80% of the data for training and 20% of the data for testing. - Splitting the data set into training and testing sets will leave us with either **insufficient training or testing patterns**. - Clearly the testing data set contains **useful information** for learning. Yet, they are **ignored** and not used for training purposes in the data splitting error rate estimation method. - If dataset is not completely even. 
In splitting our dataset, we may end up splitting it in such a way that our training set is very different from the test set.
- The advantage of this method over K-Fold is that the proportion of the train-test split is not dependent on the number of iterations. - The disadvantage of this method is that some points may never be selected to be in the test subset at all — at the same time, some points might be selected multiple times. <img src='k2.png' > ``` from sklearn.metrics import f1_score from sklearn.model_selection import RepeatedKFold data = load_breast_cancer() X, y = data.data, data.target rkf = RepeatedKFold(n_splits=5, n_repeats=2, random_state=42) l = [] knn = KNeighborsClassifier(n_neighbors=5) for train_index, test_index in rkf.split(X): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] knn.fit(X_train, y_train) y_pred = knn.predict(X_test) l.append(f1_score(y_test, y_pred)) print('RepeatedKFold (F1-Score)', l) print("Average RepeatedKFold score (F1-Score) : {:.3f}".format(sum(l) / len(l))) ``` <br> <br> ### Learning Curves - **Train Learning Curve:** Learning curve calculated from the training dataset that gives an idea of how well the model is learning. - **Validation Learning Curve:** Learning curve calculated from a hold-out validation dataset that gives an idea of how well the model is generalizing. 
``` import numpy as np import matplotlib.pyplot as plt from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC from sklearn.datasets import load_digits from sklearn.model_selection import learning_curve from sklearn.model_selection import ShuffleSplit from sklearn.tree import DecisionTreeClassifier def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)): plt.figure() plt.title(title) if ylim is not None: plt.ylim(*ylim) plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="validation score") plt.legend(loc="best") return plt digits = load_digits() X, y = digits.data, digits.target ``` **Underfitting:** A model of a given complexity will underfit a large dataset: this means that the training score will decrease, but the validation score will increase <br> 1. Example of Training Learning Curve Showing an Underfit Model That Requires Further Training ``` title = "Learning Curves (Naive Bayes)" estimator = GaussianNB() plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01)) ``` 2. 
Example of Training Learning Curve Showing An Underfit Model That Does Not Have Sufficient Capacity ``` title = "Learning Curves (SVC)" estimator = SVC(gamma=0.1) plot_learning_curve(estimator, title, X, y) plt.show() ``` **Overfitting:** A model of a given complexity will overfit a small dataset: this means the training score will be relatively high, while the validation score increases to a point and begins decreasing again. ``` title = "Learning Curves (Decision Tree)" cv = ShuffleSplit(n_splits=10, test_size=0.001, random_state=0) estimator = DecisionTreeClassifier(random_state=42, max_depth=100) plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv) plt.show() ``` **Good Fit:** A model will never, except by chance, give a better score to the validation set than the training set: this means the curves should keep getting closer together but never cross. ``` title = "Learning Curves (SVC)" cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0) estimator = SVC(gamma=0.001) plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv) plt.show() ``` ### Searching for optimal tuning parameters using Grid Search and Random Search #### Grid Search <img src='grid.png' > ``` from sklearn.datasets import load_digits from sklearn.model_selection import train_test_split dataset = load_digits() X, y = dataset.data, dataset.target == 1 X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV clf = SVC(kernel='rbf') grid_values = {'C': [ 0.01, 0.1, 1, 10, 100], 'gamma': [ 0.005, 0.01, 0.05, 0.1, 1, 10, 100]} grid_clf = GridSearchCV(clf, param_grid = grid_values,cv=5, scoring = 'f1_micro') grid_clf.fit(X_train, y_train) y_decision_fn_scores = grid_clf.decision_function(X_test) print('Grid best score (F1): {:.2f}'.format(grid_clf.best_score_)) print('Grid best parameter (max. 
F1): ', grid_clf.best_params_) print('Grid best Estimator: ',grid_clf.best_estimator_) ``` #### Random Search <img src='rand.png' > ``` from sklearn.model_selection import RandomizedSearchCV from sklearn.neighbors import KNeighborsClassifier param_dist = dict(n_neighbors=list(range(1, 200)), weights=['uniform', 'distance'] ,algorithm =['auto', 'ball_tree', 'kd_tree', 'brute'] ,leaf_size=list(range(1, 70)), p = [1,2] ) knn = KNeighborsClassifier() rand = RandomizedSearchCV(knn, param_dist, cv=5, scoring='f1_micro', n_iter=100) rand.fit(X_train, y_train) y_decision_fn_scores = rand.predict(X_test) print('Grid best score (F1): {:.2f}'.format(rand.best_score_)) print('Grid best parameter (max. F1): ', rand.best_params_) print('Grid best Estimator: ',rand.best_estimator_) ``` #### Grid Search vs Random Search <img src='com.png' > **Grid Search** - Works best for lower dimensional data. - finds the optimal parameter. - Finds the best soultion. - Slower. **Random Search** - Works best for higher dimensional data. - The optimal parameter is not found since we do not have it in our grid. - Finds the near best solution. - Faster. <br><br><br><br> **Comments or Questions?** - Email: <baghdady.usama@gmail.com> - Linkedin: www.linkedin.com/in/usama-albaghdady-76944057 - Facebook: www.facebook.com/baghdady.usama
github_jupyter
# Train VAE ``` %load_ext autoreload %autoreload 2 import os import sys import ast import pandas as pd import numpy as np import random import warnings warnings.filterwarnings(action='ignore') sys.path.append("../") from functions import vae from numpy.random import seed randomState = 123 seed(randomState) # Create list of base directories base_dir = os.path.abspath(os.path.join(os.getcwd(),"../..")) base_dirs = [os.path.join(base_dir, "data"), os.path.join(base_dir, "models"), os.path.join(base_dir, "output"), os.path.join(base_dir, "data", "encoded"), os.path.join(base_dir, "output", "stats"), os.path.join(base_dir, "output", "viz") ] # Check if analysis directory exist otherwise create for each_dir in base_dirs: if os.path.exists(each_dir): print('directory already exists: {}'.format(each_dir)) else: print('creating new directory: {}'.format(each_dir)) os.makedirs(each_dir, exist_ok=True) # Load config params config_file = "config_exp_0.txt" d = {} float_params = ["learning_rate", "kappa", "epsilon_std"] str_params = ["analysis_name", "NN_architecture"] lst_params = ["num_batches"] with open(config_file) as f: for line in f: (name, val) = line.split() if name in float_params: d[name] = float(val) elif name in str_params: d[name] = str(val) elif name in lst_params: d[name] = ast.literal_eval(val) else: d[name] = int(val) # Load arguments normalized_data_file = os.path.join( os.path.abspath(os.path.join(os.getcwd(),"../..")), "data", "input", "train_set_normalized.pcl") # Read data normalized_data = pd.read_table( normalized_data_file, header=0, sep='\t', index_col=0).T print(normalized_data.shape) # Parameters learning_rate = d['learning_rate'] batch_size = d['batch_size'] epochs = d['epochs'] kappa = d['kappa'] intermediate_dim = d['intermediate_dim'] latent_dim = d['latent_dim'] epsilon_std = d['epsilon_std'] num_PCs = latent_dim train_architecture = "NN_{}_{}".format(intermediate_dim, latent_dim) # Create output directories output_dirs = 
[os.path.join(base_dir, "data", "encoded"), os.path.join(base_dir, "models"), os.path.join(base_dir, "output", "stats"), os.path.join(base_dir, "output", "viz") ] # Check if analysis directory exist otherwise create for each_dir in output_dirs: new_dir = os.path.join(each_dir, train_architecture) if os.path.exists(new_dir): print('directory already exists: {}'.format(new_dir)) else: print('creating new directory: {}'.format(new_dir)) os.makedirs(new_dir, exist_ok=True) # Train nonlinear (VAE) vae.tybalt_2layer_model(learning_rate, batch_size, epochs, kappa, intermediate_dim, latent_dim, epsilon_std, base_dir, train_architecture ) ```
github_jupyter
# To evaluta the network of classifer, we trained the model on GTSRB dataset Thanks for the great selfless tutorial: https://chsasank.github.io/keras-tutorial.html ``` import numpy as np from skimage import io, color, exposure, transform from sklearn.cross_validation import train_test_split import os import glob import h5py from time import time from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential, model_from_json from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Merge from keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D from keras.optimizers import SGD, Adam, RMSprop from keras.utils import np_utils from keras.callbacks import LearningRateScheduler, ModelCheckpoint, TensorBoard, ReduceLROnPlateau from keras.regularizers import l2 from keras import backend as K import numpy as np from matplotlib import pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 NUM_CLASSES = 43 IMG_SIZE = 48 ``` ## Function to preprocess the image: ``` def preprocess_img(img): # Histogram normalization in y # hsv = color.rgb2hsv(img) # hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2]) # img = color.hsv2rgb(hsv) # central scrop min_side = min(img.shape[:-1]) centre = img.shape[0]//2, img.shape[1]//2 img = img[centre[0]-min_side//2:centre[0]+min_side//2, centre[1]-min_side//2:centre[1]+min_side//2, :] # //代表 整数除法 # rescale to standard size img = transform.resize(img, (IMG_SIZE, IMG_SIZE)) # 注意对于tensorflow和theano通道顺序的不同 # roll color axis to axis 0 #img = np.rollaxis(img,-1) return img def get_class(img_path): return int(img_path.split('/')[-2]) ``` ## Preprocess all training images into a numpy array ``` try: with 
h5py.File('X.h5') as hf: X, Y = hf['imgs'][:], hf['labels'][:] print("Loaded images from X.h5") except (IOError,OSError, KeyError): print("Error in reading X.h5. Processing all images...") root_dir = '/home/jia/Desktop/traffic_sign_keras1.2/GTSRB_Train_images/Final_Training/Images/' imgs = [] labels = [] all_img_paths = glob.glob(os.path.join(root_dir, '*/*.ppm')) #打乱图片路径顺序 np.random.shuffle(all_img_paths) for img_path in all_img_paths: try: img = preprocess_img(io.imread(img_path)) # io.imread 读入的数据是 uint8 label = get_class(img_path) imgs.append(img) labels.append(label) if len(imgs)%1000 == 0: print("Processed {}/{}".format(len(imgs), len(all_img_paths))) except (IOError, OSError): print('missed', img_path) pass X = np.array(imgs, dtype='float32') Y = np.eye(NUM_CLASSES, dtype='uint8')[labels] # Y = ***[labels] 生成one-hot编码的方式 with h5py.File('X.h5','w') as hf: hf.create_dataset('imgs', data=X) hf.create_dataset('labels', data=Y) ``` # Load and Preprocess Test images ``` try: with h5py.File('X_test.h5') as hf: X_test, y_test = hf['imgs'][:], hf['labels'][:] print("Loaded images from X_test.h5") except (IOError,OSError, KeyError): print("Error in reading X.h5. 
Processing all images...") import pandas as pd test = pd.read_csv('/home/jia/Desktop/traffic_sign_keras1.2/GTSRB_Test_images/Final_Test/Images/GT-final_test.csv',sep=';') X_test = [] y_test = [] i = 0 for file_name, class_id in zip(list(test['Filename']), list(test['ClassId'])): img_path = os.path.join('/home/jia/Desktop/CS231N_AND_traffic_sign_keras1.2/GTSRB_Test_images/Final_Test/Images/',file_name) X_test.append(preprocess_img(io.imread(img_path))) y_test.append(class_id) X_test = np.array(X_test, dtype='float32') y_test = np.array(y_test, dtype='uint8') with h5py.File('X_test.h5','w') as hf: hf.create_dataset('imgs', data=X_test) hf.create_dataset('labels', data=y_test) index=np.zeros(1307, dtype='int') for i in range(1307): index[i]=i*30+np.random.randint(0,30) X_val = X[index] y_val = Y[index] # creat the training index1 index1=np.setdiff1d(np.array(range(39210)), index, assume_unique=True) X_train=X[index1] y_train=Y[index1] normalize = 0 # Normalize the data: subtract the mean image if normalize: mean_image = np.mean(X_train, axis=0) X_train -= mean_image X_val -= mean_image X_test -= mean_image print 'Train data shape: ', X_train.shape print 'Train labels shape: ', y_train.shape print 'Validation data shape: ', X_val.shape print 'Validation labels shape: ', y_val.shape print 'Test data shape: ', X_test.shape print 'Test labels shape: ', y_test.shape ``` # Show Traffic Sign Examples ``` # Visualize some examples from the dataset. # We show a few examples of training images from each class. 
classes = [8, 11, 14,16, 26, 33, 38] num_classes = len(classes) samples_per_class = 7 for y, cls in enumerate(classes): idxs = np.flatnonzero(y_test == cls) #返回扁平化数组之后非0元素的index, y是数字标签 idxs = np.random.choice(idxs, samples_per_class, replace=False) #replace=False 使得随即选取的没有重复的 for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) img=X_test[idx].copy() plt.imshow(img) #X原始是float64,图像显示要变成uint8 plt.axis('off') #取消axis的显示 if i == 0: plt.title(cls) foo_fig = plt.gcf() # 'get current figure' #foo_fig.savefig('foo.eps', format='eps', dpi=1000) foo_fig.savefig('jiatong.png', format='png', dpi=300) plt.close() ``` # Initialization of Weights ``` from keras import backend as K import numpy as np def my_init(shape, name=None): value = np.random.random(shape) return K.variable(value, name=name) ``` # Define Keras model ``` # 注意keras使用tensorflow和thano不同后台, 数据输入的通道顺序不同哦 def cnn_model(): branch_0= Sequential() branch_1 = Sequential() model0 = Sequential() model = Sequential() # ********************************************** 48*48 model0.add(Convolution2D(32, 3, 3, border_mode='same', init='he_normal' , input_shape=(IMG_SIZE, IMG_SIZE, 3))) model0.add(BatchNormalization(epsilon=1e-06, axis=3)) model0.add(Activation('relu')) model0.add(Convolution2D(48, 7, 1, border_mode='same', init='he_normal')) model0.add(BatchNormalization(epsilon=1e-06, axis=3)) model0.add(Activation('relu')) model0.add(Convolution2D(48, 1, 7, border_mode='same', init='he_normal')) model0.add(BatchNormalization(epsilon=1e-06, axis=3)) model0.add(Activation('relu')) model0.add(MaxPooling2D(pool_size=(2, 2))) model0.add(Dropout(0.2)) # ****************************************** 24*24 branch_0.add(model0) branch_1.add(model0) branch_0.add(Convolution2D(64, 3, 1, border_mode='same', init='he_normal')) branch_0.add(BatchNormalization(epsilon=1e-06, axis=3)) branch_0.add(Activation('relu')) branch_0.add(Convolution2D(64, 1, 3, border_mode='same', 
init='he_normal')) branch_0.add(BatchNormalization(epsilon=1e-06, axis=3)) branch_0.add(Activation('relu')) branch_1.add(Convolution2D(64, 1, 7, border_mode='same', init='he_normal')) branch_1.add(BatchNormalization(epsilon=1e-06, axis=3)) branch_1.add(Activation('relu')) branch_1.add(Convolution2D(64, 7, 1, border_mode='same', init='he_normal')) branch_1.add(BatchNormalization(epsilon=1e-06, axis=3)) branch_1.add(Activation('relu')) model.add(Merge([branch_0, branch_1], mode='concat', concat_axis=-1)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) # ******************************************* 12*12 model.add(Convolution2D(128, 3, 3, border_mode='same', init='he_normal')) model.add(BatchNormalization(epsilon=1e-06, axis=3)) model.add(Activation('relu')) model.add(Convolution2D(256, 3, 3, border_mode='same', init='he_normal')) # 之前是256个滤波器 model.add(BatchNormalization(epsilon=1e-06, axis=3)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.3)) # *************************************** 6*6 model.add(Flatten()) model.add(Dense(256, init='he_normal')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(0.4)) model.add(Dense(NUM_CLASSES, activation='softmax', init='he_normal')) return model model = cnn_model() # let's train the model using SGD + momentum (how original). 
lr = 0.001 sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True) adm = Adam(lr=0.001, decay=1e-6) #之前没有设置decay model.compile(loss='categorical_crossentropy', optimizer=adm, metrics=['accuracy']) def lr_schedule(epoch): return lr * (0.1 ** int(epoch/10)) ``` # Plot and Save the model ``` from keras.utils.visualize_util import plot plot(model, to_file='model.png') ``` # Start Training ``` batch_size = 16 nb_epoch = 30 t1=time() history = model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, validation_data=(X_val, y_val), shuffle = True ) t2=time() print (t2-t1) ``` # Test on the Test Dataset ``` t1=time() y_pred = model.predict_classes(X_test) t2=time() acc = np.mean(y_pred==y_test) print("Test accuracy = {}".format(acc)) print (t2-t1) ``` # Plot the Training History ``` plt.figure(figsize=(6,5)) plt.plot(history.history['loss'], '-o') plt.plot(history.history['val_loss'], '-x') plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train_loss', 'val_loss'], loc='upper right') plt.show() plt.figure(figsize=(6,5)) plt.plot(history.history['acc'], '-o') plt.plot(history.history['val_acc'], '-x') plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train_acc', 'val_acc'], loc='lower right') plt.show() ``` # Load Test data ``` # 将类别标签转换成为 one-hot 编码, 如果需要的话 #from keras.utils.np_utils import to_categorical #y_test1 = to_categorical(y_test, 43) ``` # With Data augmentation ``` from sklearn.cross_validation import train_test_split datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, shear_range=0.1, rotation_range=10.,) datagen.fit(X_train) # # Reinstallise models # model = cnn_model() # # let's train the model using SGD + momentum (how original). 
# lr = 0.001 # sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True) # model.compile(loss='categorical_crossentropy', # optimizer=adm, # metrics=['accuracy']) # def lr_schedule(epoch): # return lr*(0.1**int(epoch/10)) ``` # Training based on the former model ``` nb_epoch = 200 history2=model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size), samples_per_epoch=X_train.shape[0], nb_epoch=nb_epoch, validation_data=(X_val, y_val), callbacks=[ReduceLROnPlateau('val_loss', factor=0.2, patience=20, verbose=1, mode='auto'), ModelCheckpoint('model_final.h5',save_best_only=True)] ) model.save('my_model_99.66.h5') t1=time() y_pred = model.predict_classes(X_test) t2=time() acc = np.mean(y_pred==y_test) print("Test accuracy = {}".format(acc)) print (t2-t1) model.summary() model.count_params() ```
github_jupyter
``` import requests from bs4 import BeautifulSoup import random import re import json import copy from collections import defaultdict import time from tqdm import tqdm import numpy as np import nltk from nltk.tokenize import word_tokenize from nltk.corpus import stopwords import torch import torch.nn as nn import torch.utils.data as data import torch.optim as optim import torch.nn.functional as F import pandas as pd #nltk.download('punkt') #nltk.download('stopwords') ####WIKIPEDIA SCRAPING##### response = requests.get('https://en.wikipedia.org/wiki/Machine_learning') html = response.text soup = BeautifulSoup(html, 'html.parser') stop_words = set(stopwords.words('english')) # get random wikipedia corpus alllinks = [] num_links = 0 max_links = 10 print("Scraping links...") pbar = tqdm(total = max_links) while(num_links < max_links): links = [] # Get all the links for link in soup.findAll('a', attrs={'href': re.compile("^/wiki/")}): links.append(link.get('href')) random.shuffle(links) alllinks.append(links[0]) soup = BeautifulSoup(requests.get( "http://en.wikipedia.org" + links[0]).text, 'html.parser') num_links += 1 pbar.update(1) pbar.close() alltext = "" print("Extracting text...") # put all words into text file for link in tqdm(alllinks): response = requests.get("http://en.wikipedia.org" + link) html = response.text soup = BeautifulSoup(html, 'html.parser') # extract paragraphs text_container = soup.find('div', {'class': 'mw-parser-output'}) zero_paragaph = {"title": "", "text": ""} if(text_container == None): continue current_paragraph = copy.deepcopy(zero_paragaph) page = {'paragraphs': []} for child in text_container.children: if child.name == "p": current_paragraph["text"] += child.text + "\n" elif child.name == "h2": page['paragraphs'].append(current_paragraph) current_paragraph = copy.deepcopy(zero_paragaph) current_paragraph["title"] = next(child.children).text page['paragraphs'] = list( filter(lambda x: x["text"] != "", page['paragraphs'])) for pg in 
page['paragraphs']: alltext += pg['text'] uncleaned_tokens = word_tokenize(alltext) print("Cleaning...") #clean up tokens stop_words = set(stopwords.words('english')) tokens = [] for tok in tqdm(uncleaned_tokens): tok = tok.lower() if(tok.isalnum() and not tok.isdigit()): tokens.append(tok) #### CORPUS #### class Corpus(torch.utils.data.Dataset): def __init__(self, tokens, ngrams=1): self.ngrams = ngrams self.tokens = tokens #list of words self.totlen = len(tokens) # vocab will be defined over kmers self.create_vocab() def create_vocab(self): self.vocab_freq = defaultdict(float) st_time = time.time() for word in self.tokens: # compute freq of each word if word in self.vocab_freq: self.vocab_freq[word] += 1 else: self.vocab_freq[word] = 1 self.vocab = sorted(self.vocab_freq.keys()) # create forward and reverse index for all the words in vocab self.word_to_idx = defaultdict(lambda: 0) self.idx_to_word = defaultdict(lambda: 0) for idx, w in enumerate(self.vocab): self.word_to_idx[w] = idx self.idx_to_word[idx] = w self.vocab_prob = np.array([self.vocab_freq[k] for k in self.vocab]) self.vocab_freq_scaler = 0.73 total_freq = float(self.vocab_prob.sum()) self.vocab_prob = self.vocab_prob / total_freq self.vocab_csum = np.cumsum(self.vocab_prob) en_time = time.time() print("corpus construct time (seconds):", en_time - st_time, "num tokens:", total_freq) def __len__(self): return self.totlen def __getitem__(self, index): if(index >= len(self.tokens) - 15): return ( torch.tensor([self.word_to_idx[word] for word in self.tokens[index:index+10]]), torch.tensor([self.word_to_idx[word] for word in self.tokens[index:index+10]]), ) else: return ( torch.tensor([self.word_to_idx[word] for word in self.tokens[index:index+10]]), torch.tensor([self.word_to_idx[word] for word in self.tokens[index+1:index+11]]), ) #### POS/NEG TARGET CONTEXT CREATION #### class NegSampler: '''generate a block of negative samples from the cumsum array (Cumulative Distribution Function)''' def 
__init__(self, csum_ary): self.csum_ary = csum_ary self.time = 0. def get_neg_words(self, num_words): '''get num_words negative words sampled from cumsum array''' st_time = time.time() nprobs = np.random.random(num_words) neg_words = np.searchsorted(self.csum_ary, nprobs) en_time = time.time() self.time += en_time-st_time return neg_words class PosNegSampler(torch.utils.data.IterableDataset): '''This class creates a block of positive and negative pairs for word2vec training The iterable will return a numpy array of target, context and label triples''' def __init__(self, C, window_size, neg_samples, block_size): super(PosNegSampler, self).__init__() self.window_size = window_size self.neg_samples = neg_samples self.time = 0. self.block_sz = block_size self.C = C self.negsampler = NegSampler(self.C.vocab_csum) def context_data(self, block_sz): '''generate center word, context word pairs ''' T = [] C = [] for i, word in enumerate(self.C.tokens): #get window of words and create target context pairs start_idx = max(0, i - self.window_size) end_idx = min(len(self.C.vocab), i + self.window_size + 1) for j in range(start_idx, end_idx): if i != j: T.append(self.C.word_to_idx[word]) C.append(self.C.word_to_idx[self.C.tokens[j]]) # return a block of T, C if len(T) >= block_sz: yield (T, C) T, C = [], [] # return any remining elements yield (T, C) def __iter__(self): '''return one pos word and neq_samples neg words and the labels use context_data to retrieve a block ''' st_time = time.time() for i, (T, C) in enumerate(self.context_data(self.block_sz)): Tnp = np.array(T) Cnp = np.array(C) L = np.ones(len(T)) yield (Tnp, Cnp, L) L = np.zeros(len(T)) N = self.negsampler.get_neg_words( len(T)) Nnp = np.array(N) yield (Tnp, Nnp, L) en_time = time.time() self.time += en_time-st_time class Word2Vec(nn.Module): '''The word2vec model to train the word embeddings''' def __init__(self, embedding_size, vocab_size): super(Word2Vec, self).__init__() self.embedding_size = embedding_size 
self.T = nn.Embedding(vocab_size, embedding_size) self.C = nn.Embedding(vocab_size, embedding_size) def forward(self, target_word, context_word, label): t = self.T(target_word) c = self.C(context_word) out = torch.sum(t * c, dim=1) return out def save_embeddings(self, file_name, idx_to_word): # average the T and C matrices W = (net.T.weight.cpu().data.numpy() + net.C.weight.cpu().data.numpy())/2. with open(file_name, "w") as f: f.write("%d %d\n" % (len(idx_to_word), self.embedding_size)) for wid, w in idx_to_word.items(): e = ' '.join(map(lambda x: str(x), W[wid])) f.write("%s %s\n" % (w, e)) if torch.cuda.is_available(): device = "cuda" print("using device", torch.cuda.get_device_name(device)) else: device = "cpu" #scrape and read c = Corpus(tokens) window_size = 30 neg_samples = 60 block_size = 1024 PNS = PosNegSampler(c, window_size, neg_samples, block_size) V = len(PNS.C.vocab) print("vocab, device: ", V, device) training_generator = data.DataLoader( PNS, batch_size = 1 ) #model parameters word_embedding_dim = 200 epochs = 200 learning_rate = 0.01 # create the NN model net = Word2Vec(embedding_size=word_embedding_dim, vocab_size=V) net.to(device) net.train() net loss_function = nn.BCEWithLogitsLoss() optimizer = optim.Adam(net.parameters(), lr=learning_rate) start_t = time.time() for e in range(epochs): running_loss = 0 for bidx, (targets, contexts, labels) in enumerate( tqdm(training_generator)): targets = targets.flatten().to(device) contexts = contexts.flatten().to(device) labels = labels.flatten().to(device) net.zero_grad() preds = net(targets, contexts, labels) loss = loss_function(preds, labels) loss.backward() optimizer.step() running_loss += loss.item() print("epoch", e, running_loss, bidx, running_loss / (bidx + 1)) end_t = time.time() print("finished in time", end_t - start_t) for mode in ['avg', 'target', 'context']: output_file = "./word_embeds.vec" net.save_embeddings(output_file, PNS.C.idx_to_word) class TextGenerate(nn.Module): def __init__(self, 
C, word_embed_file, lstm_dim, embedding_dim, num_layers): super(TextGenerate, self).__init__() self.lstm_dim = lstm_dim self.embedding_dim = embedding_dim self.num_layers = num_layers n_vocab = len(C) embeddings_index = {} with open(word_embed_file) as f: for line in f: values = line.split() word = values[0] coefs = np.asarray(values[1:], dtype='float32') embeddings_index[word] = coefs embeddings_matrix = np.zeros((n_vocab, embedding_dim)) for word, i in C.word_to_idx.items(): embedding_vector = embeddings_index.get(word) if(embedding_vector is not None): embeddings_matrix[i] = embedding_vector print(embeddings_matrix.shape) self.embedding = nn.Embedding(num_embeddings=n_vocab, embedding_dim=self.embedding_dim, _weight=torch.Tensor(embeddings_matrix)) self.embedding.requires_grad = False self.lstm = nn.LSTM(input_size = self.lstm_dim, hidden_size = self.lstm_dim, num_layers = self.num_layers, dropout=0.2) self.fc = nn.Linear(self.lstm_dim, n_vocab) self.relu = nn.ReLU() self.fc2 = nn.Linear(n_vocab, n_vocab) def forward(self, x, prev_state): embed = self.embedding(x) output, state = self.lstm(embed, prev_state) logits = self.fc(output) logits = self.relu(logits) logits = self.fc2(logits) return logits, state def init_state(self, sequence_length): return (torch.zeros(self.num_layers, sequence_length, self.lstm_dim), torch.zeros(self.num_layers, sequence_length, self.lstm_dim)) training_generator = data.DataLoader( c, batch_size = 1 ) #model parameters word_embedding_dim = 200 epochs = 5 learning_rate = 0.01 # create the NN model net2 = TextGenerate(c, './word_embeds.vec', word_embedding_dim, word_embedding_dim, 6) net2.to(device) loss_function = nn.CrossEntropyLoss() optimizer = optim.Adam(net2.parameters(), lr=learning_rate) net2.train() net2 start_t = time.time() for e in range(epochs): running_loss = 0 state_h, state_c = net2.init_state(10) state_h = state_h.to(device) state_c = state_c.to(device) for bidx, (x, y) in enumerate(tqdm(training_generator)): 
net2.zero_grad() x = x.to(device) y = y.to(device) if(x.shape[1] != 10 or y.shape[1] != 10): continue y_pred, (state_h, state_c) = net2(x, (state_h, state_c)) loss = loss_function(y_pred.transpose(1, 2), y) state_h = state_h.detach() state_c = state_c.detach() loss.backward() optimizer.step() running_loss += loss.item() print("epoch", e, running_loss, bidx, running_loss / (bidx + 1)) end_t = time.time() print("finished in time", end_t - start_t) def getResponse(question, num_words_response): text = question next_words = num_words_response words = text.split(' ') output = [] state_h, state_c = net2.init_state(len(words)) state_h = state_h.to(device) state_c = state_c.to(device) last_word = '' with torch.no_grad(): for i in range(0, next_words): x = torch.tensor([[c.word_to_idx[w] for w in words[i:]]]) x = x.to(device) y_pred, (state_h, state_c) = net2(x, (state_h, state_c)) last_word_logits = y_pred[0][-1] p = torch.nn.functional.softmax(last_word_logits, dim=0).cpu().numpy() word_index = np.random.choice(len(last_word_logits), p=p) last_word = c.idx_to_word[word_index] while(c.idx_to_word[word_index] == last_word): word_index = np.random.choice(len(last_word_logits), p=p) new_last_word = c.idx_to_word[word_index] new_last_word = str(new_last_word) if(len(new_last_word) == 1 and new_last_word != 'a'): continue if(new_last_word != last_word): last_word = new_last_word break words.append(last_word) output.append(last_word) return output #### MACHINE INTERROGATER TURING MACHINE MODEL #### ## GENERATE POS/NEG SAMPLES ## #POS 1 = Machine generated response #NEG 0 = Human generated response class Responses(torch.utils.data.IterableDataset): def __init__(self, C, _csv, _chunk_size, pad_sz): self.csv = _csv self.C = C self.chunk_size = _chunk_size self.pad_sz = pad_sz def generateMachineResponse(self, question, response_size=15): return getResponse(question, response_size) def __len__(self): return 38269/self.chunk_size def __iter__(self): self.bidx = 0 self.data = 
pd.read_csv(self.csv, chunksize=self.chunk_size) return self def padout(self, questions, answers): padded_questions = [] padded_answers = [] for question in questions: question = np.array(question) padright = self.pad_sz - len(question) if(padright < 0): padded_questions.append(question[0:self.pad_sz]) else: padded_questions.append(np.pad(question, (0, padright), 'constant', constant_values=(0))) for answer in answers: answer = np.array(answer) padright = self.pad_sz - len(answer) if(padright < 0): padded_answers.append(answer[0:self.pad_sz]) else: padded_answers.append(np.pad(answer, (0, padright), 'constant', constant_values=(0))) return padded_questions, padded_answers def __next__(self): chunk_to_process = next(iter(self.data)) questions = [] answers = [] labels = [] #get text for question in chunk_to_process['Question'].tolist(): questions.append([self.C.word_to_idx[x] for x in question.split(' ')]) for answer in chunk_to_process['Answer'].tolist(): answer = str(answer) answers.append([self.C.word_to_idx[x] for x in answer.split(' ')]) labels.append([1 for x in range(self.chunk_size)]) for question in chunk_to_process['Question'].tolist(): questions.append([self.C.word_to_idx[x] for x in question.split(' ')]) answers.append([self.C.word_to_idx[x] for x in self.generateMachineResponse(question)]) labels.append([0 for x in range(self.chunk_size)]) questions, answers = self.padout(questions, answers) return torch.cat((torch.Tensor(np.array(questions)), torch.Tensor(np.array(answers))), axis=1), torch.Tensor(labels) class MachineInterrogator(nn.Module): def __init__(self, indim, hiddim, num_layers, dpout=0.15, bidir=False): super(MachineInterrogator, self).__init__() self.ind = indim self.hid = hiddim self.num_layers = num_layers self.dropout = dpout self.bidir = bidir self.lstm = nn.LSTM(self.ind, self.hid, num_layers, dropout=self.dropout, bidirectional=bidir, batch_first=True) self.lim = nn.Linear(in_features=self.hid, out_features=1, bias=True) def 
forward(self, x): h0 = torch.zeros(self.num_layers, x.size(0), self.hid).to(device) c0 = torch.zeros(self.num_layers, x.size(0), self.hid).to(device) x, _= self.lstm(x, (h0, c0)) x = self.lim(x) return x r = Responses(c, './half_jokes.csv', 1, 20) batch_size = 100 dataset_size = 10000 training_generator = data.DataLoader( r, batch_size = batch_size ) #model parameters input_dim = 40 epochs = 5 learning_rate = 0.01 # create the NN model net3 = MachineInterrogator(input_dim, 128, 2) net3 = net3.to(device) loss_function = nn.BCEWithLogitsLoss() optimizer = optim.Adam(net3.parameters(), lr=learning_rate) net3.train() start_t = time.time() for e in range(epochs): correct = 0 total = 0 for bidx, (feats, labels) in enumerate(tqdm(training_generator, total=dataset_size//batch_size)): net3.zero_grad() feats = feats.to(device) labels = labels.to(device) preds = net3(feats) loss = loss_function(preds, labels) preds = torch.round(torch.sigmoid(preds)) loss.backward() optimizer.step() correct += (preds == labels).sum().item() total += len(preds) print("epoch", e, " acc: ", correct / (total * 2)) end_t = time.time() print("finished in time", end_t - start_t) ```
github_jupyter
### Import Numpy & Math libraries ``` import numpy as np import math ``` ### Assuming a 200 Layer Neural Network #### Let's see what happens to the Mean and Std-Dev at the end of 200 layer computations when we randomly initialize from a standard distribution (Using np.random.randn) ``` x = np.random.randn(256) for i in range(200): a = np.random.randn(256, 256) x = np.matmul(a, x) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}'.format(x.std())) ``` #### Above results show that the activations blew and the mean & std-dev are too high. Let's see after which layer this actually happened... ``` x = np.random.randn(256) for i in range(200): a = np.random.randn(256, 256) x = np.matmul(a, x) if np.isinf(x.std()): break print('Layer : {}'.format(i)) ``` #### At Layer 127, the std-dev went close to inf!!! #### Let's see if we initialize it too low, having a std-dev of 0.01 ``` x = np.random.randn(256) for i in range(200): a = np.random.randn(256, 256) * 0.01 x = np.matmul(a, x) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}'.format(x.std())) ``` #### As we can see, the values are too low and have vanished completely!!! 
#### The matrix product of our inputs x and weight matrix a that we initialized from a standard normal distribution will, on average, have a standard deviation very close to the square root of the number of input connections i.e √256 ``` for i in range(200): x = np.random.randn(256) a = np.random.randn(256, 256) y = np.matmul(a, x) print('Mean : {}'.format(y.mean())) print('Std-Dev : {}'.format(y.std())) print('Sqrt of 256 : {}'.format(math.sqrt(256))) ``` #### We see a Std-Dev of 0 when we initialize with 1 as parameter for the standard normal distribution ``` for i in range(200): x = np.random.randn(1) a = np.random.randn(1) y = np.matmul(a, x) print('Mean : {}'.format(y.mean())) print('Std-Dev : {}'.format(y.std())) ``` #### If we first scale the weight matrix a by dividing all its randomly chosen values by √256, the element-wise multiplication that fills in one element of the outputs y would now, on average, have a variance of only 1/√256. ``` for i in range(200): x = np.random.randn(256) a = np.random.randn(256, 256) * math.sqrt(1./256) y = np.matmul(a, x) print('Mean : {}'.format(y.mean())) print('Std-Dev : {}\n'.format(y.std())) ``` #### Let's scale the weights by 1/√n, where n is the number of network input connections at a layer (256 in our case) ``` x = np.random.randn(256) for i in range(200): a = np.random.randn(256, 256) * math.sqrt(1./256) x = np.matmul(a, x) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` #### Voila!!! Certainly did not explode at the end of 200 layers also!!! 
#### Let's see what happens if we scale the weights and have a network size of 1000 ``` x = np.random.randn(256) for i in range(1000): a = np.random.randn(256, 256) * math.sqrt(1./256) x = np.matmul(a, x) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` #### Certainly impressive results, they are not yet blown even after 1000 layer ### Let's attach activation functions and look at the results ``` def tanh(x): return np.tanh(x) def sigmoid(x): s = 1.0/(1 + np.exp(-x)) return s def ReLU(x): r = np.maximum(0, x) return r ``` #### Tanh Activation + Standard Normal Distribution ``` x = np.random.randn(256) for i in range(200): a = np.random.randn(256, 256) * math.sqrt(1./256) x = tanh(np.matmul(a, x)) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` #### Sigmoid Activation + Standard Normal Distribution ``` x = np.random.randn(256) for i in range(200): a = np.random.randn(256, 256) * math.sqrt(1./256) x = sigmoid(np.matmul(a, x)) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` #### Conceptually, it makes sense that when using activation functions that are symmetric about zero and have outputs inside [-1,1], such as softsign and tanh, we’d want the activation outputs of each layer to have a mean of 0 and a standard deviation around 1, on average. 
Let's see if applied to ReLU #### ReLU + Standard Normal Distribution fails drastically ``` x = np.random.randn(256) for i in range(200): a = np.random.randn(256, 256) * math.sqrt(1./256) x = ReLU(np.matmul(a, x)) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` #### In ReLU the Std-dev is quite close to the square root of the number of input connections, divided by the square root of two, or √256/√2 here ``` for i in range(200): x = np.random.randn(256) a = np.random.randn(256, 256) y = ReLU(np.matmul(a, x)) print('Mean : {}'.format(y.mean())) print('Std-Dev : {}\n'.format(y.std())) math.sqrt(256/2) ``` #### Scaling the weights by this number i.e. 2 will help our cause. One more reason of choosing two is increase the influence by 2 as half of the weights are lost when f(x)<0 ``` x = np.random.randn(256) for i in range(200): a = np.random.randn(256, 256) * math.sqrt(2./256) x = ReLU(np.matmul(a, x)) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` #### Tanh + Uniform Normal Distribution fails drastically ``` x = np.random.randn(256) for i in range(200): a = np.random.uniform(-1, 1, (256, 256)) * math.sqrt(1./256) x = tanh(np.matmul(a, x)) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` #### Xavier Initialization + Tanh works well ``` x = np.random.randn(256) for i in range(200): a = np.random.uniform(-1, 1, (256, 256)) * math.sqrt(6./(256 + 256)) x = tanh(np.matmul(a, x)) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` #### Xavier Initialization + ReLU fails! 
``` x = np.random.randn(256) for i in range(200): a = np.random.uniform(-1, 1, (256, 256)) * math.sqrt(6./(256 + 256)) x = ReLU(np.matmul(a, x)) print('Mean : {}'.format(x.mean())) print('Std-Dev : {}\n'.format(x.std())) ``` ## Summary #### Xavier Init with ReLU fails #### Xavier Init and Std Normal Distribution with all zero-centered activations will work well (Tanh, Sigmoid etc) #### Std Normal Distribution scaled by sqrt(2/n) with ReLU works well where n is no. of neurons
github_jupyter
# 準ニュートン法 引き続き、非線形最適化の手法について学習していきましょう。 今回は、ニュートン法においてその計算が必要であったヘッセ行列(の逆行列) の近似を、計算の過程で並行して求めることで、ニュートン法のような高速な収束性を、より少ない計算量で行うことのできる、**準ニュートン法**について扱います。 準ニュートン法に於けるヘッセ行列(の逆行列) の近似公式はいくつも知られていますが、ここではAnaconda に標準で添付されているSciPy において提供される、BFGS(Broyden-Fletcher-Goldfarb-Shanno) 公式による準ニュートン法について、その生成点列の挙動を確認しましょう。 ``` import numpy as np from scipy.optimize import line_search def sdm(xk, iter=15): sequence = [xk] for k in range(iter): dk = -jac(xk) alpha = line_search(fun, jac, xk, dk)[0] xk = xk + alpha * dk sequence.append(xk) return np.array(sequence) ``` ## 目的関数 今回も、前々回、前回と扱ってきた、 \begin{align*} f(x_0, x_1):=\sin\left(\frac{1}{2}x_0^2-\frac{1}{4}x_1^2+3\right)\cos(2x_0+1-e^{x_1}) \end{align*} を目的関数として考えましょう。 この関数は、前回確認した通り、下記のような複雑な形状をしていました。 ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=(8, 6), dpi=80) ax = plt.axes(projection='3d') X, Y = np.meshgrid(np.linspace(-1, 1, 100), np.linspace(-1, 1, 100)) Z = np.array([[np.sin((x ** 2) / 2 - (y ** 2 ) / 4 + 3) * np.cos(2 * x + 1 - np.exp(y)) for x, y in zip(vx, vy)] for vx, vy in zip(X, Y)]) ax.plot_surface(X, Y, Z, cmap='plasma') ``` この目的関数`fun` およびその勾配`jac` は、それぞれ下記のようにSymPy を用いてPython 関数として導出することができました。 後にNewton 法とも結果の比較を行うために、ヘッセ行列`hess` も同時に求めておきましょう。 ``` import numpy as np from sympy import symbols, sin, cos, exp, diff _x, _y = symbols('x, y') F = sin((_x ** 2) / 2 - (_y ** 2) / 4 + 3) * cos(2 * _x + 1 - exp(_y)) FdX, FdY = diff(F, _x), diff(F, _y) FdXdX, FdXdY = diff(FdX, _x), diff(FdX, _y) FdYdX, FdYdY = diff(FdY, _x), diff(FdY, _y) def fun(x): p = {_x: x[0], _y: x[1]} return float(F.subs(p)) def jac(x): p = {_x: x[0], _y: x[1]} return np.array([FdX.subs(p), FdY.subs(p)], dtype=np.float) def hess(x): p = {_x: x[0], _y: x[1]} return np.array([[FdXdX.subs(p), FdXdY.subs(p)], [FdYdX.subs(p), FdYdY.subs(p)]], dtype=np.float) ``` ## BFGS 公式による準ニュートン法 それでは早速、実験をしてみましょう。 BFGS 公式による準ニュートン法は、下記の関数により実行することができます。 ```python from 
scipy.optimize import minimize x = minimize(fun, x0, method='bfgs', jac=jac).x ``` 前回の信頼領域法と同様に、``scipy.optimize`` モジュールの``minimize`` 関数より、最適化アルゴリズムを呼び出すことができます。 信頼領域法と異なり、ヘッセ行列を指定する必要はなく、目的関数``fun``、初期点``x0`` および勾配``jac`` を与えることで実行できます。 それでは早速、生成点列の挙動を確認してみましょう。 初期点は前回までと同様に、$x^{(0)}:=(-0.3, 0.2)^\top$ を与えるものとします。 ここでは、比較のため、最急降下法とニュートン法による生成点列も同時にプロットしてみましょう。 ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt from scipy.optimize import minimize, line_search def newton(xk, iter=15): sequence = [xk] return np.array(sequence) X, Y = np.meshgrid(np.linspace(-1, 1, 100), np.linspace(-1, 1, 100)) plt.ylim(-1, 1) plt.xlim(-1, 1) Z = np.array([[np.sin((x ** 2) / 2 - (y ** 2 ) / 4 + 3) * np.cos(2 * x + 1 - np.exp(y)) for x, y in zip(vx, vy)] for vx, vy in zip(X, Y)]) plt.contour(X, Y, Z, cmap='plasma', levels=np.linspace(np.min(Z), np.max(Z), 15)) sequence = [np.array([-0.3, 0.2])] for k in range(15): dk = -jac(sequence[-1]) alpha = line_search(fun, jac, sequence[-1], dk)[0] sequence.append(sequence[-1] + alpha * dk) sequence = np.array(sequence) plt.plot(sequence[:, 0], sequence[:, 1], marker='o', label='sdm') sequence = [np.array([-0.3, 0.2])] for k in range(15): sequence.append(sequence[-1] + np.linalg.solve(hess(sequence[-1]), -jac(sequence[-1]))) sequence = np.array(sequence) plt.plot(sequence[:, 0], sequence[:, 1], marker='v', label='newton') sequence = [np.array([-0.3, 0.2])] minimize(fun, sequence[0], method='bfgs', jac=jac, callback=lambda xk: sequence.append(xk)) sequence = np.array(sequence) plt.plot(sequence[:, 0], sequence[:, 1], marker='^', label='bfgs') plt.legend() ``` 準ニュートン法による生成点列は、多少の振動こそ起きていますが、最急降下法(sdm) のような極端な振動はなく、しかしニュートン法(newton) のように発散はせずに、比較的効率的に最適解へ収束していることが見て取れます。 ## 参考文献 * 福島雅夫著『新版 数理計画入門』(朝倉書店; 2011) * 矢部博著『工学基礎 最適化とその応用』(数理工学社; 2006) * J. Nocedal, S. J. Wright: Numerical Optimization (2nd ed.), Springer (2006) * [Gradient descent - Wikipedia](https://en.wikipedia.org/wiki/Gradient_descent) (目的関数はこのページのものを使用しました。)
github_jupyter