text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib.patches import Ellipse from sklearn.preprocessing import StandardScaler from sklearn.mixture import GaussianMixture from sklearn.decomposition import PCA from sklearn import datasets, metrics heart_disease = pd.read_excel('Processed_Cleveland.xlsx') heart_disease # Define the features and the outcome X = heart_disease.iloc[:, :13] y = heart_disease.iloc[:, 13] # Replace missing values (marked by ?) with a 0 X = X.replace(to_replace='?', value=0) # Binarize y so that 1 means heart disease diagnosis and 0 means no diagnosis y = np.where(y > 0, 0, 1) scaler = StandardScaler() X_std = scaler.fit_transform(X) gmm_cluster = GaussianMixture(n_components=2, random_state=123) clusters = gmm_cluster.fit_predict(X_std) pca = PCA(n_components=2).fit_transform(X_std) plt.figure(figsize=(10,5)) colours = 'rbg' for i in range(pca.shape[0]): plt.text(pca[i, 0], pca[i, 1], str(clusters[i]), color=colours[y[i]], fontdict={'weight': 'bold', 'size': 50} ) plt.xticks([]) plt.yticks([]) plt.axis('off') plt.show() print("GMM çözümünün Düzeltilmiş Rand Endeksi (ARI): {:.3f}" .format(metrics.adjusted_rand_score(y, clusters))) print("GMM çözümünün siluet skoru : {:.3f}" .format(metrics.silhouette_score(X_std, clusters, metric='euclidean'))) ``` Recall that for 2 clusters in the K-means algo we had; * ARI score: 0.7453081340344547 * Silhouette Score: 0.1757847117726187 And for average linkage in Hierarchical clustering; * Yığınsal Kümeleme Sonuçlarının Ayarlanmış Rand Endeksi: 0.29404901333534655 * The silhoutte score of the Agglomerative Clustering solution: 0.23325509557943136 Hence we got our results. 
# Different Covariances ``` gmm_cluster = GaussianMixture(n_components=2, random_state=123, covariance_type='tied') clusters = gmm_cluster.fit_predict(X_std) print("GMM çözümünün Düzeltilmiş Rand Endeksi (ARI): {:.3f}" .format(metrics.adjusted_rand_score(y, clusters))) print("GMM çözümünün siluet skoru : {:.3f}" .format(metrics.silhouette_score(X_std, clusters, metric='euclidean'))) gmm_cluster = GaussianMixture(n_components=2, random_state=123, covariance_type='diag') clusters = gmm_cluster.fit_predict(X_std) print("GMM çözümünün Düzeltilmiş Rand Endeksi (ARI): {:.3f}" .format(metrics.adjusted_rand_score(y, clusters))) print("GMM çözümünün siluet skoru : {:.3f}" .format(metrics.silhouette_score(X_std, clusters, metric='euclidean'))) gmm_cluster = GaussianMixture(n_components=2, random_state=123, covariance_type='spherical' ) clusters = gmm_cluster.fit_predict(X_std) print("GMM çözümünün Düzeltilmiş Rand Endeksi (ARI): {:.3f}" .format(metrics.adjusted_rand_score(y, clusters))) print("GMM çözümünün siluet skoru : {:.3f}" .format(metrics.silhouette_score(X_std, clusters, metric='euclidean'))) ``` Spherical covariance has the greater ARI score. The difference in silhouette scores are not that significant.
github_jupyter
``` # UN_Geosheme_Subregion = ['Australia and New Zealand','Caribbean','Central America','Central Asia','Eastern Africa','Eastern Asia','Eastern Europe','Melanesia','Micronesia','Middle Africa','Northern Africa','Northern America','Northern Europe','Polynesia','South America','South-Eastern Asia','Southern Africa','Southern Asia','Southern Europe','Western Africa','Western Asia','Western Europe'] import os import pandas as pd from pathlib import Path from functools import reduce DATASET_FOLDER = '../../datasets/tempetes/Temperatures_Projections' arr = os.listdir(DATASET_FOLDER) print(arr) DATASET_FOLDER_AVG_RCP26 = '../../datasets/tempetes/Temperatures_Projections/AverageTemp/RCP26' arr = os.listdir(DATASET_FOLDER_AVG_RCP26) print(arr) len(arr) ``` # Concatenate avg, min, max temperatures into 4 distinct csv, each one for a scenario ### RCP26 ### Average monthly temperatures ``` def get_concatenated_projections_avg(DATASET_FOLDER): dir = Path(DATASET_FOLDER) dfs_list = [] for f in dir.glob("*.csv"): df = pd.read_csv(f) df["ISO"] = str(f).split("_")[-1].split(".")[0] dfs_list.append(df) df_final = pd.concat(dfs_list) df_final = df_final.rename(columns={"Monthly Temperature - (Celsius)":"avg_monthly_temp"}) return df_final df_AVG_RCP26 = get_concatenated_projections_avg('../../datasets/tempetes/Temperatures_Projections/AverageTemp/RCP26') df_AVG_RCP26.head() df_AVG_RCP26.shape df_AVG_RCP45 = get_concatenated_projections_avg('../../datasets/tempetes/Temperatures_Projections/AverageTemp/RCP45') df_AVG_RCP45.head() df_AVG_RCP60 = get_concatenated_projections_avg('../../datasets/tempetes/Temperatures_Projections/AverageTemp/RCP60') df_AVG_RCP60.head() df_AVG_RCP85 = get_concatenated_projections_avg('../../datasets/tempetes/Temperatures_Projections/AverageTemp/RCP85') df_AVG_RCP85.head() ``` ### Max temperatures ``` def get_concatenated_projections_max(DATASET_FOLDER): dir = Path(DATASET_FOLDER) dfs_list = [] for f in dir.glob("*.csv"): df = pd.read_csv(f) df["ISO"] = 
str(f).split("_")[-1].split(".")[0] dfs_list.append(df) df_final = pd.concat(dfs_list) df_final = df_final.rename(columns={"Monthly Max-Temperature - (Celsius)":"max_monthly_temp"}) return df_final df_MAX_RCP26 = get_concatenated_projections_max('../../datasets/tempetes/Temperatures_Projections/TempMax/RCP26') df_MAX_RCP26.head() df_MAX_RCP45 = get_concatenated_projections_max('../../datasets/tempetes/Temperatures_Projections/TempMax/RCP45') df_MAX_RCP45.head() df_MAX_RCP60 = get_concatenated_projections_max('../../datasets/tempetes/Temperatures_Projections/TempMax/RCP60') df_MAX_RCP60.head() df_MAX_RCP85 = get_concatenated_projections_max('../../datasets/tempetes/Temperatures_Projections/TempMax/RCP85') df_MAX_RCP85.head() ``` ### Min temperatures ``` def get_concatenated_projections_min(DATASET_FOLDER): dir = Path(DATASET_FOLDER) dfs_list = [] for f in dir.glob("*.csv"): df = pd.read_csv(f) df["ISO"] = str(f).split("_")[-1].split(".")[0] dfs_list.append(df) df_final = pd.concat(dfs_list) df_final = df_final.rename(columns={"Monthly Min-Temperature - (Celsius)":"min_monthly_temp"}) return df_final df_MIN_RCP26 = get_concatenated_projections_min('../../datasets/tempetes/Temperatures_Projections/TempMin/RCP26') df_MIN_RCP26.head() df_MIN_RCP45 = get_concatenated_projections_min('../../datasets/tempetes/Temperatures_Projections/TempMin/RCP45') df_MIN_RCP45.head() df_MIN_RCP60 = get_concatenated_projections_min('../../datasets/tempetes/Temperatures_Projections/TempMin/RCP60') df_MIN_RCP60.head() df_MIN_RCP85 = get_concatenated_projections_min('../../datasets/tempetes/Temperatures_Projections/TempMin/RCP85') df_MIN_RCP85.head() ``` # Test sets prep ### RCP26 ``` dfs_list = [df_AVG_RCP26, df_MIN_RCP26, df_MAX_RCP26] temp_proj_rcp26 = reduce(lambda x, y: pd.merge(x, y, on = ["ISO", "Month", "Year"]), dfs_list) temp_proj_rcp26.head() ``` ### RCP45 ``` dfs_list = [df_AVG_RCP45, df_MIN_RCP45, df_MAX_RCP45] temp_proj_rcp45 = reduce(lambda x, y: pd.merge(x, y, on = ["ISO", 
"Month", "Year"]), dfs_list) temp_proj_rcp45.head() ``` ### RCP60 ``` dfs_list = [df_AVG_RCP60, df_MIN_RCP60, df_MAX_RCP60] temp_proj_rcp60 = reduce(lambda x, y: pd.merge(x, y, on = ["ISO", "Month", "Year"]), dfs_list) temp_proj_rcp60.head() ``` ### RCP85 ``` dfs_list = [df_AVG_RCP85, df_MIN_RCP85, df_MAX_RCP85] temp_proj_rcp85 = reduce(lambda x, y: pd.merge(x, y, on = ["ISO", "Month", "Year"]), dfs_list) temp_proj_rcp85.head() ``` # Write test sets ``` temp_proj_rcp26.to_csv('../../datasets/tempetes/test_sets' + '/' + 'monthly_temp_proj_rcp26.csv') temp_proj_rcp45.to_csv('../../datasets/tempetes/test_sets' + '/' + 'monthly_temp_proj_rcp45.csv') temp_proj_rcp60.to_csv('../../datasets/tempetes/test_sets' + '/' + 'monthly_temp_proj_rcp60.csv') temp_proj_rcp85.to_csv('../../datasets/tempetes/test_sets' + '/' + 'monthly_temp_proj_rcp85.csv') ```
github_jupyter
# Challenge 2 - Padlock Secret **Difficulty level**: 3 - beginner One approach to find a password or a padlock secret combination is to use a brute force attack. Of course, for a small combination it is not a big deal, but for complex combination it could be almost impossible using the current computation power. Here, first, we will check how to create a simple brute-force attack to resolve a padlock with 3 numbers combination. When you understand how to do it, you will be ready for the challenge! ## Brute-force attack example The follow example simulates a brute-force "attack" that tries to resolve the padlock secret. ``` import itertools import random import time lock_secret = ( random.randint(0, 9), random.randint(0, 9), random.randint(0, 9), ) possibilities = list( itertools.product(range(10), repeat=len(lock_secret)) ) print('All possibilities:', possibilities, '\n') t0 = time.time() # brute-force attack for guess in possibilities: if guess == lock_secret: print(f'The guess ({guess}) was correct!') break print('execution time:', time.time() - t0) ``` ## Socket example This example shows how to create a socket and a client that communicates between them. It uses `multiprocessing.Process` to run the server and client code in a different system process. Using this approach, we can run server and client code in parallel. 
``` import socket import requests # pip install requests import multiprocessing as mp import time HOST = '127.0.0.1' # Standard loopback interface address (localhost) PORT = 58123 # Port to listen on (non-privileged ports are > 1023) BUFSIZE = 1024 # Buffer size # SERVER def server(host: str='127.0.0.1', port: int=58123, bufsize: int=1024): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: print(f'server: binding {host}:{port}') s.bind((host, port)) print('server: listening ...') s.listen() print('server: accepting connection ...') conn, addr = s.accept() with conn: print('server: connected by', addr) while True: print('server: receiving data ...') data = conn.recv(bufsize) if not data: break print('server: sending data ...') conn.sendall(data) print('server: done.') # CLIENT def client(server_host: str='127.0.0.1', server_port: int=58123, bufsize: int=1024): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: print(f'client: connecting to the server {server_host}:{server_port}') s.connect((HOST, PORT)) print('client: sending data ...') s.sendall(b'Hello, world') data = s.recv(bufsize) print('client: Data received', repr(data)) print('client: done.') # create server and client processes p_server = mp.Process(target=server, args=(HOST, PORT, BUFSIZE)) p_client = mp.Process(target=client, args=(HOST, PORT, BUFSIZE)) # start both processes print("main: starting server ...") p_server.start() time.sleep(1) print("main: starting client ...") p_client.start() # wait and finish all the processes p_server.join() p_client.join() ``` ## Challenge - Create a socket code (server) and a client code. - Create a socket (server) that generates a random lock_secret with 3 numbers and receive a guess from a client. - If the guess is correct, the socket returns '{"is_correct": true}' and client stops the iteration. - If the guess is NOT correct, return '{"is_correct": false}' and client continues the iteration. ``` # your code here! 
``` ## References - https://en.wikipedia.org/wiki/Permutation - https://en.wikipedia.org/wiki/Combination - https://www.geeksforgeeks.org/permutation-and-combination-in-python/ - https://docs.python.org/3.8/library/itertools.html - https://docs.python.org/3/library/socket.html - https://realpython.com/python-sockets/
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png) # Automated Machine Learning _**Classification of credit card fraudulent transactions on remote compute **_ ## Contents 1. [Introduction](#Introduction) 1. [Setup](#Setup) 1. [Train](#Train) 1. [Results](#Results) 1. [Test](#Test) 1. [Acknowledgements](#Acknowledgements) ## Introduction In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge. This notebook is using remote compute to train the model. If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. In this notebook you will learn how to: 1. Create an experiment using an existing workspace. 2. Configure AutoML using `AutoMLConfig`. 3. Train the model using remote compute. 4. Explore the results. 5. Test the fitted model. ## Setup As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments. ``` import logging from matplotlib import pyplot as plt import pandas as pd import os import azureml.core from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace from azureml.core.dataset import Dataset from azureml.train.automl import AutoMLConfig ``` This sample notebook may use features that are not available in previous versions of the Azure ML SDK. 
``` print("This notebook was created using version 1.39.0 of the Azure ML SDK") print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK") ws = Workspace.from_config() # choose a name for experiment experiment_name = 'automl-classification-ccard-remote' experiment=Experiment(ws, experiment_name) output = {} output['Subscription ID'] = ws.subscription_id output['Workspace'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Experiment Name'] = experiment.name pd.set_option('display.max_colwidth', None) outputDf = pd.DataFrame(data = output, index = ['']) outputDf.T ``` ## Create or Attach existing AmlCompute A compute target is required to execute the Automated ML run. In this tutorial, you create AmlCompute as your training compute resource. > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. #### Creation of AmlCompute takes approximately 5 minutes. If the AmlCompute with that name is already in your workspace this code will skip the creation process. As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota. 
``` from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # Choose a name for your CPU cluster cpu_cluster_name = "cpu-cluster-1" # Verify that cluster does not exist already try: compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name) print('Found existing cluster, use it.') except ComputeTargetException: compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2', max_nodes=6) compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) ``` # Data ### Load Data Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model. ``` data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv" dataset = Dataset.Tabular.from_delimited_files(data) training_data, validation_data = dataset.random_split(percentage=0.8, seed=223) label_column_name = 'Class' ``` ## Train Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment. |Property|Description| |-|-| |**task**|classification or regression| |**primary_metric**|This is the metric that you want to optimize. 
Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>| |**enable_early_stopping**|Stop the run if the metric score is not showing improvement.| |**n_cross_validations**|Number of cross validation splits.| |**training_data**|Input dataset, containing both features and label column.| |**label_column_name**|The name of the label column.| **_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric) ``` automl_settings = { "n_cross_validations": 3, "primary_metric": 'AUC_weighted', "enable_early_stopping": True, "max_concurrent_iterations": 2, # This is a limit for testing purpose, please increase it as per cluster size "experiment_timeout_hours": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ablity to find the best model possible "verbosity": logging.INFO, } automl_config = AutoMLConfig(task = 'classification', debug_log = 'automl_errors.log', compute_target = compute_target, training_data = training_data, label_column_name = label_column_name, **automl_settings ) ``` Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous. ``` remote_run = experiment.submit(automl_config, show_output = False) # If you need to retrieve a run that already started, use the following code #from azureml.train.automl.run import AutoMLRun #remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>') ``` ## Results #### Widget for Monitoring Runs The widget will first report a "loading" status while running the first iteration. 
After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete. **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details ``` from azureml.widgets import RunDetails RunDetails(remote_run).show() remote_run.wait_for_completion(show_output=False) ``` #### Explain model Automated ML models can be explained and visualized using the SDK Explainability library. ## Analyze results ### Retrieve the Best Model Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. ``` best_run, fitted_model = remote_run.get_output() fitted_model ``` #### Print the properties of the model The fitted_model is a python object and you can read the different properties of the object. ## Test the fitted model Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values. ``` # convert the test data to dataframe X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe() y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe() # call the predict functions on the model y_pred = fitted_model.predict(X_test_df) y_pred ``` ### Calculate metrics for the prediction Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values from the trained model that was returned. 
``` from sklearn.metrics import confusion_matrix import numpy as np import itertools cf =confusion_matrix(y_test_df.values,y_pred) plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest') plt.colorbar() plt.title('Confusion Matrix') plt.xlabel('Predicted') plt.ylabel('Actual') class_labels = ['False','True'] tick_marks = np.arange(len(class_labels)) plt.xticks(tick_marks,class_labels) plt.yticks([-0.5,0,1,1.5],['','False','True','']) # plotting text value inside cells thresh = cf.max() / 2. for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])): plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black') plt.show() ``` ## Acknowledgements This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project Please cite the following works: Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015 Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. 
Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi) Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019 Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019
github_jupyter
# Ray Crash Course - Exercise Solutions © 2019-2021, Anyscale. All Rights Reserved ![Anyscale Academy](../../images/AnyscaleAcademyLogo.png) This notebook discusses solutions for the exercises in the _crash course_. ## 01 Ray Crash Course - Tasks - Exercise 1 As currently written, the memory footprint of `estimate_pi` scales linearly with `N`, because it allocates two NumPy arrays of size `N`. This limits the size of `N` we can evaluate (as I confirmed by locking up my laptop...). However, this isn't actually necessary. We could do the same calculation in "blocks, for example `m` blocks of size `N/m` and then combine the results. Furthermore, there's no dependencies between the calculations with those blocks, giving us further potential speed-up by parellelizing them with Ray. Adapt `ray_estimate_pi` to use this technique. Pick some `N` value above which the calculation is done in blocks. Compare the performance of the old vs. new implementation. As you do this exercise, you might ponder the fact that we often averaged multiple trials for a given `N` and then ask yourself, what's the difference between averaging `10` trials for `N = 1000` vs. `1` trial for `N = 10000`, for example? First, import things we need and redefine functions and data we need from the notebook: ``` import numpy as np import sys, time, statistics, math import ray sys.path.append('..') from pi_calc import str_large_n trials = 5 ray.init(ignore_reinit_error=True) print(f'Dashboard URL: http://{ray.get_dashboard_url()}') ``` Here's `estimate_pi` again, but now we'll also return the counts, for reasons we'll discuss shortly. ``` def estimate_pi(num_samples): xs = np.random.uniform(low=-1.0, high=1.0, size=num_samples) # Generate num_samples random samples for the x coordinate. ys = np.random.uniform(low=-1.0, high=1.0, size=num_samples) # Generate num_samples random samples for the y coordinate. 
xys = np.stack((xs, ys), axis=-1) # Like Python's "zip(a,b)"; creates np.array([(x1,y1), (x2,y2), ...]). inside = xs*xs + ys*ys <= 1.0 # Creates a predicate over all the array elements. xys_inside = xys[inside] # Selects only those "zipped" array elements inside the circle. in_circle = xys_inside.shape[0] # Return the number of elements inside the circle. approx_pi = 4.0*in_circle/num_samples # The Pi estimate. return approx_pi, in_circle, num_samples ``` Here's the original `ray_estimate_pi`, but now it will also return the counts, not just $\pi$. ``` @ray.remote def ray_estimate_pi(num_samples): return estimate_pi(num_samples) fmt = '{:10.5f} seconds: pi ~ {:7.6f}, stddev = {:5.4f}, error = {:5.4f}%' ``` Here's `ray_try_it`, but now we handle the additional returned values from `ray_estimate_pi`: ``` def ray_try_it(n, trials): print('trials = {:5d}, N = {:s}: '.format(trials, str_large_n(n, padding=15)), end='') # str_large_n imported above. start = time.time() refs = [ray_estimate_pi.remote(n) for _ in range(trials)] pis_counts = ray.get(refs) pis = list(map(lambda t: t[0], pis_counts)) approx_pi = statistics.mean(pis) stdev = 0.0 if trials == 1 else statistics.stdev(pis) duration = time.time() - start error = (100.0*abs(approx_pi-np.pi)/np.pi) print(fmt.format(duration, approx_pi, stdev, error)) # str_large_n imported above. return trials, n, duration, approx_pi, stdev, error ``` First, let's look at the "ponder" question at the end, just using the original implementation. We'll do a few runs of the following cell. Note that we're using large maximum `n` values here. 
If you are working on a slow machine or VM, consider deleting the last value `10000000` here and below: ``` for n in [1000, 10000, 100000, 1000000, 10000000]: ray_try_it(n, round(10000000/n)) for n in [1000, 10000, 100000, 1000000, 10000000]: ray_try_it(n, round(10000000/n)) for n in [1000, 10000, 100000, 1000000, 10000000]: ray_try_it(n, round(10000000/n)) ``` The standard deviation is misleading now, because the number of trials change. The errors are roughly within an order of magnitude, due in part to expected statistical variation. Generally speaking, larger `N` and lower `trials` had lower errors. This may be due to the other big source of variation, the inevitable rounding error computing $\pi$ (`4 * inside_count/N`), one time per trial (`1` to `10,000` times). Experiments are supposed to eliminate as many extraneous variables as possible, so I would argue that sticking to one value for `trials` and varying `N` is more meaningful. In fact, in the implementation that follows, we'll eliminate the potential rounding error variation by keep track of the inside and total counts, then computing $\pi$ once at the end. First, a function to return sample sizes for a given `N` and `m`. ``` def sample_sizes(N, m): ranges = [(m*i, m*(i+1)) for i in range(math.ceil(N/m))] if ranges[-1][1] > N: ranges[-1] = (ranges[-1][0], N) return list(map(lambda x: x[1]-x[0], ranges)) @ray.remote def ray_estimate_pi_blocks(num_samples, m): """ Perform the estimate in blocks up to ``m`` samples in size. A more user-friendly solution would embed logic to determine an reasonably good ``m`` value, but for our purposes, passing in ``m`` is more convenient. 
""" sizes = sample_sizes(num_samples, m) refs = [ray_estimate_pi.remote(size) for size in sizes] values = ray.get(refs) # Not using ray.wait() is okay; the tasks are all roughly the same size inside_count = 0 total_count = 0 for _, icount, tcount in values: # Toss the pi value returned inside_count += icount total_count += tcount return 4.0*inside_count/total_count, inside_count, total_count ``` Let's try it: ``` for m in [10000, 100000, 1000000]: print(f'm = {m}:') for n in [1000, 10000, 100000, 1000000, 10000000, 100000000]: start = time.time() approx_pi, inside_count, total_count = ray.get(ray_estimate_pi_blocks.remote(n, m)) duration = time.time() - start print(f'{n:15}: duration = {duration:6.5} seconds, pi = {approx_pi:6.5}, # inside/outside = {inside_count:12}/{total_count}') ``` Let's compare to the original implementation: ``` for n in [1000, 10000, 100000, 1000000, 10000000, 100000000]: start = time.time() approx_pi, inside_count, total_count = ray.get(ray_estimate_pi.remote(n)) duration = time.time() - start print(f'{n:15}: duration = {duration:6.5} seconds, pi = {approx_pi:6.5}, # inside/outside = {inside_count:12}/{total_count}') ``` Note that for larger `N`, `ray_estimate_pi_blocks` time scale noticeably slower than the original implementation, e.g., for the highest `N`, `100,000,000`, the durations are approximately `1.2` seconds vs. `9.6` seconds. ## 01 Ray Crash Course - Tasks - Exercise 2 What `N` value is needed to get a reliable estimate to five decimal places, `3.1415` (for some definition of "reliable")? If you have a powerful machine or a cluster, you could try a higher accuracy. You'll need to use the solution to Exercise 1 or you can make a guess based on the results we've already seen in this notebook. 
To use the solution from Exercise 1, we'll need a modified `ray_try_it` to add the `m` blocks parameter: ``` def ray_try_it_blocks(n, m, trials): print('trials = {:5d}, N = {:s}: '.format(trials, str_large_n(n, padding=15)), end='') # str_large_n imported above. start = time.time() refs = [ray_estimate_pi_blocks.remote(n, m) for _ in range(trials)] pis_counts = ray.get(refs) pis = list(map(lambda t: t[0], pis_counts)) approx_pi = statistics.mean(pis) stdev = 0.0 if trials == 1 else statistics.stdev(pis) duration = time.time() - start error = (100.0*abs(approx_pi-np.pi)/np.pi) print(fmt.format(duration, approx_pi, stdev, error)) # str_large_n imported above. return trials, n, duration, approx_pi, stdev, error ``` Let's compute the error we would have to achieve for this accuracy. ``` target_error = 100*abs(3.1415 - np.pi)/np.pi target_error ``` Okay, let's keep trying bigger `N` until we get to this number, but now we need to pick a definition of "reliable", because the results will depend on the number of `trials` we do. Also, some experiments will get "lucky" for relatively low `N` values. > **WARNING:** This could take a while. You could choose a less accurate error goal if you have limited compute resources. ``` N = 100 error = 10.0 while error > target_error: N *= 10 _, _, duration, approx_pi, _, error = ray_try_it_blocks(N, 1000000, trials) if N > 100000000: print("Stopping so we don't crash the machine...") break print(f'{N} samples is sufficient to get the error below {target_error}%') ``` You should run the previous cell several times. Some runs might succeed with `N = 100,000`, while more often it will be above 1M or 10M. ## 01 Ray Crash Course - Tasks - Exercise 3 For small computation problems, Ray adds enough overhead that its benefits are outweighed. You can see from the performance graphs in the lesson that smaller `N` or smaller trial values will likely cause the performance curves to cross. Try small values of `N` and small trial numbers. 
When do the lines cross? Try timing individual runs for small `N` around the crossing point. What can you infer from this "tipping point" about appropriate sizing of tasks, at least for your test environment? First, here is more code from the notebook. Here is `try_it`, modified to handle the extra return values from the modified `estimate_pi`: ``` def try_it(n, trials): print('trials = {:3d}, N = {:s}: '.format(trials, str_large_n(n, padding=12)), end='') # str_large_n imported above. start = time.time() pis_counts = [estimate_pi(n) for _ in range(trials)] pis = list(map(lambda t: t[0], pis_counts)) approx_pi = statistics.mean(pis) stdev = statistics.stdev(pis) duration = time.time() - start error = (100.0*abs(approx_pi-np.pi)/np.pi) print(fmt.format(duration, approx_pi, stdev, error)) # str_large_n imported above. return trials, n, duration, approx_pi, stdev, error small_ns = [1, 10, 100, 1000, 10000, 100000] data_ns = [try_it(n, trials) for n in small_ns] ray_data_ns = [ray_try_it(n, trials) for n in small_ns] np_data_ns = np.array(data_ns) np_ray_data_ns = np.array(ray_data_ns) from bokeh_util import two_lines_plot, means_stddevs_plot # Some plotting utilities in `./bokeh_util.py`. from bokeh.plotting import show, figure from bokeh.layouts import gridplot two_lines = two_lines_plot( "N vs. Execution Times (Smaller Is Better)", 'N', 'Time', 'No Ray', 'Ray', np_data_ns[:,1], np_data_ns[:,2], np_ray_data_ns[:,1], np_ray_data_ns[:,2], x_axis_type='log', y_axis_type='log') show(two_lines, plot_width=800, plot_height=400) ``` (If you can't see it, click [here](../../images/Pi-small-Ns-vs-times.png).) Let's calculate the `N` where they cross: ``` for i in range(len(small_ns)): if data_ns[i] >= ray_data_ns[i]: print(f'Crossing point: N = {small_ns[i]}') ``` ## 02 Ray Crash Course - Actors - Exercise 1 You are asked these questions about the `Counter` vs. 
`RayCounter` performance: > Ignoring pause = 0, can you explain why the Ray times are almost, but slightly larger than the non-ray times consistently? Study the implementations for `ray_counter_trial` and `RayCounter`. What code is synchronous and blocking vs. concurrent? In fact, is there _any_ code that is actually concurrent when you have just one instance of `Counter` or `RayCounter`? Here is `ray_counter_trial` again, with comments about concurrency vs. synchronous blocking calls: ``` def ray_counter_trial(count_to, num_counters = 1, pause = 0.01): print('ray: count_to = {:5d}, num counters = {:4d}, pause = {:5.3f}: '.format(count_to, num_counters, pause), end='') start = time.time() final_count_futures = [] # Actor instantiation blocks, but returns almost immediately. The actor creation overhead is low. It is a little bit larger # than normal class instantiation, but insignificant for overall performance. counters = [RayCounter.remote(pause) for _ in range(num_counters)] for i in range(num_counters): for n in range(count_to): counters[i].next.remote() # Nonblocking, so will be faster for long pause scenarios... final_count_futures.append(counters[i].get_count.remote()) ray.get(final_count_futures) # but block until all invocations are finished! duration = time.time() - start print('time = {:9.5f} seconds'.format(duration)) return count_to, num_counters, pause, duration ``` Both `next` methods in `Counter` and `RayCounter`, call `time.sleep(pause)` before completing, but for `RayCounter` it runs asynchronously, while it blocks for `Counter`. You do have to block to get the current count and if lots of async invocations of `next` are being processed, a call to `ray.get(actor.get_counter())` will block until all of them are finished. Hence, the reason a single `RayCounter` instance never outperforms a `Counter` instance is because _all_ the code in `ray_counter_trial` becomes effectively _synchronous_ because of the single line `ray.get(final_count_futures)`. 
Since the Ray implementation has extra overhead for Ray, it will always take a little longer. The real benefit is running many counters concurrently. `ray_counter_trial` does this seamlessly, while `counter_trial` remains fully synchronous. At the end of the exercise is this statement and question: > Once past zero pauses, the Ray overhead is constant. It doesn't grow with the pause time. Can you explain why it doesn't grow? The Ray overhead doesn't change because the number of Ray-related invocations don't change as the pause time grows. We still use one counter instance and ten invocations of it. Hence the overhead is a constant, even though the method invocations will take longer to complete, depending on the `pause` value. # 03 Ray Crash Course - Why Ray? There were no exercises for this lesson. # 04 Ray Crash Course - Python Multiprocessing with Ray There were no exercises for this lesson. # 05 Ray Crash Course - Ray Parallel Iterators - Exercises 1-3 Here we combine the solutions for the first three exercises. This code is also available as a complete, standalone Ray program in [word-count-exercises.py](word-count-exercises.py). ``` import glob, gzip, re, sys, os import numpy as np class WordCount: "Wraps a dictionary of words and counts." def __init__(self): self.counts = {} def __call__(self, word, increment): count = increment if word in self.counts: count = self.counts[word]+increment self.counts[word] = count return (word, count) def sort_counts(self, descending=True): "Returns a generator of word-count pairs sorted by count." return (wc for wc in sorted(self.counts.items(), key = lambda wc: wc[1], reverse=descending)) def unzip(f): if f.endswith(".gz"): return gzip.open(f) else: return open(f, 'r') # Exercise 3: Remove stop words. Edit this set to taste! 
stop_words1 = { 'that', 'the', 'this', 'an', 'and', 'or', 'but', 'of' } ## All the single digits and ASCII letters: l=[str(i) for i in range(10)] l.extend([chr(i) for i in range(ord('a'), ord('z')+1)]) stop_words = stop_words1.union(set(l)) def is_stop_word(word): """ Treat all single-character words, blanks, and integers as stop words. (Try adding floating point numbers.) Otherwise, check for membership in a set of words. We use a set because it provides O(1) lookup! """ w = word.strip() if len(w) <= 1 or w.isdigit(): return True return w in stop_words def count_words(file_globs, top_n = 100, batch_window = 1024): # The working directory of this application may be _different_ # than the Ray cluster's working directory. (In a real cluster, # the files available will be different, too, but we'll ignore # the problem here.) So, we need to pass absolute paths or our # ray.util.iter.from_items won't find the files! globs = [g for f in file_globs for g in glob.glob(f)] file_list = list(map(lambda f: os.path.abspath(f), globs)) print(f'Processing {len(file_list)} files: {file_list}') # Exercise 1: use combine instead of for_each(...).flatten(...). # We replace two occurrences: word_count = ( ray.util.iter.from_items(file_list, num_shards=4) .combine(lambda f: unzip(f).readlines()) # Exercise 2: convert to lower case! .combine(lambda line: re.split('\W+', line.lower())) # split into words. # Exercise 3: remove stop words. .filter(lambda word: not is_stop_word(word)) .for_each(lambda word: (word, 1)) .batch(batch_window) ) # Combine the dictionaries of counts across shards with a sliding window # of "batch_window" lines. wordCount = WordCount() for shard_counts in word_count.gather_async(): for word, count in shard_counts: wordCount(word, count) sorted_list_iterator = wordCount.sort_counts() return [sorted_list_iterator.__next__() for i in range(top_n)] %time word_counts = count_words(['../*.ipynb'], top_n=100) # The notebooks are now in the parent directory. 
word_counts ``` # 05 Ray Crash Course - Ray Parallel Iterators - Exercise 4 Now let's run `count_words` on the `README.md` for the tutorial repo: ``` %time word_counts_readme = count_words(['../../README.md'], top_n=100) # The README is two directories up! word_counts_readme ``` Now which words are most prominent? ``` ray.shutdown() # "Undo ray.init()". Terminate all the processes started in this notebook. ```
github_jupyter
``` from fastai2.vision.all import * ``` # Checar el VerboseCallback ``` from fastai2.test_utils import VerboseCallback class VerboseCallback(Callback): "Callback that prints the name of each event called" def __call__(self, event_name): print(event_name) super().__call__(event_name) ``` # Crear callback que anote los tiempos del learner ``` import threading import time from torch.autograd.profiler import FunctionEvent, EventList class ChromeStatics(Callback): def __init__(self, ll): super().__init__() self.l = ll self.d = {} def __call__(self, event_name): tid = threading.get_ident() tname = threading.current_thread().name if event_name.startswith('begin'): evt = event_name.replace('begin_', '') self.d[evt] = time.clock_gettime_ns(time.CLOCK_PROCESS_CPUTIME_ID)/1000 elif event_name.startswith('after'): evt = event_name.replace('after_', '') end = time.clock_gettime_ns(time.CLOCK_PROCESS_CPUTIME_ID)/1000 if evt in self.d: start = self.d[evt] fe = FunctionEvent(tname, tid, evt, tid, start, end) else: print(f"ENDDDD before 500 {evt}") start = end-500 # 1-time after_xxx fe = FunctionEvent(tname, tid, evt, tid, start, end) self.l.append(fe) self.d.pop(evt, None) def export_chrome_trace(self, fname="export_chrome_trace.json", dumps=False): with open(fname, mode="w+") as f: prof = EventList(self.l) prof.export_chrome_trace(f.name) # Now validate the json if dumps: parsed = json.load(f) print(json.dumps(parsed, indent=4, sort_keys=True)) ``` # Usarlo en ejemplo sencillo ``` %%time from fastai2.vision.all import * path = untar_data(URLs.PETS) files = get_image_files(path/"images") def label_func(f): return f[0].isupper() dls = ImageDataLoaders.from_name_func(path, files, label_func, item_tfms=Resize(224)) learn = cnn_learner(dls, resnet34, metrics=error_rate) my_list = [] cb_stats = ChromeStatics(my_list) learn.fine_tune(1, cbs=cb_stats) cb_stats.export_chrome_trace() ``` # profile autograd de pytorch ``` %%time from fastai2.vision.all import * path = untar_data(URLs.PETS) 
files = get_image_files(path/"images") def label_func(f): return f[0].isupper() dls = ImageDataLoaders.from_name_func(path, files, label_func, item_tfms=Resize(224)) learn = cnn_learner(dls, resnet34, metrics=error_rate) my_list = [] cb_stats = ChromeStatics(my_list) with torch.autograd.profiler.profile() as prof: learn.fine_tune(1, cbs=cb_stats) cb_stats.export_chrome_trace() prof.export_chrome_trace("prof.json") type(prof), type(cb_stats) with open('prof.json', 'r') as p: with open('export_chrome_trace.json', 'r') as e: jsonp = json.load(p) jsone = json.load(e) r = jsonp+jsone with open('mezcla.json', mode="w+") as fout: fout.write(json.dumps(r)) print('done') ``` # change learner loop ``` %%time from fastai2.vision.all import * @patch_to(Learner) def begin_pred(self:Learner): return @patch_to(Learner) def one_batch(self, i, b): self.iter = i try: print("----------- 1") self._split(b); self('begin_batch') print("----------- (((((((((((((((2)))))))))))))))") self('begin_pred') print("----------- (((3)))") self.pred = self.model(*self.xb); self('after_pred') print("----------- 4") if len(self.yb) == 0: return print("----------- 5") #self('begin_loss') print("----------- 6") self.loss = self.loss_func(self.pred, *self.yb); self('after_loss') print("----------- 7") if not self.training: return print("----------- 8") #self('begin_backward') print("----------- 9") self.loss.backward(); self('after_backward') print("----------- 10") self.opt.step(); self('after_step') print("----------- 11") self.opt.zero_grad() except CancelBatchException: self('after_cancel_batch') finally: self('after_batch') path = untar_data(URLs.PETS) files = get_image_files(path/"images") def label_func(f): return f[0].isupper() dls = ImageDataLoaders.from_name_func(path, files, label_func, item_tfms=Resize(224)) learn = cnn_learner(dls, resnet34, metrics=error_rate) my_list = [] cb_stats = ChromeStatics(my_list) learn.fine_tune(1, cbs=cb_stats) cb_stats.export_chrome_trace() ```
github_jupyter
# Answers: Classes Provided here are answers to the practice questions at the end of "Classes". ## Objects **Objects Q1**. ``` # specific strings will differ true_var = 'asdf123'.isalnum() false_var = '!!!!'.isalnum() ``` **Objects Q2**. ``` days_summary = {} for day in days_of_week: days_summary[day] = site_days.count(day) ``` **Objects Q3**. ``` from random import choice rand_int = choice(range(0,10)) ``` ## Classes **Classes Q1**. ``` class ClassRoster(): def __init__(self, course): self.students = [] self.course = course def add_student(self, pid, name): self.students.append({pid: name}) ``` **Classes Q2**. ``` class ToDo(): def __init__(self): self.to_do = [] def add_item(self, item, top=True): if top: self.to_do.insert(0, item) else: self.to_do.append(item) def remove_item(self, item): self.to_do.remove(item) ``` **Classes Q3**. ``` class NewYear(): zodiac_signs = { 'Ox' : [1937, 1949, 1961, 1973, 1985, 1997, 2009, 2021], 'Tiger' : [1938, 1950, 1962, 1974, 1986, 1998, 2010], 'Rabbit' : [1939, 1951, 1963, 1975, 1987, 1999, 2011, 2023], 'Dragon' : [1940, 1952, 1964, 1976, 1988, 2000, 2012, 2024], 'Snake' : [1941, 1953, 1965, 1977, 1989, 2001, 2013, 2025], 'Horse' : [1942, 1954, 1966, 1978, 1990, 2002, 2014, 2026], 'Goat/Sheep' : [1943, 1955, 1967, 1979, 1991, 2003, 2015, 2027], 'Monkey' : [1944, 1956, 1968, 1980, 1992, 2004, 2016, 2028], 'Rooster' : [1945, 1957, 1969, 1981, 1993, 2005, 2017, 2029], 'Dog' : [1946, 1958, 1970, 1982, 1994, 2006, 2018, 2030], 'Pig' : [1947, 1959, 1971, 1983, 1995, 2007, 2019, 2031], 'Rat' : [1936, 1948, 1960, 1972, 1984, 1996, 2008, 2020] } def __init__(self, year): self.year = year def return_sign(self): for key in self.zodiac_signs: if self.year in self.zodiac_signs[key]: out = key break # answer would be fine without break here return 'You were born in the year of the ' + out + '!' ``` **Classes Q4**. Part I. 
```
class Kingdom():
    """A named member of the kingdom with a royal title."""

    def __init__(self, name, title):
        self.name = name
        self.title = title

    def introduce(self):
        """Return a short self-introduction string."""
        return 'Hello, my name is ' + self.name + ', and I am a ' + self.title + '.'
```

Part II.

```
import random

class CourtJester(Kingdom):
    """A Kingdom member who also tells (bad) jokes."""

    headwear = "fool's cap"

    def tell_a_joke(self):
        """Return one joke chosen at random from the repertoire."""
        joke_list = ['A clown held the door open for me yesterday. I thought it was a nice jester',
                     'How does the court jester address the King of Ducks? Mal’Lard',
                     'What did the court jester call the balding crown prince? The Heir Apparent with no Hair Apparent',
                     'What do you call a joke made by using sign language? A jester']
        return random.choice(joke_list)
```

**Classes Q5**.

```
class StudentInfo():
    """Holds one student's record and flags low project grades."""

    def __init__(self, name, year, school, proj_grade):
        self.name = name
        self.year = year
        self.school = school
        self.proj_grade = proj_grade

    def follow_up(self):
        """Return {name: grade} when the grade needs follow-up (<= 65), else {}."""
        if self.proj_grade <= 65:
            return {self.name: self.proj_grade}
        return {}
```
github_jupyter
## Keypad Combinations

A keypad on a cellphone has alphabets for all numbers between 2 and 9, as shown in the figure below:

<img style="float: center;height:200px;" src="Keypad.png" alt="A cell phone keypad that has letters associated with each number 2 through 9"><br>

You can make different combinations of alphabets by pressing the numbers. For example, if you press 23, the following combinations are possible:

`ad, ae, af, bd, be, bf, cd, ce, cf`

Note that because 2 is pressed before 3, the first letter is always an alphabet on the number 2. Likewise, if the user types 32, the order would be

`da, db, dc, ea, eb, ec, fa, fb, fc`

Given an integer `num`, find out all the possible strings that can be made using digits of input `num`. Return these strings in a list. The order of strings in the list does not matter. However, as stated earlier, the order of letters in a particular string matters.

```
# Letters printed on each keypad digit. Digits without letters (0, 1)
# simply have no entry and map to "".
# NOTE: the original cell defined get_characters twice (identical bodies)
# and contained a broken, immediately-shadowed intermediate version of
# keypad() that returned ['abc', 'def'] for 23; both have been removed.
_KEYPAD_LETTERS = {
    2: "abc",
    3: "def",
    4: "ghi",
    5: "jkl",
    6: "mno",
    7: "pqrs",
    8: "tuv",
    9: "wxyz",
}

def get_characters(num):
    """Return the letters printed on keypad digit `num` ("" if none)."""
    return _KEYPAD_LETTERS.get(num, "")

def keypad(num):
    """Return all letter strings producible from the digits of `num`.

    Recursive idea: solve the problem for every digit except the last
    (num // 10), then append each letter of the last digit to every
    string of that smaller answer.
    """
    # Base case: no letter-bearing digits left -> one empty combination.
    if num <= 1:
        return ['']
    # Single digit: each of its letters is a one-character combination.
    elif 1 < num <= 9:
        return list(get_characters(num))

    last_digit = num % 10
    small_output = keypad(num // 10)            # combinations for the leading digits
    keypad_string = get_characters(last_digit)  # letters of the trailing digit

    '''
    The Idea: Each character of keypad_string must be appended
    to the end of each string available in the small_output
    '''
    output = []
    for character in keypad_string:
        for item in small_output:
            output.append(item + character)
    return output
```

<span class="graffiti-highlight graffiti-id_9ibtd5w-id_haj1ksk"><i></i><button>Show Solution</button></span>

```
def test_keypad(input, expected_output):
    if sorted(keypad(input)) == expected_output:
        print("Yay. We got it right.")
    else:
        print("Oops! That was incorrect.")

# Base case: list with empty string
input = 0
expected_output = [""]
test_keypad(input, expected_output)

# Example case
input = 23
expected_output = sorted(["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"])
test_keypad(input, expected_output)

# Example case
input = 32
expected_output = sorted(["da", "db", "dc", "ea", "eb", "ec", "fa", "fb", "fc"])
test_keypad(input, expected_output)

# Example case
input = 8
expected_output = sorted(["t", "u", "v"])
test_keypad(input, expected_output)

input = 354
expected_output = sorted(["djg", "ejg", "fjg", "dkg", "ekg", "fkg", "dlg", "elg", "flg", "djh", "ejh", "fjh", "dkh", "ekh", "fkh", "dlh", "elh", "flh", "dji", "eji", "fji", "dki", "eki", "fki", "dli", "eli", "fli"])
test_keypad(input, expected_output)
```
github_jupyter
# 三星级复现项目:使用DDPG解决四轴飞行器速度控制 (这可能是史上最“偷懒”的三星级复现项目,改改任务环境就可以提交了 - -!应该没有更懒的了,O(∩_∩)O哈哈~) # Step1 安装依赖 !pip uninstall -y parl # 说明:AIStudio预装的parl版本太老,容易跟其他库产生兼容性冲突,建议先卸载 !pip uninstall -y pandas scikit-learn # 提示:在AIStudio中卸载这两个库再import parl可避免warning提示,不卸载也不影响parl的使用 ``` !pip uninstall -y parl # 说明:AIStudio预装的parl版本太老,容易跟其他库产生兼容性冲突,建议先卸载 !pip uninstall -y pandas scikit-learn # 提示:在AIStudio中卸载这两个库再import parl可避免warning提示,不卸载也不影响parl的使用 !pip install paddlepaddle==1.6.3 -i https://mirror.baidu.com/pypi/simple #可选安装paddlepaddle-gpu==1.6.3.post97 !pip install parl==1.3.1 !pip install rlschool==0.3.1 # 说明:安装日志中出现两条红色的关于 paddlehub 和 visualdl 的 ERROR 与parl无关,可以忽略,不影响使用 # 检查依赖包版本是否正确 !pip list | grep paddlepaddle !pip list | grep parl !pip list | grep rlschool ``` # Step2 导入依赖 ``` import os import numpy as np import parl from parl import layers from paddle import fluid from parl.utils import logger from parl.utils import action_mapping # 将神经网络输出映射到对应的 实际动作取值范围 内 from parl.utils import ReplayMemory # 经验回放 from rlschool import make_env # 使用 RLSchool 创建飞行器环境 ``` # Step3 设置超参数 ``` ###################################################################### ###################################################################### # # 1. 
请设定 learning rate,尝试增减查看效果 # ###################################################################### ###################################################################### ACTOR_LR =5* 0.0002 # Actor网络更新的 learning rate 开始直接5倍学习率,后期模型相对稳定后再调低 CRITIC_LR =5* 0.001 # Critic网络更新的 learning rate 开始直接5倍学习率,后期模型相对稳定后再调低 GAMMA = 0.99 # reward 的衰减因子,一般取 0.9 到 0.999 不等 TAU = 0.001 # target_model 跟 model 同步参数 的 软更新参数 MEMORY_SIZE = 60e4 # replay memory的大小,越大越占用内存 MEMORY_WARMUP_SIZE = 1e4 # replay_memory 里需要预存一些经验数据,再从里面sample一个batch的经验让agent去learn REWARD_SCALE = 0.01 # reward 的缩放因子 BATCH_SIZE = 2*256 # 每次给agent learn的数据数量,从replay memory随机里sample一批数据出来 2倍的batch_size TRAIN_TOTAL_STEPS = 60e4 # 总训练步数 TEST_EVERY_STEPS = 1e4 # 每个N步评估一下算法效果,每次评估5个episode求平均reward GM = 0.2 # 变电压的浮动参数 ``` # Step4 搭建Model、Algorithm、Agent架构 * `Agent`把产生的数据传给`algorithm`,`algorithm`根据`model`的模型结构计算出`Loss`,使用`SGD`或者其他优化器不断的优化,`PARL`这种架构可以很方便的应用在各类深度强化学习问题中。 ## (1)Model * 分别搭建`Actor`、`Critic`的`Model`结构,构建`QuadrotorModel`。 ``` class ActorModel(parl.Model): def __init__(self, act_dim): ###################################################################### ###################################################################### # # 2. 请配置model结构 # ###################################################################### ###################################################################### hid_size1 = 64 hid_size2 = 64 self.fc1 = layers.fc(size=hid_size1, act='relu',param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1)) self.fc2 = layers.fc(size=hid_size2, act='relu',param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1)) self.fc3 = layers.fc(size=act_dim , act='tanh',param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1)) def policy(self, obs): ###################################################################### ###################################################################### # # 3. 
请组装policy网络 # ###################################################################### ###################################################################### hid = self.fc1(obs) hid = self.fc2(hid) logits = self.fc3(hid) return logits class CriticModel(parl.Model): def __init__(self): ###################################################################### ###################################################################### # # 4. 请配置model结构 # ###################################################################### ###################################################################### hid_size = 100 self.fc1 = layers.fc(size=hid_size, act='relu',param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1)) self.fc2 = layers.fc(size=1, act=None) def value(self, obs, act): # 输入 state, action, 输出对应的Q(s,a) ###################################################################### ###################################################################### # # 5. 请组装Q网络 # ###################################################################### ###################################################################### concat = layers.concat([obs, act], axis=1) hid = self.fc1(concat) Q = self.fc2(hid) Q = layers.squeeze(Q, axes=[1]) return Q class QuadrotorModel(parl.Model): def __init__(self, act_dim): self.actor_model = ActorModel(act_dim) self.critic_model = CriticModel() def policy(self, obs): return self.actor_model.policy(obs) def value(self, obs, act): return self.critic_model.value(obs, act) def get_actor_params(self): return self.actor_model.parameters() ``` ## (2)Algorithm * 可以采用下面的方式从`parl`库中快速引入`DDPG`算法,无需自己重新写算法 ``` from parl.algorithms import DDPG ``` ## (3)Agent ``` class QuadrotorAgent(parl.Agent): def __init__(self, algorithm, obs_dim, act_dim): assert isinstance(obs_dim, int) assert isinstance(act_dim, int) self.obs_dim = obs_dim self.act_dim = act_dim super(QuadrotorAgent, self).__init__(algorithm) # 注意,在最开始的时候,先完全同步target_model和model的参数 self.alg.sync_target(decay=0) def 
build_program(self): self.pred_program = fluid.Program() self.learn_program = fluid.Program() with fluid.program_guard(self.pred_program): obs = layers.data( name='obs', shape=[self.obs_dim], dtype='float32') self.pred_act = self.alg.predict(obs) with fluid.program_guard(self.learn_program): obs = layers.data( name='obs', shape=[self.obs_dim], dtype='float32') act = layers.data( name='act', shape=[self.act_dim], dtype='float32') reward = layers.data(name='reward', shape=[], dtype='float32') next_obs = layers.data( name='next_obs', shape=[self.obs_dim], dtype='float32') terminal = layers.data(name='terminal', shape=[], dtype='bool') _, self.critic_cost = self.alg.learn(obs, act, reward, next_obs, terminal) def predict(self, obs): obs = np.expand_dims(obs, axis=0) act = self.fluid_executor.run( self.pred_program, feed={'obs': obs}, fetch_list=[self.pred_act])[0] return act def learn(self, obs, act, reward, next_obs, terminal): feed = { 'obs': obs, 'act': act, 'reward': reward, 'next_obs': next_obs, 'terminal': terminal } critic_cost = self.fluid_executor.run( self.learn_program, feed=feed, fetch_list=[self.critic_cost])[0] self.alg.sync_target() return critic_cost ``` # Step4 Training && Test(训练&&测试) ``` def run_episode(env, agent, rpm): obs = env.reset() total_reward, steps = 0, 0 while True: steps += 1 batch_obs = np.expand_dims(obs, axis=0) action0 = agent.predict(batch_obs.astype('float32')) #action = action.mean(axis=1) #加的一行代码,使输出一致,效果你懂的,值得一试O(∩_∩)O哈哈~ action = np.squeeze(action0) mean_a= action[4] #加的三行代码,还原输出,目的使输出稳定,相当于加了先验,4轴飞行器的电压的保持相对的稳定,更有利于收敛。 action = action[0:4] #其中一个维度是作为基本值,其他4个维度作为浮动值。 action = GM*action + mean_a #此处我取了一个GM = 0.15的系数,为什么有效?可能神经网络训练的时候输出的值是差不多的,强行加一个系数相当于人为的先验。 # 给输出动作增加探索扰动,输出限制在 [-1.0, 1.0] 范围内 action = np.clip(np.random.normal(action, 1.0), -1.0, 1.0) ## action = np.clip(action, -1.0, 1.0) ,变成这个样子就是直接用网络输出不加扰动存入经验池, ##大家也可以加大或者降低normal值,来增加或者减小探索的幅度 # 动作映射到对应的 实际动作取值范围 内, action_mapping是从parl.utils那里import进来的函数 action = 
action_mapping(action, env.action_space.low[0], env.action_space.high[0]) ##测试print(action) #之前测试action用的 next_obs, reward, done, info = env.step(action) rpm.append(obs, action0, REWARD_SCALE * reward, next_obs, done) #注意变量名 action0,rpm需要原始输出,而env需要处理后的输出 if rpm.size() > MEMORY_WARMUP_SIZE: batch_obs, batch_action, batch_reward, batch_next_obs, \ batch_terminal = rpm.sample_batch(BATCH_SIZE) critic_cost = agent.learn(batch_obs, batch_action, batch_reward, batch_next_obs, batch_terminal) obs = next_obs total_reward += reward if done: break return total_reward, steps # 评估 agent, 跑 5 个episode,总reward求平均 def evaluate(env, agent): eval_reward = [] for i in range(5): obs = env.reset() total_reward, steps = 0, 0 while True: batch_obs = np.expand_dims(obs, axis=0) action = agent.predict(batch_obs.astype('float32')) ##action[0] = action.mean(axis=1) #加的一行代码,使输出为4个神经元的平均值,此处是之前测试用的,大家也可以试下, action = np.squeeze(action) mean_a= action[4] #加的代码,还原输出,目的使输出稳定,原因同上。 action = action[0:4] action = GM*action + mean_a #此处我取了一个GM = 0.2的系数,在全局变量里面设置,用于变电压浮动的控制 action = np.clip(action, -1.0, 1.0) #加的一行代码,防止报错 action = action_mapping(action, env.action_space.low[0], env.action_space.high[0]) next_obs, reward, done, info = env.step(action) obs = next_obs total_reward += reward steps += 1 if done: break eval_reward.append(total_reward) return np.mean(eval_reward) ``` # Step 5 创建环境和Agent,创建经验池,启动训练,定期保存模型 ``` # 创建飞行器环境 env = make_env("Quadrotor", task="velocity_control", seed=0) ##关键的点到了,此处为作业到复现最大的改动,就改了一个文件名,说明parl框架的确复用性非常强。 env.reset() obs_dim = env.observation_space.shape[0] act_dim = env.action_space.shape[0] +1 #输出加一个维度,评估时再还原 # 根据parl框架构建agent ###################################################################### ###################################################################### # # 6. 
请构建agent: QuadrotorModel, DDPG, QuadrotorAgent三者嵌套 # ###################################################################### ###################################################################### model = QuadrotorModel(act_dim) algorithm = DDPG( model, gamma=GAMMA, tau=TAU, actor_lr=ACTOR_LR, critic_lr=CRITIC_LR) agent = QuadrotorAgent(algorithm, obs_dim, act_dim) # parl库也为DDPG算法内置了ReplayMemory,可直接从 parl.utils 引入使用 rpm = ReplayMemory(int(MEMORY_SIZE), obs_dim, act_dim) # 启动训练 test_flag = 0 total_steps = 0 while total_steps < TRAIN_TOTAL_STEPS: train_reward, steps = run_episode(env, agent, rpm) total_steps += steps #logger.info('Steps: {} Reward: {}'.format(total_steps, train_reward)) # 打印训练reward if total_steps // TEST_EVERY_STEPS >= test_flag: # 每隔一定step数,评估一次模型 while total_steps // TEST_EVERY_STEPS >= test_flag: test_flag += 1 evaluate_reward = evaluate(env, agent) logger.info('Steps {}, Test reward: {}'.format( total_steps, evaluate_reward)) # 打印评估的reward # 每评估一次,就保存一次模型,以训练的step数命名 ckpt = 'model_dir3/s2[{}]_{}.ckpt'.format(int(evaluate_reward),total_steps) #想存不同版本的ckpt文件,可以在此处改目录,一个版本一个目录肯定不会混。 agent.save(ckpt) ``` # 验收测评 **我的理解是既然是速度控制,那么越接近规定的速度越好,最好的情况就是与规定的速度相同也就是0误差,这可能就是reward要定为很小的负值的意义** * 大家可以看到log信息,reward在35W步的时候得到了最低分-908,训练到75W步的时候基本稳定在-20的水平,说明使用parl框架训练有效。 ``` ###################################################################### ###################################################################### # # 7. 
请选择你训练的最好的一次模型文件做评估 # ###################################################################### ###################################################################### ckpt = 'model_dir3/s2[-19]_590000.ckpt' # 请设置ckpt为你训练中效果最好的一次评估保存的模型文件名称 agent.restore(ckpt) evaluate_reward = evaluate(env, agent) logger.info('Evaluate reward: {}'.format(evaluate_reward)) # 打印评估的reward ``` **一个有趣的地方:** * 训练的时候action的GM值取的全局变量,GM= 0.2,但测试的时候我改写了评估程序,令 action = gm * action +(1-gm)* mean_a 。 * 这个操作只会对测试产生影响,而不会对rpm产生影响,因为存入rpm的是神经网络的原始输出值。 * 评估时每循环一次都改变了gm的值,gm最小取0,最大取1。取gm = 0时,action 失效;取 gm = 1时,mean_a 失效。 * 我循环测试了21次,每次gm 值增加0.05 ,即使是gm为0 或者 gm 为1的时候,飞行器都能得到高的reward ,这说明无论是action (具有4个输出维度),还是 mean_a(只有1个输出维度) 都能独立完成任务。 * 这可能就是设置基本值mean_a和浮动值action ,并按一定比例叠加送入到env之后能够提高收敛速度的原因:浮动值和基值均能独立起作用,将其混合之后提高了输出的相对稳定性。 ``` ckpt = 'model_dir3/s2[-19]_590000.ckpt' # 请设置ckpt为你训练中效果最好的一次评估保存的模型文件名称 agent.restore(ckpt) def evaluate1(env, agent ,gm): eval_reward = [] for i in range(5): obs = env.reset() total_reward, steps = 0, 0 while True: batch_obs = np.expand_dims(obs, axis=0) action = agent.predict(batch_obs.astype('float32')) ##action[0] = action.mean(axis=1) #加的一行代码,使输出为4个神经元的平均值,此处是之前测试用的,大家也可以试下, action = np.squeeze(action) mean_a= action[4] #加的代码,还原输出,目的使输出稳定,原因同上。 action = action[0:4] action = gm*action +(1-gm) * mean_a #注意此处的gm,用于变电压浮动的控制 action = np.clip(action, -1.0, 1.0) #加的一行代码,防止报错 action = action_mapping(action, env.action_space.low[0], env.action_space.high[0]) next_obs, reward, done, info = env.step(action) obs = next_obs total_reward += reward steps += 1 if done: break eval_reward.append(total_reward) print("一次评估完成,此时的gm值",gm,"此次的total_reward",total_reward) return np.mean(eval_reward) for gm in range(21): gm = 0.05*float(gm) print("此轮的gm值:",gm) evaluate_reward = evaluate1(env, agent,gm) logger.info('Evaluate reward: {}'.format(evaluate_reward)) # 打印评估的reward ```
github_jupyter
```
# python 3.6.8
# DLISIO v0.3.5
# numpy v1.16.2
# pandas v0.24.1
# lasio v0.25.1
from dlisio import lis
import pandas as pd
import os
import lasio
import numpy as np

def extract_wellname(f, find_wellname, manualwellname):
    """Pull a well name out of a LIS logical file's wellsite records.

    f              -- a dlisio logical file
    find_wellname  -- "Yes" to search the header; anything else uses manualwellname
    manualwellname -- fallback name supplied by the caller

    Returns (wellname, wellname_report_error); the error string is
    non-empty when the header could not be read or no name was found.
    """
    # Bug fix: `wellname` was unbound (NameError on return) when the header
    # record was unstructured, or when no search term matched anything.
    wellname = ""
    wellname_report_error = ''
    if find_wellname == "Yes":
        records = f.wellsite_data()
        inforec = records[0]  # assume well name is in the first record
        # Can it be structured? Otherwise return an error
        if inforec.isstructured():
            np_array = (inforec.table(simple=True)).T
            df = pd.DataFrame(data=np_array)
            wellname_list = []
            items = ['Well Name', 'wellname', 'WN', 'WELL NAME']  # set all the possible search terms
            for item in items:
                # search both the VALU and MNEM columns for each term
                df["string_index"] = df["VALU"].str.find(str(item)).values
                matches = df.loc[df['string_index'] >= 0, 'VALU'].values.tolist()
                wellname_list.append(matches)
                df["string_index"] = df["MNEM"].str.find(str(item)).values
                matches = df.loc[df['string_index'] >= 0, 'VALU'].values.tolist()
                wellname_list.append(matches)
            wellname_list = [x for x in wellname_list if x != []]
            for candidates in wellname_list:
                # strip the label text away, keeping only the name itself
                candidates = [s.replace('Well Name:', '') for s in candidates]
                candidates = [s.replace(' WN', '') for s in candidates]
                candidates = [s.replace('Well Name ', '') for s in candidates]
                candidates = [s.replace('WN', '') for s in candidates]
                candidates = [s.strip() for s in candidates]
                wellname = candidates[0]  # take the first item in the list
            if len(wellname) > 0:
                wellname_report_error = ""
            else:
                wellname_report_error = "Could not read header - no name extraction"
        else:
            wellname_report_error = "Could not read header - not structured"
    else:
        wellname = manualwellname
        wellname_report_error = ''
    return wellname, wellname_report_error

def create_las(curves_df, units, curvenames, wellname, filepath):
    """Build a lasio.LASFile from a curves DataFrame plus curve metadata."""
    las = lasio.LASFile()
    # write the pandas data to the las file
    las.set_data(curves_df)
    # write the curve metadata from our curvename/unit lists
    for counter, x in enumerate(curvenames):
        las.curves[x].unit = units[counter]
    las.well.WELL = wellname
    las.params['LINEAGE'] = lasio.HeaderItem('LINEAGE', value="Python-converted from LIS")
    las.params['ORFILE'] = lasio.HeaderItem('ORFILE', value=filepath)
    return las

def write_las_file(las, filepath, frame_count, output_folder_location):
    """Write `las` using the input file's base name, tagged with the frame number."""
    filename = os.path.basename(filepath)
    filename = os.path.splitext(filename)[0]
    outfile = filename + "_" + "converted_with_python_" + str(frame_count) + ".las"
    outpath = os.path.join(output_folder_location, outfile)
    if not os.path.exists(output_folder_location):
        print("Making output directory: [{}]\n".format(output_folder_location))
        os.makedirs(output_folder_location)
    print("Writing: [{}]\n".format(outpath))
    las.write(outpath, version=2)

# process each logical file and each sampling rate (frame) at a time.
# The goal with this function is to only read from the lis file once!
# If you don't care about speed, and need the well names extracted from the lis header, set the find_wellname = Yes
# Otherwise pass in the name of the well into the manualwellname variable.
# The number of logical files times the number of groups of sample rates will determine the number of las files created.
# Curves with no matching frame sample rate will not be included!! See Fast Channels in the LIS User Guides @dlisio.readthedocs
def lis_to_las(filepath, output_folder_location, manualwellname="set input", find_wellname="Yes"):
    """Convert every logical file / sample rate in a LIS file to LAS files.

    Returns a short report string with the number of frames processed.
    """
    frame_count = 0
    with lis.load(filepath) as files:
        for f in files:
            # Bug fix: extract_wellname returns (name, error); the original
            # assigned the whole tuple to `wellname`, so the LAS well-name
            # header ended up holding a tuple instead of a string.
            wellname, wellname_error = extract_wellname(f, find_wellname, manualwellname)
            for fs in f.data_format_specs():
                for frame in fs.sample_rates():
                    units = []
                    curvenames = []
                    meta = lis.curves_metadata(fs, sample_rate=frame)
                    curves = lis.curves(f, fs, sample_rate=frame)
                    names = curves.dtype.names
                    curves_df = pd.DataFrame(data=curves.T, columns=names)
                    # check if dataframe is empty
                    if curves_df.empty:
                        frame_count = frame_count + 1
                        print('Frame number ' + str(frame_count) + " is empty")
                        # Bug fix: was `break`, which silently abandoned every
                        # remaining sample rate of this format spec; only the
                        # empty frame should be skipped.
                        continue
                    # get and set index curve
                    index_curve = fs.index_mnem
                    curves_df = curves_df.set_index(index_curve)
                    # collect the unit / name inventories for this frame
                    for n in names:
                        spec = meta[n]
                        units.append(spec.units)
                        curvenames.append(n)
                    # advance the count of sample rate frames
                    frame_count = frame_count + 1
                    # Quick quality test (warn only on an actual mismatch)
                    if len(curvenames) != len(units):
                        print("Frame " + str(frame_count) + " " + "Mismatch in number of names and units; use with caution.")
                    las = create_las(curves_df, units, curvenames, wellname, filepath)
                    write_las_file(las, filepath, frame_count, output_folder_location)
    report = ("Number of frames = " + str(frame_count))
    print(report)
    return report

filepath = r"\Volve_Well_logs_pr_WELL\15_9-F-4\04.COMPOSITE\WLC_PETROPHYSICAL_COMPOSITE_1.LIS"
filepath = r"\Volve_Well_logs_pr_WELL\15_9-F-4\01.MUD_LOG\MUD_LOG_1.LIS"  # overrides the path above
output_folder_location = ""

results = lis_to_las(filepath, output_folder_location, manualwellname="Volve", find_wellname="No")
results = lis_to_las(filepath, output_folder_location, find_wellname="Yes")
```
github_jupyter
# SYS 611: Dice Fighters Example (w/ Binomial Process Gen.)

Paul T. Grogan <pgrogan@stevens.edu>

This example shows how to model the dice fighters example in Python using a binomial process generator.

## Dependencies

This example is compatible with Python 2 environments through use of the `__future__` library function. Additionally, this example uses the `numpy` and `scipy.stats` libraries.

```
# import the python3 behavior for importing, division, and printing in python2
from __future__ import absolute_import, division, print_function

# import the numpy library and refer to it as `np`
import numpy as np

# import the scipy.stats library and refer to it as `stats`
import scipy.stats as stats
```

## Elementary State Variables

There are five elementary state variables defined below:
 * `round_number`: Current round number
 * `red_size`: Red force size
 * `blue_size`: Blue force size
 * `red_chance_hit`: Red team probability of landing a 'hit' on a blue team
 * `blue_chance_hit`: Blue team probability of landing a 'hit' on a red team

All variables are defined with global scope and initialized to an initial value. A helper function `print_state` formats the display of key state variables.

```
round_number = 0
red_size = 20
blue_size = 10
red_chance_hit = 1/6
blue_chance_hit = 3/6

def print_state():
    # formatted one-line display of the round number and both force sizes
    print("Round: {:d} | Red: {:d}, Blue: {:d}".format(round_number, red_size, blue_size))
```

## Derived State Variables

There is one derived state variable defined below:
 * `is_complete`: Determines if a game is complete.

```
def is_complete():
    """
    Check if the game is complete, meaning at least one team has
    no forces remaining. Return True if the game is complete, False otherwise.
    """
    return (red_size <= 0 or blue_size <= 0)
```

## Process Generators

There are two process generator functions defined below:
 * `generate_red_hits`: a process generator to determine how many hits the red team scores
 * `generate_blue_hits`: a process generator to determine how many hits the blue team scores

These functions use the binomial inverse CDF function (called a PPF function in `scipy.stats`) following the inverse transform method (IVT) to generate the number of hits based on the number of forces remaining.

```
# define the generate_red_hits function
def generate_red_hits():
    """
    Randomly generate the number of red hits on the blue team.
    """
    # use the binomial PPF (inverse CDF) with a random sample and cast to an integer
    return int(stats.binom.ppf(np.random.rand(), red_size, red_chance_hit))
    # note: the code above could be replaced by a built-in numpy process generator:
    # return np.random.binomial(red_size, red_chance_hit)

# define the generate_blue_hits function
def generate_blue_hits():
    """
    Randomly generate the number of blue hits on the red team.
    """
    # use the binomial PPF (inverse CDF) with a random sample and cast to an integer
    return int(stats.binom.ppf(np.random.rand(), blue_size, blue_chance_hit))
    # note: the code above could be replaced by a built-in numpy process generator:
    # return np.random.binomial(blue_size, blue_chance_hit)
```

## State Transition Functions

There are three state transition functions defined below:
 * `red_suffer_losses`: decreases the red force size by the number of blue hits
 * `blue_suffer_losses`: decreases the blue force size by the number of red hits
 * `next_round`: advances to the next round

```
def red_suffer_losses(opponent_hits):
    """
    Decrease the red team size by the number of blue hits.
    """
    # (note: red_size must be declared as a global variable to update in this function!)
    global red_size
    # update the red_size based on the number of opponent hits
    red_size -= opponent_hits

def blue_suffer_losses(opponent_hits):
    """
    Decrease the blue team size by the number of red hits.
    """
    # (note: blue_size must be declared as a global variable to update in this function!)
    global blue_size
    # update the blue_size based on number of opponent hits
    blue_size -= opponent_hits

def next_round():
    """
    Advance to the next round.
    """
    # (note: round_number must be declared as a global variable to update in this function!)
    global round_number
    # advance the round_number
    round_number += 1
```

## Simulation Execution

The following script runs a complete dice fighters match.

```
# re-initialize the elementary state variables for a fresh match
round_number = 0
red_size = 20
blue_size = 10
red_chance_hit = 1/6
blue_chance_hit = 3/6

# main execution loop: continue while the game is not complete
while not is_complete():
    # generate the number of red hits
    red_hits = generate_red_hits()
    # generate the number of blue hits
    blue_hits = generate_blue_hits()
    # red team suffers losses of blue hits
    red_suffer_losses(blue_hits)
    # blue team suffers losses of red hits
    blue_suffer_losses(red_hits)
    # advance to the next round
    next_round()
    # print out the current state for debugging
    print_state()

# after the main loop exits, check who won (whichever team still has fighters!)
if red_size > 0:
    print("Red Wins")
elif blue_size > 0:
    print("Blue Wins")
else:
    print("Tie - Mutual Destruction!")
```
github_jupyter
# Sampled Softmax For classification and prediction problems a typical criterion function is cross-entropy with softmax. If the number of output classes is high the computation of this criterion and the corresponding gradients could be quite costly. Sampled Softmax is a heuristic to speed up training in these cases. (see: [Adaptive Importance Sampling to Accelerate Training of a Neural Probabilistic Language Model](http://www.iro.umontreal.ca/~lisa/pointeurs/importance_samplingIEEEtnn.pdf), [Exploring the Limits of Language Modeling](https://arxiv.org/pdf/1602.02410v1.pdf), [What is Candidate Sampling](https://www.tensorflow.org/extras/candidate_sampling.pdf)) #### Select the notebook runtime environment devices / settings Before we dive into the details we run some setup that is required for automated testing of this notebook. ``` import os import cntk as C # Select the right target device when this notebook is being tested: if 'TEST_DEVICE' in os.environ: if os.environ['TEST_DEVICE'] == 'cpu': C.device.try_set_default_device(C.device.cpu()) else: C.device.try_set_default_device(C.device.gpu(0)) ``` ## Basics The softmax function is used in neural networks if we want to interpret the network output as a probability distribution over a set of classes $C$ with $|C|=N_C$. Softmax maps an $N_C$-dimensional vector $z$, which has unrestricted values, to an $N_C$ dimensional vector $p$ with non-negative values that sum up to 1 so that they can be interpreted as probabilities. More precisely: $$ \begin{align} p_i &= softmax(z, i)\\ &= \frac{exp(z_i)}{\sum_{k\in C} exp(z_k)}\\ \end{align} $$ In what follows we assume that the input $z$ to the softmax is computed from some hidden vector $h$ of dimension $N_h$ in a specific way, namely: $$ z = W h + b $$ where $W$ is a learnable weight matrix of dimension $(N_c, N_h)$ and $b$ is a learnable bias vector. We restrict ourselves to this specific choice of $z$ because it helps in implementing an efficient sampled softmax. 
In a typical use-case like for example a recurrent language model, the hidden vector $h$ would be the output of the recurrent layers and $C$ would be the set of words to predict. As a training criterion, we use cross-entropy which is a function of the expected (true) class $t\in C$ and the probability predicted for it: $$cross\_entropy := -log(p_t)$$ ## Sampled Softmax from the outside For the normal softmax the CNTK Python-api provides the function [cross_entropy_with_softmax](https://cntk.ai/pythondocs/cntk.ops.html?highlight=softmax#cntk.ops.cross_entropy_with_softmax). This takes as input the $N_C$-dimensional vector $z$. As mentioned for our sampled softmax implementation we assume that this z is computed by $ z = W h + b $. In sampled softmax this has to be part of the whole implementation of the criterion. Below we show the code for `cross_entropy_with_sampled_softmax_and_embedding`. Let’s look at the signature first. One fundamental difference to the corresponding function in the Python-api (`cross_entropy_with_softmax`) is that in the Python api function the input corresponds to $z$ and must have the same dimension as the target vector, while in cross_entropy_with_full_softmax the input corresponds to our hidden vector $h$ can have any dimension (hidden_dim). Actually, hidden_dim will be typically much lower than the dimension of the target vector. We also have some additional parameters `num_samples, sampling_weights, allow_duplicates` that control the random sampling. Another difference to the api function is that we return a triple (z, cross_entropy_on_samples, error_on_samples). We will come back to the details of the implementation below. ``` from __future__ import print_function from __future__ import division # Creates a subgraph computing cross-entropy with sampled softmax. 
def cross_entropy_with_sampled_softmax_and_embedding(
    hidden_vector,            # Node providing hidden input
    target_vector,            # Node providing the expected labels (as sparse vectors)
    num_classes,              # Number of classes
    hidden_dim,               # Dimension of the hidden vector
    num_samples,              # Number of samples to use for sampled softmax
    sampling_weights,         # Node providing weights to be used for the weighted sampling
    allow_duplicates = True   # Boolean flag to control whether to use sampling with replacement
                              # (allow_duplicates == True) or without replacement.
    ):
    """Build a CNTK subgraph computing cross-entropy with sampled softmax.

    Returns (z, cross_entropy_on_samples, error_on_samples) where z is the
    full (unsampled) softmax input for use at apply time.
    """
    # define the learnable parameters of z = W h + b
    b = C.Parameter(shape = (num_classes, 1), init = 0)
    W = C.Parameter(shape = (num_classes, hidden_dim), init = C.glorot_uniform())

    # Define the node that generates a set of random samples per minibatch
    # Sparse matrix (num_samples * num_classes)
    sample_selector = C.random_sample(sampling_weights, num_samples, allow_duplicates)

    # For each of the samples we also need the probability that it is in the sampled set.
    inclusion_probs = C.random_sample_inclusion_frequency(sampling_weights, num_samples, allow_duplicates) # dense row [1 * vocab_size]
    log_prior = C.log(inclusion_probs) # dense row [1 * num_classes]

    # Create a submatrix of the weights W containing only the rows of the sampled classes
    W_sampled = C.times(sample_selector, W) # [num_samples * hidden_dim]
    z_sampled = C.times_transpose(W_sampled, hidden_vector) + C.times(sample_selector, b) - C.times_transpose (sample_selector, log_prior)# [num_samples]

    # Getting the weight vector for the true label. Dimension hidden_dim
    W_target = C.times(target_vector, W) # [1 * hidden_dim]
    z_target = C.times_transpose(W_target, hidden_vector) + C.times(target_vector, b) - C.times_transpose(target_vector, log_prior) # [1]

    z_reduced = C.reduce_log_sum_exp(z_sampled)

    # Compute the cross entropy that is used for training.
    # We don't check whether any of the classes in the random samples coincides with the true label, so it might
    # happen that the true class is counted twice in the normalising denominator of sampled softmax.
    cross_entropy_on_samples = C.log_add_exp(z_target, z_reduced) - z_target

    # For applying the model we also output a node providing the input for the full softmax
    z = C.times_transpose(W, hidden_vector) + b
    z = C.reshape(z, shape = (num_classes))

    zSMax = C.reduce_max(z_sampled)
    error_on_samples = C.less(z_target, zSMax)

    return (z, cross_entropy_on_samples, error_on_samples)
```

To give a better idea of what the inputs and outputs are and how this all differs from the normal softmax we give below a corresponding function using normal softmax:

```
# Creates subgraph computing cross-entropy with (full) softmax.
def cross_entropy_with_softmax_and_embedding(
    hidden_vector,  # Node providing hidden input
    target_vector,  # Node providing the expected labels (as sparse vectors)
    num_classes,    # Number of classes
    hidden_dim      # Dimension of the hidden vector
    ):
    """Build a CNTK subgraph computing cross-entropy with the full softmax.

    Same (z, cross_entropy, error_on_samples) triple as the sampled variant,
    provided for comparison.
    """
    # Setup bias and weights of z = W h + b
    b = C.Parameter(shape = (num_classes, 1), init = 0)
    W = C.Parameter(shape = (num_classes, hidden_dim), init = C.glorot_uniform())

    z = C.reshape( C.times_transpose(W, hidden_vector) + b, (1, num_classes))

    # Use cross_entropy_with_softmax
    cross_entropy = C.cross_entropy_with_softmax(z, target_vector)

    zMax = C.reduce_max(z)
    zT = C.times_transpose(z, target_vector)
    error_on_samples = C.less(zT, zMax)

    return (z, cross_entropy, error_on_samples)
```

As you can see the main differences to the api function `cross_entropy_with_softmax` are:
* We include the mapping $ z = W h + b $ into the function.
* We return a triple (z, cross_entropy, error_on_samples) instead of just returning the cross entropy.

## A toy example

To explain how to integrate sampled softmax let us look at a toy example.
In this toy example we first transform one-hot input vectors via some random projection into a lower dimensional vector $h$. The modeling task is to reverse this mapping using (sampled) softmax.

Well, as already said this is a toy example.

```
import numpy as np
from math import log, exp, sqrt
from cntk.logging import ProgressPrinter
import timeit

# A class with all parameters
class Param:
    # Learning parameters
    learning_rate = 0.03
    minibatch_size = 100
    num_minbatches = 100
    test_set_size = 1000
    momentum_time_constant = 5 * minibatch_size
    reporting_interval = 10
    allow_duplicates = False

    # Parameters for sampled softmax
    use_sampled_softmax = True
    use_sparse = True
    softmax_sample_size = 10

    # Details of data and model
    num_classes = 50
    hidden_dim = 10

data_sampling_distribution = lambda: np.repeat(1.0 / Param.num_classes, Param.num_classes)
softmax_sampling_weights = lambda: np.repeat(1.0 / Param.num_classes, Param.num_classes)

# Creates random one-hot vectors of dimension 'num_classes'.
# Returns a tuple with a list of one-hot vectors, and list with the indices they encode.
def get_random_one_hot_data(num_vectors):
    indices = np.random.choice(
        range(Param.num_classes),
        size=num_vectors,
        p = data_sampling_distribution()).reshape((1, num_vectors))
    list_of_vectors = C.Value.one_hot(indices, Param.num_classes)
    return (list_of_vectors, indices.flatten())

# Create a network that:
# * Transforms the input one hot-vectors with a constant random embedding
# * Applies a linear decoding with parameters we want to learn
def create_model(labels):
    # random projection matrix (constant, i.e. not learned)
    random_data = np.random.normal(scale = sqrt(1.0/Param.hidden_dim), size=(Param.num_classes, Param.hidden_dim)).astype(np.float32)
    random_matrix = C.constant(shape = (Param.num_classes, Param.hidden_dim), value = random_data)

    h = C.times(labels, random_matrix)

    # Connect the latent output to (sampled/full) softmax.
    if Param.use_sampled_softmax:
        sampling_weights = np.asarray(softmax_sampling_weights(), dtype=np.float32)
        sampling_weights.reshape((1, Param.num_classes))
        softmax_input, ce, errs = cross_entropy_with_sampled_softmax_and_embedding(
            h,
            labels,
            Param.num_classes,
            Param.hidden_dim,
            Param.softmax_sample_size,
            softmax_sampling_weights(),
            Param.allow_duplicates)
    else:
        softmax_input, ce, errs = cross_entropy_with_softmax_and_embedding(
            h,
            labels,
            Param.num_classes,
            Param.hidden_dim)

    return softmax_input, ce, errs

def train(do_print_progress):
    # Runs the training loop; returns (minbatch_data, cross_entropy_data,
    # overall_samples_per_second) for plotting and speed comparison.
    labels = C.input_variable(shape = Param.num_classes, is_sparse = Param.use_sparse)
    z, cross_entropy, errs = create_model(labels)

    # Setup the trainer
    learning_rate_schedule = C.learning_rate_schedule(Param.learning_rate, C.UnitType.sample)
    momentum_schedule = C.momentum_as_time_constant_schedule(Param.momentum_time_constant)
    learner = C.momentum_sgd(z.parameters, learning_rate_schedule, momentum_schedule, True)
    progress_writers = None
    if do_print_progress:
        progress_writers = [ProgressPrinter(freq=Param.reporting_interval, tag='Training')]
    trainer = C.Trainer(z, (cross_entropy, errs), learner, progress_writers)

    minbatch = 0
    average_cross_entropy = compute_average_cross_entropy(z)
    minbatch_data = [0] # store minibatch values
    cross_entropy_data = [average_cross_entropy] # store cross_entropy values

    # Run training
    t_total= 0

    # Run training
    for minbatch in range(1,Param.num_minbatches):
        # Specify the mapping of input variables in the model to actual minibatch data to be trained with
        label_data, indices = get_random_one_hot_data(Param.minibatch_size)
        arguments = ({labels : label_data})

        # If do_print_progress is True, this will automatically print the progress using ProgressPrinter
        # The printed loss numbers are computed using the sampled softmax criterion
        t_start = timeit.default_timer()
        trainer.train_minibatch(arguments)
        t_end = timeit.default_timer()

        t_delta = t_end - t_start
        samples_per_second = Param.minibatch_size / t_delta

        # We ignore the time measurements of the first two minibatches
        if minbatch > 2:
            t_total += t_delta

        # For comparison also print result using the full criterion
        if minbatch % Param.reporting_interval == int(Param.reporting_interval/2):
            # memorize the progress data for plotting
            average_cross_entropy = compute_average_cross_entropy(z)
            minbatch_data.append(minbatch)
            cross_entropy_data.append(average_cross_entropy)

            if do_print_progress:
                print("\nMinbatch=%d Cross-entropy from full softmax = %.3f perplexity = %.3f samples/s = %.1f" % (minbatch, average_cross_entropy, exp(average_cross_entropy), samples_per_second))

    # Number of samples we measured. First two minbatches were ignored
    samples_measured = Param.minibatch_size * (Param.num_minbatches - 2)
    overall_samples_per_second = samples_measured / t_total

    return (minbatch_data, cross_entropy_data, overall_samples_per_second)

def compute_average_cross_entropy(softmax_input):
    # Evaluates the full-softmax cross entropy on a fresh random test set.
    vectors, indices = get_random_one_hot_data(Param.test_set_size)
    total_cross_entropy = 0.0
    arguments = (vectors)
    z = softmax_input.eval(arguments).reshape(Param.test_set_size, Param.num_classes)

    for i in range(len(indices)):
        log_p = log_softmax(z[i], indices[i])
        total_cross_entropy -= log_p

    return total_cross_entropy / len(indices)

# Computes log(softmax(z,index)) for a one-dimensional numpy array z in a numerically stable way.
def log_softmax(z,    # numpy array
                index # index into the array
               ):
    max_z = np.max(z)
    return z[index] - max_z - log(np.sum(np.exp(z - max_z)))

np.random.seed(1)
print("start...")
train(do_print_progress = True)
print("done.")
```

In the above code we use two different methods to report training progress:
1. Using a function that computes the average cross entropy on full softmax.
2. Using the built-in ProgressPrinter

ProgressPrinter reports how the value of the training criterion changes over time. In our case the training criterion is cross-entropy from **sampled** softmax.
The same is true for the error rate computed by the progress printer: it is computed only for the true class versus the sampled classes and will therefore underestimate the true error rate.

Therefore, while ProgressPrinter already gives us some idea of how training is going, if we want to compare the behavior for different sampling strategies (sample size, sampling weights, ...) we should not rely on numbers that are computed only using the sampled subset of classes.

## Importance sampling

Often we don't have a uniform distribution for the classes on the output side. The typical example is words as output classes, where e.g. 'the' will be much more frequent than most others. In such cases one often uses a non-uniform distribution for drawing the samples in sampled softmax, increasing the sampling weight of the frequent classes. This is also called importance sampling. In our example the sampling distribution is controlled by the weight array `softmax_sampling_weights`.

As an example let's look at the case where the classes are distributed according to a zipf-distribution like:

$$ p[i] \propto \frac{1}{i+5}, $$

which is actually the distribution we already use in our example.

How does training behavior change if we switch from uniform sampling to sampling with the zipfian distribution in sampled softmax?
```
# We want to plot the data
import matplotlib.pyplot as plt
%matplotlib inline

# Define weights of zipfian distribution
def zipf(index):
    return 1.0 / (index + 5)

# Use zipfian distribution for the classes
def zipf_sampling_weights():
    return np.asarray([ zipf(i) for i in range(Param.num_classes)], dtype=np.float32)

data_sampling_distribution = lambda: zipf_sampling_weights() / np.sum(zipf_sampling_weights())

print("start...")

# Train using uniform sampling (like before)
np.random.seed(1)
softmax_sampling_weights = lambda: np.repeat(1.0/Param.num_classes, Param.num_classes)
minibatch_data, cross_entropy_data, _ = train(do_print_progress = False)

# Train using importance sampling
np.random.seed(1)
softmax_sampling_weights = zipf_sampling_weights
minibatch_data2, cross_entropy_data2, _ = train(do_print_progress = False)

# uniform sampling in red, importance (zipfian) sampling in blue
plt.plot(minibatch_data, cross_entropy_data, 'r--',minibatch_data, cross_entropy_data2, 'b--')
plt.xlabel('number of mini-batches')
plt.ylabel('cross entropy')
plt.show()
```

In the example above we compare uniform sampling (red) vs sampling with the same distribution the classes have (blue).

You will need to experiment to find the best settings for all the softmax parameters.

## What speedups to expect?

The speed difference between full softmax and sampled softmax in terms of training instances depends strongly on the concrete settings, namely

* Number of classes. Typically the speed-up will increase the more output classes you have.
* Number of samples used in sampled softmax
* Dimension of hidden layer input
* Minibatch size
* Hardware

Also you need to test how much you can reduce sample size without degradation of the result.
```
print("start...")

# Reset parameters: tiny minibatches, a huge class count, and reporting
# switched off so the measurement reflects softmax cost only.
class Param:
    # Learning parameters
    learning_rate = 0.03
    minibatch_size = 8
    num_minbatches = 100
    test_set_size = 1 # we are only interested in speed
    momentum_time_constant = 5 * minibatch_size
    reporting_interval = 1000000 # Switch off reporting to speed up
    allow_duplicates = False

    # Parameters for sampled softmax
    use_sampled_softmax = True
    use_sparse = True
    softmax_sample_size = 10

    # Details of data and model
    num_classes = 50000
    hidden_dim = 10

data_sampling_distribution = lambda: np.repeat(1.0 / Param.num_classes, Param.num_classes)
softmax_sampling_weights = lambda: np.repeat(1.0 / Param.num_classes, Param.num_classes)

sample_sizes = [5, 10, 100, 1000]
speed_with_sampled_softmax = []

# Get the speed with sampled softmax for different sizes
for sample_size in sample_sizes:
    print("Measuring speed of sampled softmax for sample size %d ..." % (sample_size))
    Param.use_sampled_softmax = True
    Param.softmax_sample_size = sample_size
    _, _, samples_per_second = train(do_print_progress = False)
    speed_with_sampled_softmax.append(samples_per_second)

# Get the speed with full softmax
Param.use_sampled_softmax = False
print("Measuring speed of full softmax ...")
_, _, samples_per_second = train(do_print_progress = False)
speed_without_sampled_softmax = np.repeat(samples_per_second, len(sample_sizes))

# Plot the speed of sampled softmax (blue) as a function of sample sizes
# and compare it to the speed with full softmax (red).
plt.plot(sample_sizes, speed_without_sampled_softmax, 'r--',sample_sizes, speed_with_sampled_softmax, 'b--')
plt.xlabel('softmax sample size')
plt.ylabel('speed: instances / second')
plt.title("Speed 'sampled softmax' (blue) vs. 'full softmax' (red)")
plt.ylim(ymin=0)
plt.show()
```
github_jupyter
### Data Source Dataset is derived from Fannie Mae’s [Single-Family Loan Performance Data](http://www.fanniemae.com/portal/funding-the-market/data/loan-performance-data.html) with all rights reserved by Fannie Mae. This processed dataset is redistributed with permission and consent from Fannie Mae. For the full raw dataset visit [Fannie Mae]() to register for an account and to download Instruction is available at NVIDIA [RAPIDS demo site](https://rapidsai.github.io/demos/datasets/mortgage-data). ### Prerequisite This notebook runs in a Dataproc cluster with GPU nodes, with [Spark RAPIDS](https://github.com/GoogleCloudDataproc/initialization-actions/tree/master/rapids) set up. ### Define ETL Process Define data schema and steps to do the ETL process: ``` import time from pyspark import broadcast from pyspark.sql import SparkSession from pyspark.sql.functions import * from pyspark.sql.types import * from pyspark.sql.window import Window def _get_quarter_from_csv_file_name(): return substring_index(substring_index(input_file_name(), '.', 1), '_', -1) _csv_perf_schema = StructType([ StructField('loan_id', LongType()), StructField('monthly_reporting_period', StringType()), StructField('servicer', StringType()), StructField('interest_rate', DoubleType()), StructField('current_actual_upb', DoubleType()), StructField('loan_age', DoubleType()), StructField('remaining_months_to_legal_maturity', DoubleType()), StructField('adj_remaining_months_to_maturity', DoubleType()), StructField('maturity_date', StringType()), StructField('msa', DoubleType()), StructField('current_loan_delinquency_status', IntegerType()), StructField('mod_flag', StringType()), StructField('zero_balance_code', StringType()), StructField('zero_balance_effective_date', StringType()), StructField('last_paid_installment_date', StringType()), StructField('foreclosed_after', StringType()), StructField('disposition_date', StringType()), StructField('foreclosure_costs', DoubleType()), 
StructField('prop_preservation_and_repair_costs', DoubleType()), StructField('asset_recovery_costs', DoubleType()), StructField('misc_holding_expenses', DoubleType()), StructField('holding_taxes', DoubleType()), StructField('net_sale_proceeds', DoubleType()), StructField('credit_enhancement_proceeds', DoubleType()), StructField('repurchase_make_whole_proceeds', StringType()), StructField('other_foreclosure_proceeds', DoubleType()), StructField('non_interest_bearing_upb', DoubleType()), StructField('principal_forgiveness_upb', StringType()), StructField('repurchase_make_whole_proceeds_flag', StringType()), StructField('foreclosure_principal_write_off_amount', StringType()), StructField('servicing_activity_indicator', StringType())]) _csv_acq_schema = StructType([ StructField('loan_id', LongType()), StructField('orig_channel', StringType()), StructField('seller_name', StringType()), StructField('orig_interest_rate', DoubleType()), StructField('orig_upb', IntegerType()), StructField('orig_loan_term', IntegerType()), StructField('orig_date', StringType()), StructField('first_pay_date', StringType()), StructField('orig_ltv', DoubleType()), StructField('orig_cltv', DoubleType()), StructField('num_borrowers', DoubleType()), StructField('dti', DoubleType()), StructField('borrower_credit_score', DoubleType()), StructField('first_home_buyer', StringType()), StructField('loan_purpose', StringType()), StructField('property_type', StringType()), StructField('num_units', IntegerType()), StructField('occupancy_status', StringType()), StructField('property_state', StringType()), StructField('zip', IntegerType()), StructField('mortgage_insurance_percent', DoubleType()), StructField('product_type', StringType()), StructField('coborrow_credit_score', DoubleType()), StructField('mortgage_insurance_type', DoubleType()), StructField('relocation_mortgage_indicator', StringType())]) _name_mapping = [ ("WITMER FUNDING, LLC", "Witmer"), ("WELLS FARGO CREDIT RISK TRANSFER SECURITIES TRUST 
2015", "Wells Fargo"), ("WELLS FARGO BANK, NA" , "Wells Fargo"), ("WELLS FARGO BANK, N.A." , "Wells Fargo"), ("WELLS FARGO BANK, NA" , "Wells Fargo"), ("USAA FEDERAL SAVINGS BANK" , "USAA"), ("UNITED SHORE FINANCIAL SERVICES, LLC D\\/B\\/A UNITED WHOLESALE MORTGAGE" , "United Seq(e"), ("U.S. BANK N.A." , "US Bank"), ("SUNTRUST MORTGAGE INC." , "Suntrust"), ("STONEGATE MORTGAGE CORPORATION" , "Stonegate Mortgage"), ("STEARNS LENDING, LLC" , "Stearns Lending"), ("STEARNS LENDING, INC." , "Stearns Lending"), ("SIERRA PACIFIC MORTGAGE COMPANY, INC." , "Sierra Pacific Mortgage"), ("REGIONS BANK" , "Regions"), ("RBC MORTGAGE COMPANY" , "RBC"), ("QUICKEN LOANS INC." , "Quicken Loans"), ("PULTE MORTGAGE, L.L.C." , "Pulte Mortgage"), ("PROVIDENT FUNDING ASSOCIATES, L.P." , "Provident Funding"), ("PROSPECT MORTGAGE, LLC" , "Prospect Mortgage"), ("PRINCIPAL RESIDENTIAL MORTGAGE CAPITAL RESOURCES, LLC" , "Principal Residential"), ("PNC BANK, N.A." , "PNC"), ("PMT CREDIT RISK TRANSFER TRUST 2015-2" , "PennyMac"), ("PHH MORTGAGE CORPORATION" , "PHH Mortgage"), ("PENNYMAC CORP." , "PennyMac"), ("PACIFIC UNION FINANCIAL, LLC" , "Other"), ("OTHER" , "Other"), ("NYCB MORTGAGE COMPANY, LLC" , "NYCB"), ("NEW YORK COMMUNITY BANK" , "NYCB"), ("NETBANK FUNDING SERVICES" , "Netbank"), ("NATIONSTAR MORTGAGE, LLC" , "Nationstar Mortgage"), ("METLIFE BANK, NA" , "Metlife"), ("LOANDEPOT.COM, LLC" , "LoanDepot.com"), ("J.P. MORGAN MADISON AVENUE SECURITIES TRUST, SERIES 2015-1" , "JP Morgan Chase"), ("J.P. MORGAN MADISON AVENUE SECURITIES TRUST, SERIES 2014-1" , "JP Morgan Chase"), ("JPMORGAN CHASE BANK, NATIONAL ASSOCIATION" , "JP Morgan Chase"), ("JPMORGAN CHASE BANK, NA" , "JP Morgan Chase"), ("JP MORGAN CHASE BANK, NA" , "JP Morgan Chase"), ("IRWIN MORTGAGE, CORPORATION" , "Irwin Mortgage"), ("IMPAC MORTGAGE CORP." , "Impac Mortgage"), ("HSBC BANK USA, NATIONAL ASSOCIATION" , "HSBC"), ("HOMEWARD RESIDENTIAL, INC." 
, "Homeward Mortgage"), ("HOMESTREET BANK" , "Other"), ("HOMEBRIDGE FINANCIAL SERVICES, INC." , "HomeBridge"), ("HARWOOD STREET FUNDING I, LLC" , "Harwood Mortgage"), ("GUILD MORTGAGE COMPANY" , "Guild Mortgage"), ("GMAC MORTGAGE, LLC (USAA FEDERAL SAVINGS BANK)" , "GMAC"), ("GMAC MORTGAGE, LLC" , "GMAC"), ("GMAC (USAA)" , "GMAC"), ("FREMONT BANK" , "Fremont Bank"), ("FREEDOM MORTGAGE CORP." , "Freedom Mortgage"), ("FRANKLIN AMERICAN MORTGAGE COMPANY" , "Franklin America"), ("FLEET NATIONAL BANK" , "Fleet National"), ("FLAGSTAR CAPITAL MARKETS CORPORATION" , "Flagstar Bank"), ("FLAGSTAR BANK, FSB" , "Flagstar Bank"), ("FIRST TENNESSEE BANK NATIONAL ASSOCIATION" , "Other"), ("FIFTH THIRD BANK" , "Fifth Third Bank"), ("FEDERAL HOME LOAN BANK OF CHICAGO" , "Fedral Home of Chicago"), ("FDIC, RECEIVER, INDYMAC FEDERAL BANK FSB" , "FDIC"), ("DOWNEY SAVINGS AND LOAN ASSOCIATION, F.A." , "Downey Mortgage"), ("DITECH FINANCIAL LLC" , "Ditech"), ("CITIMORTGAGE, INC." , "Citi"), ("CHICAGO MORTGAGE SOLUTIONS DBA INTERFIRST MORTGAGE COMPANY" , "Chicago Mortgage"), ("CHICAGO MORTGAGE SOLUTIONS DBA INTERBANK MORTGAGE COMPANY" , "Chicago Mortgage"), ("CHASE HOME FINANCE, LLC" , "JP Morgan Chase"), ("CHASE HOME FINANCE FRANKLIN AMERICAN MORTGAGE COMPANY" , "JP Morgan Chase"), ("CHASE HOME FINANCE (CIE 1)" , "JP Morgan Chase"), ("CHASE HOME FINANCE" , "JP Morgan Chase"), ("CASHCALL, INC." , "CashCall"), ("CAPITAL ONE, NATIONAL ASSOCIATION" , "Capital One"), ("CALIBER HOME LOANS, INC." , "Caliber Funding"), ("BISHOPS GATE RESIDENTIAL MORTGAGE TRUST" , "Bishops Gate Mortgage"), ("BANK OF AMERICA, N.A." 
, "Bank of America"), ("AMTRUST BANK" , "AmTrust"), ("AMERISAVE MORTGAGE CORPORATION" , "Amerisave"), ("AMERIHOME MORTGAGE COMPANY, LLC" , "AmeriHome Mortgage"), ("ALLY BANK" , "Ally Bank"), ("ACADEMY MORTGAGE CORPORATION" , "Academy Mortgage"), ("NO CASH-OUT REFINANCE" , "OTHER REFINANCE"), ("REFINANCE - NOT SPECIFIED" , "OTHER REFINANCE"), ("Other REFINANCE" , "OTHER REFINANCE")] cate_col_names = [ "orig_channel", "first_home_buyer", "loan_purpose", "property_type", "occupancy_status", "property_state", "relocation_mortgage_indicator", "seller_name", "mod_flag" ] # Numberic columns label_col_name = "delinquency_12" numeric_col_names = [ "orig_interest_rate", "orig_upb", "orig_loan_term", "orig_ltv", "orig_cltv", "num_borrowers", "dti", "borrower_credit_score", "num_units", "zip", "mortgage_insurance_percent", "current_loan_delinquency_status", "current_actual_upb", "interest_rate", "loan_age", "msa", "non_interest_bearing_upb", label_col_name ] all_col_names = cate_col_names + numeric_col_names def read_perf_csv(spark, path): return spark.read.format('csv') \ .option('nullValue', '') \ .option('header', 'false') \ .option('delimiter', '|') \ .schema(_csv_perf_schema) \ .load(path) \ .withColumn('quarter', _get_quarter_from_csv_file_name()) def read_acq_csv(spark, path): return spark.read.format('csv') \ .option('nullValue', '') \ .option('header', 'false') \ .option('delimiter', '|') \ .schema(_csv_acq_schema) \ .load(path) \ .withColumn('quarter', _get_quarter_from_csv_file_name()) def _parse_dates(perf): return perf \ .withColumn('monthly_reporting_period', to_date(col('monthly_reporting_period'), 'MM/dd/yyyy')) \ .withColumn('monthly_reporting_period_month', month(col('monthly_reporting_period'))) \ .withColumn('monthly_reporting_period_year', year(col('monthly_reporting_period'))) \ .withColumn('monthly_reporting_period_day', dayofmonth(col('monthly_reporting_period'))) \ .withColumn('last_paid_installment_date', to_date(col('last_paid_installment_date'), 
'MM/dd/yyyy')) \ .withColumn('foreclosed_after', to_date(col('foreclosed_after'), 'MM/dd/yyyy')) \ .withColumn('disposition_date', to_date(col('disposition_date'), 'MM/dd/yyyy')) \ .withColumn('maturity_date', to_date(col('maturity_date'), 'MM/yyyy')) \ .withColumn('zero_balance_effective_date', to_date(col('zero_balance_effective_date'), 'MM/yyyy')) def _create_perf_deliquency(spark, perf): aggDF = perf.select( col("quarter"), col("loan_id"), col("current_loan_delinquency_status"), when(col("current_loan_delinquency_status") >= 1, col("monthly_reporting_period")).alias("delinquency_30"), when(col("current_loan_delinquency_status") >= 3, col("monthly_reporting_period")).alias("delinquency_90"), when(col("current_loan_delinquency_status") >= 6, col("monthly_reporting_period")).alias("delinquency_180")) \ .groupBy("quarter", "loan_id") \ .agg( max("current_loan_delinquency_status").alias("delinquency_12"), min("delinquency_30").alias("delinquency_30"), min("delinquency_90").alias("delinquency_90"), min("delinquency_180").alias("delinquency_180")) \ .select( col("quarter"), col("loan_id"), (col("delinquency_12") >= 1).alias("ever_30"), (col("delinquency_12") >= 3).alias("ever_90"), (col("delinquency_12") >= 6).alias("ever_180"), col("delinquency_30"), col("delinquency_90"), col("delinquency_180")) joinedDf = perf \ .withColumnRenamed("monthly_reporting_period", "timestamp") \ .withColumnRenamed("monthly_reporting_period_month", "timestamp_month") \ .withColumnRenamed("monthly_reporting_period_year", "timestamp_year") \ .withColumnRenamed("current_loan_delinquency_status", "delinquency_12") \ .withColumnRenamed("current_actual_upb", "upb_12") \ .select("quarter", "loan_id", "timestamp", "delinquency_12", "upb_12", "timestamp_month", "timestamp_year") \ .join(aggDF, ["loan_id", "quarter"], "left_outer") # calculate the 12 month delinquency and upb values months = 12 monthArray = [lit(x) for x in range(0, 12)] # explode on a small amount of data is actually slightly more 
efficient than a cross join testDf = joinedDf \ .withColumn("month_y", explode(array(monthArray))) \ .select( col("quarter"), floor(((col("timestamp_year") * 12 + col("timestamp_month")) - 24000) / months).alias("josh_mody"), floor(((col("timestamp_year") * 12 + col("timestamp_month")) - 24000 - col("month_y")) / months).alias("josh_mody_n"), col("ever_30"), col("ever_90"), col("ever_180"), col("delinquency_30"), col("delinquency_90"), col("delinquency_180"), col("loan_id"), col("month_y"), col("delinquency_12"), col("upb_12")) \ .groupBy("quarter", "loan_id", "josh_mody_n", "ever_30", "ever_90", "ever_180", "delinquency_30", "delinquency_90", "delinquency_180", "month_y") \ .agg(max("delinquency_12").alias("delinquency_12"), min("upb_12").alias("upb_12")) \ .withColumn("timestamp_year", floor((lit(24000) + (col("josh_mody_n") * lit(months)) + (col("month_y") - 1)) / lit(12))) \ .selectExpr('*', 'pmod(24000 + (josh_mody_n * {}) + month_y, 12) as timestamp_month_tmp'.format(months)) \ .withColumn("timestamp_month", when(col("timestamp_month_tmp") == lit(0), lit(12)).otherwise(col("timestamp_month_tmp"))) \ .withColumn("delinquency_12", ((col("delinquency_12") > 3).cast("int") + (col("upb_12") == 0).cast("int")).alias("delinquency_12")) \ .drop("timestamp_month_tmp", "josh_mody_n", "month_y") return perf.withColumnRenamed("monthly_reporting_period_month", "timestamp_month") \ .withColumnRenamed("monthly_reporting_period_year", "timestamp_year") \ .join(testDf, ["quarter", "loan_id", "timestamp_year", "timestamp_month"], "left") \ .drop("timestamp_year", "timestamp_month") def _create_acquisition(spark, acq): nameMapping = spark.createDataFrame(_name_mapping, ["from_seller_name", "to_seller_name"]) return acq.join(nameMapping, col("seller_name") == col("from_seller_name"), "left") \ .drop("from_seller_name") \ .withColumn("old_name", col("seller_name")) \ .withColumn("seller_name", coalesce(col("to_seller_name"), col("seller_name"))) \ .drop("to_seller_name") \ 
.withColumn("orig_date", to_date(col("orig_date"), "MM/yyyy")) \ .withColumn("first_pay_date", to_date(col("first_pay_date"), "MM/yyyy")) \ def _gen_dictionary(etl_df, col_names): cnt_table = etl_df.select(posexplode(array([col(i) for i in col_names])))\ .withColumnRenamed("pos", "column_id")\ .withColumnRenamed("col", "data")\ .filter("data is not null")\ .groupBy("column_id", "data")\ .count() windowed = Window.partitionBy("column_id").orderBy(desc("count")) return cnt_table.withColumn("id", row_number().over(windowed)).drop("count") def _cast_string_columns_to_numeric(spark, input_df): cached_dict_df = _gen_dictionary(input_df, cate_col_names).cache() output_df = input_df # Generate the final table with all columns being numeric. for col_pos, col_name in enumerate(cate_col_names): col_dict_df = cached_dict_df.filter(col("column_id") == col_pos)\ .drop("column_id")\ .withColumnRenamed("data", col_name) output_df = output_df.join(broadcast(col_dict_df), col_name, "left")\ .drop(col_name)\ .withColumnRenamed("id", col_name) return output_df def run_mortgage(spark, perf, acq): parsed_perf = _parse_dates(perf) perf_deliqency = _create_perf_deliquency(spark, parsed_perf) cleaned_acq = _create_acquisition(spark, acq) df = perf_deliqency.join(cleaned_acq, ["loan_id", "quarter"], "inner") test_quarters = ['2016Q1','2016Q2','2016Q3','2016Q4'] train_df = df.filter(~df.quarter.isin(test_quarters)).drop("quarter") test_df = df.filter(df.quarter.isin(test_quarters)).drop("quarter") casted_train_df = _cast_string_columns_to_numeric(spark, train_df)\ .select(all_col_names)\ .withColumn(label_col_name, when(col(label_col_name) > 0, 1).otherwise(0))\ .fillna(float(0)) casted_test_df = _cast_string_columns_to_numeric(spark, test_df)\ .select(all_col_names)\ .withColumn(label_col_name, when(col(label_col_name) > 0, 1).otherwise(0))\ .fillna(float(0)) return casted_train_df, casted_test_df ``` ### Define Spark conf and Create Spark Session For details explanation for spark conf, 
please go to Spark RAPIDS [config guide](https://nvidia.github.io/spark-rapids/docs/configs.html). ``` sc.stop() conf = SparkConf().setAppName("MortgageETL-CPU") conf.set("spark.executor.instances", "20") conf.set("spark.executor.cores", "7") # spark.executor.cores times spark.executor.instances should equal total cores. conf.set("spark.task.cpus", "1") conf.set("spark.executor.memory", "36g") conf.set("spark.locality.wait", "0s") conf.set("spark.sql.files.maxPartitionBytes", "512m") conf.set("spark.executor.resource.gpu.amount", "0") conf.set("spark.task.resource.gpu.amount", "0") conf.set("spark.plugins", " ") conf.set("spark.sql.broadcastTimeout", "7200") spark = SparkSession.builder \ .config(conf=conf) \ .getOrCreate() sc = spark.sparkContext ``` ### Define Data Input/Output location ``` orig_perf_path = 'gs://dataproc-nv-demo/mortgage_full/perf/*' orig_acq_path = 'gs://dataproc-nv-demo/mortgage_full/acq/*' train_path = 'gs://dataproc-nv-demo/mortgage_cpu/train/' test_path = 'gs://dataproc-nv-demo/mortgage_cpu/test/' tmp_perf_path = 'gs://dataproc-nv-demo/mortgage_parquet_cpu/perf/' tmp_acq_path = 'gs://dataproc-nv-demo/mortgage_parquet_cpu/acq/' ``` ### Read CSV data and Transcode to Parquet ``` # Lets transcode the data first start = time.time() # we want a few big files instead of lots of small files spark.conf.set('spark.sql.files.maxPartitionBytes', '200G') acq = read_acq_csv(spark, orig_acq_path) acq.repartition(20).write.parquet(tmp_acq_path, mode='overwrite') perf = read_perf_csv(spark, orig_perf_path) perf.coalesce(80).write.parquet(tmp_perf_path, mode='overwrite') end = time.time() print(end - start) ``` ### Execute ETL Code Defined in 1st Cell ``` # Now lets actually process the data\n", start = time.time() spark.conf.set('spark.sql.shuffle.partitions', '160') perf = spark.read.parquet(tmp_perf_path) acq = spark.read.parquet(tmp_acq_path) train_out, test_out = run_mortgage(spark, perf, acq) train_out.write.parquet(train_path, mode='overwrite') end = 
time.time() print(end - start) test_out.write.parquet(test_path, mode='overwrite') end = time.time() print(end - start) ``` ### Print Physical Plan ``` train_out.explain() ```
github_jupyter
### Univariate linear regression using gradient descent ``` import matplotlib.pyplot as plt import numpy as np from sklearn import datasets, linear_model from sklearn.metrics import mean_squared_error, r2_score %matplotlib inline data_train = np.zeros((2,20)) data_train[0] = [4, 5, 5, 7, 8, 8, 9, 11, 11, 12, 13, 14, 16, 18, 19, 19, 21, 22, 25, 27] #x (input) data_train[1] = [21, 24, 27, 30, 29, 31, 32, 33, 36, 37, 41, 37, 40, 39, 41, 42, 44, 45, 45, 48] #y (what we want to predict) data_test = np.zeros((2,5)) data_test[0] = [40, 15, 19, 23, 6] #x (input) data_test[1] = [61, 39, 43, 46, 26] #y (what we want to predict) # Create linear regression object regr = linear_model.LinearRegression() # Train the model using the training sets regr.fit(data_train[0].reshape(-1, 1), data_train[1].reshape(-1, 1)) # Make predictions using the testing set data_test_pred = regr.predict(data_test[0].reshape(-1, 1)) # The coefficients print regr.coef_ print regr.intercept_ # The mean squared error print("Mean squared error: %.2f" % mean_squared_error(data_test[1].reshape(-1, 1), data_test_pred)) # Explained variance score: 1 is perfect prediction print('Variance score: %.2f' % r2_score(data_test[1].reshape(-1, 1), data_test_pred)) # Plot outputs plt.plot(data_train[0], data_train[1], 'bx') plt.plot(data_test[0], data_test_pred, color='red', linewidth=1) plt.ylabel('Y_train') plt.xlabel('X_train') plt.title('Training dataset') plt.xticks(()) plt.yticks(()) plt.show() import pytorch as torch from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression X, y = load_iris(return_X_y=True) print X.shape X_1 = X[:,0].reshape(-1,1) print X_1.shape print y.shape # Using only one feature classifier1 = LogisticRegression(random_state=0).fit(X_1, y) classifier1.predict(X[:2, 0]) classifier1.predict_proba(X[:2, 0]) classifier1.score(X_1, y) # Using only one feature classifier1 = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X, y) 
classifier1.predict(X[:2, :]) classifier1.predict_proba(X[:2, :]) classifier1.score(X, y) # Load the diabetes dataset diabetes = datasets.load_diabetes() # Use only one feature diabetes_X = diabetes.data[:, np.newaxis, 2] # Split the data into training/testing sets diabetes_X_train = diabetes_X[:-20] diabetes_X_test = diabetes_X[-20:] # Split the targets into training/testing sets diabetes_y_train = diabetes.target[:-20] diabetes_y_test = diabetes.target[-20:] ``` #### Dataset ``` data_train = np.zeros((2,20)) data_train[0] = [4, 5, 5, 7, 8, 8, 9, 11, 11, 12, 13, 14, 16, 18, 19, 19, 21, 22, 25, 27] #x (input) data_train[1] = [21, 24, 27, 30, 29, 31, 32, 33, 36, 37, 41, 37, 40, 39, 41, 42, 44, 45, 45, 48] #y (what we want to predict) plt.plot(data_train[0], data_train[1], 'bx') plt.ylabel('Y_train') plt.xlabel('X_train') plt.title('Training dataset') plt.show() ``` #### Implement prediction function - Based on hypothesis h(x) = t0 + t1*x ``` def make_prediction(X, t0, t1): y = (t1 * X) + t0 return y ``` #### Implement cost function - Using standard mean squared error ``` def compute_cost(y, y_predicted): squared_differences = [data**2 for data in (y-y_predicted)] cost = sum(squared_differences) / float(len(y)) return cost ``` #### Implement gradient descent function - For each epoch: - Compute the predicted y values using the current t0 and t1 values - Compute the cost function on the entire dataset - Compute the gradients - Update the current t0 and t1 values with gradient descent ``` def gradient_descent(X, y, t0_current=0, t1_current=0, epochs=1000, learning_rate=0.0001): cost_array = np.zeros((4,epochs)) for i in range(epochs): y_current = make_prediction(X, t0_current, t1_current) cost = compute_cost(y, y_current) t1_grad = -2/float(len(y)) * sum(X * (y - y_current)) t0_grad = -2/float(len(y)) * sum(y - y_current) t1_current = t1_current - (learning_rate * t1_grad) t0_current = t0_current - (learning_rate * t0_grad) cost_array[:,i] = [i, cost, t0_current, 
t1_current] return t1_current, t0_current, cost, cost_array ``` #### Run the algorithm ``` [t1_current, t0_current, cost, cost_array] = gradient_descent(data_train[0], data_train[1], t0_current=0, t1_current=0, epochs=20000, learning_rate=0.001) print "The is h(x) = t0 + t1*x with t0 = {0} and t1 = {1}.".format(t0_current, t1_current) print "This solution has a cost of {0}.".format(cost) ``` #### Plot the hypothesis ``` plt.plot(data_train[0], data_train[1], 'bx') plt.ylabel('Y_train') plt.xlabel('X_train') plt.title('Training dataset') h = np.linspace(0, 30, 100) plt.plot(h, t0_current+t1_current*h) plt.show() ``` #### Plot the cost vs the number of epochs - Useful to make sure that your algorithm is learning and the cost is being minimized - We can observe that the algorithm starts to converge after 2500 epochs ``` plt.plot(cost_array[0], cost_array[1]) plt.ylabel('Cost') plt.xlabel('epochs') plt.title('Cost vs epochs') plt.show() ``` #### Plot the evolution of the t0 param. vs the number of epochs - We initialized the t0 param. to 0 here. ``` plt.plot(cost_array[0], cost_array[2]) plt.ylabel('t0') plt.xlabel('epochs') plt.title('t0 vs epochs') plt.show() ``` #### Plot the evolution of the t1 param. vs the number of epochs - We initialized the t1 param. to 0 here. ``` plt.plot(cost_array[0], cost_array[3]) plt.ylabel('t1') plt.xlabel('epochs') plt.title('t1 vs epochs') plt.show() ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Post-training integer quantization <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/lite/performance/post_training_integer_quant"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_integer_quant.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_integer_quant.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> ## Overview [TensorFlow Lite](https://www.tensorflow.org/lite/) now supports converting all model values (weights and activations) to 8-bit integers when converting from TensorFlow to TensorFlow Lite's flat buffer format. This results in a 4x reduction in model size and a 3 to 4x performance improvement on CPU performance. In addition, this fully quantized model can be consumed by integer-only hardware accelerators. 
In contrast to [post-training "on-the-fly" quantization](https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/tutorials/post_training_quant.ipynb)—which stores only the weights as 8-bit integers—this technique statically quantizes all weights *and* activations during model conversion. In this tutorial, you'll train an MNIST model from scratch, check its accuracy in TensorFlow, and then convert the saved model into a Tensorflow Lite flatbuffer with full quantization. Finally, you'll check the accuracy of the converted model and compare it to the original float model. The training script, `mnist.py`, is available from the [TensorFlow official MNIST tutorial](https://github.com/tensorflow/models/tree/master/official/mnist). ## Build an MNIST model ### Setup ``` ! pip uninstall -y tensorflow ! pip install -U tf-nightly import tensorflow as tf tf.enable_eager_execution() ! git clone --depth 1 https://github.com/tensorflow/models import sys import os if sys.version_info.major >= 3: import pathlib else: import pathlib2 as pathlib # Add `models` to the python path. models_path = os.path.join(os.getcwd(), "models") sys.path.append(models_path) ``` ### Train and export the model ``` saved_models_root = "/tmp/mnist_saved_model" # The above path addition is not visible to subprocesses, add the path for the subprocess as well. # Note: channels_last is required here or the conversion may fail. !PYTHONPATH={models_path} python models/official/mnist/mnist.py --train_epochs=1 --export_dir {saved_models_root} --data_format=channels_last ``` This training won't take long because you're training the model for just a single epoch, which trains to about 96% accuracy. ### Convert to a TensorFlow Lite model Using the [Python `TFLiteConverter`](https://www.tensorflow.org/lite/convert/python_api), you can now convert the trained model into a TensorFlow Lite model. 
The trained model is saved in the `saved_models_root` directory, which is named with a timestamp. So select the most recent directory: ``` saved_model_dir = str(sorted(pathlib.Path(saved_models_root).glob("*"))[-1]) saved_model_dir ``` Now load the model using the `TFLiteConverter`: ``` import tensorflow as tf tf.enable_eager_execution() tf.logging.set_verbosity(tf.logging.DEBUG) converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) tflite_model = converter.convert() ``` Write it out to a `.tflite` file: ``` tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/") tflite_models_dir.mkdir(exist_ok=True, parents=True) tflite_model_file = tflite_models_dir/"mnist_model.tflite" tflite_model_file.write_bytes(tflite_model) ``` Now you have a trained MNIST model that's converted to a `.tflite` file, but it's still using 32-bit float values for all parameter data. So let's convert the model again, this time using quantization... #### Convert using quantization First, first set the `optimizations` flag to optimize for size: ``` tf.logging.set_verbosity(tf.logging.INFO) converter.optimizations = [tf.lite.Optimize.DEFAULT] ``` Now, in order to create quantized values with an accurate dynamic range of activations, you need to provide a representative dataset: ``` mnist_train, _ = tf.keras.datasets.mnist.load_data() images = tf.cast(mnist_train[0], tf.float32)/255.0 mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1) def representative_data_gen(): for input_value in mnist_ds.take(100): yield [input_value] converter.representative_dataset = representative_data_gen ``` Finally, convert the model like usual. By default, the converted model will still use float input and outputs for invocation convenience. 
``` tflite_quant_model = converter.convert() tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite" tflite_model_quant_file.write_bytes(tflite_quant_model) ``` Note how the resulting file is approximately `1/4` the size: ``` !ls -lh {tflite_models_dir} ``` Your model should now be fully quantized. However, if you convert a model that includes any operations that TensorFlow Lite cannot quantize, those ops are left in floating point. This allows for conversion to complete so you have a smaller and more efficient model, but the model won't be compatible with some ML accelerators that require full integer quantization. Also, this model still uses float values for input and output, which also is not compatible with some accelerators. So to ensure that the converted model is fully quantized (make the converter throw an error if it encounters an operation it cannot quantize) and to use integers for the model's input and output, you need to convert the model again using these additional configurations: ``` converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.inference_input_type = tf.uint8 converter.inference_output_type = tf.uint8 tflite_quant_model = converter.convert() tflite_model_quant_file = tflite_models_dir/"mnist_model_quant_io.tflite" tflite_model_quant_file.write_bytes(tflite_quant_model) ``` In this example, the resulting model size remains the same because all operations successfully quantized to begin with. However, this new model now uses quantized input and output, making it compatible with more accelerators. ## Run the TensorFlow Lite models Run the TensorFlow Lite model using the Python TensorFlow Lite Interpreter. 
### Load the test data First, let's load the MNIST test data to feed to the model: ``` import numpy as np _, mnist_test = tf.keras.datasets.mnist.load_data() images, labels = tf.cast(mnist_test[0], tf.float32)/255.0, mnist_test[1] mnist_ds = tf.data.Dataset.from_tensor_slices((images, labels)).batch(1) ``` ### Load the model into the interpreters ``` interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file)) interpreter.allocate_tensors() interpreter_quant = tf.lite.Interpreter(model_path=str(tflite_model_quant_file)) interpreter_quant.allocate_tensors() ``` ### Test the models on one image ``` for img, label in mnist_ds: break interpreter.set_tensor(interpreter.get_input_details()[0]["index"], img) interpreter.invoke() predictions = interpreter.get_tensor( interpreter.get_output_details()[0]["index"]) import matplotlib.pylab as plt plt.imshow(img[0]) template = "True:{true}, predicted:{predict}" _ = plt.title(template.format(true= str(label[0].numpy()), predict=str(predictions[0]))) plt.grid(False) interpreter_quant.set_tensor( interpreter_quant.get_input_details()[0]["index"], img) interpreter_quant.invoke() predictions = interpreter_quant.get_tensor( interpreter_quant.get_output_details()[0]["index"]) plt.imshow(img[0]) template = "True:{true}, predicted:{predict}" _ = plt.title(template.format(true= str(label[0].numpy()), predict=str(predictions[0]))) plt.grid(False) ``` ### Evaluate the models ``` def eval_model(interpreter, mnist_ds): total_seen = 0 num_correct = 0 input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] for img, label in mnist_ds: total_seen += 1 interpreter.set_tensor(input_index, img) interpreter.invoke() predictions = interpreter.get_tensor(output_index) if predictions == label.numpy(): num_correct += 1 if total_seen % 500 == 0: print("Accuracy after %i images: %f" % (total_seen, float(num_correct) / float(total_seen))) return float(num_correct) / float(total_seen) # 
Create smaller dataset for demonstration purposes mnist_ds_demo = mnist_ds.take(2000) print(eval_model(interpreter, mnist_ds_demo)) ``` Repeat the evaluation on the fully quantized model to obtain: ``` # NOTE: Colab runs on server CPUs. At the time of writing this, TensorFlow Lite # doesn't have super optimized server CPU kernels. For this reason this may be # slower than the above float interpreter. But for mobile CPUs, considerable # speedup can be observed. # Only use 2000 for demonstration purposes print(eval_model(interpreter_quant, mnist_ds_demo)) ``` In this example, you have fully quantized a model with no difference in the accuracy.
github_jupyter
# Using BagIt to tag oceanographic data [`BagIt`](https://en.wikipedia.org/wiki/BagIt) is a packaging format that supports storage of arbitrary digital content. The "bag" consists of arbitrary content and "tags," the metadata files. `BagIt` packages can be used to facilitate data sharing with federal archive centers - thus ensuring digital preservation of oceanographic datasets within IOOS and its regional associations. NOAA NCEI supports reading from a Web Accessible Folder (WAF) containing bagit archives. For an example please see: http://ncei.axiomdatascience.com/cencoos/ On this notebook we will use the [python interface](http://libraryofcongress.github.io/bagit-python) for `BagIt` to create a "bag" of a time-series profile data. First let us load our data from a comma separated values file (`CSV`). ``` import os import pandas as pd fname = os.path.join("data", "dsg", "timeseriesProfile.csv") df = pd.read_csv(fname, parse_dates=["time"]) df.head() ``` Instead of "bagging" the `CSV` file we will use this create a metadata rich netCDF file. We can convert the table to a `DSG`, Discrete Sampling Geometry, using `pocean.dsg`. The first thing we need to do is to create a mapping from the data column names to the netCDF `axes`. ``` axes = {"t": "time", "x": "lon", "y": "lat", "z": "depth"} ``` Now we can create a [Orthogonal Multidimensional Timeseries Profile](http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_orthogonal_multidimensional_array_representation_of_time_series) object... ``` import os import tempfile from pocean.dsg import OrthogonalMultidimensionalTimeseriesProfile as omtsp output_fp, output = tempfile.mkstemp() os.close(output_fp) ncd = omtsp.from_dataframe(df.reset_index(), output=output, axes=axes, mode="a") ``` ... And add some extra metadata before we close the file. 
``` naming_authority = "ioos" st_id = "Station1" ncd.naming_authority = naming_authority ncd.id = st_id print(ncd) ncd.close() ``` Time to create the archive for the file with `BagIt`. We have to create a folder for the bag. ``` temp_bagit_folder = tempfile.mkdtemp() temp_data_folder = os.path.join(temp_bagit_folder, "data") ``` Now we can create the bag and copy the netCDF file to a `data` sub-folder. ``` import shutil import bagit bag = bagit.make_bag(temp_bagit_folder, checksum=["sha256"]) shutil.copy2(output, temp_data_folder + "/parameter1.nc") ``` Last, but not least, we have to set bag metadata and update the existing bag with it. ``` urn = "urn:ioos:station:{naming_authority}:{st_id}".format( naming_authority=naming_authority, st_id=st_id ) bag_meta = { "Bag-Count": "1 of 1", "Bag-Group-Identifier": "ioos_bagit_testing", "Contact-Name": "Kyle Wilcox", "Contact-Phone": "907-230-0304", "Contact-Email": "axiom+ncei@axiomdatascience.com", "External-Identifier": urn, "External-Description": "Sensor data from station {}".format(urn), "Internal-Sender-Identifier": urn, "Internal-Sender-Description": "Station - URN:{}".format(urn), "Organization-address": "1016 W 6th Ave, Ste. 105, Anchorage, AK 99501, USA", "Source-Organization": "Axiom Data Science", } bag.info.update(bag_meta) bag.save(manifests=True, processes=4) ``` That is it! Simple and efficient!! The cell below illustrates the bag directory tree. (Note that the commands below will not work on Windows and some \*nix systems may require the installation of the command `tree`, however, they are only need for this demonstration.) ``` !tree $temp_bagit_folder !cat $temp_bagit_folder/manifest-sha256.txt ``` We can add more files to the bag as needed. 
``` shutil.copy2(output, temp_data_folder + "/parameter2.nc") shutil.copy2(output, temp_data_folder + "/parameter3.nc") shutil.copy2(output, temp_data_folder + "/parameter4.nc") bag.save(manifests=True, processes=4) !tree $temp_bagit_folder !cat $temp_bagit_folder/manifest-sha256.txt ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Filter/filter_in_list.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Filter/filter_in_list.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Filter/filter_in_list.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). 
Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset states = ee.FeatureCollection('TIGER/2018/States') selected = states.filter(ee.Filter.inList("NAME", ['California', 'Nevada', 'Utah', 'Arizona'])) Map.centerObject(selected, 6) Map.addLayer(ee.Image().paint(selected, 0, 2), {'palette': 'yellow'}, 'Selected') ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
github_jupyter
# Adversarial-Robustness-Toolbox for scikit-learn AdaBoostClassifier ``` from sklearn.ensemble import AdaBoostClassifier from sklearn.datasets import load_iris import numpy as np from matplotlib import pyplot as plt from art.estimators.classification import SklearnClassifier from art.attacks.evasion import ZooAttack from art.utils import load_mnist import warnings warnings.filterwarnings('ignore') ``` ## 1 Training scikit-learn AdaBoostClassifier and attacking with ART Zeroth Order Optimization attack ``` def get_adversarial_examples(x_train, y_train): # Create and fit AdaBoostClassifier model = AdaBoostClassifier() model.fit(X=x_train, y=y_train) # Create ART classfier for scikit-learn AdaBoostClassifier art_classifier = SklearnClassifier(model=model) # Create ART Zeroth Order Optimization attack zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=20, binary_search_steps=10, initial_const=1e-3, abort_early=True, use_resize=False, use_importance=False, nb_parallel=1, batch_size=1, variable_h=0.2) # Generate adversarial samples with ART Zeroth Order Optimization attack x_train_adv = zoo.generate(x_train) return x_train_adv, model ``` ## 1.1 Utility functions ``` def get_data(num_classes): x_train, y_train = load_iris(return_X_y=True) x_train = x_train[y_train < num_classes][:, [0, 1]] y_train = y_train[y_train < num_classes] x_train[:, 0][y_train == 0] *= 2 x_train[:, 1][y_train == 2] *= 2 x_train[:, 0][y_train == 0] -= 3 x_train[:, 1][y_train == 2] -= 2 x_train[:, 0] = (x_train[:, 0] - 4) / (9 - 4) x_train[:, 1] = (x_train[:, 1] - 1) / (6 - 1) return x_train, y_train def plot_results(model, x_train, y_train, x_train_adv, num_classes): fig, axs = plt.subplots(1, num_classes, figsize=(num_classes * 5, 5)) colors = ['orange', 'blue', 'green'] for i_class in range(num_classes): # Plot difference vectors for i in range(y_train[y_train == i_class].shape[0]): x_1_0 = x_train[y_train == i_class][i, 0] x_1_1 = 
x_train[y_train == i_class][i, 1] x_2_0 = x_train_adv[y_train == i_class][i, 0] x_2_1 = x_train_adv[y_train == i_class][i, 1] if x_1_0 != x_2_0 or x_1_1 != x_2_1: axs[i_class].plot([x_1_0, x_2_0], [x_1_1, x_2_1], c='black', zorder=1) # Plot benign samples for i_class_2 in range(num_classes): axs[i_class].scatter(x_train[y_train == i_class_2][:, 0], x_train[y_train == i_class_2][:, 1], s=20, zorder=2, c=colors[i_class_2]) axs[i_class].set_aspect('equal', adjustable='box') # Show predicted probability as contour plot h = .01 x_min, x_max = 0, 1 y_min, y_max = 0, 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z_proba = model.predict_proba(np.c_[xx.ravel(), yy.ravel()]) Z_proba = Z_proba[:, i_class].reshape(xx.shape) im = axs[i_class].contourf(xx, yy, Z_proba, levels=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], vmin=0, vmax=1) if i_class == num_classes - 1: cax = fig.add_axes([0.95, 0.2, 0.025, 0.6]) plt.colorbar(im, ax=axs[i_class], cax=cax) # Plot adversarial samples for i in range(y_train[y_train == i_class].shape[0]): x_1_0 = x_train[y_train == i_class][i, 0] x_1_1 = x_train[y_train == i_class][i, 1] x_2_0 = x_train_adv[y_train == i_class][i, 0] x_2_1 = x_train_adv[y_train == i_class][i, 1] if x_1_0 != x_2_0 or x_1_1 != x_2_1: axs[i_class].scatter(x_2_0, x_2_1, zorder=2, c='red', marker='X') axs[i_class].set_xlim((x_min, x_max)) axs[i_class].set_ylim((y_min, y_max)) axs[i_class].set_title('class ' + str(i_class)) axs[i_class].set_xlabel('feature 1') axs[i_class].set_ylabel('feature 2') ``` # 2 Example: Iris dataset ### legend - colored background: probability of class i - orange circles: class 1 - blue circles: class 2 - green circles: class 3 - red crosses: adversarial samples for class i ``` num_classes = 2 x_train, y_train = get_data(num_classes=num_classes) x_train_adv, model = get_adversarial_examples(x_train, y_train) plot_results(model, x_train, y_train, x_train_adv, num_classes) num_classes = 3 x_train, y_train = 
get_data(num_classes=num_classes) x_train_adv, model = get_adversarial_examples(x_train, y_train) plot_results(model, x_train, y_train, x_train_adv, num_classes) ``` # 3 Example: MNIST ## 3.1 Load and transform MNIST dataset ``` (x_train, y_train), (x_test, y_test), min_, max_ = load_mnist() n_samples_train = x_train.shape[0] n_features_train = x_train.shape[1] * x_train.shape[2] * x_train.shape[3] n_samples_test = x_test.shape[0] n_features_test = x_test.shape[1] * x_test.shape[2] * x_test.shape[3] x_train = x_train.reshape(n_samples_train, n_features_train) x_test = x_test.reshape(n_samples_test, n_features_test) y_train = np.argmax(y_train, axis=1) y_test = np.argmax(y_test, axis=1) n_samples_max = 200 x_train = x_train[0:n_samples_max] y_train = y_train[0:n_samples_max] x_test = x_test[0:n_samples_max] y_test = y_test[0:n_samples_max] ``` ## 3.2 Train AdaBoostClassifier classifier ``` model = AdaBoostClassifier(base_estimator=None, n_estimators=50, learning_rate=0.1, algorithm='SAMME.R', random_state=None) model.fit(X=x_train, y=y_train) ``` ## 3.3 Create and apply Zeroth Order Optimization Attack with ART ``` art_classifier = SklearnClassifier(model=model) zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=30, binary_search_steps=20, initial_const=1e-3, abort_early=True, use_resize=False, use_importance=False, nb_parallel=10, batch_size=1, variable_h=0.25) x_train_adv = zoo.generate(x_train) x_test_adv = zoo.generate(x_test) ``` ## 3.4 Evaluate AdaBoostClassifier on benign and adversarial samples ``` score = model.score(x_train, y_train) print("Benign Training Score: %.4f" % score) plt.matshow(x_train[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_train[0:1, :])[0] print("Benign Training Predicted Label: %i" % prediction) score = model.score(x_train_adv, y_train) print("Adversarial Training Score: %.4f" % score) plt.matshow(x_train_adv[0, :].reshape((28, 28))) plt.clim(0, 1) prediction 
= model.predict(x_train_adv[0:1, :])[0] print("Adversarial Training Predicted Label: %i" % prediction) score = model.score(x_test, y_test) print("Benign Test Score: %.4f" % score) plt.matshow(x_test[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_test[0:1, :])[0] print("Benign Test Predicted Label: %i" % prediction) score = model.score(x_test_adv, y_test) print("Adversarial Test Score: %.4f" % score) plt.matshow(x_test_adv[0, :].reshape((28, 28))) plt.clim(0, 1) prediction = model.predict(x_test_adv[0:1, :])[0] print("Adversarial Test Predicted Label: %i" % prediction) ```
github_jupyter
``` import os import sys import numpy as np import PIL.Image import torch import torchvision sys.path. append('../icnn_torch') from icnn import reconstruct_stim from utils import normalise_img, img_preprocess,img_deprocess, get_cnn_features #load CNN model from torchvision #net = torchvision.models.resnet50(pretrained=True) #net = torchvision.models.vgg19(pretrained=True) net = torchvision.models.alexnet(pretrained=True) #set the evaluation model net.eval() #set the preprocessing methods img_mean=np.array([0.485, 0.456, 0.406]).astype(np.float32) img_std=np.array([0.229, 0.224, 0.225]).astype(np.float32) norm = 255 # image is converted from 0-255 into 0-1 #pytorch model is generally RGB input, not BGR bgr = False #save_dir save_dir = './result' save_folder = 'icnn_shortest_demo' save_path = os.path.join(save_dir,save_folder) os.makedirs(save_path, exist_ok=True) #load image and reshape into 227, 227 (for alexnet) org_img = PIL.Image.open('cat_sample_img.jpg').resize((227,227), resample=2) org_img #convert to np.array org_img = np.array(org_img) #preprocessing image for pytorch model prep_img = img_preprocess(np.asarray(org_img), img_mean, img_std, norm) #prep_img.shape => (3, 227, 227) #Convert tensor inputs = torch.tensor(prep_img[np.newaxis]) #show model for selecting layer net #selct target layers as a list target_layer_list = ['features[0]', 'features[3]', 'features[6]', 'features[8]','features[10]', 'classifier[1]', 'classifier[4]', 'classifier[6]'] target_layer_list =target_layer_list = [ 'features[3]'] #obtain feature for the input features = get_cnn_features(net, inputs, target_layer_list) target_layer_dict = dict(zip(target_layer_list, features)) opts = { # Loss function type: {'l2', 'l1', 'inner', 'gram'} 'loss_type': 'l2', 'img_mean': img_mean, 'img_std' : img_std, 'norm': norm, 'bgr': bgr, # The initial image for the optimization (setting to None will use random # noise as initial image) 'initial_input': None, # A python dictionary consists of channels 
to be selected, arranged in # pairs of layer name (key) and channel numbers (value); the channel # numbers of each layer are the channels to be used in the loss function; # use all the channels if some layer not in the dictionary; setting to None # for using all channels for all layers; 'channel': None, 'iter_n': 500, 'input_size': (227,227,3), 'opt_name' : 'Adam', } # perform reconstruction recon_img, loss_list = reconstruct_stim(target_layer_dict, net, **opts) PIL.Image.fromarray(np.uint8(recon_img)) #We can select multiple layers target_layer_list = ['features[0]', 'features[3]', 'features[6]'] #obtain feature for the input features = get_cnn_features(net, inputs, target_layer_list) target_layer_dict = dict(zip(target_layer_list, features)) #optional: layer weight feat_norm_list = np.array([np.linalg.norm(features[i].detach().numpy().astype(np.float)) for i in range(len(features))], dtype= np.float32) # Use the inverse of the squared norm of the CNN features as the weight for each layer weights = 1. 
/ (feat_norm_list**2) # Normalise the weights such that the sum of the weights = 1 weights = weights / weights.sum() #layer_weight = dict(zip(layer_list, weights)) layer_weight= dict(zip(target_layer_list, weights)) opts = { # Loss function type: {'l2', 'l1', 'inner', 'gram'} 'loss_type': 'l2', 'img_mean': img_mean, 'img_std' : img_std, 'norm': norm, 'bgr': bgr, # A python dictionary consists of weight parameter of each layer in the # loss function, arranged in pairs of layer name (key) and weight (value); 'layer_weight': layer_weight, # The initial image for the optimization (setting to None will use random # noise as initial image) 'initial_input': None, # A python dictionary consists of channels to be selected, arranged in # pairs of layer name (key) and channel numbers (value); the channel # numbers of each layer are the channels to be used in the loss function; # use all the channels if some layer not in the dictionary; setting to None # for using all channels for all layers; 'channel': None, 'iter_n': 500, 'input_size': (227,227,3), 'opt_name' : 'Adam', } # perform reconstruction recon_img, loss_list = reconstruct_stim(target_layer_dict, net, **opts) PIL.Image.fromarray(np.uint8(recon_img)) ```
github_jupyter
``` import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.ticker as tick %matplotlib inline model = 'Shake-ResNet-26 2x64d (Shake-Shake-Image)' log_dir = os.path.join('results', model) df = pd.read_json(os.path.join(log_dir, 'log')) df.rename(columns={ 'epoch': 'Epoch', 'main/accuracy': 'train_accuracy', 'main/loss': 'train_loss', 'validation/main/accuracy': 'test_accuracy', 'validation/main/loss': 'test_loss' }, inplace=True) df['train_accuracy'] = df['train_accuracy'] * 100.0 df['test_accuracy'] = df['test_accuracy'] * 100.0 df['train_error'] = 100.0 - df['train_accuracy'] df['test_error'] = 100.0 - df['test_accuracy'] df print('Test accuracy: ', df['test_accuracy'].max()) print('Test error: ', df['test_error'].min()) ``` ## Classification Accuracy ``` df.plot(x='Epoch', y=['train_accuracy', 'test_accuracy'], figsize=(12, 7)) plt.xlim(-50, 1850); plt.xticks(np.arange(0, 1801, 200)); plt.gca().xaxis.set_minor_locator(tick.MultipleLocator(100)) plt.ylim(80, 101.0); plt.yticks(np.arange(80.0, 100.1, 5.0)); plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(1)) plt.grid(which='major',color='black',linestyle='-') plt.grid(which='minor',color='gray',linestyle='--') plt.ylabel('Accuracy [%]') plt.title('{}\nTrain/Test Accuracy'.format(model)) plt.show(); ``` ## Classification Error ``` df.plot(x='Epoch', y=['train_error', 'test_error'], figsize=(12, 7)) plt.xlim(-50, 1850); plt.xticks(np.arange(0, 1801, 200)); plt.gca().xaxis.set_minor_locator(tick.MultipleLocator(100)) plt.ylim(-0.9, 20.0); plt.yticks(np.arange(-0.0, 20.1, 5.0)); plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(1)) plt.grid(which='major',color='black',linestyle='-') plt.grid(which='minor',color='gray',linestyle='--') plt.ylabel('Error [%]') plt.title('{}\nTrain/Test Error'.format(model)) plt.show(); ``` ## Loss ``` df.plot(x='Epoch', y=['train_loss', 'test_loss'], figsize=(12, 7)) plt.xlim(-50, 1850); plt.xticks(np.arange(0, 1801, 200)); 
plt.gca().xaxis.set_minor_locator(tick.MultipleLocator(100)) plt.ylim(-0.1, 1.1); plt.yticks(np.arange(0.0, 1.1, 0.2)); plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(0.1)) plt.grid(which='major',color='black',linestyle='-') plt.grid(which='minor',color='gray',linestyle='--') plt.ylabel('Loss') plt.title('{}\nTrain/Test Loss'.format(model)) plt.show(); ``` ## Learning rate ``` df.plot(x='Epoch', y=['lr'], figsize=(12, 7), legend=False) plt.xlim(-50, 1850); plt.xticks(np.arange(0, 1801, 200)); plt.gca().xaxis.set_minor_locator(tick.MultipleLocator(100)) plt.ylim(-0.01, 0.11); plt.yticks(np.arange(0.0, 0.11, 0.02)); plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(0.01)) plt.grid(which='major',color='black',linestyle='-') plt.grid(which='minor',color='gray',linestyle='--') plt.ylabel('Learning rate') plt.title('Learning-rate of Momentum SGD') plt.show(); ```
github_jupyter
<a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/word_analogies_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Solving word analogies using pre-trained word embeddings Based on D2L 14.7 http://d2l.ai/chapter_natural-language-processing-pretraining/similarity-analogy.html ``` import numpy as np import matplotlib.pyplot as plt np.random.seed(seed=1) import math import requests import zipfile import hashlib import os import random import torch from torch import nn from torch.nn import functional as F !mkdir figures # for saving plots # Required functions def download(name, cache_dir=os.path.join('..', 'data')): """Download a file inserted into DATA_HUB, return the local filename.""" assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}." url, sha1_hash = DATA_HUB[name] os.makedirs(cache_dir, exist_ok=True) fname = os.path.join(cache_dir, url.split('/')[-1]) if os.path.exists(fname): sha1 = hashlib.sha1() with open(fname, 'rb') as f: while True: data = f.read(1048576) if not data: break sha1.update(data) if sha1.hexdigest() == sha1_hash: return fname # Hit cache print(f'Downloading {fname} from {url}...') r = requests.get(url, stream=True, verify=True) with open(fname, 'wb') as f: f.write(r.content) return fname def download_extract(name, folder=None): """Download and extract a zip/tar file.""" fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == '.zip': fp = zipfile.ZipFile(fname, 'r') elif ext in ('.tar', '.gz'): fp = tarfile.open(fname, 'r') else: assert False, 'Only zip/tar files can be extracted.' 
fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir ``` # Get pre-trained word embeddings Pretrained embeddings taken from GloVe website: https://nlp.stanford.edu/projects/glove/ fastText website: https://fasttext.cc/ ``` DATA_HUB = dict() DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/' DATA_HUB['glove.6b.50d'] = (DATA_URL + 'glove.6B.50d.zip', '0b8703943ccdb6eb788e6f091b8946e82231bc4d') DATA_HUB['glove.6b.100d'] = (DATA_URL + 'glove.6B.100d.zip', 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a') DATA_HUB['glove.42b.300d'] = (DATA_URL + 'glove.42B.300d.zip', 'b5116e234e9eb9076672cfeabf5469f3eec904fa') DATA_HUB['wiki.en'] = (DATA_URL + 'wiki.en.zip', 'c1816da3821ae9f43899be655002f6c723e91b88') class TokenEmbedding: """Token Embedding.""" def __init__(self, embedding_name): self.idx_to_token, self.idx_to_vec = self._load_embedding( embedding_name) self.unknown_idx = 0 self.token_to_idx = { token: idx for idx, token in enumerate(self.idx_to_token)} def _load_embedding(self, embedding_name): idx_to_token, idx_to_vec = ['<unk>'], [] # data_dir = d2l.download_extract(embedding_name) data_dir = download_extract(embedding_name) # GloVe website: https://nlp.stanford.edu/projects/glove/ # fastText website: https://fasttext.cc/ with open(os.path.join(data_dir, 'vec.txt'), 'r') as f: for line in f: elems = line.rstrip().split(' ') token, elems = elems[0], [float(elem) for elem in elems[1:]] # Skip header information, such as the top row in fastText if len(elems) > 1: idx_to_token.append(token) idx_to_vec.append(elems) idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec return idx_to_token, torch.tensor(idx_to_vec) def __getitem__(self, tokens): indices = [ self.token_to_idx.get(token, self.unknown_idx) for token in tokens] vecs = self.idx_to_vec[torch.tensor(indices)] return vecs def __len__(self): return len(self.idx_to_token) ``` Get a 50dimensional glove embedding, with vocab size of 400k ``` glove_6b50d = 
TokenEmbedding('glove.6b.50d') len(glove_6b50d) ``` Map from word to index and vice versa. ``` glove_6b50d.token_to_idx['beautiful'], glove_6b50d.idx_to_token[3367] embedder = glove_6b50d #embedder = TokenEmbedding('glove.6b.100d') embedder.idx_to_vec.shape ``` # Finding most similar words ``` def knn(W, x, k): # The added 1e-9 is for numerical stability cos = torch.mv(W, x.reshape(-1,)) / ( (torch.sqrt(torch.sum(W * W, axis=1) + 1e-9) * torch.sqrt((x * x).sum())) ) _, topk = torch.topk(cos, k=k) return topk, [cos[int(i)] for i in topk] def get_similar_tokens(query_token, k, embed): topk, cos = knn(embed.idx_to_vec, embed[[query_token]], k + 1) for i, c in zip(topk[1:], cos[1:]): # Remove input words print(f'cosine sim={float(c):.3f}: {embed.idx_to_token[int(i)]}') get_similar_tokens('man', 3, embedder) get_similar_tokens('banana', 3, embedder) ``` # Word analogies ``` # We slightly modify D2L code so it works on the man:woman:king:queen example def get_analogy(token_a, token_b, token_c, embed): vecs = embed[[token_a, token_b, token_c]] x = vecs[1] - vecs[0] + vecs[2] topk, cos = knn(embed.idx_to_vec, x, 10) # remove word c from nearest neighbor idx_c = embed.token_to_idx[token_c] topk = list(topk.numpy()) topk.remove(idx_c) return embed.idx_to_token[int(topk[0])] get_analogy('man', 'woman', 'king', embedder) get_analogy('man', 'woman', 'son', embedder) get_analogy('beijing', 'china', 'tokyo', embedder) ```
github_jupyter
# Napelemek temelésének előrejelzése gépi tanulási algoritmusok segítségével ## A feladat `count félrevezet; cov,corr INF-et ad; quantile-t nem tudom használni,mint nyugodtan kivehetem mert úgyis nulla? ``` import math import pandas as pd import numpy as np PATH_TO_TRAIN = '../data/raw/train15.csv' DATE_FORMAT = '%Y%m%d %H:%M' INDEX_COLUMN = 'TIMESTAMP' TARGET_COLUMN = 'POWER' feature_columns = ['VAR78', 'VAR79', 'VAR134', 'VAR157', 'VAR164', 'VAR165', 'VAR166', 'VAR167','VAR169', 'VAR175', 'VAR178', 'VAR228'] ACCUMLATED_FEATURE_COLUMNS = ['VAR169', 'VAR175', 'VAR178', 'VAR228'] ONE_DAY = 24 ONE_WEEK = 7 * ONE_DAY ONE_MONTH = 30* ONE_DAY ONE_YEAR = 365 * ONE_DAY PREDICT_INTERVAL = ONE_DAY * 5 df_original = pd.read_csv(PATH_TO_TRAIN) df = df_original.copy() df = df[df['ZONEID'] == 1] dateparse = lambda x: pd.datetime.strptime(x, DATE_FORMAT) df[INDEX_COLUMN] = df[INDEX_COLUMN].apply(dateparse) # Magyarázó változók hozzáadása a modellhez PATH_TO_PREDICTORS = '../data/raw/predictors15.csv' df_features = pd.read_csv(PATH_TO_PREDICTORS) df_features[INDEX_COLUMN] = df_features[INDEX_COLUMN].apply(dateparse) df = df.merge(df_features, how='left', on=[INDEX_COLUMN,'ZONEID']) df["MONTH"] = df[INDEX_COLUMN].apply(lambda x: x.month) df["HOUR"] = df[INDEX_COLUMN].apply(lambda x: x.hour) # for i in np.arange(1,13): # df["MONTH"+str(i)]= df["TIMESTAMP"].apply(lambda x: x.month == i) # for i in np.arange(24): # df["HOUR"+str(i)] = df["TIMESTAMP"].apply(lambda x: x.hour == i) # Zóna konvertálása kategorikus változóvá df["ZONE_1"] = df["ZONEID"].apply(lambda x: x == 1) df["ZONE_2"] = df["ZONEID"].apply(lambda x: x == 2) df["ZONE_3"] = df["ZONEID"].apply(lambda x: x == 3) df = df.drop("ZONEID",axis=1) # Timestamp legyen az index df = df.set_index('TIMESTAMP') #df = df.drop("TIMESTAMP",axis=1) #Using one year to train and one month to predict # df is the full data so we can calculate the rolling windows TRAIN_SIZE = ONE_YEAR df = df[:TRAIN_SIZE+PREDICT_INTERVAL] y_test = 
df[TRAIN_SIZE:TRAIN_SIZE+PREDICT_INTERVAL][TARGET_COLUMN].copy() # ez mekkora szopás referencia def clear_data_from_end(df, column, until): for i in range(until): df.iloc[-1*(i+1),df.columns.get_loc(column)] = None return df def add_rolling(df,column, intervals, shift): for i in range(min(intervals),max(intervals)): #for i in intervals: if i >= shift: rolling_column = df[column].rolling(window = i) df["ROLLING_MEAN_"+column+"_"+str(i)] = rolling_column.mean().shift(shift) df["ROLLING_MIN_"+column+"_"+str(i)] = rolling_column.min().shift(shift) df["ROLLING_MAX_"+column+"_"+str(i)] = rolling_column.max().shift(shift) df["ROLLING_SUM_"+column+"_"+str(i)] = rolling_column.sum().shift(shift) df["ROLLING_MEDIAN_"+column+"_"+str(i)] = rolling_column.median().shift(shift) df["ROLLING_STD_"+column+"_"+str(i)] = rolling_column.std().shift(shift) df["ROLLING_VAR_"+column+"_"+str(i)] = rolling_column.var().shift(shift) df["ROLLING_SKEW_"+column+"_"+str(i)] = rolling_column.skew().shift(shift) df["ROLLING_KURT_"+column+"_"+str(i)] = rolling_column.kurt().shift(shift) #df[column] = df[column].shift(shift) return df def dissipate_features(df, column): return df[column].rolling(window=2).apply(lambda x: x[1] if x[1] - x[0] < 0 else x[1] - x[0]) from bokeh.plotting import figure,show from bokeh.io import output_notebook output_notebook() thing_to_plot = df.copy()[:48][ACCUMLATED_FEATURE_COLUMNS] for column in thing_to_plot: p = figure() p.line(np.arange(len(thing_to_plot)), thing_to_plot[column], legend="real") p.line(np.arange(len(thing_to_plot)), dissipate_features(thing_to_plot,column), legend="diss",color="orange") show(p) df = clear_data_from_end(df,TARGET_COLUMN,PREDICT_INTERVAL) df = add_rolling(df,TARGET_COLUMN, [ONE_DAY, ONE_WEEK, ONE_MONTH], PREDICT_INTERVAL) for column in ACCUMLATED_FEATURE_COLUMNS: df[column] = dissipate_features(df,column) for column in feature_columns: df = clear_data_from_end(df,column,PREDICT_INTERVAL) df = add_rolling(df,column, [ONE_DAY, 
ONE_WEEK], PREDICT_INTERVAL) df = df.drop(column, axis = 1) from xgboost import XGBRegressor #Ezt külön fel kellett rakni from sklearn.svm import LinearSVR,SVR from sklearn.svm import SVR X_train = df[:TRAIN_SIZE] \ .drop(TARGET_COLUMN,axis=1) \ .dropna(axis=0) y_train = df[:TRAIN_SIZE].dropna(axis=0)[TARGET_COLUMN] xgb_model = XGBRegressor(nthread=4) xgb_model.fit(X_train,y_train) # For SVR we need Feature Scaling from sklearn.preprocessing import StandardScaler sc_x = StandardScaler() # Scale x and y (two scale objects) x = sc_x.fit_transform(X_train) svr_model = SVR() svr_model.fit(x,y_train) lsvr_model = LinearSVR() lsvr_model.fit(x,y_train) from sklearn.metrics import explained_variance_score X_test = df[TRAIN_SIZE:TRAIN_SIZE+PREDICT_INTERVAL].drop(TARGET_COLUMN,axis=1) print("XGBoost") y_xgb = xgb_model.predict(X_test) y_xgb[y_xgb < 0] = 0 print(explained_variance_score(y_xgb, y_test)) # For SVR we need Feature Scaling from sklearn.preprocessing import StandardScaler sc_x = StandardScaler() sc_y = StandardScaler() # Scale x and y (two scale objects) x = sc_x.fit_transform(X_test) print("LinearSVR") y_lsvr =lsvr_model.predict(x) y_lsvr[y_lsvr < 0] = 0 print(explained_variance_score(y_lsvr,y_test)) print("SVR") y_svr = svr_model.predict(x) y_svr[y_svr < 0] = 0 print(explained_variance_score(y_svr,y_test)) from bokeh.plotting import figure,show from bokeh.io import output_notebook y_true = y_test.values output_notebook() p = figure() p.circle(np.arange(PREDICT_INTERVAL), y_true, legend="real") p.circle(np.arange(PREDICT_INTERVAL),y_xgb ,legend="XGBoost", color="orange") p.circle(np.arange(PREDICT_INTERVAL),y_svr ,legend="SVR", color="green") p.circle(np.arange(PREDICT_INTERVAL),y_lsvr ,legend="LinearSVR", color="purple") show(p) p = figure() p.line(np.arange(PREDICT_INTERVAL), y_true, legend="real") p.line(np.arange(PREDICT_INTERVAL),y_xgb ,legend="XGBoost", color="orange") p.line(np.arange(PREDICT_INTERVAL),y_svr ,legend="SVR", color="green") 
p.line(np.arange(PREDICT_INTERVAL),y_lsvr ,legend="LinearSVR", color="purple") show(p) # from bokeh.plotting import figure, output_notebook, show # output_notebook() # for column in df: # print(column) # p = figure(plot_width=400, plot_height=400) # p.line(df.index[:24*5],df[column][:24*5]) # show(p) # Feature nélküli modellen javított amint beraktam a hónap, óra változókat numerikusan # vajon mi lenne kategorikusan #Jelentősen javított az akumlált értékek felhasználása X_train.head() ```
github_jupyter
<!--BOOK_INFORMATION--> <a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a> *This notebook contains an excerpt from the book [Machine Learning for OpenCV](https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv) by Michael Beyeler. The code is released under the [MIT license](https://opensource.org/licenses/MIT), and is available on [GitHub](https://github.com/mbeyeler/opencv-machine-learning).* *Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations. If you find this content useful, please consider supporting the work by [buying the book](https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv)!* <!--NAVIGATION--> < [Compressing Color Spaces Using k-Means](08.02-Compressing-Color-Images-Using-k-Means.ipynb) | [Contents](../README.md) | [Implementing Agglomerative Hierarchical Clustering](08.04-Implementing-Agglomerative-Hierarchical-Clustering.ipynb) > # Classifying handwritten digits using k-means Although the last application was a pretty creative use of $k$-means, we can do better still. We have previously discussed k-means in the context of unsupervised learning, where we tried to discover some hidden structure in the data. However, doesn't the same concept apply to most classification tasks? Let's say our task was to classify handwritten digits. Don't most zeros look similar, if not the same? And don't all zeros look categorically different from all possible ones? Isn't this exactly the kind of "hidden structure" we set out to discover with unsupervised learning? Doesn't this mean we could use clustering for classification as well? Let's find out together. 
In this section, we will attempt to use k-means to try and classify handwritten digits. In other words, we will try to identify similar digits without using the original label information. ## Loading the dataset From the earlier chapters, you might recall that scikit-learn provides a whole range of handwritten digits via its `load_digits` utility function. The dataset consists of 1,797 samples with 64 features each, where each of the features has the brightness of one pixel in an 8 x 8 image: ``` from sklearn.datasets import load_digits digits = load_digits() digits.data.shape ``` ## Running k-means Setting up $k$-means works exactly the same as in the previous examples. We tell the algorithm to perform at most 10 iterations and stop the process if our prediction of the cluster centers does not improve within a distance of 1.0: ``` import cv2 criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) flags = cv2.KMEANS_RANDOM_CENTERS ``` Then we apply $k$-means to the data as we did before. Since there are 10 different digits (0-9), we tell the algorithm to look for 10 distinct clusters: ``` import numpy as np compactness, clusters, centers = cv2.kmeans(digits.data.astype(np.float32), 10, None, criteria, 10, flags) ``` And done! Similar to the $N \times 3$ matrix that represented different RGB colors, this time, the centers array consists of $N \times 8 \times 8$ center images, where $N$ is the number of clusters. Therefore, if we want to plot the centers, we have to reshape the `centers` matrix back into 8 x 8 images: ``` import matplotlib.pyplot as plt plt.style.use('ggplot') %matplotlib inline fig, ax = plt.subplots(2, 5, figsize=(10, 4)) centers = centers.reshape(10, 8, 8) for axi, center in zip(ax.flat, centers): axi.set(xticks=[], yticks=[]) axi.imshow(center, interpolation='nearest', cmap=plt.cm.binary) plt.savefig('digits.png') ``` Look familiar? 
Remarkably, $k$-means was able to partition the digit images not just into any 10 random clusters, but into the digits 0-9! In order to find out which images were grouped into which clusters, we need to generate a labels vector as we know it from supervised learning problems: ``` from scipy.stats import mode labels = np.zeros_like(clusters.ravel()) for i in range(10): mask = (clusters.ravel() == i) labels[mask] = mode(digits.target[mask])[0] ``` Then we can calculate the performance of the algorithm using scikit-learn's accuracy_score metric: ``` from sklearn.metrics import accuracy_score accuracy_score(digits.target, labels) ``` Remarkably, $k$-means achieved 78.4% accuracy without knowing the first thing about the labels of the original images! We can gain more insights about what went wrong and how by looking at the **confusion matrix**. The confusion matrix is a 2D matrix $C$, where every element $C_{i,j}$ is equal to the number of observations known to be in group (or cluster) $i$, but predicted to be in group $j$. Thus, all elements on the diagonal of the matrix represent data points that have been correctly classified (that is, known to be in group $i$ and predicted to be in group $i$). Off-diagonal elements show misclassifications. In scikit-learn, creating a confusion matrix is essentially a one-liner: ``` from sklearn.metrics import confusion_matrix confusion_matrix(digits.target, labels) ``` The confusion matrix tells us that $k$-means did a pretty good job at classifying data points from the first nine classes; however, it confused all nines to be (mostly) threes. Still, this result is pretty solid, given that the algorithm had no target labels to be trained on. <!--NAVIGATION--> < [Compressing Color Spaces Using k-Means](08.02-Compressing-Color-Images-Using-k-Means.ipynb) | [Contents](../README.md) | [Implementing Agglomerative Hierarchical Clustering](08.04-Implementing-Agglomerative-Hierarchical-Clustering.ipynb) >
github_jupyter
# Stocks Analysis Demo ``` !/User/align_mlrun.sh ``` ## Setup stocks project ``` from os import path import os import mlrun # Set the base project name project_name_base = 'stocks' # Initialize the MLRun environment and save the project name and artifacts path project_name, artifact_path = mlrun.set_environment(project=project_name_base, user_project=True) project_path = path.abspath('./') project = mlrun.new_project(project_name_base, context=project_path, user_project=True) # Display the current project name and artifacts path print(f'Project name: {project_name}') print(f'Artifacts path: {artifact_path}') ``` ## Declare project functions ``` from mlrun import mount_v3io, code_to_function # Set functions to project project.set_function('code/00-train-sentiment-analysis-model.ipynb', name='bert_sentiment_classifier_trainer',kind = 'job') project.set_function("code/01-function_invoker.ipynb", name='func_invoke',kind = 'job') project.set_function('code/02-read-stocks.ipynb', name='stocks_reader',kind = 'nuclio', image="mlrun/mlrun") project.set_function('code/03-read-news.ipynb', name='news_reader',kind = 'nuclio', image="mlrun/mlrun") project.set_function('code/04-stream-viewer.ipynb', name='stream_viewer', kind="nuclio", image='mlrun/mlrun') project.set_function('hub://sentiment_analysis_serving', name='sentiment_analysis_server') project.set_function('code/05-read-vector.ipynb', name='vector_reader', kind='nuclio', image='mlrun/mlrun') project.set_function("code/06-model_training.ipynb", name='rnn_model_training', kind='nuclio', image="mlrun/ml-models-gpu") project.set_function("code/07-model_prediction.ipynb", name='rnn_model_prediction', kind ='nuclio', image='mlrun/mlrun') project.set_function("code/08-grafana.ipynb", name='grafana_view', kind='job', image='mlrun/mlrun') project.set_function("hub://rnn_serving",name="rnn_serving",kind = "serving") project.func('news_reader').spec.max_replicas = 1 # Declaring project name for later use project.spec.params = {} 
project.spec.params["PROJECT_NAME"] = project_name project.spec.params["ARTIFACT_PATH"] = artifact_path ``` ## Download a pre-trained model (optional) Since running the [training](training/bert_sentiment_classification.ipynb) part to achieve good results may take some time, we had already trained and uploaded a model to a public location. You can easily download it by running the following cell: If you would like to change the location of the source data, set the `SAMPLE_DATA_SOURCE_URL_PREFIX` environment variable. For example, set it to `/v3io/projects/demos-data/iguazio/` ``` # Run this to download the pre-trained model to your `models` directory url_prefix = os.environ.get('SAMPLE_DATA_SOURCE_URL_PREFIX', 'https://s3.wasabisys.com/iguazio/') import os model_location = f'{url_prefix.rstrip("/")}/data/stock-analysis/model.pt' saved_models_directory = os.path.join(os.path.abspath('./'), 'models') # Create paths os.makedirs(saved_models_directory, exist_ok=1) model_filepath = os.path.join(saved_models_directory, os.path.basename(model_location)) if "http" in model_location: ! wget -nc -P {saved_models_directory} {model_location} else: ! cp {model_location} {saved_models_directory} # Run this to download the pre-trained model to your `models` directory url_prefix = os.environ.get('SAMPLE_DATA_SOURCE_URL_PREFIX', 'https://s3.wasabisys.com/iguazio/') import os rnn_model_location = f'{url_prefix.rstrip("/")}/models/function-marketplace-models/rnn_serving/rnn_model.h5' rnn_saved_models_directory = os.path.join(os.path.abspath('./'), 'models') # Create paths os.makedirs(rnn_saved_models_directory, exist_ok=1) rnn_model_filepath = os.path.join(rnn_saved_models_directory, os.path.basename(rnn_model_location)) if "http" in rnn_model_location: ! wget -nc -P {rnn_saved_models_directory} {rnn_model_location} else: ! 
cp {rnn_model_location} {rnn_saved_models_directory} #https://s3.wasabisys.com/iguazio/models/function-marketplace-models/rnn_serving/rnn_model.h5 # add model project.func('sentiment_analysis_server').add_model("model1", class_name='SentimentClassifierServing', model_path=model_filepath) project.func("rnn_serving").add_model("model2",class_name="RNN_Model_Serving",model_path = rnn_saved_models_directory) # make sure only one .h5 model is saved there ``` ## Create deployment workflow ``` %%writefile code/workflow.py from kfp import dsl from mlrun import auto_mount, mlconf, load_project import os from nuclio.triggers import V3IOStreamTrigger, CronTrigger import re funcs = {} # Directories and Paths projdir = os.path.abspath('./') project = load_project(projdir) project_name = project.spec.params.get("PROJECT_NAME") artifact_path = project.spec.params.get("ARTIFACT_PATH") model_filepath = os.path.join(projdir, 'models', 'model.pt') # Previously saved model if downloaded rnn_model_path = os.path.join(projdir, 'models', 'rnn_model.h5') reviews_datafile = os.path.join(projdir, 'data', 'reviews.csv') # Performence limit max_replicas = 1 # Readers cron interval readers_cron_interval = '300s' # Training GPU Allocation # Set to 0 if no gpus are to be used training_gpus = 0 def init_functions(functions: dict, project=None, secrets=None): for f in functions.values(): # Add V3IO Mount f.apply(mount_v3io()) # Always pull images to keep updates f.spec.image_pull_policy = 'Always' # Define inference-stream related triggers functions['sentiment_analysis_server'].apply(auto_mount()) functions['sentiment_analysis_server'].spec.readiness_timeout = 500 functions['sentiment_analysis_server'].set_config('readinessTimeoutSeconds', 500) functions['rnn_serving'].apply(auto_mount()) functions['rnn_serving'].spec.readiness_timeout = 500 functions['rnn_serving'].set_config('readinessTimeoutSeconds', 500) # Adept image to use CPU if a GPU is not assigned if training_gpus == 0: 
functions['sentiment_analysis_server'].spec.base_spec['spec']['build']['baseImage']='mlrun/ml-models' functions['bert_sentiment_classifier_trainer'].spec.image='mlrun/ml-models' # Add triggers functions['stocks_reader'].add_trigger('cron', CronTrigger(readers_cron_interval)) functions['news_reader'].add_trigger('cron', CronTrigger(readers_cron_interval)) functions['rnn_model_training'].add_trigger('cron', CronTrigger('12h')) # Set max replicas for resource limits functions['sentiment_analysis_server'].spec.max_replicas = max_replicas functions['news_reader'].spec.max_replicas = max_replicas functions['stocks_reader'].spec.max_replicas = max_replicas # Add GPU for training functions['bert_sentiment_classifier_trainer'].gpus(training_gpus) project.func('news_reader').spec.max_replicas = 1 # Declare function base image to build (Job and not a nuclio funciton) functions['func_invoke'].spec.image = "mlrun/mlrun" functions['bert_sentiment_classifier_trainer'].spec.build.commands = ['pip install transformers==3.0.1', 'pip install torch==1.6.0'] functions['stocks_reader'].spec.build.commands = ['pip install lxml', 'pip install yfinance','pip install v3io_frames'] functions['news_reader'].spec.build.commands = ['pip install beautifulsoup4', 'pip install v3io_frames'] functions['stream_viewer'].spec.build.commands = ['pip install v3io'] functions['grafana_view'].spec.build.commands = ['pip install git+https://github.com/v3io/grafwiz --upgrade', 'pip install v3io_frames', 'pip install attrs==19.1.0'] functions['sentiment_analysis_server'].add_model("model1", class_name='SentimentClassifierServing', model_path=model_filepath) functions['rnn_serving'].add_model("model2",class_name="RNN_Model_Serving",model_path = rnn_model_path) @dsl.pipeline( name='Stocks demo deployer', description='Up to RT Stocks ingestion and analysis' ) def kfpipeline( # General V3IO_CONTAINER = 'users', STOCKS_TSDB_TABLE = os.getenv('V3IO_USERNAME') + '/stocks/stocks_tsdb', STOCKS_KV_TABLE = 
os.getenv('V3IO_USERNAME') + '/stocks/stocks_kv', STOCKS_STREAM = os.getenv('V3IO_USERNAME') + '/stocks/stocks_stream', RUN_TRAINER: bool = False, # Trainer pretrained_model = 'bert-base-cased', reviews_dataset = reviews_datafile, models_dir = 'models', model_filename = 'bert_sentiment_analysis_model.pt', n_classes: int = 3, MAX_LEN: int = 128, BATCH_SIZE: int = 16, EPOCHS: int = 2, random_state: int = 42, # stocks reader STOCK_LIST: list = ['GOOGL', 'MSFT', 'AMZN', 'AAPL', 'INTC'], EXPRESSION_TEMPLATE = "symbol='{symbol}';price={price};volume={volume};last_updated='{last_updated}'", # Sentiment analysis server model_name = 'bert_classifier_v1', model_filepath = model_filepath # if not trained ): with dsl.Condition(RUN_TRAINER == True): deployer = funcs['bert_sentiment_classifier_trainer'].deploy_step() trainer = funcs['bert_sentiment_classifier_trainer'].as_step(name='bert_sentiment_classifier_trainer', handler='train_sentiment_analysis_model', params={'pretrained_model': pretrained_model, 'EPOCHS': EPOCHS, 'models_dir': models_dir, 'model_filename': model_filename, 'n_classes': n_classes, 'MAX_LEN': MAX_LEN, 'BATCH_SIZE': BATCH_SIZE, 'EPOCHS': EPOCHS, 'random_state': random_state}, inputs={'reviews_dataset': reviews_dataset}, outputs=['bert_sentiment_analysis_model'], image=deployer.outputs['image']).after(deployer) sentiment_server = funcs['sentiment_analysis_server'].deploy_step().after(trainer) news_reader = funcs['news_reader'].deploy_step(env={'V3IO_CONTAINER': V3IO_CONTAINER, 'STOCKS_STREAM': STOCKS_STREAM, 'STOCKS_TSDB_TABLE': STOCKS_TSDB_TABLE, 'SENTIMENT_MODEL_ENDPOINT': sentiment_server.outputs['endpoint'], 'PROJECT_NAME' : project_name, 'ARTIFACT_PATH' : artifact_path}).after(sentiment_server) news_reader_invok1 = funcs['func_invoke'].as_step(params = {"endpoint" : news_reader.outputs["endpoint"]}, handler="handler").after(news_reader) with dsl.Condition(RUN_TRAINER == False): sentiment_server = funcs['sentiment_analysis_server'].deploy_step() 
news_reader = funcs['news_reader'].deploy_step(env={'V3IO_CONTAINER': V3IO_CONTAINER, 'STOCKS_STREAM': STOCKS_STREAM, 'STOCKS_TSDB_TABLE': STOCKS_TSDB_TABLE, 'SENTIMENT_MODEL_ENDPOINT': sentiment_server.outputs['endpoint'], 'PROJECT_NAME' : project_name, 'ARTIFACT_PATH' : artifact_path}).after(sentiment_server) news_reader_invok2 = funcs['func_invoke'].as_step(params = {"endpoint" : news_reader.outputs["endpoint"]}, handler="handler").after(news_reader) stocks_reader = funcs['stocks_reader'].deploy_step(env={'STOCK_LIST': STOCK_LIST, 'V3IO_CONTAINER': V3IO_CONTAINER, 'STOCKS_TSDB_TABLE': STOCKS_TSDB_TABLE, 'STOCKS_KV_TABLE': STOCKS_KV_TABLE, 'EXPRESSION_TEMPLATE': EXPRESSION_TEMPLATE, 'PROJECT_NAME' : project_name, 'ARTIFACT_PATH' : artifact_path}) stream_viewer = funcs['stream_viewer'].deploy_step(env={'V3IO_CONTAINER': V3IO_CONTAINER, 'STOCKS_STREAM': STOCKS_STREAM}).after(news_reader_invok1,news_reader_invok2) vector_viewer = funcs['vector_reader'].deploy_step(env={'PROJECT_NAME' : project_name, 'ARTIFACT_PATH' : artifact_path}).after(stocks_reader,news_reader_invok1,news_reader_invok2) rnn_model_training_deployer = funcs["rnn_model_training"].deploy_step(env={'model_path': rnn_model_path, 'PROJECT_NAME' : project_name, 'ARTIFACT_PATH' : artifact_path}) rnn_model_training_invoker = funcs['func_invoke'].as_step(params = {"endpoint" : rnn_model_training_deployer.outputs["endpoint"]}, handler="handler").after(rnn_model_training_deployer,vector_viewer) rnn_serving = funcs['rnn_serving'].deploy_step().after(rnn_model_training_invoker) rnn_model_prediction = funcs["rnn_model_prediction"].deploy_step(env = {"endpoint":rnn_serving.outputs['endpoint'], 'ARTIFACT_PATH' : artifact_path}).after(rnn_serving) grafana_viewer = funcs["grafana_view"].deploy_step() grafana_viewer = funcs["grafana_view"].as_step(params = {"streamview_url" : stream_viewer.outputs["endpoint"], "readvector_url" : vector_viewer.outputs["endpoint"], "rnn_serving_url" : 
rnn_model_prediction.outputs["endpoint"], "v3io_container" : V3IO_CONTAINER, "stocks_kv" : STOCKS_KV_TABLE, "stocks_tsdb" : STOCKS_TSDB_TABLE, "grafana_url" : "http://grafana"}, handler = "handler").after(grafana_viewer,rnn_model_prediction) ``` ## Add workflow ``` print(os.path.join(os.path.abspath(project.context))) project.set_workflow('main', os.path.join(os.path.abspath(project.context), 'code', 'workflow.py')) ``` ## Save Project ``` project.save(os.path.join(project.context, 'project.yaml')) ``` ### Run workflow In this cell we will run the `main` workflow via `KubeFlow Pipelines` on top of our cluster. Running the pipeline may take some time. Due to possible jupyter timeout, it's best to track the pipeline's progress via KFP or the MLRun UI. ``` project.run('main', arguments={'RUN_TRAINER': False}, artifact_path=artifact_path) ```
github_jupyter
# Train a Smartcab to Drive Goal: Construct an optimized Q-Learning driving agent that will navigate a Smartcab through its ideal environment towards a destination - without sacrificing on safety or reliability. Both of the evaluation metric is measured using a letter-grade system as follows: | Grade | Safety | Reliability | |:-----: |:------: |:-----------: | | A+ | Agent commits no traffic violations,<br/>and always chooses the correct action. | Agent reaches the destination in time<br />for 100% of trips. | | A | Agent commits few minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 90% of trips. | | B | Agent commits frequent minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 80% of trips. | | C | Agent commits at least one major traffic violation,<br/> such as driving through a red light. | Agent reaches the destination on time<br />for at least 70% of trips. | | D | Agent causes at least one minor accident,<br/> such as turning left on green with oncoming traffic. | Agent reaches the destination on time<br />for at least 60% of trips. | | F | Agent causes at least one major accident,<br />such as driving through a red light with cross-traffic. | Agent fails to reach the destination on time<br />for at least 60% of trips. | ``` # Import the visualization code import visuals as vs # Pretty display for notebooks %matplotlib inline ``` ## Understanding the World On running the basic agent.py code : ```bash 'python smartcab/agent.py' ``` - Smartcab doesn't move at all during the simulation - The kind of rewards are positive or negative float values (based on the decision the agent takes along the way) - Lights impact rewards because it's a check if the agent is following the basic rules of driving (to measure safety of the action agent chosen take at any moment). 
For example, if the agent chooses to be IDLE during green light, it's given a negative reward while IDLE during red is rewarded with positive reward to ensure it learns the rules of the world. ## Understanding the Code In addition to understanding the world, it is also necessary to understand the code itself that governs how the world, simulation, and so on operate. On exploring the various pieces of code, we find: ```bash 'cd smartcab/*.py' ``` - agent.py - learning: forces to apply Q-learning because it helps the agent choose optimal actions that give the highest long-term reward. - epsilon: this is the greedy part. It's a decay function - based on how slowly it approaches zero, higher learning happens. - alpha: learning rate, to ensure there is some learning happening with each iteration. Meaning, by setting it at 0.5 we guarantee that the model is overwriting old values by that proportion. - environment.py - When the agent performs an action it calls: act() method to calculate the reward associated with the action - In simulator.py - render() = renders display of the simulation with GUI - render_text() = renders display of the simulation without GUI but outputs would be shown in the command prompt - In planner.py - Would consider the East-West direction first
```bash 'if self.learning == False: action = random.choice(self.valid_actions)' ``` #### Setup - 'enforce_deadline' = True (to capture whether it reaches destination on time) - 'update_delay' = 0.01 (to reduce the time between steps in each trial) - 'log_metrics' = True (to log simulation results into .csv file under 'logs' directory) - 'n_test' = 10 (to perform 10 testing trials) >**Tip**: If you disable visual simulation (i.e. set 'display = False'), results would be returned much faster Once you have run the simulation, there would be 20 training and 10 testing trials. Let's visualize the results that are stored in the log folder! ``` # Load the 'sim_no-learning' log file from the initial simulation results vs.plot_trials('sim_no-learning.csv') ``` #### Results: - About 40% of the time, the driving agent made bad decisions. Out of those bad decisions, major accidents happened at 4% frequency while major violations happened at close to 20%. - Rate of reliability over all trials maintained an average of 35% - which is slightly lower than the 50% guess for random choosing of actions. Since reliability also includes reaching the destination (which in this scenario was ignored), maybe that might have led to a lower probability than random guessing. - Reward looks like random weights given to a particular decision without the rules of the world given any consideration (therefore some weights are extremely negative while some are minimally positive). Over time, rewards average out to -4.5 and compared to negative weights, there were very few and small value positive rewards. - Currently in this initial phase, increasing trials from 20 to a greater number doesn't have an impact because there is no learning involved - Certainly not safe or reliable because both received an F score and there are still accidents/law failures happening with no punctuality.
## Step #2: Defining states Based on the input, states need to be defined (which is also called a **policy** in this scenario) and ultimately the driving agent is expected to learn from them - because based on the state, agent decides to pick an optimal action. Inspecting the `'build_state()'` agent function shows that the driving agent is given the following data from the environment: - `'waypoint'`, which is the direction the *Smartcab* should drive leading to the destination, relative to the *Smartcab*'s heading. - `'inputs'`, which is the sensor data from the *Smartcab*. It includes - `'light'`, the color of the light. - `'left'`, the intended direction of travel for a vehicle to the *Smartcab*'s left. Returns `None` if no vehicle is present. - `'right'`, the intended direction of travel for a vehicle to the *Smartcab*'s right. Returns `None` if no vehicle is present. - `'oncoming'`, the intended direction of travel for a vehicle across the intersection from the *Smartcab*. Returns `None` if no vehicle is present. - `'deadline'`, which is the number of actions remaining for the *Smartcab* to reach the destination before running out of time. Out of the 3 main features (waypoint, inputs, deadline), most relevant are **inputs** & **waypoint** that help attain both safety and efficiency. To be more specific: ```bash 'state = (inputs['left'], inputs['light'], inputs['oncoming'], waypoint)' ``` - inputs 'left' = since RIGHT is free turn, despite of what lights are, we can't really program those turns where as left turn is strictly based on light alone. - inputs 'light' = it's based on these traffic lights are the rules of the world built. So this is crucial. - inputs 'oncoming' = it's always great to know the traffic situation at the intersection and specifically before making a FREE RIGHT turn. 
- 'waypoint' = gives you the next move the agent needs to take TOWARDS the destination The reason the other 2 features were left out: - inputs 'right': doesn't add any additional value because traffic flow from the right doesn't cause any safety concerns irrespective of the lights - 'deadline': since the agent doesn't know the exact destination, knowing how many steps are needed to get somewhere doesn't help train the agent effectively ### Define a State Space When defining a set of states that the agent can occupy, it is necessary to consider the *size* of the state space. That is to say, if you expect the driving agent to learn a **policy** for each state, you would need to have an optimal action for *every* state the agent can occupy. If the number of all possible states is very large, it might be the case that the driving agent never learns what to do in some states, which can lead to uninformed decisions. Since (left, light, oncoming, waypoint) has **([none, left, right, forward], [red, green], [none, left, right, forward], [left, right, forward])** as options, the total size of the state space would be 96 (= 4x2x4x3). Yes, an agent would 100% learn the optimal policy given a reasonable number of training trials. ## Step #3: Implement Q-Learning The concept of Q-Learning is fairly straightforward: for every state the agent visits, create an entry in the Q-table as state-action pairs available. Then, when the agent encounters a state and performs an action, update the Q-value associated with that state-action pair based on the reward received and the iterative update rule implemented. Of course, additional benefits come from Q-Learning, such that we can have the agent choose the *best* action for each state based on the Q-values of each state-action pair possible. Therefore, applied a decay function, *$\epsilon$-greedy* Q-learning algorithm with **no** discount factor. As the number of trials increases, $\epsilon$ should decrease towards 0.
For Decay function let's try multiple approaches: ```bash 1. 'self.epsilon = self.epsilon - 0.05 ' - linear 2. 'self.epsilon -= 1.0 / (self.trial**2)' - has a curve 3. 'self.epsilon = self.epsilon * 0.999' - slow curve ``` #### Setup - 'enforce_deadline' = True (to capture whether it reaches destination on time) - 'update_delay' = 0.01 (to reduce the time between steps in each trial) - 'log_metrics' = True (to log simulation results into .csv file under 'logs' directory) - 'n_test' = 10 (to perform 10 testing trials) - 'learning' = True (to tell the agent to use Q-learning implementation) As part of this section, implemented 'createQ()' and "getMaxQ()". ``` # Load the 'sim_default-learning' file from the default Q-Learning simulation vs.plot_trials('sim_default-learning.csv') ``` #### Results: - There isn't any significant similarity besides how safety remained to be far below the expected results. - It took 20 training trials before testing 10 new ones started. Yes, it makes sense given epsilon-tolerance, as testing basically starts after epsilon reaches zero. - YES, the decay function is accurately represented as a linear function, as the epsilon value is going down by 0.05 with each trial - Increasing the number of training trials didn't decrease bad actions (as it was still random guessing). Though average reward went up slightly. - Compared to the initial agent, now it's slightly more reliable (with a B) but still unsafe (though, it shows improvement) ----- ## Step #4: Optimize Q-Learning Now it's a matter of trial and error by tuning different settings or adjusting learning parameters to get a better score for both safety and efficiency.
#### Setup: - 'enforce_deadline' = True (to capture whether it reaches destination on time) - 'update_delay' = 0.01 (to reduce the time between steps in each trial) - 'log_metrics' = True (to log simulation results into .csv file under 'logs' directory) - 'n_test' = 10 (to perform 10 testing trials) - 'learning' = True (to tell the agent to use your Q-learning implementation) - 'optimized' = True (to tell the agent that you're performing the optimized version of Q-learning) #### Parameters to tweak: - 'n_test' = > 10 - 'alpha' = 0-1 value (to adjust the learning rate) - 'epsilon' = 0-1 value (to adjust the exploration factor of the Q-learning algorithm) - 'tolerance' = 0.05 (i.e. really a small value close to 0) This time let's try with decay function being: $ \epsilon = \frac{1}{t^2}\hspace{50px}$ ```bash 'self.epsilon -= 1.0 / (self.trial**2)' ``` Again, let's visualize the file created inside the log directory ``` # Load the 'sim_improved-learning' file from the improved Q-Learning simulation vs.plot_trials('sim_improved-learning.csv') ``` #### Results: - In search of a slow learning function, the exploration factor was finally set to --> **epsilon x 0.999** . Because, the slower the function decays, the better the agent learns (as it indirectly requires that many more training trials) - It took about **3000** training trials before the agent could begin testing - Tolerance = **0.05** and alpha = **0.5** I kept alpha at the default value, and set tolerance to 0.05 so that the agent could learn better over a large number of trials - Default Q-learner was only capable of giving B for reliability with NO SAFETY at all.
But with this Q-learning tweaking, I was able to get the agent to give **Safety = A+ & Reliability = A+** - Most certainly because it was trained over more training trials therefore agent seems to have learnt well over time (before coming to this conclusion) - Since I managed to achieve both A+, result is quite satisfying ------ ### Optimal Policy State Dictionary: **state = (inputs['left'], inputs['light'], inputs['oncoming'], waypoint)** Here the optimal policy happens when: - If waypoint = right - If oncoming traffic is forward, and light is red, it should wait - Else, go right - If waypoint = forward - If light is green, go forward - Else, wait - If waypoint = left - If light is green, and oncoming traffic left or none, go left - Else if light is red, wait - Else go forward <li>In 'sim_improved-learning.txt' text file, for every state, policy isn't correct. There are few states where action isn't as expected by the optimal policy.<br/> <table width="100%"> <tr> <th>inputs['left']</th> <th>inputs['light']</th> <th>inputs['oncoming']</th> <th>waypoint</th> <th>Is the policy correct for the given state?</th> </tr> <tr> <td>Left</td> <td>Green</td> <td>Left</td> <td>Forward</td> <td> ('left', 'green', 'left', 'forward') ==> forward : 1.92, None : -5.38, right : 1.21, left : 0.96<br/> Yes, because agent had cars on the left but with a green light and flow of traffic going to the left. 
It's optimal to move forward as that would minimize the probability of accidents.</td> </tr> <tr> <td>Left</td> <td>Red</td> <td>Right</td> <td>Right</td> <td> ('left', 'red', 'right', 'right') ==> forward : -11.04, None : 0.85, right : 1.70, left : -29.66 <br/> Yes, because agent has cars on the left and was stuck at the intersection with flow of traffic moving right and with a red light, like the policy suggest, it's best to move right to avoid accidents and obey the traffic rules.</td> </tr> </table> </li> <li>Example from my Q-table<br/> <table width="100%"> <tr> <th>inputs['left']</th> <th>inputs['light']</th> <th>inputs['oncoming']</th> <th>waypoint</th> <th>These describe optimal policy because.....</th> </tr> <tr> <td>Left</td> <td>Red</td> <td>Right</td> <td>Right</td> <td>('left', 'red', 'right', 'right') ==> forward : -11.04, None : 0.85, right : 1.70, left : -29.66 <br/> Here, agent had cars on the left and was stuck at the intersection with flow of traffic moving right and with a red light. Optimally, it's best to move right and the Q-table also give a really high value for right. This is optimal bc follows law and avoids accidents</td> </tr> <tr> <td>Right</td> <td>Green</td> <td>None</td> <td>Forward</td> <td> ('right', 'green', None, 'forward') ==> forward : 1.93, None : -4.50, right : 0.91, left : 0.97 <br/> Here, agent had cars on the right but with a green light and with no flow of traffic. It's optimal to move forward as making right would be difficult with green signal. Therefore a safer option and policy also suggests so!</td> </tr> <tr> <td>Forward</td> <td>Red</td> <td>Right</td> <td>Right</td> <td> ('forward', 'red', 'right', 'right') ==> forward : -38.68, None : 1.65, right : -19.82, left : -38.61 <br/> At an intersection with red light and cars around me with intention to go forward and flow of traffic to right. 
Though wavepoint suggests to move right, it's optimal to stay IDLE during the red light as it would be difficult to cross over other cars. </td> </tr> </table> </li> <li>Probably because smartcab wasn't stuck multiple times in similar situation to optimize the policy over the 3000 training trials<br/> <table width="100%"> <tr> <th>inputs['left']</th> <th>inputs['light']</th> <th>inputs['oncoming']</th> <th>waypoint</th> <th>Why did it not learn optimal policy?</th> </tr> <tr> <td>Right</td> <td>Green</td> <td>Forward</td> <td>Forward</td> <td> ('right', 'green', 'forward', 'forward') ==> forward : 0.00, None : -2.06, right : 0.00, left : 0.00 <br/> Ideally in this case, agent should be asked to go forward but in this case, policy gives equal weight to all 3 (forward, right and left) allowing flexibility for the agent to choose any action.</td> </tr> </table></li> ----- ### TODO: Future Rewards - Discount Factor, `'gamma'` Curiously, as part of the Q-Learning algorithm, you were asked to **not** use the discount factor, `'gamma'` in the implementation. Including future rewards in the algorithm is used to aid in propagating positive rewards backwards from a future state to the current state. Essentially, if the driving agent is given the option to make several actions to arrive at different states, including future rewards will bias the agent towards states that could provide even more rewards. An example of this would be the driving agent moving towards a goal: With all actions and rewards equal, moving towards the goal would theoretically yield better rewards if there is an additional reward for reaching the goal. However, even though in this project, the driving agent is trying to reach a destination in the allotted time, including future rewards will not benefit the agent. In fact, if the agent were given many trials to learn, it could negatively affect Q-values! 
*There are two characteristics about the project that invalidate the use of future rewards in the Q-Learning algorithm. One characteristic has to do with the *Smartcab* itself, and the other has to do with the environment. Can you figure out what they are and why future rewards won't work for this project?*
github_jupyter
``` import sys, os, glob import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns import logging from scipy.signal import find_peaks from scipy.interpolate import UnivariateSpline, interp1d from scipy import stats from statsmodels.stats.multicomp import pairwise_tukeyhsd, MultiComparison from statsmodels.stats.libqsturng import psturng import random # logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.DEBUG) logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO) %matplotlib inline font = {'family' : 'Arial', 'size' : 7} matplotlib.rc('font', **font) plt.rcParams['svg.fonttype'] = 'none' # plt.rcParams['pdf.fonttype'] = 42 # make a folder to store figures if not already there !mkdir ../jupyter_figures #------------------- # Facility functions #------------------- def splinePath(x, y, N, z=None): """Takes a set of (x,y) coordinates, spline along z if provided, otherwise along its path. Finally, interpolate at N equal distance points. Return 2 or 3 size N numpy arrays representing interpolated N (x,y) or (x,y,z) coordinates. 
""" if z is None: dr = (np.diff(x)**2 + np.diff(y)**2)**.5 # segment lengths r = np.zeros_like(x) r[1:] = np.cumsum(dr) # integrate path r_int = np.linspace(0, r.max(), N) # regular spaced path z, z_int = r, r_int splx = UnivariateSpline(z, x, s=0, ext='const') # interpolate # splx.set_smoothing_factor(0.5) x_int = splx(z_int) sply = UnivariateSpline(z, y, s=0, ext='const') # sply.set_smoothing_factor(0.5) y_int = sply(z_int) return x_int, y_int if z is not None: # check whether z is increasing -- requied for spline interpolate dz = np.diff(z) if not np.all(dz >= 0): exit('z for splinePath is not increasing!') z_int = np.linspace(z.min(), z.max(), N) splx = UnivariateSpline(z, x, s=0, ext='const') # interpolate # splx.set_smoothing_factor(0.5) x_int = splx(z_int) sply = UnivariateSpline(z, y, s=0, ext='const') # sply.set_smoothing_factor(0.5) y_int = sply(z_int) return x_int, y_int, z_int def discretizePath(x, y, N, z=None): """Similar to splinePath, but do linear interpolation instead of spline. Takes a set of (x,y) coordinates, interpolate N points along z if provided, otherwise along its path. Return 2 or 3 size N numpy arrays representing interpolated N (x,y) or (x,y,z) coordinates. """ if z is None: dr = (np.diff(x)**2 + np.diff(y)**2)**.5 # segment lengths r = np.zeros_like(x) r[1:] = np.cumsum(dr) # integrate path r_int = np.linspace(0, r.max(), N) # regular spaced path x_int = np.interp(r_int, r, x) # interpolate y_int = np.interp(r_int, r, y) return x_int, y_int if z is not None: z_int = np.linspace(z.min(), z.max(), N) x_int = np.interp(z_int, z, x) # interpolate y_int = np.interp(z_int, z, y) return x_int, y_int, z_int def interpPolylineT(inputFolder, N=100): """Takes a list of .txt files storing ordered coordinates of polylines at relatively sparse time intervals, interpolate to generate a time series of polylines (N points along each polyline). The input files are a series of txt files that each contains a list of ordered x,y coordinates. 
The file name of each txt file indicates the time frame. First, it reads in the series of x,y coordinates, interpolate equal distance points along the spline fitted line for plotting on the corresponding z plane and time point. Second, it interpolate along the time line to make sure every time point has a polyline representing the outline of the bud surface. Finally, all data will be saved into a pandas DataFrame, which is returned. """ fileList = glob.glob(inputFolder+'*.txt') fileList.sort() xyData, tData = [], [] for f in fileList: xyData.append(pd.read_csv(f, header = None, sep='\t')) f = f.split('/')[-1] f = f.split('.')[0] tData.append([int(s) for s in f.split('-') if s.isdigit()]) tData = np.array(tData) # print(xyData) # print(tzData) # interpolate along each path to position crosslines pathData = [] for i in range(len(xyData)): temp = xyData[i].values x, y = temp[:,0], temp[:,1] try: x_int, y_int = splinePath(x, y, N) except: # exception will rise when there are only 2 or 3 points x_int, y_int = discretizePath(x, y, N) # The following is time series in frame number tempT = np.ones_like(x_int) * tData[i] # Record the point position along path pathPos = np.arange(N) transposedList = list(map(list, zip(*[x_int, y_int, tempT, pathPos]))) tempDF = pd.DataFrame(transposedList, columns = ['x', 'y', 't', 'pathPos']) pathData.append(tempDF) # concat all data into one big pandas data frame pathDF = pd.concat(pathData, ignore_index=True) pathDF.columns = ['x', 'y', 't', 'pathPos'] # interpolate the mesh data for every time frame tData = [] for i in pathDF.pathPos.unique(): temp = pathDF.loc[pathDF.pathPos == i] temp = temp.sort_values('t', ascending=True) x, y, t = temp.x, temp.y, temp.t tN = int(t.max() - t.min() + 1) try: x_int, y_int, t_int = splinePath(x, y, tN, t) except: x_int, y_int, t_int = discretizePath(x, y, tN, t) tempPos = np.ones_like(x_int) * i transposedList = list(map(list, zip(*[x_int, y_int, t_int, tempPos]))) tempDF = pd.DataFrame(transposedList, 
columns = ['x', 'y', 't', 'pathPos']) tData.append(tempDF) tDF = pd.concat(tData, ignore_index=True) tDF.columns = ['x', 'y', 't', 'pathPos'] return tDF def getPathLength(x, y): '''Calculate the cumulative length along the polyline defined by a series coordinates (x, y) Parameters: x, y: two ordered lists storing (x, y) coordinates along the polyline Returns: The cumulative length along the polyline ''' dr = (np.diff(x)**2 + np.diff(y)**2)**.5 # segment lengths return np.sum(dr) def getPathLengthSeries(df): '''Calculate the cumulative length of each (x,y) series across a time series Parameters: Pandas data frame object with columns "x", "y" and "t" that specifies (x, y) coordinates along a polyline path at different time points Returns: A list of cumulative length along a polyline at each time point ''' pathLengthList = [] for i in df.t.unique(): temp = df.loc[df.t == i] pathLengthList.append(getPathLength(temp.x, temp.y)) return pathLengthList def savePathSeries(df, outputFolder): '''Save interpolated time series of (x,y) coordinates, each time point into one txt file in the outputFolder Parameters: Pandas data frame object with columns "x", "y" and "t" that specifies (x, y) coordinates along a polyline path at different time points Returns: A list of status stating whether a file was successfully saved for the time series ''' statusList = [] for i in df.t.unique(): temp = df.loc[df.t == i] temp = temp[['x','y']] outputFilename = outputFolder + 't-' + f'{int(i):04}' + '.txt' temp.to_csv(outputFilename, sep='\t', index=False, header=False) statusList.append(os.path.isfile(outputFilename)) return statusList def getMinDistIndex(x0, y0, x, y): '''Calculate the distance between a reference pont (x0, y0) and points along a polyline (x, y). Returns the index of the minimum distance. 
Parameters: x0, y0: singular value of numbers representing coordinates of the reference point x, y: list of values representing a list of points along a polyline Returns: The index where the minimum of distance to reference point was found ''' dist = [] for i in range(len(x)): tempDist = ((x0-x[i])**2 + (y0-y[i])**2)**.5# distance to current point dist.append(tempDist) return np.argmin(dist) def centerVector(x): """Center a vector by the middle point of two extreme value Parameter: 1-d numpy array, or list, or pandas Series Returns: the same data type and shape as input, shifted by the middle point """ midX = np.mean([np.min(x), np.max(x)]) return x-midX def clipLinescanProfiles(profileFolder, pathFolder, outputFolder, t_N_selected=None): '''Clip line profiles at each time point according to the beginning and the end of the original path coordinates Parameters: profileFolder: contains txt files storing newXY coordinates, as well as GFP and RFP intensity values along the path specified by the newXY coordinates pathFolder: contains txt files storing the original user-drawn and time-interpolated xy coordinates outputFolder: the location to store clipped line scan files Returns: A list of dataframes, each storing the clipped line scan data of one time point Meanwhile, each data frame will be saved as a txt file in the output folder ''' pathList = glob.glob(pathFolder+'*.txt') pathList.sort() newPathList = glob.glob(profileFolder+'newXY*.txt') newPathList.sort() profileList1 = glob.glob(profileFolder+'GFP*.txt') profileList1.sort() profileList2 = glob.glob(profileFolder+'RFP*.txt') profileList2.sort() if t_N_selected == 0 or t_N_selected is None: t_N_selected = len(pathList) dfList = [] FLIP_TOKEN = 0 CLOCKWISE_T0 = 0 # for i in range(1):# for testing for i in range(min([t_N_selected, len(pathList)])): f1, f2, f3, f4 = pathList[i], newPathList[i], profileList1[i], profileList2[i] # print(f1, f2, f3, f4) # Read in the 4 txt files storing (in order): # original path, new 
path, GFP profile and RFP profile df1 = pd.read_csv(f1, sep="\t", header = None) df1.columns = ["x", "y"] x0, y0 = df1.x.values[0], df1.y.values[0]# starting point of the original path x1, y1 = df1.x.values[-1], df1.y.values[-1]# ending point of the original path df2 = pd.read_csv(f2, sep="\t", header = None) df2.columns = ["x", "y"] x, y = np.array(df2.x), np.array(df2.y)# list of points of new path df3 = pd.read_csv(f3, sep="\t", header = 0, index_col = 0) df3.columns = ["GFP"] GFP = np.array(df3.GFP)# GFP profile (intensity values along new path) df4 = pd.read_csv(f4, sep="\t", header = 0, index_col = 0) df4.columns = ["RFP"] RFP = np.array(df4.RFP)# RFP profile (intensity values along new path) # Determine whether new path is in opposite direction of the original path on the first # time point, when the shape is relatively simple. # Use this token to determine wehther to flip the new path coordinates if i == 0: FLIP_TOKEN = isClockwise(df1.x, df1.y)*isClockwise(df2.x, df2.y) CLOCKWISE_T0 = isClockwise(df2.x, df2.y) assert FLIP_TOKEN in [1, -1] assert CLOCKWISE_T0 in [1, -1] if FLIP_TOKEN == -1: # print("Current file id: " + str(i) + ", flipped") x, y, GFP, RFP = map(np.flip, [x, y, GFP, RFP]) # else: # print("Current file id: " + str(i) + ", NOT flipped") idx0 = getMinDistIndex(x0, y0, x, y) # roll_N = idx0 * isClockwise(x, y) roll_N = idx0 * CLOCKWISE_T0 * (-1) x, y, GFP, RFP = map(lambda xx: np.roll(xx, roll_N), [x, y, GFP, RFP]) idx1 = getMinDistIndex(x1, y1, x, y) x, y, GFP, RFP = map(lambda xx: xx[:idx1], [x, y, GFP, RFP]) transposedList = list(map(list, zip(*[x, y, GFP, RFP]))) tempDF = pd.DataFrame(transposedList, columns = ['x', 'y', 'GFP', 'RFP']) outputFilename = outputFolder + "clipped-linescan-profile-t-" + f'{int(i):04}' + '.txt' tempDF.to_csv(outputFilename, sep='\t') dfList.append(tempDF) return dfList def get_peak_counts(df, height=20.0, width=1.0): peakCounts = [] for i in range(len(df.columns)): arr = df[df.columns[i]] peaks = find_peaks(arr, 
height=height, width=width) # peaks = find_peaks(arr, height=np.nanmedian(arr), width=1) peakCounts.append(len(peaks[0])) # print(peaks[0]) return peakCounts # ***Curvature calculation functions*** def curvature_splines(x, y, spline_degree=3): """Calculate the signed curvature of a 2D curve at each point using interpolating splines. Parameters ---------- x,y: numpy.array(dtype=float) shape (n_points, ) Returns ------- curvature: numpy.array shape (n_points, ) """ t = np.arange(x.shape[0]) # std = error * np.ones_like(x) fx = UnivariateSpline(t, x, k=spline_degree) fy = UnivariateSpline(t, y, k=spline_degree) dx = fx.derivative(1)(t) d2x = fx.derivative(2)(t) dy = fy.derivative(1)(t) d2y = fy.derivative(2)(t) curvature = (dy*d2x - dx*d2y) / np.power(dx**2 + dy**2, 1.5) return curvature def isClockwise(x, y, ratio_of_total_path=0.1): """Determines whether the direction following the first specied ratio of (x,y) points turns clockwise or not using cross product ***Note: only works for simple cases*** Parameter: x, y: Two 1-d vectors representing x and y coordinates of points ratio_of_total_path: the ratio of total path length to determine the turning direction Returns: 1 or -1 specifying whether it turns clockwise or not """ x, y = centerVector(x), centerVector(y) selected_index = int( len(x)*ratio_of_total_path ) x1, y1 = x[0], y[0] x2, y2 = x[selected_index], y[selected_index] v1 = [x1, y1] v2 = [x2, y2] if np.cross(v1, v2)<0: return 1; else: return -1; def get_df_for_curvature(dfList): """The parameter dfList is exactly the format of the output from function clipLinescanProfiles. columns = ['x', 'y', 'GFP', 'RFP'] The order of df in the list follows the time series. 
""" for i in range(len(dfList)): tempDF = dfList[i] tempT = np.ones_like(tempDF.x) * i # tempDF.loc[ : , 'x'] tempDF['t'] = pd.Series(tempT) dfList[i] = tempDF # concat all data into one big pandas data frame DF = pd.concat(dfList, ignore_index=True) DF.columns = ['x', 'y', 'GFP', 'RFP', 't'] return DF def get_curvature_df_xy(df): '''Calculate curvature along (x,y) for a single time point Input Parameters ---------------- df: a pandas data frame with columns 'x' and 'y' (could have others) that specifies a series of (x,y) coordinates of a curve Returns: -------- curvature_df: a pandas data frame adding a column 'curvature' storing the local curvature at each (x,y) coordinate ''' assert 'x' in df.columns assert 'y' in df.columns # center the coordinates of x and y to align plots df.x, df.y = centerVector(df.x), centerVector(df.y) # calculate the curvature and store in a new column # The *1000 is to convert micron to mm scale df["curvature"] = curvature_splines(df.x, df.y) * 1000 # reverse the sign of curvature if the curve is not turning clockwise if isClockwise(df.x, df.y) == -1: df["curvature"] = -1 * df["curvature"] return df def get_curvature_df(df): ''' This function calculate pixel-wise local curvature of a time series of curves specified by a series of (x, y) coordinates along the curve. Parameters ---------- df: pandas dataframe containing 3 columns ('x', 'y', 't'; could have others) that represent (x, y) coordinates along a curve in a time seris denoted by 't' pixel_size: unit um/pixel, the pixel calibration for converting micron-unit (x,y) coordinates to pixels Returns ------- curvature_df: pandas dataframe storing pixel-wise curvature along the time series of the curve. 
Curvature unit is mm^(-1) ''' t_seris = df.t.unique() curvature_list = [] clockwise = 0 for t in t_seris: df_temp = df[df.t==t] # center the coordinates of x and y to align plots # reverse the sign of y coordinates because on the image y goes downwards # opposite to the reference frame of curvature calculation curve_x = centerVector(df_temp.x.values) curve_y = centerVector( -1 * df_temp.y.values) curvature_temp = curvature_splines(curve_x, curve_y, spline_degree=3) * 1000 # flip the sign of curvature if the curve is not turning clockwise if t == t_seris[0]: clockwise = isClockwise(curve_x, curve_y) assert clockwise in [1, -1] if clockwise == -1: curvature_temp = -1 * curvature_temp curvature_list.append(pd.Series(curvature_temp)) # Use the dfList data to construct simple-structured data frame for heatmap plotting df_cols = ["t-" + f'{int(t):04}' for t in t_seris] curvature_df = pd.DataFrame(dict(zip(df_cols, curvature_list))) return curvature_df #------------------- # Plotting functions #------------------- def plot_curve_series(df, outputFigPath, fig_width=1.2, fig_height=1.2, colorMap='jet'): '''The pandas data frame df should have columns "x", "y" and "t" representing (x, y) coordinates along a curve at a series of time points ''' fig = plt.figure(figsize=(fig_width, fig_height), dpi=300) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) sns.lineplot(x=df.x, y=df.y, hue=df.t, sort=False, estimator=None, alpha=.1, size=.01, legend=False, palette=colorMap) # Flip the y-axis to match the image coordinates plt.gca().invert_yaxis() # Make the x and y axes equal in dimension to mimic image display plt.axis('equal') for o in fig.findobj(): o.set_clip_on(False) for o in ax.findobj(): o.set_clip_on(False) plt.savefig(outputFigPath) return ax def plot_line_profile_heatmap(df, outputFigPath, fig_width=1.2, fig_height=1.0, axis_on_off='off', colorMap='Greys', show_cbar=True, transpose=True): '''The pandas data frame df should have intensity values in each column for each time point 
for plotting ''' # make sure the axis_on_off argument is valid assert axis_on_off in ['on', 'off'] plt.rcParams['svg.fonttype'] = 'none' plt.rcParams['font.size'] = 7 plt.rcParams['font.sans-serif'] = 'Arial' fig = plt.figure(figsize=(fig_width, fig_height), dpi=300) if axis_on_off == 'on': ax = fig.add_axes([0.3, 0.3, 0.6, 0.6]) else: ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) if transpose: ax = sns.heatmap(df.transpose(), cmap=colorMap, vmax=255, cbar=show_cbar) else: ax = sns.heatmap(df, cmap=colorMap, vmax=255, cbar=show_cbar) for o in fig.findobj(): o.set_clip_on(False) for o in ax.findobj(): o.set_clip_on(False) plt.axis(axis_on_off) plt.savefig(outputFigPath) return ax def plot_curvature_heatmap(curvature_df, outputFigPath, colorMax=100, fig_width=1.2, fig_height=1.0, show_cbar=True): '''plot heatmap of curvature, save .svg as outputFigPath''' fig = plt.figure(figsize=(fig_width, fig_height), dpi=300) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) sns.heatmap(curvature_df.transpose(), cmap='coolwarm', vmin=-1*colorMax, vmax=colorMax, cbar=show_cbar) for o in fig.findobj(): o.set_clip_on(False) for o in ax.findobj(): o.set_clip_on(False) plt.axis("off") plt.savefig(outputFigPath) return ax def plot_curvature(df, outputFigPath=None, size=3, colorNormMax=80, x_max=None, y_max=None): assert 'x' in df.columns assert 'y' in df.columns fig = plt.figure(figsize=(1.2,1.2), dpi=300) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) plt.scatter(df.x, df.y, s=size, c=df.curvature, vmin=-1*colorNormMax, vmax=colorNormMax, cmap="coolwarm") # When supplied, specify the range of x, y axes if x_max is not None: plt.xlim([0, x_max]) if y_max is not None: plt.ylim([0, y_max]) # Flip the y-axis to match the image coordinates plt.gca().invert_yaxis() # Make the x and y axes equal in dimension to mimic image display plt.axis('equal') plt.axis("off") for o in fig.findobj(): o.set_clip_on(False) for o in ax.findobj(): o.set_clip_on(False) if outputFigPath is not None: plt.savefig(outputFigPath) return 
ax def plot_boundary(df, outputFigPath=None, line_width=0.8, line_color='k', x_max=None, y_max=None): assert 'x' in df.columns assert 'y' in df.columns fig = plt.figure(figsize=(1.2,1.2), dpi=300) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) plt.plot(df.x, df.y, lw=line_width, color=line_color) # When supplied, specify the range of x, y axes if x_max is not None: plt.xlim([0, x_max]) if y_max is not None: plt.ylim([0, y_max]) # Flip the y-axis to match the image coordinates plt.gca().invert_yaxis() # Make the x and y axes equal in dimension to mimic image display plt.axis('equal') plt.axis("off") for o in fig.findobj(): o.set_clip_on(False) for o in ax.findobj(): o.set_clip_on(False) if outputFigPath is not None: plt.savefig(outputFigPath) return ax def plot_perimeter_and_nuclei_number(pathLengths, peakCounts, outputFigPath, xTickInterval=5.0, fig_width=1.2, fig_height=1.0): ''' ''' # ------------------------- # Fitting pathLengths array # ------------------------- x1 = np.arange(len(pathLengths)) / 12.0 x_max = x1[-1] y1 = np.array(pathLengths)/np.nanpercentile(pathLengths,90) slope, intercept, r_value, p_value, std_err = stats.linregress(x1, y1) print("\npathLengths linear regression parameters (slope, intercep, r square, p_value and std_err):") print(slope, intercept, r_value**2, p_value, std_err) last1 = slope*x_max + intercept first1 = slope*0 + intercept print("\nThe fitted path length value at the last time point (should be close to 1):") print(last1) print("\nThe ratio of fitted path length value of the last time point over the first time point:") print(last1/first1) # ------------------------- # Fitting peakCounts array # ------------------------- x2 = np.arange(len(peakCounts)) / 12.0 y2 = np.array(peakCounts)/np.nanpercentile(peakCounts,90) slope, intercept, r_value, p_value, std_err = stats.linregress(x2, y2) print("\npeakCounts linear regression parameters (slope, intercep, r square, p_value and std_err):") print(slope, intercept, r_value**2, p_value, 
std_err) last2 = slope*x_max + intercept first2 = slope*0 + intercept print("\nThe fitted path length value at the last time point (should be close to 1):") print(last2) print("\nThe ratio of fitted path length value of the last time point over the first time point:") print(last2/first2) # -------- # Plotting # -------- fig = plt.figure(figsize=(fig_width, fig_height), dpi=300) ax = fig.add_axes([0.3, 0.2, 0.6, 0.7]) plt.plot(x2, y2, "g", lw=.8) plt.plot([0, x_max], [first2, last2], "g--", lw=.8) plt.plot(x1, y1, "k", lw=.8) plt.plot([0, x_max], [first1, last1], "k--", lw=.8) for o in fig.findobj(): o.set_clip_on(False) for o in ax.findobj(): o.set_clip_on(False) # customize the x ticks to correspond to something meaningful # since the time frame is 5 min intervals, a reasonable number here # could be 60 frames = 5 hours xTicks = np.arange(0, x_max + 1.0/12.0, xTickInterval) plt.xticks(xTicks) plt.yticks([0.6, 0.8, 1.0]) plt.xlabel("Time (h)") plt.ylabel("Bud perimeter or\nnuclear count") plt.savefig(outputFigPath) return ax def test_peak_finding_parameters(df, outputFigPath=None, t=0, height=20.0, width=1.0, fig_width=4.0, fig_height=1.2): '''Plot out the intensity values and found peaks using specified peak finding parameters The pandas data frame df should have intensity values along a curve in each column for each time point ''' arr = df[df.columns[t]] peaks = find_peaks(arr, height=height, width=width) peakValues = [arr[i] for i in peaks[0]] print(peakValues) fig = plt.figure(figsize=(fig_width, fig_height), dpi=300) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) plt.plot(arr, "gray") plt.plot(peaks[0], peakValues, "ok") for o in fig.findobj(): o.set_clip_on(False) for o in ax.findobj(): o.set_clip_on(False) if outputFigPath is not None: plt.savefig(outputFigPath) return ax # Information of all data sets parentFolderList = ['../data/dynamic-line-scan-analysis/190331-4_K14Rhet-HisG-dataset/', '../data/dynamic-line-scan-analysis/180218-mTmGHisG-ROI1-dataset/', 
'../data/dynamic-line-scan-analysis/190227-mTmGHisG-ROI1-dataset/', '../data/dynamic-line-scan-analysis/190227-mTmGHisG-ROI2-dataset/', '../data/dynamic-line-scan-analysis/200125-K14R-HisG-ROI1-dataset/', '../data/dynamic-line-scan-analysis/190331-1_K14Rhomo-HisG-dataset/', '../data/dynamic-line-scan-analysis/190331-6_K14Rhet-HisG-dataset/', '../data/dynamic-line-scan-analysis/180624-mTmGHisG-SMG1-ROI1-dataset-low-quality/', '../data/dynamic-line-scan-analysis/180325-mTmGHisG-ROI1-dataset/', '../data/dynamic-line-scan-analysis/180325-mTmGHisG-ROI2-dataset/', '../data/dynamic-line-scan-analysis/180624-mTmGHisG-SMG1-ROI2-dataset/', '../data/dynamic-line-scan-analysis/180624-mTmGHisG-SMG2-ROI1-dataset/', '../data/dynamic-line-scan-analysis/180624-mTmGHisG-SMG2-ROI2-dataset/'] datasetPrefixList = ['190331-4_K14Rhet-HisG-1-1-denoised-BCratio-img-seq-stacked-z-26', '180218-mTmGHisG-ROI1-denoised-BCratio-img-seq-BC-all-z-24', '190227-mTmGHisG-2photon-E13-ROI1-denoised-BC-all-mid-plane', '190227-mTmGHisG-2photon-E13-ROI2-denoised-BC-all-mid-plane', '2020-01-25-K14R-HisG-2photon-25x-2-denoised-ROI1-z-25', '190331-1_K14Rhomo-HisG-1-1-denoised-z-27', '190331-6_K14Rhet-HisG-2-2-denoised-z-23', '180624-2photon-mTmGHisG-SMG1-combined-BC-all-ROI1-z-24-denoised', '180325-mTmGHisG-2photon-mosaic-ROI1-denoised-BC-all-z-24', '180325-mTmGHisG-2photon-mosaic-ROI2-denoised-BC-all-z-30', '180624-2photon-mTmGHisG-SMG1-combined-BC-all-ROI2-z-40-denoised', '180624-2photon-mTmGHisG-SMG2-combined-BC-all-ROI1-z-31-denoised', '180624-2photon-mTmGHisG-SMG2-combined-BC-all-ROI2-z-36-denoised'] # In the cases when the time series goes beyond cleft progression into cleft widening, # select the number of time points preceding the widening t_N_selected_list = np.zeros_like(range(len(datasetPrefixList))) t_N_selected_list[2] = 150 # Master function to get all potentially useful plots for each dataset def process_dataset(parentFolder, datasetPrefix): # ----------------------------------------------- # 
Reading in the data and prepare for data frames # ----------------------------------------------- inputFolder = parentFolder + datasetPrefix + '-linescan-output/' # N points per path will be interpolated df = interpPolylineT(inputFolder, N=100) outputFolder = inputFolder[:-1] + "-t-interpolated/" # Only need to run it once if not os.path.isdir(outputFolder): os.makedirs(outputFolder, exist_ok=True) status = savePathSeries(df, outputFolder) t_N_selected = t_N_selected_list[i] # Select the time points before cleft bottom widening if t_N_selected != 0: t_min = np.min(df.t) df=df[ df.t < (t_min + t_N_selected) ] # Get the data frame list of line profile profileFolder = parentFolder + datasetPrefix + '-dynamic-line-scan-profile/' pathFolder = parentFolder + datasetPrefix + '-linescan-output-t-interpolated/' outputFolder = parentFolder + datasetPrefix + '-t-interpolated-clipped/' os.makedirs(outputFolder, exist_ok=True) dfList = clipLinescanProfiles(profileFolder, pathFolder, outputFolder, t_N_selected=t_N_selected) # Use the dfList data to construct simple-structured GFP and RFP intensity data frames for heatmap plotting df_cols = ["t-" + f'{int(i):04}' for i in range(len(dfList))] dfGFP = pd.DataFrame(dict(zip(df_cols, [df.GFP for df in dfList]))) dfRFP = pd.DataFrame(dict(zip(df_cols, [df.RFP for df in dfList]))) # Calculate the local curvature of bud surface outline df_for_curvature = get_df_for_curvature(dfList) curvature_df = get_curvature_df(df_for_curvature) # --------- # Plottings # --------- outputFigPath = "../jupyter_figures/" + datasetPrefix + "-mid-plane-outline.svg" plot_curve_series(df, outputFigPath, fig_width=1.2, fig_height=1.2) # plotting heamap showing the straightened time series of GFP and the curvature heatmap outputFigPath = "../jupyter_figures/" + datasetPrefix + "-dynamic-kymograph-GFP.png" plot_line_profile_heatmap(dfGFP, outputFigPath, show_cbar=False) outputFigPath = "../jupyter_figures/" + datasetPrefix + "-curvature-heatmap.png" 
plot_curvature_heatmap(curvature_df, outputFigPath, show_cbar=False) outputFigPath = "../jupyter_figures/" + datasetPrefix + "-dynamic-kymograph-GFP-axis-on.png" plot_line_profile_heatmap(dfGFP, outputFigPath, axis_on_off='on') # plotting heamap showing the straightened time series of RFP outputFigPath = "../jupyter_figures/" + datasetPrefix + "-dynamic-kymograph-RFP.png" plot_line_profile_heatmap(dfRFP, outputFigPath, show_cbar=False) test_peak_finding_parameters(dfGFP, t=0, height=20.0, width=1.0) # Caculate the path length at each time point of the series pathLengths = getPathLengthSeries(df) # Caculate the peak number using GFP intensity profile at each time point of the series peakCounts = get_peak_counts(dfGFP, height=20.0, width=1.0) outputFigPath = "../jupyter_figures/" + datasetPrefix + "-perimeter-and-nuclei-number-over-time.svg" plot_perimeter_and_nuclei_number(pathLengths, peakCounts, outputFigPath) plt.close('all') # Loop through all data sets to get potentially useful plots for i in np.arange(0, len(parentFolderList)): parentFolder = parentFolderList[i] datasetPrefix = datasetPrefixList[i] process_dataset(parentFolder, datasetPrefix) # -------------------------------------------------------- # Reading in the data and prepare data frames for plotting # -------------------------------------------------------- # # Specify the ith dataset from the above list, 0 indexed i = 0 re_run = False # Specify whether to re-write the t-interpolated data parentFolder = parentFolderList[i] datasetPrefix = datasetPrefixList[i] inputFolder = parentFolder + datasetPrefix + '-linescan-output/' # N points per path will be interpolated df = interpPolylineT(inputFolder, N=100) outputFolder = inputFolder[:-1] + "-t-interpolated/" # Only need to run it once if not os.path.isdir(outputFolder): os.makedirs(outputFolder, exist_ok=True) status = savePathSeries(df, outputFolder) if re_run == True: os.makedirs(outputFolder, exist_ok=True) status = savePathSeries(df, outputFolder) 
t_N_selected = t_N_selected_list[i] # Select the time points before cleft bottom widening if t_N_selected != 0: t_min = np.min(df.t) df=df[ df.t < (t_min + t_N_selected) ] # Get the data frame list of line profile profileFolder = parentFolder + datasetPrefix + '-dynamic-line-scan-profile/' pathFolder = parentFolder + datasetPrefix + '-linescan-output-t-interpolated/' outputFolder = parentFolder + datasetPrefix + '-t-interpolated-clipped/' os.makedirs(outputFolder, exist_ok=True) dfList = clipLinescanProfiles(profileFolder, pathFolder, outputFolder, t_N_selected=t_N_selected) # Use the dfList data to construct simple-structured GFP and RFP intensity data frames for heatmap plotting df_cols = ["t-" + f'{int(i):04}' for i in range(len(dfList))] dfGFP = pd.DataFrame(dict(zip(df_cols, [df.GFP for df in dfList]))) dfRFP = pd.DataFrame(dict(zip(df_cols, [df.RFP for df in dfList]))) # Calculate the local curvature of bud surface outline df_for_curvature = get_df_for_curvature(dfList) curvature_df = get_curvature_df(df_for_curvature) outputFigPath = "../jupyter_figures/" + datasetPrefix + "-mid-plane-outline.svg" plot_curve_series(df, outputFigPath, fig_width=1.2, fig_height=1.2) # plotting heamap showing the straightened time series of GFP outputFigPath = "../jupyter_figures/" + datasetPrefix + "-dynamic-kymograph-GFP.png" plot_line_profile_heatmap(dfGFP, outputFigPath, show_cbar=False) outputFigPath = "../jupyter_figures/" + datasetPrefix + "-curvature-heatmap.png" plot_curvature_heatmap(curvature_df, outputFigPath, show_cbar=False) outputFigPath = "../jupyter_figures/" + datasetPrefix + "-dynamic-kymograph-GFP-axis-on.png" plot_line_profile_heatmap(dfGFP, outputFigPath, axis_on_off='on') # # plotting heamap showing the straightened time series of RFP outputFigPath = "../jupyter_figures/" + datasetPrefix + "-dynamic-kymograph-RFP.png" plot_line_profile_heatmap(dfRFP, outputFigPath) # use svg backend to get the color bar # outputFigPath = "./jupyter_figures/" + 
datasetPrefix + "-dynamic-kymograph-GFP.svg" # plot_line_profile_heatmap(dfGFP, outputFigPath, show_cbar=True) # outputFigPath = "./jupyter_figures/" + datasetPrefix + "-curvature-heatmap.svg" # plot_curvature_heatmap(curvature_df, outputFigPath, show_cbar=True) # outputFigPath = "./jupyter_figures/" + datasetPrefix + "-dynamic-kymograph-GFP-jet.svg" # plot_line_profile_heatmap(dfGFP, outputFigPath, colorMap='jet', show_cbar=True) output_filename = '../jupyter_figures/' + datasetPrefix + '-GFP-peak-count.svg' test_peak_finding_parameters(dfGFP, outputFigPath=output_filename, t=0, height=20.0, width=1.0) # Caculate the path length at each time point of the series pathLengths = getPathLengthSeries(df) # Caculate the peak number using GFP intensity profile at each time point of the series peakCounts = get_peak_counts(dfGFP, height=20.0, width=1.0) outputFigPath = "../jupyter_figures/" + datasetPrefix + "-perimeter-and-nuclei-number-over-time.svg" plot_perimeter_and_nuclei_number(pathLengths, peakCounts, outputFigPath, fig_width=1.2, fig_height=1.0) # Plot outlines for making showcase example to include in the supplemental figures for t in [0, 143, 252]: curvature_df_temp = get_curvature_df_xy(dfList[t]) figFileName = '../jupyter_figures/' + datasetPrefix + '-curvature-plot-t-' + str(t) + '.svg' plot_curvature(curvature_df_temp, outputFigPath=figFileName, x_max=200, y_max=200, size=0.1, colorNormMax=100) figFileName = '../jupyter_figures/' + datasetPrefix + '-boundary-t-' + str(t) + '.svg' plot_boundary(curvature_df_temp, outputFigPath=figFileName, x_max=200, y_max=200, line_width=0.6, line_color='k') ```
github_jupyter
# Translation simple ecoder-decocer over the b3 dataset ``` import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torchtext import data import pandas as pd import unicodedata import string import re import random import copy from contra_qa.plots.functions import simple_step_plot import matplotlib.pyplot as plt device = torch.device("cuda" if torch.cuda.is_available() else "cpu") from nltk.translate.bleu_score import sentence_bleu % matplotlib inline ``` ### Preparing data ``` df2 = pd.read_csv("data/boolean3_train.csv") df2_test = pd.read_csv("data/boolean3_test.csv") df2["text"] = df2["sentence1"] + df2["sentence2"] df2_test["text"] = df2_test["sentence1"] + df2_test["sentence2"] all_sentences = list(df2.text.values) + list(df2_test.text.values) df2train = df2.iloc[:8500] df2valid = df2.iloc[8500:] df2train.tail() SOS_token = 0 EOS_token = 1 class Lang: def __init__(self, name): self.name = name self.word2index = {} self.word2count = {} self.index2word = {0: "SOS", 1: "EOS"} self.n_words = 2 # Count SOS and EOS def addSentence(self, sentence): for word in sentence.split(' '): self.addWord(word) def addWord(self, word): if word not in self.word2index: self.word2index[word] = self.n_words self.word2count[word] = 1 self.index2word[self.n_words] = word self.n_words += 1 else: self.word2count[word] += 1 # Turn a Unicode string to plain ASCII, thanks to # http://stackoverflow.com/a/518232/2809427 def unicodeToAscii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn') # Lowercase, trim, and remove non-letter characters def normalizeString(s): s = unicodeToAscii(s.lower().strip()) s = re.sub(r"([.!?])", r" \1", s) s = re.sub(r"[^a-zA-Z.!?]+", r" ", s) return s example = "ddddda'''~~çãpoeéééééÈ'''#$$##@!@!@AAS@#12323fdf" print("Before:", example) print() print("After:", normalizeString(example)) pairs_A = list(zip(list(df2train.sentence1.values), list(df2train.and_A.values))) pairs_B = 
list(zip(list(df2train.sentence1.values), list(df2train.and_B.values))) pairs_A = [(normalizeString(s1), normalizeString(s2)) for s1, s2 in pairs_A] pairs_B = [(normalizeString(s1), normalizeString(s2)) for s1, s2 in pairs_B] pairs_A_val = list(zip(list(df2valid.sentence1.values), list(df2valid.and_A.values))) pairs_B_val = list(zip(list(df2valid.sentence1.values), list(df2valid.and_B.values))) pairs_A_val = [(normalizeString(s1), normalizeString(s2)) for s1, s2 in pairs_A_val] pairs_B_val = [(normalizeString(s1), normalizeString(s2)) for s1, s2 in pairs_B_val] all_text_pairs = zip(all_sentences, all_sentences) all_text_pairs = [(normalizeString(s1), normalizeString(s2)) for s1, s2 in all_text_pairs] def readLangs(lang1, lang2, pairs, reverse=False): # Reverse pairs, make Lang instances if reverse: pairs = [tuple(reversed(p)) for p in pairs] input_lang = Lang(lang2) output_lang = Lang(lang1) else: input_lang = Lang(lang1) output_lang = Lang(lang2) return input_lang, output_lang, pairs MAX_LENGTH = 20 def filterPair(p): cond1 = len(p[0].split(' ')) < MAX_LENGTH cond2 = len(p[1].split(' ')) < MAX_LENGTH return cond1 and cond2 def filterPairs(pairs): return [pair for pair in pairs if filterPair(pair)] def prepareData(lang1, lang2, pairs, reverse=False): input_lang, output_lang, pairs = readLangs(lang1, lang2, pairs, reverse) print("Read %s sentence pairs" % len(pairs)) pairs = filterPairs(pairs) print("Trimmed to %s sentence pairs" % len(pairs)) print("Counting words...") for pair in pairs: input_lang.addSentence(pair[0]) output_lang.addSentence(pair[1]) print("Counted words:") print(input_lang.name, input_lang.n_words) print(output_lang.name, output_lang.n_words) return input_lang, output_lang, pairs _, _, training_pairs_A = prepareData("eng_enc", "eng_dec", pairs_A) print() input_lang, _, _ = prepareData("eng_enc", "eng_dec", all_text_pairs) output_lang = copy.deepcopy(input_lang) print() _, _, valid_pairs_A = prepareData("eng_enc", "eng_dec", pairs_A_val) _, _, 
class EncoderRNN(nn.Module):
    """Single-layer GRU encoder consuming one token index per forward call.

    Submodule attribute names (``embedding``, ``gru``) and the public
    ``hidden_size`` are kept unchanged so existing state_dict checkpoints
    and callers keep working.
    """

    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        # Embedding width equals the GRU width so tokens feed straight in.
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        """Encode one token; returns the (output, new_hidden) pair from the GRU."""
        # One token at a time: reshape to (seq_len=1, batch=1, hidden_size).
        emb = self.embedding(input).view(1, 1, -1)
        return self.gru(emb, hidden)

    def initHidden(self):
        # Fresh all-zero state shaped (num_layers=1, batch=1, hidden_size).
        return torch.zeros(1, 1, self.hidden_size, device=device)
class DecoderRNN(nn.Module):
    """Single-layer GRU decoder emitting log-probabilities over the target vocab.

    Submodule attribute names are preserved so saved state_dicts still load.
    """

    def __init__(self, hidden_size, output_size):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        # Projects the GRU state onto the vocabulary, then log-softmax.
        self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """Decode one step; returns (log_probs of shape [1, vocab], new_hidden)."""
        # Embed the previous token and squash negatives before the GRU.
        step = F.relu(self.embedding(input).view(1, 1, -1))
        gru_out, hidden = self.gru(step, hidden)
        # Drop the seq dimension, project to vocab size, take log-probs.
        scores = self.softmax(self.out(gru_out[0]))
        return scores, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
def get_loss(input_tensor, target_tensor, encoder, decoder, criterion, max_length):
    """Run one encode/greedy-decode pass and return the mean per-token loss.

    Mirrors train() but takes no optimizer step — used as a validation metric.
    NOTE(review): there is no torch.no_grad() wrapper here, so the autograd
    graph is still built during evaluation — presumably unintended; confirm.
    """
    encoder_hidden = encoder.initHidden()
    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)
    # Per-step encoder output buffer (filled but not read by this decoder).
    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
    loss = 0
    # Feed the source sentence through the encoder one token at a time.
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(
            input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]
    # Decoding starts from SOS, seeded with the final encoder hidden state.
    decoder_input = torch.tensor([[SOS_token]], device=device)
    decoder_hidden = encoder_hidden
    for di in range(target_length):
        decoder_output, decoder_hidden = decoder(
            decoder_input, decoder_hidden)
        # Greedy decoding: the model's own top prediction becomes the next input.
        _, topone = decoder_output.topk(1)
        decoder_input = topone.squeeze().detach()  # detach from history as input
        loss += criterion(decoder_output, target_tensor[di])
        if decoder_input.item() == EOS_token:
            break
    # Average over the full target length even when decoding stopped early at EOS.
    return loss.item() / target_length
def translate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    """Greedy-decode a translation of *sentence* and return it as one string.

    The decoder runs for at most max_length steps; decoding stops early when
    EOS is produced, in which case the literal token '<EOS>' ends the output.
    NOTE(review): presumably every word of *sentence* must already be in
    input_lang's vocabulary — unseen words would raise KeyError; confirm.
    """
    with torch.no_grad():
        input_tensor = tensorFromSentence(input_lang, sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()
        # Per-step encoder outputs (kept for parity with the training code;
        # not consumed by this attention-free decoder).
        encoder_outputs = torch.zeros(
            max_length, encoder.hidden_size, device=device)
        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]
        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS
        # Seed decoding with the final encoder hidden state.
        decoder_hidden = encoder_hidden
        decoded_words = []
        for di in range(max_length):
            decoder_output, decoder_hidden = decoder(decoder_input,
                                                     decoder_hidden)
            # Greedy choice: highest log-probability token at each step.
            _, topone = decoder_output.data.topk(1)
            if topone.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(output_lang.index2word[topone.item()])
            decoder_input = topone.squeeze().detach()
        return " ".join(decoded_words)
def save_translation(pairs, encoder, decoder, max_length, out_path):
    """Translate every (source, reference) pair and write results to a CSV.

    Each row holds: source sentence, model candidate, reference (with a
    trailing '<EOS>' appended to match the decoder's output convention),
    the sentence BLEU score, and a 0/1 accuracy flag (BLEU >= 0.95).

    Fix: the original built each CSV line by raw string concatenation, so
    any comma or quote inside a sentence corrupted the file; csv.writer
    quotes fields correctly. Header and column names (including the
    historical misspelling 'blue' for BLEU) are preserved because
    downstream code reads df_results.blue / df_results.accuracy.
    """
    import csv  # local import keeps the block self-contained

    with open(out_path, "w", newline="") as file:
        writer = csv.writer(file, lineterminator="\n")
        writer.writerow(["source", "candidate", "reference", "blue", "accuracy"])
        for source, reference in pairs:
            candidate = translate(encoder, decoder, source,
                                  max_length=max_length)
            reference = reference + ' <EOS>'
            blue = sentence_bleu([reference.split(" ")], candidate.split(" "))
            # Near-perfect BLEU is counted as an exact-match "accuracy" hit.
            acc = 1 if blue >= 0.95 else 0
            writer.writerow([source, candidate, reference,
                             "{:.3f}".format(blue), acc])
EncoderRNN(eng_enc_v_size, hidden_size) decoder = DecoderRNN(hidden_size, eng_dec_v_size) trainIters(encoder=encoder, decoder=decoder, n_iters=5000, pairs=training_pairs_B, valid_pairs=valid_pairs_B, encoder_path="b3_encoder2.pkl", decoder_path="b3_decoder2.pkl", status_every=200, learning_rate=0.02, teacher_forcing_ratio=0.5) save_translation(training_pairs_A, encoder, decoder, MAX_LENGTH, "b3_training2.csv") df_results = pd.read_csv("b3_training2.csv") acc = np.mean(df_results.accuracy.values) blue = np.mean(df_results.blue.values) print("mean blue score over training data = {:.3f}".format(blue)) print("mean acc over training data = {:.3f}".format(acc)) ``` ### Evaluating the trained models ### and A ``` hidden_size = 500 encoder = EncoderRNN(eng_enc_v_size, hidden_size) decoder = DecoderRNN(hidden_size, eng_dec_v_size) encoder.load_state_dict(torch.load("b3_encoder1.pkl")) decoder.load_state_dict(torch.load("b3_decoder1.pkl")) save_translation(training_pairs_A, encoder, decoder, MAX_LENGTH, "b3_training1.csv") df_results = pd.read_csv("b3_training1.csv") acc = np.mean(df_results.accuracy.values) blue = np.mean(df_results.blue.values) print("mean blue score over training data = {:.3f}".format(blue)) print("mean acc over training data = {:.3f}".format(acc)) save_translation(valid_pairs_A, encoder, decoder, MAX_LENGTH, "b3_valid1.csv") df_results = pd.read_csv("b3_valid1.csv") acc = np.mean(df_results.accuracy.values) blue = np.mean(df_results.blue.values) print("mean blue score over valid data = {:.3f}".format(blue)) print("mean acc over valid data = {:.3f}".format(acc)) ``` ### and B ``` hidden_size = 500 encoder = EncoderRNN(eng_enc_v_size, hidden_size) decoder = DecoderRNN(hidden_size, eng_dec_v_size) encoder.load_state_dict(torch.load("b3_encoder2.pkl")) decoder.load_state_dict(torch.load("b3_decoder2.pkl")) save_translation(training_pairs_B, encoder, decoder, MAX_LENGTH, "b3_training2.csv") df_results = pd.read_csv("b3_training2.csv") acc = 
np.mean(df_results.accuracy.values) blue = np.mean(df_results.blue.values) print("mean blue score over training data = {:.3f}".format(blue)) print("mean acc over training data = {:.3f}".format(acc)) save_translation(valid_pairs_B, encoder, decoder, MAX_LENGTH, "b3_valid2.csv") df_results = pd.read_csv("b3_valid2.csv") acc = np.mean(df_results.accuracy.values) blue = np.mean(df_results.blue.values) print("mean blue score over valid data = {:.3f}".format(blue)) print("mean acc over valid data = {:.3f}".format(acc)) ```
github_jupyter
``` # Running %env without any arguments # lists all environment variables # The line below sets the environment # variable CUDA_VISIBLE_DEVICES %env CUDA_VISIBLE_DEVICES = import numpy as np from datetime import datetime import pandas as pd import io import time import bson # this is installed with the pymongo package import matplotlib.pyplot as plt from scipy.misc import imread, imsave, imshow import tensorflow as tf from tensorflow.python.platform import tf_logging from tensorflow.contrib import layers from tensorflow.contrib.training import add_gradients_summaries from tensorflow.python.ops import math_ops from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.training import optimizer as tf_optimizer from tensorflow.python.ops import variables as tf_variables import os.path import tensorflow.contrib.slim as slim import inception_preprocessing import logging import resnet2 from scipy.sparse import * import tables as tb # This is a bit of magic to make matplotlib figures appear inline in the notebook # rather than in a new window. 
class MiniDataSet(object):
    """Queue-based TF1/slim input source reading zlib-compressed TFRecords.

    Wraps a slim Dataset + DatasetDataProvider over files matching
    file_path_pattern and yields decoded (image, product_id) tensors.
    """

    def __init__(self, file_path_pattern, num_examples, num_classes,
                 is_training = True, min_after_dequeue=1000,
                 batch_size = BATCH_SIZE, num_reader = INPUT_THREADS):
        super(MiniDataSet, self).__init__()
        self._num_examples = num_examples          # total records across all files
        self._num_classes = num_classes            # kept for reference; see commented use below
        self._file_path_pattern = file_path_pattern
        self._num_reader = num_reader              # parallel TFRecord readers
        self._batch_size = batch_size
        self._min_after_dequeue = min_after_dequeue
        # NOTE(review): _is_training is stored but never read inside this class.
        self._is_training = is_training

    def create_dataset(self):
        """Build the provider graph and return the ('image', 'product') tensors."""
        # Records were written with ZLIB compression, so readers need the option.
        opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
        reader = lambda : tf.TFRecordReader(options=opts)
        keys_to_features = {
            'product_id': tf.FixedLenFeature([], tf.int64,
                                             default_value=tf.zeros([], dtype=tf.int64)),
            'img_raw': tf.FixedLenFeature([], tf.string, default_value=''),
            # notice that we don't have this feature in our TFRecord, so always default provided
            'format': tf.FixedLenFeature([], tf.string, default_value='jpg')
        }
        items_to_handlers = {
            # automated decode image from features in FixedLenFeature
            'image': slim.tfexample_decoder.Image(image_key='img_raw',
                                                  format_key='format'),
            'product': slim.tfexample_decoder.Tensor('product_id'),
        }
        decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features,
                                                          items_to_handlers)
        self._dataset = slim.dataset.Dataset(
            data_sources = self._file_path_pattern,
            decoder = decoder,
            reader = reader,
            # num_readers = 8,
            num_samples = self._num_examples,
            #num_classes = self._num_classes,
            items_to_descriptions = None)
        # notice that DatasetDataProvider can automate shuffle the examples by ParallelReader using its RandomShuffleQueue
        # shuffle is disabled and num_epochs=1 because this is a single-pass
        # test/inference pipeline, not training.
        self._data_provider = slim.dataset_data_provider.DatasetDataProvider(
            self._dataset,
            num_readers = self._num_reader,
            shuffle = False, # default is True
            num_epochs = 1,
            common_queue_capacity = self._min_after_dequeue + 3 * self._batch_size,
            common_queue_min = self._min_after_dequeue,
            scope = 'test_files')
        return self._data_provider.get(['image', 'product'])
def preprocess_for_inception(input_image):
    # 160x160 resize/crop; final False disables training-time augmentation.
    return inception_preprocessing.preprocess_image(input_image, 160, 160, False)

def init_dataset(file_path_pattern):
    """Build the test input pipeline: preprocessed image batches plus product ids.

    Uses tf.train.batch (no shuffling) so batches arrive in file order;
    allow_smaller_final_batch keeps the tail of the dataset.
    """
    dataset = MiniDataSet(file_path_pattern, TOTAL_EXAMPLES, NUM_CLASS)
    org_image, product_id = dataset.create_dataset()
    image = preprocess_for_inception(org_image)
    batch_images, batch_id = tf.train.batch([image, product_id], BATCH_SIZE,\
                                            num_threads = INPUT_THREADS,\
                                            capacity = 1000 + 3 * BATCH_SIZE,\
                                            allow_smaller_final_batch = True, name = 'test_batch')
    return batch_images, batch_id
def slices_to_dims(slice_indices):
    """Convert per-row column picks into parallel (row, col) index vectors.

    Args:
        slice_indices: An [N, k] Tensor; row i holds k column indices selected
            from row i (e.g. the indices returned by tf.nn.top_k).

    Returns:
        A pair of 1-D int64 Tensors of length N * k — the row indices and the
        matching column indices, flattened row-major — suitable for zipping
        into SparseTensor coordinates.
    """
    slice_indices = tf.cast(slice_indices, tf.int64)
    num_rows = tf.shape(slice_indices, out_type=tf.int64)[0]
    row_range = tf.range(num_rows)
    # row_range expanded from [num_rows] into [num_rows, 1]
    # Encode (row r, col c) as the single integer c * num_rows + r; the
    # broadcast adds each row's index r to every scaled column entry.
    item_numbers = slice_indices * num_rows + tf.expand_dims(row_range, axis=1)
    # Flatten row-major so each scalar encodes one (row, col) element.
    item_numbers_flat = tf.reshape(item_numbers, [-1])
    # Decode: r = code % num_rows and c = code // num_rows (valid since r < num_rows).
    return item_numbers_flat % num_rows, item_numbers_flat // num_rows
"output_file*.tfrecords")#test_output_file6.tfrecords #batch_images, batch_id = init_dataset(TEST_PATH+'test_output_file6*') # use placeholder instead #last_prob = tf.constant(0, shape=[0,NUM_CLASS], dtype=tf.float32) #last_id = tf.constant(0, shape=[0], dtype=tf.int64) last_prob = tf.placeholder(tf.float32) last_id = tf.placeholder(tf.int64) with tf.device('/gpu:0'): test_predictions, test_probabilities, variables_to_restore = test_step(batch_images) test_predictions_values = inv_table.lookup(test_predictions) top_values, top_indices = tf.nn.top_k(test_probabilities, k = NUM_OF_TOPK, sorted=True) (row_indice, col_indice), value_array = slices_to_dims(top_indices), tf.reshape(top_values, [-1]) cur_prob_shape = tf.shape(test_probabilities) # concat betweent batches _, idx, count = tf.unique_with_counts(batch_id) #print(tf.dynamic_partition(batch_id, tf.not_equal(idx, tf.shape(count)[0] - 1).eval(), 2)[1].eval()) cur_id_tail, _cur_id_head = tf.dynamic_partition(batch_id, tf.cast(tf.not_equal(idx, tf.shape(count)[0] - 1), tf.int32), 2) with tf.control_dependencies([cur_id_tail, _cur_id_head]): cur_id_head = tf.concat([last_id, _cur_id_head], axis = 0) #cur_id_head = tf.concat([last_id, tf.concat(tf.split(batch_id, count)[0:-1], axis = 0)], axis = 0) #cur_id_tail = tf.split(batch_id, count)[-1] cur_prob_tail, _cur_prob_head = tf.dynamic_partition(test_probabilities, tf.cast(tf.not_equal(idx, tf.shape(count)[0] - 1), tf.int32), 2) with tf.control_dependencies([last_prob, _cur_prob_head]): cur_prob_head = tf.concat([last_prob, _cur_prob_head], axis = 0) #cur_prob_head = tf.concat([last_prob, tf.concat(tf.split(test_probabilities, count[0:-1]), axis = 0)], axis = 0) #cur_prob_tail = tf.split(test_probabilities, count)[-1] with tf.control_dependencies([cur_id_head, cur_prob_head]): raw_id, idx, _ = tf.unique_with_counts(cur_id_head) mean_prob = tf.segment_mean(cur_prob_head, idx) mean_label = inv_table.lookup(tf.argmax(mean_prob, 1)) with 
tf.control_dependencies([mean_prob, mean_label]): #last_id = cur_id_tail #last_prob = cur_prob_tail # last partition may have nothing to concat raw_id_tail, idx_tail, _ = tf.unique_with_counts(cur_id_tail) mean_prob_tail = tf.segment_mean(cur_prob_tail, idx_tail) tail_label = inv_table.lookup(tf.argmax(mean_prob_tail, 1)) restore_saver = tf.train.Saver(variables_to_restore) def load_pretrain(sess): restore_saver.restore(sess, INCEPTION_MODEL_PATH) # no need for specify local_variables_initializer and tables_initializer, Supervisor will do this via default local_init_op init_op = tf.group(tf.global_variables_initializer()) # Pass the init function to the supervisor. # - The init function is called _after_ the variables have been initialized by running the init_op. # - use default tf.Saver() for ordinary save and restore # - save checkpoint every 1.3 hours # - manage summary in current process by ourselves for memory saving # - no need to specify global_step, supervisor will find this automately # - initialize order: checkpoint -> local_init_op -> init_op -> init_func sv = tf.train.Supervisor(logdir=LOG_PATH, init_fn = load_pretrain, init_op = init_op, summary_op = None, save_model_secs=0) step = 0 lats_pred = [] last_batch_id = [] last_feed_id = np.empty([0]) last_feed_prob = np.empty([0, NUM_CLASS]) save_file_name = OUTPUT_PATH.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) prob_save_file_name = PROB_SAVE_FILE.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) hdf5_file = tb.open_file(prob_save_file_name, 'w') total_prob_store = hdf5_file.create_earray(hdf5_file.root, 'prob', tb.Float32Atom(), shape=(0,), filters=tb.Filters(complevel=5, complib='zlib')) total_row_indice = hdf5_file.create_earray(hdf5_file.root, 'row', tb.Int64Atom(), shape=(0,), filters=tb.Filters(complevel=5, complib='zlib')) total_col_indice = hdf5_file.create_earray(hdf5_file.root, 'col', tb.Int64Atom(), shape=(0,), filters=tb.Filters(complevel=5, complib='zlib')) 
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) #last_row_to_save = 0 with sv.managed_session(config=tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)) as sess: #with sv.prepare_or_wait_for_session(config=tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)) as sess: # Here sess was either initialized from the pre-trained-checkpoint or # recovered from a checkpoint saved in a previous run of this code. while True: if sv.should_stop(): tf_logging.info('Supervisor emit finished!') break start_time = time.time() with tf.device('/gpu:0'): #test_pred, test_prob, test_batch_id = sess.run([test_predictions_values, test_probabilities, batch_id]) cur_batch_id, last_feed_id, last_feed_prob, test_pred, test_prob, test_batch_id, lats_pred, last_batch_id, sparse_row, sparse_col, sparse_value, sparse_shape = sess.run([batch_id, cur_id_tail, cur_prob_tail, mean_label, mean_prob, raw_id, tail_label, raw_id_tail, row_indice, col_indice, value_array, cur_prob_shape], feed_dict = {last_prob: last_feed_prob, last_id: last_feed_id}) #print(csr_matrix((sparse_value, (sparse_row, sparse_col)), shape=sparse_shape).toarray()) #total_prob_store[last_row_to_save:last_row_to_save + sparse_shape[0],:] = csr_matrix((sparse_value, (sparse_row, sparse_col)), shape=sparse_shape).toarray() total_prob_store.append(sparse_value) total_row_indice.append(sparse_row) total_col_indice.append(sparse_col) #last_row_to_save += sparse_shape[0] time_elapsed = time.time() - start_time if step % 200 == 0: tf_logging.info('Step: {} of {}.'.format(step, NUM_STEPS)) tf_logging.info('Validation Speed: {:5.3f}sec/batch'.format(time_elapsed)) tf_logging.info('Roughly {:6.3f} hours to go.'.format( time_elapsed*( (NUM_STEPS-step) > 0 and (NUM_STEPS-step)/3600. 
or 0.001 ) )) tf_logging.info('Test Label: {}'.format(test_pred)) tf_logging.info('Test Prob: {}'.format(test_prob)) #tf_logging.info('Test Ids: {}'.format(test_batch_id)) #print(len(test_prob[0])) df = pd.DataFrame({'_id' : test_batch_id, 'category_id' : test_pred}) #df = pd.DataFrame([test_batch_id, test_pred], columns=["_id", 'category_id']) if not os.path.isfile(save_file_name): df.to_csv(save_file_name, mode='a', index=False, sep=',') else: df.to_csv(save_file_name, mode='a', index=False, sep=',', header=False) # if not os.path.isfile(ID_SAVE_FILE): # pd.DataFrame({'_id' : cur_batch_id}).to_csv(ID_SAVE_FILE, mode='a', index=False, sep=',') # else: # pd.DataFrame({'_id' : cur_batch_id}).to_csv(ID_SAVE_FILE, mode='a', index=False, sep=',', header=False) step += 1 # tf_logging.info('BB ID: {}'.format(bb_id)) # tf_logging.info('Test Label: {}'.format(test_pred)) # #tf_logging.info('Test Prob: {}'.format(test_prob)) # tf_logging.info('Test Ids: {}'.format(test_batch_id)) # tf_logging.info('Last Label: {}'.format(lats_pred)) # #tf_logging.info('Test Prob: {}'.format(test_prob)) # tf_logging.info('Last Ids: {}'.format(last_batch_id)) # if step > 3: # break df = pd.DataFrame({'_id' : last_batch_id, 'category_id' : lats_pred}) df.to_csv(save_file_name, mode='a', index=False, sep=',', header=False) hdf5_file.close() tf_logging.info('Finished evaluation! ') print(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) test_data = pd.read_csv(save_file_name) print(test_data.tail()) # 1768182 #(1768182, 3095080) #Test11111 - (700, 1195) # h5 = tb.open_file(DATASET_PATH + 'logs/probs_2017-10-19 21:30:55.h5', 'r') # print(h5.root.prob.shape) # print(h5.root.row.shape) # print(h5.root.col.shape) # print(csr_matrix((h5.root.prob[:], (h5.root.row[:], h5.root.col[:])), shape=(TOTAL_EXAMPLES,NUM_CLASS)).toarray()) ```
github_jupyter
# Make sure this SageMakerNotebookExecutionRole has access to Kendra

```
import boto3
import sagemaker
import pandas as pd

sess = sagemaker.Session()
bucket = sess.default_bucket()
role = sagemaker.get_execution_role()
region = boto3.Session().region_name

sm = boto3.Session().client(service_name='sagemaker', region_name=region)
kendra = boto3.client('kendra')

%store -r noheader_train_s3_uri

print(noheader_train_s3_uri)

!aws s3 ls $noheader_train_s3_uri
```

# Create Data Access Role for Kendra

## Create Policy

```
# assume_role_policy_doc = {
#     "Version": "2012-10-17",
#     "Statement": [
#         {
#             "Effect": "Allow",
#             "Principal": {
#                 "Service": "kendra.amazonaws.com"
#             },
#             "Action": "sts:AssumeRole"
#         }
#     ]
# }
```

## Create Role and Attach Policies

```
# iam_kendra_role_name = 'DSOAWS_Kendra'

# import json
# import boto3
# from botocore.exceptions import ClientError

# try:
#     iam = boto3.client('iam')
#     iam_role_kendra = iam.create_role(
#         RoleName=iam_kendra_role_name,
#         AssumeRolePolicyDocument=json.dumps(assume_role_policy_doc),
#         Description='DSOAWS Kendra Role'
#     )
# except ClientError as e:
#     if e.response['Error']['Code'] == 'EntityAlreadyExists':
#         # BUGFIX: the original referenced the undefined names
#         # iam_comprehend_role_name / iam_role_comprehend here
#         # (copy-paste from a Comprehend notebook).
#         iam_role_kendra = iam.get_role(RoleName=iam_kendra_role_name)
#         print("Role already exists")
#     else:
#         print("Unexpected error: %s" % e)

# kendra_s3_policy_doc = {
#     "Version": "2012-10-17",
#     "Statement": [
#         {
#             "Action": [
#                 "s3:GetObject"
#             ],
#             "Resource": [
#                 "arn:aws:s3:::{}/*".format(bucket)
#             ],
#             "Effect": "Allow"
#         },
#         {
#             "Action": [
#                 "s3:ListBucket"
#             ],
#             "Resource": [
#                 "arn:aws:s3:::{}".format(bucket)
#             ],
#             "Effect": "Allow"
#         },
#         {
#             "Action": [
#                 "s3:PutObject"
#             ],
#             "Resource": [
#                 "arn:aws:s3:::{}/*".format(bucket)
#             ],
#             "Effect": "Allow"
#         }
#     ]
# }

# print(kendra_s3_policy_doc)
```

# Attach Policy to Role

```
# response = iam.put_role_policy(
#     RoleName=iam_kendra_role_name,
#     PolicyName='DSOAWS_KendraPolicyToS3',
#     PolicyDocument=json.dumps(kendra_s3_policy_doc)
# )
```

# Add S3 Data Source

```
prefix = 'kendra'

s3_output_job = 's3://{}/{}/{}'.format(bucket, prefix, 'output')
print(s3_output_job)

print("Create an S3 data source")

name = 'amazon-reviews'
description = 'amazon-reviews'
# NOTE(review): index_id and kendra_role_arn are hard-coded to one specific
# account/index; they must be replaced to run this notebook elsewhere.
index_id = 'e9d93f01-5fd3-46ba-bc73-41fae0185d3a'
kendra_role_arn = 'arn:aws:iam::835319576252:role/service-role/AmazonKendra-us-east-1-dsoaws'

configuration = {
    'S3Configuration': {
        'BucketName': bucket,
        'InclusionPrefixes': ['data/amazon_reviews_us_Digital_Software_v1_00_nohe'],  # Length is limited
    }
}

data_source_response = kendra.create_data_source(
    Configuration = configuration,
    Name = name,
    Description = description,
    RoleArn = kendra_role_arn,
    Type = 'S3',
    IndexId = index_id
)

print(data_source_response)
```

# Wait for Kendra Data Source Creation

```
import time

print(data_source_response)
data_source_id = data_source_response['Id']

# Poll every 30s until the data source leaves the CREATING state.
while True:
    data_source_description = kendra.describe_data_source(
        Id = data_source_id,
        IndexId = index_id
    )
    status = data_source_description['Status']
    print('Creating data source. Status: ' + status)
    if status != 'CREATING':
        break
    time.sleep(30)
```

# Train the FAQ

```
faq_path = {
    'Bucket': bucket,
    'Key': 'data/amazon_reviews_us_Digital_Software_v1_00_header.csv'
}

training_job = kendra.create_faq(
    S3Path = faq_path,
    Name = 'amazon-reviews-faq',
    IndexId = index_id,
    RoleArn = kendra_role_arn
)

# training_job = comprehend.create_document_classifier(
#     DocumentClassifierName='Amazon-Customer-Reviews-Classifier-'+ id,
#     DataAccessRoleArn=iam_role_comprehend_arn,
#     InputDataConfig={
#         'S3Uri': noheader_train_s3_uri
#     },
#     OutputDataConfig={
#         'S3Uri': s3_output_job
#     },
#     LanguageCode='en'
# )

print(training_job)

print("Synchronize the data source.")

sync_response = kendra.start_data_source_sync_job(
    Id = data_source_id,
    IndexId = index_id
)

print(sync_response)
```

# _Please Wait Until the ^^ Data Source ^^ is Sync'd Above._

```
query = '5'

response = kendra.query(
    QueryText = query,
    IndexId = index_id)

print(response)

print ('\nSearch results for query: ' + query + '\n')

for query_result in response['ResultItems']:
    print('-------------------')
    print('Type: ' + str(query_result['Type']))
    if query_result['Type']=='ANSWER':
        answer_text = query_result['DocumentExcerpt']['Text']
        print(answer_text)
    if query_result['Type']=='DOCUMENT':
        if 'DocumentTitle' in query_result:
            document_title = query_result['DocumentTitle']['Text']
            print('Title: ' + document_title)
        document_text = query_result['DocumentExcerpt']['Text']
        print(document_text)
    print ('------------------\n\n')
```
github_jupyter
# Working with time series data

```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.options.display.max_rows = 8
```

## Case study: air quality data of European monitoring stations (AirBase)

[AirBase](http://www.eea.europa.eu/data-and-maps/data/airbase-the-european-air-quality-database-8#tab-data-by-country) (The European Air quality dataBase): hourly measurements of all air quality monitoring stations from Europe.

I downloaded and preprocessed some of the data ([python-airbase](https://github.com/jorisvandenbossche/python-airbase)): `data/airbase_data.csv`. This file includes the hourly concentrations of NO2 for 4 different measurement stations:

- FR04037 (PARIS 13eme): urban background site at Square de Choisy
- FR04012 (Paris, Place Victor Basch): urban traffic site at Rue d'Alesia
- BETR802: urban traffic site in Antwerp, Belgium
- BETN029: rural background site in Houtem, Belgium

See http://www.eea.europa.eu/themes/air/interactive/no2

## Importing the data

Import the csv file:

```
!head -5 data/airbase_data.csv
```

As you can see, the missing values are indicated by `-9999`. This can be recognized by `read_csv` by passing the `na_values` keyword:

```
data = pd.read_csv('data/airbase_data.csv', index_col=0, parse_dates=True, na_values=[-9999])
```

## Exploring the data

```
data.head(3)
data.tail()
data.plot(figsize=(12,6))
```

This does not say too much .. We can select part of the data (eg the latest 500 data points):

```
data[-500:].plot(figsize=(12,6))
```

Or we can use some more advanced time series features -> next section!

## Working with time series data

When we ensure the DataFrame has a `DatetimeIndex`, time-series related functionality becomes available:

```
data.index
```

Indexing a time series works with strings:

```
data["2010-01-01 09:00": "2010-01-01 12:00"]
```

A nice feature is **"partial string" indexing**, where we can do implicit slicing by providing a partial datetime string.

E.g. all data of 2012:

```
data.loc['2012']
```

Older pandas versions allowed `data['2012']` for this; since pandas 2.0, selecting rows with a partial datetime string through `[]` raises a `KeyError`, so use `.loc`. (With a DatetimeIndex, pandas interprets the string as a datetime slice rather than a column name.)

Or all data of January up to March 2012:

```
data['2012-01':'2012-03']
```

Time and date components can be accessed from the index:

```
data.index.hour
data.index.year
```

<div class="alert alert-success">
<b>EXERCISE</b>: select all data starting from 1999
</div>

<div class="alert alert-success">
<b>EXERCISE</b>: select all data in January for all different years
</div>

<div class="alert alert-success">
<b>EXERCISE</b>: select all data in January, February and March for all different years (use `isin` method)
</div>

<div class="alert alert-success">
<b>EXERCISE</b>: select all 'daytime' data (between 8h and 20h) for all days
</div>

```
data[(data.index.hour >= 8) & (data.index.hour < 20)]
data.between_time('08:00', '20:00')
```

## The power of pandas: `resample`

A very powerful method is **`resample`: converting the frequency of the time series** (e.g. from hourly to daily data).

The time series has a frequency of 1 hour. I want to change this to daily:

```
data.resample('D').mean().head()
```

`resample` only groups the data by the new frequency; you chain an aggregation method such as `mean`, `max`, ... onto it. (Very old pandas accepted a `how=` keyword, e.g. `resample('D', how='max')`, but that has been removed.)

```
data.resample('D').max().head()
```

The string to specify the new time frequency: http://pandas.pydata.org/pandas-docs/dev/timeseries.html#offset-aliases These strings can also be combined with numbers, eg `'10D'`.
Further exploring the data: ``` data.resample('M').plot() # 'A' # data['2012'].resample('D').plot() ``` <div class="alert alert-success"> <b>QUESTION</b>: plot the monthly mean and median concentration of the 'FR04037' station for the years 2009-2012 </div> <div class="alert alert-success"> <b>QUESTION</b>: plot the monthly mininum and maximum daily concentration of the 'BETR801' station </div> <div class="alert alert-success"> <b>QUESTION</b>: make a bar plot of the mean of the stations in year of 2012 </div> ## Combination with groupby `resample` can actually be seen as a specific kind of `groupby`. E.g. taking annual means with `data.resample('A', 'mean')` is equivalent to `data.groupby(data.index.year).mean()` (only the result of `resample` still has a `DatetimeIndex`). ``` data.groupby(data.index.year).mean().plot() ``` But, `groupby` is more flexible and can also do resamples that do not result in a new continuous time series, e.g. by grouping by the hour of the day to get the diurnal cycle. <div class="alert alert-success"> <b>QUESTION</b>: how does the *typical monthly profile* (averaged over years) look like for the different stations? (*Hint*: First group the data on month index, then calculate and pot mean). </div> <div class="alert alert-success"> <b>QUESTION</b>: plot the 0.95 quantile of the concentration in 'BETR801' and 'BETN029' for all **weeks** of 2011 (*Hint*: use `quantile` aggregation function) </div> <div class="alert alert-success"> <b>QUESTION</b>: The typical diurnal (24h) profile for the different stations? </div> <div class="alert alert-success"> <b>QUESTION</b>: What is the difference in the typical diurnal profile between week and weekend days for station FR04012? (*Hint* Start by adding a weekday column to the data frame, then use `isin` to select Monday to Friday and weekend days). 
</div>

## Extra exercises

If you are done, you can give a try to these exercises:

<div class="alert alert-success">
<b>QUESTION</b>: What are the number of exceedances of hourly values above the European limit 200 µg/m3 ?
</div>

<div class="alert alert-success">
<b>QUESTION</b>: And are there exceedances of the yearly limit value of 40 µg/m3 since 2000?
</div>

<div class="alert alert-success">
<b>QUESTION</b>: Visualize the typical week profile for the different stations as boxplots.
</div>

(Tip: the boxplot method of a DataFrame expects the data for the different boxes in different columns.)

<div class="alert alert-success">
<b>QUESTION</b>: Calculate the correlation between the different stations
</div>

## Acknowledgement

> *© 2015, Stijn Van Hoey and Joris Van den Bossche (<mailto:stijnvanhoey@gmail.com>, <mailto:jorisvandenbossche@gmail.com>)*.

> *© 2015, modified by Bartosz Teleńczuk (original sources available from https://github.com/jorisvandenbossche/2015-EuroScipy-pandas-tutorial)*

> *Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*

---
github_jupyter
# Simple Naive Bayes Classifier

## T1. Load a dataset

The following code loads a dataset consisting of text messages and spam-ham labels.

```
from typing import List, Tuple, Dict, Iterable, Set
from collections import defaultdict
import re
import math
import pandas as pd

url = 'https://raw.githubusercontent.com/mlee-pnu/IDS/main/spam_dataset.csv'
df = pd.read_csv(url)

# Class counts; these are reused below as the Naive Bayes priors.
hams = df['Category'].value_counts()["ham"]
spams = df['Category'].value_counts()["spam"]
print(df['Category'].value_counts())
```

## T2. Spam filter for individual words

We first defined a function ***tokenize()*** to convert a given text into a set of words. Using the function, we now try to count the frequency of each word in each class (spam and ham). Complete the following code and answer the following questions:

```
def tokenize(text: str) -> Set[str]:
    """Lower-case *text* and return the set of word tokens (runs of letters, digits, apostrophes)."""
    text = text.lower()
    all_words = re.findall("[a-z0-9']+", text)
    return set(all_words)

tokens: Set[str] = set()                              # vocabulary over both classes
token_spam_counts: Dict[str, int] = defaultdict(int)  # spam messages containing each token
token_ham_counts: Dict[str, int] = defaultdict(int)   # ham messages containing each token

spam = df[df.Category == 'spam']
ham = df[df.Category == 'ham']

for msg in spam['Message'].to_list():
    for token in tokenize(msg):
        tokens.add(token)
        token_spam_counts[token] += 1

for msg in ham['Message'].to_list():
    for token in tokenize(msg):
        tokens.add(token)
        token_ham_counts[token] += 1

word = "free"
n_word_spam = token_spam_counts[word]  # frequency of the word in spam messages
n_word_ham = token_ham_counts[word]    # frequency of the word in ham messages

p_spam = spams / (hams + spams)  # P(spam)
p_ham = hams / (hams + spams)    # P(ham)

# Class-conditional likelihoods. Algebraically the same as the original
# (count/total)/prior form, without the needless detour through the totals.
p_word_given_spam = n_word_spam / spams  # P(word|spam)
p_word_given_ham = n_word_ham / hams     # P(word|ham)

# Posteriors by Bayes' rule; the message totals cancel out.
p_spam_given_word = n_word_spam / (n_word_spam + n_word_ham)  # P(spam|word)
p_ham_given_word = n_word_ham / (n_word_spam + n_word_ham)    # P(ham|word)
print(p_spam_given_word, p_ham_given_word)
```

## T3. Spam filter that combines words: Naive Bayes

You received a text message "just do it" from an unknown sender. Complete the function ***predict()*** that outputs the probability of the message being spam and the predicted label of the message.

```
text = "just do it"

def _spam_probability(text: str, k: float) -> float:
    """Return P(spam | text) under a Bernoulli Naive Bayes model over the full vocabulary.

    k is the Laplace smoothing factor and must be > 0: with k = 0 any token that
    occurs in only one class yields math.log(0) -> ValueError (the bug in the
    original predict(), which used k = 0.0).
    """
    text_tokens = tokenize(text)  # hoisted: the original re-tokenized the text for every vocabulary word
    log_spam = log_ham = 0.0
    for token in tokens:
        p_token_spam = (token_spam_counts[token] + k) / (spams + 2 * k)  # P(token|spam)
        p_token_ham = (token_ham_counts[token] + k) / (hams + 2 * k)     # P(token|ham)
        if token in text_tokens:
            log_spam += math.log(p_token_spam)
            log_ham += math.log(p_token_ham)
        else:
            log_spam += math.log(1.0 - p_token_spam)
            log_ham += math.log(1.0 - p_token_ham)
    # Compare the classes through the log-odds instead of exponentiating the raw
    # log-likelihoods: over thousands of tokens exp() underflows to 0.0 and the
    # original p_if_spam / (p_if_spam + p_if_ham) can divide by zero.
    log_odds = (log_spam + math.log(spams / (hams + spams))) \
             - (log_ham + math.log(hams / (hams + spams)))
    if log_odds >= 0:
        return 1.0 / (1.0 + math.exp(-log_odds))
    odds = math.exp(log_odds)
    return odds / (1.0 + odds)

def predict(text: str):
    """Return (P(spam|text), predicted label) using a small smoothing factor."""
    prob = _spam_probability(text, k=0.5)  # BUGFIX: was k=0.0, which crashes on class-exclusive tokens
    label = "spam" if prob > 0.5 else "ham"
    return prob, label

print(predict(text))
```

## T4. Smoothing method

You again received two text messages from unknown senders. Complete the function ***spamFilter()*** that classifies a given message. You may want to apply a smoothing method for this task.

```
textA = "reward! download your free ticket from our website www.pnu.edu"
textB = "call me and get your money back"

def spamFilter2(text: str):
    """Classify *text* with Laplace smoothing k = 1; return (label, P(spam|text)).

    Shares _spam_probability() with predict() instead of duplicating the whole
    likelihood loop as the original did.
    """
    prob = _spam_probability(text, k=1.0)
    label = "spam" if prob > 0.5 else "ham"
    return label, prob

print(spamFilter2(textA))
print(spamFilter2(textB))
```
github_jupyter
<a href="https://colab.research.google.com/github/yanin2020/Curso-de-Python/blob/master/Phyton_curso.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> [CURSO FRECODECAMP](https://www.youtube.com/watch?v=DLikpfc64cA&ab_channel=freeCodeCampEspa%C3%B1ol) # ASIGNACIONES EN **PYTHON** ``` edad *= 3 edad edad /= 2 edad edad -= 2 edad edad = 56 edad += 3 edad num = 6 num %= 2 print (num) ``` Sentencia **Condicional** Instrucción o un grupo de instrucciones cuya ejecución depende del valor de una condición booleana. ``` temp = 15 if temp < 25: print("Frío") ``` **Condicional** ``` temp = 15 if temp < 25: print ("Frío") else: print ("Calor") temp = 30 if temp < 25: print ("Frío") else: print ("Calor") ``` Cláusula "elif" ``` temp = 0 if temp <=0: print("Muy Frío") elif temp < 25: print("Frío") else: print("Calor") temp = 18 if temp <=0: print("Muy Frío") elif temp < 25: print("Frío") else: print("Calor") ``` **Puede haber más de una cláusula "elif",pero solo una cláusula "else"** **COMENTARIOS** ``` # Número de estudiantes # registrados en el salón 56 num_estudiantes = 33 print(num_estudiantes) ``` LISTA: Estructura de datos utilizada para almacenar múltiples valores en secuencia. ``` letras = ["a", "b", "c", "d"] letras[0] ``` Como principal diferencia entre las listas y las tuplas en Python es que las listas son mutables y llevan corchetes,pero las tuplas son inmutables y llevan parentesís. **Diccionarios:** Colección de pares clave-valor. **Caraterísticas**: 1. Las claves deben ser únicas e inmutables. 2. Los valores asociados pueden ser de cualquier tipo. 3. La clave se usa para acceder a su valor asociado. 4. Los pares clave-valor pueden ser modificados, añadidos, y eliminados. ``` # Acceder a un diccionario. 
edades = {"Gino": 15, "Nora": 45} edades["Gino"] edades = {"Gino": 15, "Nora": 45} edades["Nora"] # El método get cumple la misma función edades = {"Gino": 15, "Nora": 45} edades.get("Gino") # Cómo agregar valores nuevos al diccionario. edades = {"Gino": 15, "Nora": 45} edades["Rita"] = 67 edades # Cambiando valor de una llave. edades = {"Gino": 15, "Nora": 45} edades["Gino"] = 17 edades # Borrar claves de un diccionario. edades = {"Gino": 15, "Nora": 45} del edades["Gino"] edades # Busca si existe una llave en el diccionario y retorna un valor booleano que es true o false. edades = {"Gino": 15, "Nora": 45} "Gino" in edades edades = {"Gino": 15, "Nora": 45} "Emily" in edades edades = {"Gino": 15, "Nora": 45} "Nora" in edades ``` **Ciclos For**: Es la estructura de control en programación que permite ejecutar una o varias líneas de código múltiples veces. Los usamos cuando sabemos con antelación cuántas veces debemos repetir ciertas. ![ciclo for.JPG](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/4RDQRXhpZgAATU0AKgAAAAgABAE7AAIAAAADcGMAAIdpAAQAAAABAAAISpydAAEAAAAGAAAQwuocAAcAAAgMAAAAPgAAAAAc6gAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFkAMAAgAAABQAABCYkAQAAgAAABQAABCskpEAAgAAAAMzMAAAkpIAAgAAAAMzMAAA6hwABwAACAwAAAiMAAAAABzqAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjAyMToxMjoxNSAxNjoyOTo0NgAyMDIxOjEyOjE1IDE2OjI5OjQ2AAAAcABjAAAA/+ELFWh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8APD94cGFja2V0IGJlZ2luPSfvu78nIGlkPSdXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQnPz4NCjx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iPjxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+PHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9InV1aWQ6ZmFmNWJkZDUtYmEzZC0xMWRhLWFkMzEtZDMzZDc1MTgyZjFiIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iLz48cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0idXVpZDpmYWY1YmRkNS1iYTNkLTExZGEtYWQzMS1kMzNkNzUxODJmMWIiIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyI+PHhtcDpDcmVhdGVEYXRlPjIwMjEtMTItMTVUMTY6Mjk6NDYuMzAxPC94bXA6Q3JlYXRlRGF0ZT48L3JkZjpEZXNjcmlwdGlvbj48cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0idXVpZDpmYWY1YmRkNS1iYTNkLTExZGEtYWQzMS1kMzNkNzUxODJmMWIiIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyI+PGRjOmNyZWF0b3I+PHJkZjpTZXEgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj48cmRmOmxpPnBjPC9yZGY6bGk+PC9yZGY6U2VxPg0KCQkJPC9kYzpjcmVhdG9yPjwvcmRmOkRlc2NyaXB0aW9uPjwvcmRmOlJERj48L3g6eG1wbWV0YT4NCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgPD94cGFja2V0IGVuZD0ndyc/Pv/bAEMABwUFBgUEBwYFBggHBwgKEQsKCQkKFQ8QDBEYFRoZGBUYFxseJyEbHSUdFxgiLiIlKCkrLCsaIC8zLyoyJyorKv/bAEMBBwgICgkKFAsLFCocGBwqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKv/AABEIAWABSgMBIgACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/APpCiiikMKKKKACiiigAooooAKKKKACiiigAooooAKKKKACikLBVJYgAdSa5PW/Htjp7tBZD7VMOCQflH41z18TSw8earKx0UMNVxEuWlG51tQzXdvbjM0yJ9WAryPUPGms35K/aDAh/hi4/WqFhJLc3u+eV5CBnLMTXz2J4ipwT9lG/qe7Dh+qo81WSXpqeqXGt2O8lrlPoDmqzeINOX/lvn6Ka4mivkJ5pVnJyaWpvHK6SW7O0/wCEi0/P+tP/AHzUqa3p79LhR9RXDFgv3iBnpk0VmsxqrdIbyyl3Z6FFe20vMVwh+jVqwSeZGDnNeUAlTkEiifWdR06NJLS7kQhumcg17GW557GracdH2OeplDnpCWvmeuUV53pHxJkUiPV4Aw/56R9R+Fd1Yaja6nbCeymWVD6Hp9a+4w2OoYpfu5a9up4+KwNfCu1WOnfoWqKKK7DiCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8/wDiD4gmhlXSrVym5d0rDqQe1efDiu5+IujTLerqkalonUK+B90j1rhq/O83dV4uXtPl6H6Jk8aSwkfZ/P1CustLG1h8GWl6IwLma6MRfuRgnH6VyeOK6S7vIofh/
pSCVPOGo7vL3DcRtbtWWCpQqxqRl/KPM3NRhyPqao8P3e/YTGGIyoLct9KhRJI9Ju8QxZRgGkc8r9KuPJFJ42jlWVHCIi7g2Qvynj2qGSaP+ytYIkU7pzt+YfN83auFYenGdo9NPwPDderKPveT/EyL/QWlitNQuYVnSBy6Wzy7PM4xn3x1qJ5LnTLSGG4Ec1s7hjcDkxE9Fz3X0NaguYfEDRyq0dtLZxmOWGdgCn+0PY1Uu2TT/D0VxaoLlJXKNATkqp6sF7jvj8Rivbw0KKg8DiY2T1UvU5a06rkq8Hd32NBNFuHCrviErruWIv8AMR9KwdYykPluMMGwR6YrX0WwFnqcd1/aMNxaxRq8s7SDEHHEYb+Ljn26Vzuraguo6lPPFnymkYpkYyM8GvHxOBjh6ll0f3nsZbXnXqK+1r+nkUq09A1u40TUo5oXPlkgSJnhhWZVzSdMn1fUorS2Qks3zHHCjuTV4Z1FWj7Le57uJVN0ZKr8Nj3KCVZ7eOVDlXUMPxp9RW0ItrWKBeRGgUfgMVLX6gr21Pyx2voFFFFMQUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAMlijmiaOZA6MMFWGQa4bXPh0krtPo0oiJ5MMn3fwPau8orlxODo4mNqqudWGxdbCy5qUrHh99oOqacxF1ZyKB/Eo3L+YribnTPE73rzQ6paKAx8vNuNyrnpnFfUjIGGGAI9DVC60DS7wf6RYwuT32c1wYXAVMBKUsM4u/8yuexPOI4hJYiL0/ldj500qLxRYTMs2pWkkEhy6CAKc+vArpIotSmRdl/bkDoPKHH6V6lN4B0OXJWB48/3HIrH1rwb4d0TTJb+/vpLSGNT+8kkwoPYZrlxeHzCtPn5Kd/JWKWJy9reaOBudG1O4mSc3kHnR/dbyxyPfipmttVTDSahbggY3GMf4UnwkvdO8d2V4mpakE1CK5dIreB8Foh0bH9a9NX4e6PnMpnlP8AtSGsPqmaTSjNQsttCPb4CLupSPKxNMkbxStDJz8rpGFx69qfbWdzdvttYJJmPZFJr2G28I6HasGTT4iR3cbv51qxWsEC7YIkjA7KMVKyGpVm51pJX7HTHPKNCHJQg/meYaT8PdSvGV79ltIu4zlz+HavQtH0Gx0S28qyiAJ+9IeWb6mtIDFFe5hMuw+E1gte7PFxeY4jF6VHp2QUUUV6B54UUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFZfiXQrXxN4bvtHv0DQ3cRQ5HQ9j+BrTZgqksQAOpNcH4o8aFi9jpD8fdecfyH+NKUuVamNWtGlG7OZ+AnwyfwXbanqOqIpv5p2gjbrtiU9fx617HXkWheJLvRLrcGMsDH95Gx6+4969R0zVLXVrNbizkDKeo7qfQ1MZqRnQxMay7MuUUUVdzqCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKjnnitoGmnkWONBlmY4AFQ6hqNtplo1xeSBEX8z7CvL/EPia51yYpzFaqcrFnr7n1qJTUTmr4iNFeZe8TeMJdUZrXT2aK0BwW6NJ/gK5akorllJt6ng1KkqkryF71d0rV7vR7sT2b4/vIfuuPQ1RopJ2JjJxd0ewaF4gtNctt0DbJlH7yEnlf8AEe9a1eIWl3PYXSXNpIY5UPDCvTfDfiuDWoxDcbYbwDlOz+4/wrphPm0Z7eHxaqLlludFRRRWp3BRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFGR60jMFUsxAA5JJ4FAC0UgIYZU5B6Ed6WgAooooAKKKKACs3WdbtdFtTLcMC5+5GOrVT8ReJ7fRISikS3TD5Yw
enua8wvr+51K6a4u5C7se/QewrOdTl2ODEYtU/djuWdY1q61u6M10+EB+SMdFFZtLSVytt6s8WUpSd2FFFFIkKKWkoGFPjkeKRZImKOpyGB5FMooEnZ3PRfC/jJbwLZ6owSfosh4D/X3rsQcjivCgSDkHBHeu18LeMmhKWWrPmPokx6r9a6IVOjPWw2Mv7kz0CikR1kQOjBlIyCOhpa3PVCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKhuLu2tFDXdxFArHAMrhQfzqv/belf9BOz/8AAhP8aAL1FUf7b0n/AKCdl/4EJ/jR/belf9BSy/8AAhP8aAPHvAkstx4y1vUNQt9bmNpqVxs1B7wiyhVRwrpu7fSm6n8R9T1vQvE2kXUlpcQTeG7u9t7yziliAKrtwpcDePm+8vpXp1ha+E9MtL61s7ixWDUJXmuY2ugwkZxhurd/SsS08EfD2yjnSF4GWezksWEmps+Ld/vRrlztX0AxigDH0vxvq2l6n4Z0i9WCy0m40yz8m8uYpG+2SNENyLIPlVxgYDdaZpvxP1+4fRdYurOwGiazqL2MMCM32iLBIVmPQ9OR2rpn8OeCpdQsbuS5hdrAReRC2pMYVMa7UYx79pYDuRmorPwl4BsNeXV7U2aXKSNLGhvy0UTt951jLbVJ9QKAO4oqj/belf8AQUsv/AhP8aBrWlE4Gp2ZJ6AXCf40AXq5PxP4xi00NZ6eyyXXRm6iP/69UPFHjTO+x0d/aScfyX/GuFJJOSck9TWNSpbRHl4nF/Zpj5ppLiZpZ3Z5GOWZjkmmUUVz3PJuFb2sJ/Z2i2kltbaYkcliZJJ7qXEm/OMqu4E461g1dv8AU4byKASaLBPNbW3kRTyXjLjqQdoQ9z61UbdTooSir8xPcaKLSSfzrkCC3sheNPt4ZSOAPcnimxaXDPp1xcw3Tn7PD5r7rd1THoHIwT9Kiv8AV7i98PQaSIY1AKrNc7zuaNWLBNuOmcd6n1HX5Lq3njgssNPafZmZ7x/LUDGCseNo6c1S5DdRwzJ9QsVub4CNlhit9KW5YKv3j3/GqH2GJbWwuJrjZHeI78RlioXHYck89BU1vrTJqRmuLNJbf7Ctm0XmEGQAcnOOP1qS38QPDeafIumQww2MciJHHOzEhsAfMV4Ix1/Sn7rB+wlq2SDw+JLrTIorhguolwhlhaNlK9cqeR1FY9wbVbt4bK6F2kYw0yIQhbJBAJ64x1FaMGuyxXmmSLZYj01pmQS3rzSSl9uNzsvXg5P6VkQoyQqrgbsc7eRmpko20Mqyope5uPooorM5DpvDXi2bSGW3vC0tmTj1MfuPb2r0u2uoLy3Se2kWSNxlWU8GvDq2dA8RXOh3GY8yWzH95CTx9R6GtoVLaM9DDYtw92ex67RWFdeM9AsNAOs6jqUFpZLwzzNghv7uOpPsK4DSv2jfBup+JJ7KSZrDToY8rf3YKiZs9FQAkD3P5V0ntJpq6PXKK4X/AIXX8Of+hrs/++X/APiaP+F1/Dn/AKGuz/75f/4mgZ3VFcL/AMLr+HP/AENdn/3y/wD8TR/wuv4c/wDQ12f/AHy//wATQB3VFcL/AMLr+HP/AENdn/3y/wD8TR/wuv4c/wDQ12f/AHy//wATQB3VFcjpHxU8Ea9qsGmaR4itbq8uG2xQoGy5xnuPQV11ABRRRQAUUUUAFFFFAHh/xu0e18RfFL4f6JqRkNleyzJMsblSR8ncVr/8M3eAP+eF/wD+Bj/41W+KP/JdPhn/ANfE3/stexUAeTf8M2+Af+eF/wD+Bj/40f8ADNvgH/nhf/8AgY/+Nes0UAeTf8M2+Af+eF//AOBj/wCNH/DNvgH/AJ4X/wD4GP8A416zRQB5N/wzb4B/54X/AP4GP/jR/wAM2+Af+eF//wCBj/416zRQB5N/wzb4B/54X/8A4GP/AI1JB+zr4FtriOeCPUEkjYMrC8fgivVaKAPMPEfhCbR83FlvntAOS
eWT6+o965mvdGVXUqwBB4IPeuC8T+DDEXvdIQlerwDt7iuedPqjyMTg+X34HD0UpBDEEYI6g0nesDyzTi0O5e2jmlmtrfzs+Sk8oRpPoKzVBdS0Y3qP4l5H510WtaSdWns9TtJIHiWzSEl5QvlFevXpVzTWaKbR7iyubePQUtM3QLKFZsncWzz0xitfZo7/AKvGTsv+HOSCO33VY59BRGjSOFUEknHA6Vt6Vq8y6V4eNtKkAub2VpQFGXi3DaDnoCDmrcT3L2l22hzwROmrSC4cso/cgnABPaj2d+olhVfcw72yTT3vEnuog1oVVhg5ct0AFQwwPNNGmQgkYKHYfKPxrY1mWAyeJ5DJGd1xbgMpBzyM4q1qiXia3CTdxJpBntxbxBlwRkcY65Jzmm6aKlho30MCKCKa6mjF1GIIC/mXGDtAXOSO56cetV4X8+JZI0fa3IDLg/iK6dheTRu2gTwqw1if7WdygtGCdg5/hz1+lV9Q1pIbTxBcaNPEgOowwwTKAf4fnK5/2gRmj2YSwsbbmAwIYgggjsaStTxLMsviB9rIf3ETMVx94qCay6yas7HFOHJJxCrumaVd6vdi3s4yxP3mPRR6mrWheHrrXLkLECkKn55SOB9PevUtL0q10izW3tEAH8THqx9TWkKd9WdWGwrqvmlsZll4N0mHRZNOv7SK+jnH78ToGDn6GuV0r4EeCtI8S3GpwadHPbzx7fsNyglijOfvLu5FelUV1Wse5GKirI5f/hWvgr/oVdJ/8BE/wo/4Vp4K/wChV0j/AMBE/wAK6iigo5f/AIVp4K/6FXSP/ARP8KP+FaeCv+hV0j/wET/CuoooA5f/AIVp4K/6FXSP/ARP8KP+FaeCv+hV0j/wET/CuoooA8H8S+H9I8P/ALSXgSHRNMtbCOSKVnS3iCBiA3JxXvFeM+Pf+Tl/AH/XCb+TV7NQAUUUUAFFFFABRRRQB478Uf8Akunwz/6+Jv8A2WvYq8d+KP8AyXT4Z/8AXxN/7LXsVABRRRQAUUUUAFFFFABRRRQAUUUUAch4o8HJfBrzTVCXHVoxwH/+vXnckTwytHKhR1OCpHIr3Oue8R+FbfWojLFiK7UcOBw3saxnTvqjzcTg+b3obnlEkUcq7ZUDD0NI8EUiBHRWUdFI4/KrV5Z3Gn3T213GY5UPIPf3FQVz6o8h8y0YbV3hsDcBgH0FMMMZi8vYNhOSvY0+ii7FdjfKjxjYME5xjv60jRo0okZQXX7rHqPpT6KLsLvuMMSGLyyo2f3RwKTyowioI1CocquOB+FSUh60XYXYKqhmYAbm6nua6Tw14Un1lxPcBorRTy2OX9hV7wt4Ne7KXuqoUh6pCeC/ufQV6HHGkUaxxqFVRgADAFbQp31Z6WGwjk+epsRWlpBY2ywWsaxxoMAAVPRRXQeukkrIKKKKBhRRRQAUUUUAFFFFAHjPj3/k5fwB/wBcJv5NXs1eM+Pf+Tl/AH/XCb+TV7NQAUUUUAFFFFABRRRQB478Uf8Akunwz/6+Jv8A2WvYq8d+KP8AyXT4Z/8AXxN/7LXsVABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGXregWmuWvl3C7ZFH7uUdVNeWavo91ot4YLtOD9yQfdce1ez1U1LTLXVbNre8jDoeh7qfUGs5wUjjxGFjVV1ozxSitnX/Dl1odx8wMlux+SUD9D71jVytNbnhTg4O0twooqa2tZ7y4SC2jaSRzgKBSJSbdkRojSOEjUszHAAGSTXf+F/Ba2+y91ZQ03VIT0T3Pqa0PDfhKDSEW4ugst4R17J7D/GulHSumnTtqz2cNg1H3p7iDgYFLRRXQekFFFFSwCiiigAooooAKKKKACiiigDxnx7/ycv4A/wCuE38mr2avGfHv/Jy/gD/rhN/Jq9moAKKKKACiiigAooooA8d+KP8AyXT4Z/8AXxN/7LXsVeO/FH/kunwz/wCvib/2WvYqACiiigAooooAKKKKACiiigAoo
ooAKKKKACiiigCK5tobuBobiNZI2GCpFeaeJvCUuku1zZhpbQn6lPrXqFQXoBsLgEA/um/kamUFJHPXoQrLXc8d0zS7rVrtbe0jLE9W7KPU16hoPh210O3AQCSdh88pHJ+ntXAfALUrvVPCurT30plkTUpIlYgAhR0FerAg9COOtTGmomdDCxpavVju1FICCuQQR6ilrQ7AoqNriFJ0geVFlcEqhYbmA64FSUAFFRieE3BgEqGYLuMe4bgPXHpUlABRRRQAUUUUAFFFFABRRRQB4z49/wCTl/AH/XCb+TV7NXjPj3/k5fwB/wBcJv5NXs1ABRRRQAUUUUAFFFFAHjvxR/5Lp8M/+vib/wBlr2KvHfij/wAl0+Gf/XxN/wCy17FQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFQ3n/Hhcf9cm/kamrE8YeILbwx4UvdVvobiW3hjPmC3TcwB4zjI4oA84/Zx/5E3V/+wtL/ADpPGGvzfDzxx4gmDyGHX9KMtkhYkfa4/l2rnoSGH5Vifs1eLLC5tdT0GGG5a6e6kvN/l/u1jJ4yc9favatY8OaNr01nLrOnw3j2MomtzKufLf1FAHkUniLxZ4VktvBfhqC4nudG0WK7lP2T7S13O5JZXJddiZyNwycn2rdn8a+LJPFmqxxfZLSx0fSYdSubOWEvK7PEWMQYHj5h15rt9d8G+HvE11Fca3pcF3PEpRJGBDbTyVJHUex4q3b6BpdrfXF5b2MKXFzCkEzheXjQYVT7AHFAHi1l4h1i48Z+F9e8Sazpcgl0i5vIzDAV+yqQDhwGJYD14Jq2Pi14h06DWXmddSji0sX1nNNp5tOSwUHbvYsnOcnBr0iw+G/hDS7l7iw8P2UMrh1Zlj6q/wB4fQ+lSaf8PvCumLOtlolrGtxCYJRtzvjPVTnt7UAeS3HivXfC3iXXtf1O7sNY1K38MwyxtbRGNBvlGA67j0Jz1GR6Vr3nj7x1pHg7Xr68tSWtUs5LC+vLEQCRpJVSRDGrtkDPDA9DXoemfD3wno8M8WnaDZwx3EJgmUR5EkZOSrZ6iktfh54Vs9MudPt9GgW1uzGZojkh9jbkHJ6A8gUAcXfeJvGseuyeHV1nTLa80/SDqVzevZEpcEk4RUL/ACqAME5JrvPBeuzeJvBmmaxdQrDNdwB3Rc7Qe+Pal13wZ4d8TSQSa9pNveyQDbG8i8qv93I7e1bFvbw2ttHb20SRQxKFREGAoHQAUASUUUUAFFFFABRRRQB4z49/5OX8Af8AXCb+TV7NXjPj3/k5fwB/1wm/k1ezUAFFFFABRRRQAUUUUAeO/FH/AJLp8M/+vib/ANlr2KvHfij/AMl0+Gf/AF8Tf+y17FQAUUUUAFFFFABRRRQAU2R1jjZ2ztUEnFOqG8/48p/+ubfypxV3YUnZNnH/APC1/DWfv3P/AH6/+vSf8LY8Nf37n/v1/wDXrw4/eNJX0Sy2h5nz39pVvI9y/wCFseGv79z/AN+v/r0v/C2PDX9+5/79V4ZRR/ZtDzD+0a/ke5/8LY8Nf37n/v1R/wALY8Nf37n/AL9V4ZRR/ZtDzD+0a/ke5/8AC2PDX9+5/wC/VV7/AOJfhPUtOnsrsXEkFxG0cimHqCMGvFKKP7NoeYf2jX8jtPhXeeEvhzpt/Gss8txd3DOZBD0jB+Ra77/hbHhr+9c/9+q8Nop/2bQ8w/tKv5HuX/C2PDX965/780f8LY8Nf37r/vz/APXrw2ij+zaHmH9o1/I9y/4Wx4a/v3X/AH5/+vR/wtjw1/fuv+/P/wBevDaKP7NoeYf2jX8j3L/hbHhr+/df9+f/AK9H/C2PDX9+5/79f/Xrw2ij+zaHmH9o1/I+k/D/AIksPE1nJc6YZDHFJ5bb12nOAf61rV538G/+Ravf+vs/+grXoleDiIKnVcY7I9vDzdSkpPdhRRRWJuFFFFABRRRQB4z49/5OX8Af9cJv5NXs1eM+Pf8Ak5fwB/1wm
/k1ezUAFFFFABRRRQAUUUUAeO/FH/kunwz/AOvib/2WvYq8d+KP/JdPhn/18Tf+y17FQAUUUUAFFFFABRRRQAVDef8AHlN/1zb+VTVDef8AHlN/1zb+VVHdEy+Fny633jW34V0e21m7vo7wyBbexluF8tsEsoGO3SsRvvGug8HalZaVc6pNqNylvHJps0SM56swGB9a+sxDkqLcdz5XDqMqqUtiPT/Bms6ja21xHFDFFdrmB5Z1XzD6DJ6+1MtPCOq3bXWUht0tZPKlkuJljRX/ALuSetaP9tWD3ngmEXkZ/s+Mvcr2hJbqfTitiw8SWLX2vSQ6xprW09+0iW+o25eCVePmDDkH8K4Hiq6V7HcsNQvuZWkeAry6vNTtr8rDLZW5lVBKuJGI+XnP3T69KztP8GazqVg17bxQm2QyBpzOuzKHDc59QfrXRRar4WbxBrkemTW1lbXmlNAbgqywvM3XbkZ2/hXNa1qFk3gTQdCtLuKcw3k8k0MQO0qFYI2MdCScZ9qccRXctOtugSw9FR9L9SIeG9TOq2enLApur2FZ4EEi/MjAkHOcDoak07wpqepxyyQLDHFFL5JlnmVFZ/7oJPJ+ldjHqvh5vFmga/Prlvbx21hHbvbFG8xWUN6DAHzdfasVbrRtf8J21lPq9vp7WeoSXG+ZWxIjHhlIB5FP61V2t+DF9Vpb3/Eybfwnq1xqF1ZmBYXtObh5nCJGPUseKsw+Frq01G6tdSspZ3js3uIxbzIAwAyH3E4K/TmrWn3GhXGg6/oMWpmGC5lSSG9vkYCbaec8E4PbNGl32gaLqup+Rq5uYTpUkTXUgIR5WXASPPJFE8RUaf8AkyY4ekmv8zM0jwlq+saXDewRwolwMQiWZUM7YyQgP3qbZeFtWv0uXitlRLSTy7hppFQRH/ayeK2rW40XUtM8I391rkWmyaLAsU1vMjFuCrbo8A8nbiqms+JrTU/DHiQiUwvqmqxyW9tIPneIeo/AHFL61Wva34eZX1aj3KU/hLV4NZi0s26vczJ5kexwVZP727pj3ou/CerWl3aweSk5u22wPbyCRHPcbhxxXXQ+KdGj8UaerXsTR/2L9kll2llhkJPDCshdYFlfaPZR+KLAwW1w0v8AxLNM2x2/bJYnnPcAUli6/NblB4Wja/MZGq+FdS0iyN3cfZ5IUk8qRoJ1k8t/7rYPBrFrufFmraDeaBcJOdJvNXM6/ZZNNhdGIz8zyZGAcZ7muGrtwtWdSF57nJiacac7Reh7L8G/+Ravf+vs/wDoK16JXgPhXSPiLqGnyyeBfEVjpVksm2WK5gVy0mB8wJU8Yx+Vbn/CMfHP/oedI/8AANP/AI3XzuL/AI8j6DCfwInsVFeO/wDCMfHP/oedI/8AANf/AI3R/wAIx8c/+h50j/wDX/43XKdR7FRXjv8AwjHxz/6HnSP/AADX/wCN0f8ACMfHP/oedI/8A1/+N0AexUV47/wjHxz/AOh50j/wDX/43R/wjHxz/wCh50j/AMA1/wDjdAEfj3/k5fwB/wBcJv5NXs1eMaR8MviDc/EzQ/FHjTxFp2pLpW4BYYtjbSCMABQOp717PQAUUUUAFFR7qN1AElFR7qN1AHkPxR/5Lp8M/wDr4m/9lr2GvHPief8Ai+Xw0/6+Jv8A2WvYN1AElFR7qN1AElFR7qN1AElFR7qN1AElQ3n/AB5Tf9c2/lTt1MmXzYXjzjcpGfrTi7MUldNHy833jSdq9UPwbjJ/5Czf9+6P+FNR/wDQWb/v3X031+hbc+b+o176I8r75o7Yr1T/AIU1H/0Fm/790f8ACmo/+gs3/fuj6/h+4fUcR2PK+1Feqf8ACmo/+gs3/fuj/hTUf/QWb/v3R9fw/cPqOI7HldHbFeqf8Kaj/wCgs3/fuj/hTUf/AEFm/wC/dH1/D9w+o1+x5XnijjGMcV6p/wAKaj/6Czf9+6P+FNR/9BZv+/dH1/D9w+o1+x5X+FGa9U/4U1H/ANBZv+/dH
/Cmo/8AoLN/37o+v4fuH1Gv2PK/wozXqn/Cmo/+gs3/AH7o/wCFNR/9BZv+/dH1/D9w+o1+x5X+FFeqf8Kaj/6Czf8Afuj/AIU1H/0Fm/790fX6HcPqNfsaHwa/5Fq9/wCvs/8AoK16LXNeD/C6+E9NmtFuTcCWXzNxXGOAP6V0O6vn8RNTqylHZnvYeDhSjGW5JRUe6jdWBuSUVHuo3UASUVHuo3UASUVHuo3UASUVHuo3UARbqN1RbqN1AEu6jdUW6jdQB5N8Tj/xfD4a/wDXxN/7LXr26vHfiaf+L3fDb/r4m/8AZa9d3UAS7qN1RbqN1AEu6jdUW6jdQBLuo3VFuo3UAS7qN1RbqR5NkbOf4QTQ9FdgTZo3Vxn/AAniZP8Aob8f7Ypf+E8T/nzf/vsV5P8AbOB/nO/+z8V/Idluo3Vxv/CeJ/z5v/30KP8AhPE/583/AO+hR/bWB/nD+zsV/Idlupd1cZ/wnif8+b/99Cj/AITxP+fN/wDvoUv7ZwP84f2div5Ds91G6uM/4TxP+fN/++xR/wAJ5H/z5v8A99ij+2cD/OL+zsV/Idluo3Vxn/CeJ/z5v/30KP8AhPE/58n/AO+hT/tnA/zj/s7FfyHZ7qN1cZ/wnif8+b/99Cj/AITxP+fJ/wDvoUf2zgf5w/s7FfyHZ7qN1cZ/wnif8+T/APfQo/4TxP8Anyf/AL6FH9s4H+cP7OxX8h2e6jdXGf8ACeJ/z5P/AN9Cj/hPE/58n/76FH9s4H+cP7OxX8h2e6jdWRomtDWbV5liMWx9uCc54rS3V6VKrCrBTg7pnHOEqcnGS1RLuo3VFuo3VoQS7qN1RbqN1AEu6jdUW6jdQBLuo3VFuo3UAS7qN1RbqN1AEe6jdUW6jdQBLuo3VFuo3UAeUfEw/wDF7fhv/wBfE3/stet7q8h+JR/4vX8Of+vib/2WvWt1AEu6jdUW6jdQBLuo3VFuo3UAS7qN1RbqN1AEu6o7hv8ARpf9w/ypC2BkngVDLKktnI8bq6lDhlOQaip8D9Co7o8rzyaXNR55rTZLSxtrVrmBriS5XzD8+0Iucfia/KqWHlWcmnZLds+6nWVNLS7ZQzSZqzPFE0F5NYjdFDtw0mcgE44A60+4037NE5luYRKihmhz8wB6Vo8BWUeZK6IWKp3s3ZlTNGavz6cG1KSKJligito5pJHPC5Bz/Kq15ZtZmIl1kjlTfG69GFKtga1FNyWi6jp4mnUaSerIM0Zq8NKYWcdxLPHH5iF0Ug8ge9LBo008EbeaivKpeOM5ywHehYDEO1o9L/IHi6K3l5FDNGauaTbRXOprDcAlAGyAcdBUiJaXmm3UscP2ea3XzMByysufenSwVSrDmi1106uwp4qMJcrXz9ShmkzWhLos0UcpMsfmQoHlj5ygPen3Olrb6gLaGSKcm383BYjGACScfXitI5ZiXvGxLxtFbO5mZozV+LR5pIEbzEWWSMyJEc5K+tFvpEk6wEzRxm4z5aseWI7Vn9QxN7cjK+t0bX5ihmlzV230mSeNWaaOPzJDFGDk72HWqMitFK8cgwyMVI9xWNTDVaUVKcbJmkK9Oo2ou9jufBDf8Suf/rr/AEFdNurlfBLf8Syf/rr/AEFdLur9Iyv/AHKn6Hx2O/3mfqS7qN1RbqN1ekcZLuo3VFuo3UAS7qN1RbqN1AEu6jdUW6jdQBLuo3VFuo3UAR7qN1RbqN1AEu6jdUW6jdQB518TvBPiPxL4i0DWPC13bW1zpPmMrT9mbbjj8DWV/Yfxp/6GLTP++P8A61etbqN1AHkv9h/Gn/oYtM/74/8ArUf2H8af+hi0z/vj/wCtXrW6jdQB5L/Yfxp/6GLTP++P/rUf2H8af+hi0z/vj/61etbqN1AHkn9h/Gn/AKGPTP8Avj/61H9h/Gn/AKGPTP8Avj/61et7qN1AHkU2h/GfyJN/iLTSu05ATqMfSuV+F2g/FC2upplums9KLsZVvsssnJzsXr+NfQ+6o
7hv9Gk/3TUz+Bjj8SPLAeOetaa6hZz2kEWoQSM9uNqNGfvL6GsrPJpM1+V0q8qMny6p9GfczpxqJX6GhNqQktr5EgEZudgjReiBTnmpb+/srkPMlq32qVQGZj8qkdxWVRXQ8wrOPI7W9DL6rTT5le/qax1WCW9uftEDNbT28cLAH5srnkVVv7xbuWFYY2jgtozHGGOScnJJ/IVTzRmlVx9arFxlbUdPC06bTXQ3bXXIbSxVBFM7hCpjLAox9eelR/2yj6fAkjXSTwR7F8iQKre5rGozWsM0xEEkraabEPA0ZO7L2k3aWF4k0qs6gEEA881JLe2sNnNb6fFJm4AWR5SOFzkgY9azc0uaxpY2rTjaNurv113NJ4enOV2bGqatHfwysr3aTToFdBIBGOx46njtSHVbdb37UkUm97bynGRjIUKMVj5ozW0s0xMnd2MlgaKVkbMuspLZRqxukmji8sLDIFVvc1HBqkcT6aSjH7Hncc/ezWVmjNKWZ4iVrtaDWCorY17fV4xpy28/2mIxzSSIbdwCwZicHP1rMLs7M7klmYsSzZPJ7nvUeaM1jicbWxMVGp0NKOGp0W3Dqd14KP8AxK5/+uv9BXS7q5bwW3/Esn/66/0FdJur9Ayz/cqfofKY7/eZ+pLuo3VFuo3V6Jxku6jdUW6jdQBLuo3VFuo3UAS7qN1RbqN1AEu6jdUW6jdQBFuo3VFuo3UAS7qN1RbqN1AEu6jdUW6jdQBLuo3VFuo3UAS7qN1RbqN1AEu6jdUW6jdQBLupr4dCp6EYNM3UbqN1YDG/4RLTfSX/AL7o/wCES030l/77rZ3Ubq4P7Own/PtHT9br/wA7Mb/hEdN/6a/990f8IjpvpL/33Wzuo3Uf2dhP+faD63X/AJ2Y3/CI6b/01/77o/4RHTfSX/vutndRuo/s7Cf8+0H1uv8Azsxv+ER03/pr/wB90yfwppsdtI6+blUJHz+grc3Vh+MrjV7fwjfz+HhE19FEXRJV3BgByMeuKf8AZ2E/59oPrdf+dnFfCz/irtBvrvV+ZYLx4U8r5RtHSu4/4RLTfSX/AL7ryP8AZ5v9cuRqcLpEmlJK0khMfzNM3YH0Fe6huKP7Own/AD7QfW6/87Mf/hEtN9Jf++6P+ES030l/77rY3UbqP7Nwn/PtB9bxH87Mf/hEtN9Jf++6P+ES030l/wC+62N1G6j+zcJ/z7QfW8R/OzH/AOES030l/wC+6T/hEtN9Jf8AvutndRuo/s3Cf8+0H1vEfzsg03TYNLhaK23bWbcdxzzVzdUW6jdXXCEacVGKskc8pOT5pO7Jd1G6ot1G6rES7qN1RbqN1AEu6jdUW6jdQBLuo3VFuo3UAS7qN1RbqN1AEe6jdUW6jdQBLuo3VFuo3UAS7qN1RbqN1AEu6jdUW6jdQBLuo3VFuo3UAS7qN1RbqN1AEu6jdUW6jdQBLuo3VFuo3UAS7qN1Z+o6nb6ZaNPdPtUdB3J9BXAat4xvr9ilsxt4ewX7x/GuHFY6lhtJavsd2FwNbFP3NF3PQrvWbCxB+03UaEds81iXPj3TYiRCkkxHoMCvOXkaRtzsWJ6knNN4rwaudVpP3EkfQUskox/iNs7eX4huc+TZAem5qrP8QL9gQLaEA9jzXLW9vNdS+XbRtK+M7VGTU8mlX8TRrLayoZG2puXGT6Vz/XsdNXTdvJHT9QwEHZpX82XtD8QHw9aS22mWUEMUsrTMAOrMcmtmP4hXQP720jI9jXI3MEtpcvBcJskQ4ZT2qPNZPMMZF2cncv8As7BzV1BHoEHxBtW4uLWRPdTmtez8V6TeEKlyqMf4X4ryjNJmumnnOJj8Vmc1TJcNL4bo9uSVZFDIwYHuDTt1ePafrd/psga2uG2j+BjkGu80DxXBq2IJgIrnH3c8N9K9zCZnSxD5Xozw8XldbDrmWsTpd1G6ot1G6vUPKJd1G6ot1G6gCXdRuqLdRuoAl3Ubqi3UbqAJd1G6ot1G6gCXdRuqL
dRuoAl3Ubqi3UbqAIt1G6ot1G6gCXdRuqLdRuoAl3Ubqi3UbqAJd1G6ot1G6gCXdRuqLdRuoAl3Ubqi3UbqAJd1G6ot1G6gCXdRuqLdRuoA4Hxxdyya0IGJEcaAqPc965rNegeKtAbVYhc2uPtEYxj++PSvP5Y5IJTHMjI69VYYIr4zM6FSFeUpbPZn2uV16c8PGEd1ugzRmmZozXl2PUudJ4JJ/t5sHnyGx+lVZLST+07Bb3XUv3M+FWG6LlD3OO1VtE1ZdHvZLl4nmzEyBEIBJP1qjpkwsrm3nkT/AFbBmVev0r2aWIpww0IN63PHq4epPEzmlpbsdFFpdg2uayl2ZvJtQZC+8s59Tz1qteQabP4YTVdPgnt8TLGyTMCTngHj6iqz6+Dcaxci0kzfoUjj3jK57k1AdSC+GF0oQsztOkhk3DaoXB+vauipLDNSWmvNr18jCnDFKUXrpy6eXUrZozTM0Zr56x9BcfmnwzPBOksTFXRgQRUOa6Dw74cm1G4S4uUMdspzzwX+ldGHo1KtRKmtTmxNanSpuVR6Ho1pM01nDI4wzoGI9yKl3VCuFUKOABgCl3V98k0rM/P27sl3Ubqi3UbqYiXdRuqLdRuoAl3Ubqi3UbqAJd1G6ot1G6gCXdRuqLdRuoAl3Ubqi3UbqAI91G6ot1G6gCXdRuqLdRuoAl3Ubqi3UbqAJd1G6ot1G6gCXdRuqLdRuoAl3Ubqi3UbqAJd1G6ot1G6gCXdRuqLdRuoAl3VR1DSLLU0xdQqzdnHBH41Z3UbqmcIzXLJXRUZyg+aLszjr3wM4YtYXII7LIP61jXHhrVbYndbFwO6HNelbqN1eXUynDzd1oepTzbEw0ep5PJZXcR/eW0q/VDURSQdUYf8BNeuEK33gD9aYYYT1iQ/8Brklki6TOxZ5LrD8TyXa/8Acb8qcsEznCQyN9ENer/Z4B0hT/vmnhI1+6ij6Cksk7z/AAG88fSH4nmEOi6lOQI7OTnuRitW08F382DcOkK/XJrvN1G6uqnk9CLvJtnLUzivLSKSMbTvCenWLB5FNxIO8nQfhW6MKoCgADoBUe6jdXp06VOkrQVjy6lWpVfNN3Jd1G6ot1G6tTIl3Ubqi3UbqAJd1G6ot1G6gCXdRuqLdRuoAl3Ubqi3UbqAJd1G6ot1G6gCXdRuqLdRuoA//9k=) ``` # Ciclo For for i in range(9) : print(i) # Ciclo For range tiene tres parámetros: start, stop y step. for i in range(3, 9) : print(i) ```
github_jupyter
# Module 1: **Data Science - Basic Data Understanding** Course website: [SHALA-2020](https://shala2020.github.io/) Instructors: Sudhakar Kumar, Rishav Arjun, and Sahar Nasser --- ## Plotting mathematical functions --- ``` # Loading the libraries import pandas as pd import numpy as np import seaborn as sns import scipy.stats as stats from matplotlib import pyplot as plt from math import pi # Refer to the official documentation of packages x = np.arange(-pi, pi, 0.1) y = np.sin(x) plt.plot(x, y, 'o') # we can also try go/bo/ro/r- etc for changing the representation of the points (marker) plt.plot(x,np.cos(x)) plt.legend(['sin', 'cos']) plt.show() ``` --- ## Plotting line plots --- ``` x = [1, 2, 3] y = [1, 4, 9] z = [10, 5, 0] plt.plot(x, y) # plt.plot(x, z) # plt.title("test plot") # plt.xlabel("x") # plt.ylabel("y and z") # plt.legend(["this is y", "this is z"]) plt.show() ``` --- ## Visualizing data by loading dataframes --- ### Visualizing a sample data ``` # Slides sample_data = pd.read_csv('sample_data.csv') sample_data #Pandas DataFrame is two-dimensional size-mutable, potentially heterogeneous tabular data structure type(sample_data) # type(sample_data['column_c']) type(sample_data.column_c) # extracting first element of the series sample_data.column_c.iloc[0] plt.plot(sample_data.column_a, sample_data.column_b, 'o') plt.plot(sample_data.column_a, sample_data.column_c) # Add titles and legend as described earlier # plt.legend(["col_B", "col_C"]) plt.show() ``` ### Visualizing data on the populations of countries ``` data = pd.read_csv('countries.csv') # pd.set_option('display.max_rows', None) # pd.set_option('display.max_columns', None) data # You can refer to the World Bank data for latest population (https://data.worldbank.org/). 
# Compare the population growth in the US and China data[data.country == 'United States'] us = data[data.country == 'United States'] china = data[data.country == 'China'] china plt.plot(us.year, us.population) # Observe the 1e8 on the Y-axis # plt.plot(us.year, us.population / 10**6) # plt.plot(china.year, china.population / 10**6) # plt.legend(['United States', 'China']) # plt.xlabel('year') # plt.ylabel('population in million') plt.show() # Observe the population growth us.population us.population / us.population.iloc[0] * 100 # Find the percentage growth from the first year plt.plot(us.year, us.population / us.population.iloc[0] * 100) plt.plot(china.year, china.population / china.population.iloc[0] * 100) plt.legend(['United States', 'China']) plt.xlabel('year') plt.ylabel('population growth (first year = 100)') plt.show() ``` --- ### Visualizing data on movies --- ``` movies = pd.read_csv('moviesData.csv') pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) movies.head() movies.shape # Plot a histogram of the object named runtime in movies. # A histogram is a visual representation of the distribution of a dataset. # It is used to plot the frequency of score occurrences in a continuous dataset. # Observe the warning in the output # Slide 9 plt.hist(movies.runtime) movies.runtime.describe() # observe that the count of runtime is one less than 600 print(movies.runtime.isna().sum()) movies = movies.dropna() # Drop the na datapoints movies.shape # One can also impute the values # Assignment print(movies.runtime.isna().sum()) plt.hist(movies.runtime, bins = 7, color='green', orientation='vertical') # In the histogram there are 7 bins. # Height of a bin represents the number of observations lying in that interval. 
# https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.hist.html plt.title("Distribution of movies' length") plt.xlabel("Run time of movies") plt.xlim(0,300) movies.head() # create a pie chart from the object genre in the movies data frame. # A pie chart is a circular chart # It is Divided into wedge-like sectors, illustrating proportion. # The total value of the pie is always 100 percent. # https://matplotlib.org/3.1.1/gallery/pie_and_polar_charts/pie_features.html # Type of genre_counts genre_counts = movies.genre.value_counts() print(genre_counts) plt.pie(genre_counts) # plt.pie(genre_counts, labels=genre_counts) # plt.pie(genre_counts, labels=genre_counts.index.tolist()) plt.show() # Change the label of the pie chart movies.head() moviesSub = movies[0:10] moviesSub.shape # draw a bar chart of the object named imdb underscore rating in movies. # A bar chart represents data in rectangular bars with length of the bar proportional to the value of the variable. plt.bar(moviesSub.title, moviesSub.imdb_rating) plt.xlabel('Movies title') plt.title('imdb_rating') # plt.xticks(rotation='vertical') plt.ylim(0,10) plt.show() # Slide 10 plt.figure(figsize=(10,10)) # mask = np.zeros_like(movies.corr(), dtype=np.bool) # mask[np.triu_indices_from(mask)] = True sns.heatmap(movies.corr()) # vmin = -1, cmap='coolwarm', annot=True, mask = mask # imdb underscore rating and audience underscore score. # draw a scatter plot with these two objects by using plot function. # Scatter plot is a graph in which the values of two variables are plotted along two axes. # The pattern of the resulting points reveals the correlation. plt.scatter(movies.imdb_rating, movies.audience_score, c='red') # plt.scatter(movies.critics_score, movies.audience_score, c='red') plt.xlim(0,11) # imdb varies from 0 to 10 plt.ylim(0,101) # audience varies from 0 to 100 plt.title('Scatter plot of imdb rating and audience score') plt.show() # Concept of correlation is needed here (intuition wise). 
# Difference between auto-correlation (do not mix with correlation) # Range, quartile, information in boxplot # How to deal with outliers plt.figure(figsize=(8,10)) movies['diff'] = movies['audience_score'] - movies['critics_score'] chart = sns.boxplot('genre', 'diff', data=movies) chart.set_xticklabels( chart.get_xticklabels(), rotation=90, horizontalalignment='right', fontweight='light', fontsize='x-large' ) ``` --- ## Various distributions --- --- ### Bernoulli Distribution --- Tossing a biased coin ``` probs = np.array([0.70, 0.3]) side = [0, 1] plt.bar(side, probs) plt.title('Bernoulli Distribution of a Biased Coin', fontsize=12) plt.ylabel('Probability', fontsize=12) plt.xlabel('Outcome', fontsize=12) axes = plt.gca() axes.set_ylim([0,1]) ``` --- ### Uniform Distribution --- Rolling a dice ``` # Skewed pictorial representation probs = [1/6]*6 side = [1,2,3,4,5,6] s = pd.Series(probs,side) #Set descriptions: plt.title("Uniform Distribution",fontsize=16) plt.ylabel('side', fontsize=16) plt.xlabel('probability',fontsize=16) #Set tick colors: ax = plt.gca() ax.tick_params(axis='x', colors='blue') ax.tick_params(axis='y', colors='red') ax.set_ylim([0,1]) #Plot the data: s.plot(kind = 'bar') plt.show() ``` --- ### Binomial Distribution --- Tossing a coin certain number of times ``` x = np.arange(0, 25) prob = 0.2 p = 100 # shape parameter binom = stats.binom.pmf(x,p, prob) plt.plot(x, binom, '-o') plt.xlabel('Random Variable', fontsize=12) plt.ylabel('Probability', fontsize=12) plt.title("Binomial Distribution") ``` --- ### Gaussian Distribution --- ``` n = np.arange(-100, 100) mean = 0 normal = stats.norm.pdf(n, mean, 20) plt.plot(n, normal) plt.xlabel('Random Variable', fontsize=12) plt.ylabel('Probability', fontsize=12) plt.title("Normal Distribution") ``` --- ### Poisson Distribution --- ``` # n = number of events, lambd = expected number of events which can take place in a period # The Poisson distribution is the discrete probability distribution of the number 
of events # occurring in a given time period, given the average number of times the event occurs over that time period. n = np.arange(0, 50) for Lambda in range(0,10,2): poisson = stats.poisson.pmf(n, Lambda) plt.plot(n, poisson, '-o', label="λ = {:f}".format(Lambda)) plt.xlabel('Number of Events', fontsize=12) plt.ylabel('Probability', fontsize=12) plt.title("Poisson Distribution") plt.legend() ``` --- ### Exponential Distribution --- ``` Lambda = 0.5 x = np.arange(0, 15, 0.1) y = Lambda*np.exp(-Lambda*x) plt.plot(x,y, label="λ = {:f}".format(Lambda)) plt.xlabel('Random Variable', fontsize=12) plt.ylabel('Probability', fontsize=12) plt.title("Exponential Distribution") plt.legend() ``` --- ## References * [Data Visualization with Python](https://www.youtube.com/watch?v=a9UrKTVEeZA) * [Movies dataset](http://www2.stat.duke.edu/~mc301/data/movies.html) ---
github_jupyter
> This is one of the 100 recipes of the [IPython Cookbook](http://ipython-books.github.io/), the definitive guide to high-performance scientific computing and data science in Python. # 4.7. Implementing an efficient rolling average algorithm with stride tricks Stride tricks can be useful for local computations on arrays, when the computed value at a given position depends on the neighbor values. Examples include dynamical systems, filters, cellular automata, and so on. In this example, we will implement an efficient rolling average (a particular type of convolution-based linear filter) with NumPy stride tricks. The idea is to start from a 1D vector, and make a "virtual" 2D array where each line is a shifted version of the previous line. When using stride tricks, this process does not involve any copy, so it is efficient. ``` import numpy as np from numpy.lib.stride_tricks import as_strided %precision 0 def id(x): # This function returns the memory # block address of an array. return x.__array_interface__['data'][0] n = 5; k = 2 a = np.linspace(1, n, n); aid = id(a) ``` Let's change the strides of `a` to add shifted rows. ``` as_strided(a, (k, n), (a.itemsize, a.itemsize)) id(a), id(as_strided(a, (k, n))) ``` The last value indicates an out-of-bounds problem: stride tricks can be dangerous as memory access is not checked. Here, we should take edge effects into account by limiting the shape of the array. ``` as_strided(a, (k, n - k + 1), (a.itemsize,)*2) ``` Let's apply this technique to calculate the rolling average of a random increasing signal. First version using array copies. ``` def shift1(x, k): return np.vstack([x[i:n-k+i+1] for i in range(k)]) ``` Second version using stride tricks. ``` def shift2(x, k): return as_strided(x, (k, n - k + 1), (8, 8)) b = shift1(a, k); b, id(b) == aid c = shift2(a, k); c, id(c) == aid ``` Let's generate a signal. 
``` n, k = 100, 10 t = np.linspace(0., 1., n) x = t + .1 * np.random.randn(n) ``` We compute the signal rolling average by creating the shifted version of the signal, and averaging along the vertical dimension. ``` y = shift2(x, k) x_avg = y.mean(axis=0) ``` Let's plot the signal and its averaged version. ``` %matplotlib inline import matplotlib.pyplot as plt f = plt.figure() plt.plot(x[:-k+1], '-k'); plt.plot(x_avg, '-r'); ``` ### Benchmarks Let's benchmark the first version (creation of the shifted array, and computation of the mean), which involves array copy. ``` %timeit shift1(x, k) %%timeit y = shift1(x, k) z = y.mean(axis=0) ``` And the second version, using stride tricks. ``` %timeit shift2(x, k) %%timeit y = shift2(x, k) z = y.mean(axis=0) ``` In the first version, most of the time is spent in the array copy, whereas in the stride trick version, most of the time is instead spent in the computation of the average. > You'll find all the explanations, figures, references, and much more in the book (to be released later this summer). > [IPython Cookbook](http://ipython-books.github.io/), by [Cyrille Rossant](http://cyrille.rossant.net), Packt Publishing, 2014 (500 pages).
github_jupyter
``` import pandas as pd from pybatfish.client.commands import * from pybatfish.datamodel import * from pybatfish.question import bfq, list_questions, load_questions pd.set_option("display.width", 300) pd.set_option("display.max_columns", 20) pd.set_option("display.max_rows", 1000) pd.set_option("display.max_colwidth", None) # Configure all pybatfish loggers to use WARN level import logging logging.getLogger('pybatfish').setLevel(logging.WARN) bf_session.host = 'localhost' load_questions() ``` #### Access-lists and firewall rules * [Filter Line Reachability](#Filter-Line-Reachability) * [Search Filters](#Search-Filters) * [Test Filters](#Test-Filters) * [Find Matching Filter Lines](#Find-Matching-Filter-Lines) ``` bf_set_network('generate_questions') bf_set_snapshot('generate_questions') ``` ##### Filter Line Reachability Returns unreachable lines in filters (ACLs and firewall rules). Finds all lines in the specified filters that will not match any packet, either because of being shadowed by prior lines or because of its match condition being empty. ###### Inputs Name | Description | Type | Optional | Default Value --- | --- | --- | --- | --- nodes | Examine filters on nodes matching this specifier. | [NodeSpec](../specifiers.md#node-specifier) | True | filters | Specifier for filters to test. | [FilterSpec](../specifiers.md#filter-specifier) | True | ignoreComposites | Whether to ignore filters that are composed of multiple filters defined in the configs. 
| bool | True | False ###### Invocation ``` result = bfq.filterLineReachability().answer().frame() ``` ###### Return Value Name | Description | Type --- | --- | --- Sources | Filter sources | List of str Unreachable_Line | Filter line that cannot be matched (i.e., unreachable) | str Unreachable_Line_Action | Action performed by the unreachable line (e.g., PERMIT or DENY) | str Blocking_Lines | Lines that, when combined, cover the unreachable line | List of str Different_Action | Whether unreachable line has an action different from the blocking line(s) | bool Reason | The reason a line is unreachable | str Additional_Info | Additional information | str Print the first 5 rows of the returned Dataframe ``` result.head(5) ``` Print the first row of the returned Dataframe ``` result.iloc[0] bf_set_network('generate_questions') bf_set_snapshot('filters') ``` ##### Search Filters Finds flows for which a filter takes a particular behavior. This question searches for flows for which a filter (access control list) has a particular behavior. The behaviors can be: that the filter permits the flow (`permit`), that it denies the flow (`deny`), or that the flow is matched by a particular line (`matchLine <lineNumber>`). Filters are selected using node and filter specifiers, which might match multiple filters. In this case, a (possibly different) flow will be found for each filter. ###### Inputs Name | Description | Type | Optional | Default Value --- | --- | --- | --- | --- nodes | Only evaluate filters present on nodes matching this specifier. | [NodeSpec](../specifiers.md#node-specifier) | True | filters | Only evaluate filters that match this specifier. | [FilterSpec](../specifiers.md#filter-specifier) | True | headers | Packet header constraints on the flows being searched. | [HeaderConstraints](../datamodel.rst#pybatfish.datamodel.flow.HeaderConstraints) | True | action | The behavior that you want evaluated. 
Specify exactly one of `permit`, `deny`, or `matchLine <line number>`. | str | True | startLocation | Only consider specified locations as possible sources. | [LocationSpec](../specifiers.md#location-specifier) | True | invertSearch | Search for packet headers outside the specified headerspace, rather than inside the space. | bool | True | ###### Invocation ``` result = bfq.searchFilters(headers=HeaderConstraints(srcIps='10.10.10.0/24', dstIps='218.8.104.58', applications = ['dns']), action='deny', filters='acl_in').answer().frame() ``` ###### Return Value Name | Description | Type --- | --- | --- Node | Node | str Filter_Name | Filter name | str Flow | Evaluated flow | [Flow](../datamodel.rst#pybatfish.datamodel.flow.Flow) Action | Outcome | str Line_Content | Line content | str Trace | ACL trace | List of [TraceTree](../datamodel.rst#pybatfish.datamodel.acl.TraceTree) Print the first 5 rows of the returned Dataframe ``` result.head(5) ``` Print the first row of the returned Dataframe ``` result.iloc[0] bf_set_network('generate_questions') bf_set_snapshot('filters') ``` ##### Test Filters Returns how a flow is processed by a filter (ACLs, firewall rules). Shows how the specified flow is processed through the specified filters, returning its permit/deny status as well as the line(s) it matched. ###### Inputs Name | Description | Type | Optional | Default Value --- | --- | --- | --- | --- nodes | Only examine filters on nodes matching this specifier. | [NodeSpec](../specifiers.md#node-specifier) | True | filters | Only consider filters that match this specifier. | [FilterSpec](../specifiers.md#filter-specifier) | True | headers | Packet header constraints. | [HeaderConstraints](../datamodel.rst#pybatfish.datamodel.flow.HeaderConstraints) | False | startLocation | Location to start tracing from. 
| [LocationSpec](../specifiers.md#location-specifier) | True | ###### Invocation ``` result = bfq.testFilters(headers=HeaderConstraints(srcIps='10.10.10.1', dstIps='218.8.104.58', applications = ['dns']), nodes='rtr-with-acl', filters='acl_in').answer().frame() ``` ###### Return Value Name | Description | Type --- | --- | --- Node | Node | str Filter_Name | Filter name | str Flow | Evaluated flow | [Flow](../datamodel.rst#pybatfish.datamodel.flow.Flow) Action | Outcome | str Line_Content | Line content | str Trace | ACL trace | List of [TraceTree](../datamodel.rst#pybatfish.datamodel.acl.TraceTree) Print the first 5 rows of the returned Dataframe ``` result.head(5) ``` Print the first row of the returned Dataframe ``` result.iloc[0] bf_set_network('generate_questions') bf_set_snapshot('generate_questions') ``` ##### Find Matching Filter Lines Returns lines in filters (ACLs and firewall rules) that match any packet within the specified header constraints. Finds all lines in the specified filters that match any packet within the specified header constraints. ###### Inputs Name | Description | Type | Optional | Default Value --- | --- | --- | --- | --- nodes | Examine filters on nodes matching this specifier. | [NodeSpec](../specifiers.md#node-specifier) | True | filters | Specifier for filters to check. | [FilterSpec](../specifiers.md#filter-specifier) | True | headers | Packet header constraints for which to find matching filter lines. | [HeaderConstraints](../datamodel.rst#pybatfish.datamodel.flow.HeaderConstraints) | True | action | Show filter lines with this action. By default returns lines with either action. | str | True | ignoreComposites | Whether to ignore filters that are composed of multiple filters defined in the configs. 
| bool | True | False ###### Invocation ``` result = bfq.findMatchingFilterLines(headers=HeaderConstraints(applications='DNS')).answer().frame() ``` ###### Return Value Name | Description | Type --- | --- | --- Node | Node | str Filter | Filter name | str Line | Line text | str Line_Index | Index of line | int Action | Action performed by the line (e.g., PERMIT or DENY) | str Print the first 5 rows of the returned Dataframe ``` result.head(5) ``` Print the first row of the returned Dataframe ``` result.iloc[0] ```
github_jupyter
``` library(caret, quiet=TRUE); library(base64enc) library(httr, quiet=TRUE) ``` # Build a Model ``` set.seed(1960) create_model = function() { model <- train(Species ~ ., data = iris, method = "rpart" , preProcess = c("expoTrans")) return(model) } # dataset model = create_model() pred <- predict(model, as.matrix(iris[, -5]) , type="prob") pred_labels <- predict(model, as.matrix(iris[, -5]) , type="raw") sum(pred_labels != iris$Species)/length(pred_labels) ``` # SQL Code Generation ``` test_ws_sql_gen = function(mod) { WS_URL = "https://sklearn2sql.herokuapp.com/model" WS_URL = "http://localhost:1888/model" model_serialized <- serialize(mod, NULL) b64_data = base64encode(model_serialized) data = list(Name = "caret_rpart_test_model", SerializedModel = b64_data , SQLDialect = "postgresql" , Mode="caret") r = POST(WS_URL, body = data, encode = "json") # print(r) content = content(r) # print(content) lSQL = content$model$SQLGenrationResult[[1]]$SQL # content["model"]["SQLGenrationResult"][0]["SQL"] return(lSQL); } lModelSQL = test_ws_sql_gen(model) cat(lModelSQL) ``` # Execute the SQL Code ``` library(RODBC) conn = odbcConnect("pgsql", uid="db", pwd="db", case="nochange") odbcSetAutoCommit(conn , autoCommit = TRUE) dataset = iris[,-5] df_sql = as.data.frame(dataset) names(df_sql) = sprintf("Feature_%d",0:(ncol(df_sql)-1)) df_sql$KEY = seq.int(nrow(dataset)) sqlDrop(conn , "INPUT_DATA" , errors = FALSE) sqlSave(conn, df_sql, tablename = "INPUT_DATA", verbose = FALSE) head(df_sql) # colnames(df_sql) # odbcGetInfo(conn) # sqlTables(conn) df_sql_out = sqlQuery(conn, lModelSQL) head(df_sql_out) ``` # R Caret Rpart Output ``` pred_proba = predict(model, as.matrix(iris[,-5]), type = "prob") df_r_out = data.frame(pred_proba) names(df_r_out) = sprintf("Proba_%s",model$levels) df_r_out$KEY = seq.int(nrow(dataset)) df_r_out$Score_setosa = NA df_r_out$Score_versicolor = NA df_r_out$Score_virginica = NA df_r_out$LogProba_setosa = log(df_r_out$Proba_setosa) 
df_r_out$LogProba_versicolor = log(df_r_out$Proba_versicolor) df_r_out$LogProba_virginica = log(df_r_out$Proba_virginica) df_r_out$Decision = predict(model, as.matrix(iris[,-5]), type = "raw") df_r_out$DecisionProba = apply(pred_proba, 1, function(x) max(x)) head(df_r_out) ``` # Compare R and SQL output ``` df_merge = merge(x = df_r_out, y = df_sql_out, by = "KEY", all = TRUE, , suffixes = c("_R","_SQL")) head(df_merge) diffs_df = df_merge[df_merge$Decision_1 != df_merge$Decision_2,] head(diffs_df) stopifnot(nrow(diffs_df) == 0) summary(df_sql_out) summary(df_r_out) prep = model$preProcess prep ```
github_jupyter
```
#hide
%load_ext autoreload
%autoreload 2
# default_exp latent_factor_fxns
```
# Latent Factor Functions

> This module contains the update and forecast functions to work with a latent factor DGLM. There are two sets of functions: The first works with the latent_factor class in PyBATS, which represents latent factors by a mean and a variance. The second set of functions relies on simulated values of a latent factor, which is a more precise but computationally slower method. The default functions work with the `latent_factor` class, and are called automatically by `analysis`, `dglm.update`, `dglm.forecast_marginal`, and `dglm.forecast_path` when there are latent factors in the model. To use simulated latent factor values, set the argument `analytic=False` in the `dglm` methods, and pass in the set of simulated values as `phi_samps`. It is not currently supported to use the simulated latent factor values within `analysis`.

```
#hide
#exporti
import numpy as np
from pybats_nbdev.forecast import forecast_path_copula_sim, forecast_path_copula_density_MC, forecast_aR, \
    forecast_joint_copula_density_MC, forecast_joint_copula_sim
from pybats_nbdev.update import update_F
import multiprocessing
from functools import partial
```

## Moment-based latent factor analysis

```
#exporti
def update_F_lf(mod, phi, F=None):
    """Plug the latent factor values `phi` into the regression vector F.

    If F is None, mod.F is modified in place (nothing returned); otherwise the
    supplied copy F is modified and returned. No-op when the model has no
    latent factor components (mod.nlf == 0).
    """
    if F is None:
        if mod.nlf > 0:
            mod.F[mod.ilf] = phi.reshape(mod.nlf, 1)
    else:
        if mod.nlf > 0:
            F[mod.ilf] = phi.reshape(mod.nlf, 1)
        return F

#export
def update_lf_analytic(mod, y = None, X = None, phi_mu = None, phi_sigma = None):
    """One DGLM update step using the mean and variance of a latent factor.

    Updates mod's state (m, C, a, R, W, conjugate params, t) in place.
    y is the scalar observation at time t; if None/NaN the posterior is set
    equal to the prior and only the time evolution/discounting is applied.
    phi_mu / phi_sigma are the latent factor mean vector and variance matrix.
    """
    # If data is missing then skip discounting and updating, posterior = prior
    if y is None or np.isnan(y):
        mod.t += 1
        mod.m = mod.a
        mod.C = mod.R

        # Get priors a, R for time t + 1 from the posteriors m, C
        mod.a = mod.G @ mod.m
        mod.R = mod.G @ mod.C @ mod.G.T
        mod.R = (mod.R + mod.R.T)/2

        mod.W = mod.get_W(X=X)

    else:
        update_F(mod, X)

        # Put the mean of the latent factor phi_mu into the F vector
        update_F_lf(mod, phi_mu)

        # Mean and variance ft, qt of the linear predictor, including the
        # extra variance contributed by the latent factor
        ft, qt = mod.get_mean_and_var_lf(mod.F, mod.a, mod.R, phi_mu, phi_sigma, mod.ilf)
        # if qt[0] < 0:
        #     print('correcting matrix')
        #     while qt<0:
        #         mod.R[np.diag_indices_from(mod.R)] += 0.001
        #         ft, qt = mod.get_mean_and_var_lf(mod.F, mod.a, mod.R, phi_mu, phi_sigma, mod.ilf)
        #     print(ft, qt)

        # Choose conjugate prior, match mean and variance
        # Initializing the optimization routine at 1,1 is important. At bad initializations, optimizer can shoot off to infinity.
        mod.param1, mod.param2 = mod.get_conjugate_params(ft, qt, 1, 1)
        if mod.param1 > 1E7:
            print('Numerical instabilities appearing in params of ' + str(type(mod)))

        # See time t observation y (which was passed into the update function)
        mod.t += 1

        # Update the conjugate parameters and get the implied ft* and qt*
        mod.param1, mod.param2, ft_star, qt_star = mod.update_conjugate_params(y, mod.param1, mod.param2)

        # Kalman filter update on the state vector (using Linear Bayes approximation)
        mod.m = mod.a + mod.R @ mod.F * (ft_star - ft)/qt
        mod.C = mod.R - mod.R @ mod.F @ mod.F.T @ mod.R * (1 - qt_star/qt)/qt

        # Get priors a, R for time t + 1 from the posteriors m, C
        mod.a = mod.G @ mod.m
        mod.R = mod.G @ mod.C @ mod.G.T
        mod.R = (mod.R + mod.R.T)/2

        # Discount information in the time t + 1 prior
        mod.W = mod.get_W(X=X)
        mod.R = mod.R + mod.W

#export
def update_lf_analytic_dlm(mod, y=None, X=None, phi_mu = None, phi_sigma = None):
    """Latent-factor update step for a normal DLM (conjugate normal case).

    Same contract as update_lf_analytic, but uses the closed-form normal
    update with a volatility (n, s) recursion instead of the general
    conjugate-parameter machinery.
    """
    # If data is missing then skip discounting and updating, posterior = prior
    if y is None or np.isnan(y):
        mod.t += 1
        mod.m = mod.a
        mod.C = mod.R

        # Get priors a, R for time t + 1 from the posteriors m, C
        mod.a = mod.G @ mod.m
        mod.R = mod.G @ mod.C @ mod.G.T
        mod.R = (mod.R + mod.R.T) / 2

        mod.W = mod.get_W(X=X)
    else:
        update_F(mod, X)

        # Put the mean of the latent factor phi_mu into the F vector
        update_F_lf(mod, phi_mu)

        # Mean and variance
        ft, qt = mod.get_mean_and_var_lf(mod.F, mod.a, mod.R, phi_mu, phi_sigma, mod.ilf)
        mod.param1 = ft
        mod.param2 = qt

        # See time t observation y (which was passed into the update function)
        mod.t += 1

        # Update the parameters:
        et = y - ft

        # Adaptive coefficient vector
        At = mod.R @ mod.F / qt

        # Volatility estimate ratio
        rt = (mod.n + et ** 2 / qt) / (mod.n + 1)

        # Kalman filter update
        mod.n = mod.n + 1
        mod.s = mod.s * rt
        mod.m = mod.a + At * et
        mod.C = rt * (mod.R - qt * At @ At.T)
        # mod.C = (mod.R - qt * At @ At.T)

        # Get priors a, R for time t + 1 from the posteriors m, C
        mod.a = mod.G @ mod.m
        mod.R = mod.G @ mod.C @ mod.G.T
        mod.R = (mod.R + mod.R.T) / 2

        # Discount information
        mod.W = mod.get_W(X=X)
        mod.R = mod.R + mod.W
        # Discount the degrees of freedom of the volatility estimate
        mod.n = mod.delVar * mod.n

#exporti
def get_mean_and_var_lf(self, F, a, R, phi_mu, phi_sigma, ilf):
    """Forecast mean/variance of the linear predictor with a latent factor.

    Attached to dglm as a method (hence `self`). Adds to the usual F'RF the
    extra variance arising from uncertainty in the latent factor (phi_sigma),
    then divides by self.rho (variance inflation).
    """
    p = len(ilf)
    if p == 1:
        # Scalar latent factor case
        # NOTE(review): the second term multiplies by a[ilf] (not a[ilf]**2)
        # before R * phi_sigma — confirm against the variance derivation.
        extra_var = a[ilf] ** 2 * phi_sigma + a[ilf] * R[np.ix_(ilf, ilf)] * phi_sigma
    else:
        extra_var = a[ilf].T @ phi_sigma @ a[ilf] + np.trace(R[np.ix_(ilf, ilf)] @ phi_sigma)

    return F.T @ a, (F.T @ R @ F + extra_var) / self.rho

#exporti
def get_mean_and_var_lf_dlm(F, a, R, phi_mu, phi_sigma, ilf, ct):
    """DLM variant of get_mean_and_var_lf; ct scales the state-uncertainty term.

    phi_mu is accepted for signature symmetry but not used here.
    """
    p = len(ilf)
    if p == 1:
        extra_var = a[ilf] ** 2 * phi_sigma + a[ilf]/ct * R[np.ix_(ilf, ilf)] * phi_sigma
    else:
        extra_var = a[ilf].T @ phi_sigma @ a[ilf]/ct + np.trace(R[np.ix_(ilf, ilf)] @ phi_sigma)

    return F.T @ a, F.T @ R @ F + extra_var

#export
def forecast_marginal_lf_analytic(mod, k, X = None, phi_mu = None, phi_sigma = None, nsamps = 1, mean_only = False, state_mean_var = False):
    """k-step-ahead marginal forecast with a latent factor (moment-based).

    Returns, in order of precedence: (ft, qt) if state_mean_var; the forecast
    mean if mean_only; otherwise nsamps draws from the forecast distribution.
    """
    # Plug in the correct F values
    F = update_F(mod, X, F=mod.F.copy())

    # Put the mean of the latent factor phi_mu into the F vector
    F = update_F_lf(mod, phi_mu, F=F)

    # Evolve the prior moments k steps ahead
    a, R = forecast_aR(mod, k)

    # Mean and variance
    ft, qt = mod.get_mean_and_var_lf(F, a, R, phi_mu, phi_sigma, mod.ilf)
    if state_mean_var:
        return ft, qt

    # Choose conjugate prior, match mean and variance
    param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2)

    if mean_only:
        return mod.get_mean(param1, param2)

    # Simulate from the forecast distribution
    return mod.simulate(param1, param2, nsamps)

#export
def forecast_path_lf_copula(mod, k, X = None, phi_mu = None, phi_sigma = None, phi_psi = None, nsamps = 1, t_dist=False, y = None, nu=9, return_mu_cov=False):
    """Path forecast over times t+1..t+k with a latent factor, via a copula.

    phi_mu / phi_sigma are lists of k latent factor means / variances;
    phi_psi is an optional list of k-1 cross-time latent factor covariances.
    Returns (lambda_mu, lambda_cov) if return_mu_cov; the path density of y
    if y is given; otherwise nsamps joint path samples.
    """
    lambda_mu = np.zeros([k])
    lambda_cov = np.zeros([k, k])

    F = np.copy(mod.F)

    Flist = [None for x in range(k)]
    Rlist = [None for x in range(k)]
    alist = [None for x in range(k)]

    for i in range(k):
        # Get the marginal a, R
        a, R = forecast_aR(mod, i+1)

        alist[i] = a
        Rlist[i] = R

        # Plug in the correct F values
        if mod.nregn > 0:
            F = update_F(mod, X[i,:], F=F)
        # if mod.nregn > 0:
        #     F[mod.iregn] = X[i,:].reshape(mod.nregn,1)

        # Put the mean of the latent factor phi_mu into the F vector
        F = update_F_lf(mod, phi_mu[i], F=F)
        # if mod.nlf > 0:
        #     F[mod.ilf] = phi_mu[i].reshape(mod.nlf,1)

        Flist[i] = np.copy(F)

        # Find lambda mean and var
        ft, qt = mod.get_mean_and_var_lf(F, a, R, phi_mu[i], phi_sigma[i], mod.ilf)
        lambda_mu[i] = ft
        lambda_cov[i,i] = qt

        # Find covariances with previous lambda values
        for j in range(i):
            # Covariance matrix between the state vector at times t+j, t+i
            cov_ij = np.linalg.matrix_power(mod.G, i-j) @ Rlist[j]

            # Covariance between lambda at times j, i
            # If phi_psi is none, we assume the latent factors phi at times t+i, t+j are independent of one another
            if phi_psi is None:
                lambda_cov[j,i] = lambda_cov[i,j] = Flist[j].T @ cov_ij @ Flist[i]
            else:
                lambda_cov[j,i] = lambda_cov[i,j] = Flist[j].T @ cov_ij @ Flist[i] + \
                    alist[i][mod.ilf].T @ phi_psi[i-1][:,:,j] @ alist[j][mod.ilf] + \
                    np.trace(cov_ij[np.ix_(mod.ilf, mod.ilf)] @ phi_psi[i-1][:,:,j])

    if return_mu_cov:
        return lambda_mu, lambda_cov

    if y is not None:
        return forecast_path_copula_density_MC(mod, y, lambda_mu, lambda_cov, t_dist, nu, nsamps)
    else:
        return forecast_path_copula_sim(mod, k, lambda_mu, lambda_cov, nsamps, t_dist, nu)
```

These functions are called automatically in PyBATS when working with a DGLM that has a latent factor component. The new arguments are:

- `phi_mu`: Mean vector of the latent factor. For `forecast_path_lf_copula`, it should be a list of `k` mean vectors.
- `phi_sigma`: Variance matrix of the latent factor. For `forecast_path_lf_copula`, it should be a list of `k` variance matrices. The following extra arguments are only applicable to path forecasting with `forecast_path_lf_copula`: - `phi_psi`: This is a list of `k-1` covariance matrices $cov(\phi_{t+k}, \phi_{t+j})$. Each element is a numpy array. - `t_dist`: Boolean. By default, a Gaussian copula is used. If True, then a t-copula is used instead. - `y`: Future path of observations y. If provided, output will be the forecast density of y. - `nu`: Degrees of freedom for t-copula. ## Simulation-based latent factor analysis ``` #export def update_lf_sample(mod, y = None, X = None, phi_samps = None, parallel=False): """ DGLM update function with samples of a latent factor. $\phi_{samps}$ = Array of simulated values of a latent factor. """ # If data is missing then skip discounting and updating, posterior = prior if y is None or np.isnan(y): mod.t += 1 mod.m = mod.a mod.C = mod.R # Get priors a, R for time t + 1 from the posteriors m, C mod.a = mod.G @ mod.m mod.R = mod.G @ mod.C @ mod.G.T mod.R = (mod.R + mod.R.T)/2 mod.W = mod.get_W(X=X) else: update_F(mod, X) # Update m, C using a weighted average of the samples if parallel: f = partial(update_lf_sample_forwardfilt, mod, y, mod.F, mod.a, mod.R) p = multiprocessing.Pool(10) output = p.map(f, phi_samps) p.close() else: output = map(lambda p: update_lf_sample_forwardfilt(mod, y, mod.F, mod.a, mod.R, p), phi_samps) mlist, Clist, logliklist = list(map(list, zip(*output))) w = (np.exp(logliklist) / np.sum(np.exp(logliklist))).reshape(-1,1,1) mlist = np.array(mlist) Clist = np.array(Clist) mod.m = np.sum(mlist*w, axis=0) mod.C = np.sum(Clist*w, axis=0) + np.cov((mlist).reshape(-1, mod.m.shape[0]), rowvar=False, aweights = w.reshape(-1)) # Add 1 to the time index mod.t += 1 # Get priors a, R from the posteriors m, C mod.a = mod.G @ mod.m mod.R = mod.G @ mod.C @ mod.G.T mod.R = (mod.R + mod.R.T)/2 # prevent rounding issues # 
Discount information if observation is observed mod.W = mod.get_W(X=X) mod.R = mod.R + mod.W #export def update_lf_sample_forwardfilt(mod, y, F, a, R, phi): F = update_F_lf(mod, phi, F=F) # F[mod.ilf] = phi.reshape(-1,1) ft, qt = mod.get_mean_and_var(F, a, R) # get the conjugate prior parameters param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2) # Get the log-likelihood of 'y' under these parameters loglik = mod.loglik(y, param1, param2) # Update to the conjugate posterior after observing 'y' param1, param2, ft_star, qt_star = mod.update_conjugate_params(y, param1, param2) # Kalman filter update on the state vector (using Linear Bayes approximation) m = a + R @ F * (ft_star - ft)/qt C = R - R @ F @ F.T @ R * (1 - qt_star/qt)/qt return m, C, np.ravel(loglik)[0] #export def forecast_marginal_lf_sample(mod, k, X = None, phi_samps = None, mean_only = False): # Plug in the correct F values F = update_F(mod, X, F=mod.F.copy()) a, R = forecast_aR(mod, k) # Simulate from the forecast distribution return np.array(list(map(lambda p: lf_simulate_from_sample(mod, F, a, R, p), phi_samps))).reshape(-1) #exporti def lf_simulate_from_sample(mod, F, a, R, phi): F = update_F_lf(mod, phi, F=F) # F[mod.ilf] = phi.reshape(-1,1) ft, qt = mod.get_mean_and_var(F, a, R) # get the conjugate prior parameters param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2) # Update to the conjugate posterior after observing 'y' return mod.simulate(param1, param2, 1) #export def forecast_path_lf_sample(mod, k, X=None, phi_samps = None): nsamps = len(phi_samps) samps = np.zeros([nsamps, k]) F = np.copy(mod.F) for n in range(nsamps): param1 = mod.param1 param2 = mod.param2 a = np.copy(mod.a) R = np.copy(mod.R) for i in range(k): # Plug in X values if mod.nregn > 0: F = update_F(mod, X[i, :], F=F) # if mod.nregn > 0: # F[mod.iregn] = X[i, :].reshape(mod.nregn, 1) # Plug in phi sample F = update_F_lf(mod, phi_samps[n][i], F=F) # F[mod.ilf] = 
phi_samps[n][i].reshape(-1, 1) # Get mean and variance ft, qt = mod.get_mean_and_var(F, a, R) # Choose conjugate prior, match mean and variance param1, param2 = mod.get_conjugate_params(ft, qt, param1, param2) # Simulate next observation samps[n, i] = mod.simulate(param1, param2, nsamps=1) # Update based on that observation param1, param2, ft_star, qt_star = mod.update_conjugate_params(samps[n, i], param1, param2) # Kalman filter update on the state vector (using Linear Bayes approximation) m = a + R @ F * (ft_star - ft) / qt C = R - R @ F @ F.T @ R * (1 - qt_star / qt) / qt # Get priors a, R for the next time step a = mod.G @ m R = mod.G @ C @ mod.G.T R = (R + R.T) / 2 # Discount information if mod.discount_forecast: R = R + mod.W return samps ``` These functions can be called through `dglm.update`, `dglm.forecast_marginal`, and `dglm.forecast_path` by setting the argument `analytic=False`. They represent an alternative method of analysis by working with simulated values of the latent factor. The simulated values are passed into the function as an array `phi_samps`, where each row contains a simulated value of the latent factor. This is a more accurate analysis method because it does not reduce the distribution of the latent factor down to its mean and variance. However, it is also more computationally demanding to work with the simulated values, so there is a trade-off between speed and accuracy. 
## Multivariate forecasting with multiple DGLMs

```
#export
def forecast_joint_marginal_lf_copula(mod_list, k, X_list=None, phi_mu = None, phi_sigma = None, nsamps=1, y=None, t_dist=False, nu=9, return_cov=False):
    """Joint k-step forecast across DGLMs that share one latent factor.

    The marginal mean/variance of each model's linear predictor is computed
    with the latent factor mean plugged into F; cross-model covariance comes
    only from the shared latent factor variance phi_sigma. Returns lambda_cov
    if return_cov; the joint density of y if y is given; otherwise nsamps
    joint samples via a (Gaussian or t) copula.
    """
    p = len(mod_list)
    lambda_mu = np.zeros([p])
    lambda_cov = np.zeros([p, p])

    Flist = [None for x in range(p)]
    Rlist = [None for x in range(p)]
    alist = [None for x in range(p)]

    if X_list is None:
        X_list = [[] for i in range(p)]

    for i, [X, mod] in enumerate(zip(X_list, mod_list)):
        # Evolve to the prior at time t + k
        a, R = forecast_aR(mod, k)

        Rlist[i] = R
        # Keep only the latent-factor rows of a for the cross-covariances below
        alist[i] = a[mod.ilf]

        # Plug in the correct F values
        if mod.nregn > 0:
            F = update_F(mod, X, F=mod.F.copy())
        else:
            F = mod.F.copy()

        # Put the mean of the latent factor phi_mu into the F vector
        F = update_F_lf(mod, phi_mu, F=F)
        Flist[i] = F

        # Find lambda mean and var
        # NOTE(review): uses mod.get_mean_and_var (no phi_sigma term in the
        # marginal variance), unlike the single-model forecast functions which
        # call get_mean_and_var_lf — confirm this is the intended decomposition.
        ft, qt = mod.get_mean_and_var(F, a, R)
        lambda_mu[i] = ft
        lambda_cov[i, i] = qt

        # Find covariances with lambda values from other models
        for j in range(i):
            # Covariance matrix between lambda from models i, j
            if phi_sigma.ndim == 0:
                # Scalar latent factor variance
                lambda_cov[j, i] = lambda_cov[i, j] = np.squeeze(alist[i] * phi_sigma * alist[j])
            else:
                lambda_cov[j, i] = lambda_cov[i, j] = alist[i].T @ phi_sigma @ alist[j]

    if return_cov:
        return lambda_cov

    if y is not None:
        return forecast_joint_copula_density_MC(mod_list, y, lambda_mu, lambda_cov, t_dist, nu, nsamps)
    else:
        return forecast_joint_copula_sim(mod_list, lambda_mu, lambda_cov, nsamps, t_dist, nu)
```

`forecast_joint_marginal_lf_copula` is used to *recouple* a set of DGLMs which share the same latent factor. In other words, if the same latent factor is used in multiple models, then their forecasts will be correlated. This function allows for joint forecasting across these separated DGLMs.

A classic example comes from retail sales. The latent factor may represent an effect at the total store level - say, customer traffic based on the day-of-week. A separate DGLM models the sales of each individual item. To jointly forecast the sales of many items, the models can all be passed into `forecast_joint_marginal_lf_copula`, along with the latent factor mean and variance, to draw joint samples from the forecast distribution.

## Multivariate forecasting with multiple DCMMs

```
#export
def forecast_joint_marginal_lf_copula_dcmm(dcmm_list, k, X_list=None, phi_mu = None, phi_sigma = None, nsamps=1, t_dist=False, nu=9, return_cov=False):
    """Joint k-step forecast across DCMMs that share one latent factor.

    Each DCMM is split into its Bernoulli and (shifted) Poisson components,
    which are jointly sampled through one copula; final samples are
    bern * (pois + 1), i.e. zero or a positive count. Returns the sample
    covariance instead if return_cov.
    """
    bern_list = [mod.bern_mod for mod in dcmm_list]
    pois_list = [mod.pois_mod for mod in dcmm_list]
    # Order: all Bernoulli components first, then all Poisson components
    mod_list = [*bern_list, *pois_list]
    p = len(mod_list)

    lambda_mu = np.zeros([p])
    lambda_cov = np.zeros([p, p])

    Flist = [None for x in range(p)]
    Rlist = [None for x in range(p)]
    alist = [None for x in range(p)]

    if X_list is None:
        X_list = [[] for i in range(p)]
    else:
        # Duplicate the regressors so each DCMM's two components see the same X
        X_list = [*X_list, *X_list]

    for i, [X, mod] in enumerate(zip(X_list, mod_list)):
        # Evolve to the prior at time t + k
        a, R = forecast_aR(mod, k)

        Rlist[i] = R
        alist[i] = a[mod.ilf]

        # Plug in the correct F values
        if mod.nregn > 0:
            F = update_F(mod, X, F=mod.F.copy())
        else:
            F = mod.F.copy()

        # Put the mean of the latent factor phi_mu into the F vector
        F = update_F_lf(mod, phi_mu, F=F)
        Flist[i] = F

        # Find lambda mean and var
        ft, qt = mod.get_mean_and_var(F, a, R)
        lambda_mu[i] = ft
        lambda_cov[i, i] = qt

        # Find covariances with lambda values from other models
        for j in range(i):
            # Covariance matrix between lambda from models i, j
            if phi_sigma.ndim == 0:
                lambda_cov[j, i] = lambda_cov[i, j] = np.squeeze(alist[i] * phi_sigma * alist[j])
            else:
                lambda_cov[j, i] = lambda_cov[i, j] = alist[i].T @ phi_sigma @ alist[j]

    samps = forecast_joint_copula_sim(mod_list, lambda_mu, lambda_cov, nsamps, t_dist, nu)
    bern_samps = samps[:,:len(bern_list)]
    pois_samps = samps[:, len(bern_list):]
    # Shifted Poisson: counts are 1 + Poisson draw when the Bernoulli is 1
    pois_samps += 1
    samps = bern_samps * pois_samps

    if return_cov:
        return np.cov(samps.T)

    return samps
```

`forecast_joint_marginal_lf_copula_dcmm` behaves similarly to `forecast_joint_marginal_lf_copula`, but for a set of related
DCMMs instead of related DGLMs. ## DCMM forecast functions ``` #export def forecast_marginal_lf_dcmm(mod, k, X=None, phi_mu=None, phi_sigma=None, nsamps=1, t_dist=False, nu=9, return_cov=False): mod_list = [mod.bern_mod, mod.pois_mod] lambda_mu = np.zeros(2) lambda_cov = np.zeros([2,2]) a_lf_list=[] for i, mod in enumerate(mod_list): # Plug in the correct F values F = update_F(mod, X, F=mod.F.copy()) # F = np.copy(mod.F) # if mod.nregn > 0: # F[mod.iregn] = X.reshape(mod.nregn,1) # Put the mean of the latent factor phi_mu into the F vector F = update_F_lf(mod, phi_mu, F=F) # if mod.nlf > 0: # F[mod.ilf] = phi_mu.reshape(mod.nlf,1) a, R = forecast_aR(mod, k) a_lf_list.append(a[mod.ilf]) # Mean and variance ft, qt = mod.get_mean_and_var_lf(F, a, R, phi_mu, phi_sigma, mod.ilf) lambda_mu[i] = ft lambda_cov[i,i] = qt lambda_cov[0,1] = lambda_cov[1,0] = a_lf_list[0].T @ phi_sigma @ a_lf_list[1] samps = forecast_joint_copula_sim(mod_list, lambda_mu, lambda_cov, nsamps, t_dist, nu) bern_samps = samps[:, 0] pois_samps = samps[:, 1] pois_samps += 1 samps = bern_samps * pois_samps if return_cov: return np.cov(samps.T) return samps #export def forecast_path_lf_dcmm(mod, k, X=None, phi_mu=None, phi_sigma=None, phi_psi=None, nsamps=1, t_dist=False, nu=9, return_cov=False): lambda_mu = np.zeros(k*2) lambda_cov = np.zeros([k*2, k*2]) mucov_bern = forecast_path_lf_copula(mod.bern_mod, k, X, phi_mu, phi_sigma, phi_psi, return_mu_cov=True) mucov_pois = forecast_path_lf_copula(mod.pois_mod, k, X, phi_mu, phi_sigma, phi_psi, return_mu_cov=True) lambda_mu[:k] = mucov_bern[0] lambda_mu[k:] = mucov_pois[0] lambda_cov[:k,:k] = mucov_bern[1] lambda_cov[k:,k:] = mucov_pois[1] for i in range(k): a_bern, R_bern = forecast_aR(mod.bern_mod, i+1) for j in range(k): a_pois, R_pois = forecast_aR(mod.pois_mod, j+1) if i == j: cov = float(a_bern[mod.bern_mod.ilf].T @ phi_sigma[i] @ a_pois[mod.pois_mod.ilf]) elif i > j: cov = float(a_bern[mod.bern_mod.ilf].T @ phi_psi[i-1][j] @ 
a_pois[mod.pois_mod.ilf]) elif j > i: cov = float(a_bern[mod.bern_mod.ilf].T @ phi_psi[j-1][i] @ a_pois[mod.pois_mod.ilf]) lambda_cov[i, j + k] = lambda_cov[j + k, i] = cov mod_list = [*[mod.bern_mod]*k, *[mod.pois_mod]*k] samps = forecast_joint_copula_sim(mod_list, lambda_mu, lambda_cov, nsamps, t_dist, nu) bern_samps = samps[:, :k] pois_samps = samps[:, k:] pois_samps += 1 samps = bern_samps * pois_samps if return_cov: return np.cov(samps.T) return samps ``` These functions are for marginal and path forecasting with a latent factor DCMM. They may be accessed as methods from `dcmm`. ``` #hide from nbdev.export import notebook2script notebook2script() ```
github_jupyter
``` from ei_net import * # import the .py file but you can find all the functions at the bottom of this notebook from utilities import show_values import matplotlib.pyplot as plt %matplotlib inline ########################################## ############ PLOTTING SETUP ############## EI_cmap = "Greys" where_to_save_pngs = "../figs/pngs/" where_to_save_pdfs = "../figs/pdfs/" save = True ########################################## ########################################## ``` # Uncertainty and causal emergence in complex networks # Chapter 01: Effective Information in Networks ## Networks and Causal Structure Networks provide a powerful syntax for representing a wide range of systems, from the trivially simple to the highly complex. It is common to characterize networks based on structural properties like their degree distribution or whether they show community structure. While our understanding of these structural properties of networks has been crucial for the rapid rise of network science as a discipline, there is a distinct gap in our treatment of both dependencies between nodes and also higher scales in networks. This gap is especially pressing because networks often have an interpretation where links represent dependencies, such as contact networks in epidemiology, neuronal and functional networks in the brain, or interaction networks among cells, genes, or drugs, and these networks can often be analyzed at multiple different scales. Previously, others have used directed acyclic graphs known as "causal diagrams" to represent causal relationships as dependencies in networks. But there has been little research on quantifying or broadly classifying such causation in networks, particularly those that have both weighted connections and feedback, which are hallmarks of complex systems across domains. 
Here we introduce information-theoretic measures designed to capture the information contained in the dependencies of networks and which can be used to identify when these networks possess informative higher scales. ## Effective Information Describing cause and effect implicitly invokes the idea of a network. For example, if a system in a particular state, *A*, always transitions to state *B*, the causal relationship between *A* and *B* can be represented by a node-link diagram wherein the two nodes---*A* and *B*---are connected by a directed arrow, indicating that *B* depends on *A*. In such a network, the out-weight vector, $W^{out}_{i}$, of a node, $v_i$, represents the possible transitions and their probabilities from that node. Specifically, $W^{out}_{i}$ consists of weights $w_{ij}$ between node $v_i$ and its neighbors $v_j$, where $w_{ij}=0.0$ if there is no edge from $v_i$ to $v_j$. This means the edge weights $w_{ij}$ can be interpreted as the probability $p_{ij}$ that a random walker on $v_i$ will transition to $v_j$ in the next time step. We will refer to such a network as having a *causal structure*. In the cases where links between nodes represent dependency in general, such as influence, strength, or potential causal interactions, but not explicitly transitions (or where details about transitions is lacking), for our analysis we create $W^{out}_{i}$ by normalizing each node's out-weight vector to sum to $1.0$. This generalizes our results to multiple types of representations (although what sort of dependencies the links in the network represent should be kept in mind when interpreting the values of the measures we introduce below). A network's causal structure can be characterized by the uncertainty in the relationships among the nodes' out-weights (possible effects) and in-weights (possible causes). The total information in the dependencies between nodes is a function of this uncertainty and can be derived from two fundamental properties. 
The first is the uncertainty of a node's effects, which can be quantified by the Shannon entropy of its out-weights, $H(W^{out}_{i})$. The average of this entropy, $\langle H(W^{out}_{i})\rangle$, across all nodes is the amount of noise present in the network's causal structure. Only if $\langle H(W^{out}_{i})\rangle$ is zero is the network *deterministic*.

The second fundamental causal property is how weight is distributed across the whole network, $\langle W^{out}_{i}\rangle$. This vector $\langle W^{out}_{i}\rangle$ consists of elements that are the sum of the in-weights $w_{ji}$ to each node $v_i$ from each of its incoming neighbors, $v_j$ (then normalized by the total weight of the network). Its entropy, $H(\langle W^{out}_{i}\rangle)$, reflects how certainty is distributed across the network. If all nodes link only to the same node, then $H(\langle W^{out}_{i}\rangle)$ is zero, and the network is totally *degenerate* since all causes lead to the same effect.

From these two properties we can derive the amount of information in a network's causal structure, the *effective information* ($EI$), as:

$$ EI = H(\langle W^{out}_{i} \rangle) - \langle H(W^{out}_{i}) \rangle $$

Here, we use this measure to develop a general classification of networks. Networks with high $EI$ contain more certainty in the relationships between nodes in the network (since the links represent greater dependencies), whereas networks with low $EI$ contain less certainty. In this work, we show how the connectivity and different growth rules of a network have a deep relationship to that network's $EI$. This also provides a principled means of quantifying the amount of information among the micro-, meso-, and macroscale dependencies in a network. We introduce a formalism for finding and assessing the most informative scale of a network: the scale that minimizes the uncertainty in the dependencies between nodes.
For some networks, a macroscale description of the network can be more informative in this manner, demonstrating a phenomenon known as *causal emergence*. ## 1.0 Create a Few Example Transition-Probability Matrices ``` Copy_Copy = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) And_And = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) Or_Or = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]]) Or_Copy = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]]) Star = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]) ``` ### 1.0.1 Plot these TPMs, showing their $EI$ values ``` fig, (ax0, ax1, ax2, ax3, ax4) = plt.subplots(1, 5, figsize=(22,4)) c0 = ax0.pcolor( np.arange(-0.5, Copy_Copy.shape[0], 1), np.arange(-0.5, Copy_Copy.shape[0], 1), Copy_Copy, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c1 = ax1.pcolor( np.arange(-0.5, And_And.shape[0], 1), np.arange(-0.5, And_And.shape[0], 1), And_And, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c2 = ax2.pcolor( np.arange(-0.5, Or_Or.shape[0], 1), np.arange(-0.5, Or_Or.shape[0], 1), Or_Or, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c3 = ax3.pcolor( np.arange(-0.5, Or_Copy.shape[0], 1), np.arange(-0.5, Or_Copy.shape[0], 1), Or_Copy, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c4 = ax4.pcolor( np.arange(-0.5, Star.shape[0], 1), np.arange(-0.5, Star.shape[0], 1), Star, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) show_values(c0, ax=ax0, fmt="%.1f", fontsize=16) show_values(c1, ax=ax1, fmt="%.1f", fontsize=16) show_values(c2, ax=ax2, fmt="%.1f", fontsize=16) show_values(c3, ax=ax3, fmt="%.1f", fontsize=16) show_values(c4, ax=ax4, fmt="%.1f", fontsize=16) ax0.invert_yaxis() ax1.invert_yaxis() ax2.invert_yaxis() ax3.invert_yaxis() ax4.invert_yaxis() xlabs = 
ylabs = ['0|0','0|1', '1|0', '1|1'] ax0.set_xticks(np.arange(0, Copy_Copy.shape[0], 1)) ax0.set_yticks(np.arange(0, Copy_Copy.shape[1], 1)) ax0.set_xticklabels(xlabs, fontsize=14) ax0.set_yticklabels(ylabs, fontsize=14) ax0.set_xticks(np.arange(-0.5, Copy_Copy.shape[0]-0.5, 1), minor=True) ax0.set_yticks(np.arange(-0.5, Copy_Copy.shape[1]-0.5, 1), minor=True) ax1.set_xticks(np.arange(0, And_And.shape[0], 1)) ax1.set_yticks(np.arange(0, And_And.shape[1], 1)) ax1.set_xticklabels(xlabs, fontsize=14) ax1.set_yticklabels(ylabs, fontsize=14) ax1.set_xticks(np.arange(-0.5, And_And.shape[0]-0.5, 1), minor=True) ax1.set_yticks(np.arange(-0.5, And_And.shape[1]-0.5, 1), minor=True) ax2.set_xticks(np.arange(0, Or_Or.shape[0], 1)) ax2.set_yticks(np.arange(0, Or_Or.shape[1], 1)) ax2.set_xticklabels(xlabs, fontsize=14) ax2.set_yticklabels(ylabs, fontsize=14) ax2.set_xticks(np.arange(-0.5, Or_Or.shape[0]-0.5, 1), minor=True) ax2.set_yticks(np.arange(-0.5, Or_Or.shape[1]-0.5, 1), minor=True) ax3.set_xticks(np.arange(0, Or_Copy.shape[0], 1)) ax3.set_yticks(np.arange(0, Or_Copy.shape[1], 1)) ax3.set_xticklabels(xlabs, fontsize=14) ax3.set_yticklabels(ylabs, fontsize=14) ax3.set_xticks(np.arange(-0.5, Or_Copy.shape[0]-0.5, 1), minor=True) ax3.set_yticks(np.arange(-0.5, Or_Copy.shape[1]-0.5, 1), minor=True) ax4.set_xticks(np.arange(0, Star.shape[0], 1)) ax4.set_yticks(np.arange(0, Star.shape[1], 1)) ax4.set_xticklabels(xlabs, fontsize=14) ax4.set_yticklabels(ylabs, fontsize=14) ax4.set_xticks(np.arange(-0.5, Star.shape[0]-0.5, 1), minor=True) ax4.set_yticks(np.arange(-0.5, Star.shape[1]-0.5, 1), minor=True) ax0.xaxis.tick_top() ax1.xaxis.tick_top() ax2.xaxis.tick_top() ax3.xaxis.tick_top() ax4.xaxis.tick_top() ax0.set_title('Copy-Copy logic gate\n $EI = %.3f$ \n'% effective_information(Copy_Copy), fontsize=20, pad=10) ax1.set_title('And-And logic gate\n $EI = %.3f$ \n'% effective_information(And_And), fontsize=20, pad=10) ax2.set_title('Or-Or logic gate\n $EI = %.3f$ \n'% 
effective_information(Or_Or), fontsize=20, pad=10) ax3.set_title('Or-Copy logic gate\n $EI = %.3f$ \n'% effective_information(Or_Copy), fontsize=20, pad=10) ax4.set_title('Star-like logic gate\n $EI = %.3f$ \n'% effective_information(Star), fontsize=20, pad=10) if save: plt.savefig(where_to_save_pngs+"Example1_LogicGates.png", bbox_inches='tight', dpi=425) plt.savefig(where_to_save_pdfs+"Example1_LogicGates.pdf", bbox_inches='tight') plt.show() ``` ______________________ ## 1.1 Add noise to the transition probability matrices ``` noise = np.random.uniform(0.0,0.1,size=Copy_Copy.shape) Copy_Copy_noise = Copy_Copy + noise Copy_Copy_noise = Copy_Copy_noise / Copy_Copy_noise.sum(axis=1) noise = np.random.uniform(0.0,0.1,size=And_And.shape) And_And_noise = And_And + noise And_And_noise = And_And_noise / And_And_noise.sum(axis=1) noise = np.random.uniform(0.0,0.1,size=Or_Or.shape) Or_Or_noise = Or_Or + noise Or_Or_noise = Or_Or_noise / Or_Or_noise.sum(axis=1) noise = np.random.uniform(0.0,0.1,size=Or_Copy.shape) Or_Copy_noise = Or_Copy + noise Or_Copy_noise = Or_Copy_noise / Or_Copy_noise.sum(axis=1) noise = np.random.uniform(0.0,0.1,size=Star.shape) Star_noise = Star + noise Star_noise = Star_noise / Star_noise.sum(axis=1) ``` ### 1.1.1 Plot these TPMs, showing their $EI$ values ``` fig, (ax0, ax1, ax2, ax3, ax4) = plt.subplots(1, 5, figsize=(22,4)) c0 = ax0.pcolor( np.arange(-0.5, Copy_Copy_noise.shape[0], 1), np.arange(-0.5, Copy_Copy_noise.shape[0], 1), Copy_Copy_noise, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c1 = ax1.pcolor( np.arange(-0.5, And_And_noise.shape[0], 1), np.arange(-0.5, And_And_noise.shape[0], 1), And_And_noise, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c2 = ax2.pcolor( np.arange(-0.5, Or_Or_noise.shape[0], 1), np.arange(-0.5, Or_Or_noise.shape[0], 1), Or_Or_noise, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c3 = ax3.pcolor( np.arange(-0.5, Or_Copy_noise.shape[0], 1), np.arange(-0.5, Or_Copy_noise.shape[0], 1), 
Or_Copy_noise, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c4 = ax4.pcolor( np.arange(-0.5, Star_noise.shape[0], 1), np.arange(-0.5, Star_noise.shape[0], 1), Star_noise, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) show_values(c0, ax=ax0, fmt="%.2f", fontsize=16) show_values(c1, ax=ax1, fmt="%.2f", fontsize=16) show_values(c2, ax=ax2, fmt="%.2f", fontsize=16) show_values(c3, ax=ax3, fmt="%.2f", fontsize=16) show_values(c4, ax=ax4, fmt="%.2f", fontsize=16) ax0.invert_yaxis() ax1.invert_yaxis() ax2.invert_yaxis() ax3.invert_yaxis() ax4.invert_yaxis() xlabs = ylabs = ['0|0','0|1', '1|0', '1|1'] ax0.set_xticks(np.arange(0, Copy_Copy_noise.shape[0], 1)) ax0.set_yticks(np.arange(0, Copy_Copy_noise.shape[1], 1)) ax0.set_xticklabels(xlabs, fontsize=14) ax0.set_yticklabels(ylabs, fontsize=14) ax0.set_xticks(np.arange(-0.5, Copy_Copy_noise.shape[0]-0.5, 1), minor=True) ax0.set_yticks(np.arange(-0.5, Copy_Copy_noise.shape[1]-0.5, 1), minor=True) ax1.set_xticks(np.arange(0, And_And_noise.shape[0], 1)) ax1.set_yticks(np.arange(0, And_And_noise.shape[1], 1)) ax1.set_xticklabels(xlabs, fontsize=14) ax1.set_yticklabels(ylabs, fontsize=14) ax1.set_xticks(np.arange(-0.5, And_And_noise.shape[0]-0.5, 1), minor=True) ax1.set_yticks(np.arange(-0.5, And_And_noise.shape[1]-0.5, 1), minor=True) ax2.set_xticks(np.arange(0, Or_Or_noise.shape[0], 1)) ax2.set_yticks(np.arange(0, Or_Or_noise.shape[1], 1)) ax2.set_xticklabels(xlabs, fontsize=14) ax2.set_yticklabels(ylabs, fontsize=14) ax2.set_xticks(np.arange(-0.5, Or_Or_noise.shape[0]-0.5, 1), minor=True) ax2.set_yticks(np.arange(-0.5, Or_Or_noise.shape[1]-0.5, 1), minor=True) ax3.set_xticks(np.arange(0, Or_Copy_noise.shape[0], 1)) ax3.set_yticks(np.arange(0, Or_Copy_noise.shape[1], 1)) ax3.set_xticklabels(xlabs, fontsize=14) ax3.set_yticklabels(ylabs, fontsize=14) ax3.set_xticks(np.arange(-0.5, Or_Copy_noise.shape[0]-0.5, 1), minor=True) ax3.set_yticks(np.arange(-0.5, Or_Copy_noise.shape[1]-0.5, 1), minor=True) 
ax4.set_xticks(np.arange(0, Star_noise.shape[0], 1)) ax4.set_yticks(np.arange(0, Star_noise.shape[1], 1)) ax4.set_xticklabels(xlabs, fontsize=14) ax4.set_yticklabels(ylabs, fontsize=14) ax4.set_xticks(np.arange(-0.5, Star_noise.shape[0]-0.5, 1), minor=True) ax4.set_yticks(np.arange(-0.5, Star_noise.shape[1]-0.5, 1), minor=True) ax0.xaxis.tick_top() ax1.xaxis.tick_top() ax2.xaxis.tick_top() ax3.xaxis.tick_top() ax4.xaxis.tick_top() ax0.set_title('Copy-Copy logic gate\n $EI = %.3f$ \n'% effective_information(Copy_Copy_noise), fontsize=20, pad=10) ax1.set_title('And-And logic gate\n $EI = %.3f$ \n'% effective_information(And_And_noise), fontsize=20, pad=10) ax2.set_title('Or-Or logic gate\n $EI = %.3f$ \n'% effective_information(Or_Or_noise), fontsize=20, pad=10) ax3.set_title('Or-Copy logic gate\n $EI = %.3f$ \n'% effective_information(Or_Copy_noise), fontsize=20, pad=10) ax4.set_title('Star-like logic gate\n $EI = %.3f$ \n'% effective_information(Star_noise), fontsize=20, pad=10) if save: plt.savefig(where_to_save_pngs+"Example2_LogicGates.png", bbox_inches='tight', dpi=425) plt.savefig(where_to_save_pdfs+"Example2_LogicGates.pdf", bbox_inches='tight') plt.show() ``` _______________________ ## 1.2 Random Matrices ``` rand0 = np.random.rand(4,4) rand0 = np.array([rand0[i]/sum(rand0[i]) for i in range(rand0.shape[0])]) rand1 = np.random.rand(4,4) rand1 = np.array([rand1[i]/sum(rand1[i]) for i in range(rand1.shape[0])]) rand2 = np.random.rand(4,4) rand2 = np.array([rand2[i]/sum(rand2[i]) for i in range(rand2.shape[0])]) rand3 = np.random.rand(4,4) rand3 = np.array([rand3[i]/sum(rand3[i]) for i in range(rand3.shape[0])]) rand4 = np.random.rand(4,4) rand4 = np.array([rand4[i]/sum(rand4[i]) for i in range(rand4.shape[0])]) ``` ### 1.2.1 Plot these TPMs, showing their $EI$ values ``` fig, (ax0, ax1, ax2, ax3, ax4) = plt.subplots(1, 5, figsize=(22,4)) c0 = ax0.pcolor( np.arange(-0.5, rand0.shape[0], 1), np.arange(-0.5, rand0.shape[0], 1), rand0, edgecolors='#999999', 
linewidths=3.0, cmap=EI_cmap) c1 = ax1.pcolor( np.arange(-0.5, rand1.shape[0], 1), np.arange(-0.5, rand1.shape[0], 1), rand1, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c2 = ax2.pcolor( np.arange(-0.5, rand2.shape[0], 1), np.arange(-0.5, rand2.shape[0], 1), rand2, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c3 = ax3.pcolor( np.arange(-0.5, rand3.shape[0], 1), np.arange(-0.5, rand3.shape[0], 1), rand3, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) c4 = ax4.pcolor( np.arange(-0.5, rand4.shape[0], 1), np.arange(-0.5, rand4.shape[0], 1), rand4, edgecolors='#999999', linewidths=3.0, cmap=EI_cmap) show_values(c0, ax=ax0, fmt="%.2f", fontsize=16) show_values(c1, ax=ax1, fmt="%.2f", fontsize=16) show_values(c2, ax=ax2, fmt="%.2f", fontsize=16) show_values(c3, ax=ax3, fmt="%.2f", fontsize=16) show_values(c4, ax=ax4, fmt="%.2f", fontsize=16) ax0.invert_yaxis() ax1.invert_yaxis() ax2.invert_yaxis() ax3.invert_yaxis() ax4.invert_yaxis() xlabs = ylabs = ['0|0','0|1', '1|0', '1|1'] ax0.set_xticks(np.arange(0, rand0.shape[0], 1)) ax0.set_yticks(np.arange(0, rand0.shape[1], 1)) ax0.set_xticklabels(xlabs, fontsize=14) ax0.set_yticklabels(ylabs, fontsize=14) ax0.set_xticks(np.arange(-0.5, rand0.shape[0]-0.5, 1), minor=True) ax0.set_yticks(np.arange(-0.5, rand0.shape[1]-0.5, 1), minor=True) ax1.set_xticks(np.arange(0, rand1.shape[0], 1)) ax1.set_yticks(np.arange(0, rand1.shape[1], 1)) ax1.set_xticklabels(xlabs, fontsize=14) ax1.set_yticklabels(ylabs, fontsize=14) ax1.set_xticks(np.arange(-0.5, rand1.shape[0]-0.5, 1), minor=True) ax1.set_yticks(np.arange(-0.5, rand1.shape[1]-0.5, 1), minor=True) ax2.set_xticks(np.arange(0, rand2.shape[0], 1)) ax2.set_yticks(np.arange(0, rand2.shape[1], 1)) ax2.set_xticklabels(xlabs, fontsize=14) ax2.set_yticklabels(ylabs, fontsize=14) ax2.set_xticks(np.arange(-0.5, rand2.shape[0]-0.5, 1), minor=True) ax2.set_yticks(np.arange(-0.5, rand2.shape[1]-0.5, 1), minor=True) ax3.set_xticks(np.arange(0, rand3.shape[0], 1)) 
ax3.set_yticks(np.arange(0, rand3.shape[1], 1)) ax3.set_xticklabels(xlabs, fontsize=14) ax3.set_yticklabels(ylabs, fontsize=14) ax3.set_xticks(np.arange(-0.5, rand3.shape[0]-0.5, 1), minor=True) ax3.set_yticks(np.arange(-0.5, rand3.shape[1]-0.5, 1), minor=True) ax4.set_xticks(np.arange(0, rand4.shape[0], 1)) ax4.set_yticks(np.arange(0, rand4.shape[1], 1)) ax4.set_xticklabels(xlabs, fontsize=14) ax4.set_yticklabels(ylabs, fontsize=14) ax4.set_xticks(np.arange(-0.5, rand4.shape[0]-0.5, 1), minor=True) ax4.set_yticks(np.arange(-0.5, rand4.shape[1]-0.5, 1), minor=True) ax0.xaxis.tick_top() ax1.xaxis.tick_top() ax2.xaxis.tick_top() ax3.xaxis.tick_top() ax4.xaxis.tick_top() ax0.set_title('Random TPM 0\n $EI = %.3f$ \n'% effective_information(rand0), fontsize=20, pad=10) ax1.set_title('Random TPM 1\n $EI = %.3f$ \n'% effective_information(rand1), fontsize=20, pad=10) ax2.set_title('Random TPM 2\n $EI = %.3f$ \n'% effective_information(rand2), fontsize=20, pad=10) ax3.set_title('Random TPM 3\n $EI = %.3f$ \n'% effective_information(rand3), fontsize=20, pad=10) ax4.set_title('Random TPM 4\n $EI = %.3f$ \n'% effective_information(rand4), fontsize=20, pad=10) if save: plt.savefig(where_to_save_pngs+"Example3_RandomTPMs.png", bbox_inches='tight', dpi=425) plt.savefig(where_to_save_pdfs+"Example3_RandomTPMs.pdf", bbox_inches='tight') plt.show() ``` ## 1.3 Example calculation figure (Supplemental Information, Figure 1) ``` ############ PLOTTING SETUP ############## from matplotlib import gridspec plt.rc('axes', linewidth=3) font = {'family': 'serif', 'color': 'k', 'weight': 'normal', 'size': 28} plt.rc('text', usetex=True) plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) ########################################## TPM = np.array([[0.0, 0.0, 0.0, 0.5, 0.5], [1/3, 0.0, 1/3, 1/3, 0.0], [0.0, 0.5, 0.0, 0.5, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0], [0.5, 0.0, 0.0, 0.5, 0.0]]) fig = plt.figure(figsize=(7.3, 16)) gs = gridspec.GridSpec(4, 1, height_ratios=[7, 10, 1.2, 1.2]) 
xlabs = ylabs = ['$A$', '$B$', '$C$', '$D$', '$E$'] ax0 = plt.subplot(gs[1]) ax0.set_xticks(np.arange(0, TPM.shape[0], 1)) ax0.set_yticks(np.arange(0, TPM.shape[0], 1)) ax0.set_xticklabels(xlabs, fontsize=32) ax0.set_yticklabels(ylabs, fontsize=32) ax0.set_xticks(np.arange(-0.5, TPM.shape[0]-0.5, 1), minor=True) ax0.set_yticks(np.arange(-0.5, TPM.shape[0]-0.5, 1), minor=True) ax0.tick_params(axis='y', which='major', pad=7) c0 = plt.pcolor( np.arange(-.5, TPM.shape[0], 1), np.arange(-.5, TPM.shape[1], 1), TPM, edgecolors='k', linewidths=3.0, cmap='Blues', vmin=-.05, vmax=1.2) show_values(c0, ax=ax0, fmt="%.2f", fontsize=26) ax0.invert_yaxis() ax0.xaxis.set_label_position("top") ax0.xaxis.tick_top() ax0.set_xlabel(r'$t + 1$', size=28, labelpad=8.0) ax0.set_ylabel(r'$t$', size=28, rotation=0, labelpad=27.0) ax0.xaxis.label.set_position((0.5,5.0)) ax0.text(4.65, 0.20, '$=W_{A}^{out}$', ha='left', rotation=0, wrap=True, size=32) ax0.text(4.65, 1.20, '$=W_{B}^{out}$', ha='left', rotation=0, wrap=True, size=32) ax0.text(4.65, 2.20, '$=W_{C}^{out}$', ha='left', rotation=0, wrap=True, size=32) ax0.text(4.65, 3.20, '$=W_{D}^{out}$', ha='left', rotation=0, wrap=True, size=32) ax0.text(4.65, 4.20, '$=W_{E}^{out}$', ha='left', rotation=0, wrap=True, size=32) ms = 78 ax1 = plt.subplot(gs[2]) Win_j = TPM.sum(axis=0).reshape(1,TPM.shape[0]) Win = W_in(TPM).reshape(1,TPM.shape[0]) c1 = plt.pcolor( np.arange(-.5, Win_j.shape[1], 1), np.arange(0.0, 1.5, 1), Win_j, edgecolors='k', linewidths=3.0, cmap='Oranges', vmin=0, vmax=3.0) show_values(c1, ax=ax1, fmt="%.2f", fontsize=26) ax1.set_xlabel("") ax1.set_ylabel("") ax1.set_xticks([]) ax1.set_yticks([]) ax1.set_xticklabels(['']) ax1.set_yticklabels(['']) ax2 = plt.subplot(gs[3]) c2 = plt.pcolor( np.arange(-.5, Win.shape[1], 1), np.arange(0.0, 1.5, 1), Win, edgecolors='k', linewidths=3.0, cmap='Oranges', vmin=0, vmax=0.75) show_values(c2, ax=ax2, fmt="%.2f", fontsize=26) ax2.set_xlabel("") ax2.set_ylabel("") ax2.set_xticks([]) 
ax2.set_yticks([]) ax2.set_xticklabels(['']) ax2.set_yticklabels(['']) string10 = r'$= \displaystyle\sum_{i=1}^N w_{ij}$' string20 = r'$= \langle W_{i}^{out} \rangle$' ax1.text(4.65, -.15, string10, ha='left', rotation=0, wrap=True, size=28) ax2.text(4.65, 0.15, string20, ha='left', rotation=0, wrap=True, size=32) plt.subplots_adjust(wspace=0, hspace=0.05) if save: plt.savefig(where_to_save_pngs+"Example4_ExampleTPM.png", bbox_inches='tight', dpi=425) plt.savefig(where_to_save_pdfs+"Example4_ExampleTPM.pdf", bbox_inches='tight') plt.show() ``` The adjacency matrix of a network with 1.158 bits of effective information. The rows correspond to $W^{out}_{i}$, a vector of probabilities that a random walker on node $v_i$ at time $t$ transitions to $v_j$ in the following time step, $t+1$. $\langle W^{out}_{i}\rangle$ represents the (normalized) input weight distribution of the network, that is, the probabilities that a random walker will arrive at a given node $v_j$ at $t+1$, after a uniform introduction of random walkers into the network at $t$. 
``` Win = W_in(TPM) vals = Win Win_cols = plt.cm.Oranges(Win+0.15) WoutA_cols = plt.cm.Blues(max(TPM[0])) WoutB_cols = plt.cm.Blues(max(TPM[1])) WoutC_cols = plt.cm.Blues(max(TPM[2])) WoutD_cols = plt.cm.Blues(max(TPM[3])*0.66) WoutE_cols = plt.cm.Blues(max(TPM[4])) tpm0 = TPM Gtpm0 = check_network(tpm0) fig, ((ax00, ax01), (ax02, ax03), (ax04, ax05)) = plt.subplots( 3, 2, figsize=(16*1.3,9*1.3)) plt.subplots_adjust(left=None, bottom=0.1, right=None, top=None, wspace=0.17, hspace=0.3) ax00.bar( np.linspace(0.0, 5.5, 5), TPM[0]+0.01, color=WoutA_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$W_{A}^{out}$') x_wij = np.linspace(0.0, 5.5, 5) y_wij = tpm0[0] for i in range(len(x_wij)): ax00.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax00.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='royalblue', alpha=0.6) ax00.bar( np.linspace(-.6, 4.9, 5), vals, color=Win_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$\langle W_{i}^{out} \rangle$') x_wij = np.linspace(-.6, 4.9, 5) y_wij = vals for i in range(len(x_wij)): ax00.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax00.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#e56c13', alpha=0.9) ax00.set_ylim(0.0,1.19) ax00.set_xlim(-0.95,5.85) ax00.set_xticks(np.linspace(-.3, 5.2, 5)) ax00.set_yticks(np.linspace(0, 1, 6)) ax00.set_yticklabels(np.round(np.linspace(0, 1, 6), 2), size=16) ax00.set_xticklabels(xlabs, size=22) ax00.set_ylabel(r'$w_{ij}$', fontsize=30, rotation='horizontal', labelpad=25) ax00.set_xlabel('out-neighbor', fontsize=20, labelpad=0) ax00.set_axisbelow(True) ax00.grid(which='major', linestyle='-', color='#999999', linewidth=2.5, alpha=0.3) ax00.legend(loc=2, fontsize=22, framealpha=0.7) strax1 = r'$D_{KL}[W^{out}_{A}||\langle W_{i}^{out} \rangle] = %.3f $'%\ effect_information_i(Gtpm0, node_i=0) ax00.text(3.12, 0.802, strax1, ha='center', 
fontsize=28, color='k') ax02.bar( np.linspace(0.0, 5.5, 5), tpm0[1]+0.01, color=WoutB_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$W_{B}^{out}$') x_wij = np.linspace(0.0, 5.5, 5) y_wij = tpm0[1] for i in range(len(x_wij)): ax02.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax02.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='royalblue', alpha=0.6) ax02.set_xticklabels(["A", "B", "C", "D", "E"]) ax02.bar( np.linspace(-.6, 4.9, 5), vals, color=Win_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$\langle W_{i}^{out} \rangle$') x_wij = np.linspace(-.6, 4.9, 5) y_wij = vals for i in range(len(x_wij)): ax02.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax02.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#e56c13', alpha=0.9) ax02.set_ylim(0.0,1.19) ax02.set_xlim(-0.95,5.85) ax02.set_xticks(np.linspace(-.3, 5.2, 5)) ax02.set_yticks(np.linspace(0, 1, 6)) ax02.set_yticklabels(np.round(np.linspace(0, 1, 6), 2), size=16) ax02.set_xticklabels(xlabs, size=22) ax02.set_ylabel(r'$w_{ij}$', fontsize=30, rotation='horizontal', labelpad=25) ax02.set_xlabel('out-neighbor', fontsize=20, labelpad=0) ax02.set_axisbelow(True) ax02.grid(which='major', linestyle='-', color='#999999', linewidth=2.5, alpha=0.3) ax02.legend(loc=2, fontsize=22, framealpha=0.7) strax1 = r'$D_{KL}[W^{out}_{B}||\langle W_{i}^{out} \rangle] = %.3f $'%\ effect_information_i(Gtpm0, node_i=1) ax02.text(3.12, 0.802, strax1, ha='center', fontsize=28, color='k') ax04.bar( np.linspace(0.0, 5.5, 5), tpm0[2]+0.01, color=WoutC_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$W_{C}^{out}$') x_wij = np.linspace(0.0, 5.5, 5) y_wij = tpm0[2] for i in range(len(x_wij)): ax04.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax04.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, 
color='royalblue', alpha=0.6) ax04.set_xticklabels(["A", "B", "C", "D", "E"]) ax04.bar( np.linspace(-.6, 4.9, 5), vals, color=Win_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$\langle W_{i}^{out} \rangle$') x_wij = np.linspace(-.6, 4.9, 5) y_wij = vals for i in range(len(x_wij)): ax04.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax04.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#e56c13', alpha=0.9) ax04.set_ylim(0.0,1.19) ax04.set_xlim(-0.95,5.85) ax04.set_xticks(np.linspace(-.3, 5.2, 5)) ax04.set_yticks(np.linspace(0, 1, 6)) ax04.set_yticklabels(np.round(np.linspace(0, 1, 6), 2), size=16) ax04.set_xticklabels(xlabs, size=22) ax04.set_ylabel(r'$w_{ij}$', fontsize=30, rotation='horizontal', labelpad=25) ax04.set_xlabel('out-neighbor', fontsize=20, labelpad=0) ax04.set_axisbelow(True) ax04.grid(which='major', linestyle='-', color='#999999', linewidth=2.5, alpha=0.3) ax04.legend(loc=2, fontsize=22, framealpha=0.7) strax1 = r'$D_{KL}[W^{out}_{C}||\langle W_{i}^{out} \rangle] = %.3f $'%\ effect_information_i(Gtpm0, node_i=2) ax04.text(3.12, 0.802, strax1, ha='center', fontsize=28, color='k') ax01.bar( np.linspace(0.0, 5.5, 5), tpm0[3]+0.01, color=WoutD_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$W_{D}^{out}$') x_wij = np.linspace(0.0, 5.5, 5) y_wij = tpm0[3] for i in range(len(x_wij)): ax01.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax01.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='royalblue', alpha=0.6) ax01.set_xticklabels(["A", "B", "C", "D", "E"]) ax01.bar( np.linspace(-.6, 4.9, 5), vals, color=Win_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$\langle W_{i}^{out} \rangle$') x_wij = np.linspace(-.6, 4.9, 5) y_wij = vals for i in range(len(x_wij)): ax01.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax01.text(x_wij[i], 
y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#e56c13', alpha=0.9) ax01.set_ylim(0.0,1.19) ax01.set_xlim(-0.95,5.85) ax01.set_xticks(np.linspace(-.3, 5.2, 5)) ax01.set_yticks(np.linspace(0, 1, 6)) ax01.set_yticklabels(np.round(np.linspace(0, 1, 6), 2), size=16) ax01.set_xticklabels(xlabs, size=22) ax01.set_ylabel(r'$w_{ij}$', fontsize=30, rotation='horizontal', labelpad=25) ax01.set_xlabel('out-neighbor', fontsize=20, labelpad=0) ax01.set_axisbelow(True) ax01.grid(which='major', linestyle='-', color='#999999', linewidth=2.5, alpha=0.3) ax01.legend(loc=2, fontsize=22, framealpha=0.7) strax1 = r'$D_{KL}[W^{out}_{D}||\langle W_{i}^{out} \rangle] = %.3f $'%\ effect_information_i(Gtpm0, node_i=3) ax01.text(3.12, 0.802, strax1, ha='center', fontsize=28, color='k') ax03.bar( np.linspace(0.0, 5.5, 5), tpm0[4]+0.01, color=WoutE_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$W_{E}^{out}$') x_wij = np.linspace(0.0, 5.5, 5) y_wij = tpm0[4] for i in range(len(x_wij)): ax03.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax03.text(x_wij[i], y_wij[i]+0.040, "%.2f"%y_wij[i], ha='center', fontsize=21, color='royalblue', alpha=0.6) ax03.set_xticklabels(["A", "B", "C", "D", "E"]) ax03.bar( np.linspace(-.6, 4.9, 5), vals, color=Win_cols, linewidth=2.0, edgecolor='k', width=0.6, label=r'$\langle W_{i}^{out} \rangle$') x_wij = np.linspace(-.6, 4.9, 5) y_wij = vals for i in range(len(x_wij)): ax03.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#262626') ax03.text(x_wij[i], y_wij[i]+0.035, "%.2f"%y_wij[i], ha='center', fontsize=21, color='#e56c13', alpha=0.9) ax03.set_ylim(0.0,1.19) ax03.set_xlim(-0.95,5.85) ax03.set_xticks(np.linspace(-.3, 5.2, 5)) ax03.set_yticks(np.linspace(0, 1, 6)) ax03.set_yticklabels(np.round(np.linspace(0, 1, 6), 2), size=16) ax03.set_xticklabels(xlabs, size=22) ax03.set_ylabel(r'$w_{ij}$', fontsize=30, rotation='horizontal', labelpad=25) 
ax03.set_xlabel('out-neighbor', fontsize=20, labelpad=0) ax03.set_axisbelow(True) ax03.grid(which='major', linestyle='-', color='#999999', linewidth=2.5, alpha=0.3) ax03.legend(loc=2,fontsize=22, framealpha=0.7) strax1 = r'$D_{KL}[W^{out}_{E}||\langle W_{i}^{out} \rangle] = %.3f $'%\ effect_information_i(Gtpm0, node_i=4) ax03.text(3.12, 0.802, strax1, ha='center', fontsize=28, color='k') string1 = r'$EI = \displaystyle\frac{1}{N}$'+\ r'$\displaystyle\sum_{i=1}^N D_{KL}[W_{i}^{out}||$'+\ r'$\langle W_{i}^{out} \rangle]$' string2 = r'$EI = \displaystyle\frac{1}{5}$'+\ r'$\hspace{0.5cm} [0.592 + 1.061 + 1.385 + 1.737 + 1.016]$' string3 = r'$EI = 1.158 \hspace{0.5cm}$' r'$\rm bits$' ax05.text(-.02, 0.590, string1, ha='left', rotation=0, wrap=False, size=28) ax05.text(-.02, 0.250, string2, ha='left', rotation=0, wrap=False, size=28) ax05.text(-.02, -0.01, string3, ha='left', rotation=0, wrap=False, size=28) ax05.axis('off') if save: plt.savefig(where_to_save_pngs+\ "Example4_ExampleCalculation.png", bbox_inches='tight', dpi=425) plt.savefig(where_to_save_pdfs+\ "Example4_ExampleCalculation.pdf", bbox_inches='tight') plt.show() ``` Each node's contribution to the $EI$ ($EI_i$) is the KL divergence of its $W^{out}_{i}$ vector from the network's $\langle W^{out}_{i}\rangle$, known as the *effect information*. $$ EI = \dfrac{1}{N} \displaystyle\sum_{i=1}^N \text{D}_{_{KL}}[W^{out}_{i} || \langle W^{out}_{i} \rangle] $$ where $EI$ is the average of the *effect information*, $EI_i$, of each node. 
This is equivalent to our derivation of $EI$ from first principles above since $$ \begin{align} EI &= \dfrac{1}{N} \displaystyle\sum_{i=1}^N \text{D}_{_{KL}}[W^{out}_{i} || {\langle W^{out}_{i}\rangle}]\\ &= \dfrac{1}{N} \displaystyle\sum_{i=1}^{N} \displaystyle\sum_{j=1}^{N} w_{ij}\log_2\bigg(\dfrac{w_{ij}}{W_{j}}\bigg)\\ &= \dfrac{1}{N} \displaystyle\sum_{i=1}^{N}\bigg( \displaystyle\sum_{j=1}^{N} w_{ij}\log_2(w_{ij}) - \sum_{j=1}^{N} w_{ij}\log_2(W_{j})\bigg)\\ &= \dfrac{1}{N} \displaystyle\sum_{i=1}^{N} \displaystyle\sum_{j=1}^{N} w_{ij}\log_2\big(w_{ij}\big) - \dfrac{1}{N} \displaystyle\sum_{i=1}^{N} \displaystyle\sum_{j=1}^{N} w_{ij}\log_2\big(W_{j}\big) \end{align} $$ - Note that for a given node, $v_i$, the term in the first summation above, $\sum_{j=1}^{N} w_{ij}\log_2\big(w_{ij}\big)$, is equivalent to the negative entropy of the out-weights from $v_i$, $-H(W_i^{out})$. Also note that $W_j$, the *j*th element in the $\langle W^{out}_{i}\rangle$ vector, is the normalized sum of the incoming weights to $v_j$ from its neighbors, $v_i$, such that $W_j=\frac{1}{N} \sum_{i=1}^N w_{ij}$. We substitute these two terms into the equation above such that: $$ EI = \dfrac{1}{N} \sum_{i=1}^{N}-H(W_i^{out}) - \sum_{j=1}^{N} W_j\log_2\big(W_{j}\big) $$ This is equivalent to the formulation of $EI$ above, since $H(\langle W^{out}_{i}\rangle) = -\sum_{j=1}^{N} W_j\log_2(W_{j})$: $$ EI = H(\langle W^{out}_{i}\rangle) -\langle H(W_i^{out}) \rangle $$ In this figure, we adopt the relative entropy formulation of $EI$ for ease of derivation. 
___________________ ## 1.4 Network motifs and effective information (N = 3) ```
# Build the 13 directed 3-node motifs as networkx DiGraphs on nodes {0, 1, 2}.
# Each motif differs only in its edge set; EI is computed per-motif below.
G01 = nx.DiGraph()
G02 = nx.DiGraph()
G03 = nx.DiGraph()
G04 = nx.DiGraph()
G05 = nx.DiGraph()
G06 = nx.DiGraph()
G07 = nx.DiGraph()
G08 = nx.DiGraph()
G09 = nx.DiGraph()
G10 = nx.DiGraph()
G11 = nx.DiGraph()
G12 = nx.DiGraph()
G13 = nx.DiGraph()
G01.add_nodes_from([0,1,2])
G02.add_nodes_from([0,1,2])
G03.add_nodes_from([0,1,2])
G04.add_nodes_from([0,1,2])
G05.add_nodes_from([0,1,2])
G06.add_nodes_from([0,1,2])
G07.add_nodes_from([0,1,2])
G08.add_nodes_from([0,1,2])
G09.add_nodes_from([0,1,2])
G10.add_nodes_from([0,1,2])
G11.add_nodes_from([0,1,2])
G12.add_nodes_from([0,1,2])
G13.add_nodes_from([0,1,2])
# Edge sets distinguishing the motifs (directed).
G01.add_edges_from([(0,1),(0,2)])
G02.add_edges_from([(0,1),(2,0)])
G03.add_edges_from([(0,1),(0,2),(2,0)])
G04.add_edges_from([(1,0),(2,0)])
G05.add_edges_from([(1,0),(1,2),(0,2)]) # e. coli
G06.add_edges_from([(1,0),(1,2),(0,2),(0,1)])
G07.add_edges_from([(1,0),(0,2),(2,0)])
G08.add_edges_from([(1,0),(0,1),(0,2),(2,0)])
G09.add_edges_from([(1,0),(0,2),(2,1)])
G10.add_edges_from([(1,0),(2,0),(1,2),(0,1)])
G11.add_edges_from([(1,0),(2,0),(2,1),(0,1)])
G12.add_edges_from([(0,1),(1,0),(1,2),(2,1),(0,2)])
G13.add_edges_from([(0,1),(1,0),(1,2),(2,1),(0,2),(2,0)])
# Lookup of each motif's graph, its edge list (as a string for labeling),
# and its effective information (EI; `effective_information` is defined
# elsewhere in this notebook's imports).
motif_dict = {"Motif 01": {"G":G01, "edges":str(list(G01.edges())), "EI":effective_information(G01)},
              "Motif 02": {"G":G02, "edges":str(list(G02.edges())), "EI":effective_information(G02)},
              "Motif 03": {"G":G03, "edges":str(list(G03.edges())), "EI":effective_information(G03)},
              "Motif 04": {"G":G04, "edges":str(list(G04.edges())), "EI":effective_information(G04)},
              "Motif 05": {"G":G05, "edges":str(list(G05.edges())), "EI":effective_information(G05)},
              "Motif 06": {"G":G06, "edges":str(list(G06.edges())), "EI":effective_information(G06)},
              "Motif 07": {"G":G07, "edges":str(list(G07.edges())), "EI":effective_information(G07)},
              "Motif 08": {"G":G08, "edges":str(list(G08.edges())), "EI":effective_information(G08)},
              "Motif 09": {"G":G09, "edges":str(list(G09.edges())), "EI":effective_information(G09)},
              "Motif 10": {"G":G10, "edges":str(list(G10.edges())), "EI":effective_information(G10)},
              "Motif 11": {"G":G11, "edges":str(list(G11.edges())), "EI":effective_information(G11)},
              "Motif 12": {"G":G12, "edges":str(list(G12.edges())), "EI":effective_information(G12)},
              "Motif 13": {"G":G13, "edges":str(list(G13.edges())), "EI":effective_information(G13)}}
# Bar heights are each motif's EI (+0.005 so zero-EI bars remain visible).
ei_heights = np.array([list(motif_dict.values())[i]['EI'] for i in range(len(list(motif_dict.values())))]) + 0.005
ei_bars = np.array(range(len(list(motif_dict.values()))))
colors = ["#486164","#9094c9","#ab4e53","#fa8d11","#74d76c", "#bc7dc6","#db453b","#cad24b","#8f52d2","#00aaff", "#c2843a","#4f5435","#d05185"]
bar_labels = list(motif_dict.keys())
import numpy as np
import networkx as nx
from matplotlib import gridspec
# NOTE(review): this second assignment overwrites the palette defined above.
colors = ["#45af9c","#5b91cb","#9f8448","#bf6d8c","#9876c0","#bb6eac","#5ea05c", "#cf5c57","#c24864","#c69932","#d3468f","#ce5c2f","#6b6cd9","#78b43d","#ba58c2"]
# Drawing parameters (node size, edge width, colors).
i = 10
ns = 250
ew = 3.5
nc = 'w'
ec = '#333333'
oc = '#e4c600'
nc_o = '#333333'
fig, ax = plt.subplots(1,1,figsize=(17,8))
plt.subplots_adjust(wspace=0.10, hspace=0.1)
plt.rc('axes', axisbelow=True)
plt.rc('axes', linewidth=1.5)
# Top row: one wide EI bar chart; bottom row: one small axis per motif.
gs = gridspec.GridSpec(2, len(ei_bars), height_ratios=[7,1.7])
ax0 = plt.subplot(gs[0, :])
cols_i = 'grey'
ax0.bar(ei_bars, ei_heights, color=colors, width=0.75, edgecolor='#333333', linewidth=3, alpha=1)
ax0.set_xlim(min(ei_bars)-0.5,max(ei_bars)+0.5)
ax0.set_xticks(ei_bars)
ax0.set_xticklabels([""]*13)
ax0.set_yticklabels(np.round(np.linspace(0,1.6,num=9),2), size=18)
ax0.set_ylim(0, max(ei_heights)+0.04)
ax0.set_xlim(-0.5,12.5)
ax0.set_ylabel("$EI$", size=28)
ax0.grid(linestyle='-', color='#999999', linewidth=2.5, alpha=0.35)
# Draw each motif's graph beneath its corresponding bar.
for q, Q in enumerate(list(motif_dict.values())):
    g = Q['G']
    ax0 = plt.subplot(gs[-1, q])
    pos = nx.circular_layout(g)
    nx.draw_networkx_nodes(g, pos, node_size=ns, node_color=colors[q], linewidths=3, edgecolors="#333333", ax=ax0)
    nx.draw_networkx_edges(g, pos, width=ew*0.9, edge_color="#3F3F3F",#colors[q],
        arrowsize=19, alpha=1, ax=ax0)
    ax0.set_axis_off()
    # Pad the axis limits slightly so arrowheads are not clipped.
    posy = np.array(list(zip(*list(pos.values())))[1])
    posx = np.array(list(zip(*list(pos.values())))[0])
    ax0.set_ylim(min(posy)*1.35, max(posy)*1.5)
    ax0.set_xlim(min(posx)*1.69, max(posx)*1.6)
    title = list(motif_dict.keys())[q]
    ax0.set_title(title, fontsize=16, pad=-0.35)
if save:
    plt.savefig("../figs/pngs/EffectiveInformation_NetworkMotifs.png", bbox_inches='tight', dpi=425)
    plt.savefig("../figs/pdfs/EffectiveInformation_NetworkMotifs.pdf", bbox_inches='tight', dpi=425)
plt.show()
``` ## End of Chapter 01. In [Chapter 02](https://nbviewer.jupyter.org/github/jkbren/einet/blob/master/code/Chapter%2002%20-%20Network%20Size%20and%20Effective%20Information.ipynb), we will look at the $EI$ of common networks _______________ ### References: - __[Hoel, E. P. (2017). When the Map Is Better Than the Territory. Entropy, 19(5), 188. doi: 10.3390/e19050188](http://www.mdpi.com/1099-4300/19/5/188)__ - __[Hoel, E. P., Albantakis, L., & Tononi, G. (2013). Quantifying causal emergence shows that macro can beat micro. Proceedings of the National Academy of Sciences, 110(49), 19790–5. doi: 10.1073/pnas.1314922110](http://www.pnas.org/content/110/49/19790)__ - __[Tononi, G. (2001). Information measures for conscious experience. Archives Italiennes de Biologie,139(4), 367–371. doi: 10.4449/aib.v139i4.51](https://www.ncbi.nlm.nih.gov/pubmed/11603079)__ ______________________
github_jupyter
# Source Code ### Libraries :- **Selenium Driver** ``` from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select from selenium.common.exceptions import NoSuchElementException ``` **Speech** ``` import pyttsx3 #It works offline from gtts import gTTS import speech_recognition as sr ``` **Windows & Processes** ``` import subprocess import calendar import datetime import winshell import os import time import datetime import win32com.client as wincl from playsound import playsound from ecapture import ecapture as ec ``` **API's** ``` import wolframalpha import wikipedia import webbrowser import pyjokes from translate import Translator from newsapi import NewsApiClient from twilio.rest import Client from clint.textui import progress ``` **Web Browser** ``` import urllib import json import feedparser import smtplib import requests from urllib.request import urlopen from bs4 import BeautifulSoup ``` **Miscellaneous** ``` import tkinter import random import operator import ctypes import shutil import warnings ``` Setting up the Speech Engine ``` engine = pyttsx3.init('sapi5') voices = engine.getProperty('voices') engine.setProperty('voice', voices[0].id) rate = engine.getProperty('rate') # getting details of current speaking rate print (rate) #printing current voice rate engine.setProperty('rate', 180) # setting up new voice rate ``` ### Functions :- **wake()** is used to initiate the program. If mike detects **None** then this function returns False or vice-versa. 
```
def wake():
    """Listen once on the microphone for a wake phrase.

    Returns False if speech recognition raises any exception; returns True
    (after playing a notification sound) when a non-"None" phrase is heard.
    NOTE(review): if recognition succeeds but query == "None", the function
    falls through and implicitly returns None (falsy) — confirm intended.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        #print("Listening...")
        #playsound(r'C:\Users\LENOVO\Desktop\coding ninja\Notifications\Pixel Sounds\Trill (online-audio-converter.com).mp3')
        # Wait up to 1 s of silence before considering the phrase complete.
        r.pause_threshold = 1
        print("Listening...")
        audio = r.listen(source,timeout=10)
    try:
        print("Recognizing...")
        # Google Web Speech API, Indian-English language model.
        query = r.recognize_google(audio, language ='en-in')
        print(f"User said: {query}\n")
    except Exception as e:
        print(e)
        print("Unable to Recognizing your voice.")
        return False
    if query != "None":
        # Audible cue that the assistant heard something and is now active.
        playsound(r'HERE PLACE YOUR ADDRESS OF MUSIC WHICH YOU WANT TO PLAY WHEN THE ASSISTANT STARTS LISTENING .mp3')
        return True
```
**speak()** uses pyttsx3 speech engine to generate audible voices. Which is actually a string converted into audio.
```
def speak(audio):
    """Speak the given text aloud via the module-level pyttsx3 engine."""
    engine.say(audio)
    engine.runAndWait()
```
**wishMe()** analyze current time and then send greetings to **speak()**
```
def wishMe():
    """Greet the user according to the current local hour.

    Morning for 0-11, afternoon for 12-17, evening otherwise.
    """
    hour = int(datetime.datetime.now().hour)
    if hour>= 0 and hour<12:
        speak("Good Morning Sir !")
    elif hour>= 12 and hour<18:
        speak("Good Afternoon Sir !")
    else:
        speak("Good Evening Sir !")
    #assname =("Angularstone, 1 point o")
    #speak("I am your Voice Assistant")
    #speak(assname)
```
**takeCommand()** uses Recognizer object to detect voice through microphone.
``` def takeCommand(): r = sr.Recognizer() with sr.Microphone() as source: print("Listening...") r.pause_threshold = 1 audio = r.listen(source,timeout=10) try: print("Recognizing...") query = r.recognize_google(audio, language ='en-in') print(f"User said: {query}\n") except Exception as e: print(e) print("Unable to Recognizing your voice.") return "None" return query ``` **sendEmail()** takes two parameters *email* and *content* then create **SMTP** connection using port **587** ``` def sendEmail(to, content): import credentials server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() server.starttls() # Enable low security in gmail server.login(credentials.email(), credentials.password()) server.sendmail(credentials.email(), to, content) server.close() ``` **getDate()** returns the current date in a string:- ``` def getDate(): now = datetime.datetime.now() my_date = datetime.datetime.today() weekday = calendar.day_name[my_date.weekday()] monthNum = now.month dayNum = now.day # List of Months month_names = ['January','February','March','APril','May','June','July','August','September','October','November','December'] ordinalNumbers = ['1st','2nd','3rd','4th','5th','6th','7th','8th','9th','10th','11th','12th','13th','14th','15th','16th', '17th','18th','19th','20th','21st','22nd','23rd','24th','25th','26th','27th','28th','29th','30th','31st'] return 'Today is ' + weekday + ' ' + month_names[monthNum-1] + ' the ' + ordinalNumbers[dayNum-1] + '.' ``` **searchOnNet()** takes one argument and perform google search. When results load then this function asks user for **brief** or **detailed information** If user's command is for brief then it simply read brief info. If user asks for detailed information then it read the whole webpage. After completing the above process, this function asks user for another search if user says **yes** then it perform recursion and repeat the above process else, exit. 
```
def searchOnNet(text):
    """Google-search `text` with Selenium and read the result aloud.

    Reads the first result's snippet for a brief answer, or opens the top
    result and reads every <p> element for a detailed answer; then offers
    to run another search (recursively) before closing the browser.
    """
    driver = webdriver.Chrome(executable_path = 'C:/Softwares/selenium webdriver/chromedriver')
    driver.get('https://www.google.com/')
    time.sleep(3)
    # Google's search input is named 'q'.
    search_box = driver.find_element_by_name('q')
    search_box.send_keys(text)
    search_box.submit()
    time.sleep(2)
    speak('Do you want to listen in detail about '+text+'?')
    if takeCommand() == 'no':
        # Brief mode: read only the first result's snippet block.
        # NOTE(review): 'IsZvec'/'yuRUbf' are Google result-page class names
        # and are liable to change without notice.
        speak(driver.find_element_by_xpath('//div[@class="IsZvec"]').text)
    else:
        # Detailed mode: open the top result and read each paragraph,
        # stopping when the user says a stop word.
        driver.get(driver.find_element_by_xpath('//div[@class="yuRUbf"]/a').get_attribute('href'))
        time.sleep(2)
        for i in driver.find_elements_by_tag_name('p'):
            speak(i.text)
            if takeCommand().lower() in ["stop","stop speaking","exit","top"]:
                break
    speak('Do you want to search again on google ?')
    Data = takeCommand()
    time.sleep(1)
    if Data.lower() == 'yes':
        time.sleep(1)
        speak('What do you want to search for .')
        data = takeCommand()
        # Recurse for the follow-up query; the new call opens its own driver.
        searchOnNet(data)
    else:
        speak('Okay Bittu Boss, thank You for using me !')
        driver.close()
```
**listening_algorithm()** takes two arguments **to** and **from**. It returns the most probable string from list of strings contained in **from_** which is similar to **to**.
```
def listening_algorithm(to,from_):
    """Return the candidate in `from_` best matching the phrase `to`.

    Scores each candidate by counting position-aligned words that appear
    (as substrings) in the corresponding word of `to`, and returns the
    first candidate with the maximum score.
    """
    counts = []
    to = to.split(' ')
    for i in range(len(from_)):
        count = 0
        var = from_[i].split(' ')
        # Compare only up to the shorter of the two word lists.
        n = min(len(to),len(var))
        for j in range(n):
            # Substring test: candidate word contained in the heard word.
            if var[j] in to[j]:
                count += 1
        counts.append(count)
    maxm_ = max(counts)
    for i in range(len(counts)):
        if maxm_ == counts[i]:
            return from_[i]
```
**youtube()** takes text as arguement and open youtube.com, locate textbox and submit button. Then search text and read the first ten search results and print it on display. This function asks user to play which song from the given playlist. Then send text and playlist to **listening_algorithm()** and from the returned value play the song. It can also perform **stop**, **next**, **skip_Ad** function on youtube.
``` def youtube(text): count = 0 search_results = [] driver = webdriver.Chrome(executable_path = 'C:/Softwares/selenium webdriver/chromedriver') driver.get('https://www.youtube.com/') time.sleep(3) search_box = driver.find_element_by_name('search_query') search_box.send_keys(text) search_box.submit() time.sleep(2) speak(' Following are the first ten search results :-') video_names = [] for i in driver.find_elements_by_id('video-title'): speak(i.text) time.sleep(1) video_names.append(i.text.lower()) count += 1 if count == 10: break for i in driver.find_elements_by_xpath('//a[@class="yt-simple-endpoint style-scope ytd-video-renderer"]'): search_results.append(i.get_attribute('href')) print(video_names) speak('Which video do you want me to play ?') video_query = takeCommand().lower() video_query = listening_algorithm(video_query,video_names) while True: if video_query in video_names: break else: speak('Sorry, I did not hear. Come again please !') video_query = takeCommand().lower() video_query = listening_algorithm(video_query,video_names) for i in range(len(video_names)): if video_query in video_names[i]: #add = '1' #time_ = [] #for j in driver.find_elements_by_xpath('//span[@class="style-scope ytd-thumbnail-overlay-time-status-renderer"]'): #if j.text == '': #pass #else: #time_.append(j.text) #actual_time = time_[i] #for k in range(len(actual_time)): #if actual_time[k]==':': #for l in range(k+1,len(actual_time)): #add += '0' #print(int(actual_time.replace(':',''))/int(add)) driver.get(search_results[i]) time.sleep(1) #time.sleep(int(actual_time.replace(':',''))/int(add)*60) while True: try: skip_ad = driver.find_element_by_xpath('//div[@class="ytp-ad-text ytp-ad-skip-button-text"]') try: skip_ad.click() except ElementNotInteractableException as exception: pass except NoSuchElementException as exception: pass command = takeCommand().lower() if command in ["stop","top","pause"] : try: pause_play = driver.find_element_by_xpath('//button[@class="ytp-play-button 
ytp-button"]') pause_play.click() except NoSuchElementException as exception: pass speak(' What happens sir ? Do you want me to play the next song ?') reply2 = takeCommand().lower() while True: if reply2 != 'None': break else: speak(" Sorry I didn't listened. Come again please ! ") reply2 = takeCommand().lower() next_loop = driver.find_element_by_xpath('//a[@class="ytp-next-button ytp-button"]') next_loop.click() if reply2 == 'yes': try: next_loop = driver.find_element_by_xpath('//a[@class="ytp-next-button ytp-button"]') next_loop.click() except NoSuchElementException as exception: pass time.sleep(3) else: speak('Do you want me to play some other videos ?') reply = takeCommand().lower() while True: if reply != 'None': break else: speak(" Sorry I didn't listened. Come again please ! ") reply = takeCommand().lower() if reply == 'yes': speak('Say it Brother ?') sq = takeCommand() search_box = driver.find_element_by_name('search_query') search_box.clear() search_box.send_keys(sq) search_box.submit() time.sleep(2) driver.find_element_by_id('video-title').click() #add = '1' #time_ = '' #for i in driver.find_elements_by_xpath('//span[@class="style-scope ytd-thumbnail-overlay-time-status-renderer"]'): #if i.text == '': #pass #else: #time_ = i.text #break #print(time_) #for i in range(len(time_)): #if time_[i]==':': #for j in range(i+1,len(time_)): #add += '0' #print(int(time_.replace(':',''))/int(add)) #time.sleep(int(time_.replace(':',''))/int(add)*60+3) else: pause_play.click() elif 'exit' in command: break driver.close() ``` **time_()** returns current time in a string ``` def time_(): response = '' now = datetime.datetime.now() meridiem = '' if now.hour >=12: meridiem = 'p.m' hour = now.hour -12 else: meridiem = 'a.m' hour = now.hour # convert minute into a proper string if now.minute < 10: minute = '0' + str(now.minute) else: minute = str(now.minute) response = response + ' ' + 'It is ' + str(hour) + ':' + minute + ' ' + meridiem + '.' 
return response ``` **news()** asks user **country** name to read news ``` def news(): import news_api_key country_code = {'argentina':'ar','australia':'au','austria':'at','Belgium':'be','brazil':'br','bulgaria':'bg', 'canada':'ca','china':'cn','colombia':'co','cuba':'cu','czech republic':'cz','egypt':'eg','france':'fr', 'germany':'de','greece':'gr','hongkong':'hk','hungary':'hu','india':'in','indonesia':'id','ireland':'ir', 'israel':'il','italy':'it','japan':'jp','latvia':'lv','lithuania':'lt','malaysia':'my','mexico':'mx', 'morocco':'ma','netherlands':'nl','newzealand':'nz','nigeria':'ng','norway':'no','philippines':'ph', 'poland':'pl','portugal':'pt','romania':'ro','russia':'ru','saudi arabia':'sa','serbia':'rs','singapore':'sg', 'solvakia':'sk','solvenia':'si','south africa':'za','south korea':'kr','sweden':'se','switzerland':'ch', 'taiwan':'tw','thailand':'th','turkey':'tr','uae':'ae','ukraine':'ua','united kingdom':'gb','united states':'us', 'venuzuela':'ve'} speak("Which Country's news do you want to listen ?") country = takeCommand().lower() if country == "None": speak('Please come again, i did not hear .') country = takeCommand().lower() while 1: try: country = country_code[country] + '&' break except KeyError as exception: speak('Please come again, i did not hear .') country = takeCommand().lower() url = ('http://newsapi.org/v2/top-headlines?' 
'country='+country+ 'apiKey='+news_api_key.news_api()) response = requests.get(url) data = response.json() for i in data['articles']: speak(i['title']) time.sleep(1) speak(i['description']) time.sleep(1) speak('NEXT') print(country) ``` **Weather_Search()** asks user **address** to read current weather condition ``` def Weather_Search(): import weather_api speak('City name') q = takeCommand() while True: if q != "None": break else: speak('Come again please !') q = takeCommand() speak('State or Country name') q2 = takeCommand() while True: if q2 != "None": break else: speak('Come again please !') q2 = takeCommand() q += ',' + q2 url = "https://community-open-weather-map.p.rapidapi.com/find" querystring = {"type":"link%2C accurate","units":"imperial%2C metric","q":q} headers = { 'x-rapidapi-host': "community-open-weather-map.p.rapidapi.com", 'x-rapidapi-key': weather_api.weather_key() } response = requests.request("GET", url, headers=headers, params=querystring) search_result = json.loads(response.text) return search_result ``` **send_message()** asks user whom to send message. Then ask user to convey the message and then send text message. Note:- To use this functionality receiver's contact number must be registered with twillio. 
``` def send_message(): import twilio_token import twilio_sid contacts = {'NAME OF PERSON1':'COUNTRY_CODE MOBILE_NUMBER','NAME OF PERSON2':'COUNTRY_CODE MOBILE_NUMBER'} speak('Whom do you want to send message ?') nmbr = takeCommand().lower() while 1: try: contact_nmbr = contacts[nmbr] break except KeyError as exception: speak('Please come again, i did not hear .') nmbr = takeCommand().lower() print(contact_nmbr) account_sid = twilio_sid.twilio_sid() auth_token = twilio_token.twilio_token() client = Client(account_sid, auth_token) speak('Please, convey your message') client.messages.create(to=contact_nmbr, from_="YOUR TWILIO ACCOUNT MOBILE NUMBER", body=takeCommand()) ``` **locate()** asks user the **address** where to locate and then asks for what to locate in that address. After that it suggest good rating search results and speak their **contacts** and **address** when being asked . ``` def locate(): search_resluts = [] driver = webdriver.Chrome(executable_path = 'C:/Softwares/selenium webdriver/chromedriver') driver.get('https://www.google.com/maps/') time.sleep(3) speak('. City name where you want to locate :') while True: city = takeCommand() if city != 'None': break else: speak('. Sorry, I did not listen, come again please !') speak('. Country name :') while True: country = takeCommand() if country != 'None': break else: speak('. Sorry, I did not listen, come again please !') speak('. what do you want to search for in ' + city +' '+country) while True: niche = takeCommand() if niche != 'None': break else: speak('. Sorry, I did not listen, come again please !') search_box = driver.find_element_by_name('q') search_box.send_keys(niche+','+city+','+country) driver.find_element_by_xpath('//button[@class="searchbox-searchbutton"]').click() time.sleep(3) speak('. Okay ! 
Here is your top 20 search results ') for i in driver.find_elements_by_xpath('//h3[@class="section-result-title"]'): speak(i.text) search_resluts.append(i.text.lower()) rest = [] for i in driver.find_elements_by_xpath('//div[@class="section-result"]'): rest.append(i) time.sleep(1) speak('. From these search results which one you want to locate ?') time.sleep(1) speak('. Let me print it on display, then you just tell me which one you wanna locate') print(search_resluts) while True: restaurant = takeCommand().lower() if restaurant in search_resluts: break else: speak('Sorry, I did not listen. Come again please .') for i in range(len(search_resluts)): if restaurant in search_resluts[i]: rest[i].click() break time.sleep(3) while True: try: star_rating = driver.find_element_by_xpath('//div[@class="jqnFjrOWMVU__right"]/div').text print(star_rating) break except NoSuchElementException as exception: pass if float(star_rating) >= 4: speak('. This '+niche+' is good '+'It has user rating '+star_rating+' you can go here .') time.sleep(1) speak('. Do you need more information regarding this '+niche+' .') if takeCommand().lower() == 'yes': additional_information = [] for i in driver.find_elements_by_xpath('//div[@class="ugiz4pqJLAG__content"]'): additional_information.append(i.text) speak('. Okay Tell me what you need .') need = takeCommand().lower() if 'contact' in need or 'number' in need: speak(". Let me check if the "+niche+" has kept it's contact on network or not .") flag = False for i in additional_information: if '+' in i: flag = True break if flag: speak(". Got it's contact, it is " + i ) else: speak(". Sorry, "+niche+" have kept it's contact private .") elif 'website' in need or 'link' in need or 'url' in need: speak(". Let me check if the "+niche+" has kept it's website link on network or not .") flag = False for i in additional_information: if '.com' in i and len(i.split('.')) == 2: flag = True break if flag: speak(". Got it's website, it is " + i ) else: speak(". 
Sorry, "+niche+" have kept it's website private .") elif 'address' in need: speak('. Here we go . '+additional_information[0]) else: speak(". I won't suggest you to go here. Because, this "+niche+" has "+star_rating+" user ratings only .") ``` **navigation()** asks user the **source** and **Destination** address then ask about the transporation system then speak the time,date,kilometers and some important note regarding the journey if it is available. ``` def navigation(): driver = webdriver.Chrome(executable_path = 'C:/Softwares/selenium webdriver/chromedriver') driver.get('https://www.google.com/maps/') time.sleep(3) speak('Okay ! Tell me the Source Address :') source_address = takeCommand() while True: if source_address != 'None': break else: speak('. Sorry, I did not listen, come again please !') source_address = takeCommand() speak(' Your Destination Address') Destination_address = takeCommand() while True: if Destination_address != 'None': break else: speak('. Sorry, I did not listen, come again please !') Destination_address = takeCommand() search_box = driver.find_element_by_name('q') search_box.send_keys(source_address+' to '+Destination_address) driver.find_element_by_xpath('//button[@class="searchbox-searchbutton"]').click() time.sleep(3) try: if 'Sorry, we could not calculate directions from' in driver.find_element_by_xpath('//div[@class="section-directions-error-primary-text"]').text: speak(" Sorry, we could not calculate directions ") return 0 except NoSuchElementException: pass speak(" How would you like to travel ? 
Car, Train, Bicycle or Flight") travel_mode = driver.find_elements_by_xpath('//div[@class="travel-mode"]') if len(travel_mode)==3: car = travel_mode[0] train = travel_mode[1] walk = travel_mode[2] elif len(travel_mode)>3: try: flights = travel_mode[3] except IndexError as exception: pass else: car = travel_mode[0] medium = takeCommand().lower() while True: if medium in ["ka","car","four wheeler","bike","auto","cab","train","trains","walk","long walk","walking","walks", "flight","plane","aeroplane","flights","air","by air","via air","flying","by flying"]: break else: speak(" Come again please !") medium = takeCommand().lower() if len(travel_mode) < 3: car.click() time.sleep(3) try: element_inside_popup = driver.find_element_by_xpath('//div[@class="section-directions-trip clearfix selected"]') element_inside_popup.send_keys(Keys.END) except NoSuchElementException: pass data_detail = [] for i in driver.find_elements_by_xpath('//div[@class="section-directions-trip-description"]'): data_detail.append(i.text) for i in data_detail: print(i) routes = len(data_detail) speak(str(routes)+" found !") trip_number = 1 for i in data_detail: print(i.split('\n')) speak("Trip Number "+str(trip_number)+".") speak(" This trip will take around "+ i.split('\n')[0]+".") speak(" Total distance for this trip is "+i.split('\n')[1]+".") speak(" We will go through "+i.split('\n')[2]) if len(i.split('\n')) > 3: content = " I have few more informations " for j in i.split('\n')[3:]: content += j speak(content) if medium in ["ka","car","four wheeler","bike","auto","cab"]: if len(travel_mode) == 3: car.click() time.sleep(3) try: element_inside_popup = driver.find_element_by_xpath('//div[@class="section-directions-trip clearfix selected"]') element_inside_popup.send_keys(Keys.END) except NoSuchElementException: pass data_detail = [] for i in driver.find_elements_by_xpath('//div[@class="section-directions-trip-description"]'): data_detail.append(i.text) for i in data_detail: print(i) routes = 
len(data_detail) speak(str(routes)+" found !") trip_number = 1 for i in data_detail: print(i.split('\n')) speak("Trip Number "+str(trip_number)+".") speak(" This trip will take around "+ i.split('\n')[0]+".") speak(" Total distance for this trip is "+i.split('\n')[1]+".") speak(" We will go through "+i.split('\n')[2]) if len(i.split('\n')) > 3: content = " I have few more informations " for j in i.split('\n')[3:]: content += j speak(content) trip_number += 1 else: speak(' No route found .') if medium in ["train","trains"]: if len(travel_mode) == 3: train.click() time.sleep(3) try: element_inside_popup = driver.find_element_by_xpath('//div[@class="section-directions-trip clearfix selected"]') element_inside_popup.send_keys(Keys.END) except NoSuchElementException: pass data_detail = [] for i in driver.find_elements_by_xpath('//div[@class="section-directions-trip-description"]'): data_detail.append(i.text) for i in data_detail: print(i) routes = len(data_detail) speak(str(routes)+" found !") trip_number = 1 for i in data_detail: print(i.split('\n')) speak("Trip Number "+str(trip_number)+".") speak(" This trip will take around "+ i.split('\n')[0]+".") speak(" Trip schedule is "+i.split('\n')[1]+".") speak(" Trains for this trip is "+i.split('\n')[2]) if len(i.split('\n')) > 3: content = " I have few more informations " for j in i.split('\n')[3:]: content += j speak(content) trip_number += 1 else: speak(' No route found .') if medium in ["walk","long walk","walking","walks"]: if len(travel_mode) == 3: walk.click() time.sleep(3) for i in driver.find_elements_by_xpath('//div[@class="section-directions-trip-description"]'): print(i.text.split('\n')) speak(" By walking, you will take around "+i.text.split('\n')[0]+".") speak(" Distance for this trip will be "+i.text.split('\n')[1]+".") speak(" Route for this trip is "+i.text.split('\n')[2]+".") else: speak(' No route found .') if medium in ["flight","plane","aeroplane","flights","air","by air","via air","flying","by 
flying"]: if len(travel_mode) > 3: flights.click() time.sleep(3) try: element_inside_popup = driver.find_element_by_xpath('//div[@class="section-directions-trip clearfix selected"]') element_inside_popup.send_keys(Keys.END) except NoSuchElementException: pass data_detail = [] for i in driver.find_elements_by_xpath('//div[@class="section-directions-trip-description"]'): data_detail.append(i.text) for i in data_detail: print(i) routes = len(data_detail) speak(str(routes)+" found !") trip_number = 1 for i in data_detail: print(i.split('\n')) speak("Trip Number "+str(trip_number)+".") trip_number += 1 for i in data_detail: speak(i) else: speak(" No flight route informations .") LANGUAGES = {'af': 'afrikaans','sq': 'albanian','am': 'amharic','ar': 'arabic','hy': 'armenian','az': 'azerbaijani', 'eu': 'basque','be': 'belarusian','bn': 'bengali','bs': 'bosnian','bg': 'bulgarian','ca': 'catalan','ceb': 'cebuano', 'ny': 'chichewa','zh-cn': 'chinese (simplified)','zh-tw': 'chinese (traditional)','co': 'corsican','hr': 'croatian', 'cs': 'czech','da': 'danish','nl': 'dutch','en': 'english','eo': 'esperanto','et': 'estonian','tl': 'filipino', 'fi': 'finnish','fr': 'french','fy': 'frisian','gl': 'galician','ka': 'georgian','de': 'german','el': 'greek', 'gu': 'gujarati','ht': 'haitian creole','ha': 'hausa','haw': 'hawaiian','iw': 'hebrew','he': 'hebrew','hi': 'hindi', 'hmn': 'hmong','hu': 'hungarian','is': 'icelandic','ig': 'igbo','id': 'indonesian','ga': 'irish','it': 'italian', 'ja': 'japanese','jw': 'javanese','kn': 'kannada','kk': 'kazakh','km': 'khmer','ko': 'korean','ku': 'kurdish (kurmanji)', 'ky': 'kyrgyz','lo': 'lao','la': 'latin','lv': 'latvian','lt': 'lithuanian','lb': 'luxembourgish','mk': 'macedonian', 'mg': 'malagasy','ms': 'malay','ml': 'malayalam','mt': 'maltese','mi': 'maori','mr': 'marathi','mn': 'mongolian', 'my': 'myanmar (burmese)','ne': 'nepali','no': 'norwegian','or': 'odia','ps': 'pashto','fa': 'persian','pl': 'polish', 'pt': 'portuguese','pa': 
'punjabi','ro': 'romanian','ru': 'russian','sm': 'samoan','gd': 'scots gaelic','sr': 'serbian', 'st': 'sesotho','sn': 'shona','sd': 'sindhi','si': 'sinhala','sk': 'slovak','sl': 'slovenian','so': 'somali','es': 'spanish', 'su': 'sundanese','sw': 'swahili','sv': 'swedish','tg': 'tajik','ta': 'tamil','te': 'telugu','th': 'thai','tr': 'turkish', 'uk': 'ukrainian','ur': 'urdu','ug': 'uyghur','uz': 'uzbek','vi': 'vietnamese','cy': 'welsh','xh': 'xhosa','yi': 'yiddish', 'yo': 'yoruba','zu': 'zulu'} ``` **Instagram_scroller()** scrolls instagram ``` def Instagram_scroller(driver,command): print('In scroller function') while True: while True: l0 = ["scroll up","call down","call don","scroll down","up","down","exit","roll down","croll down","roll up","croll up"] if len([i for i in l0 if i in command]) != 0: break else: speak("Please , come again .") command = takeCommand().lower() print("in scroller function, while loop") l = ["scroll down","down","roll down","croll down"] if len([i for i in l if i in command]) != 0: print("voice gets recognized") speak("Scrolling Down the pan") while True: driver.execute_script("window.scrollBy(0,500)","") time.sleep(0) q = takeCommand().lower() if "stop" in q or "exit" in q or "top" in q: speak("Exiting the scroll down") break l2 = ["scroll up","croll up","up","roll up","call app"] if len([i for i in l2 if i in command]) != 0: speak("Scrolling up the pan") while True: driver.execute_script("scrollBy(0,-2000);") time.sleep(0) q = takeCommand().lower() if "stop" in q or "exit" in q or "top" in q: speak("Exiting the scroll up") break command = takeCommand().lower() if "exit" in command: speak("exiting from scroller") break ``` **Instagram_following_bot()** send follow request based on command ``` def Instagram_following_bot(driver,follow_request_command): driver = driver follow_request_command = follow_request_command search = driver.find_element_by_xpath('//input[@class = "XTCLo x3qfX "]') search.clear() 
search.send_keys(follow_request_command) time.sleep(4) searched_results = driver.find_elements_by_xpath('//div[@class = "fuqBx"]/a') pages_link = [] for i in searched_results: pages_link.append(i.get_attribute('href')) COUNT = 0 for i in pages_link: driver.get(i) time.sleep(3) try: driver.find_element_by_partial_link_text('followers').click() except NoSuchElementException: pass time.sleep(3) count = 0 while True: for i in driver.find_elements_by_xpath('//div[@class="Pkbci"]/button'): i.click() time.sleep(1) try: driver.find_element_by_xpath('//button[@class="aOOlW HoLwm "]').click() except NoSuchElementException as exception: count += 1 pass print("Profile followed count =",count) element_inside_popup = driver.find_element_by_xpath('//ul[@class="jSC57 _6xe7A"]//a') element_inside_popup.send_keys(Keys.END) break_loop = False if takeCommand().lower() in ['stop','top','stop follow','stop following','break','exit']: break_loop = True break if count > 20: break if break_loop: speak('Exiting the follow command !') break COUNT += 1 if COUNT > 3: break ``` **Instagram()** controls and perform different actions on Instagram like send message,read notifications,open explorer,play stories ``` def Instagram(): import insta_credentials driver = webdriver.Chrome(executable_path = 'C:/Softwares/selenium webdriver/chromedriver') driver.get('https://www.instagram.com/') time.sleep(3) #driver.maximize_window() username = driver.find_element_by_name('username') password = driver.find_element_by_name('password') usrname = insta_credentials.insta_mail() pwd = insta_credentials.insta_password() username.send_keys(usrname) password.send_keys(pwd) password.submit() # Handles Pop Up time.sleep(3) try: driver.find_element_by_xpath('//button[@class="sqdOP L3NKy y3zKF "]').click() except NoSuchElementException: pass time.sleep(5) try: driver.find_element_by_xpath('//button[@class="sqdOP L3NKy y3zKF "]').click() except NoSuchElementException: try: a = 
driver.find_elements_by_xpath('//div[@class="mt3GC"]/button') alert = a[-1] alert.click() except NoSuchElementException: pass time.sleep(2) speak("I am in your Instagram Network . What's next instruction ? ") l0 = ["scroll down","call down","call don","call up","down","roll down","croll down","up","scroll up","roll up","croll up","start stories","play stories","start story","play story", "stop stories","end stories","stop story","exit story","exit stories","top stories","top story", "message", "messages" ,"inbox","home","notification","notifications","activity","explore","explorer", "send follow requests","send follow request","follow request","follow","exit","close"] while True: command = takeCommand().lower() while True: if len([i for i in l0 if i in command]) != 0: break else: speak("Please say it again !") command = takeCommand().lower() l1 = ["scroll down","call down","call don","call up","down","roll down","croll down","up","scroll up","roll up","croll up"] if len([i for i in l1 if i in command]) != 0: print('calling scroll down function') Instagram_scroller(driver,command) l2 = ["start stories","play stories","start story","play story"] if len([i for i in l2 if i in command]) != 0: speak("Starting the stories .") start_story = driver.find_element_by_xpath('//li[@class="Ckrof"]').click() l3 = ["stop stories","end stories","stop story","exit story","exit stories","top stories","top story"] if len([i for i in l3 if i in command]) != 0: speak("Closing the stories .") close_story = driver.find_element_by_xpath('//span[@class="Szr5J"]').click() l4 = ["message","messages","inbox"] if len([i for i in l4 if i in command]) != 0: speak("Opening Message .") unread_message_flag = False driver.get(driver.find_element_by_xpath('//a[@class="xWeGp"]').get_attribute('href')) time.sleep(5) try: unread_messages = driver.find_elements_by_xpath('//div[@class=" _41V_T Sapc9 Igw0E IwRSH eGOV_ _4EzTm "]') speak(' You have '+str(len(unread_messages))+ ' unread message left .') 
unread_message_flag = True except NoSuchElementException as exception: pass if unread_message_flag: speak("Do you want me to read those unread messages ?") unread_message_command = takeCommand().lower() while True: if unread_message_command in ["yes","no"]: break else: speak("Sorry, I couldn't hear. Come again please !") unread_message_command = takeCommand().lower() if unread_message_command == "yes": for i in unread_messages: i.click() time.sleep(2) try: speak("Okay !") time.sleep(1) speak(driver.find_element_by_xpath('//div[@class="_7UhW9 xLCgt MMzan KV-D4 p1tLr hjZTB"]').text) except NoSuchElementException as exception: speak(" Seems like you have a video or some image. Check that on display") else: speak("Okay !") time.sleep(1) speak("would you like to text some one instead ?") text_message_command = takeCommand().lower() while True: if text_message_command in ["yes","no"]: break else: speak("Sorry, I couldn't hear. Come again please !") text_message_command = takeCommand().lower() if text_message_command == "yes" or text_message_command == "okay" or text_message_command == "ok": speak("Whom do you want to send message ?") sender_name = takeCommand() while True: if sender_name != "None": break else: speak("Sorry, I couldn't hear. 
Come again please !") sender_name = takeCommand() driver.get(driver.find_element_by_xpath('//a[@class="xWeGp"]').get_attribute('href')) time.sleep(1) driver.find_element_by_xpath('//button[@class="sqdOP L3NKy y3zKF "]').click() time.sleep(1) while True: try: driver.find_element_by_xpath('//input[@class="j_2Hd uMkC7 M5V28"]').send_keys(sender_name) break except NoSuchElementException as exception: pass time.sleep(2) try: if driver.find_element_by_xpath('//div[@class="_7UhW9 xLCgt MMzan _0PwGv uL8Hv "]').text == "No account found.": speak(" Please say the name again !") sender_name = takeCommand() driver.find_element_by_xpath('//div[@class="QBdPU "]').click() driver.find_element_by_xpath('//button[@class="sqdOP L3NKy y3zKF "]').click() driver.find_element_by_xpath('//input[@class="j_2Hd uMkC7 M5V28"]').send_keys(sender_name) except NoSuchElementException as exception: pass sender_name_data = [] sender_name_link = [] time.sleep(3) try: for i in driver.find_elements_by_xpath('//div[@class="_7UhW9 xLCgt qyrsm KV-D4 uL8Hv "]'): sender_name_data.append(i.text) sender_name_link.append(i) except NoSuchElementException as exception: pass sender_name = listening_algorithm(sender_name,sender_name_data) sender_name_link[sender_name_data.index(sender_name)].click() driver.find_element_by_xpath('//div[@class="rIacr"]').click() time.sleep(2) speak("Say your message sir .") say_message_command = takeCommand() while True: if say_message_command != "None": break else: speak(" I couldn't hear. 
Please say it again .") say_message_command = takeCommand() driver.find_element_by_xpath('//div[@class="X3a-9"]').click() driver.find_element_by_tag_name("textarea").send_keys(say_message_command) driver.find_element_by_xpath('//div[@class=" Igw0E IwRSH eGOV_ _4EzTm JI_ht "]').click() speak("Message delivered !") elif "home" in command: driver.find_element_by_xpath('//div[@class="Fifk5"]').click() time.sleep(2) l5 = ["notification", "notifications","activity"] if len([i for i in l5 if i in command]) != 0: while True: try: buttons = driver.find_elements_by_xpath('//div[@class="_47KiJ"]/div') buttons[3].click() break except NoSuchElementException as exception: pass time.sleep(3) try: speak(" You have "+driver.find_element_by_xpath('//div[@class="JRHhD"]').text+" pending follow requests .") except NoSuchElementException as exception: pass '''try: notify_count = 0 for i in driver.find_elements_by_xpath('//div[@class="HkZvO"]'): speak(i.text + " started following you .") notify_count += 1 if notify_count > 10: break except NoSuchElementException as exception: pass''' notification_count = 0 try: for i in driver.find_elements_by_xpath('//div[@class="PUHRj H_sJK"]'): speak(i.text) notification_count += 1 time.sleep(1) if notification_count >20: break except NoSuchElementException as exception: pass speak("Closing Notifications ") buttons = driver.find_elements_by_xpath('//div[@class="_47KiJ"]/div') explore = buttons[3] explore.click() l6 = ["explore","explorer"] if len([i for i in l6 if i in command]) != 0: buttons = driver.find_elements_by_xpath('//div[@class="_47KiJ"]/div') explore = buttons[2] explore.click() time.sleep(3) l7 = ["send follow requests","send follow request","follow request","follow"] if len([i for i in l7 if i in command]) != 0: speak("Tell me the common interest of people whom you want to send requests .") follow_request_command = takeCommand().lower() while True: if follow_request_command != "None": break else: speak("Please , come again .") 
follow_request_command = takeCommand().lower() Instagram_following_bot(driver,follow_request_command) l8 = ["exit","close","closed","exits"] if len([i for i in l8 if i in command]) != 0: speak("Closing the Instagram .") driver.close() ``` **Driver Program :-** ``` if __name__ == '__main__': clear = lambda: os.system('cls') clear() voice_count = 0 if wake(): wishMe() #usrname() assname = 'Angularstone' while True: query = takeCommand().lower() if 'wikipedia' in query: speak('Searching Wikipedia...') query = query.replace("wikipedia", "") results = wikipedia.summary(query, sentences = 3) speak("According to Wikipedia") print(results) speak(results) elif 'open youtube' in query: speak("Here you go to Youtube\n") webbrowser.open("youtube.com") elif 'open google' in query: speak("Here you go to Google\n") webbrowser.open("google.com") elif 'open stackoverflow' in query: speak("Here you go to Stack Over flow.Happy coding") webbrowser.open("stackoverflow.com") elif 'play music' in query or "play song" in query: speak("Here you go with music") # music_dir = "G:\\Song" music_dir = "D:\Video Songs" songs = os.listdir(music_dir) print(songs) random = os.startfile(os.path.join(music_dir, songs[1])) elif 'date' in query: speak(getDate()) elif 'change voice' in query: if voice_count % 2 == 0: engine.setProperty('voice', voices[1].id) else: engine.setProperty('voice', voices[0].id) speak("I am at your service sir !") elif 'time' in query: speak(time_()) elif 'open upwork' in query: speak('Happy Freelancing Boss .') codePath = r"C:\Softwares\UpworkSetup64.exe" os.startfile(codePath) elif 'send email' in query: try: speak("What should I say?") content = takeCommand() to = "bittuboss601@gmail.com" sendEmail(to, content) speak("Email has been sent !") except Exception as e: print(e) speak("I am not able to send this email") elif 'how are you' in query: speak("I am fine, Thank you") speak("How are you, Sir") elif 'fine' in query or "good" in query: speak("It's good to know that your 
fine") elif 'are you human' in query: speak('No, I am not Human .') elif "change my name to" in query: query = query.replace("change my name to", "") assname = query elif "change name" in query: speak("What would you like to call me, Sir ") assname = takeCommand() speak("Thanks for naming me") elif "what's your name" in query or "What is your name" in query: speak("My friends call me") speak(assname) print("My friends call me", assname) elif 'exit' in query: speak("Thanks for giving me your time") break elif "who made you" in query or "who created you" in query: speak("I have been created by Purnendu Tiwari, Nitin Aryan, Mayank Chhabra, Anuj Himachal wala and Ajit Bhalerao.") elif 'joke' in query: speak('I am not so good in cracking jokes, still I will try my best .') time.sleep(1) speak(pyjokes.get_joke()) elif "calculate" in query: import wolframa_alpha app_id = wolframa_alpha.wolframa_alpha() client = wolframalpha.Client(app_id) indx = query.lower().split().index('calculate') query = query.split()[indx + 1:] res = client.query(' '.join(query)) answer = next(res.results).text print("The answer is " + answer) speak("The answer is " + answer) elif 'search' in query: # Example 'search Black Hole' searchOnNet(query.replace('search','')) elif 'search on net' in query or 'play on net' in query: query = query.replace("search", "") query = query.replace("play", "") webbrowser.open(query) elif "who i am" in query: speak("If you talk then definately you are human.") elif "why you came to world" in query: speak("....future is yet to be decide. That's why I am here .") elif 'is love' in query: speak("Love is a pure emotion. Sometimes, humans relate true love with divine .") elif "who are you" in query: speak("......I am Angularstone version 1 point o . Powered by Raspberry Pi, model 3 B. I am Artificial Intelligence Assistant , created by Angularstone family") elif 'what is angular stone' in query: speak("... 
It is a startup, owned by Purnendu Tiwari, to develop Intelligent Systems like me. To serve Humans .") elif 'news' in query: news() elif 'lock window' in query: speak("locking the device") ctypes.windll.user32.LockWorkStation() elif 'shutdown system' in query: speak("Hold On a Sec ! Your system is on its way to shut down") subprocess.call('shutdown / p /f') elif 'empty recycle bin' in query: winshell.recycle_bin().empty(confirm = False, show_progress = False, sound = True) speak("Recycle Bin Recycled") elif "don't listen" in query or "stop listening" in query: n = {'zero':0,'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9} speak('Pronounce numbers like 1 0 for ten .') time.sleep(1) speak("for how much time you want to stop jarvis from listening commands") a = takeCommand().lower() a = a.split(' ') sleep_time = '' for i in a: sleep_time += i time.sleep(int(sleep_time)) print(a) elif "locate" in query: locate() elif "camera" in query or "take a photo" in query: speak('Opening Camera') ec.capture(0, "Jarvis Camera ", "img.jpg") elif "restart" in query: speak('Restarting the system .') subprocess.call(["shutdown", "/r"]) elif "hibernate" in query or "sleep" in query: speak("Hibernating") subprocess.call("shutdown / h") elif "log off" in query or "sign out" in query: speak("Make sure all the application are closed before sign-out") time.sleep(5) subprocess.call(["shutdown", "/l"]) elif "write a note" in query: speak("What should i write, sir") note = takeCommand() file = open('jarvis.txt', 'w') speak("Sir, Should i include date and time") snfm = takeCommand() if 'yes' in snfm or 'sure' in snfm: time__ = '' time_().split(' ')[3:] time__ = time_().split(' ')[-2]+' '+time_().split(' ')[-1] strTime = time__ file.write(strTime) file.write(" :- ") file.write(note) else: file.write(note) elif "show note" in query: speak("Showing Notes") file = open("jarvis.txt", "r") print(file.read()) speak(file.read(6)) elif "angularstone" in query: wishMe() 
speak("Angularstone, version one point O") speak(assname) elif "weather" in query: data = Weather_Search() speak('Temperature is :'+str(round((data['list'][0]['main']['temp'] -273.15),2))+' degree Celsius '+'.') time.sleep(1) speak('The temperature feels like'+str(round((data['list'][0]['main']['feels_like']-273.15),2))+' degree Celsius '+'.') time.sleep(1) speak('Minimum temperature is :'+str(round((data['list'][0]['main']['temp_min']-273.15),2))+' degree Celsius '+'.') time.sleep(1) speak('Maximum temperature is :'+str(round((data['list'][0]['main']['temp_max']-273.15),2))+' degree Celsius '+'.') time.sleep(1) speak('Air pressure is around '+str(round(data['list'][0]['main']['pressure'],2))+'.') time.sleep(1) speak('Humidity value is'+str(round(data['list'][0]['main']['humidity'],2))+'.') time.sleep(1) speak('Wind Speed is '+str(data['list'][0]['wind']['speed'])+' and wind direction is '+str(data['list'][0]['wind']['deg'])+' degrees .') time.sleep(1) speak('Rain Information'+str(data['list'][0]['rain'])+'.') time.sleep(1) speak('Snow information :'+str(data['list'][0]['snow'])+'.') time.sleep(1) speak('Weather description :'+str(data['list'][0]['weather'][0]['description'])+'.') elif "send message" in query: # You need to create an account on Twilio to use this service send_message() speak('Message sent successfully.') elif "wikipedia" in query: # -> webbrowser.open("wikipedia.com") elif "good morning" in query: speak("A warm" +query) speak("How are you Mister") speak(assname) elif "will you be my girlfriend" in query or "will you be my boyfriend" in query: speak("I'm not sure about, may be you should give me some time") elif "how are you" in query: speak("I'm fine, glad you me that") elif "i love you" in query: speak("It's hard to understand") elif "what is" in query or "who is" in query: import wolframa_alpha client = wolframalpha.Client(wolframa_alpha.wolframa_alpha()) res = client.query(query) try: print (next(res.results).text) speak (next(res.results).text) 
except StopIteration: print ("No results") elif 'youtube' in query or 'play' in query: youtube(query.replace('youtube','')) elif 'instagram' in query: speak('Opening Instagram') Instagram() elif query in ['navigate','navigation','nevigate']: speak('Opening Google Navigation !') navigation() elif query in ['amazon','order from amazon','open amazon','amazon.com','buy from amazon','add to cart on amazon']: speak('What you like to order ?') order_text = takeCommand().lower() while True: if order_text != "None": break else: speak('Come again please !') order_text = takeCommand().lower() amazon_order(order_text) ```
github_jupyter
# Investigate Web Application Firewall (WAF) Data <br/> **Author:** Vani Asawa <br/> **Date:** December 2020 <br/> **Notebook Version:** 1.0 <br/> **Python Version:** Python 3.6 <br/> **Required Packages:** msticpy, pandas, kqlmagic <br/> **Data Sources Required:** WAF data (AzureDiagnostics) <br/> ## What is the purpose of this Notebook? Web Application Firewall (WAF) data records the monitored and blocked HTTP traffic to and from a web service. Due to the large magnitudes of HTTP requests made to such services in any workspace, the data tends to be incredibly noisy, and hence may prevent an analyst from determining if there are any bad requests made to the servers, which could result in a potentially malicious attack. This notebook analyses the blocked WAF Alerts and aims to surface any unusual HTTP requests made by the client IPs to the servers, using a variety of statistical techniques applied on several features of the WAF data, such as the Rule ID of the triggering event, the HTTP status code returned to the client from the alerts, and the contents of the request URIs themselves. ## Overview **[Distribution of WAF logs and blocked alerts over an extended time frame](#DistributionofWAF)** 1. Set an extended time frame to visualise the distribution of the logs/alerts on a bar graph **[What is the distribution of WAF blocked alerts over Rule IDs, http-status codes, and client IP entities?](#DistOver_rID_http_ip)** 1. Set a time frame (recommended: time period of interest, after analysing the distribution of alerts in the extended time frame) 2. Pick a host entity to explore in further detail 3. Set x and y axes from the variables above, and view the number of alerts over the designated time frame. **[Cluster the request URIs in WAF blocked alerts, based on TFIDF scores](#ClusterURIs)** *Term frequency-inverse document frequency (TFIDF)* score is a numerical statistic of how important a variable is to a document. 
The value of the statistic is directly proportional to the variable's frequency in the document, and inversely proportional to the number of documents that contain the variable. More information about TFIDF can be found [here](https://www.researchgate.net/publication/326425709_Text_Mining_Use_of_TF-IDF_to_Examine_the_Relevance_of_Words_to_Documents) In our analysis, the *variable* will be the 'split URIs' and 'rule IDs', while a single *document* is all the blocked alerts for a single client IP in the selected time frame. We will be assessing the relative importance of every single token of the split request URIs and the number of times a ruleID is triggered for our blocked alerts over multiple such 'documents'. We will be using these two sets of scores to cluster the request URIs, and obtain single/grouped sets of interesting (and potentially malicious) request URIs that were blocked by the WAF. 1. Compute TFIDF scores based on the following 2 approaches: - Request URIs split on "/" against the client IP entities - Number of blocked alerts for every Rule ID against the client IP entities 2. Visualising the TFIDF scores for both approaches 3. Performing DBScan Clustering + PCA to obtain the clustered and outlier request URIs for both approaches 4. KQL query to further examine the WAF logs and blocked alerts in the time frames with outlier request URIs ## Using the Notebook **Prerequisites** - msticpy - install the latest using pip install --upgrade msticpy - pandas - install using pip install pandas - kqlmagic **Running the Notebook** The best way of using the notebook is as follows: 1. Individually run all of the cells up to the start of Section 1: - Initialization and installation of libraries - Authenticating to the workspace - Setting notebook parameters 2. Default parameters will allow the entire notebook to run from Section I using the 'Run Selected Cell and All Below' option under the Run tab. 
However, for added value, run the cells sequentially in any given section. - At the beginning of each section, set the time parameters. It is recommended that the first and third section have a larger timeframe than the second and fourth sections. - Wait for the cell to finish running, before proceeding - Select the options from the widget boxes when displayed and proceed. ``` from pathlib import Path import os import sys from pathlib import Path from IPython.display import display, HTML REQ_PYTHON_VER=(3, 6) REQ_MSTICPY_VER=(1, 0, 0) REQ_MP_EXTRAS = ["ml", "kql"] update_nbcheck = ( "<p style='color: orange; text-align=left'>" "<b>Warning: we needed to update '<i>utils/nb_check.py</i>'</b><br>" "Please restart the kernel and re-run this cell." "</p>" ) display(HTML("<h3>Starting Notebook setup...</h3>")) if Path("./utils/nb_check.py").is_file(): try: from utils.nb_check import check_versions except ImportError as err: %xmode Minimal !curl https://raw.githubusercontent.com/Azure/Azure-Sentinel-Notebooks/master/utils/nb_check.py > ./utils/nb_check.py 2>/dev/null display(HTML(update_nbcheck)) if "check_versions" not in globals(): raise ImportError("Old version of nb_check.py detected - see instructions below.") %xmode Verbose check_versions(REQ_PYTHON_VER, REQ_MSTICPY_VER, REQ_MP_EXTRAS) # If not using Azure Notebooks, install msticpy with # !pip install msticpy from msticpy.nbtools import nbinit nbinit.init_notebook( namespace=globals(), additional_packages=["adjustText", "plotly"] ); from ipywidgets import widgets import plotly.graph_objects as go import plotly.express as px import re from sklearn.feature_extraction.text import TfidfVectorizer %matplotlib inline from sklearn.cluster import KMeans from sklearn import metrics from sklearn.cluster import DBSCAN from sklearn.decomposition import PCA from adjustText import adjust_text import itertools import ipaddress import traceback pd.set_option('display.max_rows', 100) pd.set_option('display.max_columns', 50) 
pd.set_option('display.max_colwidth', 40) pd.set_option('display.max_colwidth', None) layout = widgets.Layout(width="50%", height="80px") style = {"description_width": "200px"} class color: BOLD = '\033[1m' END = '\033[0m' # See if we have an Azure Sentinel Workspace defined in our config file. # If not, let the user specify Workspace and Tenant IDs ws_config = WorkspaceConfig() if not ws_config.config_loaded: ws_config.prompt_for_ws() qry_prov = QueryProvider(data_environment="AzureSentinel") print("done") # Authenticate to Azure Sentinel workspace qry_prov.connect(ws_config) ``` **Querying Function** : Accessing the results of the Kusto query as a pandas dataframe, and removing empty/null columns from the dataframe ``` def showQuery(query): df = qry_prov.exec_query(query) trimDF(df) return df def trimDF(df): # Store names of columns with null values for all entries empty_null_cols = [col for col in df.columns if df[col].isnull().all()] # Store names of columns with empty string '' values for all entries empty_str_cols = [] for col in df.columns: try: if ''.join(df[col].map(str)) == '': empty_str_cols = empty_str_cols + [col] except: continue df.drop(empty_null_cols + empty_str_cols, axis=1, inplace=True) binIntervals = ['1m', '5m', '10m', '15m', '30m', '1h', '12h', '1d', '5d', '10d'] ``` **Selecting a Host** ``` def queryHost(startTime, endTime): query = ''' AzureDiagnostics | where TimeGenerated between (datetime({startTime}).. 
datetime({endTime})) | where Category == "ApplicationGatewayFirewallLog" | where action_s == 'Blocked' or isempty(action_s) | summarize AlertCountPerHost = count() by hostname_s, bin(timeStamp_t, {binInterval}) | render timechart '''.format(startTime = startTime, endTime = endTime, binInterval = '1h') return(query) ``` **Auto determine masking bits for clubbing IPs** ``` def maskBitsVal(uniqueIPLen): if uniqueIPLen > 150: return '/8' elif uniqueIPLen > 40: return '/16' elif uniqueIPLen > 15: return '/24' return '/32' ``` ## Section I: Distribution of WAF logs and blocked alerts over an extended time frame <a name="DistributionofWAF"></a> Select an extended time frame to view the distribution of WAF logs and blocked alerts over all hosts. ``` query_times_1 = nbwidgets.QueryTime(units='day', max_before=30, before=-15, max_after=-1) query_times_1.display() categories = ['ApplicationGatewayAccessLog', 'ApplicationGatewayFirewallLog'] def viewLogs(category): log_alert_query = ''' AzureDiagnostics | where TimeGenerated between (datetime({startTime}).. datetime({endTime})) | where Category == "{category}" | where action_s == 'Blocked' or isempty(action_s) | summarize NoOfAlerts= count() by bin(timeStamp_t, {binInterval}) | render timechart '''.format(startTime = query_times_1.start, endTime = query_times_1.end, category = category, binInterval = '1h') %kql -query log_alert_query rawDataQuery = """ AzureDiagnostics | where TimeGenerated between (datetime({startTime}).. 
datetime({endTime})) | where Category == '{category}' | where action_s == 'Blocked' or isempty(action_s) | take 15 """.format(startTime = query_times_1.start, endTime = query_times_1.end, category = category) display(showQuery(rawDataQuery).head(5)) category = widgets.Select(options = categories, style = style, layout = layout, description = 'Choose logs/alerts: ') display(category) viewLogs(category = category.value) ``` ## Section II: What is the distribution of blocked WAF alerts over Rule IDs, http-status codes, and client IP Entities? <a name="DistOver_rID_http_ip"></a> Select a time frame of interest to view the distribution of WAF blocked alerts over all hosts. *Recommended:* Analyse a shorter time frame than Section I for more detail ``` query_times_2 = nbwidgets.QueryTime(units='day', max_before=30, before=-10, max_after=-1) query_times_2.display() ``` ### Select a host entity The following host entity will be used for the remainder of this section ``` query = queryHost(query_times_2.start, query_times_2.end) %kql -query query try: df_host = showQuery(query) list_hosts = set([x for x in df_host['hostname_s']]) df = df_host.groupby(['hostname_s']).agg({'AlertCountPerHost': sum}).rename(columns = {'AlertCountPerHost': 'Num_blocked_alerts'}) hosts = widgets.Select(options=list_hosts, style = style, layout = layout, value=df['Num_blocked_alerts'].idxmax(), description = 'Select Host: ') display(df) display(hosts) except Exception as e: print('Error: ' + e) traceback.print_exc() ``` ### Render visualisations of the distribution of blocked alerts for the selected host We will be using balloon plots to visualise the number of WAF alerts over rule IDs, http-status codes, and client IP entities, for the selected host entity. ``` query_distribution = ''' AzureDiagnostics | where TimeGenerated between (datetime({startTime}).. 
datetime({endTime})) | where Category == "ApplicationGatewayFirewallLog" | where hostname_s == "{host}" | where action_s == 'Blocked' or isempty(action_s) | join kind=leftouter ( AzureDiagnostics | where TimeGenerated between (datetime({startTime}).. datetime({endTime})) | where Category == "ApplicationGatewayAccessLog" | summarize by requestUri_s, httpStatus_d ) on requestUri_s | summarize NoOfAlerts = count(), make_set(requestUri_s), DistinctURIs = dcount(requestUri_s) by clientIp_s, ruleId_s, httpStatus_d1 '''.format(startTime = query_times_2.start, endTime = query_times_2.end, host = hosts.value) try: df_distribution = showQuery(query_distribution) df_distribution.rename(columns = {'clientIp_s':'Ip Address', 'ruleId_s':'Rule ID', 'set_requestUri_s': 'Request Uris'}, inplace = True) if 'httpStatus_d1' in df_distribution.columns: df_distribution = df_distribution.sort_values(by=['httpStatus_d1'], ascending = True).reset_index(drop = True) df_distribution.rename(columns = {'httpStatus_d1':'Http status'}, inplace = True) df_distribution['Http status'] = 'h: ' + df_distribution['Http status'].astype(str) maskBits = maskBitsVal(len(df_distribution['Ip Address'].unique())) df_distribution['Ip Address'] = df_distribution['Ip Address'].apply(lambda x: ipaddress.IPv4Network(x + maskBits, strict = False)) df_distribution['Ip Address'], df_distribution['Rule ID'] = 'Ip ' + df_distribution['Ip Address'].astype(str), 'rID ' + df_distribution['Rule ID'].astype(str) except Exception as e: print('Error: ' + e) traceback.print_exc() options = ['Ip Address', 'Rule ID'] if 'Http status' in df_distribution.columns: options += ['Http status'] def viewBalloonPlot(x_axis, y_axis, display_rawResult): try: df_balloon_plot = (df_distribution .groupby([x_axis, y_axis], as_index=False) .agg({'NoOfAlerts': sum, 'DistinctURIs': sum, 'Request Uris': list}) .reset_index(drop = True)) fig = px.scatter(df_balloon_plot, x=df_balloon_plot[x_axis], y = df_balloon_plot[y_axis], size= np.log(1 + 
df_balloon_plot['NoOfAlerts'] ), color = 'NoOfAlerts', hover_data=['NoOfAlerts', 'DistinctURIs']) fig.update_layout(height = max(300, 30 * len(set(df_balloon_plot[y_axis]))), title_text='Alert Distribution for host ID '+ str(hosts.value)) fig.show() if display_rawResult == 'Yes': print('Top 5 raw results with the highest number of alerts: \n') df_balloon_plot['Request Uris'] = [np.unique(list(itertools.chain(*row['Request Uris']))) for index, row in df_balloon_plot.iterrows() ] df_balloon_plot['DistinctURIs'] = df_balloon_plot['Request Uris'].str.len() display(df_balloon_plot[[y_axis, x_axis, 'NoOfAlerts','Request Uris', 'DistinctURIs']].sort_values(by='NoOfAlerts', ascending = False).head(5)) except ValueError: print('ValueError: Choose distinct x and y axes') except Exception as e: print('Error: ' + e) traceback.print_exc() x_axis = widgets.Select(options = options, style = style, layout = layout, description = 'Select x-axis: ') y_axis = widgets.Select(options = options, style = style, layout = layout, description = 'Select y-axis: ') display_rawResult = widgets.Select(options = ['Yes', 'No'], description = 'Display raw results: ') md("Select graph properties:", "bold") display(x_axis) display(y_axis) display(display_rawResult) viewBalloonPlot, x_axis = x_axis.value, y_axis = y_axis.value, display_rawResult = display_rawResult.value) display(w) ``` ## Section III: Cluster the request URIs in blocked WAF Alerts, based on TFIDF scores <a name="ClusterURIs"></a> Select the timeframe and host entity for this section of the notebook. 
*Recommended*: Set a timeframe of >20 days ``` query_times_3 = nbwidgets.QueryTime(units='day', max_before=30, before=10, max_after=-1) query_times_3.display() df_host_2 = showQuery(queryHost(query_times_3.start, query_times_3.end)) df = df_host_2.groupby(['hostname_s']).agg({'AlertCountPerHost': sum}).rename(columns = {'AlertCountPerHost': 'Num_blocked_alerts'}) hosts_2 = widgets.Select(options=set([x for x in df_host_2['hostname_s']]), value=df['Num_blocked_alerts'].idxmax(), description = 'Select Host: ') display(df) display(hosts_2) ``` **Enter min_df and max_df value parameters** *min_df*: The min_df variable is used to eliminate terms that do not appear very frequently in our data. A min_df value of 0.01 implies eliminating terms that apear in less than 1% of the data. *max_df*: The max_df variable eliminates terms that appear very frequently in our data. A max_df value of 0.9 implies eliminating terms that appear in more than 90% of the data. For more information about these parameters in the TFIDF vectorizer, please see [here](https://stackoverflow.com/questions/27697766/understanding-min-df-and-max-df-in-scikit-countvectorizer) **Note:** In the case of errors running the code below for the two approaches (Request URIs split on "/" against the client IP entities OR Number of blocked alerts for every Rule ID against the client IPs), run the TFIDF vectoriser for ALL the data If you would like to view the TFIDF scores for all the data, change the following code in the `tfidfScores` function: `vectorizer = TfidfVectorizer(tokenizer=identity_tokenizer, lowercase=False, min_df = min_df_value, max_df = max_df_value) ` to `vectorizer = TfidfVectorizer(tokenizer=identity_tokenizer, lowercase=False) ` ``` min_df_widget = widgets.Text(style = style, layout = widgets.Layout(width="50%", height="30px"), description = 'Enter min_df: ', placeholder = '% or Integer or None', value = '0.01') max_df_widget = widgets.Text(style = style, layout = widgets.Layout(width="50%", 
height="30px"), description = 'Enter max_df: ', placeholder = '% or Integer or None', value = '0.9') display(min_df_widget) display(max_df_widget) try: min_df_value = float(min_df_widget.value) max_df_value = float(max_df_widget.value) except Exception as e: print('Error: ' + str(e)) traceback.print_exc() def tfidfScores(df, tokenList = None): def identity_tokenizer(text): return text vectorizer = TfidfVectorizer(tokenizer=identity_tokenizer, lowercase=False, min_df = min_df_value, max_df = max_df_value) vectors = vectorizer.fit_transform(tokenList) feature_names = vectorizer.get_feature_names() dense = vectors.todense() denselist = dense.tolist() df_scores = pd.DataFrame(denselist, columns = feature_names) multicol1 = pd.MultiIndex.from_tuples([('weight', str(j)) for j in df_scores.columns]) df_multiIndex = pd.DataFrame([list(df_scores.iloc[i]) for i in range(0, len(df_scores))], index=[df['Ip Address']], columns=multicol1) return df_multiIndex ``` ### Approach I: Compute TFIDF scores for split request URIs in the blocked WAF Alerts against client IP entities ``` query_URIs = ''' AzureDiagnostics | where TimeGenerated between (datetime({startTime}).. 
datetime({endTime})) | where Category == "ApplicationGatewayFirewallLog" | where hostname_s startswith "{host}" | where action_s == 'Blocked' or isempty(action_s) | distinct clientIp_s, requestUri_s | summarize make_list(requestUri_s) by clientIp_s '''.format(startTime = query_times_3.start, endTime = query_times_3.end, host = hosts_2.value) try: df_URIs = showQuery(query_URIs) df_URIs.rename(columns = {'clientIp_s':'Ip Address', 'list_requestUri_s': 'RequestUris'}, inplace = True) viewData_splitUri = df_URIs.copy() maskBits = maskBitsVal(len(viewData_splitUri['Ip Address'].unique())) viewData_splitUri['Ip Address'] = viewData_splitUri['Ip Address'].apply(lambda x: ipaddress.IPv4Network(x + maskBits, strict = False)) viewData_splitUri.groupby(["Ip Address"], as_index=False).agg({'RequestUris': list}) tokenList = [] for index, row in viewData_splitUri.iterrows(): splitUris = re.split('/', ''.join(row['RequestUris'])) tokenList = tokenList + [splitUris] df_splitUri_tfidf = tfidfScores(viewData_splitUri, tokenList) except Exception as e: print('Error: ' + str(e)) traceback.print_exc() ``` ### Approach II: Computer TFIDF scores for volume of blocked WAF alerts for Rule Ids against the client IP entities ``` query_RuleIds = ''' AzureDiagnostics | where TimeGenerated between (datetime({startTime}).. 
datetime({endTime})) | where Category == "ApplicationGatewayFirewallLog" | where hostname_s startswith "{host}" | where action_s == 'Blocked' | summarize alertCount = count(), make_set(requestUri_s) by clientIp_s, ruleId_s '''.format(startTime = query_times_3.start, endTime = query_times_3.end, host = hosts_2.value) try: dfPrac = showQuery(query_RuleIds) df_RuleIds = showQuery(query_RuleIds) df_RuleIds.rename(columns = {'clientIp_s':'Ip Address', 'ruleId_s':'RuleId', 'set_requestUri_s': 'RequestUris'}, inplace = True) maskBits = maskBitsVal(len(df_RuleIds['Ip Address'].unique())) df_RuleIds['Ip Address'] = df_RuleIds['Ip Address'].apply(lambda x: ipaddress.IPv4Network(x + maskBits, strict = False)) viewData_ruleId = df_RuleIds.groupby(["Ip Address"], as_index=False).agg({'RuleId': list, 'alertCount': list, 'RequestUris': list}) tokenList = [sum([[s] * n for s, n in zip(viewData_ruleId['RuleId'][x], viewData_ruleId['alertCount'][x])], []) for x in range(0, len(viewData_ruleId))] df_ruleId_tfidf = tfidfScores(viewData_ruleId, tokenList) except Exception as e: print('Error: ' + e) traceback.print_exc() ``` ### Visualisation of the TFIDF scores for both approaches We will be using balloon plots to view the TFIDF scores for the two approaches ``` options = ['RuleId', 'SplitUris'] def visualiseTFIDF(TfidfCategory): try: max_category = 30 df = pd.DataFrame() if TfidfCategory == 'RuleId': df = df_ruleId_tfidf.copy() else: df = df_splitUri_tfidf.copy() df_tfidf = df.iloc[:, : max_category].stack().reset_index(drop = False).rename(columns = {'level_1':TfidfCategory, 'weight':'tfidf'}) df_tfidf['Ip Address'] = 'Ip ' + df_tfidf['Ip Address'].astype(str) if 'RuleId' == TfidfCategory: df_tfidf['RuleId'] = 'rID ' + df_tfidf['RuleId'].astype(str) else: df_tfidf['SplitUris'] = df_tfidf['SplitUris'].apply(lambda x: (x[0:20]+ '...') if len(x)> 20 else x) fig = px.scatter(df_tfidf, x = df_tfidf[TfidfCategory], y = df_tfidf['Ip Address'], size= np.log(1 + df_tfidf['tfidf']), color = 
df_tfidf['tfidf'], hover_data=[df_tfidf['tfidf']]) fig.update_layout(height = max(800, 20 * len(set(df_tfidf[TfidfCategory]))), title_text= 'TFIDF distribution of ' + TfidfCategory + ' against client IPs', width = 1700) fig.show() except Exception as e: print('Error: ' + e) traceback.print_exc() TfidfCategory = widgets.Select(options = options, style = style, layout = layout, description = 'TFIDF approach: ') display(TfidfCategory) visualiseTFIDF(TfidfCategory = TfidfCategory.value) ``` ### DBSCAN Clustering and PCA of the request URIs for both approaches DBSCAN is a non-parametric density-based spatial clustering algorithm, which groups together points that are "closely packed" together. Points which lie in low density regions are marked as outliers. For more information, please see [here](https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf). We use DBScan on our data in order to aggregate request URIs which are similar to each other, and surface unusual request URIs as outliers. The clustering uses the Tfidf scores data obtained for the rule ID and split URIs approaches respectively. Select the eps and min_samples value for DBScan and n_components value for PCA below. More information about these parameters can be found [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.dbscan.html) and [here](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html). **DBScan:** *eps value:* Eps value is a measure of the distance below which two points are considered neighbors. *min_samples:* The minimum number of neighbors that a point should have in order to be classified as a core point. The core point is included in the min_samples count. **PCA:** PCA is a dimensionality reduction technique that compresses the multivariate data into principal components, which describe most of the variation in the original dataset. 
In our case, we are able to better visualise the clubbing of similar and outlier request URIs by visualising the first two Principal components. *n_components:* Number of principal components ``` eps_widget = widgets.Text(style = style, layout = widgets.Layout(width="50%", height="30px"), description = 'DBSCAN - Enter eps value', value = '0.4') min_samples_widget = widgets.IntSlider(style = style, layout = widgets.Layout(width="50%", height="30px"), description='DBSCAN - Enter min samples', start=1, end=15, step=1, value=5) n_components_widget = widgets.IntSlider(style = style, layout = widgets.Layout(width="50%", height="30px"), description='PCA - Enter n_components', start=1, end=15, step=1, value=2) display(eps_widget) display(min_samples_widget) display(n_components_widget) def db_scan_clustering(data, eps = float(eps_widget.value)): dbscan = DBSCAN(eps=eps, min_samples = int(min_samples.value)) dbscan.fit(data) return dbscan.labels_ def principal_component_analysis(data, eps = float(eps_widget.value)): while True: try: pca = PCA(n_components=int(n_components_widget.value)) pca.fit(data) x_pca = pca.transform(data) break except: continue clusters = db_scan_clustering(data.values, eps) label = list(range(0, len(data), 1)) plt.figure(figsize=(20,15)) scatter = plt.scatter(x_pca[:,0],x_pca[:,1],c = clusters,cmap='rainbow') handles, labels = scatter.legend_elements(prop="colors", alpha=0.6) plt.legend(handles, labels, loc="upper right", title="Clusters") n = list(range(0, len(x_pca[:,0]), 1)) texts = [] for i, txt in enumerate(n): texts.append(plt.text(x_pca[:,0][i], x_pca[:,1][i], txt)) adjust_text(texts) plt.show() options1 = ['RuleId', 'SplitUris'] def viewPCA(tfidfCategory): df = df_splitUri_tfidf.copy() viewData = viewData_splitUri.copy() if tfidfCategory == 'RuleId': df = df_ruleId_tfidf.copy() viewData = viewData_ruleId.copy() print(tfidfCategory + ' approach (Outliers + Clustered request URI data): \n') while True: try: principal_component_analysis(df) 
break except: continue print(color.BOLD + 'Principal Component Analysis \n' + color.END) tfidfCategory = widgets.Select(options = options1, style = style, layout = layout, description = 'TFIDF approach: ') display(tfidfCategory) viewPCA(tfidfCategory = tfidfCategory.value) options1 = ['RuleId', 'SplitUris'] options2 = ['Outlier', 'Clustered'] def viewClusters(tfidfCategory, requestURIs): try: df = df_splitUri_tfidf.copy() viewData = viewData_splitUri.copy() if tfidfCategory == 'RuleId': df = df_ruleId_tfidf.copy() viewData = viewData_ruleId.copy() clusters = db_scan_clustering(df.values) print(requestURIs + ' URIs for ' + tfidfCategory+ ': \n') clusterList = list(set(clusters)) try: clusterList.remove(-1) except: print() if requestURIs == 'Outlier': clusterList = [-1] if clusterList: for k in clusterList: print('Cluster ' + str(k)) display(viewData[viewData['Ip Address'].isin(df.index.get_level_values(0)[clusters == k])]) else: print('No Data') except Exception as e: print('Error: ' + e) traceback.print_exc() print(color.BOLD + 'DBScan Clustering of the Request URIs \n' + color.END) tfidfCategory = widgets.Select(options = options1, style = style, layout = layout, description = 'TFIDF approach: ') requestURIs = widgets.Select(options = options2, style = style, layout = layout, description = 'Request URIs: ') display(tfidfCategory) display(requestURIs) viewClusters, tfidfCategory = widgets.Select(options = options1, style = style, layout = layout, description = 'TFIDF approach: '), requestURIs = widgets.Select(options = options2, style = style, layout = layout, description = 'Request URIs: ') )) ``` ### Kusto query to further examine the WAF logs and blocked alerts in the time frames with outlier request URIs ``` ipAddress = widgets.Text(style = style, layout = widgets.Layout(width="50%", height="30px"), description = 'IP address: ', placeholder = 'Enter masked IP address from the results above. 
Include masking bits.') requestURI = widgets.Text(style = style, layout = widgets.Layout(width="50%", height="30px"), description = 'Request URI: ', placeholder = 'Enter request URI from the results above') print(color.BOLD + '\nStart time: ' + color.END + str(query_times_3.start) + '\n') print(color.BOLD + 'End time: ' + color.END + str(query_times_3.end) + '\n') display(ipAddress) display(requestURI) try: pd.set_option('display.max_colwidth', 20) kql_query = ''' AzureDiagnostics | where TimeGenerated between (datetime({startTime}).. datetime({endTime})) | where Category == "{category}" | where {hostname} startswith "{host}" | where action_s == 'Blocked' or isempty(action_s) | where {ip} startswith "{ipaddress}" | extend originalRequestUriWithArgs_s = column_ifexists("originalRequestUriWithArgs_s", "") | where requestUri_s contains {uri} or originalRequestUriWithArgs_s contains {uri} | take 10 ''' cutOff = [1, 2, 3, 4] intlist = [8, 16, 24, 32] if ipAddress.value != '': ipaddress = str(ipAddress.value).strip().split('/')[0] maskBits = int(str(ipAddress.value).strip().split('/')[1]) ipaddress = '.'.join(ipaddress.split('.')[0:cutOff[intlist.index(maskBits)]]) else: ipaddress = '' print(color.BOLD + '\nStart time: ' + color.END + str(query_times_3.start) + '\n') print(color.BOLD + 'End time: '+ color.END + str(query_times_3.end) + '\n') print(color.BOLD + 'Ip Address entered: ' + color.END + str(ipAddress.value) + '\n') print(color.BOLD + 'Request Uri entered: ' + color.END + str((requestURI.value).strip()) + '\n' ) category = 'ApplicationGatewayAccessLog' ip_var = 'clientIP_s' host_var = 'host_s' uri = '\'' + (requestURI.value).strip() + '\'' kql_accessLogs = kql_query.format(hostname = host_var, startTime = query_times_3.start, endTime = query_times_3.end, host = hosts_2.value, category = category, ip = ip_var, ipaddress = ipaddress, uri = uri) df_rawAccessKustoQuery = showQuery(kql_accessLogs) print(category + ' (Raw) Data- \n') 
display(df_rawAccessKustoQuery.head(10)) category = 'ApplicationGatewayFirewallLog' ip_var = 'clientIp_s' host_var = 'hostname_s' uri = '@' + '\'' + (requestURI.value).strip() + '\'' kql_firewallLogs = kql_query.format(hostname = host_var, startTime = query_times_3.start, endTime = query_times_3.end, host = hosts_2.value, category = category, ip = ip_var, ipaddress = ipaddress, uri = uri,) df_rawFirewallKustoQuery = showQuery(kql_firewallLogs) print(category + ' (Alert) Data- \n') display(df_rawFirewallKustoQuery.head(10)) pd.reset_option('max_colwidth') except Exception as e: print('Error: ' + str(e)) traceback.print_exc() ```
github_jupyter
``` # Comparing fiTQun's results with the fully supervised ResNet-18 classifier on the varying position dataset # Naming convention: first particle type is which file it is from, last particletype is what the hypothesis is ## Imports import sys import os import time import math import random import pdb import h5py # Add the path to the parent directory to augment search for module par_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) if par_dir not in sys.path: sys.path.append(par_dir) # Plotting import import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd # Import the utils for plotting the metrics from plot_utils import plot_utils from plot_utils import notebook_utils_2 from sklearn.metrics import roc_curve, auc # Dictionary mapping the ordinal labels to particle types LABEL_DICT = {0:"gamma", 1:"e", 2:"mu"} softmax_index_dict = {value:key for key, value in LABEL_DICT.items()} label_0 = "e" label_1 = "mu" min_energy = 0 max_energy = 1000 # Fix the colour scheme for each particle type COLOR_DICT = {"gamma":"red", "e":"blue", "mu":"green"} def moving_average(a, n=3) : ret = np.cumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n # Plot multiple ROC curves on the same figure def plot_multiple_ROC(fprs, tprs, thresholds, label_0, label_1, lbound, ubound, interval): min_energy = 0 max_energy = 1000 fig, ax = plt.subplots(figsize=(16,9),facecolor="w") ax.tick_params(axis="both", labelsize=20) model_colors = [np.random.rand(3,) for i in fprs] for j in np.arange(len(fprs)): fpr = fprs[j] tpr = tprs[j] threshold = thresholds[j] roc_auc = auc(fpr, tpr) inv_fpr = [] for i in fpr: inv_fpr.append(1/i) if i != 0 else inv_fpr.append(1/1e-5) tnr = 1. 
- fpr # TNR vs TPR plot ax.plot(tpr, inv_fpr, color=model_colors[j], label=r"Interval ${1:0.3f}$: $\{0}$, AUC ${1:0.3f}$".format((j+1),label_0, roc_auc) if label_0 is not "e" else r"${0}$, AUC ${1:0.3f}$".format(label_0, roc_auc), linewidth=1.0, marker=".", markersize=4.0, markerfacecolor=model_colors[j]) # Show coords of individual points near x = 0.2, 0.5, 0.8 todo = {0.2: True, 0.5: True, 0.8: True} for xy in zip(tpr, inv_fpr, tnr): xy = (round(xy[0], 4), round(xy[1], 4), round(xy[2], 4)) xy_plot = (round(xy[0], 4), round(xy[1], 4)) for point in todo.keys(): if xy[0] >= point and todo[point]: #ax.annotate('(%s, %s, %s)' % xy, xy=xy_plot, textcoords='data', fontsize=18, bbox=dict(boxstyle="square", fc="w")) todo[point] = False ax.grid(True, which='both', color='grey') xlabel = r"$\{0}$ signal efficiency".format(label_0) if label_0 is not "e" else r"${0}$ signal efficiency".format(label_0) ylabel = r"$\{0}$ background rejection".format(label_1) if label_1 is not "e" else r"${0}$ background rejection".format(label_1) ax.set_xlabel(xlabel, fontsize=20) ax.set_ylabel(ylabel, fontsize=20) ax.set_title(r"${0} \leq E < {1}$".format(round(lbound,2), round(ubound,2)), fontsize=20) ax.legend(loc="upper right", prop={"size":20}) plt.margins(0.1) plt.yscale("log") plt.savefig(('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/ROC_' + str(interval) + '.png'), bbox_inches='tight') plt.show() plt.clf() # Clear the current figure plt.close() # Close the opened window return fpr, tpr, threshold, roc_auc def plot_rej_energy(fprs, tprs, thresholds, label_0, label_1, lbound, ubound, interval, efficiency, bins, colour): min_energy = 0 max_energy = 1000 fig, ax = plt.subplots(figsize=(16,9),facecolor="w") ax.tick_params(axis="both", labelsize=20) model_colors = [np.random.rand(3,) for i in fprs] eff_invfpr = np.array([]) error = [] for j in np.arange(len(fprs)): fpr = fprs[j] tpr = tprs[j] threshold = thresholds[j] roc_auc = auc(fpr, tpr) inv_fpr = np.array([]) for i in fpr: if i != 0: 
inv_fpr = np.append(inv_fpr, (1/i)) else: inv_fpr = np.append(inv_fpr, (1/1e-5)) tnr = 1. - fpr eff_index = np.where(np.around(tpr, decimals=2) == 0.8)[0].astype(int) eff_invfpr = np.append(eff_invfpr, inv_fpr[eff_index].mean()) error.append(inv_fpr[eff_index].std()) # TNR vs Energy bin plot label = 0 ax.bar(bins, height=eff_invfpr, yerr=error, width=interval, color=colour, align='edge') ax.legend() ax.set_ylabel("Background rejection", fontsize=20) plt.xlabel("Energy (MeV)", fontsize=20) #plt.ylim(top=10**5) ax.set_title("Rejection vs. Energy Level at " + str(efficiency) + " Efficiency", fontsize=20) plt.yscale("log") plt.savefig(('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/RejectionEnergyBinning_' + str(efficiency) + "_" + str(interval) + '.png'), bbox_inches='tight') plt.show() plt.clf() # Clear the current figure plt.close() # Close the opened window return eff_invfpr, threshold, roc_auc # Get original h5 file info # Import test events from h5 file filtered_index = "/fast_scratch/WatChMaL/data/IWCD_fulltank_300_pe_idxs.npz" filtered_indices = np.load(filtered_index, allow_pickle=True) test_filtered_indices = filtered_indices['test_idxs'] print(test_filtered_indices.shape) original_data_path = "/data/WatChMaL/data/IWCDmPMT_4pi_fulltank_9M.h5" f = h5py.File(original_data_path, "r") hdf5_event_data = (f["event_data"]) original_eventdata = np.memmap(original_data_path, mode="r", shape=hdf5_event_data.shape, offset=hdf5_event_data.id.get_offset(), dtype=hdf5_event_data.dtype) original_eventids = np.array(f['event_ids']) original_rootfiles = np.array(f['root_files']) original_energies = np.array(f['energies']) original_positions = np.array(f['positions']) original_angles = np.array(f['angles']) original_labels = np.array(f['labels']) #filtered_eventdata = original_eventdata[test_filtered_indices] filtered_eventids = original_eventids[test_filtered_indices] filtered_rootfiles = original_rootfiles[test_filtered_indices] filtered_energies = 
original_energies[test_filtered_indices] filtered_positions = original_positions[test_filtered_indices] filtered_angles = original_angles[test_filtered_indices] filtered_labels = original_labels[test_filtered_indices] # Map ResNet results to fiTQun events # Map ResNet results to original h5 file events mapping_indices = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/map_indices_resnet.npz') mapping_indices = mapping_indices['arr_0'].astype(int) res_softmaxes = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/resnet_softmaxes.npz') res_softmaxes = res_softmaxes['arr_0'] res_energies = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/resnet_energies.npz') res_energies = res_energies['arr_0'] res_predictedlabels = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/resnet_predictedlabels.npz') res_predictedlabels = res_predictedlabels['arr_0'] res_softmaxes = res_softmaxes[mapping_indices] res_energies = res_energies[mapping_indices] res_predictedlabels = res_predictedlabels[mapping_indices] # Separate event types e_test_indices = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/test_indices_e.npz') e_test_indices = e_test_indices['arr_0'].astype(int) mu_test_indices = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/test_indices_mu.npz') mu_test_indices = mu_test_indices['arr_0'].astype(int) gamma_test_indices = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/test_indices_gamma.npz') gamma_test_indices = gamma_test_indices['arr_0'].astype(int) e_predictedlabels = res_predictedlabels[e_test_indices] mu_predictedlabels = res_predictedlabels[mu_test_indices] gamma_predictedlabels = res_predictedlabels[gamma_test_indices] e_softmaxes = res_softmaxes[e_test_indices] mu_softmaxes = res_softmaxes[mu_test_indices] gamma_softmaxes = res_softmaxes[gamma_test_indices] e_labels = filtered_labels[e_test_indices] mu_labels = filtered_labels[mu_test_indices] gamma_labels = filtered_labels[gamma_test_indices] e_rootfiles = 
filtered_rootfiles[e_test_indices] mu_rootfiles = filtered_rootfiles[mu_test_indices] gamma_rootfiles = filtered_rootfiles[gamma_test_indices] e_eventids = filtered_eventids[e_test_indices] mu_eventids = filtered_eventids[mu_test_indices] gamma_eventids = filtered_eventids[gamma_test_indices] e_positions = filtered_positions[e_test_indices] mu_positions = filtered_positions[mu_test_indices] gamma_positions = filtered_positions[gamma_test_indices] e_angles = filtered_angles[e_test_indices] mu_angles = filtered_angles[mu_test_indices] gamma_angles = filtered_angles[gamma_test_indices] e_energies = filtered_energies[e_test_indices] mu_energies = filtered_energies[mu_test_indices] gamma_energies = filtered_energies[gamma_test_indices] # Match events in event types to fiTQun results e_map_indices = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/map_indices_e_all.npz') e_map_indices = e_map_indices['arr_0'].astype(int) mu_map_indices = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/map_indices_mu_all.npz') mu_map_indices = mu_map_indices['arr_0'].astype(int) gamma_map_indices = np.load('/home/ttuinstr/VAE/Comparing_ResNet_and_FiTQun/map_indices_gamma_all.npz') gamma_map_indices = gamma_map_indices['arr_0'].astype(int) #e_eventdata = np.sum(original_eventdata[test_filtered_indices][e_test_indices][e_map_indices] > 0, (1,2,3)) #mu_eventdata = np.sum(original_eventdata[test_filtered_indices][mu_test_indices][mu_map_indices] > 0, (1,2,3)) #gamma_eventdata = np.sum(original_eventdata[test_filtered_indices][gamma_test_indices][gamma_map_indices] > 0, (1,2,3)) e_predictedlabels = e_predictedlabels[e_map_indices] mu_predictedlabels = mu_predictedlabels[mu_map_indices] gamma_predictedlabels = gamma_predictedlabels[gamma_map_indices] e_softmaxes = e_softmaxes[e_map_indices] mu_softmaxes = mu_softmaxes[mu_map_indices] gamma_softmaxes = gamma_softmaxes[gamma_map_indices] e_labels = e_labels[e_map_indices] mu_labels = mu_labels[mu_map_indices] gamma_labels = 
gamma_labels[gamma_map_indices] e_positions = e_positions[e_map_indices] mu_positions = mu_positions[mu_map_indices] gamma_positions = gamma_positions[gamma_map_indices] e_angles = e_angles[e_map_indices] mu_angles = mu_angles[mu_map_indices] gamma_angles = gamma_angles[gamma_map_indices] e_energies = e_energies[e_map_indices] mu_energies = mu_energies[mu_map_indices] gamma_energies = gamma_energies[gamma_map_indices] # For making ROC curves e_softmax_0 = e_softmaxes[e_labels==softmax_index_dict[label_0]] mu_softmax_0 = mu_softmaxes[mu_labels==softmax_index_dict[label_0]] gamma_softmax_0 = gamma_softmaxes[gamma_labels==softmax_index_dict[label_0]] e_labels_0 = e_labels[e_labels==softmax_index_dict[label_0]] mu_labels_0 = mu_labels[mu_labels==softmax_index_dict[label_0]] gamma_labels_0 = gamma_labels[gamma_labels==softmax_index_dict[label_0]] e_softmax_1 = e_softmaxes[e_labels==softmax_index_dict[label_1]] mu_softmax_1 = mu_softmaxes[mu_labels==softmax_index_dict[label_1]] gamma_softmax_1 = gamma_softmaxes[gamma_labels==softmax_index_dict[label_1]] e_labels_1 = e_labels[e_labels==softmax_index_dict[label_1]] mu_labels_1 = mu_labels[mu_labels==softmax_index_dict[label_1]] gamma_labels_1 = gamma_labels[gamma_labels==softmax_index_dict[label_1]] total_softmax = np.concatenate((e_softmax_0, e_softmax_1, mu_softmax_0, mu_softmax_1), axis=0) total_labels = np.concatenate((e_labels_0, e_labels_1, mu_labels_0, mu_labels_1), axis=0) e_positions_0 = e_positions[e_labels==softmax_index_dict[label_0]] mu_positions_0 = mu_positions[mu_labels==softmax_index_dict[label_0]] gamma_positions_0 = gamma_positions[gamma_labels==softmax_index_dict[label_0]] e_positions_1 = e_positions[e_labels==softmax_index_dict[label_1]] mu_positions_1 = mu_positions[mu_labels==softmax_index_dict[label_1]] gamma_positions_1 = gamma_positions[gamma_labels==softmax_index_dict[label_1]] total_positions = np.concatenate((e_positions_0, e_positions_1, mu_positions_0, mu_positions_1), axis=0) e_energies_0 = 
e_energies[e_labels==softmax_index_dict[label_0]] mu_energies_0 = mu_energies[mu_labels==softmax_index_dict[label_0]] gamma_energies_0 = gamma_energies[gamma_labels==softmax_index_dict[label_0]] e_energies_1 = e_energies[e_labels==softmax_index_dict[label_1]] mu_energies_1 = mu_energies[mu_labels==softmax_index_dict[label_1]] gamma_energies_1 = gamma_energies[gamma_labels==softmax_index_dict[label_1]] total_energies = np.concatenate((e_energies_0, e_energies_1, mu_energies_0, mu_energies_1), axis=0) res_fpr, res_tpr, res_threshold = roc_curve(total_labels, total_softmax[:,softmax_index_dict[label_0]], pos_label=softmax_index_dict[label_0]) # Take slices of events based on interval size for energy - MUONS energy_slices = [] labels_slices = [] softmax_slices = [] interval = 50 print(int(1000/interval)) for i in np.arange(int(1000/interval)): lb = i*interval ub = (i+1)*interval slice_indices = np.where(((total_energies >lb) & (total_energies <ub)))[0] energy_slices.append(total_energies[slice_indices]) softmax_slices.append(total_softmax[slice_indices]) labels_slices.append(total_labels[slice_indices]) # Make and plot rejection vs. energy bin with fixed efficiency efficiency = 0.8 fprs = [] tprs = [] thresholds = [] for i in np.arange(len(labels_slices)): fpr, tpr, threshold = roc_curve(labels_slices[i], softmax_slices[i][:,softmax_index_dict[label_0]], pos_label=softmax_index_dict[label_0]) fprs.append(fpr) tprs.append(tpr) thresholds.append(threshold) # Set up the bins for the histogram bins = [] for i in np.arange(len(fprs)): bins.append(i*interval) print(len(bins)) print(bins) curve_metrics = plot_rej_energy(fprs, tprs, thresholds, "e", "mu", 0, 1000, 50, efficiency, bins, "green") ```
github_jupyter
## Conditional Independence

Two random variables $X$ and $Y$ are conditionally independent given $Z$, denoted by $X \perp \!\! \perp Y \mid Z$, if

$$p_{X,Y\mid Z} (x,y\mid z) = p_{X\mid Z}(x\mid z) \, p_{Y\mid Z}(y\mid z)$$

In general, marginal independence doesn't imply conditional independence and vice versa.

### Example

R: Red Sox Game <br>
A: Accident <br>
T: Bad Traffic

<img src="..\images\conditional_idependence_example.png" rel="drawing" width=400 >

Find the following probabilities

(a) $\mathbb{P}(R=1) = 0.5$

(b) $\mathbb{P}(R=1 \mid T=1)$

$$\begin{align}p_{R,A\mid T}(r,a\mid 1) &= \frac{p_{T\mid R,A}(1 \mid r, a)\, p_R(r) \, p_A(a)}{p_T(1)}\\ &= c\cdot p_{T\mid R,A}(1 \mid r, a)\end{align}$$

(c) $\mathbb{P}(R=1 \mid T=1, A=1)= \mathbb{P}(R=1 \mid T=1)$

### Practice Problem: Conditional Independence

Suppose $X_0, \dots , X_{100}$ are random variables whose joint distribution has the following factorization:

$$p_{X_0, \dots , X_{100}}(x_0, \dots , x_{100}) = p_{X_0}(x_0) \cdot \prod _{i=1}^{100} p_{X_ i | X_{i-1}}(x_ i | x_{i-1})$$

This factorization is what's called a Markov chain. We'll be seeing Markov chains a lot more later on in the course.

Show that $X_{50} \perp \!\! \perp X_{52} \mid X_{51}$.
**Answer:**

$$
\begin{eqnarray}
p_{X_{50},X_{51},X_{52}}(x_{50},x_{51},x_{52})
&=& \sum_{x_{0} \dots x_{49}} \sum_{x_{53} \dots x_{100}} p_{X_0, \dots , X_{100}}(x_0, \dots , x_{100}) \\
&=& \sum_{x_{0} \dots x_{49}} \sum_{x_{53} \dots x_{100}} \left[p_{X_0}(x_{0}) \prod_{i=1}^{50} p_{X_i\mid X_{i-1}}(x_{i}|x_{i-1})\right] \\
&& \cdot \prod_{i=51}^{52} p_{X_i\mid X_{i-1}}(x_{i}|x_{i-1})\cdot \prod_{i=53}^{100} p_{X_i\mid X_{i-1}}(x_{i}|x_{i-1}) \\
&=& \underbrace{\sum_{x_{0} \dots x_{49}} \left[p_{X_0}(x_{0}) \prod_{i=1}^{50} p_{X_i\mid X_{i-1}}(x_{i}|x_{i-1})\right]}_{=p_{X_{50}}(x_{50})} \\
&& \cdot \prod_{i=51}^{52} p_{X_i\mid X_{i-1}}(x_{i}|x_{i-1})\cdot \underbrace{\sum_{x_{53} \dots x_{100}}\prod_{i=53}^{100} p_{X_i\mid X_{i-1}}(x_{i}|x_{i-1})}_{=1} \\[2ex]
&=& p_{X_{50}}(x_{50}) \cdot p_{X_{51}\mid X_{50}}(x_{51}|x_{50}) \cdot p_{X_{52}\mid X_{51}}(x_{52}|x_{51}) \\[2ex]
&=& p_{X_{50}\mid X_{51}}(x_{50}|x_{51}) \cdot p_{X_{51}}(x_{51}) \cdot p_{X_{52}\mid X_{51}}(x_{52}|x_{51}) \\[2ex]
\frac{p_{X_{50},X_{51},X_{52}}(x_{50},x_{51},x_{52})}{p_{X_{51}}(x_{51})} &=& p_{X_{50}\mid X_{51}}(x_{50}|x_{51}) \cdot p_{X_{52}\mid X_{51}}(x_{52}|x_{51}) \\[2ex]
p_{X_{50},X_{52}\mid X_{51}}(x_{50},x_{52}\mid x_{51}) &=& p_{X_{50}\mid X_{51}}(x_{50}|x_{51}) \cdot p_{X_{52}\mid X_{51}}(x_{52}|x_{51})
\end{eqnarray}
$$
github_jupyter
``` #default_exp eda #hide import transformers import torch import torch.nn as nn import torch.optim as optim import pandas as pd import Hasoc.config as config import Hasoc.utils.utils as utils import Hasoc.utils.engine as engine import Hasoc.model.model as model import Hasoc.dataset.dataset as dataset from functools import partial from sklearn.metrics import f1_score from sklearn.preprocessing import LabelEncoder from transformers import AdamW, get_linear_schedule_with_warmup #hide df = pd.read_csv(config.DATA_PATH/'fold_df.csv') #hide df.head(2) #hide le = LabelEncoder() le.fit_transform(df.task2) le.classes_ #hide df['task2_encoded'] = le.transform(df.task2.values) #hide d = dataset.BertDataset(df.text.values, df.task2_encoded.values) #hide d[0] #hide train_dl = utils.create_loader(df.text.values, df.task2_encoded, bs=config.TRAIN_BATCH_SIZE) valid_dl = utils.create_loader(df.text.values, df.task2_encoded, bs=config.VALID_BATCH_SIZE) train_dl.batch_size #hide next(iter(train_dl)) #hide modeller = model.HasocModel(len(le.classes_)) #hide model_params = list(modeller.named_parameters()) #hide # we don't want weight decay for these no_decay = ['bias', 'LayerNorm.weight', 'LayerNorm.bias'] optimizer_params = [ {'params': [p for n, p in model_params if n not in no_decay], 'weight_decay':0.001}, # no weight decay should be applied {'params': [p for n, p in model_params if n in no_decay], 'weight_decay':0.0} ] #hide lr = config.LR #hide optimizer = AdamW(optimizer_params, lr=lr) #hide num_train_steps = int(len(df) / config.TRAIN_BATCH_SIZE * config.NUM_EPOCHS) #hide scheduler = get_linear_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_train_steps) #hide fit = engine.BertFitter(modeller, (train_dl, valid_dl), optimizer, nn.CrossEntropyLoss(), partial(f1_score, average='macro'), config.DEVICE, scheduler=scheduler) #hide fit.fit(config.NUM_EPOCHS, model_path=None) #hide b = next(iter(train_dl)) targs = b.pop('targets') #hide with 
torch.no_grad(): out = modeller(**b) #hide targs.shape, out.shape #hide (out.softmax(dim=-1)>=0.5), targs out.softmax(dim=-1) out.T.shape out.softmax(dim=-1).max(1)[0].T.expand((*out.T.shape)).T (out.softmax(dim=-1) >= out.softmax(dim=-1).max(1)[0].T.expand((*out.T.shape)).T) (out.softmax(dim=-1)>=0.5) targs.shape #hide f1_score(targs, (out.softmax(dim=-1) >= out.softmax(dim=-1).max(1)[0].T.expand((*out.T.shape)).T), average='macro') #hide f1_score(targs.argmax(dim=-1), out.argmax(dim=-1), average='macro') #hide f1_score(targs, (out.softmax(dim=-1)>=0.5), average='macro') import torch.nn.functional as F class LabelSmoothingCrossEntropy(nn.Module): #from fastai y_int = True def __init__(self, eps:float=0.1, reduction='mean'): super().__init__() self.eps,self.reduction = eps,reduction def forward(self, output, target): c = output.size()[-1] log_preds = F.log_softmax(output, dim=-1) if self.reduction=='sum': loss = -log_preds.sum() else: loss = -log_preds.sum(dim=-1) #We divide by that size at the return line so sum and not mean if self.reduction=='mean': loss = loss.mean() return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), reduction=self.reduction) # def activation(self, out): return F.softmax(out, dim=-1) # def decodes(self, out): return out.argmax(dim=-1) nn.CrossEntropyLoss()(out, targs.argmax(dim=-1)) LabelSmoothingCrossEntropy()(out, targs.argmax(dim=-1)) ```
github_jupyter
# Topic Modeling with Latent Semantic Analysis

Latent Semantic Analysis (LSA) is a method for reducing the dimensionality of documents treated as a bag of words. It is used for document classification, clustering and retrieval. For example, LSA can be used to search for prior art given a new patent application. In this homework, we will implement a small library for simple latent semantic analysis as a practical example of the application of SVD. The ideas are very similar to PCA.

We will implement a toy example of LSA to get familiar with the ideas. If you want to use LSA or similar methods for statistical language analysis, the most efficient Python library is probably [gensim](https://radimrehurek.com/gensim/) - this also provides an online algorithm - i.e. the training information can be continuously updated. Other useful functions for processing natural language can be found in the [Natural Language Toolkit](http://www.nltk.org/).

**Note**: The SVD from scipy.linalg performs a full decomposition, which is inefficient since we only need to decompose until we get the first k singular values. If the SVD from `scipy.linalg` is too slow, please use the `sparsesvd` function from the [sparsesvd](https://pypi.python.org/pypi/sparsesvd/) package to perform SVD instead. You can install in the usual way with

```
!pip install sparsesvd
```

Then import the following

```python
from sparsesvd import sparsesvd
from scipy.sparse import csc_matrix
```

and use as follows

```python
sparsesvd(csc_matrix(M), k=10)
```

```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.linalg as la
import scipy.stats as st

! pip install sparsesvd
```

**Exercise 1 (10 points)**. Calculating pairwise distance matrices.

Suppose we want to construct a distance matrix between the rows of a matrix.
For example, given the matrix

```python
M = np.array([[1,2,3],[4,5,6]])
```

the distance matrix using Euclidean distance as the measure would be

```python
[[ 0.000 1.414 2.828]
 [ 1.414 0.000 1.414]
 [ 2.828 1.414 0.000]]
```

if $M$ was a collection of column vectors.

Write a function to calculate the pairwise-distance matrix given the matrix $M$ and some arbitrary distance function. Your functions should have the following signature:

```
def func_name(M, distance_func):
    pass
```

0. Write a distance function for the Euclidean, squared Euclidean and cosine measures.
1. Write the function using looping for M as a collection of row vectors.
2. Write the function using looping for M as a collection of column vectors.
3. Write the function using broadcasting for M as a collection of row vectors.
4. Write the function using broadcasting for M as a collection of column vectors.

For 3 and 4, try to avoid using transposition (but if you get stuck, there will be no penalty for using transposition). Check that all four functions give the same result when applied to the given matrix $M$.

**Exercise 2 (20 points)**. Write 3 functions to calculate the term frequency (tf), the inverse document frequency (idf) and the product (tf-idf). Each function should take a single argument `docs`, which is a dictionary of (key=identifier, value=document text) pairs, and return an appropriately sized array. Convert '-' to ' ' (space), remove punctuation, convert text to lowercase and split on whitespace to generate a collection of terms from the document text.

- tf = the number of occurrences of term $i$ in document $j$
- idf = $\log \frac{n}{1 + \text{df}_i}$ where $n$ is the total number of documents and $\text{df}_i$ is the number of documents in which term $i$ occurs.

Print the table of tf-idf values for the following document collection

```
s1 = "The quick brown fox"
s2 = "Brown fox jumps over the jumps jumps jumps"
s3 = "The the the lazy dog elephant."
s4 = "The the the the the dog peacock lion tiger elephant"

docs = {'s1': s1, 's2': s2, 's3': s3, 's4': s4}
```

**Exercise 3 (20 points)**.

1. Write a function that takes a matrix $M$ and an integer $k$ as arguments, and reconstructs a reduced matrix using only the $k$ largest singular values. Use the `scipy.linalg.svd` function to perform the decomposition. This is the least squares approximation to the matrix $M$ in $k$ dimensions.

2. Apply the function you just wrote to the following term-frequency matrix for a set of $9$ documents using $k=2$ and print the reconstructed matrix $M'$.

```
M = np.array([[1, 0, 0, 1, 0, 0, 0, 0, 0],
              [1, 0, 1, 0, 0, 0, 0, 0, 0],
              [1, 1, 0, 0, 0, 0, 0, 0, 0],
              [0, 1, 1, 0, 1, 0, 0, 0, 0],
              [0, 1, 1, 2, 0, 0, 0, 0, 0],
              [0, 1, 0, 0, 1, 0, 0, 0, 0],
              [0, 1, 0, 0, 1, 0, 0, 0, 0],
              [0, 0, 1, 1, 0, 0, 0, 0, 0],
              [0, 1, 0, 0, 0, 0, 0, 0, 1],
              [0, 0, 0, 0, 0, 1, 1, 1, 0],
              [0, 0, 0, 0, 0, 0, 1, 1, 1],
              [0, 0, 0, 0, 0, 0, 0, 1, 1]])
```

3. Calculate the pairwise correlation matrix for the original matrix M and the reconstructed matrix using $k=2$ singular values (you may use [scipy.stats.spearmanr](http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html) to do the calculations). Consider the first 5 sets of documents as one group $G1$ and the last 4 as another group $G2$ (i.e. first 5 and last 4 columns). What is the average within group correlation for $G1$, $G2$ and the average cross-group correlation for G1-G2 using either $M$ or $M'$. (Do not include self-correlation in the within-group calculations.).

**Exercise 4 (40 points)**. Clustering with LSA

1. Begin by loading a pubmed database of selected article titles using 'pickle'. With the following:

```import pickle
docs = pickle.load(open('pubmed.pic', 'rb'))```

Create a tf-idf matrix for every term that appears at least once in any of the documents. What is the shape of the tf-idf matrix?

2.
Perform SVD on the tf-idf matrix to obtain $U \Sigma V^T$ (often written as $T \Sigma D^T$ in this context with $T$ representing the terms and $D$ representing the documents). If we set all but the top $k$ singular values to 0, the reconstructed matrix is essentially $U_k \Sigma_k V_k^T$, where $U_k$ is $m \times k$, $\Sigma_k$ is $k \times k$ and $V_k^T$ is $k \times n$. Terms in this reduced space are represented by $U_k \Sigma_k$ and documents by $\Sigma_k V^T_k$. Reconstruct the matrix using the first $k=10$ singular values. 3. Use agglomerative hierarchical clustering with complete linkage to plot a dendrogram and comment on the likely number of document clusters with $k = 100$. Use the dendrogram function from [SciPy ](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.cluster.hierarchy.dendrogram.html). 4. Determine how similar each of the original documents is to the new document `mystery.txt`. Since $A = U \Sigma V^T$, we also have $V = A^T U S^{-1}$ using orthogonality and the rule for transposing matrix products. This suggests that in order to map the new document to the same concept space, first find the tf-idf vector $v$ for the new document - this must contain all (and only) the terms present in the existing tf-idx matrix. Then the query vector $q$ is given by $v^T U_k \Sigma_k^{-1}$. Find the 10 documents most similar to the new document and the 10 most dissimilar. Notes on the Pubmed articles ---- These were downloaded with the following script. 
```python from Bio import Entrez, Medline Entrez.email = "YOUR EMAIL HERE" import pickle try: docs = pickle.load(open('pubmed.pic', 'rb')) except Exception, e: print e docs = {} for term in ['plasmodium', 'diabetes', 'asthma', 'cytometry']: handle = Entrez.esearch(db="pubmed", term=term, retmax=50) result = Entrez.read(handle) handle.close() idlist = result["IdList"] handle2 = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text") result2 = Medline.parse(handle2) for record in result2: title = record.get("TI", None) abstract = record.get("AB", None) if title is None or abstract is None: continue docs[title] = '\n'.join([title, abstract]) print(title) handle2.close() pickle.dump(docs, open('pubmed.pic', 'wb')) docs.values() ```
github_jupyter
# Transfer Learning experiments ``` import os import torch import mlflow import numpy as np from torch import nn from torch import optim from collections import OrderedDict import torch.nn.functional as F from torchvision import datasets, transforms, models ``` ## Transfer Learning with DenseNet ### Loading data ``` train_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225]) ]) test_transforms = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225]) ]) # setting up data loaders data_dir = os.path.join(os.pardir, 'data', 'Plant_leave_diseases_224') train_data = datasets.ImageFolder(os.path.join(data_dir, 'train'), transform=train_transforms) test_data = datasets.ImageFolder(os.path.join(data_dir, 'validation'), transform=test_transforms) ``` ### Getting Resnet model ``` model = models.densenet121(pretrained=True) # Freezing the paramiters of the layers we do not want to train for parameters in model.parameters(): parameters.requires_grad = False # Updating Classification layer _inputs = model.classifier.in_features model.classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(_inputs, 500)), ('relu', nn.ReLU()), ('dropout', nn.Dropout(0.2)), ('fc2', nn.Linear(500, 39)), ('output', nn.LogSoftmax(dim=1)) ])) ``` ### Training ``` # Configs config = { 'max_epochs': 200, 'learning_rate': 0.003, 'resolution': 224, 'name': 'densnet' } def train(model, train_loader, validation_loader, config, n_epochs=10, stopping_treshold=None): if torch.cuda.is_available(): print('CUDA is available! 
Training on GPU ...') model.cuda() # Loss and optimizer setup criterion = nn.NLLLoss() optimizer = optim.Adam(model.parameters(), lr=config['learning_rate']) # Setting minimum validation loss to inf validation_loss_minimum = np.Inf train_loss_history = [] validation_loss_history = [] for epoch in range(1, n_epochs +1): training_loss = 0.0 validation_loss = 0.0 # Training loop training_accuracies = [] for X, y in train_loader: # Moving data to gpu if using if torch.cuda.is_available(): X, y = X.cuda(), y.cuda() # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(X) # calculate the batch loss loss = criterion(output, y) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss training_loss += loss.item()*X.size(0) # calculating accuracy ps = torch.exp(output) top_p, top_class = ps.topk(1, dim=1) equals = top_class == y.view(*top_class.shape) training_accuracies.append(torch.mean(equals.type(torch.FloatTensor)).item()) # Validation Loop with torch.no_grad(): accuracies = [] for X, y in validation_loader: # Moving data to gpu if using if torch.cuda.is_available(): X, y = X.cuda(), y.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(X) # calculate the batch loss loss = criterion(output, y) # update validation loss validation_loss += loss.item()*X.size(0) # calculating accuracy ps = torch.exp(output) top_p, top_class = ps.topk(1, dim=1) equals = top_class == y.view(*top_class.shape) accuracies.append(torch.mean(equals.type(torch.FloatTensor)).item()) # Mean loss mean_training_loss = training_loss/len(train_loader.sampler) mean_validation_loss = validation_loss/len(validation_loader.sampler) mean_train_accuracy = sum(training_accuracies)/len(training_accuracies) mean_accuracy = 
sum(accuracies)/len(accuracies) train_loss_history.append(mean_training_loss) validation_loss_history.append(mean_validation_loss) # Printing epoch stats print(f'Epoch: {epoch}/{n_epochs}, ' +\ f'Training Loss: {mean_training_loss:.3f}, '+\ f'Train accuracy {mean_train_accuracy:.3f} ' +\ f'Validation Loss: {mean_validation_loss:.3f}, '+\ f'Validation accuracy {mean_accuracy:.3f}') # logging with mlflow if mlflow.active_run(): mlflow.log_metric('loss', mean_training_loss, step=epoch) mlflow.log_metric('accuracy', mean_train_accuracy, step=epoch) mlflow.log_metric('validation_accuracy', mean_accuracy, step=epoch) mlflow.log_metric('validation_loss', mean_validation_loss, step=epoch) # Testing for early stopping if stopping_treshold: if mean_validation_loss < validation_loss_minimum: validation_loss_minimum = mean_validation_loss print('New minimum validation loss (saving model)') save_pth = os.path.join('models',f'{config["name"]}.pt') torch.save(model.state_dict(), save_pth) elif len([v for v in validation_loss_history[-stopping_treshold:] if v > validation_loss_minimum]) >= stopping_treshold: print(f"Stopping early at epoch: {epoch}/{n_epochs}") break return train_loss_history, validation_loss_history train_loader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True) validation_loader = torch.utils.data.DataLoader(test_data, batch_size=64, shuffle=True) mlflow.set_experiment("Plant Leaf Disease") with mlflow.start_run(): mlflow.log_param('framework', 'pytorch') mlflow.log_param('data_split', '90/10') mlflow.log_param('type', 'DenseNet121') mlflow.log_params(config) train(model, train_loader, validation_loader, config, n_epochs=config['max_epochs'], stopping_treshold=15) ```
github_jupyter
<a href="https://colab.research.google.com/github/wileyw/DeepLearningDemos/blob/master/sound/simple_audio_working_vggish_dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Instructions 1. Get VGGish weights from [here](https://github.com/antoinemrcr/vggish2Keras) and convert to a file called vggish_weights.ckpt and upload this file to the colab notebook. If you don't upload vggish_weights.ckpt, vggish will be randomly initialized. 2. 
Create custom_dataset.zip with the folders "cough" and "background" # Simple audio recognition: Recognizing keywords <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/audio/simple_audio"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/audio/simple_audio.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/audio/simple_audio.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/audio/simple_audio.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial will show you how to build a basic speech recognition network that recognizes ten different words. It's important to know that real speech and audio recognition systems are much more complex, but like MNIST for images, it should give you a basic understanding of the techniques involved. Once you've completed this tutorial, you'll have a model that tries to classify a one second audio clip as "down", "go", "left", "no", "right", "stop", "up" and "yes". ``` !ls from google.colab import files import os if not os.path.exists('custom_dataset.zip'): files.upload() !unzip custom_dataset.zip !ls !git clone https://github.com/google-coral/project-keyword-spotter.git !ls project-keyword-spotter/ !cp project-keyword-spotter/mel_features.py . !ls import mel_features ``` ## Setup Import necessary modules and dependencies. 
``` import os import pathlib import matplotlib.pyplot as plt import numpy as np import seaborn as sns import tensorflow as tf from tensorflow.keras.layers.experimental import preprocessing from tensorflow.keras import layers from tensorflow.keras import models from IPython import display # Set seed for experiment reproducibility seed = 42 tf.random.set_seed(seed) np.random.seed(seed) ``` ## Import the Speech Commands dataset You'll write a script to download a portion of the [Speech Commands dataset](https://www.tensorflow.org/datasets/catalog/speech_commands). The original dataset consists of over 105,000 WAV audio files of people saying thirty different words. This data was collected by Google and released under a CC BY license, and you can help improve it by [contributing five minutes of your own voice](https://aiyprojects.withgoogle.com/open_speech_recording). You'll be using a portion of the dataset to save time with data loading. Extract the `mini_speech_commands.zip` and load it in using the `tf.data` API. ``` data_dir = pathlib.Path('data/mini_speech_commands') if not data_dir.exists(): tf.keras.utils.get_file( 'mini_speech_commands.zip', origin="http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip", extract=True, cache_dir='.', cache_subdir='data') ``` Check basic statistics about the dataset. 
``` !ls data/mini_speech_commands !mv data/mini_speech_commands data/mini_speech_commands.bak !mkdir data/mini_speech_commands !#cp -r data/mini_speech_commands.bak/left data/mini_speech_commands/left !#cp -r data/mini_speech_commands.bak/stop data/mini_speech_commands/stop !mkdir data/mini_speech_commands/unknown !#cp data/mini_speech_commands.bak/up/*.wav data/mini_speech_commands/unknown !#cp data/mini_speech_commands.bak/go/*.wav data/mini_speech_commands/unknown !#cp data/mini_speech_commands.bak/stop/*.wav data/mini_speech_commands/unknown !#cp data/mini_speech_commands.bak/no/*.wav data/mini_speech_commands/unknown !#cp data/mini_speech_commands.bak/yes/*.wav data/mini_speech_commands/unknown !#cp data/mini_speech_commands.bak/down/*.wav data/mini_speech_commands/unknown !cp custom_dataset/background/*.wav data/mini_speech_commands/unknown !mkdir data/mini_speech_commands/cough !cp custom_dataset/cough/*.wav data/mini_speech_commands/cough !ls data/mini_speech_commands/unknown commands = np.array(tf.io.gfile.listdir(str(data_dir))) commands = commands[commands != 'README.md'] print('Commands:', commands) ``` Extract the audio files into a list and shuffle it. ``` filenames = tf.io.gfile.glob(str(data_dir) + '/*/*') filenames = tf.random.shuffle(filenames) num_samples = len(filenames) print('Number of total examples:', num_samples) print('Number of examples per label:', len(tf.io.gfile.listdir(str(data_dir/commands[0])))) print('Example file tensor:', filenames[0]) ``` Split the files into training, validation and test sets using a 80:10:10 ratio, respectively. 
``` train_index = int(.8 * len(filenames)) val_index = int(.9 * len(filenames)) train_files = filenames[:train_index] val_files = filenames[train_index: val_index] test_files = filenames[val_index:] print('Training set size', len(train_files)) print('Validation set size', len(val_files)) print('Test set size', len(test_files)) ``` ## Reading audio files and their labels The audio file will initially be read as a binary file, which you'll want to convert into a numerical tensor. To load an audio file, you will use [`tf.audio.decode_wav`](https://www.tensorflow.org/api_docs/python/tf/audio/decode_wav), which returns the WAV-encoded audio as a Tensor and the sample rate. A WAV file contains time series data with a set number of samples per second. Each sample represents the amplitude of the audio signal at that specific time. In a 16-bit system, like the files in `mini_speech_commands`, the values range from -32768 to 32767. The sample rate for this dataset is 16kHz. Note that `tf.audio.decode_wav` will normalize the values to the range [-1.0, 1.0]. ``` def decode_audio(audio_binary): audio, _ = tf.audio.decode_wav(audio_binary) return tf.squeeze(audio, axis=-1) ``` The label for each WAV file is its parent directory. ``` def get_label(file_path): parts = tf.strings.split(file_path, os.path.sep) # Note: You'll use indexing here instead of tuple unpacking to enable this # to work in a TensorFlow graph. return parts[-2] ``` Let's define a method that will take in the filename of the WAV file and output a tuple containing the audio and labels for supervised training. ``` def get_waveform_and_label(file_path): label = get_label(file_path) audio_binary = tf.io.read_file(file_path) waveform = decode_audio(audio_binary) return waveform, label ``` You will now apply `process_path` to build your training set to extract the audio-label pairs and check the results. You'll build the validation and test sets using a similar procedure later on. 
``` AUTOTUNE = tf.data.AUTOTUNE files_ds = tf.data.Dataset.from_tensor_slices(train_files) waveform_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE) ``` Let's examine a few audio waveforms with their corresponding labels. ``` rows = 3 cols = 3 n = rows*cols fig, axes = plt.subplots(rows, cols, figsize=(10, 12)) for i, (audio, label) in enumerate(waveform_ds.take(n)): r = i // cols c = i % cols ax = axes[r][c] ax.plot(audio.numpy()) ax.set_yticks(np.arange(-1.2, 1.2, 0.2)) label = label.numpy().decode('utf-8') ax.set_title(label) plt.show() ``` ## Spectrogram You'll convert the waveform into a spectrogram, which shows frequency changes over time and can be represented as a 2D image. This can be done by applying the short-time Fourier transform (STFT) to convert the audio into the time-frequency domain. A Fourier transform ([`tf.signal.fft`](https://www.tensorflow.org/api_docs/python/tf/signal/fft)) converts a signal to its component frequencies, but loses all time information. The STFT ([`tf.signal.stft`](https://www.tensorflow.org/api_docs/python/tf/signal/stft)) splits the signal into windows of time and runs a Fourier transform on each window, preserving some time information, and returning a 2D tensor that you can run standard convolutions on. STFT produces an array of complex numbers representing magnitude and phase. However, you'll only need the magnitude for this tutorial, which can be derived by applying `tf.abs` on the output of `tf.signal.stft`. Choose `frame_length` and `frame_step` parameters such that the generated spectrogram "image" is almost square. For more information on STFT parameters choice, you can refer to [this video](https://www.coursera.org/lecture/audio-signal-processing/stft-2-tjEQe) on audio signal processing. You also want the waveforms to have the same length, so that when you convert it to a spectrogram image, the results will have similar dimensions. 
This can be done by simply zero padding the audio clips that are shorter than one second. ``` def get_spectrogram(waveform): # Padding for files with less than 16000 samples zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32) # Concatenate audio with padding so that all audio clips will be of the # same length waveform = tf.cast(waveform, tf.float32) equal_length = tf.concat([waveform, zero_padding], 0) spectrogram = tf.signal.stft( equal_length, frame_length=255, frame_step=128) spectrogram = tf.abs(spectrogram) return spectrogram import numpy as np class Uint8LogMelFeatureExtractor(object): """Provide uint8 log mel spectrogram slices from an AudioRecorder object. This class provides one public method, get_next_spectrogram(), which gets a specified number of spectral slices from an AudioRecorder. """ def __init__(self, num_frames_hop=48): self.spectrogram_window_length_seconds = 0.025 self.spectrogram_hop_length_seconds = 0.010 self.num_mel_bins = 64 #32 self.frame_length_spectra = 96 #98 if self.frame_length_spectra % num_frames_hop: raise ValueError('Invalid num_frames_hop value (%d), ' 'must devide %d' % (num_frames_hop, self.frame_length_spectra)) self.frame_hop_spectra = num_frames_hop self._norm_factor = 3 self._clear_buffers() def _clear_buffers(self): self._audio_buffer = np.array([], dtype=np.int16).reshape(0, 1) self._spectrogram = np.zeros((self.frame_length_spectra, self.num_mel_bins), dtype=np.float32) def _spectrogram_underlap_samples(self, audio_sample_rate_hz): return int((self.spectrogram_window_length_seconds - self.spectrogram_hop_length_seconds) * audio_sample_rate_hz) def _frame_duration_seconds(self, num_spectra): return (self.spectrogram_window_length_seconds + (num_spectra - 1) * self.spectrogram_hop_length_seconds) def compute_spectrogram_and_normalize(self, audio_samples, audio_sample_rate_hz): spectrogram = self._compute_spectrogram(audio_samples, audio_sample_rate_hz) spectrogram -= np.mean(spectrogram, axis=0) if 
self._norm_factor: spectrogram /= self._norm_factor * np.std(spectrogram, axis=0) spectrogram += 1 spectrogram *= 127.5 return np.maximum(0, np.minimum(255, spectrogram)).astype(np.float32) def _compute_spectrogram(self, audio_samples, audio_sample_rate_hz): """Compute log-mel spectrogram and scale it to uint8.""" samples = audio_samples.flatten() / float(2**15) spectrogram = 30 * ( mel_features.log_mel_spectrogram( samples, audio_sample_rate_hz, log_offset=0.001, window_length_secs=self.spectrogram_window_length_seconds, hop_length_secs=self.spectrogram_hop_length_seconds, num_mel_bins=self.num_mel_bins, lower_edge_hertz=60, upper_edge_hertz=3800) - np.log(1e-3)) return spectrogram def _get_next_spectra(self, recorder, num_spectra): """Returns the next spectrogram. Compute num_spectra spectrogram samples from an AudioRecorder. Blocks until num_spectra spectrogram slices are available. Args: recorder: an AudioRecorder object from which to get raw audio samples. num_spectra: the number of spectrogram slices to return. Returns: num_spectra spectrogram slices computed from the samples. """ required_audio_duration_seconds = self._frame_duration_seconds(num_spectra) logger.info("required_audio_duration_seconds %f", required_audio_duration_seconds) required_num_samples = int( np.ceil(required_audio_duration_seconds * recorder.audio_sample_rate_hz)) logger.info("required_num_samples %d, %s", required_num_samples, str(self._audio_buffer.shape)) audio_samples = np.concatenate( (self._audio_buffer, recorder.get_audio(required_num_samples - len(self._audio_buffer))[0])) self._audio_buffer = audio_samples[ required_num_samples - self._spectrogram_underlap_samples(recorder.audio_sample_rate_hz):] spectrogram = self._compute_spectrogram( audio_samples[:required_num_samples], recorder.audio_sample_rate_hz) assert len(spectrogram) == num_spectra return spectrogram def get_next_spectrogram(self, recorder): """Get the most recent spectrogram frame. 
Blocks until the frame is available. Args: recorder: an AudioRecorder instance which provides the audio samples. Returns: The next spectrogram frame as a uint8 numpy array. """ assert recorder.is_active logger.info("self._spectrogram shape %s", str(self._spectrogram.shape)) self._spectrogram[:-self.frame_hop_spectra] = ( self._spectrogram[self.frame_hop_spectra:]) self._spectrogram[-self.frame_hop_spectra:] = ( self._get_next_spectra(recorder, self.frame_hop_spectra)) # Return a copy of the internal state that's safe to persist and won't # change the next time we call this function. logger.info("self._spectrogram shape %s", str(self._spectrogram.shape)) spectrogram = self._spectrogram.copy() spectrogram -= np.mean(spectrogram, axis=0) if self._norm_factor: spectrogram /= self._norm_factor * np.std(spectrogram, axis=0) spectrogram += 1 spectrogram *= 127.5 return np.maximum(0, np.minimum(255, spectrogram)).astype(np.uint8) feature_extractor = Uint8LogMelFeatureExtractor() def get_spectrogram2(waveform): """ # Padding for files with less than 16000 samples zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32) # Concatenate audio with padding so that all audio clips will be of the # same length waveform = tf.cast(waveform, tf.float32) equal_length = tf.concat([waveform, zero_padding], 0) spectrogram = tf.signal.stft( equal_length, frame_length=255, frame_step=128) spectrogram = tf.abs(spectrogram) return spectrogram """ waveform = waveform.numpy() #print(waveform.shape) #print(type(waveform)) spectrogram = feature_extractor.compute_spectrogram_and_normalize(waveform[:15680], 16000) return spectrogram for waveform, label in waveform_ds.take(1): label2 = label.numpy().decode('utf-8') spectrogram2 = get_spectrogram2(waveform) print('Label:', label2) print('Waveform shape:', waveform.shape) print('Spectrogram shape:', spectrogram2.shape) print('Spectrogram type:', spectrogram2.dtype) ``` Next, you will explore the data. 
Compare the waveform, the spectrogram and the actual audio of one example from the dataset. ``` for waveform, label in waveform_ds.take(1): label = label.numpy().decode('utf-8') print(waveform.shape) spectrogram = get_spectrogram(waveform) print('Label:', label) print('Waveform shape:', waveform.shape) print('Spectrogram shape:', spectrogram.shape) print('Audio playback') print('Spectrogram type:', spectrogram.dtype) display.display(display.Audio(waveform, rate=16000)) def plot_spectrogram(spectrogram, ax): # Convert to frequencies to log scale and transpose so that the time is # represented in the x-axis (columns). log_spec = np.log(spectrogram.T) height = log_spec.shape[0] X = np.arange(16000, step=height + 1) Y = range(height) ax.pcolormesh(X, Y, log_spec) fig, axes = plt.subplots(2, figsize=(12, 8)) timescale = np.arange(waveform.shape[0]) axes[0].plot(timescale, waveform.numpy()) axes[0].set_title('Waveform') axes[0].set_xlim([0, 16000]) plot_spectrogram(spectrogram.numpy(), axes[1]) axes[1].set_title('Spectrogram') plt.show() ``` Now transform the waveform dataset to have spectrogram images and their corresponding labels as integer IDs. ``` def get_spectrogram_and_label_id(audio, label): spectrogram = get_spectrogram(audio) spectrogram = tf.expand_dims(spectrogram, -1) label_id = tf.argmax(label == commands) return spectrogram, label_id spectrogram_ds = waveform_ds.map( get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE) ``` Examine the spectrogram "images" for different samples of the dataset. ``` rows = 3 cols = 3 n = rows*cols fig, axes = plt.subplots(rows, cols, figsize=(10, 10)) for i, (spectrogram, label_id) in enumerate(spectrogram_ds.take(n)): r = i // cols c = i % cols ax = axes[r][c] plot_spectrogram(np.squeeze(spectrogram.numpy()), ax) ax.set_title(commands[label_id.numpy()]) ax.axis('off') plt.show() ``` ## Build and train the model Now you can build and train your model. 
But before you do that, you'll need to repeat the training set preprocessing on the validation and test sets. ``` def preprocess_dataset(files): files_ds = tf.data.Dataset.from_tensor_slices(files) output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE) output_ds = output_ds.map( get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE) return output_ds train_ds = spectrogram_ds val_ds = preprocess_dataset(val_files) test_ds = preprocess_dataset(test_files) def only_load_dataset(files): files_ds = tf.data.Dataset.from_tensor_slices(files) output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE) return output_ds train_waveform_data = only_load_dataset(train_files) val_waveform_data = only_load_dataset(val_files) test_waveform_data = only_load_dataset(test_files) ``` Batch the training and validation sets for model training. ``` batch_size = 64 train_ds = train_ds.batch(batch_size) val_ds = val_ds.batch(batch_size) ``` Add dataset [`cache()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#cache) and [`prefetch()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch) operations to reduce read latency while training the model. ``` train_ds = train_ds.cache().prefetch(AUTOTUNE) val_ds = val_ds.cache().prefetch(AUTOTUNE) ``` For the model, you'll use a simple convolutional neural network (CNN), since you have transformed the audio files into spectrogram images. The model also has the following additional preprocessing layers: - A [`Resizing`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Resizing) layer to downsample the input to enable the model to train faster. - A [`Normalization`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Normalization) layer to normalize each pixel in the image based on its mean and standard deviation. 
For the `Normalization` layer, its `adapt` method would first need to be called on the training data in order to compute aggregate statistics (i.e. mean and standard deviation). ``` #for spectrogram, _ in spectrogram_ds.take(1): # input_shape = spectrogram.shape for data_item, label in train_waveform_data.take(10): spectrogram = feature_extractor.compute_spectrogram_and_normalize(data_item.numpy()[:15680], 16000) print(spectrogram.shape) if spectrogram.shape[0] != 96: continue input_shape = (spectrogram.shape[0], spectrogram.shape[1], 1) print('Input shape:', input_shape) num_labels = len(commands) norm_layer = preprocessing.Normalization() norm_layer.adapt(spectrogram_ds.map(lambda x, _: x)) #preprocessing.Resizing(32, 32), model = models.Sequential([ layers.Input(shape=input_shape), norm_layer, layers.Conv2D(32, 3, activation='relu'), layers.Conv2D(64, 3, activation='relu'), layers.MaxPooling2D(), layers.Dropout(0.25), layers.Flatten(), layers.Dense(128, activation='relu'), layers.Dropout(0.5), layers.Dense(num_labels), ]) model.summary() # https://github.com/antoinemrcr/vggish2Keras from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten from tensorflow.keras.models import Model def get_vggish_keras(): NUM_FRAMES = 96 # Frames in input mel-spectrogram patch NUM_BANDS = 64 # Frequency bands in input mel-spectrogram patch EMBEDDING_SIZE = 128 # Size of embedding layer input_shape = (NUM_FRAMES,NUM_BANDS,1) img_input = Input( shape=input_shape) # Block 1 x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1')(img_input) x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x) # Block 2 x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x) # Block 3 x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), 
name='pool3')(x) # Block 4 x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x) # Block fc x = Flatten(name='flatten')(x) x = Dense(4096, activation='relu', name='fc1_1')(x) x = Dense(4096, activation='relu', name='fc1_2')(x) x = Dense(EMBEDDING_SIZE, activation='relu', name='fc2')(x) model = Model(img_input, x, name='vggish') return model model_vggish = get_vggish_keras() model_vggish.summary() !ls !du -sh vggish_weights.ckpt # The file should be around 275M checkpoint_path = 'vggish_weights.ckpt' if os.path.exists(checkpoint_path): print('Loading VGGish Checkpoint Path') model_vggish.load_weights(checkpoint_path) else: print('{} not detected, weights not loaded'.format(checkpoint_path)) new_model = tf.keras.Sequential() model_vggish.trainable = False new_model.add(model_vggish) new_model.add(layers.Dense(2, name='last')) new_model.summary() model = new_model model.compile( optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'], ) new_train_data = [] new_train_labels = [] new_val_data = [] new_val_labels = [] new_test_data = [] new_test_labels = [] for data_item, label in train_waveform_data: spectrogram = feature_extractor.compute_spectrogram_and_normalize(data_item.numpy()[:15680], 16000) label = label.numpy().decode('utf-8') label_id = tf.argmax(label == commands) # NOTE: Spectrogram shape is not always the same if spectrogram.shape[0] != 96: continue new_train_data.append(spectrogram) new_train_labels.append(label_id) for data_item, label in val_waveform_data: spectrogram = feature_extractor.compute_spectrogram_and_normalize(data_item.numpy()[:15680], 16000) label = label.numpy().decode('utf-8') label_id = tf.argmax(label == commands) if spectrogram.shape[0] != 96: continue new_val_data.append(spectrogram) 
new_val_labels.append(label_id) for data_item, label in test_waveform_data: spectrogram = feature_extractor.compute_spectrogram_and_normalize(data_item.numpy()[:15680], 16000) label = label.numpy().decode('utf-8') label_id = tf.argmax(label == commands) if spectrogram.shape[0] != 96: continue new_test_data.append(spectrogram) new_test_labels.append(label_id) new_train_data = np.array(new_train_data).astype('float32') new_val_data = np.array(new_val_data).astype('float32') new_test_data = np.array(new_test_data).astype('float32') new_train_labels = np.array(new_train_labels) new_val_labels = np.array(new_val_labels) new_test_labels = np.array(new_test_labels) # (1, 98, 32, 1) new_train_data = np.expand_dims(new_train_data, axis=3) new_val_data = np.expand_dims(new_val_data, axis=3) new_test_data = np.expand_dims(new_test_data, axis=3) print('--------') print(new_train_data.shape) print(new_val_data.shape) print(new_test_data.shape) print(new_train_labels.shape) print(new_val_labels.shape) print(new_test_labels.shape) print('--------') EPOCHS = 30 * 4 #history = model.fit( # train_ds, # validation_data=val_ds, # epochs=EPOCHS, # callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2), #) history = model.fit( new_train_data, new_train_labels, validation_data=(new_val_data, new_val_labels), epochs=EPOCHS, #callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2), ) ``` Let's check the training and validation loss curves to see how your model has improved during training. ``` metrics = history.history plt.plot(history.epoch, metrics['loss'], metrics['val_loss']) plt.legend(['loss', 'val_loss']) plt.show() ``` ## Evaluate test set performance Let's run the model on the test set and check performance. 
``` #test_audio = [] #test_labels = [] #for audio, label in test_ds: # test_audio.append(audio.numpy()) # test_labels.append(label.numpy()) #test_audio = np.array(test_audio) #test_labels = np.array(test_labels) test_audio = new_test_data test_labels = new_test_labels y_pred = np.argmax(model.predict(test_audio), axis=1) y_true = test_labels test_acc = sum(y_pred == y_true) / len(y_true) print(f'Test set accuracy: {test_acc:.0%}') ``` ### Display a confusion matrix A confusion matrix is helpful to see how well the model did on each of the commands in the test set. ``` confusion_mtx = tf.math.confusion_matrix(y_true, y_pred) plt.figure(figsize=(10, 8)) sns.heatmap(confusion_mtx, xticklabels=commands, yticklabels=commands, annot=True, fmt='g') plt.xlabel('Prediction') plt.ylabel('Label') plt.show() ``` ## Run inference on an audio file Finally, verify the model's prediction output using an input audio file of someone saying "no." How well does your model perform? ``` !ls data/mini_speech_commands/cough #sample_file = data_dir/'no/01bb6a2a_nohash_0.wav' #sample_file = data_dir/'left/b46e8153_nohash_0.wav' #sample_file = data_dir/'no/ac7840d8_nohash_1.wav' #sample_file = data_dir/'no/5588c7e6_nohash_0.wav' #sample_file = data_dir/'up/52e228e9_nohash_0.wav' sample_file = data_dir/'cough/pos-0422-096-cough-m-31-8.wav' #sample_ds = preprocess_dataset([str(sample_file)]) X = only_load_dataset([str(sample_file)]) for waveform, label in X.take(1): label = label.numpy().decode('utf-8') print(waveform, label) spectrogram = feature_extractor.compute_spectrogram_and_normalize(waveform.numpy()[:15680], 16000) # NOTE: Dimensions need to be expanded spectrogram = np.expand_dims(spectrogram, axis=-1) spectrogram = np.expand_dims(spectrogram, axis=0) print(spectrogram.shape) prediction = model(spectrogram) print(prediction.shape) plt.bar(commands, tf.nn.softmax(prediction[0])) plt.title(f'Predictions for "{label}"') plt.show() #for spectrogram, label in sample_ds.batch(1): # 
prediction = model(spectrogram) # plt.bar(commands, tf.nn.softmax(prediction[0])) # plt.title(f'Predictions for "{commands[label[0]]}"') # plt.show() print(model) converter = tf.lite.TFLiteConverter.from_keras_model(model) tflite_model = converter.convert() # Save the model. with open('model.tflite', 'wb') as f: f.write(tflite_model) ! curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - ! echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list ! sudo apt-get update ! sudo apt-get install edgetpu-compiler # Define representative dataset print(new_test_data.shape) def representative_dataset(): yield [new_test_data] # Add quantization in order to run on the EdgeTPU converter2 = tf.lite.TFLiteConverter.from_keras_model(model) converter2.optimizations = [tf.lite.Optimize.DEFAULT] converter2.representative_dataset = representative_dataset tflite_quant_model = converter2.convert() with open('model_quantized.tflite', 'wb') as f: f.write(tflite_quant_model) !edgetpu_compiler model_quantized.tflite !ls -l !ls -l # https://www.tensorflow.org/lite/guide/inference interpreter = tf.lite.Interpreter(model_path="model.tflite") interpreter.allocate_tensors() # Get input and output tensors. input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() print(input_details) print(output_details) # Test the model on random input data. input_shape = input_details[0]['shape'] input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() # The function `get_tensor()` returns a copy of the tensor data. # Use `tensor()` in order to get a pointer to the tensor. 
output_data = interpreter.get_tensor(output_details[0]['index']) print(output_data) #sample_file = data_dir/'no/01bb6a2a_nohash_0.wav' #sample_file = data_dir/'left/b46e8153_nohash_0.wav' sample_file = data_dir/'cough/pos-0422-096-cough-m-31-8.wav' #sample_ds = preprocess_dataset([str(sample_file)]) #waveform, label = get_waveform_and_label(sample_file) #spectrogram = feature_extractor._compute_spectrogram(waveform, 16000) X = only_load_dataset([str(sample_file)]) for waveform, label in X.take(1): label = label.numpy().decode('utf-8') spectrogram = feature_extractor.compute_spectrogram_and_normalize(waveform.numpy()[:15680], 16000) spectrogram = np.expand_dims(spectrogram, axis=-1) spectrogram = np.expand_dims(spectrogram, axis=0) print('Original--------------------') print(spectrogram.shape) prediction = model(spectrogram) print(prediction) print('TFLITE--------------------') # NOTE: dtype needs to be np.float32 input_data = np.array(spectrogram, dtype=np.float32) print(input_data.shape) interpreter.set_tensor(input_details[0]['index'], input_data) interpreter.invoke() prediction2 = interpreter.get_tensor(output_details[0]['index']) print(prediction2) print(np.argmax(np.array(prediction).flatten())) print(np.argmax(np.array(prediction2).flatten())) # NOTE: Remember to add softmax after the prediction plt.bar(commands, tf.nn.softmax(prediction[0])) plt.title(f'Predictions for "{label}"') plt.show() plt.imshow(np.squeeze(spectrogram).T) plt.show() ``` You can see that your model very clearly recognized the audio command as "no." ``` from google.colab import files files.download('model.tflite') from google.colab import files files.download('model_quantized_edgetpu.tflite') ``` ## Next steps This tutorial showed how you could do simple audio classification using a convolutional neural network with TensorFlow and Python. 
* To learn how to use transfer learning for audio classification, check out the [Sound classification with YAMNet](https://www.tensorflow.org/hub/tutorials/yamnet) tutorial. * To build your own interactive web app for audio classification, consider taking the [TensorFlow.js - Audio recognition using transfer learning codelab](https://codelabs.developers.google.com/codelabs/tensorflowjs-audio-codelab/index.html#0). * TensorFlow also has additional support for [audio data preparation and augmentation](https://www.tensorflow.org/io/tutorials/audio) to help with your own audio-based projects.
github_jupyter
## FULLY-CONNECTED NETWORK WITH BATCHNORM, ORGANIZED INTO CLASSES, FROM [HERE](https://github.com/hunkim/DeepLearningZeroToAll/blob/master/lab-10-6-mnist_nn_batchnorm.ipynb)
input_dim, output_dim, use_batchnorm=True) sess = tf.InteractiveSession() solver = Solver(sess, model) # TRAIN solver.train(x, y) # EVALUATE solver.evaluate(x, y, _batch_size=128) """ def __init__(self, _sess, _model): self.model = _model self.sess = _sess # DEFINE TRAIN FUNCTION def train(self, _x, _y): feed = { self.model.x: _x, self.model.y: _y, self.model.istrain: True } optm = self.model.optm loss = self.model.loss return self.sess.run([optm, loss], feed_dict=feed) # DEFINE EVALUATE FUNCTION def evaluate(self, _x, _y, _batch_size=None): if _batch_size: N = _x.shape[0] total_loss = 0 total_accr = 0 for i in range(0, N, _batch_size): x_batch = _x[i:i+_batch_size] # DON'T WORRY ABOUT OUT OF INDEX y_batch = _y[i:i+_batch_size] feed = { self.model.x: x_batch, self.model.y: y_batch, self.model.istrain: False } loss = self.model.loss accr = self.model.accr step_loss, step_accr = self.sess.run([loss, accr], feed_dict=feed) total_loss += step_loss * x_batch.shape[0] total_accr += step_accr * x_batch.shape[0] total_loss /= N total_accr /= N return total_loss, total_accr else: feed = { self.model.x: _x, self.model.y: _y, self.model.istrain: False } loss = self.model.loss accr = self.model.accr return self.sess.run([loss, accr], feed_dict=feed) ``` ## INSTANTIATE MODEL/SOLVER ``` tf.reset_default_graph() sess = tf.InteractiveSession() # CREATE TWO MODELS WITH AND WITHOUT BATCHNORM input_dim = 784 output_dim = 10 bn = Model('BATCHNORM', input_dim, output_dim, hidden_dims=[128, 64], use_batchnorm=True) nn = Model('NO_NORM', input_dim, output_dim, hidden_dims=[128, 64], use_batchnorm=False) # CREATE TWO CORRESPONDING SOLVERS bn_solver = Solver(sess, bn) nn_solver = Solver(sess, nn) print("MODELS AND SOLVERS READY") ``` ## RUN ``` epoch_n = 10 batch_size = 32 mnist = input_data.read_data_sets("data/", one_hot=True) tr_img = mnist.train.images tr_label = mnist.train.labels val_img = mnist.validation.images val_label = mnist.validation.labels # SAVE LOSSES AND ACCURACIES 
tr_losses = [] tr_accrs = [] val_losses = [] val_accrs = [] # INITALIZE sess.run(tf.global_variables_initializer()) # OPTIMIZE for epoch in range(epoch_n): # OPTIMIZE for _ in range(mnist.train.num_examples//batch_size): X_batch, y_batch = mnist.train.next_batch(batch_size) _, bn_loss = bn_solver.train(X_batch, y_batch) _, nn_loss = nn_solver.train(X_batch, y_batch) # COMPUTE TRAIN LOSSES AND ACCUARACIES b_train_loss, b_train_accr = bn_solver.evaluate(tr_img, tr_label, batch_size) n_train_loss, n_train_accr = nn_solver.evaluate(tr_img, tr_label, batch_size) # COMPUTE VALIDATION LOSSES AND ACCUARACIES b_val_loss, b_val_accr = bn_solver.evaluate(val_img, val_label) n_val_loss, n_val_accr = nn_solver.evaluate(val_img, val_label) # SAVE THEM tr_losses.append([b_train_loss, n_train_loss]) tr_accrs.append([b_train_accr, n_train_accr]) val_losses.append([b_val_loss, n_val_loss]) val_accrs.append([b_val_accr, n_val_accr]) # PRINT print ("[%d/%d] [TRAIN] BATCHNORM: %.4f (%.4f) vs. NO-NORM: %.4f (%.4f)" % (epoch, epoch_n, b_train_loss, b_train_accr, n_train_loss, n_train_accr)) print (" [VALIDATION] BATCHNORM: %.4f (%.4f) vs. NO-NORM: %.4f (%.4f)" % (b_val_loss, b_val_accr, n_val_loss, n_val_accr)) print ("OPTIMIZATION FINISHED") ``` ## COMPUTE TEST ACCURACY ``` b_test_loss, b_test_accr = bn_solver.evaluate(mnist.test.images, mnist.test.labels) n_test_loss, n_test_accr = nn_solver.evaluate(mnist.test.images, mnist.test.labels) print ("[TEST] BATCHNORM: %.4f (%.4f) vs. 
NO-NORM: %.4f (%.4f)" % (b_test_loss, b_test_accr, n_test_loss, n_test_accr)) ``` ## PLOT COMPARISON ``` def plot_comp(val_list, ylim=None, title=None, loc=None): bn = [i[0] for i in val_list] nn = [i[1] for i in val_list] plt.figure(figsize=(8, 5)) plt.plot(bn, label='With BN') plt.plot(nn, label='Without BN') if ylim: plt.ylim(ylim) if title: plt.title(title) plt.legend(loc=loc) plt.grid('on') plt.show() ``` ## LOSSES ``` plot_comp(tr_losses, title="TRAINING LOSS") plot_comp(val_losses, title="VALIDATION LOSS") ``` ## ACCURACY ``` plot_comp(tr_accrs, title="TRAINING ACCURACY", loc=4) plot_comp(val_accrs, title="VALIDATION ACCURACY", loc=4) ```
github_jupyter
``` import os import pandas as pd import numpy as np pd.options.display.max_rows = 500 pd.options.display.max_columns = 500 from matplotlib import pyplot as plt %matplotlib inline # Vega lite spec builders import vincent import altair import vega help(vega) def replace_misspelled(region_name, misspelled, replace): if region_name == misspelled: return(replace) else: return(region_name) root = "/Users/nathansuberi/Downloads/" gcam_results = pd.read_csv(root + "GCAM_Time_Series.csv") gcam_results["Region"] = gcam_results.apply(lambda row: replace_misspelled(row["Region"],"Australis/New Zealand","Australia/New Zealand"), axis=1) scenarios = np.unique(gcam_results["Scenario"]) regions = np.unique(gcam_results["Region"]) esp_indicator = np.unique(gcam_results["ESP Indicator Name"]) units = np.unique(gcam_results["Unit of Entry"]) regions res_and_comm_indicators = [elem for elem in np.unique(gcam_results["ESP Indicator Name"]) if "Energy|Residential" in elem] res_and_comm_total = ["Final Energy|Residential and Commercial"] paris_res_and_comm_results = gcam_results.loc[ gcam_results["ESP Indicator Name"].isin(res_and_comm_total) & gcam_results["Scenario"].isin(["Paris"])] paris_plus_res_and_comm_results = gcam_results.loc[ gcam_results["ESP Indicator Name"].isin(res_and_comm_total) & gcam_results["Scenario"].isin(["Paris_plus"])] res_and_comm_results = gcam_results.loc[ gcam_results["ESP Indicator Name"].isin(res_and_comm_total)].sort_values(by=["Region", "Scenario"]) tuples = list(zip(*[res_and_comm_results["Region"], res_and_comm_results["Scenario"]])) index = pd.MultiIndex.from_tuples(tuples, names=['Region', 'Scenario']) res_and_comm_results = res_and_comm_results.drop(["Scenario", "Region", "Model", "ESP Indicator Name", "Unit of Entry"], axis=1) res_and_comm_results.index = index res_and_comm_results # Now use this to create charts comparing these scenarios ### Experimentation below paris_plus_res_and_comm_results = 
paris_plus_res_and_comm_results.set_index("Region").transpose().drop(["Model", "Scenario", "ESP Indicator Name", "Unit of Entry"]) paris_res_and_comm_results = paris_res_and_comm_results.set_index("Region").transpose().drop(["Model", "Scenario", "ESP Indicator Name", "Unit of Entry"]) paris_res_and_comm_results # Join the two tables, use the scenario as a subscript to column names joined_results = paris_plus_res_and_comm_results.join(paris_res_and_comm_results, lsuffix='_paris_plus', rsuffix='_paris') joined_results # Want as a multi-index joined_results = joined_results.drop(["Model_paris_plus", "Model_paris", "ESP Indicator Name_paris_plus", "ESP Indicator Name_paris", "Scenario_paris_plus", "Scenario_paris", "Unit of Entry_paris_plus", "Unit of Entry_paris"], axis=1) joined_results = joined_results.transpose() joined_results arrays = [ ['sun', 'sun', 'sun', 'moon', 'moon', 'moon', 'moon', 'moon'], ['summer', 'winter', 'winter', 'summer', 'summer', 'summer', 'winter', 'winter'], ['one', 'one', 'two', 'one', 'two', 'three', 'one', 'two']] tuples = list(zip(*arrays)) index = pd.MultiIndex.from_tuples(tuples, names=['Body', 'Season','Item']) df1 = pd.DataFrame(np.random.randn(8,2), index=index,columns=['A','B']) df1 df1.reset_index().set_index(df1.index.names) ```
github_jupyter
# Programación lineal, algoritmo símplex ## Introducción El método de programación lineal ha sido un método sumamente utilizado para matemática avanzada y ciencias avanzadas, brindando solución a problemas de máximos y mínimos, ya que este algoritmo nos presenta distintos métodos de solución, siendo el más utilizado el método simplex, pues nos ayuda a encontrar la solución de forma rápida y acertada; por tanto, en este documento demostraremos de forma detallada la realización del mismo, cómo se implementa, las funcionalidades que este puede presentar, y un ejemplo de cómo utilizarlo. ## Resultados teóricos del algoritmo simplex ### Utilidad El algoritmo simplex es el método clásico para resolver programas lineales. ### Funcionamiento #### Prerrequisitos * El problema tiene que introducirse en la forma estándar para que el programa funcione * Las soluciones de las inecuaciones tienen que ser positivas #### Pasos 1. Para utilizar el algoritmo simplex, debemos convertir el programa lineal a forma floja (slack form) 2. Nos enfocamos en la solución básica: establecemos todas las variables (no básicas) en el lado derecho en 0 y luego calculamos los valores de las variables (básicas) en el lado izquierdo 3. Para cambiar la solución básica, seleccionamos una variable no básica $x_{e}$ cuyo coeficiente en la función objetivo es positivo, y lo incrementamos tanto como las ecuaciones lo permitan; posteriormente, la variable $x_{e}$ pasa a ser una variable básica y otra variable tipo $x_{l}$ deja de ser básica, así que aplicaremos esa lógica a nuestro ejemplo, utilizaremos $x_{1}$ para empezar. 4. 
Repetir el paso 3, hasta que la función objetivo tenga todos sus coeficientes negativos, lo que significaría que la solución está optimizada ### Demostración de convergencia del algoritmo Empezamos con la forma canónica del problema lineal del cual partiremos \begin{array}{l} \text {(I): Minimizar z con: } \\ x_{1} + \quad + \quad ··· \quad \quad+ a_{1,m+1} x_{m+1} + ··· + a_{1,n} x_{n} = b_{1}\\ \quad \quad x_{2} + \quad ···\quad \quad + a_{2,m+1} x_{m+1} + ··· + a_{2,n} x_{n} = b_{2}\\ \quad \quad \quad ·\\ \quad \quad \quad \quad ·\\ \quad \quad \quad \quad \quad ·\\ \quad \quad \quad \quad \quad \quad \quad x_{m} + a_{m,m+1} x_{m+1} + ··· + a_{m,n} x_{n} = b_{m}\\ \quad \quad \quad \quad \quad \quad \quad \quad + c_{m+1} x_{m+1} + ··· + c_{n} x_{n} = z_{0} + z\\ x_{1}, \quad x_{2}, \quad··· x_{m}, \quad x_{m+1}, \quad ··· \quad x_{n} \geq 0 \end{array} Si (I) es no degenerado, eso significa que ya se ha probado la convergencia, así que hay que considerar el caso degenerado y construir una prueba en base a eso. Notación y terminología: * Se utilizará $b*_{i}$, $a*_{ij}$ para denotar coeficientes que han pasado por el paso del $\textbf{pivote}$. * $\textbf{Degenerado o degeneración}$: Este término se utiliza cuando una variable básica toma el valor de 0. ##### Lema A (i) Si $b_{i} = 0$ para toda i, entonces después de hacer la operación de pivote, todavía tendremos $b*_{i} = 0$ para todo i.$\\$ (ii) Si al menos una de las $b_{i} \neq 0$, entonces al menos un $b*_{i} \neq 0$ ##### Lema B Asuma que al menos una de las $b_{i} \neq 0$ y hay una secuencia de pasos de pivote que completa la solución. Entonces, si reemplazamos todas las $b_{i}$ por 0, la misma secuencia puede ser usada para completar este nuevo problema. ##### Teorema C (convergencia) Para el programa lineal en (I), existe una secuencia de pasos de pivote que completa la solución. Esto significa que alcanzaremos ya sea el teorema O o el teorema U. 
Que establecen: Teorema O: Si todo $c*_{i} \geq 0$ entonces $z_{min} = -z*_{0}$. Teorema U: Si para algún $c*_{k} < 0$ tenemos $a*_{ik} \leq 0$ para todo i, entonces $z_{min}$ no tiene límite inferior (no tiene mínimo). ##### Probando el teorema C ... por inducción en m: # de ecuaciones con restricciones. $\textbf{Caso base}$: m = 1, solo una ecuación. \begin{array}{l} x_{1} + a_{1,m+1} x_{m+1} + ··· + a_{1,n} x_{n} = b_{1}\\ \quad \quad c_{m+1} x_{m+1} + ··· + c_{n} x_{n} = z_{0} + z\\ \end{array} (a): Si $b_{1} \neq 0:$ El lema A implica $b*_{1} \neq 0$, no degenerado.$\\$ (b): Si $b_{1} = 0$: El lema B dice que se toman los mismos pasos que en (a) para resolverlo $\textbf{Paso inductivo}$: Asumimos que la declaración se mantiene para el programa lineal con m-1 o menos ecuaciones. Necesitamos probar la declaración para el programa lineal con m ecuaciones. $\textbf{A}$. Asumir para m ecuaciones, que al menos un $b*_{i} \neq 0$. $\\$ (i). Aplicar simplex, hasta que ya no podamos aplicar pivote para reducir z, es decir: Cuando el problema se haya resuelto. O, debido a degeneración (algunos $b_{i}$ son 0). (ii). Sea r = # de $b_{i}$ que son 0. El lema A dice que r<m. Reordenar las ecuaciones y variables tales que $b_{i} = 0$ para $ 1 \leq i \leq r$ y $b_{i} > 0$ para $i>r$. \begin{array}{l} \text {PL (II)} \\ x_{1} \quad \quad \quad \quad+ a_{1,m+1} x_{m+1} + ··· + a_{1,n} x_{n} = 0 \quad (\ast) \\ \quad \quad x_{r} \quad \quad \quad + a_{r,m+1} x_{m+1} + ··· + a_{r,n} x_{n} = 0 \quad (\ast \ast) \\ \quad \quad \quad \quad x_{r+1} + a_{r+1,m+1} x_{m+1} + ··· + a_{r+1,n} x_{n} = b_{r+1} > 0\\ \quad \quad \quad \quad \quad x_{m} + a_{m,m+1} x_{m+1} + ··· + a_{m,n} x_{n} = b_{m} > 0\\ \quad \quad \quad \quad \quad \quad c_{m+1} x_{m+1} + ··· + c_{n} x_{n} = z_{0} + z \quad \quad (O) \\ \end{array} (iii) considere ($*$)-($**$): en la forma canónica con BV ($x_{1},···,x_{r}$), y ($x_{r+1},···,x_{n}$) no aparece en ($*$)-($**$). 
Por hipótesis de inducción, el programa lineal ($*$)-($**$) + (O) puede ser resuelto por cierta cantidad de pasos pivote. (iv) Aplicamos los mismos pasos de pivote a LP(II). El sistema resultante es canónico y $z_{0}$ no cambia. Ahora tenemos 2 posibles casos: * Todos los $c*_{j} \geq 0$: aplicar el teorema O, se ha alcanzado el mínimo. * Para algún $c*_{j} < 0$ y $a*_{ik} \leq 0$ para $1 \leq i \leq r$. (a) Tenemos que $a*_{ik} \leq 0$ para $i>r$. Aplicando el teorema U, podemos concluir que el mínimo no tiene límite. (b) Tenemos que $a*_{ik} > 0$ para algún $i>r$. Entonces, hacemos el paso pivote. Como $b_{i}>0$ para $i>r$, reduciremos estrictamente $-z_{0}$ después del paso pivote. ##### Uniéndolo todo: Siguiendo los pasos (i), (ii), (iii) y (iv), tenemos una secuencia de pivote que: o completa el problema, o reduce estrictamente $-z_{0}$. Repetir (i)-(iv) múltiples veces. En una cantidad finita de veces, el método Simplex será completado. $\textbf{B}$. Finalmente, si todos los $b_{i}$ son 0, por el lema B, la misma secuencia que en el paso (A) completará el procedimiento. ### Eficiencia El algoritmo simplex tiene complejidad exponencial en el peor caso; sin embargo, con los inputs adecuados este algoritmo suele ser bastante rápido, incluso podría ser más rápido que algunos algoritmos de tipo polinomial ### Ventajas * Funciona para modelos de n variables * Mediante el método de simplex regresa la respuesta más óptima. * Ubica todos los puntos extremos * Es un método rápido ### Desventajas * Ejecuta el procedimiento por pasos, por lo que una línea mala significa el resto del proceso malo (Desventaja de aplicación) * No acepta funciones que tengan solución en el origen (al realizar el método de simplex). * Se complica el cálculo a la inversa o multiplicación de matrices. * Cae en ciclo si no se establecen bien las restricciones, volviendo el programa ineficaz (simplex). 
### Ejemplo teórico Considere el siguiente programa lineal en forma estándar \begin{array}{l} \text { maximizar: } \\ \quad3 x_{1}+x_{2}+2 x_{3}\\ \text { Condiciones : }\\ \quad x_{1}+x_{2}+3 x_{3} \leq 30 \\ \quad 2 x_{1}+2 x_{2}+5 x_{3} \leq 24 \\ \quad 4 x_{1}+x_{2}+2 x_{3} \leq 36 \\ \quad x_{1}, x_{2}, x_{3} \geq 0 \end{array} 1. Para utilizar el algoritmo simplex, debemos convertir el programa lineal a forma floja (slack form) \begin{array}{l} z= 3 x_{1}+x_{2}+2 x_{3} \\ x_{4}=30-x_{1}-x_{2}-3 x_{3} \\ x_{5}=24-2 x_{1}-2 x_{2}-5 x_{3} \\ x_{6}=36-4 x_{1}-x_{2}-2 x_{3} \\ x_{1}, x_{2}, x_{3} \geq 0 \end{array} El valor de las variables $x_{1}$, $x_{2}$, y $x_{3}$ define valores para $x_{4}$, $x_{5}$ y $x_{6}$; por lo tanto, tenemos un número infinito de soluciones para este sistema de ecuaciones. Una solucion es factible si todas las variables ($x_{1}$, $x_{2}$, ... , $x_{6}$) no son negativas. 2. Nos enfocamos en la solución básica: establecemos todas las variables (no básicas) en el lado derecho en 0 y luego calculamos los valores de las variables (básicas) en el lado izquierdo. Dejando los siguientes valores para estas ecuaciones: \begin{array}{l} z= 0 \\ x_{4}=30 \\ x_{5}=24 \\ x_{6}=36 \\ \end{array} observamos que los valores ($\bar{x_{1}}, \bar{x_{2}},...,\bar{x_{6}}$) = (0,0,0,30,24,36) y tiene un valor objetivo de $z=(3·0)+(1·0)+(2·0)=0$ Observe que la solución básica propone $\bar{x_{i}}=b_{i}$ por cada $i \in B$, por cada iteración del algoritmo simplex, se reescriben las ecuaciones y el objetivo de la función, es de poner variables distintas a la derecha y así, cambiamos la solución básica que es asociada con el problema reescrito (cabe destacar que esto no cambia en ninguna forma el problema original). Si una solución básica es factible, la llamaremos $\textbf{solución básica factible}$, lo que es normalmente el caso, aunque habrán algunas ocasiones, en la que la solución básica no es factible 3. 
Para cambiar la solución básica, seleccionamos una variable no básica $x_{e}$ cuyo coeficiente en la función objetivo es positivo, y lo incrementamos tanto como las ecuaciones lo permitan; posteriormente, la variable $x_{e}$ pasa a ser una variable básica y otra variable tipo $x_{l}$ deja de ser básica, así que aplicaremos esa lógica a nuestro ejemplo, utilizaremos $x_{1}$ para empezar. A medida que incrementamos $x_{1}$ los valores de $x_{4}, x_{5}, x_{6}$ decrecen, no podemos permitir que estas variables se conviertan en negativas, por lo que probamos qué valor máximo puede tomar $x_{1}$ en las siguientes ecuaciones (asumiendo que $x_{2}$ y $x_{3}$ sean 0 por las reglas establecidas anteriormente): \begin{array}{l} x_{4}=30-x_{1} \\ x_{5}=24-2 x_{1} \\ x_{6}=36-4 x_{1} \\ \end{array} Podemos observar, que en $x_{4}$ el valor de $x_{1}$ no puede ser mayor a 30, así como en $x_{5}$ y en $x_{6}$ el valor de $x_{1}$ no puede ser mayor a 12 y 9 respectivamente, por lo que nos decantamos por el mayor valor que no contradiga ninguna de las reglas anteriormente establecidas, el cual sería 9, por lo que en la tercera ecuación, cambiamos los roles de $x_{1}$ y $x_{6}$ y resolvemos la ecuación para $x_{1}$, para obtener: \begin{array}{l} x_{1}=9-\frac{x_{2}}{4}-\frac{x_{3}}{2}-\frac{x_{6}}{4} \\ \end{array} Para reescribir las otras ecuaciones con $x_{6}$ a la derecha, simplemente sustituimos $x_{1}$ con la ecuación obtenida anteriormente y desarrollamos, lo que nos deja con las siguientes ecuaciones: \begin{array}{l} z= 27+ \frac{x_{2}}{4} + \frac{x_{3}}{2} - \frac{3 x_{6}}{4}\\ x_{1}= 9 - \frac{x_{2}}{4} - \frac{x_{3}}{2} - \frac{x_{6}}{4} \\ x_{4}= 21 - \frac{3x_{2}}{4} - \frac{5x_{3}}{2} + \frac{x_{6}}{4} \\ x_{5}= 6 - \frac{3x_{2}}{2} - 4x_{3} + \frac{x_{6}}{2}\\ \end{array} A este proceso que acabamos de hacer le llamamos $\textbf{pivote}$, en donde $x_{1}$ es la variable entrante y $x_{6}$ es la variable saliente 4. 
Una vez hemos realizado nuestro primer $\textbf{pivote}$, tenemos que utilizar las otras 2 variables restantes para pivotar, primero escogeremos $x_{3}$, luego $x_{2}$, por lo que aplicamos el paso 3. \begin{array}{l} x_{1}= 9 - \frac{x_{3}}{2} \\ x_{4}= 21 - \frac{5x_{3}}{2} \\ x_{5}= 6 - 4x_{3}\\ \end{array} Resolver estas ecuaciones nos da un valor máximo para $x_{3}$ de 18 (según $x_{1}$), 42/5 (según $x_{4}$) y 3/2 (según $x_{5}$). Por lo que sustituimos $x_{3}$ por $x_{5}$ en la tercera ecuación, lo que nos da: $x_{3} = \frac{3}{2} - \frac{3 x_{2}}{8} - \frac{x_{5}}{4} + \frac{x_{6}}{8}$ Sustituimos en el sistema de ecuaciones: \begin{array}{l} z= \frac{111}{4} + \frac{x_{2}}{16} - \frac{x_{5}}{8} - \frac{11x_{6}}{16}\\ x_{1}= \frac{33}{4} - \frac{x_{2}}{16} + \frac{x_{5}}{8} - \frac{5x_{6}}{16}\\ x_{3}= \frac{3}{2} - \frac{3x_{2}}{8} - \frac{x_{5}}{4} + \frac{x_{6}}{8}\\ x_{4}= \frac{69}{4} + \frac{3x_{2}}{16} + \frac{x_{5}}{8} - \frac{x_{6}}{16}\\ \end{array} este sistema tiene la solución básica de (33/4,0,3/2,69/4,0,0), con un valor objetivo de 111/4, ahora, la única manera de aumentar el valor objetivo es aumentar $x_{2}$. Las 3 ecuaciones dan valores máximos de 132, 4 y $\infty$ respectivamente (obtuvimos un valor de infinito, debido a que a medida que aumenta $x_{2}$, el valor de $x_{4}$ también aumenta, lo que significa que no hay valor que cumpla la restricción de cuánto podemos aumentar $x_{2}$.) Aumentamos el valor de $x_{2}$ a 4. Sustituimos en las ecuaciones... \begin{array}{l} z= 28 - \frac{x_{3}}{6} - \frac{x_{5}}{6} - \frac{2 x_{6}}{3}\\ x_{1}= 8 + \frac{x_{3}}{6} + \frac{x_{5}}{6} - \frac{x_{6}}{3} \\ x_{2}= 4 - \frac{8x_{3}}{3} - \frac{2x_{5}}{3} + \frac{x_{6}}{3} \\ x_{4}= 18 - \frac{x_{3}}{2} + \frac{x_{5}}{2}\\ \end{array} En este punto, todos los coeficientes de la función objetivo son negativos, esta situación solo ocurre cuando hemos reescrito el programa lineal de tal forma que la solución básica es la solución más óptima. 5. 
Ya que sabemos que la función objetiva es produce una solución básica óptima, con la solución (8,4,0,18,0,0) con el valor objetivo de 28, que se obtiene haciendo el reemplazo en el programa lineal original, utilizando las variables que aparecían en este programa lineal original. $z = 3 x_{1}+x_{2}+2 x_{3}$ $z = (3·8)+(1·4)+(2·0)$ $z = 28$ $x_{1} = 8$ $x_{2} = 4$ $x_{3} = 0$ Lo cuál es nuestra respuesta final ## Experimentos numéricos hechos en Python del algoritmo simplex. ``` def simplex(A, b, c): try: N, B, A, b, c, v = initializeSimplex(A, b, c) delta = [] for i in range(len(B)): delta.append(0) for j in range(len(N)): if c[j] > 0: e = j for i in range(len(B)): if A[i][e] > 0: delta[i] = b[i] / A[i][e] else: delta[i] = float('inf') l = delta.index(min(delta)) if delta[l] == float('inf'): return print("unbounded") else: N, B, A, b, c, v = pivot(N, B, A, b, c, v, l, e) for i in range(len(N)+len(B)): if i + 1 in B: index = B.index(i + 1) print("x{0}: {1}".format(i+1, b[index])) else: print("x{0}: {1}".format(i+1, 0)) print("z: {0}".format(v)) except ValueError: print("El algoritmo no se puede ejecutar con esas condiciones") return def initializeSimplex(A, b, c): k = b.index(min(b)) if b[k] >= 0: # Is the initial basic solution feasible? return [x+1 for x in range(len(c))], [len(c)+x+1 for x in range(len(b))], A, b, c, 0 else: for i in range(len(b)): A[i].append(0) return "infeasible" def pivot(N, B, A, b, c, v, l, e): pivotA = [[0 for i in range(len(c))] for j in range(len(b))] pivotb = [0 for i in range(len(b))] pivotc = [0 for i in range(len(c))] # Compute the coefficients of the equation for new basic variable xe. pivotb[l] = b[l] / A[l][e] for j in range(len(N)): if j != e: pivotA[l][j] = A[l][j] / A[l][e] pivotA[l][e] = 1 / A[l][e] # Compute the coefficients of the remaining constraints. 
for i in range(len(B)): if i != l: pivotb[i] = b[i] - A[i][e] * pivotb[l] for j in range(len(N)): if j != e: pivotA[i][j] = A[i][j] - A[i][e] * pivotA[l][j] pivotA[i][e] = - A[i][e] * pivotA[l][e] # Compute the objective function. pivotv = v + c[e] * pivotb[l] for j in range(len(N)): if j != e: pivotc[j] = c[j] - c[e] * pivotA[l][j] pivotc[e] = -c[e] * pivotA[l][e] # Compute new sets of basic and nonbasic variables. pivotN = [0 for i in range(len(N))] pivotB = [0 for i in range(len(B))] a = N[e] b = B[l] for x in range(len(pivotN)): if x == e: pivotN[x] = b else: pivotN[x] = N[x] for x in range(len(pivotB)): if x == l: pivotB[x] = a else: pivotB[x] = B[x] return pivotN, pivotB, pivotA, pivotb, pivotc, pivotv ``` El área de interés es la que está entre todas las figuras ![image.png](attachment:image.png) ``` A = [[1, 1, 3], [2, 2, 5], [4, 1, 2]] b = [30, 24, 36] c = [3, 1, 2] simplex(A, b, c) ``` ![image.png](attachment:image.png) ![image-2.png](attachment:image-2.png) ``` A = [[-1, 1], [2, 5], [2,-1]] b = [4,20,2] c = [1,1] simplex(A, b, c) ``` ![image.png](attachment:image.png) ![image.png](attachment:image.png) ``` A = [[1, -1], [2, 2], [1, 2]] b = [2,6,5] c = [2,2] simplex(A, b, c) ``` ## Conclusión Para finalizar, pudimos observar que el algoritmo puede ser algo complejo de comprender, tanto la interpretación del enunciado, como la utilización del algoritmo como tal, sin embargo, es uno de los algoritmos más utilizados en las áreas de la matemática avanzada y la ciencia para minimizar y máximizar, debido a su gran eficiencia y resultados certeros, por lo cual es recomendable saber como funciona, ya que nos podría venir de utilidad en un futuro. Como lo podría ser en el área de economía, por si en algún dado caso, se desea invertir lo mínimo posible para alcanzar una meta o prblemas de la misma índole. # Bibliografía * Cormen, T., Leiserson, C., Rivest, Ronald & Stein, C. Secciones 29.1, 29.2 y 29.3 en Introduction to Algorithms, tercera edición (pp. 843-879). 
MIT Press. * Shen, W. [wenshepsu] 11 may 2020. V3-29. Linear Programming. Convergence proof for Simplex method. Recuperado de: https://www.youtube.com/watch?v=IFCsUxW9aUs&t * Shen, W. [wenshepsu] 11 may 2020: V3-30. Linear Programming. Convergence proof for Simplex method, Induction step.
github_jupyter
``` import pickle import pandas as pd import os import json import glob import numpy as np from optimizers.utils import Model, Architecture from nasbench_analysis.search_spaces.search_space_1 import SearchSpace1 from nasbench_analysis.search_spaces.search_space_2 import SearchSpace2 from nasbench_analysis.search_spaces.search_space_3 import SearchSpace3 from nasbench_analysis.utils import INPUT, OUTPUT, CONV1X1, NasbenchWrapper, upscale_to_nasbench_format, natural_keys path = "experiments/discrete_optimizers/" ssp = 1 algo = "RS" n_runs = 500 search_space = eval('SearchSpace{}()'.format(ssp)) y_star_valid, y_star_test, inc_config = (search_space.valid_min_error, search_space.test_min_error, None) class DotAccess(): def __init__(self, valid, info, test): self.valid = valid self.info = info self.test = test def process_and_save(all_runs): global y_star_valid, y_star_test valid_incumbents = [] runtimes = [] test_incumbents = [] inc = np.inf test_regret = 1 for k in range(len(all_runs)): print('Iteration {:<3}/{:<3}'.format(k+1, len(all_runs)), end="\r", flush=True) regret = all_runs[k].valid - y_star_valid # Update test regret only when incumbent changed by validation regret if regret <= inc: inc = regret test_regret = all_runs[k].test - y_star_test valid_incumbents.append(inc) test_incumbents.append(test_regret) runtimes.append(all_runs[k].info) runtimes = np.cumsum(runtimes).tolist() return valid_incumbents, runtimes, test_incumbents # with open(os.path.join(path, 'config.json')) as fp: # config = json.load(fp) re_archs = glob.glob(os.path.join(path, 'algo_{}_0_ssp_{}_seed_*.obj'.format(algo, ssp))) # Sort them by date re_archs.sort(key=natural_keys) for i in range(n_runs): res = pickle.load(open(re_archs[i], 'rb')) all_runs = [] for j in range(len(res)): all_runs.append(DotAccess(valid = 1 - res[j].validation_accuracy, info = res[j].training_time, test = 1 - res[j].test_accuracy)) valid_incumbents, runtimes, test_incumbents = process_and_save(all_runs) directory = 
os.path.join(path, '{}/{}/'.format(algo, ssp)) if not os.path.exists(directory): os.makedirs(directory) with open(os.path.join(path, '{}/{}/' 'run_{}.json'.format(algo, ssp, i)), 'w') as f: json.dump({'runtime': runtimes, 'regret_validation': valid_incumbents, 'regret_test': test_incumbents}, f) '''Script to plot regret curves for multiple runs on the benchmarks ''' import os import json import sys import pickle import argparse import collections import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn from scipy import stats # seaborn.set_style("ticks") # from matplotlib import rcParams # rcParams["font.size"] = "30" # rcParams['text.usetex'] = False # rcParams['figure.figsize'] = (16.0, 9.0) # rcParams['figure.frameon'] = True # rcParams['figure.edgecolor'] = 'k' # rcParams['grid.color'] = 'k' # rcParams['grid.linestyle'] = ':' # rcParams['grid.linewidth'] = 0.5 # rcParams['axes.linewidth'] = 1 # rcParams['axes.edgecolor'] = 'k' # rcParams['axes.grid.which'] = 'both' # rcParams['legend.frameon'] = 'True' # rcParams['legend.framealpha'] = 1 # rcParams['ytick.major.size'] = 12 # rcParams['ytick.major.width'] = 1.5 # rcParams['ytick.minor.size'] = 6 # rcParams['ytick.minor.width'] = 1 # rcParams['xtick.major.size'] = 12 # rcParams['xtick.major.width'] = 1.5 # rcParams['xtick.minor.size'] = 6 # rcParams['xtick.minor.width'] = 1 marker=['*', 'x', 's', 's', 's', 'h', '*', 'v', '<', ">"] linestyles = ['--', ':', '-',] def fill_trajectory(performance_list, time_list, replace_nan=np.NaN): frame_dict = collections.OrderedDict() counter = np.arange(0, len(performance_list)) for p, t, c in zip(performance_list, time_list, counter): if len(p) != len(t): raise ValueError("(%d) Array length mismatch: %d != %d" % (c, len(p), len(t))) frame_dict[str(c)] = pd.Series(data=p, index=t) # creates a dataframe where the rows are indexed based on time # fills with NA for missing values for the respective timesteps merged = pd.DataFrame(frame_dict) # ffill() 
acts like a fillna() wherein a forward fill happens # only remaining NAs for in the beginning until a value is recorded merged = merged.ffill() performance = merged.to_numpy() #get_values() # converts to a 2D numpy array time_ = merged.index.values # retrieves the timestamps performance[np.isnan(performance)] = replace_nan if not np.isfinite(performance).all(): raise ValueError("\nCould not merge lists, because \n" "\t(a) one list is empty?\n" "\t(b) the lists do not start with the same times and" " replace_nan is not set?\n" "\t(c) any other reason.") return performance, time_ # parser = argparse.ArgumentParser() # parser.add_argument('--bench', default='1shot1', type=str, nargs='?', # choices=['101', '1shot1', '201'], help='select benchmark') # parser.add_argument('--ssp', default=1, type=int, nargs='?') # parser.add_argument('--path', default='experiments/discrete_optimizers', type=str, nargs='?', # help='path to encodings or jsons for each algorithm') # parser.add_argument('--n_runs', default=500, type=int, nargs='?', # help='number of runs to plot data for') # parser.add_argument('--output_path', default="", type=str, nargs='?', # help='specifies the path where the plot will be saved') # parser.add_argument('--type', default="wallclock", type=str, choices=["wallclock", "fevals"], # help='to plot for wallclock times or # function evaluations') # parser.add_argument('--name', default="comparison", type=str, # help='file name for the PNG plot to be saved') # parser.add_argument('--title', default="benchmark", type=str, # help='title name for the plot') # parser.add_argument('--limit', default=1e7, type=float, help='wallclock limit') # parser.add_argument('--regret', default='validation', type=str, choices=['validation', 'test'], # help='type of regret') # args = parser.parse_args() path = 'experiments/discrete_optimizers' n_runs = 500 plot_type = 'wallclock' plot_name = 'comparison' regret_type = 'validation' benchmark = '1shot1' ssp = '1' limit=1e7 title = 
'search space 1' output_path = '' if benchmark == '1shot1' and ssp is None: print("Specify \'--ssp\' from {1, 2, 3} for choosing the search space for NASBench-1shot1.") sys.exit() if benchmark == '101': methods = [ ("random_search", "RS"), ("bohb", "BOHB"), ("hyperband", "HB"), ("tpe", "TPE"), ("regularized_evolution", "RE"), ("de_pop20", "DE")] # ("de_pop10", "DE $pop=10$"), # ("de_pop20", "DE $pop=20$")] # ("de_pop30", "DE $pop=30$"), # ("de_pop40", "DE $pop=40$"), # ("de_pop50", "DE $pop=50$"), # ("de_pop60", "DE $pop=60$"), # ("de_pop70", "DE $pop=70$"), # ("de_pop80", "DE $pop=80$"), # ("de_pop90", "DE $pop=90$"), # ("de_pop100", "DE $pop=100$")] elif benchmark == '201': methods = [ ("random_search", "RS"), ("bohb", "BOHB"), ("hyperband", "HB"), ("tpe", "TPE"), ("regularized_evolution", "RE"), ("de_pop20", "DE")] # ("de_pop10", "DE $pop=10$"), # ("de_pop20", "DE $pop=20$"), # ("de_pop30", "DE $pop=30$"), # ("de_pop40", "DE $pop=40$"), # ("de_pop50", "DE $pop=50$"), # ("de_pop60", "DE $pop=60$"), # ("de_pop70", "DE $pop=70$"), # ("de_pop80", "DE $pop=80$"), # ("de_pop90", "DE $pop=90$"), # ("de_pop100", "DE $pop=100$")] else: methods = [ ("RS", "RS"), ("RE", "RE"), #("BOHB", "BOHB"), #("HB", "HB"), #("TPE", "TPE"), ("EE", "EE")] #("DE_pop20", "DE")] # ("DE_pop10", "DE $pop=10$"), # ("DE_pop20", "DE $pop=20$"), # ("DE_pop30", "DE $pop=30$"), # ("DE_pop40", "DE $pop=40$"), # ("DE_pop50", "DE $pop=50$"), # ("DE_pop60", "DE $pop=60$"), # ("DE_pop70", "DE $pop=70$"), # ("DE_pop80", "DE $pop=80$"), # ("DE_pop90", "DE $pop=90$"), # ("DE_pop100", "DE $pop=100$")] # plot limits min_time = np.inf max_time = 0 min_regret = 1 max_regret = 0 # plot setup colors = ['xkcd:lightgreen', 'xkcd:aqua', 'xkcd:coral'] plt.clf() # looping and plotting for all methods for index, (m, label) in enumerate(methods): regret = [] runtimes = [] for k, i in enumerate(np.arange(n_runs)): try: if benchmark in ['101', '201']: res = json.load(open(os.path.join(path, m, "run_%d.json" % i))) else: 
res = json.load(open(os.path.join(path, m, str(ssp), "run_%d.json" % i))) no_runs_found = False except Exception as e: print(m, i, e) no_runs_found = True continue regret_key = "regret_validation" if regret_type == 'validation' else "regret_test" runtime_key = "runtime" _, idx = np.unique(res[regret_key], return_index=True) idx.sort() regret.append(np.array(res[regret_key])[idx]) runtimes.append(np.array(res[runtime_key])[idx]) if not no_runs_found: # finds the latest time where the first measurement was made across runs t = np.max([runtimes[i][0] for i in range(len(runtimes))]) min_time = min(min_time, t) te, time = fill_trajectory(regret, runtimes, replace_nan=1) idx = time.tolist().index(t) te = te[idx:, :] time = time[idx:] # Clips off all measurements after 10^7s idx = np.where(time < limit)[0] print("{}. Plotting for {}".format(index, m)) print(len(regret), len(runtimes)) # The mean plot plt.plot(time[idx], np.mean(te, axis=1)[idx], color=colors[index], linewidth=4, label=label, linestyle=linestyles[index % len(linestyles)], marker=marker[index % len(marker)], markevery=(0.1,0.1), markersize=10) # # The error band # plt.fill_between(time[idx], # np.mean(te, axis=1)[idx] + 2 * stats.sem(te[idx], axis=1), # np.mean(te[idx], axis=1)[idx] - 2 * stats.sem(te[idx], axis=1), # color="C%d" % index, alpha=0.2) # Stats to dynamically impose limits on the axes of the plots max_time = max(max_time, time[idx][-1]) min_regret = min(min_regret, np.mean(te, axis=1)[idx][-1]) max_regret = max(max_regret, np.mean(te, axis=1)[idx][0]) plt.xscale("log") plt.yscale("log") plt.tick_params(which='both', direction="in") plt.legend(loc='lower left', framealpha=1, prop={'size': 25, 'weight': 'bold'}) plt.title(title) if plot_type == "wallclock": plt.xlabel("estimated wallclock time $[s]$") elif plot_type == "fevals": plt.xlabel("number of function evaluations") plt.ylabel("{} regret".format(regret_type)) # plt.xlim(max(min_time/10, 1e0), min(max_time*10, 1e7)) # plt.ylim(min_regret, 
max_regret) bottom, top = plt.ylim() plt.ylim((bottom,top)) plt.legend() # plt.grid(which='both', alpha=0.5, linewidth=0.5) plt.grid(b=True, which='major', color='#F4F4F4', linestyle='-') # print(os.path.join(output_path, '{}.png'.format(plot_name))) plt.savefig(os.path.join(output_path, '{}.png'.format(plot_name)), format='png', bbox_inches='tight', dpi=300) '''Script to plot regret curves for multiple runs on the benchmarks ''' import os import json import sys import pickle import argparse import collections import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn from scipy import stats # seaborn.set_style("ticks") # from matplotlib import rcParams # rcParams["font.size"] = "30" # rcParams['text.usetex'] = False # rcParams['figure.figsize'] = (16.0, 9.0) # rcParams['figure.frameon'] = True # rcParams['figure.edgecolor'] = 'k' # rcParams['grid.color'] = 'k' # rcParams['grid.linestyle'] = ':' # rcParams['grid.linewidth'] = 0.5 # rcParams['axes.linewidth'] = 1 # rcParams['axes.edgecolor'] = 'k' # rcParams['axes.grid.which'] = 'both' # rcParams['legend.frameon'] = 'True' # rcParams['legend.framealpha'] = 1 # rcParams['ytick.major.size'] = 12 # rcParams['ytick.major.width'] = 1.5 # rcParams['ytick.minor.size'] = 6 # rcParams['ytick.minor.width'] = 1 # rcParams['xtick.major.size'] = 12 # rcParams['xtick.major.width'] = 1.5 # rcParams['xtick.minor.size'] = 6 # rcParams['xtick.minor.width'] = 1 marker=['*', 'x', 's', 's', 's', 'h', '*', 'v', '<', ">"] linestyles = ['--', ':', '-',] def fill_trajectory(performance_list, time_list, replace_nan=np.NaN): frame_dict = collections.OrderedDict() counter = np.arange(0, len(performance_list)) for p, t, c in zip(performance_list, time_list, counter): if len(p) != len(t): raise ValueError("(%d) Array length mismatch: %d != %d" % (c, len(p), len(t))) frame_dict[str(c)] = pd.Series(data=p, index=t) # creates a dataframe where the rows are indexed based on time # fills with NA for missing values for the 
respective timesteps merged = pd.DataFrame(frame_dict) # ffill() acts like a fillna() wherein a forward fill happens # only remaining NAs for in the beginning until a value is recorded merged = merged.ffill() performance = merged.to_numpy() #get_values() # converts to a 2D numpy array time_ = merged.index.values # retrieves the timestamps performance[np.isnan(performance)] = replace_nan if not np.isfinite(performance).all(): raise ValueError("\nCould not merge lists, because \n" "\t(a) one list is empty?\n" "\t(b) the lists do not start with the same times and" " replace_nan is not set?\n" "\t(c) any other reason.") return performance, time_ # parser = argparse.ArgumentParser() # parser.add_argument('--bench', default='1shot1', type=str, nargs='?', # choices=['101', '1shot1', '201'], help='select benchmark') # parser.add_argument('--ssp', default=1, type=int, nargs='?') # parser.add_argument('--path', default='experiments/discrete_optimizers', type=str, nargs='?', # help='path to encodings or jsons for each algorithm') # parser.add_argument('--n_runs', default=500, type=int, nargs='?', # help='number of runs to plot data for') # parser.add_argument('--output_path', default="", type=str, nargs='?', # help='specifies the path where the plot will be saved') # parser.add_argument('--type', default="wallclock", type=str, choices=["wallclock", "fevals"], # help='to plot for wallclock times or # function evaluations') # parser.add_argument('--name', default="comparison", type=str, # help='file name for the PNG plot to be saved') # parser.add_argument('--title', default="benchmark", type=str, # help='title name for the plot') # parser.add_argument('--limit', default=1e7, type=float, help='wallclock limit') # parser.add_argument('--regret', default='validation', type=str, choices=['validation', 'test'], # help='type of regret') # args = parser.parse_args() path = 'experiments/discrete_optimizers' n_runs = 500 plot_type = 'wallclock' plot_name = 'comparison' regret_type = 
'validation' benchmark = '1shot1' ssp = '2' limit=1e7 title = 'search space 2' output_path = '' if benchmark == '1shot1' and ssp is None: print("Specify \'--ssp\' from {1, 2, 3} for choosing the search space for NASBench-1shot1.") sys.exit() if benchmark == '101': methods = [ ("random_search", "RS"), ("bohb", "BOHB"), ("hyperband", "HB"), ("tpe", "TPE"), ("regularized_evolution", "RE"), ("de_pop20", "DE")] # ("de_pop10", "DE $pop=10$"), # ("de_pop20", "DE $pop=20$")] # ("de_pop30", "DE $pop=30$"), # ("de_pop40", "DE $pop=40$"), # ("de_pop50", "DE $pop=50$"), # ("de_pop60", "DE $pop=60$"), # ("de_pop70", "DE $pop=70$"), # ("de_pop80", "DE $pop=80$"), # ("de_pop90", "DE $pop=90$"), # ("de_pop100", "DE $pop=100$")] elif benchmark == '201': methods = [ ("random_search", "RS"), ("bohb", "BOHB"), ("hyperband", "HB"), ("tpe", "TPE"), ("regularized_evolution", "RE"), ("de_pop20", "DE")] # ("de_pop10", "DE $pop=10$"), # ("de_pop20", "DE $pop=20$"), # ("de_pop30", "DE $pop=30$"), # ("de_pop40", "DE $pop=40$"), # ("de_pop50", "DE $pop=50$"), # ("de_pop60", "DE $pop=60$"), # ("de_pop70", "DE $pop=70$"), # ("de_pop80", "DE $pop=80$"), # ("de_pop90", "DE $pop=90$"), # ("de_pop100", "DE $pop=100$")] else: methods = [ ("RS", "RS"), ("RE", "RE"), #("BOHB", "BOHB"), #("HB", "HB"), #("TPE", "TPE"), ("EE", "EE")] #("DE_pop20", "DE")] # ("DE_pop10", "DE $pop=10$"), # ("DE_pop20", "DE $pop=20$"), # ("DE_pop30", "DE $pop=30$"), # ("DE_pop40", "DE $pop=40$"), # ("DE_pop50", "DE $pop=50$"), # ("DE_pop60", "DE $pop=60$"), # ("DE_pop70", "DE $pop=70$"), # ("DE_pop80", "DE $pop=80$"), # ("DE_pop90", "DE $pop=90$"), # ("DE_pop100", "DE $pop=100$")] # plot limits min_time = np.inf max_time = 0 min_regret = 1 max_regret = 0 # plot setup colors = ['xkcd:lightgreen', 'xkcd:aqua', 'xkcd:coral'] plt.clf() # looping and plotting for all methods for index, (m, label) in enumerate(methods): regret = [] runtimes = [] for k, i in enumerate(np.arange(n_runs)): try: if benchmark in ['101', '201']: res = 
json.load(open(os.path.join(path, m, "run_%d.json" % i))) else: res = json.load(open(os.path.join(path, m, str(ssp), "run_%d.json" % i))) no_runs_found = False except Exception as e: print(m, i, e) no_runs_found = True continue regret_key = "regret_validation" if regret_type == 'validation' else "regret_test" runtime_key = "runtime" _, idx = np.unique(res[regret_key], return_index=True) idx.sort() regret.append(np.array(res[regret_key])[idx]) runtimes.append(np.array(res[runtime_key])[idx]) if not no_runs_found: # finds the latest time where the first measurement was made across runs t = np.max([runtimes[i][0] for i in range(len(runtimes))]) min_time = min(min_time, t) te, time = fill_trajectory(regret, runtimes, replace_nan=1) idx = time.tolist().index(t) te = te[idx:, :] time = time[idx:] # Clips off all measurements after 10^7s idx = np.where(time < limit)[0] print("{}. Plotting for {}".format(index, m)) print(len(regret), len(runtimes)) # The mean plot plt.plot(time[idx], np.mean(te, axis=1)[idx], color=colors[index], linewidth=4, label=label, linestyle=linestyles[index % len(linestyles)], marker=marker[index % len(marker)], markevery=(0.1,0.1), markersize=10) # # The error band # plt.fill_between(time[idx], # np.mean(te, axis=1)[idx] + 2 * stats.sem(te[idx], axis=1), # np.mean(te[idx], axis=1)[idx] - 2 * stats.sem(te[idx], axis=1), # color="C%d" % index, alpha=0.2) # Stats to dynamically impose limits on the axes of the plots max_time = max(max_time, time[idx][-1]) min_regret = min(min_regret, np.mean(te, axis=1)[idx][-1]) max_regret = max(max_regret, np.mean(te, axis=1)[idx][0]) plt.xscale("log") plt.yscale("log") plt.tick_params(which='both', direction="in") plt.legend(loc='lower left', framealpha=1, prop={'size': 25, 'weight': 'bold'}) plt.title(title) if plot_type == "wallclock": plt.xlabel("estimated wallclock time $[s]$") elif plot_type == "fevals": plt.xlabel("number of function evaluations") plt.ylabel("{} regret".format(regret_type)) # 
plt.xlim(max(min_time/10, 1e0), min(max_time*10, 1e7)) # plt.ylim(min_regret, max_regret) bottom, top = plt.ylim() plt.ylim((bottom,top)) plt.legend() # plt.grid(which='both', alpha=0.5, linewidth=0.5) plt.grid(b=True, which='major', color='#F4F4F4', linestyle='-') # print(os.path.join(output_path, '{}.png'.format(plot_name))) plt.savefig(os.path.join(output_path, '{}.png'.format(plot_name)), format='png', bbox_inches='tight', dpi=300) '''Script to plot regret curves for multiple runs on the benchmarks ''' import os import json import sys import pickle import argparse import collections import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn from scipy import stats # seaborn.set_style("ticks") # from matplotlib import rcParams # rcParams["font.size"] = "30" # rcParams['text.usetex'] = False # rcParams['figure.figsize'] = (16.0, 9.0) # rcParams['figure.frameon'] = True # rcParams['figure.edgecolor'] = 'k' # rcParams['grid.color'] = 'k' # rcParams['grid.linestyle'] = ':' # rcParams['grid.linewidth'] = 0.5 # rcParams['axes.linewidth'] = 1 # rcParams['axes.edgecolor'] = 'k' # rcParams['axes.grid.which'] = 'both' # rcParams['legend.frameon'] = 'True' # rcParams['legend.framealpha'] = 1 # rcParams['ytick.major.size'] = 12 # rcParams['ytick.major.width'] = 1.5 # rcParams['ytick.minor.size'] = 6 # rcParams['ytick.minor.width'] = 1 # rcParams['xtick.major.size'] = 12 # rcParams['xtick.major.width'] = 1.5 # rcParams['xtick.minor.size'] = 6 # rcParams['xtick.minor.width'] = 1 marker=['*', 'x', 's', 's', 's', 'h', '*', 'v', '<', ">"] linestyles = ['--', ':', '-',] def fill_trajectory(performance_list, time_list, replace_nan=np.NaN): frame_dict = collections.OrderedDict() counter = np.arange(0, len(performance_list)) for p, t, c in zip(performance_list, time_list, counter): if len(p) != len(t): raise ValueError("(%d) Array length mismatch: %d != %d" % (c, len(p), len(t))) frame_dict[str(c)] = pd.Series(data=p, index=t) # creates a dataframe where 
the rows are indexed based on time # fills with NA for missing values for the respective timesteps merged = pd.DataFrame(frame_dict) # ffill() acts like a fillna() wherein a forward fill happens # only remaining NAs for in the beginning until a value is recorded merged = merged.ffill() performance = merged.to_numpy() #get_values() # converts to a 2D numpy array time_ = merged.index.values # retrieves the timestamps performance[np.isnan(performance)] = replace_nan if not np.isfinite(performance).all(): raise ValueError("\nCould not merge lists, because \n" "\t(a) one list is empty?\n" "\t(b) the lists do not start with the same times and" " replace_nan is not set?\n" "\t(c) any other reason.") return performance, time_ # parser = argparse.ArgumentParser() # parser.add_argument('--bench', default='1shot1', type=str, nargs='?', # choices=['101', '1shot1', '201'], help='select benchmark') # parser.add_argument('--ssp', default=1, type=int, nargs='?') # parser.add_argument('--path', default='experiments/discrete_optimizers', type=str, nargs='?', # help='path to encodings or jsons for each algorithm') # parser.add_argument('--n_runs', default=500, type=int, nargs='?', # help='number of runs to plot data for') # parser.add_argument('--output_path', default="", type=str, nargs='?', # help='specifies the path where the plot will be saved') # parser.add_argument('--type', default="wallclock", type=str, choices=["wallclock", "fevals"], # help='to plot for wallclock times or # function evaluations') # parser.add_argument('--name', default="comparison", type=str, # help='file name for the PNG plot to be saved') # parser.add_argument('--title', default="benchmark", type=str, # help='title name for the plot') # parser.add_argument('--limit', default=1e7, type=float, help='wallclock limit') # parser.add_argument('--regret', default='validation', type=str, choices=['validation', 'test'], # help='type of regret') # args = parser.parse_args() path = 'experiments/discrete_optimizers' 
n_runs = 500 plot_type = 'wallclock' plot_name = 'comparison' regret_type = 'validation' benchmark = '1shot1' ssp = '3' limit=1e7 title = 'search space 3' output_path = '' if benchmark == '1shot1' and ssp is None: print("Specify \'--ssp\' from {1, 2, 3} for choosing the search space for NASBench-1shot1.") sys.exit() if benchmark == '101': methods = [ ("random_search", "RS"), ("bohb", "BOHB"), ("hyperband", "HB"), ("tpe", "TPE"), ("regularized_evolution", "RE"), ("de_pop20", "DE")] # ("de_pop10", "DE $pop=10$"), # ("de_pop20", "DE $pop=20$")] # ("de_pop30", "DE $pop=30$"), # ("de_pop40", "DE $pop=40$"), # ("de_pop50", "DE $pop=50$"), # ("de_pop60", "DE $pop=60$"), # ("de_pop70", "DE $pop=70$"), # ("de_pop80", "DE $pop=80$"), # ("de_pop90", "DE $pop=90$"), # ("de_pop100", "DE $pop=100$")] elif benchmark == '201': methods = [ ("random_search", "RS"), ("bohb", "BOHB"), ("hyperband", "HB"), ("tpe", "TPE"), ("regularized_evolution", "RE"), ("de_pop20", "DE")] # ("de_pop10", "DE $pop=10$"), # ("de_pop20", "DE $pop=20$"), # ("de_pop30", "DE $pop=30$"), # ("de_pop40", "DE $pop=40$"), # ("de_pop50", "DE $pop=50$"), # ("de_pop60", "DE $pop=60$"), # ("de_pop70", "DE $pop=70$"), # ("de_pop80", "DE $pop=80$"), # ("de_pop90", "DE $pop=90$"), # ("de_pop100", "DE $pop=100$")] else: methods = [ ("RS", "RS"), ("RE", "RE"), #("BOHB", "BOHB"), #("HB", "HB"), #("TPE", "TPE"), ("EE", "EE")] #("DE_pop20", "DE")] # ("DE_pop10", "DE $pop=10$"), # ("DE_pop20", "DE $pop=20$"), # ("DE_pop30", "DE $pop=30$"), # ("DE_pop40", "DE $pop=40$"), # ("DE_pop50", "DE $pop=50$"), # ("DE_pop60", "DE $pop=60$"), # ("DE_pop70", "DE $pop=70$"), # ("DE_pop80", "DE $pop=80$"), # ("DE_pop90", "DE $pop=90$"), # ("DE_pop100", "DE $pop=100$")] # plot limits min_time = np.inf max_time = 0 min_regret = 1 max_regret = 0 # plot setup colors = ['xkcd:lightgreen', 'xkcd:aqua', 'xkcd:coral'] plt.clf() # looping and plotting for all methods for index, (m, label) in enumerate(methods): regret = [] runtimes = [] for k, i in 
enumerate(np.arange(n_runs)): try: if benchmark in ['101', '201']: res = json.load(open(os.path.join(path, m, "run_%d.json" % i))) else: res = json.load(open(os.path.join(path, m, str(ssp), "run_%d.json" % i))) no_runs_found = False except Exception as e: print(m, i, e) no_runs_found = True continue regret_key = "regret_validation" if regret_type == 'validation' else "regret_test" runtime_key = "runtime" _, idx = np.unique(res[regret_key], return_index=True) idx.sort() regret.append(np.array(res[regret_key])[idx]) runtimes.append(np.array(res[runtime_key])[idx]) if not no_runs_found: # finds the latest time where the first measurement was made across runs t = np.max([runtimes[i][0] for i in range(len(runtimes))]) min_time = min(min_time, t) te, time = fill_trajectory(regret, runtimes, replace_nan=1) idx = time.tolist().index(t) te = te[idx:, :] time = time[idx:] # Clips off all measurements after 10^7s idx = np.where(time < limit)[0] print("{}. Plotting for {}".format(index, m)) print(len(regret), len(runtimes)) # The mean plot plt.plot(time[idx], np.mean(te, axis=1)[idx], color=colors[index], linewidth=4, label=label, linestyle=linestyles[index % len(linestyles)], marker=marker[index % len(marker)], markevery=(0.1,0.1), markersize=10) # # The error band # plt.fill_between(time[idx], # np.mean(te, axis=1)[idx] + 2 * stats.sem(te[idx], axis=1), # np.mean(te[idx], axis=1)[idx] - 2 * stats.sem(te[idx], axis=1), # color="C%d" % index, alpha=0.2) # Stats to dynamically impose limits on the axes of the plots max_time = max(max_time, time[idx][-1]) min_regret = min(min_regret, np.mean(te, axis=1)[idx][-1]) max_regret = max(max_regret, np.mean(te, axis=1)[idx][0]) plt.xscale("log") plt.yscale("log") plt.tick_params(which='both', direction="in") plt.legend(loc='lower left', framealpha=1, prop={'size': 25, 'weight': 'bold'}) plt.title(title) if plot_type == "wallclock": plt.xlabel("estimated wallclock time $[s]$") elif plot_type == "fevals": plt.xlabel("number of function 
evaluations") plt.ylabel("{} regret".format(regret_type)) # plt.xlim(max(min_time/10, 1e0), min(max_time*10, 1e7)) # plt.ylim(min_regret, max_regret) bottom, top = plt.ylim() plt.ylim((bottom,top)) plt.legend() # plt.grid(which='both', alpha=0.5, linewidth=0.5) plt.grid(b=True, which='major', color='#F4F4F4', linestyle='-') # print(os.path.join(output_path, '{}.png'.format(plot_name))) plt.savefig(os.path.join(output_path, '{}.png'.format(plot_name)), format='png', bbox_inches='tight', dpi=300) ```
github_jupyter
Lambda School Data Science, Unit 2: Predictive Modeling # Regression & Classification, Module 1 ## Objectives - Clean data & remove outliers - Use scikit-learn for linear regression - Organize & comment code ## Setup #### If you're using [Anaconda](https://www.anaconda.com/distribution/) locally Install required Python packages: - [pandas-profiling](https://github.com/pandas-profiling/pandas-profiling), version >= 2.0 - [Plotly](https://plot.ly/python/getting-started/), version >= 4.0 ``` conda install -c conda-forge pandas-profiling plotly ``` ``` # If you're in Colab... import os, sys in_colab = 'google.colab' in sys.modules if in_colab: # Install required python packages: # pandas-profiling, version >= 2.0 # plotly, version >= 4.0 !pip install --upgrade pandas-profiling plotly # Pull files from Github repo os.chdir('/content') !git init . !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git !git pull origin master # Change into directory for module os.chdir('module1') # Ignore this Numpy warning when using Plotly Express: # FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead. import warnings warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy') ``` # Predict how much a NYC condo costs 🏠💸 [Amateurs & Experts Guess How Much a NYC Condo With a Private Terrace Costs](https://www.youtube.com/watch?v=JQCctBOgH9I) > Real Estate Agent Leonard Steinberg just sold a pre-war condo in New York City's Tribeca neighborhood. We challenged three people - an apartment renter, an apartment owner and a real estate expert - to try to guess how much the apartment sold for. Leonard reveals more and more details to them as they refine their guesses. The condo is 1,497 square feet. Here are the final guesses: - Apartment Renter: \$15 million - Apartment Buyer: \$2.2 million - Real Estate Expert: \$2.2 million Let's see how we compare! 
First, we need data: - [Kaggle has NYC property sales data](https://www.kaggle.com/new-york-city/nyc-property-sales), but it's not up-to-date. - The data comes from the [New York City Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page). There's also a glossary of property sales terms and NYC Building Class Code Descriptions - The data can also be found on the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal. ## Clean data & remove outliers ``` import pandas as pd import pandas_profiling # Read New York City property sales data df = pd.read_csv('../data/NYC_Citywide_Rolling_Calendar_Sales.csv') # Change column names: replace spaces with underscores df.columns = [col.replace(' ', '_') for col in df] # Get Pandas Profiling Report df.profile_report() ``` ## Plot relationship between feature & target - [Plotly Express](https://plot.ly/python/plotly-express/) examples - [plotly_express.scatter](https://www.plotly.express/plotly_express/#plotly_express.scatter) docs ``` ``` ## Use scikit-learn for Linear Regression #### Jake VanderPlas, [_Python Data Science Handbook_, Chapter 5.2: Introducing Scikit-Learn](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API) The best way to think about data within Scikit-Learn is in terms of tables of data. ![](https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.02-samples-features.png) The features matrix is often stored in a variable named `X`. The features matrix is assumed to be two-dimensional, with shape `[n_samples, n_features]`, and is most often contained in a NumPy array or a Pandas `DataFrame`. We also generally work with a label or target array, which by convention we will usually call `y`. The target array is usually one dimensional, with length `n_samples`, and is generally contained in a NumPy array or Pandas `Series`. 
The target array may have continuous numerical values, or discrete classes/labels. The target array is the quantity we want to _predict from the data_: in statistical terms, it is the dependent variable. Every machine learning algorithm in Scikit-Learn is implemented via the Estimator API, which provides a consistent interface for a wide range of machine learning applications. Most commonly, the steps in using the Scikit-Learn estimator API are as follows: 1. Choose a class of model by importing the appropriate estimator class from Scikit-Learn. 2. Choose model hyperparameters by instantiating this class with desired values. 3. Arrange data into a features matrix and target vector following the discussion above. 4. Fit the model to your data by calling the `fit()` method of the model instance. 5. Apply the Model to new data: For supervised learning, often we predict labels for unknown data using the `predict()` method. ``` ``` ### Organize & comment code ``` ``` # How'd we do? ...
github_jupyter
``` # Useful for debugging %load_ext autoreload %autoreload 2 # Nicer plotting import matplotlib.pyplot as plt import matplotlib %matplotlib inline %config InlineBackend.figure_format = 'retina' matplotlib.rcParams['figure.figsize'] = (8,4) ``` # Autophase and Autophase and Scale examples ``` from impact import Impact import numpy as np import os ifile ='templates/lcls_injector/ImpactT.in' # Make Impact object I = Impact(ifile, verbose=True) I.numprocs=1 ``` # Phase and Scale the LCLS gun ``` from impact.autophase import autophase_and_scale from pmd_beamphysics import single_particle P0 = single_particle(pz=1e-15, z=1e-15) autophase_and_scale(I, phase_ele_name='GUN', target=6e6, scale_range=(10e6, 100e6), initial_particles=P0, verbose=True) # Check this: I.verbose=False PF = I.track(P0, s=0.15) PF['mean_energy'] # Examine this process using the debug flag. This will return the function used for phasing and scaling. ps_f, Itest = autophase_and_scale(I, phase_ele_name='GUN', target=6e6, initial_particles=P0, verbose=False, debug = True) # Phases to try ptry = np.linspace(-100, 50, 30) # scales to try for sc in np.linspace(10e6, 100e6, 5): res = np.array([ps_f(p, sc)/1e6 for p in ptry]) plt.plot(ptry, res, label=f'{sc/1e6:0.2f} MV') plt.title('Final energy for various phases and scales') plt.ylabel('Final energy (MeV)') plt.xlabel('phase (deg)') plt.legend() # 3D plot # Make data. X = np.linspace(-100, 50, 10) Y = np.linspace(10e6, 100e6, 10) X, Y = np.meshgrid(X, Y) @np.vectorize def f(phase, scale): return ps_f(phase, scale) Z = f(X, Y) # Plot the surface. fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Plot the surface. surf = ax.plot_surface(X, Y/1e6, Z/1e6, cmap=matplotlib.cm.coolwarm, linewidth=0, antialiased=True) # Add a color bar which maps values to colors. 
#fig.colorbar(surf, shrink=0.5, aspect=5) ax.set_xlabel('phase (deg)') ax.set_ylabel('scale (MV)') ax.set_zlabel('Final energy (MeV)') plt.show() ``` # Phase and scale LCLS linac sections Linacs L0A and L0B are special, because they require 4 fieldmaps each to model the travelling wave structure. To tune these together, we need to add control groups ``` # Changes in phases I.add_group('L0A', ele_names=['L0A_entrance', 'L0A_body_1', 'L0A_body_2', 'L0A_exit'], var_name='theta0_deg', attributes='theta0_deg') I.add_group('L0B', ele_names=['L0B_entrance', 'L0B_body_1', 'L0B_body_2', 'L0B_exit'], var_name='theta0_deg', attributes='theta0_deg') # Overall scaling, respecting the special factors. I.add_group('L0A_scale', ele_names=['L0A_entrance', 'L0A_body_1', 'L0A_body_2', 'L0A_exit'], var_name = 'rf_field_scale', factors = [0.86571945106805, 1, 1, 0.86571945106805], # sin(k*d) with d = 3.5e-2 m absolute=True) # Overall scaling, respecting the special factors. I.add_group('L0B_scale', ele_names=['L0B_entrance', 'L0B_body_1', 'L0B_body_2', 'L0B_exit'], var_name = 'rf_field_scale', factors = [0.86571945106805, 1, 1, 0.86571945106805], # sin(k*d) with d = 3.5e-2 m absolute=True) I['L0A_scale']['rf_field_scale'] = 30e6 #I['L0A_scale'].__dict__ # L0A to 64 MeV res_L0A = autophase_and_scale(I, phase_ele_name='L0A', scale_ele_name='L0A_scale', target=64e6, scale_range=(10e6, 100e6), initial_particles=P0, verbose=True) autophase_and_scale(I, phase_ele_name='L0B', scale_ele_name='L0B_scale', target=135e6, scale_range=(10e6, 100e6), initial_particles=P0, verbose=True) I.track(P0, s=8.371612)['mean_energy'] plt.plot(I.stat('mean_z'), I.stat('mean_kinetic_energy')/1e6 + 0.511) ``` # Autophase without scaling Just phasing is simpler. 
``` from impact.autophase import autophase ifile2 = 'templates/apex_gun/ImpactT.in' I2 = Impact(ifile2, verbose=False) autophase(I2, ele_name='APEX_GUN', initial_particles=P0, metric='mean_kinetic_energy', verbose=True) phase_f, Itest = autophase(I2, ele_name='APEX_GUN', metric='mean_kinetic_energy', initial_particles=P0, debug = True) # Phases to try ptry = np.linspace(0, 360, 60) energies = np.array([phase_f(p)/1e3 for p in ptry]) plt.plot(ptry, energies) plt.ylim(0, 800) plt.title('Final energy for various phases in the APEX gun') plt.ylabel('Final kinetic energy (keV)') plt.xlabel('phase (deg)') ``` # Autophase with alternative metric, and bunch tracking with space charge. The above uses `mean_energy` as the metric to maximize. Alternatively, one might want to minimize energy spread. This is accomplished by passing `maximize=False` and `metric='sigma_pz'` or similar. ``` from distgen import Generator ifile = 'templates/lcls_injector/ImpactT.in' gfile = 'templates/lcls_injector/distgen.yaml' G = Generator(gfile) G['n_particle'] = 2000 G.run() P0 = G.particles %%time I = Impact(ifile, initial_particles=P0, verbose=False) I.stop = 0.16 I.numprocs=4 I.run() phase_f, Itest = autophase(I, ele_name='GUN', metric='sigma_pz', maximize=False, initial_particles=P0, debug = True, verbose=True) I.particles['final_particles'].plot('z', 'pz') # Phases to try ptry = np.linspace(290, 310, 20) sigma_pzs = np.array([phase_f(p) for p in ptry]) plt.plot(ptry, sigma_pzs) #plt.ylim(0, 800) #plt.title('Final energy for various phases in the APEX gun') #plt.ylabel('Final kinetic energy (keV)') plt.xlabel('phase (deg)') phase_f(293.5) Itest.particles['final_particles'].plot('z', 'pz') phase_f, Itest = autophase(I, ele_name='GUN', metric='sigma_pz', maximize=False, initial_particles=P0, debug = True, s_stop = 1.45, verbose=True) # Phases to try ptry = np.linspace(270, 290, 30) sigma_pzs = np.array([phase_f(p) for p in ptry]) plt.plot(ptry, sigma_pzs) #plt.ylim(0, 800) #plt.title('Final 
energy for various phases in the APEX gun') #plt.ylabel('Final kinetic energy (keV)') plt.xlabel('phase (deg)') phase_f(280.0) Itest.particles['final_particles'].plot('z', 'pz') ```
github_jupyter
# Introduction: Home Credit Default Risk Competition This notebook is intended for those who are new to machine learning competitions or want a gentle introduction to the problem. I purposely avoid jumping into complicated models or joining together lots of data in order to show the basics of how to get started in machine learning! Any comments or suggestions are much appreciated. In this notebook, we will take an initial look at the Home Credit default risk machine learning competition currently hosted on Kaggle. The objective of this competition is to use historical loan application data to predict whether or not an applicant will be able to repay a loan. This is a standard supervised classification task: * __Supervised__: The labels are included in the training data and the goal is to train a model to learn to predict the labels from the features * __Classification__: The label is a binary variable, 0 (will repay loan on time), 1 (will have difficulty repaying loan) # Data The data is provided by [Home Credit](http://www.homecredit.net/about-us.aspx), a service dedicated to providing lines of credit (loans) to the unbanked population. Predicting whether or not a client will repay a loan or have difficulty is a critical business need, and Home Credit is hosting this competition on Kaggle to see what sort of models the machine learning community can develop to help them in this task. There are 7 different sources of data: * application_train/application_test: the main training and testing data with information about each loan application at Home Credit. Every loan has its own row and is identified by the feature `SK_ID_CURR`. The training application data comes with the `TARGET` indicating 0: the loan was repaid or 1: the loan was not repaid. * bureau: data concerning client's previous credits from other financial institutions. Each previous credit has its own row in bureau, but one loan in the application data can have multiple previous credits.
* bureau_balance: monthly data about the previous credits in bureau. Each row is one month of a previous credit, and a single previous credit can have multiple rows, one for each month of the credit length. * previous_application: previous applications for loans at Home Credit of clients who have loans in the application data. Each current loan in the application data can have multiple previous loans. Each previous application has one row and is identified by the feature `SK_ID_PREV`. * POS_CASH_BALANCE: monthly data about previous point of sale or cash loans clients have had with Home Credit. Each row is one month of a previous point of sale or cash loan, and a single previous loan can have many rows. * credit_card_balance: monthly data about previous credit cards clients have had with Home Credit. Each row is one month of a credit card balance, and a single credit card can have many rows. * installments_payment: payment history for previous loans at Home Credit. There is one row for every made payment and one row for every missed payment. This diagram shows how all of the data is related: ![image](https://storage.googleapis.com/kaggle-media/competitions/home-credit/home_credit.png) Moreover, we are provided with the definitions of all the columns (in `HomeCredit_columns_description.csv`) and an example of the expected submission file. In this notebook, we will stick to using only the main application training and testing data. Although if we want to have any hope of seriously competing, we need to use all the data, for now we will stick to one file which should be more manageable. This will let us establish a baseline that we can then improve upon. With these projects, it's best to build up an understanding of the problem a little at a time rather than diving all the way in and getting completely lost! 
## Metric: ROC AUC Once we have a grasp of the data (reading through the [column descriptions](https://www.kaggle.com/c/home-credit-default-risk/data) helps immensely), we need to understand the metric by which our submission is judged. In this case, it is a common classification metric known as the [Receiver Operating Characteristic Area Under the Curve (ROC AUC, also sometimes called AUROC)](https://stats.stackexchange.com/questions/132777/what-does-auc-stand-for-and-what-is-it). The ROC AUC may sound intimidating, but it is relatively straightforward once you can get your head around the two individual concepts. The [Receiver Operating Characteristic (ROC) curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) graphs the true positive rate versus the false positive rate: ![image](http://www.statisticshowto.com/wp-content/uploads/2016/08/ROC-curve.png) A single line on the graph indicates the curve for a single model, and movement along a line indicates changing the threshold used for classifying a positive instance. The threshold starts at 0 in the upper right and goes to 1 in the lower left. A curve that is to the left and above another curve indicates a better model. For example, the blue model is better than the red model, which is better than the black diagonal line which indicates a naive random guessing model. The [Area Under the Curve (AUC)](http://gim.unmc.edu/dxtests/roc3.htm) explains itself by its name! It is simply the area under the ROC curve. (This is the integral of the curve.) This metric is between 0 and 1 with a better model scoring higher. A model that simply guesses at random will have an ROC AUC of 0.5. When we measure a classifier according to the ROC AUC, we do not generate 0 or 1 predictions, but rather a probability between 0 and 1. 
This may be confusing because we usually like to think in terms of accuracy, but when we get into problems with imbalanced classes (we will see this is the case), accuracy is not the best metric. For example, if I wanted to build a model that could detect terrorists with 99.9999% accuracy, I would simply make a model that predicted every single person was not a terrorist. Clearly, this would not be effective (the recall would be zero) and we use more advanced metrics such as ROC AUC or the [F1 score](https://en.wikipedia.org/wiki/F1_score) to more accurately reflect the performance of a classifier. A model with a high ROC AUC will also have a high accuracy, but the [ROC AUC is a better representation of model performance.](https://datascience.stackexchange.com/questions/806/advantages-of-auc-vs-standard-accuracy) Now that we know the background of the data we are using and the metric to maximize, let's get into exploring the data. In this notebook, as mentioned previously, we will stick to the main data sources and simple models which we can build upon in future work. 
__Follow-up Notebooks__ For those looking to keep working on this problem, I have a series of follow-up notebooks: * [Manual Feature Engineering Part One](https://www.kaggle.com/willkoehrsen/introduction-to-manual-feature-engineering) * [Manual Feature Engineering Part Two](https://www.kaggle.com/willkoehrsen/introduction-to-manual-feature-engineering-p2) * [Introduction to Automated Feature Engineering](https://www.kaggle.com/willkoehrsen/automated-feature-engineering-basics) * [Advanced Automated Feature Engineering](https://www.kaggle.com/willkoehrsen/tuning-automated-feature-engineering-exploratory) * [Feature Selection](https://www.kaggle.com/willkoehrsen/introduction-to-feature-selection) * [Intro to Model Tuning: Grid and Random Search](https://www.kaggle.com/willkoehrsen/intro-to-model-tuning-grid-and-random-search) * [Automated Model Tuning](https://www.kaggle.com/willkoehrsen/automated-model-tuning) * [Model Tuning Results](https://www.kaggle.com/willkoehrsen/model-tuning-results-random-vs-bayesian-opt/notebook) __More references__ * [Credit Education](https://myscore.cibil.com/CreditView/creditEducation.page?enterprise=CIBIL&_ga=2.245893574.372615569.1603669858-164953316.1602941832&_gac=1.254345978.1602941832.CjwKCAjwrKr8BRB_EiwA7eFaplQtBsmINtLxLHOCalWYdx-uO20kyaj0AvRVD8WKNO4cj5mP7MoBTRoC6TEQAvD_BwE) * [Credit Appraisal Methodology and Standards](https://www.paisadukan.com/credit-assessment-methodology) I'll add more notebooks as I finish them! Thanks for all the comments! ## Imports We are using a typical data science stack: `numpy`, `pandas`, `sklearn`, `matplotlib`. 
``` # numpy and pandas for data manipulation import numpy as np import pandas as pd # sklearn preprocessing for dealing with categorical variables from sklearn.preprocessing import LabelEncoder # File system manangement import os # Suppress warnings import warnings warnings.filterwarnings('ignore') # matplotlib and seaborn for plotting import matplotlib.pyplot as plt import seaborn as sns import os from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" pd.set_option('display.max_rows', 500) pd.set_option('display.max_colwidth', -1) pathToData = "C:\\Users\\Administrator\\OneDrive\\Documents\\home_credit_default_risk" os.chdir(pathToData) os.listdir() app_train = pd.read_csv("application_train.csv") app_test = pd.read_csv("application_test.csv.zip") app_train.head() app_train.shape # (307511, 122) app_train.dtypes col_desc = pd.read_csv("HomeCredit_columns_description.csv", encoding= 'unicode_escape') col_desc.iloc[:122, 1:-1] ``` ## Domain Knowledge Features Some features generated through domain knowledge to help the algorithm: * `CREDIT_INCOME_PERCENT`: the percentage of the credit amount relative to a client's income * `ANNUITY_INCOME_PERCENT`: the percentage of the loan annuity relative to a client's income * `CREDIT_TERM`: the length of the payment in months (since the annuity is the monthly amount due * `DAYS_EMPLOYED_PERCENT`: the percentage of the days employed relative to the client's age Again, thanks to Aguiar and [his great script](https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features) for exploring these features. 
``` app_train_domain = app_train.copy() app_test_domain = app_test.copy() app_train_domain['CREDIT_INCOME_PERCENT'] = app_train_domain['AMT_CREDIT'] / app_train_domain['AMT_INCOME_TOTAL'] app_train_domain['ANNUITY_INCOME_PERCENT'] = app_train_domain['AMT_ANNUITY'] / app_train_domain['AMT_INCOME_TOTAL'] app_train_domain['CREDIT_TERM'] = app_train_domain['AMT_ANNUITY'] / app_train_domain['AMT_CREDIT'] app_train_domain['DAYS_EMPLOYED_PERCENT'] = app_train_domain['DAYS_EMPLOYED'] / app_train_domain['DAYS_BIRTH'] app_test_domain['CREDIT_INCOME_PERCENT'] = app_test_domain['AMT_CREDIT'] / app_test_domain['AMT_INCOME_TOTAL'] app_test_domain['ANNUITY_INCOME_PERCENT'] = app_test_domain['AMT_ANNUITY'] / app_test_domain['AMT_INCOME_TOTAL'] app_test_domain['CREDIT_TERM'] = app_test_domain['AMT_ANNUITY'] / app_test_domain['AMT_CREDIT'] app_test_domain['DAYS_EMPLOYED_PERCENT'] = app_test_domain['DAYS_EMPLOYED'] / app_test_domain['DAYS_BIRTH'] pre_app = pd.read_csv("previous_application.csv.zip") pre_app.shape # (1670214, 37) pre_app.head() pre_app.isnull().sum().sort_values(ascending = False) col_desc.iloc[173:211, :] ``` ## Read in Data First, we can list all the available data files. There are a total of 9 files: 1 main file for training (with target) 1 main file for testing (without the target), 1 example submission file, and 6 other files containing additional information about each loan. ``` # List files available print(os.listdir("../input/")) # Training data app_train = pd.read_csv('../input/application_train.csv') print('Training data shape: ', app_train.shape) app_train.head() ``` The training data has 307511 observations (each one a separate loan) and 122 features (variables) including the `TARGET` (the label we want to predict). ``` # Testing data features app_test = pd.read_csv('../input/application_test.csv') print('Testing data shape: ', app_test.shape) app_test.head() ``` The test set is considerably smaller and lacks a `TARGET` column. 
# Exploratory Data Analysis Exploratory Data Analysis (EDA) is an open-ended process where we calculate statistics and make figures to find trends, anomalies, patterns, or relationships within the data. The goal of EDA is to learn what our data can tell us. It generally starts out with a high level overview, then narrows in to specific areas as we find intriguing areas of the data. The findings may be interesting in their own right, or they can be used to inform our modeling choices, such as by helping us decide which features to use. ## Examine the Distribution of the Target Column The target is what we are asked to predict: either a 0 for the loan was repaid on time, or a 1 indicating the client had payment difficulties. We can first examine the number of loans falling into each category. ``` app_train['TARGET'].value_counts() app_train['TARGET'].astype(int).plot.hist(); ``` From this information, we see this is an [_imbalanced class problem_](http://www.chioka.in/class-imbalance-problem/). There are far more loans that were repaid on time than loans that were not repaid. Once we get into more sophisticated machine learning models, we can [weight the classes](http://xgboost.readthedocs.io/en/latest/parameter.html) by their representation in the data to reflect this imbalance. ## Examine Missing Values Next we can look at the number and percentage of missing values in each column. 
``` # Function to calculate missing values by column# Funct def missing_values_table(df): # Total missing values mis_val = df.isnull().sum() # Percentage of missing values mis_val_percent = 100 * df.isnull().sum() / len(df) # Make a table with the results mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1) # Rename the columns mis_val_table_ren_columns = mis_val_table.rename( columns = {0 : 'Missing Values', 1 : '% of Total Values'}) # Sort the table by percentage of missing descending mis_val_table_ren_columns = mis_val_table_ren_columns[ mis_val_table_ren_columns.iloc[:,1] != 0].sort_values( '% of Total Values', ascending=False).round(1) # Print some summary information print ("Your selected dataframe has " + str(df.shape[1]) + " columns.\n" "There are " + str(mis_val_table_ren_columns.shape[0]) + " columns that have missing values.") # Return the dataframe with missing information return mis_val_table_ren_columns # Missing values statistics missing_values = missing_values_table(app_train) missing_values.head(20) ``` When it comes time to build our machine learning models, we will have to fill in these missing values (known as imputation). In later work, we will use models such as XGBoost that can [handle missing values with no need for imputation](https://stats.stackexchange.com/questions/235489/xgboost-can-handle-missing-data-in-the-forecasting-phase). Another option would be to drop columns with a high percentage of missing values, although it is impossible to know ahead of time if these columns will be helpful to our model. Therefore, we will keep all of the columns for now. ## Column Types Let's look at the number of columns of each data type. `int64` and `float64` are numeric variables ([which can be either discrete or continuous](https://stats.stackexchange.com/questions/206/what-is-the-difference-between-discrete-data-and-continuous-data)). 
`object` columns contain strings and are [categorical features.](http://support.minitab.com/en-us/minitab-express/1/help-and-how-to/modeling-statistics/regression/supporting-topics/basics/what-are-categorical-discrete-and-continuous-variables/) . 
```
# Number of each type of column
app_train.dtypes.value_counts()
```
Let's now look at the number of unique entries in each of the `object` (categorical) columns. 
```
# Number of unique classes in each object column
app_train.select_dtypes('object').apply(pd.Series.nunique, axis = 0)
```
Most of the categorical variables have a relatively small number of unique entries. We will need to find a way to deal with these categorical variables! ## Encoding Categorical Variables Before we go any further, we need to deal with pesky categorical variables. A machine learning model unfortunately cannot deal with categorical variables (except for some models such as [LightGBM](http://lightgbm.readthedocs.io/en/latest/Features.html)). Therefore, we have to find a way to encode (represent) these variables as numbers before handing them off to the model. There are two main ways to carry out this process: * Label encoding: assign each unique category in a categorical variable with an integer. No new columns are created. An example is shown below ![image](https://raw.githubusercontent.com/WillKoehrsen/Machine-Learning-Projects/master/label_encoding.png) * One-hot encoding: create a new column for each unique category in a categorical variable. Each observation receives a 1 in the column for its corresponding category and a 0 in all other new columns. ![image](https://raw.githubusercontent.com/WillKoehrsen/Machine-Learning-Projects/master/one_hot_encoding.png) The problem with label encoding is that it gives the categories an arbitrary ordering. The value assigned to each of the categories is random and does not reflect any inherent aspect of the category. 
In the example above, programmer receives a 4 and data scientist a 1, but if we did the same process again, the labels could be reversed or completely different. The actual assignment of the integers is arbitrary. Therefore, when we perform label encoding, the model might use the relative value of the feature (for example programmer = 4 and data scientist = 1) to assign weights which is not what we want. If we only have two unique values for a categorical variable (such as Male/Female), then label encoding is fine, but for more than 2 unique categories, one-hot encoding is the safe option. There is some debate about the relative merits of these approaches, and some models can deal with label encoded categorical variables with no issues. [Here is a good Stack Overflow discussion](https://datascience.stackexchange.com/questions/9443/when-to-use-one-hot-encoding-vs-labelencoder-vs-dictvectorizor). I think (and this is just a personal opinion) for categorical variables with many classes, one-hot encoding is the safest approach because it does not impose arbitrary values to categories. The only downside to one-hot encoding is that the number of features (dimensions of the data) can explode with categorical variables with many categories. To deal with this, we can perform one-hot encoding followed by [PCA](http://www.cs.otago.ac.nz/cosc453/student_tutorials/principal_components.pdf) or other [dimensionality reduction methods](https://www.analyticsvidhya.com/blog/2015/07/dimension-reduction-methods/) to reduce the number of dimensions (while still trying to preserve information). In this notebook, we will use Label Encoding for any categorical variables with only 2 categories and One-Hot Encoding for any categorical variables with more than 2 categories. This process may need to change as we get further into the project, but for now, we will see where this gets us. (We will also not use any dimensionality reduction in this notebook but will explore in future iterations). 
### Label Encoding and One-Hot Encoding Let's implement the policy described above: for any categorical variable (`dtype == object`) with 2 unique categories, we will use label encoding, and for any categorical variable with more than 2 unique categories, we will use one-hot encoding. For label encoding, we use the Scikit-Learn `LabelEncoder` and for one-hot encoding, the pandas `get_dummies(df)` function. ``` # Create a label encoder object le = LabelEncoder() le_count = 0 # Iterate through the columns for col in app_train: if app_train[col].dtype == 'object': # If 2 or fewer unique categories if len(list(app_train[col].unique())) <= 2: # Train on the training data le.fit(app_train[col]) # Transform both training and testing data app_train[col] = le.transform(app_train[col]) app_test[col] = le.transform(app_test[col]) # Keep track of how many columns were label encoded le_count += 1 print('%d columns were label encoded.' % le_count) # one-hot encoding of categorical variables app_train = pd.get_dummies(app_train) app_test = pd.get_dummies(app_test) print('Training Features shape: ', app_train.shape) print('Testing Features shape: ', app_test.shape) ``` ### Aligning Training and Testing Data There need to be the same features (columns) in both the training and testing data. One-hot encoding has created more columns in the training data because there were some categorical variables with categories not represented in the testing data. To remove the columns in the training data that are not in the testing data, we need to `align` the dataframes. First we extract the target column from the training data (because this is not in the testing data but we need to keep this information). When we do the align, we must make sure to set `axis = 1` to align the dataframes based on the columns and not on the rows! 
```
train_labels = app_train['TARGET']

# Align the training and testing data, keep only columns present in both dataframes
app_train, app_test = app_train.align(app_test, join = 'inner', axis = 1)

# Add the target back in
app_train['TARGET'] = train_labels

print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)
```
The training and testing datasets now have the same features which is required for machine learning. The number of features has grown significantly due to one-hot encoding. At some point we probably will want to try [dimensionality reduction (removing features that are not relevant)](https://en.wikipedia.org/wiki/Dimensionality_reduction) to reduce the size of the datasets. ## Back to Exploratory Data Analysis ### Anomalies One problem we always want to be on the lookout for when doing EDA is anomalies within the data. These may be due to mis-typed numbers, errors in measuring equipment, or they could be valid but extreme measurements. One way to spot anomalies quantitatively is by looking at the statistics of a column using the `describe` method. The numbers in the `DAYS_BIRTH` column are negative because they are recorded relative to the current loan application. To see these stats in years, we can multiply by -1 and divide by the number of days in a year: 
```
(app_train['DAYS_BIRTH'] / -365).describe()
```
Those ages look reasonable. There are no outliers for the age on either the high or low end. How about the days of employment? 
```
app_train['DAYS_EMPLOYED'].describe()
```
That doesn't look right! The maximum value (besides being positive) is about 1000 years! 
```
app_train['DAYS_EMPLOYED'].plot.hist(title = 'Days Employment Histogram');
plt.xlabel('Days Employment');
```
Just out of curiosity, let's subset the anomalous clients and see if they tend to have higher or low rates of default than the rest of the clients. 
```
anom = app_train[app_train['DAYS_EMPLOYED'] == 365243]
non_anom = app_train[app_train['DAYS_EMPLOYED'] != 365243]
print('The non-anomalies default on %0.2f%% of loans' % (100 * non_anom['TARGET'].mean()))
print('The anomalies default on %0.2f%% of loans' % (100 * anom['TARGET'].mean()))
print('There are %d anomalous days of employment' % len(anom))
```
Well that is extremely interesting! It turns out that the anomalies have a lower rate of default. Handling the anomalies depends on the exact situation, with no set rules. One of the safest approaches is just to set the anomalies to a missing value and then have them filled in (using Imputation) before machine learning. In this case, since all the anomalies have the exact same value, we want to fill them in with the same value in case all of these loans share something in common. The anomalous values seem to have some importance, so we want to tell the machine learning model if we did in fact fill in these values. As a solution, we will fill in the anomalous values with not a number (`np.nan`) and then create a new boolean column indicating whether or not the value was anomalous. 
```
# Create an anomalous flag column
app_train['DAYS_EMPLOYED_ANOM'] = app_train["DAYS_EMPLOYED"] == 365243

# Replace the anomalous values with nan
app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True)

app_train['DAYS_EMPLOYED'].plot.hist(title = 'Days Employment Histogram');
plt.xlabel('Days Employment');
```
The distribution looks to be much more in line with what we would expect, and we also have created a new column to tell the model that these values were originally anomalous (because we will have to fill in the nans with some value, probably the median of the column). The other columns with `DAYS` in the dataframe look to be about what we expect with no obvious outliers. As an extremely important note, anything we do to the training data we also have to do to the testing data. 
Let's make sure to create the new column and fill in the existing column with `np.nan` in the testing data. 
```
app_test['DAYS_EMPLOYED_ANOM'] = app_test["DAYS_EMPLOYED"] == 365243
app_test["DAYS_EMPLOYED"].replace({365243: np.nan}, inplace = True)

print('There are %d anomalies in the test data out of %d entries' % (app_test["DAYS_EMPLOYED_ANOM"].sum(), len(app_test)))
```
### Correlations Now that we have dealt with the categorical variables and the outliers, let's continue with the EDA. One way to try and understand the data is by looking for correlations between the features and the target. We can calculate the Pearson correlation coefficient between every variable and the target using the `.corr` dataframe method. The correlation coefficient is not the greatest method to represent "relevance" of a feature, but it does give us an idea of possible relationships within the data. Some [general interpretations of the absolute value of the correlation coefficient](http://www.statstutor.ac.uk/resources/uploaded/pearsons.pdf) are: * .00-.19 “very weak” * .20-.39 “weak” * .40-.59 “moderate” * .60-.79 “strong” * .80-1.0 “very strong” 
```
# Find correlations with the target and sort
correlations = app_train.corr()['TARGET'].sort_values()

# Display correlations
print('Most Positive Correlations:\n', correlations.tail(15))
print('\nMost Negative Correlations:\n', correlations.head(15))
```
Let's take a look at some of more significant correlations: the `DAYS_BIRTH` is the most positive correlation. (except for `TARGET` because the correlation of a variable with itself is always 1!) Looking at the documentation, `DAYS_BIRTH` is the age in days of the client at the time of the loan in negative days (for whatever reason!). The correlation is positive, but the value of this feature is actually negative, meaning that as the client gets older, they are less likely to default on their loan (ie the target == 0). 
That's a little confusing, so we will take the absolute value of the feature and then the correlation will be negative. ### Effect of Age on Repayment ``` # Find the correlation of the positive days since birth and target app_train['DAYS_BIRTH'] = abs(app_train['DAYS_BIRTH']) app_train['DAYS_BIRTH'].corr(app_train['TARGET']) ``` As the client gets older, there is a negative linear relationship with the target meaning that as clients get older, they tend to repay their loans on time more often. Let's start looking at this variable. First, we can make a histogram of the age. We will put the x axis in years to make the plot a little more understandable. ``` # Set the style of plots plt.style.use('fivethirtyeight') # Plot the distribution of ages in years plt.hist(app_train['DAYS_BIRTH'] / 365, edgecolor = 'k', bins = 25) plt.title('Age of Client'); plt.xlabel('Age (years)'); plt.ylabel('Count'); ``` By itself, the distribution of age does not tell us much other than that there are no outliers as all the ages are reasonable. To visualize the effect of the age on the target, we will next make a [kernel density estimation plot](https://en.wikipedia.org/wiki/Kernel_density_estimation) (KDE) colored by the value of the target. A [kernel density estimate plot shows the distribution of a single variable](https://chemicalstatistician.wordpress.com/2013/06/09/exploratory-data-analysis-kernel-density-estimation-in-r-on-ozone-pollution-data-in-new-york-and-ozonopolis/) and can be thought of as a smoothed histogram (it is created by computing a kernel, usually a Gaussian, at each data point and then averaging all the individual kernels to develop a single smooth curve). We will use the seaborn `kdeplot` for this graph. 
```
plt.figure(figsize = (10, 8))

# KDE plot of loans that were repaid on time
sns.kdeplot(app_train.loc[app_train['TARGET'] == 0, 'DAYS_BIRTH'] / 365, label = 'target == 0')

# KDE plot of loans which were not repaid on time
sns.kdeplot(app_train.loc[app_train['TARGET'] == 1, 'DAYS_BIRTH'] / 365, label = 'target == 1')

# Labeling of plot
plt.xlabel('Age (years)'); plt.ylabel('Density'); plt.title('Distribution of Ages');
```
The target == 1 curve skews towards the younger end of the range. Although this is not a significant correlation (-0.07 correlation coefficient), this variable is likely going to be useful in a machine learning model because it does affect the target. Let's look at this relationship in another way: average failure to repay loans by age bracket. To make this graph, first we `cut` the age category into bins of 5 years each. Then, for each bin, we calculate the average value of the target, which tells us the ratio of loans that were not repaid in each age category. 
```
# Age information into a separate dataframe
age_data = app_train[['TARGET', 'DAYS_BIRTH']]
age_data['YEARS_BIRTH'] = age_data['DAYS_BIRTH'] / 365

# Bin the age data
age_data['YEARS_BINNED'] = pd.cut(age_data['YEARS_BIRTH'], bins = np.linspace(20, 70, num = 11))
age_data.head(10)

# Group by the bin and calculate averages
age_groups  = age_data.groupby('YEARS_BINNED').mean()
age_groups

plt.figure(figsize = (8, 8))

# Graph the age bins and the average of the target as a bar plot
plt.bar(age_groups.index.astype(str), 100 * age_groups['TARGET'])

# Plot labeling
plt.xticks(rotation = 75); plt.xlabel('Age Group (years)'); plt.ylabel('Failure to Repay (%)')
plt.title('Failure to Repay by Age Group');
```
There is a clear trend: younger applicants are more likely to not repay the loan! The rate of failure to repay is above 10% for the youngest three age groups and below 5% for the oldest age group. 
This is information that could be directly used by the bank: because younger clients are less likely to repay the loan, maybe they should be provided with more guidance or financial planning tips. This does not mean the bank should discriminate against younger clients, but it would be smart to take precautionary measures to help younger clients pay on time. ### Exterior Sources The 3 variables with the strongest negative correlations with the target are `EXT_SOURCE_1`, `EXT_SOURCE_2`, and `EXT_SOURCE_3`. According to the documentation, these features represent a "normalized score from external data source". I'm not sure what this exactly means, but it may be a cumulative sort of credit rating made using numerous sources of data. Let's take a look at these variables. First, we can show the correlations of the `EXT_SOURCE` features with the target and with each other. 
```
# Extract the EXT_SOURCE variables and show correlations
ext_data = app_train[['TARGET', 'EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']]
ext_data_corrs = ext_data.corr()
ext_data_corrs

plt.figure(figsize = (8, 6))

# Heatmap of correlations
sns.heatmap(ext_data_corrs, cmap = plt.cm.RdYlBu_r, vmin = -0.25, annot = True, vmax = 0.6)
plt.title('Correlation Heatmap');
```
All three `EXT_SOURCE` features have negative correlations with the target, indicating that as the value of the `EXT_SOURCE` increases, the client is more likely to repay the loan. We can also see that `DAYS_BIRTH` is positively correlated with `EXT_SOURCE_1` indicating that maybe one of the factors in this score is the client age. Next we can look at the distribution of each of these features colored by the value of the target. This will let us visualize the effect of this variable on the target. 
``` plt.figure(figsize = (10, 12)) # iterate through the sources for i, source in enumerate(['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']): # create a new subplot for each source plt.subplot(3, 1, i + 1) # plot repaid loans sns.kdeplot(app_train.loc[app_train['TARGET'] == 0, source], label = 'target == 0') # plot loans that were not repaid sns.kdeplot(app_train.loc[app_train['TARGET'] == 1, source], label = 'target == 1') # Label the plots plt.title('Distribution of %s by Target Value' % source) plt.xlabel('%s' % source); plt.ylabel('Density'); plt.tight_layout(h_pad = 2.5) ``` `EXT_SOURCE_3` displays the greatest difference between the values of the target. We can clearly see that this feature has some relationship to the likelihood of an applicant to repay a loan. The relationship is not very strong (in fact they are all [considered very weak](http://www.statstutor.ac.uk/resources/uploaded/pearsons.pdf), but these variables will still be useful for a machine learning model to predict whether or not an applicant will repay a loan on time. ## Pairs Plot As a final exploratory plot, we can make a pairs plot of the `EXT_SOURCE` variables and the `DAYS_BIRTH` variable. The [Pairs Plot](https://towardsdatascience.com/visualizing-data-with-pair-plots-in-python-f228cf529166) is a great exploration tool because it lets us see relationships between multiple pairs of variables as well as distributions of single variables. Here we are using the seaborn visualization library and the PairGrid function to create a Pairs Plot with scatterplots on the upper triangle, histograms on the diagonal, and 2D kernel density plots and correlation coefficients on the lower triangle. If you don't understand this code, that's all right! Plotting in Python can be overly complex, and for anything beyond the simplest graphs, I usually find an existing implementation and adapt the code (don't repeat yourself)! 
``` # Copy the data for plotting plot_data = ext_data.drop(columns = ['DAYS_BIRTH']).copy() # Add in the age of the client in years plot_data['YEARS_BIRTH'] = age_data['YEARS_BIRTH'] # Drop na values and limit to first 100000 rows plot_data = plot_data.dropna().loc[:100000, :] # Function to calculate correlation coefficient between two columns def corr_func(x, y, **kwargs): r = np.corrcoef(x, y)[0][1] ax = plt.gca() ax.annotate("r = {:.2f}".format(r), xy=(.2, .8), xycoords=ax.transAxes, size = 20) # Create the pairgrid object grid = sns.PairGrid(data = plot_data, size = 3, diag_sharey=False, hue = 'TARGET', vars = [x for x in list(plot_data.columns) if x != 'TARGET']) # Upper is a scatter plot grid.map_upper(plt.scatter, alpha = 0.2) # Diagonal is a histogram grid.map_diag(sns.kdeplot) # Bottom is density plot grid.map_lower(sns.kdeplot, cmap = plt.cm.OrRd_r); plt.suptitle('Ext Source and Age Features Pairs Plot', size = 32, y = 1.05); ``` In this plot, the red indicates loans that were not repaid and the blue are loans that are paid. We can see the different relationships within the data. There does appear to be a moderate positive linear relationship between the `EXT_SOURCE_1` and the `DAYS_BIRTH` (or equivalently `YEARS_BIRTH`), indicating that this feature may take into account the age of the client. # Feature Engineering Kaggle competitions are won by feature engineering: those win are those who can create the most useful features out of the data. (This is true for the most part as the winning models, at least for structured data, all tend to be variants on [gradient boosting](http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/)). This represents one of the patterns in machine learning: feature engineering has a greater return on investment than model building and hyperparameter tuning. [This is a great article on the subject)](https://www.featurelabs.com/blog/secret-to-data-science-success/). 
As Andrew Ng is fond of saying: "applied machine learning is basically feature engineering." While choosing the right model and optimal settings are important, the model can only learn from the data it is given. Making sure this data is as relevant to the task as possible is the job of the data scientist (and maybe some [automated tools](https://docs.featuretools.com/getting_started/install.html) to help us out). Feature engineering refers to a general process and can involve both feature construction: adding new features from the existing data, and feature selection: choosing only the most important features or other methods of dimensionality reduction. There are many techniques we can use to both create features and select features. We will do a lot of feature engineering when we start using the other data sources, but in this notebook we will try only two simple feature construction methods: * Polynomial features * Domain knowledge features ## Polynomial Features One simple feature construction method is called [polynomial features](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html). In this method, we make features that are powers of existing features as well as interaction terms between existing features. For example, we can create variables `EXT_SOURCE_1^2` and `EXT_SOURCE_2^2` and also variables such as `EXT_SOURCE_1` x `EXT_SOURCE_2`, `EXT_SOURCE_1` x `EXT_SOURCE_2^2`, `EXT_SOURCE_1^2` x `EXT_SOURCE_2^2`, and so on. These features that are a combination of multiple individual variables are called [interaction terms](https://en.wikipedia.org/wiki/Interaction_(statistics)) because they capture the interactions between variables. In other words, while two variables by themselves may not have a strong influence on the target, combining them together into a single interaction variable might show a relationship with the target. 
[Interaction terms are commonly used in statistical models](https://www.theanalysisfactor.com/interpreting-interactions-in-regression/) to capture the effects of multiple variables, but I do not see them used as often in machine learning. Nonetheless, we can try out a few to see if they might help our model to predict whether or not a client will repay a loan. Jake VanderPlas writes about [polynomial features in his excellent book the Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/05.04-feature-engineering.html) for those who want more information. In the following code, we create polynomial features using the `EXT_SOURCE` variables and the `DAYS_BIRTH` variable. [Scikit-Learn has a useful class called `PolynomialFeatures`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) that creates the polynomials and the interaction terms up to a specified degree. We can use a degree of 3 to see the results (when we are creating polynomial features, we want to avoid using too high of a degree, both because the number of features scales exponentially with the degree, and because we can run into [problems with overfitting](http://scikit-learn.org/stable/auto_examples/model_selection/plot_underfitting_overfitting.html#sphx-glr-auto-examples-model-selection-plot-underfitting-overfitting-py)). 
``` # Make a new dataframe for polynomial features poly_features = app_train[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH', 'TARGET']] poly_features_test = app_test[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']] # imputer for handling missing values from sklearn.preprocessing import Imputer imputer = Imputer(strategy = 'median') poly_target = poly_features['TARGET'] poly_features = poly_features.drop(columns = ['TARGET']) # Need to impute missing values poly_features = imputer.fit_transform(poly_features) poly_features_test = imputer.transform(poly_features_test) from sklearn.preprocessing import PolynomialFeatures # Create the polynomial object with specified degree poly_transformer = PolynomialFeatures(degree = 3) # Train the polynomial features poly_transformer.fit(poly_features) # Transform the features poly_features = poly_transformer.transform(poly_features) poly_features_test = poly_transformer.transform(poly_features_test) print('Polynomial Features shape: ', poly_features.shape) ``` This creates a considerable number of new features. To get the names we have to use the polynomial features `get_feature_names` method. ``` poly_transformer.get_feature_names(input_features = ['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH'])[:15] ``` There are 35 features with individual features raised to powers up to degree 3 and interaction terms. Now, we can see whether any of these new features are correlated with the target. 
``` # Create a dataframe of the features poly_features = pd.DataFrame(poly_features, columns = poly_transformer.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH'])) # Add in the target poly_features['TARGET'] = poly_target # Find the correlations with the target poly_corrs = poly_features.corr()['TARGET'].sort_values() # Display most negative and most positive print(poly_corrs.head(10)) print(poly_corrs.tail(5)) ``` Several of the new variables have a greater (in terms of absolute magnitude) correlation with the target than the original features. When we build machine learning models, we can try with and without these features to determine if they actually help the model learn. We will add these features to a copy of the training and testing data and then evaluate models with and without the features. Many times in machine learning, the only way to know if an approach will work is to try it out! ``` # Put test features into dataframe poly_features_test = pd.DataFrame(poly_features_test, columns = poly_transformer.get_feature_names(['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH'])) # Merge polynomial features into training dataframe poly_features['SK_ID_CURR'] = app_train['SK_ID_CURR'] app_train_poly = app_train.merge(poly_features, on = 'SK_ID_CURR', how = 'left') # Merge polnomial features into testing dataframe poly_features_test['SK_ID_CURR'] = app_test['SK_ID_CURR'] app_test_poly = app_test.merge(poly_features_test, on = 'SK_ID_CURR', how = 'left') # Align the dataframes app_train_poly, app_test_poly = app_train_poly.align(app_test_poly, join = 'inner', axis = 1) # Print out the new shapes print('Training data with polynomial features shape: ', app_train_poly.shape) print('Testing data with polynomial features shape: ', app_test_poly.shape) ``` ## Domain Knowledge Features Maybe it's not entirely correct to call this "domain knowledge" because I'm not a credit expert, but perhaps we could call this "attempts at 
applying limited financial knowledge". In this frame of mind, we can make a couple features that attempt to capture what we think may be important for telling whether a client will default on a loan. Here I'm going to use four features that were inspired by [this script](https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features) by Aguiar: * `CREDIT_INCOME_PERCENT`: the percentage of the credit amount relative to a client's income * `ANNUITY_INCOME_PERCENT`: the percentage of the loan annuity relative to a client's income * `CREDIT_TERM`: the length of the payment in months (since the annuity is the monthly amount due) * `DAYS_EMPLOYED_PERCENT`: the percentage of the days employed relative to the client's age Again, thanks to Aguiar and [his great script](https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features) for exploring these features. ``` app_train_domain = app_train.copy() app_test_domain = app_test.copy() app_train_domain['CREDIT_INCOME_PERCENT'] = app_train_domain['AMT_CREDIT'] / app_train_domain['AMT_INCOME_TOTAL'] app_train_domain['ANNUITY_INCOME_PERCENT'] = app_train_domain['AMT_ANNUITY'] / app_train_domain['AMT_INCOME_TOTAL'] app_train_domain['CREDIT_TERM'] = app_train_domain['AMT_ANNUITY'] / app_train_domain['AMT_CREDIT'] app_train_domain['DAYS_EMPLOYED_PERCENT'] = app_train_domain['DAYS_EMPLOYED'] / app_train_domain['DAYS_BIRTH'] app_test_domain['CREDIT_INCOME_PERCENT'] = app_test_domain['AMT_CREDIT'] / app_test_domain['AMT_INCOME_TOTAL'] app_test_domain['ANNUITY_INCOME_PERCENT'] = app_test_domain['AMT_ANNUITY'] / app_test_domain['AMT_INCOME_TOTAL'] app_test_domain['CREDIT_TERM'] = app_test_domain['AMT_ANNUITY'] / app_test_domain['AMT_CREDIT'] app_test_domain['DAYS_EMPLOYED_PERCENT'] = app_test_domain['DAYS_EMPLOYED'] / app_test_domain['DAYS_BIRTH'] ``` #### Visualize New Variables We should explore these __domain knowledge__ variables visually in a graph. 
For all of these, we will make the same KDE plot colored by the value of the `TARGET`. ``` plt.figure(figsize = (12, 20)) # iterate through the new features for i, feature in enumerate(['CREDIT_INCOME_PERCENT', 'ANNUITY_INCOME_PERCENT', 'CREDIT_TERM', 'DAYS_EMPLOYED_PERCENT']): # create a new subplot for each source plt.subplot(4, 1, i + 1) # plot repaid loans sns.kdeplot(app_train_domain.loc[app_train_domain['TARGET'] == 0, feature], label = 'target == 0') # plot loans that were not repaid sns.kdeplot(app_train_domain.loc[app_train_domain['TARGET'] == 1, feature], label = 'target == 1') # Label the plots plt.title('Distribution of %s by Target Value' % feature) plt.xlabel('%s' % feature); plt.ylabel('Density'); plt.tight_layout(h_pad = 2.5) ``` It's hard to say ahead of time if these new features will be useful. The only way to tell for sure is to try them out! # Baseline For a naive baseline, we could guess the same value for all examples on the testing set. We are asked to predict the probability of not repaying the loan, so if we are entirely unsure, we would guess 0.5 for all observations on the test set. This will get us a Reciever Operating Characteristic Area Under the Curve (AUC ROC) of 0.5 in the competition ([random guessing on a classification task will score a 0.5](https://stats.stackexchange.com/questions/266387/can-auc-roc-be-between-0-0-5)). Since we already know what score we are going to get, we don't really need to make a naive baseline guess. Let's use a slightly more sophisticated model for our actual baseline: Logistic Regression. ## Logistic Regression Implementation Here I will focus on implementing the model rather than explaining the details, but for those who want to learn more about the theory of machine learning algorithms, I recommend both [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/) and [Hands-On Machine Learning with Scikit-Learn and TensorFlow](http://shop.oreilly.com/product/0636920052289.do). 
Both of these books present the theory and also the code needed to make the models (in R and Python respectively). They both teach with the mindset that the best way to learn is by doing, and they are very effective! To get a baseline, we will use all of the features after encoding the categorical variables. We will preprocess the data by filling in the missing values (imputation) and normalizing the range of the features (feature scaling). The following code performs both of these preprocessing steps. ``` from sklearn.preprocessing import MinMaxScaler, Imputer # Drop the target from the training data if 'TARGET' in app_train: train = app_train.drop(columns = ['TARGET']) else: train = app_train.copy() # Feature names features = list(train.columns) # Copy of the testing data test = app_test.copy() # Median imputation of missing values imputer = Imputer(strategy = 'median') # Scale each feature to 0-1 scaler = MinMaxScaler(feature_range = (0, 1)) # Fit on the training data imputer.fit(train) # Transform both training and testing data train = imputer.transform(train) test = imputer.transform(app_test) # Repeat with the scaler scaler.fit(train) train = scaler.transform(train) test = scaler.transform(test) print('Training data shape: ', train.shape) print('Testing data shape: ', test.shape) ``` We will use [`LogisticRegression`from Scikit-Learn](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) for our first model. The only change we will make from the default model settings is to lower the [regularization parameter](http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression), C, which controls the amount of overfitting (a lower value should decrease overfitting). This will get us slightly better results than the default `LogisticRegression`, but it still will set a low bar for any future models. 
Here we use the familiar Scikit-Learn modeling syntax: we first create the model, then we train the model using `.fit` and then we make predictions on the testing data using `.predict_proba` (remember that we want probabilities and not a 0 or 1). ``` from sklearn.linear_model import LogisticRegression # Make the model with the specified regularization parameter log_reg = LogisticRegression(C = 0.0001) # Train on the training data log_reg.fit(train, train_labels) ``` Now that the model has been trained, we can use it to make predictions. We want to predict the probabilities of not paying a loan, so we use the model `predict_proba` method. This returns an m x 2 array where m is the number of observations. The first column is the probability of the target being 0 and the second column is the probability of the target being 1 (so for a single row, the two columns must sum to 1). We want the probability the loan is not repaid, so we will select the second column. The following code makes the predictions and selects the correct column. ``` # Make predictions # Make sure to select the second column only log_reg_pred = log_reg.predict_proba(test)[:, 1] ``` The predictions must be in the format shown in the `sample_submission.csv` file, where there are only two columns: `SK_ID_CURR` and `TARGET`. We will create a dataframe in this format from the test set and the predictions called `submit`. ``` # Submission dataframe submit = app_test[['SK_ID_CURR']] submit['TARGET'] = log_reg_pred submit.head() ``` The predictions represent a probability between 0 and 1 that the loan will not be repaid. If we were using these predictions to classify applicants, we could set a probability threshold for determining that a loan is risky. ``` # Save the submission to a csv file submit.to_csv('log_reg_baseline.csv', index = False) ``` The submission has now been saved to the virtual environment in which our notebook is running. 
To access the submission, at the end of the notebook, we will hit the blue Commit & Run button at the upper right of the kernel. This runs the entire notebook and then lets us download any files that are created during the run. Once we run the notebook, the files created are available in the Versions tab under the Output sub-tab. From here, the submission files can be submitted to the competition or downloaded. Since there are several models in this notebook, there will be multiple output files. __The logistic regression baseline should score around 0.671 when submitted.__ ## Improved Model: Random Forest To try and beat the poor performance of our baseline, we can update the algorithm. Let's try using a Random Forest on the same training data to see how that affects performance. The Random Forest is a much more powerful model especially when we use hundreds of trees. We will use 100 trees in the random forest. ``` from sklearn.ensemble import RandomForestClassifier # Make the random forest classifier random_forest = RandomForestClassifier(n_estimators = 100, random_state = 50, verbose = 1, n_jobs = -1) # Train on the training data random_forest.fit(train, train_labels) # Extract feature importances feature_importance_values = random_forest.feature_importances_ feature_importances = pd.DataFrame({'feature': features, 'importance': feature_importance_values}) # Make predictions on the test data predictions = random_forest.predict_proba(test)[:, 1] # Make a submission dataframe submit = app_test[['SK_ID_CURR']] submit['TARGET'] = predictions # Save the submission dataframe submit.to_csv('random_forest_baseline.csv', index = False) ``` These predictions will also be available when we run the entire notebook. __This model should score around 0.678 when submitted.__ ### Make Predictions using Engineered Features The only way to see if the Polynomial Features and Domain knowledge improved the model is to train a test a model on these features! 
We can then compare the submission performance to that for the model without these features to gauge the effect of our feature engineering. ``` poly_features_names = list(app_train_poly.columns) # Impute the polynomial features imputer = Imputer(strategy = 'median') poly_features = imputer.fit_transform(app_train_poly) poly_features_test = imputer.transform(app_test_poly) # Scale the polynomial features scaler = MinMaxScaler(feature_range = (0, 1)) poly_features = scaler.fit_transform(poly_features) poly_features_test = scaler.transform(poly_features_test) random_forest_poly = RandomForestClassifier(n_estimators = 100, random_state = 50, verbose = 1, n_jobs = -1) # Train on the training data random_forest_poly.fit(poly_features, train_labels) # Make predictions on the test data predictions = random_forest_poly.predict_proba(poly_features_test)[:, 1] # Make a submission dataframe submit = app_test[['SK_ID_CURR']] submit['TARGET'] = predictions # Save the submission dataframe submit.to_csv('random_forest_baseline_engineered.csv', index = False) ``` This model scored 0.678 when submitted to the competition, exactly the same as that without the engineered features. Given these results, it does not appear that our feature construction helped in this case. #### Testing Domain Features Now we can test the domain features we made by hand. 
``` app_train_domain = app_train_domain.drop(columns = 'TARGET') domain_features_names = list(app_train_domain.columns) # Impute the domainnomial features imputer = Imputer(strategy = 'median') domain_features = imputer.fit_transform(app_train_domain) domain_features_test = imputer.transform(app_test_domain) # Scale the domainnomial features scaler = MinMaxScaler(feature_range = (0, 1)) domain_features = scaler.fit_transform(domain_features) domain_features_test = scaler.transform(domain_features_test) random_forest_domain = RandomForestClassifier(n_estimators = 100, random_state = 50, verbose = 1, n_jobs = -1) # Train on the training data random_forest_domain.fit(domain_features, train_labels) # Extract feature importances feature_importance_values_domain = random_forest_domain.feature_importances_ feature_importances_domain = pd.DataFrame({'feature': domain_features_names, 'importance': feature_importance_values_domain}) # Make predictions on the test data predictions = random_forest_domain.predict_proba(domain_features_test)[:, 1] # Make a submission dataframe submit = app_test[['SK_ID_CURR']] submit['TARGET'] = predictions # Save the submission dataframe submit.to_csv('random_forest_baseline_domain.csv', index = False) ``` This scores 0.679 when submitted which probably shows that the engineered features do not help in this model (however they do help in the Gradient Boosting Model at the end of the notebook). In later notebooks, we will do more [feature engineering](https://docs.featuretools.com/index.html) by using the information from the other data sources. From experience, this will definitely help our model! ## Model Interpretation: Feature Importances As a simple method to see which variables are the most relevant, we can look at the feature importances of the random forest. Given the correlations we saw in the exploratory data analysis, we should expect that the most important features are the `EXT_SOURCE` and the `DAYS_BIRTH`. 
We may use these feature importances as a method of dimensionality reduction in future work. ``` def plot_feature_importances(df): """ Plot importances returned by a model. This can work with any measure of feature importance provided that higher importance is better. Args: df (dataframe): feature importances. Must have the features in a column called `feature` and the importances in a column called `importance` Returns: shows a plot of the 15 most important features df (dataframe): feature importances sorted by importance (highest to lowest) with a column for normalized importance """ # Sort features according to importance df = df.sort_values('importance', ascending = False).reset_index() # Normalize the feature importances to add up to one df['importance_normalized'] = df['importance'] / df['importance'].sum() # Make a horizontal bar chart of feature importances plt.figure(figsize = (10, 6)) ax = plt.subplot() # Need to reverse the index to plot most important on top ax.barh(list(reversed(list(df.index[:15]))), df['importance_normalized'].head(15), align = 'center', edgecolor = 'k') # Set the yticks and labels ax.set_yticks(list(reversed(list(df.index[:15])))) ax.set_yticklabels(df['feature'].head(15)) # Plot labeling plt.xlabel('Normalized Importance'); plt.title('Feature Importances') plt.show() return df # Show the feature importances for the default features feature_importances_sorted = plot_feature_importances(feature_importances) ``` As expected, the most important features are those dealing with `EXT_SOURCE` and `DAYS_BIRTH`. We see that there are only a handful of features with a significant importance to the model, which suggests we may be able to drop many of the features without a decrease in performance (and we may even see an increase in performance.) 
Feature importances are not the most sophisticated method to interpret a model or perform dimensionality reduction, but they let us start to understand what factors our model takes into account when it makes predictions. ``` feature_importances_domain_sorted = plot_feature_importances(feature_importances_domain) ``` We see that all four of our hand-engineered features made it into the top 15 most important! This should give us confidence that our domain knowledge was at least partially on track. # Conclusions In this notebook, we saw how to get started with a Kaggle machine learning competition. We first made sure to understand the data, our task, and the metric by which our submissions will be judged. Then, we performed a fairly simple EDA to try and identify relationships, trends, or anomalies that may help our modeling. Along the way, we performed necessary preprocessing steps such as encoding categorical variables, imputing missing values, and scaling features to a range. Then, we constructed new features out of the existing data to see if doing so could help our model. Once the data exploration, data preparation, and feature engineering was complete, we implemented a baseline model upon which we hope to improve. Then we built a second slightly more complicated model to beat our first score. We also carried out an experiment to determine the effect of adding the engineering variables. We followed the general outline of a [machine learning project](https://towardsdatascience.com/a-complete-machine-learning-walk-through-in-python-part-one-c62152f39420): 1. Understand the problem and the data 2. Data cleaning and formatting (this was mostly done for us) 3. Exploratory Data Analysis 4. Baseline model 5. Improved model 6. Model interpretation (just a little) Machine learning competitions do differ slightly from typical data science problems in that we are concerned only with achieving the best performance on a single metric and do not care about the interpretation. 
However, by attempting to understand how our models make decisions, we can try to improve them or examine the mistakes in order to correct the errors. In future notebooks we will look at incorporating more sources of data, building more complex models (by following the code of others), and improving our scores. I hope this notebook was able to get you up and running in this machine learning competition and that you are now ready to go out on your own - with help from the community - and start working on some great problems! __Running the notebook__: now that we are at the end of the notebook, you can hit the blue Commit & Run button to execute all the code at once. After the run is complete (this should take about 10 minutes), you can then access the files that were created by going to the versions tab and then the output sub-tab. The submission files can be directly submitted to the competition from this tab or they can be downloaded to a local machine and saved. The final part is to share the notebook: go to the settings tab and change the visibility to Public. This allows the entire world to see your work! 
### Follow-up Notebooks For those looking to keep working on this problem, I have a series of follow-up notebooks: * [Manual Feature Engineering Part One](https://www.kaggle.com/willkoehrsen/introduction-to-manual-feature-engineering) * [Manual Feature Engineering Part Two](https://www.kaggle.com/willkoehrsen/introduction-to-manual-feature-engineering-p2) * [Introduction to Automated Feature Engineering](https://www.kaggle.com/willkoehrsen/automated-feature-engineering-basics) * [Advanced Automated Feature Engineering](https://www.kaggle.com/willkoehrsen/tuning-automated-feature-engineering-exploratory) * [Feature Selection](https://www.kaggle.com/willkoehrsen/introduction-to-feature-selection) * [Intro to Model Tuning: Grid and Random Search](https://www.kaggle.com/willkoehrsen/intro-to-model-tuning-grid-and-random-search) As always, I welcome feedback and constructive criticism. I write for Towards Data Science at https://medium.com/@williamkoehrsen/ and can be reached on Twitter at https://twitter.com/koehrsen_will Will # Just for Fun: Light Gradient Boosting Machine Now (if you want, this part is entirely optional) we can step off the deep end and use a real machine learning model: the [gradient boosting machine](https://machinelearningmastery.com/gentle-introduction-gradient-boosting-algorithm-machine-learning/) using the [LightGBM library](http://lightgbm.readthedocs.io/en/latest/Quick-Start.html)! The Gradient Boosting Machine is currently the leading model for learning on structured datasets (especially on Kaggle) and we will probably need some form of this model to do well in the competition. Don't worry, even if this code looks intimidating, it's just a series of small steps that build up to a complete model. I added this code just to show what may be in store for this project, and because it gets us a slightly better score on the leaderboard. 
In future notebooks we will see how to work with more advanced models (which mostly means adapting existing code to make it work better), feature engineering, and feature selection. See you in the next notebook! ``` from sklearn.model_selection import KFold from sklearn.metrics import roc_auc_score import lightgbm as lgb import gc def model(features, test_features, encoding = 'ohe', n_folds = 5): """Train and test a light gradient boosting model using cross validation. Parameters -------- features (pd.DataFrame): dataframe of training features to use for training a model. Must include the TARGET column. test_features (pd.DataFrame): dataframe of testing features to use for making predictions with the model. encoding (str, default = 'ohe'): method for encoding categorical variables. Either 'ohe' for one-hot encoding or 'le' for integer label encoding n_folds (int, default = 5): number of folds to use for cross validation Return -------- submission (pd.DataFrame): dataframe with `SK_ID_CURR` and `TARGET` probabilities predicted by the model. feature_importances (pd.DataFrame): dataframe with the feature importances from the model. valid_metrics (pd.DataFrame): dataframe with training and validation metrics (ROC AUC) for each fold and overall. 
""" # Extract the ids train_ids = features['SK_ID_CURR'] test_ids = test_features['SK_ID_CURR'] # Extract the labels for training labels = features['TARGET'] # Remove the ids and target features = features.drop(columns = ['SK_ID_CURR', 'TARGET']) test_features = test_features.drop(columns = ['SK_ID_CURR']) # One Hot Encoding if encoding == 'ohe': features = pd.get_dummies(features) test_features = pd.get_dummies(test_features) # Align the dataframes by the columns features, test_features = features.align(test_features, join = 'inner', axis = 1) # No categorical indices to record cat_indices = 'auto' # Integer label encoding elif encoding == 'le': # Create a label encoder label_encoder = LabelEncoder() # List for storing categorical indices cat_indices = [] # Iterate through each column for i, col in enumerate(features): if features[col].dtype == 'object': # Map the categorical features to integers features[col] = label_encoder.fit_transform(np.array(features[col].astype(str)).reshape((-1,))) test_features[col] = label_encoder.transform(np.array(test_features[col].astype(str)).reshape((-1,))) # Record the categorical indices cat_indices.append(i) # Catch error if label encoding scheme is not valid else: raise ValueError("Encoding must be either 'ohe' or 'le'") print('Training Data Shape: ', features.shape) print('Testing Data Shape: ', test_features.shape) # Extract feature names feature_names = list(features.columns) # Convert to np arrays features = np.array(features) test_features = np.array(test_features) # Create the kfold object k_fold = KFold(n_splits = n_folds, shuffle = True, random_state = 50) # Empty array for feature importances feature_importance_values = np.zeros(len(feature_names)) # Empty array for test predictions test_predictions = np.zeros(test_features.shape[0]) # Empty array for out of fold validation predictions out_of_fold = np.zeros(features.shape[0]) # Lists for recording validation and training scores valid_scores = [] train_scores = [] # 
Iterate through each fold for train_indices, valid_indices in k_fold.split(features): # Training data for the fold train_features, train_labels = features[train_indices], labels[train_indices] # Validation data for the fold valid_features, valid_labels = features[valid_indices], labels[valid_indices] # Create the model model = lgb.LGBMClassifier(n_estimators=10000, objective = 'binary', class_weight = 'balanced', learning_rate = 0.05, reg_alpha = 0.1, reg_lambda = 0.1, subsample = 0.8, n_jobs = -1, random_state = 50) # Train the model model.fit(train_features, train_labels, eval_metric = 'auc', eval_set = [(valid_features, valid_labels), (train_features, train_labels)], eval_names = ['valid', 'train'], categorical_feature = cat_indices, early_stopping_rounds = 100, verbose = 200) # Record the best iteration best_iteration = model.best_iteration_ # Record the feature importances feature_importance_values += model.feature_importances_ / k_fold.n_splits # Make predictions test_predictions += model.predict_proba(test_features, num_iteration = best_iteration)[:, 1] / k_fold.n_splits # Record the out of fold predictions out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration = best_iteration)[:, 1] # Record the best score valid_score = model.best_score_['valid']['auc'] train_score = model.best_score_['train']['auc'] valid_scores.append(valid_score) train_scores.append(train_score) # Clean up memory gc.enable() del model, train_features, valid_features gc.collect() # Make the submission dataframe submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': test_predictions}) # Make the feature importance dataframe feature_importances = pd.DataFrame({'feature': feature_names, 'importance': feature_importance_values}) # Overall validation score valid_auc = roc_auc_score(labels, out_of_fold) # Add the overall scores to the metrics valid_scores.append(valid_auc) train_scores.append(np.mean(train_scores)) # Needed for creating dataframe of validation 
scores fold_names = list(range(n_folds)) fold_names.append('overall') # Dataframe of validation scores metrics = pd.DataFrame({'fold': fold_names, 'train': train_scores, 'valid': valid_scores}) return submission, feature_importances, metrics submission, fi, metrics = model(app_train, app_test) print('Baseline metrics') print(metrics) fi_sorted = plot_feature_importances(fi) submission.to_csv('baseline_lgb.csv', index = False) ``` This submission should score about 0.735 on the leaderboard. We will certainly best that in future work! ``` app_train_domain['TARGET'] = train_labels # Test the domain knolwedge features submission_domain, fi_domain, metrics_domain = model(app_train_domain, app_test_domain) print('Baseline with domain knowledge features metrics') print(metrics_domain) fi_sorted = plot_feature_importances(fi_domain) ``` Again, we see that some of our features made it into the most important. Going forward, we will need to think about what other domain knowledge features may be useful for this problem (or we should consult someone who knows more about the financial industry!) ``` submission_domain.to_csv('baseline_lgb_domain_features.csv', index = False) ``` This model scores about 0.754 when submitted to the public leaderboard indicating that the domain features do improve the performance! [Feature engineering](https://en.wikipedia.org/wiki/Feature_engineering) is going to be a critical part of this competition (as it is for all machine learning problems)!
github_jupyter
<a href="https://colab.research.google.com/github/mbonyani/Spine_Segmentation/blob/main/step2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !ls -lha kaggle.json !pip install -q kaggle !mkdir -p ~/.kaggle !cp kaggle.json ~/.kaggle/ !chmod 600 ~/.kaggle/kaggle.json import kaggle import os dataset_dir = '/content/home/' def download_drive(dataset_dir): """ Downloads dataset from Kaggle and loads it in dataset_dir. """ if not os.path.exists(dataset_dir): kaggle.api.authenticate() kaggle.api.dataset_download_files(dataset="mbonyani/segmentdatactc", path=dataset_dir, unzip=True) print('Download completed.') else: print('dataset already exists.') return True download_drive(dataset_dir) !pip install tensorflow_addons==0.11.2 import numpy as np import matplotlib.pyplot as plt import matplotlib import pandas as pnd from tqdm import tqdm import sys import math import os import zipfile import six import warnings import random import gc import six from math import ceil from sklearn.model_selection import KFold, StratifiedKFold,train_test_split from sklearn.metrics import classification_report, confusion_matrix, accuracy_score from keras.metrics import top_k_categorical_accuracy import cv2 import imgaug as ia from imgaug import augmenters as iaa from moviepy.editor import VideoFileClip from PIL import Image from tensorflow.keras.utils import to_categorical from scipy.io import loadmat import keras.backend as K from keras.engine.topology import get_source_inputs from keras.layers import Activation from keras.layers import AveragePooling3D from keras.layers import BatchNormalization from keras.layers import Conv3D from keras.layers import Conv3DTranspose from keras.layers import Dense from keras.layers import Dropout,Flatten from keras.layers import GlobalAveragePooling3D from keras.layers import GlobalMaxPooling3D from keras.layers import Input from keras.layers import MaxPooling3D from keras.layers 
import Reshape from keras.layers import UpSampling3D from keras.layers import Concatenate from tensorflow.keras.models import Model,load_model from keras.regularizers import l2 !pip install git+https://www.github.com/keras-team/keras-contrib.git from keras_contrib.layers import SubPixelUpscaling from tensorflow.keras.callbacks import LearningRateScheduler from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from tensorflow.keras.optimizers import SGD,Adam import tensorflow import tensorflow.keras.backend as K from tensorflow.keras import backend as keras from tensorflow.keras.models import Model from tensorflow.keras.layers import * from tensorflow.keras.optimizers import * from tensorflow.keras.layers import LeakyReLU from tensorflow.keras.losses import * import gc X_train = np.load('/content/home/train_img.npy').reshape((19637, 128, 128,1)) y_train = np.load('/content/home/train_seg.npy').reshape((19637, 128, 128,1)) gc.collect() X_val = np.load('/content/home/val_img.npy').reshape((7521, 128, 128,1)) y_val = np.load('/content/home/val_seg.npy').reshape((7521, 128, 128,1)) # X_test = np.load('/content/home/data_road_asl.npy') # y_test = np.load('/content/home/data_face_asl.npy') print(X_train.shape) print(y_train.shape) print(X_val.shape) print(y_val.shape) del y_train gc.collect() def closs(y_true, y_pred): def dice_loss(y_true, y_pred): numerator = 2 * tensorflow.reduce_sum(y_true * y_pred, axis=(1,2,3)) denominator = tensorflow.reduce_sum(y_true + y_pred, axis=(1,2,3)) return K.reshape(1 - numerator / denominator, (-1, 1, 1)) return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) def iou_core(y_true, y_pred, smooth=1): intersection = K.sum(K.abs(y_true * y_pred), axis=-1) union = K.sum(y_true,-1) + K.sum(y_pred,-1) - intersection iou = (intersection + smooth) / ( union + smooth) return iou nf=2 input_size = (128,128,1) inputs = Input(input_size) # inputs=BatchNormalization()(inputs) conv1 = Conv2D(nf, 3, padding = 
'same', kernel_initializer = 'he_normal')(inputs) # conv1 = Conv3D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1) conv1=BatchNormalization()(conv1) conv1=LeakyReLU(alpha=0.3)(conv1) pool1 = MaxPooling2D(pool_size=2)(conv1) conv2 = Conv2D(nf*2, 3, padding = 'same', kernel_initializer = 'he_normal')(pool1) # conv2 = Conv3D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2) conv2=BatchNormalization()(conv2) conv2=LeakyReLU(alpha=0.3)(conv2) pool2 = MaxPooling2D(pool_size=2)(conv2) conv3 = Conv2D(nf*4, 3, padding = 'same', kernel_initializer = 'he_normal')(pool2) # conv3 = Conv3D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3) conv3=BatchNormalization()(conv3) conv3=LeakyReLU(alpha=0.3)(conv3) pool3 = MaxPooling2D(pool_size=2)(conv3) conv4 = Conv2D(nf*8, 3, padding = 'same', kernel_initializer = 'he_normal')(pool3) # conv4 = Conv3D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4) conv4=LeakyReLU(alpha=0.3)(conv4) conv4=BatchNormalization()(conv4) drop4 = Dropout(0.5)(conv4) pool4 = MaxPooling2D(pool_size=(2))(drop4) conv5 = Conv2D(nf*16, 3, padding = 'same', kernel_initializer = 'he_normal')(pool4) # conv5 = Conv3D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5) conv5=LeakyReLU(alpha=0.3)(conv5) conv5=BatchNormalization()(conv5) drop5 = Dropout(0.5)(conv5) up6 = Conv2D(nf*32, 2, padding = 'same', kernel_initializer = 'he_normal')(UpSampling3D(size = 2)(drop5)) up6=LeakyReLU(alpha=0.3)(up6) up6=BatchNormalization()(up6) merge6 = concatenate([drop4,up6], axis = 3) conv6 = Conv2D(nf*32, 3, padding = 'same', kernel_initializer = 'he_normal')(merge6) # conv6 = Conv3D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6) conv6=LeakyReLU(alpha=0.3)(conv6) conv6=BatchNormalization()(conv6) up7 = Conv2D(nf*16, 2, padding = 'same', kernel_initializer = 
'he_normal')(UpSampling3D(size = 2)(conv6)) up7=LeakyReLU(alpha=0.3)(up7) up7=BatchNormalization()(up7) merge7 = concatenate([conv3,up7], axis = 3) conv7 = Conv2D(nf*16, 3, padding = 'same', kernel_initializer = 'he_normal')(merge7) # conv7 = Conv3D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7) conv7=LeakyReLU(alpha=0.3)(conv7) conv7=BatchNormalization()(conv7) up8 = Conv2D(nf*8, 2, padding = 'same', kernel_initializer = 'he_normal')(UpSampling3D(size = 2)(conv7)) up8=LeakyReLU(alpha=0.3)(up8) up8=BatchNormalization()(up8) merge8 = concatenate([conv2,up8], axis = 3) conv8 = Conv2D(nf*4, 3, padding = 'same', kernel_initializer = 'he_normal')(merge8) # conv8 = Conv3D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8) conv8=LeakyReLU(alpha=0.3)(conv8) conv8=BatchNormalization()(conv8) up9 = Conv2D(nf*2, 2, padding = 'same', kernel_initializer = 'he_normal')(UpSampling3D(size = 2)(conv8)) up9=LeakyReLU(alpha=0.3)(up9) up9=BatchNormalization()(up9) merge9 = concatenate([conv1,up9], axis = 3) conv9 = Conv2D(nf*1, 3, padding = 'same', kernel_initializer = 'he_normal')(merge9) # conv9 = Conv3D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9) conv9=LeakyReLU(alpha=0.3)(conv9) # conv9 = Conv3D(2, 3, padding = 'same', kernel_initializer = 'he_normal')(conv9) # conv9=LeakyReLU(alpha=0.3)(conv9) conv9=BatchNormalization()(conv9) conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9) model = Model(inputs = inputs, outputs = conv10) model.compile(optimizer = Adam(lr = 1e-2), loss = closs, metrics = [iou_core]) #model.summary() # if(pretrained_weights): # model.load_weights(pretrained_weights) model.fit(X_train,y_train, epochs=30,batch_size=1, validation_data=(X_val, y_val)) import matplotlib.pyplot as plt fig=plt.figure(1,figsize=(15,300)) plt.set_cmap('gray') import glob import skimage.io as io from skimage.transform import resize n = 10 xs = X_val ys = y_val for i 
in range(min(len(xs),n)): j=i+100 ypr = model.predict(xs[j:j+1]) fig.add_subplot(min(len(xs),n),3,3*i+1) plt.imshow(xs[j][:,:,0]) fig.add_subplot(min(len(xs),n),3,3*i+2) plt.imshow(ys[j][:,:,0]) fig.add_subplot(min(len(xs),n),3,3*i+3) # plt.imshow(ypr[0][:,:,0]) io.imsave ('imagetest'+str(i)+'.jpg', ypr[0][:,:,0] ) ```
github_jupyter
--- # __Python Pandas__ Data Structures, Inspection, Cleaning, Indexing, Slicing, merging, concatenating --- Code examples on the most frequently used functions - Collected, Created and Edited by __Pawel Rosikiewicz__ www.SimpleAI.ch ## CONTENT * __CREATING SERIES & DATA FRAMES__ </br> * __LOADING/SAVING DATA FROM/TO FILE (csv, excel, json)__ * pd.read_csv() * df.to_csv( “File_Name.csv”, encoding = 'utf-8', index = False) * pd.read_excel(); sheet_name='Sheet1', usecols=[0,1] * pd.read_json() </br> * __INSPECTING DATA FRAME__ * type() - series, or df, etc... * shape, size, ndim * .head(), .tail() * .dtypes # no Brackets!, '0' - object dtype * .info() * .describe() </br> * __HANDLING MISSING DATA__ * __isnull() / notnull()__, df.isnull().sum().sum(), df.isnull().sum(axis=0) * __dropna()__, axis=1 (for removing columns), thresh=100 (min. 100 non-NaN to keep the row/col) * __fillna()__, method='ffill','bfill', inplace=True </br> * __DUPLICATES__ * __duplicated()__; returns a boolean Series, true for duplicated rows * __drop_duplicates()__; creates df copy, same params, as in duplicated() </br> * __OUTLIERS__ * I created a separate notebook on outliers in machine learning - here it is just an example </br> * __FILTERING WITH < & >, < | >__ * values comparisons * ==, >, <, >=, <=, !=, etc... * (bitwise) logical operators | and & * "|" - OR * "&" - AND * eg: df.loc[(df.loc[:,"col1"]>=1) & (df.loc[:,"col2"]<=-1),'col1'] * element-wise comparisons * np.logical_or(x,y) * np.logical_and(x,y) </br> * __MAP, APPLY, APPLYMAP__ * __map__ ; replace old value with new value in pd.Series * df[ "column name" ].map( { “old_value” : “new_value” } ) * __apply__; apply functions to rows and columns in df * Series; s.apply( Function_name , Function_parameter_1= < value >, .... 
) * DF; df.apply( Function_name , axis=0 ), axis=0, returns new row * __applymap__; works elementwise, for entire df * df.applymap( Function_name ) </br> * __CONCATENATE & HIERARCHICAL INDEXING EXAMPLE__ * __pd.concat();__ * add rows (axis =0) * add cols (axis =1) * join = {inner, outer, ...} * sort= False * .reset_index(drop=False) </br> * __MERGE__ * __pd.merge()__; SQL-style join * pd.merge(users, scores, on='ID', how="inner") * on many cols (keeps copies of all columns) * "how" {"inner", "outer", "left", "right"} * eg. if you use "right", NaN are added to left </br> * __MULTI-INDEX__ * __pd.set_index()__, with >1 colname selected * __pd.MultiIndex()__, — creates multi index, by hand approach * __df.xs()__, — for multi index slicing, it works like cd in bash * __pd.concat([list with df's], axis=0, keys=[list with key for each df]) ``` import os import random import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns ``` --- ## CREATING SERIES & DATA FRAMES --- https://pandas.pydata.org/pandas-docs/stable/reference/frame.html * __Series__; * one-dimensional labeled array; * A series is like a DataFrame with a single column. * __DataFrame__; * a two-dimensional and labeled array, * Columns can store different dtypes * __Dimensions__; the number of axes, that are labeled starting at 0. 
* __rows__;axis 0; represent the data points, by convention * __columns__; axis 1; represent the variables, -||- * __Column names__; bold names on the top * __Index labels__; bold nr’s, from 0 to 9 on the left side, for rows only * __Data__; everything else inside the cells * __Cell__; place set with one row and one column ### more on Data Frames https://stackoverflow.com/questions/27374774/the-nature-of-pandas-dataframe https://krbnite.github.io/Memory-Efficient-Windowing-of-Time-Series-Data-in-Python-2-NumPy-Arrays-vs-Pandas-DataFrames/ * How the data is stored in DF * DataFrame can be seen as a list of columns, or even more a (ordered) dictionary of columns * Each column, or collection of columns of the same type, is stored in a separate array * Notice: Numpy Array is internally stored row-wise in Memory * Why it is beneficial to store data in columns * each column has uniform dtype, and can be accessed very quickly * Is adding rows more expensive than adding columns? * In general, appending multiple single rows is not a good idea: better to eg preallocate an empty dataframe to fill, or put the new rows/columns in a list and concat them all at once. ### pd.Series --- * most functions work for data frames too, so I am presenting them once here, * we use s.dtype instead of df.dtypes() * df.info() doesn't work for series, #### Create new Series * typically, created from a list * it contains only one dtype, * but, you can store different dtypes from the list in one pd.Series, using "object" dtype in that series, * __index__: additional column, visible on the left, Starts at 0! ``` s = pd.Series(["a", "b", "c"], index=["first", "second", "third"]) s2 = s.copy() # obj, with new id( ) ``` #### inspect pd.Series ``` # dimensions s.shape # (3,) s.size # 3 s.ndim # 1 # object type(s) # pandas.core.series.Series # examples s.head() s.tail() # summary s.dtype # no Brackets!, '0' - object dtype s.describe() ``` #### get values * Caution! 
if you use non-existed key value form the index, it returns, KeyError ``` # idx s[0] # index 0 s.iloc[0] # index 0 # index value s['first'] # it may cause problems, if that is s.loc['first'] # with loc, you must use the name s['first'] # utiple values can be get s[['first', 'second']] # return a miniseries ``` ### pd.DataFrame --- * __important to know when working with df__ * __df indexing for rows__; * data points are automatically indexed, starting from 0, * one of the columns in df can be set as new index using __df.set_index(“column_name”)__ method * __Copy vs innplace__ * important to know whether a given method introduce changes in a modified obj, or it creates a new object. * Most of pandas methods returns a copy, * thus you must eaither use __inplace = True__ or __df = df.modiffication()__ to save changes in an original obj. * Inplace = True, ensures that the original object will be modified, but it is not implemented in all methods * __.copy( )__ * ensures that new obj will be created, not in all methods * __na_values=[“char”]__; * Pandas will automatically recognize and parse common missing data indicators, suchn as NA, empty fields. * For other types of missing data use np.nan (special float dtype from numpy) * eg: __df = pd.read_csv('file_name.csv', na_values=['?'])__ #### create new data frame ``` # empty df ''' you provide no data it has only col names, indexes and dtype, no data'''; pd.DataFrame( index = range( 0 , 2 ), columns = [ 'A' , 'B' ], dtype = 'float' ) # df filled with one value ''' it can be any numeric item, but not a string eg: you may use np.nan because it is a float. 
pd.DataFrame(np.nan, index=[0,1], columns=['A', “B”])''' pd.DataFrame( np.nan, index = range( 0 , 2 ), columns = [ 'A' , 'B' ] ) # from dictionary, '''keys used as column names''' df=pd.DataFrame({ "col1":[1,2,3], "col2":['a', 'b', 'c'], "col3":['first', 'second', 'third'] }, index=['row1', 'row2', 'row3'] ) # from data provided as rows in list wiht different dtpyes, '''one row == one embded list''' df=pd.DataFrame( [[1, 'a', 'first'], [2, 'b', 'second'], [3, 'c', 'third']], columns=['col1', 'col2', 'col3'], index=['row1', 'row2', 'row3'] ) # from list with dictionary with colnames as keys '''often used in for-loops to collect results''' lst=[] for i in range(3): lst.append({'col1':i, 'col2':i+10}) df = pd.DataFrame(lst) # from numpy array df=pd.DataFrame( np.arange(9).reshape(3,3), columns=['col1', 'col2', 'col3'], index=['row1', 'row2', 'row3'] ) df # append distionary to a list, """this is the method that I am using to colledt data in loops key: column name """ mylist=[] mylist.append({"col1":1, "col2":"a"}) mylist.append({"col1":2, "col2":"b"}) pd.DataFrame(mylist) ``` --- # LOADING DATA FROM FILE (csv, excel, json) --- ### __Main Functions__ * pd.read_csv() * df.to_csv( “File_Name.csv”, encoding = 'utf-8', index = False) * pd.read_excel(); sheet_name='Sheet1', usecols=[0,1] * pd.read_json() ### __LOAD CSV__ * __pd.read_csv()__ * __Header__ * read_csv() function assumes, that csv file has a header, * ie. 1st line with col names; eg: pd.read_csv( “ file_name.csv” ) > __header = None__ - no header > pd.read_csv( 'file_name.csv', header = None) > names = [ ....] custom headers, provided in a list > pd.read_csv('file.csv', names = ['Header1' , 'Header2'] ) * __missing data__. * __na_values=['string”]___. * set custom values for missing data, in string > pd.read_csv('file.csv', na_values=['?']) > ... all cells with '?' 
will be set as NaN in new dataframe ### __SAVE CSV__ * __df.to_csv( “File_Name.csv”, encoding = 'utf-8', index = False)__ * __“File_Name.csv"__; can be with path, or else saved in working dir * __encoding__; best use 'utf-8' * __index__; asking to keep row index, * TIP: in case you have custom index, its best to keep it. ### __LOAD OTHER FORMATS__ * pandas supports excel, JSON, HTML, SAS SQL, and several other formats * __pd.read_excel( 'file.xls' )__ * assumes, file has a header * ``` #### csv #### --------------------------------------------------------------------------------------- # load df = pd.read_csv('file.csv') df = pd.read_csv("file.csv", usecols=['a', 'b']) # selected columns only # set header & index in loaded data df = pd.read_csv('file.csv', header=None) # header = None; first row read as data df = pd.read_csv('file.csv',names=['col1','col2', 'col3', 'col4']) # my headers; Caution, like with header=None! df = pd.read_csv('file.csv', index_col=0) # Set first column as index # dtype; normally its autodetection, but dtypes can be given in dct. df = pd.read_csv('file.csv', dtype = { 'b' : np.float64}) # key=colName, value=dtype #. TypeError, when setting incopatible dtype, ef. float for str #### excel #### --------------------------------------------------------------------------------------- # load df = pd.read_excel('data.xls'); df.head(2) # Select sheet in excel or columns df = pd.read_excel('data.xls', sheet_name='Sheet1') # from a given sheet, other wise ==1 df = pd.read_excel('data.xls', usecols=[0,1]) # selected columns to load, by col index df = pd.read_excel('data.xls', usecols=['col1','col2']) # selected columns to load, BY COLNAME #### json #### --------------------------------------------------------------------------------------- # load json object into dct pd.read_json('frame.json') # "orientation" ; par, setting which keys should be cols/rows, see help # in:: { <col_name> : { <row_name> : <value> .. } .. 
} # out:: df with dct keys as colnames and rownames, and vlues in cells # load json array into messy obj in python pd.read_json('books.json') # in:: { <array_name> : [ { <cell_1_key1> : <value1>, <cell_1_key2> : <value2> ...}, # { <cell_2_key1> : <value1>, <cell_2_key2> : <value2> ...},] # } # out:: pd.Series; with all values from one cell, placed as object or string in it # i.e each cell has many, many different key:value pairs, sep with coma "," # load json array into 2d pandas df def load_jason_arr_to_df(*, file_name, key_to_array): # imports import json from pandas.io.json import json_normalize # step 1. open file, read only file = open(file_name, "r") # step 2. convert to dct, it can have many arrays, each with key, so be carefull! json_string = file.read() dct = json.loads(json_string) # step 3. convert to pd.dataFrame df = json_normalize(dct, key_to_array) # Importnat; "books" - its a key to one array in dct, that dct may have many arrays! # but you need iuts key even if only one array was loaded # return return df # out:: cells -> rows #. cell_key's -> column names #. index -> by default, 0,1,... # test df = load_jason_arr_to_df(key_to_array ="books", file_name ="books.json") df.head(2) #### about json ------------------- """ < json format > * whitespace and return characters outside of strings are ignored, * two tyes of objects: json objects & arrays * json objects; * in "{ }" # conatains key value paris pairs separater by coma * Key's ; unique, always on the left * Values ; strings, numbers, objects, arrays, null, True, False, or other more complex * eg: . { . "userID": 12345, . "userName": "John Smith" . } * json arrays * in "[ ]" * ordered collection with values of different data types * eg: . { . "userID": 12345, . "userName": "John Smith", . "results": . [ . { . "test": "Verbal Reasoning", . "score": 140 . }, . { . "test":"Quantitative Reasoning", . "score": 165 . } . ] . } ; here an arr, is just a value in json obj. . 
;called with nthe key "results" """; ``` --- ### INSPECT DATAFRAME --- __Main Functions__ * type() - series, or df, etc... * shape, size, ndim * .head(), .tail() * .dtypes # no Brackets!, '0' - object dtype * .info() * .describe() ``` df=pd.DataFrame( np.arange(9).reshape(3,3), columns=['col1', 'col2', 'col3'], index=['row1', 'row2', 'row3'] ) df # dimensions df.shape # (3,) df.size # 3 df.ndim # 1 # object type(df) # pandas.core.series.Series # examples df.head() df.tail() # summary df.dtypes # no Brackets!, '0' - object dtype df.info() df.describe() # memory footprint # Memory Estimate (Pandas Style) f'{round(df.memory_usage().sum()/1.0e6,6)} MB' # Memory Estimate (Again, but NumPy Style) f'{round(df.values.nbytes/1.0e6,6)} MB' ``` ### dtype ``` df.dtypes #returns data type in each col, use no brackets! df['col1']= df['col1'].astype( int ) ``` --- ## MISSING DATA in pandas --- ### Main Functions: * __isnull() / notnull()__, df.isnull().sum().sum(), df.isnull().sum(axis=0) * __dropna()__, axis=1 (for removing columns), thresh=100 (min. 100 Nona to keep the row/col) * __fillna()__, method='ffill','bfill', inplace=True ### NaN * "Not a Number" * special floating-point value from NumPy * We can perform all operations with NaN without errors,but the result is always another NaN. * __Caution!__ reindexing or reshaping or joining dfs of different dimesions creates NaN! ### more on Functions: * __isna() vs isnull()__ * do exactly the same thing! - their docs are identical * pandas dfs, were based on R df's where these two are different objects, * in pythong, pandas is build on top of numpy, which does not use null, or na * numpy uses NaN - see above, * USAGE: * To detect NaN values in numpy - np.isnan(). 
* To detect NaN values in pandas - .isna() or .isnull() ``` # df example: df=pd.DataFrame(np.arange(12).reshape(3,4)) df = df.reindex([1,2,3,4]) df #### find NaN ---------------------------------- # locate NaN df.isnull() # returns array with True/False df.notnull() # returns array with True/False # find how many missing data you have print(df.isnull().sum(axis=0)) # nr of NaN / column print(df.isnull().sum().sum()) # nr of all NaN in df print(df.notnull().sum().sum()) # nr of all NON-NaN in df #### drop NaN ---------------------------------------- # Remove NaN globally, '''returns a copy; see: inplace=True''' df2 = df.dropna() # remove rows with any NaN, df2 = df.dropna(axis=1) # remove columns wiht NaN df2 = df.dropna(how="all") # remove row only if NaN are in all columns # Set Threshold for keeping row/column '''set min. nr. of non-NaN values that allow to keep rows/cols''' df2 = df.dropna(axis=1, thresh=100) df #### fill NaN ---------------------------------------- # with a value '''caution, warnign was done as we work on slice''' df[0].fillna( value='not available', inplace=True); df #### forward/backward propagation and fill with the mean ---------------------------------------- # with the mean df[1].fillna(value=df[1].mean(), inplace=True) # mean of that column df #### forward/backward propagation of the closes non-missing values ---------------------------------------- df[2].fillna(method='ffill', inplace=True) # forward ropagation df[3].fillna(method='bfill', inplace=True) # backward ropagation - dont work here! df ``` --- ## DUPLICATES --- ### Main Functions: * __duplicated()__; returns a string, true for duplicated rows * __drop_duplicates()__; creates df copy, same params, as in duplicated() ### IMPORTANT * both functions return pdSeries with True/False, * duplicates are True * keep = decides which dfuplicates is treated as not-dupl. 
ie gets False ``` # df example df = pd.DataFrame({ 'color': ['blue','blue','red','red','blue'], 'value': [2,1,3,3,2]}) #### Find duplicates ---------------------------------------- # locate dulpicated rows '''all return pd.Series, with True/False''' df.duplicated(keep="first") # first duplicated row, is trreasted as not a duplicate, ie get False value df.duplicated(keep="last") # the last appearance of a duiplicated row is false (ie. not a duplicate) df.duplicated(keep=False) # only unique rows are False, all duplicates are "True" # locate rows with duplicated values in selected cols df.duplicated(['value'], keep="first") # investigate duplicates, df.duplicated(keep="first").sum() # see nr of dupicates df.loc[df.duplicated(keep="first")] # select examples #### Remove rows with duplicates ---------------------------------------- '''#. IMPORTANT : all parameters used to located duplicates in drop_duplicates work like in duplicated() ''' df.drop_duplicates(keep='first') # returns a copy, df.drop_duplicates(['value', "color"]) # you can select many colnames in a list ``` --- ## OUTLIERS --- ### __outliers__ * I created separate notebook on outliers in machine leanring notes, * here I will simply provide some examples: * eg. use normal distrib. 
and SD>3 to define outliers, ### __zero problem__ * it there are many zeros, it will bias the mean/median used to identify outliers, * solutions: * remove zeros, * use larger threshold, eg >5SD * use more robust method line quantiles, instead of SD ``` # data '''sample "standard normal" distr., mu=0, sd=1, 5 columns''' df=pd.DataFrame(np.random.randn(3000,5)) #### Identify outliers (≥2sd) ---------------------------------- '''I will use 2SD to make them visible on plots''' # a) calculate mean & sd / col, df_mean = df.mean(axis=0) # axis =0; results for each column, traversing a row df_sd = df.std(axis=0) # -||- # b) find outliers out_position = np.abs(df-df_mean)>(2*df_sd) # true for outl # c) examine == plot outl's on hist fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15,3)) fig.suptitle("5 sets sampled from rnorm with outliers") for i, ax in enumerate(axs.flat): ax.hist(df[i]) # all points, ax.hist(df.loc[out_position[i],i].values.tolist())# re-plot only outliers, on top of them #### Remove outliers (≥3sd) ---------------------------------- # Drop rows with NaN in at least one column position_of_rows_with_outl = out_position.any(axis=1) # Find rows with any outliers - Return pdSeries with True/False rows_with_outl = df.loc[position_of_rows_with_outl,:] # Copy rows with any outliers df_new = df.drop(rows_with_outl.index, axis=0) # Remove rows with any outliers - INLPACE ! #### Boxplots - to examine cleaned data # .. prepare the data, data_for_plot = {"raw data" : df.values, "cleaned data" : df_new.values, "rows with outliers" : rows_with_outl.values} # .. 
boxplots fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(15,3)) fig.suptitle("compare data bafore cleaning, after cleaning, and what you have removed") for i, ax in enumerate(axs.flat): # boxplots using array - 1 box == 1 col subplot_title = list(data_for_plot.keys())[i] bp = ax.boxplot(data_for_plot[subplot_title], patch_artist=True # fill with color ) # fill in with custome color colors = ['pink', 'lightblue', 'lightgreen', "red", "yellow"] for patch, color in zip(bp['boxes'], colors): patch.set_facecolor(color) # aestetics ax.set(ylim=(-4,4), title=subplot_title, xticklabels = ["s1", "s2", "s3", "s4", "s5"] ) sns.despine() # plt.title("catoplot wiht outliers, \n we removed many good pioints with the outliers!") fig.tight_layout() fig.subplots_adjust(top=0.8) plt.show(); ``` -- ## FILTERING DF -- ### __Main Funcitons__ * values comparisons * ==, >, <, >=, <=, !=, etc... * (bitwise) logical operators | and & * "|" - OR * "&" - AND * element-wise comparisons * np.logical_or(x,y) * np.logical_and(x,y) ### __Caution__ * Series and df from must be compared elementwise using: * "and" or "or", return Error with pandas objects! * more info: https://stackoverflow.com/questions/36921951/truth-value-of-a-series-is-ambiguous-use-a-empty-a-bool-a-item-a-any-o/36922103#36922103 ``` # data '''sample "standard normal" distr., mu=0, sd=1, 5 columns''' df=pd.DataFrame(np.random.randn(3000,3), columns=["col1", "col2", "col3"] ) # select values in col2, selected with values in col1 df.loc[df.loc[:,"col1"]>=1,'col2'].hist() # use and df.loc[(df.loc[:,"col1"]>=1) & (df.loc[:,"col2"]<=-1),'col1'].hist() # use OR df.loc[(df.loc[:,"col1"]>=1) | (df.loc[:,"col1"]<=-1),'col1'].hist() # use and df.loc[(df.loc[:,"col1"]>=1) & (df.loc[:,"col2"]<=-1),'col1'].hist() ``` --- ## MAP, APPLY, APPLYMAP --- ### __Important:__ "ALWEAYS STORE NEW DATA IN NEW COLUMNS! 
### __MAP__ * replaxce old value for new value in pd.Series * __df[ "column name" ].map( { “old_value” : “new_value” } )___ * features: * works only for Series or One DataFrame column * Replace ALL existing values in a series, with new valu * NaN for items that were not in a map dct. * Returns a COPY * Returns error, when applied to df ### __APPLY__ * Used to apply functions to rows and columns in df * may, or may not return results in new row/col * __works differently for series & df's__ * https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.apply.html * (a) is applied to pd.Series: * s.apply( Function_name , Function_parameter_1= < value >, .... ) * works element wise! * (b) if applied to df: * df.apply( Function_name , axis ) * returns summary value per row/col * __axis=0__ * returns new row, with value for each column  | + + + + + + |   | + + + + + + |   | + + + + + + |  ---------------------- | * * * * * * | new row ( axis = 0 ) * axis = 1 * return new column, with value for each row    # sum from each row |+ + + + + +| * | |+ + + + + +| * | |+ + + + + +| * |    new col ( axis = 1 ) ### APPLYMAP(lamdax x: ) * __df.applymap( Function_name )__ * Used for Entire DataFrame >> works ELEMENT-WISE * CAUTION; WORKS INPLACE !!! (Not a copy in return) * Inefficient with large datasets ``` #### data example ----------------------------------------------------- DATA EXAMPLE ... 
def generate_data():
    """Build a small demo DataFrame with 'group', 'score' and 'passed' columns."""
    data_dct = {'group': [1, 1, 1, 2, 2],
                'score': [10, 11, 15, 10, 16],
                'passed': ['y', 'n', 'y', 'n', 'maybe']}
    df_data = pd.DataFrame(data_dct)
    return df_data

generate_data()

#### MAP ---------------------
"""eg: map new values to old
* provide dct: with old/new values
* store in new column
* NaN will be placed for missing keys
"""
df = generate_data()
# 'maybe' is not a key in the mapping dict, so that row becomes NaN
df['passed_full'] = df.passed.map({'y':"passed", 'n':"not-passed"})
df

#### APPLY ----------
# (a) apply used to one column;
"""Element wise"""
df['score_sqrt']=df.score.apply(np.sqrt)

# apply for each column
df.apply(np.sum, axis=0)

# . SELECT DTYPES FOR OPERATIONS
# .... prep the data: take out all cols with numerical data
df_num = df.select_dtypes(include=[np.number])

# .... find max value for each col/row
df_num.apply(max, axis=0) # max in each COL (axis =0)
df_num.apply(max, axis=1) # max in each ROW (axis =1)

# lambda function
# NOTE(review): f'{x}+1' stringifies the whole row and appends the literal
# text "+1"; it does not add 1 numerically — confirm this demo is intended.
df.apply(lambda x: f'{x}+1', axis=1, result_type='expand')

#### APPLYMAP ------------------------------------------------------------ APPLYMAP .....
# .... prep the data: generate df and take out all cols with numerical data
df = generate_data()
df_num = df.select_dtypes(include=[np.number])
# .
eg: add $ to all numerical values in df_num df_num.applymap(lambda x: '\$' + str(x)) ``` --- ## CONCATENATE & HIERARCHICAL INDEXING EXAMPLE --- ### Main Functions * __pd.concat()__ * join {"inner", "outer"} * add rows (axis =0)  | + + + + + + |   | + + + + + + |   | + + + + + + |  ---------------------- | * * * * * * | | * * * * * * | | * * * * * * | | * * * * * * | new rowS ( axis = 0 ) * add cols (axis =1)   |+ + + + + +| * * * | |+ + + + + +| * * * | |+ + + + + +| * * * |   new colS ( axis = 1 ) ``` # data examples '''3 dataframes filled with x,y,z''' df1 = pd.DataFrame(np.full( (2,3),'x', dtype=object), columns=['A', 'B', 'C']) # np.full() ; arr filled with one value df2 = pd.DataFrame(np.full( (2,3),'y', dtype=object), columns=['A', 'B', 'C']) df3 = pd.DataFrame(np.full( (2,2),'z', dtype=object), columns=['D', 'E']) ``` ### CONCAT BY COLUMNS * IMPORTANT: concat compares and assign values based on row index, not col names * col names can be duplicated * it will add NaN where is no value, unless we set join = "inner" ``` # concat with axis=1 """join="outer" , by default""" pd.concat([df1, df2, df3], axis=1)# join="outer" , by default #### inner/outer ---------------------------------------------------------- pd.concat([df1, df3], axis=0, join="outer", sort=False) # concats all avaiable rows pd.concat([df1, df2], axis=0, join="inner", sort=False) ``` ### CONCAT BY ROWS ``` # concat. wiht axis=0 - add new rows, & the same column names ''' axis=0 ; by default reset_index(drop=False) - old indexes are placed in new col, else you will have duplicated indexes ''' pd.concat([df1,df2], axis=0) pd.concat([df1, df2]).reset_index(drop=False) # concat. 
wiht axis=0 - add new rows, & DIFFERNT column names pd.concat([df1, df3], axis=0, sort=False).reset_index(drop=False) ``` ### CONCAT WITH HIERARCHICAL INDEX ``` # build hierarchical index '''new names can be assigned to each df''' new_df = pd.concat( [df1,df2], axis=0, keys=['data from df1', 'data from df2'] ) new_df ``` ### SLICING MULTIINDEX - BASIC EXAMPLE - more later ``` new_df.iloc[0:3,:] new_df.loc["data from df1",:] ``` --- ## MERGING --- __pd.merge()__ * Merge df or named Series with a SQL-style join * CAUTION -at least one col in two dfs must have the same information that allow merging * __pd.merge()__ * builds new df, with values from two dfs,using values in some column(s) as ref to join these values) * useing pd.merge() * (a) on one col * (b) on many cols (keeps copies of all columns) * "how" {"inner", "outer", "left", "right"} ``` # data examples users = pd.DataFrame( {'ID': [1, 2], 'logName': ['player 1', 'player 2']}) scores = pd.DataFrame( {'ID': [1, 2, 2, 3], 'Score': [100,200,300, 400]}) scores2 = pd.DataFrame({'playerID': [1, 2, 2, 3],'Score': [100,200,300, 400]}) # MERGE INNER ''' by default, == how="inner" - allows row duplicates - do not generate NaN If no colname provided, it will select colname with the same name in two df's ''' pd.merge(users, scores, on='ID', how="inner") pd.merge(users, scores) # same results in that case # join with different colnames pd.merge(users, scores2, left_on='ID', right_on='playerID') ``` #### use more than one column to merge the df's + inner, outer, left, right options * merge df, using >1 col, to keep them not duplicated in new df, with full info * add names to new column so they are not confused with each other * HOW : {inner, outer, left, right} ``` # data example: """two df's with logs of scores of the same players from different games players, can be identified by name and ID on teachirts. 
however, these two things may not be the same if used separately, """ game1 = pd.DataFrame( {'shirt_nr': [1, 2, 3], 'player_name': ['Josh', 'Ivan', 'Olaf'], 'points': [1, 4, 2]}) game2 = pd.DataFrame( {'shirt_nr': [1, 2, 3], 'player_name': ['Josh', 'Ivan', 'Pawel'], 'points': [8, 3, 1]}) game1 # ... a) by default == how="inner" # - "on" two cols, #. - adding "suffixes" to col taken form each df # - no NaN pd.merge(game1, game2, on=["shirt_nr", "player_name"], suffixes=['_game1', '_game2']) # SAME AS BELOW pd.merge(game1, game2, on=["shirt_nr", "player_name"], suffixes=['_game1', '_game2'], how="inner") # ... b) how="outer", # - joins all, # - adds NaN in both sites pd.merge(game1, game2, on=["shirt_nr", "player_name"], suffixes=['_game1', '_game2'], how="outer" ) # ... c) how="left", # - takes all from left df, # - generates NaN in right df, pd.merge(game1, game2, on=["shirt_nr", "player_name"], suffixes=['_game1', '_game2'], how="left" ) # ... d) how="right", # - takes all from right df, # - generates NaN in left df, pd.merge(game1, game2, on=["shirt_nr", "player_name"], suffixes=['_game1', '_game2'], how="right" ) ``` --- ### MultiIndex --- https://hackersandslackers.com/using-hierarchical-indexes-with-pandas/ * __Main Functions__ * __pd.set_index()__, with >1 colname selected * __pd.MultiIndex()__, — creates multi index, by hand approach * __df.xs()__, — for multi index slicing, it works like cd in bash * __pd.concat([list with df's], axis=0, keys=[list with key for each df]) </br> * __pd.MultiIndex()___ * creates object MultiIndex * it has to be passed with set_index() to series or df * col_levels ; List with lists * [ [level 1 names ], [level 2, names ] ..... ] * it will be diplyed when calling df, * and used for selection of rows * col_labels </br> * IMPORTANT * When a column becomes an index, the original "column" is dropped, and an index is added to our DataFrame with the values that were contained in that column. * you may destroy your work! 
thus: * USE __df.reset_index(inplace=True)__ to undo! * TIP * ... MUTIINDEX YOUR DATA, * ... ANSWER FEW QUICK QUESTION * ... REVERSE with df.reset_index(inplace=True) </br> * COMMON OPERATIONS ON MULTIINDEX * Swap Index Levels: * df.swaplevel(i='level_name_1', j='level_name_2') * Rename Indexes: * df.index.names = ['name1', 'name2'] * Remove a single index level: * df.unstack(level=0) ``` # SET COLUMNS TO BE MULTIINDEX df = pd.DataFrame({ 'cat1' : ['one', 'one', 'one', 'two', 'two'], 'cat2' : ['A', 'B', 'B', 'A', 'B'], 'cat3' : ['a', 'b', 'b', 'c', 'd'], 'num1' : [1, 2, 3, 4, 5]}) df.set_index(['cat1', 'cat2', 'cat3'], inplace=True) df ``` ### (a) standard selection methods ``` # slicing ''' this returns two first rows, irrepsectively on mutiindex names ''' df.num1[0:2] # use loc: """loc accepts; {row/col names &/or [True/False,...]}; """ df.loc[("one"), :] # use iloc """iloc accepts {row/col nrs &/or [True/False,...]}""" df.iloc[0:3,:] df.iloc[[True, True, True, False, False], :] ``` ### slice with xs() * IMPORTANT * USE PARENTHESIS !!! df.xs(["one"]) * XS WORKS LIKE CD IN BASH ie. YOU CAN ONLY SELECT KEYS WITHIN THE SAME GROUP !!! * __parameters__: * key: label from index/Multiindex, one or many in tuple! * level: def=0, level number of key like cat1, cat 2 in our example * drop_level : def=True, * removes levels used for selection from returned results * If False it returns obj with the same nr of levels in multiIndex ``` # ...... select indexes like with cd in bash # - key can be in list or in tuple df.xs(("one")) df.xs(("one", "B")) df.xs(("one", "B", "b")) # ..... use key from a specified index only # - level key : number or level name (eg, cat1, cat2...) # - IMPORTANT : KEY MUST BE IN TUPLE, not in a list! #. - IMPORTANT : I can only select key, that is not repeated in another group !!!!! df.xs( "b", level="cat3") df.xs(("b"), level="cat3") df.xs(("b"), level=2, drop_level=False) # new df has MultiIndex with all original levels! # ..... 
jump some of the indexes on the way df.xs( ("one", "b"), level=("cat1", "cat3")) # same number of index key and level keys in each tuple df.xs( ("one", "b"), level=(0, "cat3")) ``` ### BUILD YOUR OWN MULTIINDEX ``` # (METHOD 1) Mutliindex FROM LIST IN LIST, """here we will use the same index for rows and cols""" arr = np.arange(16).reshape(4,4) possible_keys = [['A','B', "C"], ["a", "b", "c", "d", "e"]] # all possibvle, not nesessarly used index_keys_assigment = [[0, 0, 1, 1 ],[0, 0, 1, 1]] # list with code of each corrresponding key df_multiindex = pd.MultiIndex(possible_keys, index_keys_assigment) # ...... OPTION a) it can be arttached to df while building df = pd.DataFrame(arr, index=df_multiindex, columns=df_multiindex) # ...... OPTIION b) or separately df = pd.DataFrame(arr) df.set_index(df_multiindex, inplace=True) df.columns = df_multiindex df # anoterh example ''' Comments: - row/col_levels - these are al possible keys, these are not nesessarly used, eg: abc,c,d,e,f,g,.....¨ - ...used ... - here, you assign row/col_labels to columns ''' # create the example arr = np.arange(9).reshape(3,3) col_levels = [['1', '2', '3', '4'],['one','two','three','four']] col_levels_used = [[0,0,1],[0,1,3]] row_levels = [['A', 'B', 'C', 'D'],['a','b','c','d']] row_levels_used = [[0,0,1],[0,3,0]] first_df = pd.DataFrame(arr, index =pd.MultiIndex(row_levels, row_levels_used), columns=pd.MultiIndex(col_levels, col_levels_used)) first_df # (METHOD 2)Mutliindex FROM ARRAY arr = [['bar', 'bar', 'baz', 'baz'], ['one', 'two', 'one', 'two']] index = pd.MultiIndex.from_arrays( arr, names=['first', 'second'] ) # NAMES ARE USED FOR MULTIINDEX LEVELS! s = pd.Series(np.random.randn(4), index=index) s ```
github_jupyter
``` %tensorflow_version 1.x # Clone git %rm -rf archlectures !git clone https://github.com/armaank/archlectures %cd archlectures/generative/ %%sh chmod 755 get_models.sh ./get_models.sh from IPython.display import Javascript display(Javascript('''google.colab.output.setIframeHeight(0, true, {maxHeight: 200})''')) !pip install torch !pip install fbpca boto3 !git submodule update --init --recursive %cd ./ganspace/ import torch import numpy as np from os import makedirs from types import SimpleNamespace import matplotlib.pyplot as plt from pathlib import Path from PIL import Image import pickle import sys sys.path.insert(0, '..') from models import get_instrumented_model, get_model from notebooks.notebook_utils import create_strip, create_strip_centered, prettify_name, save_frames, pad_frames from config import Config from decomposition import get_or_compute import scipy from torchvision.utils import make_grid outdir = Path('out/figures/random_baseline') makedirs(outdir, exist_ok=True) # Project tensor 'X' onto orthonormal basis 'comp', return coordinates def project_ortho(X, comp): N = comp.shape[0] coords = (comp.reshape(N, -1) * X.reshape(-1)).sum(dim=1) return coords.reshape([N]+[1]*X.ndim) def show_img(img_np, W=6, H=6): #plt.figure(figsize=(W,H)) plt.axis('off') plt.tight_layout() plt.imshow(img_np, interpolation='bilinear') def generate(model_name, class_name, seed=None, trunc=0.6, N=5, use_random_basis=True): global inst config = Config(n=1_000_000, batch_size=500, model=model_name, output_class=class_name, use_w=('StyleGAN' in model_name)) if model_name == 'StyleGAN2': config.layer = 'style' elif model_name == 'StyleGAN': config.layer = 'g_mapping' else: config.layer = 'generator.gen_z' config.n = 1_000_000 config.output_class = 'husky' inst = get_instrumented_model(config, torch.device('cuda'), inst=inst) model = inst.model K = model.get_latent_dims() config.components = K dump_name = get_or_compute(config, inst) with np.load(dump_name) as data: lat_comp = 
torch.from_numpy(data['lat_comp']).cuda() lat_mean = torch.from_numpy(data['lat_mean']).cuda() lat_std = torch.from_numpy(data['lat_stdev']).cuda() B = 6 if seed is None: seed = np.random.randint(np.iinfo(np.int32).max - B) model.truncation = trunc if 'BigGAN' in model_name: model.set_output_class(class_name) print(f'Seeds: {seed} - {seed+B}') # Resampling test w_base = model.sample_latent(1, seed=seed + B) plt.imshow(model.sample_np(w_base)) plt.axis('off') plt.show() # Resample some components def get_batch(indices, basis): w_batch = torch.zeros(B, K).cuda() coord_base = project_ortho(w_base - lat_mean, basis) for i in range(B): w = model.sample_latent(1, seed=seed + i) coords = coord_base.clone() coords_resampled = project_ortho(w - lat_mean, basis) coords[indices, :, :] = coords_resampled[indices, :, :] w_batch[i, :] = lat_mean + torch.sum(coords * basis, dim=0) return w_batch def show_grid(w, title): out = model.forward(w) if class_name == 'car': out = out[:, :, 64:-64, :] elif class_name == 'cat': out = out[:, :, 18:-8, :] grid = make_grid(out, nrow=3) grid_np = grid.clamp(0, 1).permute(1, 2, 0).cpu().numpy() show_img(grid_np) plt.title(title) def save_imgs(w, prefix): for i, img in enumerate(model.sample_np(w)): if class_name == 'car': img = img[64:-64, :, :] elif class_name == 'cat': img = img[18:-8, :, :] outpath = outdir / f'{model_name}_{class_name}' / f'{prefix}_{i}.png' makedirs(outpath.parent, exist_ok=True) Image.fromarray(np.uint8(img * 255)).save(outpath) #print('Saving', outpath) def orthogonalize_rows(V): Q, R = np.linalg.qr(V.T) return Q.T # V = [n_comp, n_dim] def assert_orthonormal(V): M = np.dot(V, V.T) # [n_comp, n_comp] det = np.linalg.det(M) assert np.allclose(M, np.identity(M.shape[0]), atol=1e-5), f'Basis is not orthonormal (det={det})' plt.figure(figsize=((12,6.5) if class_name in ['car', 'cat'] else (12,8))) # First N fixed ind_rand = np.array(range(N, K)) # N -> K rerandomized b1 = get_batch(ind_rand, lat_comp) plt.subplot(2, 2, 1) 
show_grid(b1, f'Keep {N} first pca -> Consistent pose') save_imgs(b1, f'keep_{N}_first_{seed}') # First N randomized ind_rand = np.array(range(0, N)) # 0 -> N rerandomized b2 = get_batch(ind_rand, lat_comp) plt.subplot(2, 2, 2) show_grid(b2, f'Randomize {N} first pca -> Consistent style') save_imgs(b2, f'randomize_{N}_first_{seed}') if use_random_basis: # Random orthonormal basis drawn from p(w) # Highly shaped by W, sort of a noisy pseudo-PCA #V = (model.sample_latent(K, seed=seed + B + 1) - lat_mean).cpu().numpy() #V = V / np.sqrt(np.sum(V*V, axis=-1, keepdims=True)) # normalize rows #V = orthogonalize_rows(V) # Isotropic random basis V = scipy.stats.special_ortho_group.rvs(K) assert_orthonormal(V) rand_basis = torch.from_numpy(V).float().view(lat_comp.shape).cuda() assert rand_basis.shape == lat_comp.shape, f'Shape mismatch: {rand_basis.shape} != {lat_comp.shape}' ind_perm = range(K) else: # Just use shuffled PCA basis rng = np.random.RandomState(seed=seed) perm = rng.permutation(range(K)) rand_basis = lat_comp[perm, :] basis_type_str = 'random' if use_random_basis else 'pca_shfl' # First N random fixed ind_rand = np.array(range(N, K)) # N -> K rerandomized b3 = get_batch(ind_rand, rand_basis) plt.subplot(2, 2, 3) show_grid(b3, f'Keep {N} first {basis_type_str} -> Little consistency') save_imgs(b3, f'keep_{N}_first_{basis_type_str}_{seed}') # First N random rerandomized ind_rand = np.array(range(0, N)) # 0 -> N rerandomized b4 = get_batch(ind_rand, rand_basis) plt.subplot(2, 2, 4) show_grid(b4, f'Randomize {N} first {basis_type_str} -> Little variation') save_imgs(b4, f'randomize_{N}_first_{basis_type_str}_{seed}') plt.show() inst = None # reused when possible torch.autograd.set_grad_enabled(False) torch.backends.cudnn.benchmark = True # generate PCA figures generate('StyleGAN2', 'Adaily_A', seed=1866827965, trunc=0.55, N=8) generate('StyleGAN2', 'Adaily_B', seed=1257084100, trunc=0.7, N=5) ```
github_jupyter
``` """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ ## Install dependencies !pip install wget !pip install faiss-gpu ## Install NeMo BRANCH = 'v1.0.2' !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all] import faiss import torch import wget import os import numpy as np import pandas as pd from omegaconf import OmegaConf from pytorch_lightning import Trainer from IPython.display import display from tqdm import tqdm from nemo.collections import nlp as nemo_nlp from nemo.utils.exp_manager import exp_manager ``` ## Entity Linking #### Task Description [Entity linking](https://en.wikipedia.org/wiki/Entity_linking) is the process of connecting concepts mentioned in natural language to their canonical forms stored in a knowledge base. For example, say a knowledge base contained the entity 'ID3452 influenza' and we wanted to process some natural language containing the sentence "The patient has flu like symptoms". An entity linking model would match the word 'flu' to the knowledge base entity 'ID3452 influenza', allowing for disambiguation and normalization of concepts referenced in text. Entity linking applications range from helping automate data ingestion to assisting in real time dialogue concept normalization. We will be focusing on entity linking in the medical domain for this demo, but the entity linking model, dataset, and training code within NVIDIA NeMo can be applied to other domains like finance and retail. Within NeMo and this tutorial we use the entity linking approach described in Liu et. 
al's NAACL 2021 "[Self-alignment Pre-training for Biomedical Entity Representations](https://arxiv.org/abs/2010.11784v2)". The main idea behind this approach is to reshape an initial concept embedding space such that synonyms of the same concept are pulled closer together and unrelated concepts are pushed further apart. The concept embeddings from this reshaped space can then be used to build a knowledge base embedding index. This index stores concept IDs mapped to their respective concept embeddings in a format conducive to efficient nearest neighbor search. We can link query concepts to their canonical forms in the knowledge base by performing a nearest neighbor search- matching concept query embeddings to the most similar concepts embeddings in the knowledge base index. In this tutorial we will be using the [faiss](https://github.com/facebookresearch/faiss) library to build our concept index. #### Self Alignment Pretraining Self-Alignment pretraining is a second stage pretraining of an existing encoder (called second stage because the encoder model can be further finetuned after this more general pretraining step). The dataset used during training consists of pairs of concept synonyms that map to the same ID. At each training iteration, we only select *hard* examples present in the mini batch to calculate the loss and update the model weights. In this context, a hard example is an example where a concept is closer to an unrelated concept in the mini batch than it is to the synonym concept it is paired with by some margin. I encourage you to take a look at [section 2 of the paper](https://arxiv.org/pdf/2010.11784.pdf) for a more formal and in depth description of how hard examples are selected. We then use a [metric learning loss](https://openaccess.thecvf.com/content_CVPR_2019/papers/Wang_Multi-Similarity_Loss_With_General_Pair_Weighting_for_Deep_Metric_Learning_CVPR_2019_paper.pdf) calculated from the hard examples selected. 
This loss helps reshape the embedding space. The concept representation space is rearranged to be more suitable for entity matching via embedding cosine similarity. Now that we have idea of what's going on, let's get started! ## Dataset Preprocessing ``` # Download data into project directory PROJECT_DIR = "." #Change if you don't want the current directory to be the project dir DATA_DIR = os.path.join(PROJECT_DIR, "tiny_example_data") if not os.path.isdir(os.path.join(DATA_DIR)): wget.download('https://dldata-public.s3.us-east-2.amazonaws.com/tiny_example_data.zip', os.path.join(PROJECT_DIR, "tiny_example_data.zip")) !unzip {PROJECT_DIR}/tiny_example_data.zip -d {PROJECT_DIR} ``` In this tutorial we will be using a tiny toy dataset to demonstrate how to use NeMo's entity linking model functionality. The dataset includes synonyms for 12 medical concepts. Entity phrases with the same ID are synonyms for the same concept. For example, "*chronic kidney failure*", "*gradual loss of kidney function*", and "*CKD*" are all synonyms of concept ID 5. Here's the dataset before preprocessing: ``` raw_data = pd.read_csv(os.path.join(DATA_DIR, "tiny_example_dev_data.csv"), names=["ID", "CONCEPT"], index_col=False) print(raw_data) ``` We've already paired off the concepts for this dataset with the format `ID concept_synonym1 concept_synonym2`. Here are the first ten rows: ``` training_data = pd.read_table(os.path.join(DATA_DIR, "tiny_example_train_pairs.tsv"), names=["ID", "CONCEPT_SYN1", "CONCEPT_SYN2"], delimiter='\t') print(training_data.head(10)) ``` Use the [Unified Medical Language System (UMLS)](https://www.nlm.nih.gov/research/umls/index.html) dataset for full medical domain entity linking training. The data contains over 9 million entities and is a table of medical concepts with their corresponding concept IDs (CUI). 
After [requesting a free license and making a UMLS Terminology Services (UTS) account](https://www.nlm.nih.gov/research/umls/index.html), the [entire UMLS dataset](https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html) can be downloaded from the NIH's website. If you've cloned the NeMo repo you can run the data processing script located in `examples/nlp/entity_linking/data/umls_dataset_processing.py` on the full dataset. This script will take in the initial table of UMLS concepts and produce a .tsv file with each row formatted as `CUI\tconcept_synonym1\tconcept_synonym2`. Once the UMLS dataset .RRF file is downloaded, the script can be run from the `examples/nlp/entity_linking` directory like so: ``` python data/umls_dataset_processing.py ``` ## Model Training Second stage pretrain a BERT Base encoder on the self-alignment pretraining task (SAP) for improved entity linking. Using a GPU, the model should take 5 minutes or less to train on this example dataset and training progress will be output below the cell. 
``` # Download config wget.download("https://raw.githubusercontent.com/vadam5/NeMo/main/examples/nlp/entity_linking/conf/tiny_example_entity_linking_config.yaml", os.path.join(PROJECT_DIR, "tiny_example_entity_linking_config.yaml")) # Load in config file cfg = OmegaConf.load(os.path.join(PROJECT_DIR, "tiny_example_entity_linking_config.yaml")) # Set config file variables cfg.project_dir = PROJECT_DIR cfg.model.nemo_path = os.path.join(PROJECT_DIR, "tiny_example_sap_bert_model.nemo") cfg.model.train_ds.data_file = os.path.join(DATA_DIR, "tiny_example_train_pairs.tsv") cfg.model.validation_ds.data_file = os.path.join(DATA_DIR, "tiny_example_validation_pairs.tsv") # Initialize the trainer and model trainer = Trainer(**cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = nemo_nlp.models.EntityLinkingModel(cfg=cfg.model, trainer=trainer) # Train and save the model trainer.fit(model) model.save_to(cfg.model.nemo_path) ``` You can run the script at `examples/nlp/entity_linking/self_alignment_pretraining.py` to train a model on a larger dataset. Run ``` python self_alignment_pretraining.py project_dir=. ``` from the `examples/nlp/entity_linking` directory. ## Model Evaluation Let's evaluate our freshly trained model and compare its performance with a BERT Base encoder that hasn't undergone self-alignment pretraining. We first need to restore our trained model and load our BERT Base Baseline model. 
``` device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # Restore second stage pretrained model sap_model_cfg = cfg sap_model_cfg.index.index_save_name = os.path.join(PROJECT_DIR, "tiny_example_entity_linking_index") sap_model_cfg.index.index_ds.data_file = os.path.join(DATA_DIR, "tiny_example_index_data.tsv") sap_model = nemo_nlp.models.EntityLinkingModel.restore_from(sap_model_cfg.model.nemo_path).to(device) # Load original model base_model_cfg = OmegaConf.load(os.path.join(PROJECT_DIR, "tiny_example_entity_linking_config.yaml")) # Set train/val datasets to None to avoid loading datasets associated with training base_model_cfg.model.train_ds = None base_model_cfg.model.validation_ds = None base_model_cfg.index.index_save_name = os.path.join(PROJECT_DIR, "base_model_index") base_model_cfg.index.index_ds.data_file = os.path.join(DATA_DIR, "tiny_example_index_data.tsv") base_model = nemo_nlp.models.EntityLinkingModel(base_model_cfg.model).to(device) ``` We are going evaluate our model on a nearest neighbor task using top 1 and top 5 accuracies as our metric. We will be using a tiny example test knowledge base and test queries. For this evaluation we are going to be comparing every test query with every concept vector in our test set knowledge base. We will rank each item in the knowledge base by its cosine similarity with the test query. We'll then compare the IDs of the predicted most similar test knowledge base concepts with our ground truth query IDs to calculate top 1 and top 5 accuracies. For this metric higher is better. 
```
# Helper function to get data embeddings
def get_embeddings(model, dataloader):
    """Run the encoder over `dataloader`; return (embeddings, concept_ids) lists."""
    embeddings, cids = [], []

    with torch.no_grad():
        for batch in tqdm(dataloader):
            input_ids, token_type_ids, attention_mask, batch_cids = batch
            batch_embeddings = model.forward(input_ids=input_ids.to(device),
                                             token_type_ids=token_type_ids.to(device),
                                             attention_mask=attention_mask.to(device))

            # Accumulate index embeddings and their corresponding IDs
            embeddings.extend(batch_embeddings.cpu().detach().numpy())
            cids.extend(batch_cids)

    return embeddings, cids


def evaluate(model, test_kb, test_queries, ks):
    """Compute top-k linking accuracy of `model` matching `test_queries` to `test_kb`.

    ks is an iterable of cutoffs (e.g. [1, 5]); returns a dict {k: accuracy}.
    """
    # Initialize knowledge base and query data loaders
    test_kb_dataloader = model.setup_dataloader(test_kb, is_index_data=True)
    test_query_dataloader = model.setup_dataloader(test_queries, is_index_data=True)

    # Get knowledge base and query embeddings
    test_kb_embs, test_kb_cids = get_embeddings(model, test_kb_dataloader)
    test_query_embs, test_query_cids = get_embeddings(model, test_query_dataloader)

    # Dot-product score between each query and every knowledge base concept.
    # NOTE(review): this equals cosine similarity only if the embeddings are
    # L2-normalized by the model — confirm.
    score_matrix = np.matmul(np.array(test_query_embs), np.array(test_kb_embs).T)
    accs = {k : 0 for k in ks}

    # Compare the knowledge base IDs of the knowledge base entities with
    # the highest scores for the query
    for query_idx in tqdm(range(len(test_query_cids))):
        query_emb = test_query_embs[query_idx]  # NOTE(review): unused local
        query_cid = test_query_cids[query_idx]
        query_scores = score_matrix[query_idx]

        for k in ks:
            # argpartition gives the indices of the k largest scores (unordered)
            topk_idxs = np.argpartition(query_scores, -k)[-k:]
            topk_cids = [test_kb_cids[idx] for idx in topk_idxs]

            # If the correct query ID is among the top k closest kb IDs
            # the model correctly linked the entity
            match = int(query_cid in topk_cids)
            accs[k] += match

    for k in ks:
        accs[k] /= len(test_query_cids)

    return accs

# Create configs for our test data
test_kb = OmegaConf.create({
    "data_file": os.path.join(DATA_DIR, "tiny_example_test_kb.tsv"),
    "max_seq_length": 128,
    "batch_size": 10,
    "shuffle": False,
    })

test_queries = 
OmegaConf.create({ "data_file": os.path.join(DATA_DIR, "tiny_example_test_queries.tsv"), "max_seq_length": 128, "batch_size": 10, "shuffle": False, }) ks = [1, 5] # Evaluate both models on our test data base_accs = evaluate(base_model, test_kb, test_queries, ks) base_accs["Model"] = "BERT Base Baseline" sap_accs = evaluate(sap_model, test_kb, test_queries, ks) sap_accs["Model"] = "BERT + SAP" print("Top 1 and Top 5 Accuracy Comparison:") results_df = pd.DataFrame([base_accs, sap_accs], columns=["Model", 1, 5]) results_df = results_df.style.set_properties(**{'text-align': 'left', }).set_table_styles([dict(selector='th', props=[('text-align', 'left')])]) display(results_df) ``` The purpose of this section was to show an example of evaluating your entity linking model. This evaluation set contains very little data, and no serious conclusions should be drawn about model performance. Top 1 accuracy should be between 0.7 and 1.0 for both models and top 5 accuracy should be between 0.8 and 1.0. When evaluating a model trained on a larger dataset, you can use a nearest neighbors index to speed up the evaluation time. ## Building an Index To qualitatively observe the improvement we gain from the second stage pretraining, let's build two indices. One will be built with BERT base embeddings before self-alignment pretraining and one will be built with the model we just trained. Our knowledge base in this tutorial will be in the same domain and have some overlapping concepts as the training set. This data file is formatted as `ID\tconcept`. The `EntityLinkingDataset` class can load the data used for training the entity linking encoder as well as for building the index if the `is_index_data` flag is set to true. 
```
def build_index(cfg, model):
    """Embed the index dataset with `model`, then build and save a faiss IVFFlat index."""
    # Setup index dataset loader
    index_dataloader = model.setup_dataloader(cfg.index.index_ds, is_index_data=True)

    # Get index dataset embeddings
    embeddings, _ = get_embeddings(model, index_dataloader)

    # Train IVFFlat index using faiss
    # NOTE(review): IndexFlatL2 quantizer ranks neighbors by L2 distance, which
    # matches cosine ordering only for L2-normalized embeddings — confirm.
    embeddings = np.array(embeddings)
    quantizer = faiss.IndexFlatL2(cfg.index.dims)
    index = faiss.IndexIVFFlat(quantizer, cfg.index.dims, cfg.index.nlist)
    index = faiss.index_cpu_to_all_gpus(index)  # replicate the index across available GPUs
    index.train(embeddings)

    # Add concept embeddings to index in batches to bound peak memory
    for i in tqdm(range(0, embeddings.shape[0], cfg.index.index_batch_size)):
        index.add(embeddings[i:i+cfg.index.index_batch_size])

    # Save index (copy back to CPU first; GPU indices cannot be written directly)
    faiss.write_index(faiss.index_gpu_to_cpu(index), cfg.index.index_save_name)

build_index(sap_model_cfg, sap_model.to(device))
build_index(base_model_cfg, base_model.to(device))
```
## Entity Linking via Nearest Neighbor Search Now it's time to query our indices! We are going to query both our index built with embeddings from BERT Base, and our index with embeddings built from the SAP BERT model we trained. Our sample query phrases will be "*high blood sugar*" and "*head pain*". To query our indices, we first need to get the embedding of each query from the corresponding encoder model. We can then pass these query embeddings into the faiss index which will perform a nearest neighbor search, using cosine distance to compare the query embedding with embeddings present in the index. Once we get a list of knowledge base index concept IDs most closely matching our query, all that is left to do is map the IDs to a representative string describing the concept. 
```
def query_index(cfg, model, index, queries, id2string):
    """Embed `queries`, search `index` for their nearest kb concepts, and print them."""
    # Get query embeddings from our entity linking encoder model
    query_embs = get_query_embedding(queries, model).cpu().detach().numpy()

    # Use query embedding to find closest concept embedding in knowledge base
    distances, neighbors = index.search(query_embs, cfg.index.top_n)

    # Get the canonical strings corresponding to the IDs of the query's nearest neighbors in the kb
    neighbor_concepts = [[id2string[concept_id] for concept_id in query_neighbor] \
                          for query_neighbor in neighbors]

    # Display most similar concepts in the knowledge base.
    for query_idx in range(len(queries)):
        print(f"\nThe most similar concepts to {queries[query_idx]} are:")
        for cid, concept, dist in zip(neighbors[query_idx], neighbor_concepts[query_idx], distances[query_idx]):
            # NOTE(review): the index stores L2 distances (IndexFlatL2 quantizer),
            # so `1 - dist` is a true cosine similarity only for normalized
            # embeddings — confirm before reporting these as similarities.
            print(cid, concept, 1 - dist)

def get_query_embedding(queries, model):
    """Tokenize `queries` and return their embeddings from the encoder model."""
    # Tokenize our queries
    model_input = model.tokenizer(queries,
                                  add_special_tokens = True,
                                  padding = True,
                                  truncation = True,
                                  max_length = 512,
                                  return_token_type_ids = True,
                                  return_attention_mask = True)

    # Pass tokenized input into model
    query_emb = model.forward(input_ids=torch.LongTensor(model_input["input_ids"]).to(device),
                              token_type_ids=torch.LongTensor(model_input["token_type_ids"]).to(device),
                              attention_mask=torch.LongTensor(model_input["attention_mask"]).to(device))
    return query_emb

# Load indices
sap_index = faiss.read_index(sap_model_cfg.index.index_save_name)
base_index = faiss.read_index(base_model_cfg.index.index_save_name)

# Map concept IDs to one canonical string
# NOTE(review): the file handle is never closed — consider a `with` block.
index_data = open(sap_model_cfg.index.index_ds.data_file, "r", encoding='utf-8-sig')
id2string = {}

for line in index_data:
    cid, concept = line.split("\t")
    # IDs in the data file appear to be 1-based; shifting to 0-based so they
    # line up with faiss insertion positions — TODO confirm against the file.
    id2string[int(cid) - 1] = concept.strip()

id2string

# Some sample queries
queries = ["high blood sugar", "head pain"]

# Query BERT Base
print("BERT Base output before Self Alignment Pretraining:")
query_index(base_model_cfg, base_model, base_index, queries, id2string)
print("\n" + 
"-" * 50 + "\n") # Query SAP BERT print("SAP BERT output after Self Alignment Pretraining:") query_index(sap_model_cfg, sap_model, sap_index, queries, id2string) print("\n" + "-" * 50 + "\n") ``` Even after only training on this tiny amount of data, the qualitative performance boost from self-alignment pretraining is visible. The baseline model links "*high blood sugar*" to the entity "*6 diabetes*" while our SAP BERT model accurately links "*high blood sugar*" to "*Hyperinsulinemia*". Similarly, "*head pain*" and "*Myocardial infraction*" are not the same concept, but "*head pain*" and "*Headache*" are. For larger knowledge bases keeping the default embedding size might be too large and cause out of memory issues. You can apply PCA or some other dimensionality reduction method to your data to reduce its memory footprint. Code for creating a text file of all the UMLS entities in the correct format needed to build an index and creating a dictionary mapping concept ids to canonical concept strings can be found here `examples/nlp/entity_linking/data/umls_dataset_processing.py`. The code for extracting knowledge base concept embeddings, training and applying a PCA transformation to the embeddings, building a faiss index and querying the index from the command line is located at `examples/nlp/entity_linking/build_index.py` and `examples/nlp/entity_linking/query_index.py`. If you've cloned the NeMo repo, both of these steps can be run as follows on the command line from the `examples/nlp/entity_linking/` directory. ``` python data/umls_dataset_processing.py --index python build_index.py --restore python query_index.py --restore ``` By default the project directory will be "." but can be changed by adding the flag `--project_dir=<PATH>` after each of the above commands. Intermediate steps of the index building process are saved. In the occurrence of an error, previously completed steps do not need to be rerun. 
## Command Recap

Here is a recap of the commands and steps to repeat this process on the full UMLS dataset.

1) Download the UMLS dataset file `MRCONSO.RRF` from the NIH website and place it in the `examples/nlp/entity_linking/data` directory.

2) Run the following commands from the `examples/nlp/entity_linking` directory

```
python data/umls_dataset_processing.py
python self_alignment_pretraining.py project_dir=.
python data/umls_dataset_processing.py --index
python build_index.py --restore
python query_index.py --restore
```

The model will take ~24hrs to train on two GPUs and ~48hrs to train on one GPU. By default the project directory will be "." but can be changed by adding the flag `--project_dir=<PATH>` after each of the above commands and changing `project_dir=<PATH>` in the `self_alignment_pretraining.py` command. If you change the project directory, you should also move the `MRCONSO.RRF` file to a `data` sub directory within the one you've specified.

As mentioned in the introduction, entity linking within NVIDIA NeMo is not limited to the medical domain. The same data processing and training steps can be applied to a variety of domains and use cases. You can edit the datasets used as well as training and loss function hyperparameters within your config file to better suit your domain.
github_jupyter
```
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```

# Vertex SDK: Custom training image classification model for batch prediction with explainability

<table align="left">
  <td>
    <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_image_classification_batch_explain.ipynb">
      <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
    </a>
  </td>
  <td>
    <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_image_classification_batch_explain.ipynb">
      <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub
    </a>
  </td>
  <td>
    <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_image_classification_batch_explain.ipynb">
      Open in Google Cloud Notebooks
    </a>
  </td>
</table>
<br/><br/><br/>

## Overview

This tutorial demonstrates how to use the Vertex SDK to train and deploy a custom image classification model for batch prediction with explanation.

### Dataset

The dataset used for this tutorial is the [CIFAR10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview).
The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. ### Objective In this tutorial, you create a custom model, with a training pipeline, from a Python script in a Google prebuilt Docker container using the Vertex SDK, and then do a batch prediction with explanations on the uploaded model. You can alternatively create custom models using `gcloud` command-line tool or online using Cloud Console. The steps performed include: - Create a Vertex custom job for training a model. - Train the TensorFlow model. - Retrieve and load the model artifacts. - View the model evaluation. - Set explanation parameters. - Upload the model as a Vertex `Model` resource. - Make a batch prediction with explanations. ### Costs This tutorial uses billable components of Google Cloud: * Vertex AI * Cloud Storage Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. ### Set up your local development environment If you are using Colab or Google Cloud Notebook, your environment already meets all the requirements to run this notebook. You can skip this step. Otherwise, make sure your environment meets this notebook's requirements. You need the following: - The Cloud Storage SDK - Git - Python 3 - virtualenv - Jupyter notebook running in a virtual environment with Python 3 The Cloud Storage guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions: 1. 
[Install and initialize the SDK](https://cloud.google.com/sdk/docs/). 2. [Install Python 3](https://cloud.google.com/python/setup#installing_python). 3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3. 4. Activate that environment and run `pip3 install Jupyter` in a terminal shell to install Jupyter. 5. Run `jupyter notebook` on the command line in a terminal shell to launch Jupyter. 6. Open this notebook in the Jupyter Notebook Dashboard. ## Installation Install the latest version of Vertex SDK for Python. ``` import os # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG ``` Install the latest GA version of *google-cloud-storage* library as well. ``` ! pip3 install -U google-cloud-storage $USER_FLAG if os.environ["IS_TESTING"]: ! pip3 install --upgrade tensorflow $USER_FLAG if os.environ["IS_TESTING"]: ! apt-get update && apt-get install -y python3-opencv-headless ! apt-get install -y libgl1-mesa-dev ! pip3 install --upgrade opencv-python-headless $USER_FLAG ``` ### Restart the kernel Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages. ``` import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### GPU runtime This tutorial does not require a GPU runtime. ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. 
[Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com) 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`. ``` PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. - Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations) ``` REGION = "us-central1" # @param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial. 
``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Authenticate your Google Cloud account **If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. **Otherwise**, follow these steps: In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page. **Click Create service account**. In the **Service account name** field, enter a name, and click **Create**. In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. ``` # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. 
elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ``` ### Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. ``` BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ``` **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ``` ! gsutil mb -l $REGION $BUCKET_NAME ``` Finally, validate access to your Cloud Storage bucket by examining its contents: ``` ! gsutil ls -al $BUCKET_NAME ``` ### Set up variables Next, set up some variables used throughout the tutorial. ### Import libraries and define constants ``` import google.cloud.aiplatform as aip ``` ## Initialize Vertex SDK for Python Initialize the Vertex SDK for Python for your project and corresponding bucket. ``` aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME) ``` #### Set hardware accelerators You can set hardware accelerators for training and prediction. Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) Otherwise specify `(None, None)` to use a container image to run on a CPU. 
Learn more [here](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators) hardware accelerator support for your region *Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support. ``` if os.getenv("IS_TESTING_TRAIN_GPU"): TRAIN_GPU, TRAIN_NGPU = ( aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_TRAIN_GPU")), ) else: TRAIN_GPU, TRAIN_NGPU = (None, None) if os.getenv("IS_TESTING_DEPLOY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.gapic.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPLOY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (None, None) ``` #### Set pre-built containers Set the pre-built Docker container image for training and prediction. For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers). For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers). 
``` if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2-1" if TF[0] == "2": if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION) DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION) print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU) ``` #### Set machine type Next, set the machine type to use for training and prediction. - Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \] *Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*. ``` if os.getenv("IS_TESTING_TRAIN_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", TRAIN_COMPUTE) if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) ``` # Tutorial Now you are ready to start creating your own custom model and training for CIFAR10. 
### Examine the training package #### Package layout Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout. - PKG-INFO - README.md - setup.cfg - setup.py - trainer - \_\_init\_\_.py - task.py The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image. The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). #### Package Assembly In the following cells, you will assemble the training package. ``` # Make folder for Python training script ! rm -rf custom ! mkdir custom # Add package information ! touch custom/README.md setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0" ! echo "$setup_cfg" > custom/setup.cfg setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())" ! echo "$setup_py" > custom/setup.py pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex" ! echo "$pkg_info" > custom/PKG-INFO # Make the training subfolder ! mkdir custom/trainer ! touch custom/trainer/__init__.py ``` #### Task.py contents In the next cell, you write the contents of the training script task.py. We won't go into detail, it's just there for you to browse. In summary: - Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`. - Loads CIFAR10 dataset from TF Datasets (tfds). 
- Builds a model using TF.Keras model API. - Compiles the model (`compile()`). - Sets a training distribution strategy according to the argument `args.distribute`. - Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps` - Saves the trained model (`save(args.model_dir)`) to the specified model directory. ``` %%writefile custom/trainer/task.py # Single, Mirror and Multi-Machine Distributed Training for CIFAR-10 import tensorflow_datasets as tfds import tensorflow as tf from tensorflow.python.client import device_lib import argparse import os import sys tfds.disable_progress_bar() parser = argparse.ArgumentParser() parser.add_argument('--model-dir', dest='model_dir', default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.') parser.add_argument('--lr', dest='lr', default=0.01, type=float, help='Learning rate.') parser.add_argument('--epochs', dest='epochs', default=10, type=int, help='Number of epochs.') parser.add_argument('--steps', dest='steps', default=200, type=int, help='Number of steps per epoch.') parser.add_argument('--distribute', dest='distribute', type=str, default='single', help='distributed training strategy') args = parser.parse_args() print('Python Version = {}'.format(sys.version)) print('TensorFlow Version = {}'.format(tf.__version__)) print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found'))) print('DEVICES', device_lib.list_local_devices()) # Single Machine, single compute device if args.distribute == 'single': if tf.test.is_gpu_available(): strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") else: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") # Single Machine, multiple compute device elif args.distribute == 'mirror': strategy = tf.distribute.MirroredStrategy() # Multiple Machine, multiple compute device elif args.distribute == 'multi': strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() # Multi-worker configuration 
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync)) # Preparing dataset BUFFER_SIZE = 10000 BATCH_SIZE = 64 def make_datasets_unbatched(): # Scaling CIFAR10 data from (0, 255] to (0., 1.] def scale(image, label): image = tf.cast(image, tf.float32) image /= 255.0 return image, label datasets, info = tfds.load(name='cifar10', with_info=True, as_supervised=True) return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat() # Build the Keras model def build_and_compile_cnn_model(): model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile( loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr), metrics=['accuracy']) return model # Train the model NUM_WORKERS = strategy.num_replicas_in_sync # Here the batch size scales up by number of workers since # `tf.data.Dataset.batch` expects the global batch size. GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE) with strategy.scope(): # Creation of dataset, and model building/compiling need to be within # `strategy.scope()`. model = build_and_compile_cnn_model() model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps) model.save(args.model_dir) ``` #### Store training script on your Cloud Storage bucket Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket. ``` ! rm -f custom.tar custom.tar.gz ! tar cvf custom.tar custom ! gzip custom.tar ! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz ``` ### Create and run custom training job To train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job. 
#### Create custom training job A custom training job is created with the `CustomTrainingJob` class, with the following parameters: - `display_name`: The human readable name for the custom training job. - `container_uri`: The training container image. - `requirements`: Package requirements for the training container image (e.g., pandas). - `script_path`: The relative path to the training script. ``` job = aip.CustomTrainingJob( display_name="cifar10_" + TIMESTAMP, script_path="custom/trainer/task.py", container_uri=TRAIN_IMAGE, requirements=["gcsfs==0.7.1", "tensorflow-datasets==4.4"], ) print(job) ``` ### Prepare your command-line arguments Now define the command-line arguments for your custom training container: - `args`: The command-line arguments to pass to the executable that is set as the entry point into the container. - `--model-dir` : For our demonstrations, we use this command-line argument to specify where to store the model artifacts. - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. - `"--steps=" + STEPS`: The number of steps per epoch. ``` MODEL_DIR = "{}/{}".format(BUCKET_NAME, TIMESTAMP) EPOCHS = 20 STEPS = 100 DIRECT = True if DIRECT: CMDARGS = [ "--model-dir=" + MODEL_DIR, "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), ] else: CMDARGS = [ "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), ] ``` #### Run the custom training job Next, you run the custom job to start the training job by invoking the method `run`, with the following parameters: - `args`: The command-line arguments to pass to the training script. 
- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training). - `machine_type`: The machine type for the compute instances. - `accelerator_type`: The hardware accelerator type. - `accelerator_count`: The number of accelerators to attach to a worker replica. - `base_output_dir`: The Cloud Storage location to write the model artifacts to. - `sync`: Whether to block until completion of the job. ``` if TRAIN_GPU: job.run( args=CMDARGS, replica_count=1, machine_type=TRAIN_COMPUTE, accelerator_type=TRAIN_GPU.name, accelerator_count=TRAIN_NGPU, base_output_dir=MODEL_DIR, sync=True, ) else: job.run( args=CMDARGS, replica_count=1, machine_type=TRAIN_COMPUTE, base_output_dir=MODEL_DIR, sync=True, ) model_path_to_deploy = MODEL_DIR ``` ## Load the saved model Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction. To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`. ``` import tensorflow as tf local_model = tf.keras.models.load_model(MODEL_DIR) ``` ## Evaluate the model Now find out how good the model is. ### Load evaluation data You will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This returns the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels. You don't need the training data, and hence why we loaded it as `(_, _)`. Before you can run the data through evaluation, you need to preprocess it: `x_test`: 1. Normalize (rescale) the pixel data by dividing each pixel by 255. This replaces each single byte integer pixel with a 32-bit floating point number between 0 and 1. `y_test`:<br/> 2. 
The labels are currently scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels. So we don't need to do anything more. ``` import numpy as np from tensorflow.keras.datasets import cifar10 (_, _), (x_test, y_test) = cifar10.load_data() x_test = (x_test / 255.0).astype(np.float32) print(x_test.shape, y_test.shape) ``` ### Perform the model evaluation Now evaluate how well the model in the custom job did. ``` local_model.evaluate(x_test, y_test) ``` ### Serving function for image data To pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model. To resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU). When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model: - `io.decode_jpeg`- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB). - `image.convert_image_dtype` - Changes integer pixel values to float 32. - `image.resize` - Resizes the image to match the input shape for the model. - `resized / 255.0` - Rescales (normalization) the pixel data between 0 and 1. At this point, the data can be passed to the model (`m_call`). 
#### XAI Signatures When the serving function is saved back with the underlying model (`tf.saved_model.save`), you specify the input layer of the serving function as the signature `serving_default`. For XAI image models, you need to save two additional signatures from the serving function: - `xai_preprocess`: The preprocessing function in the serving function. - `xai_model`: The concrete function for calling the model. ``` CONCRETE_INPUT = "numpy_inputs" def _preprocess(bytes_input): decoded = tf.io.decode_jpeg(bytes_input, channels=3) decoded = tf.image.convert_image_dtype(decoded, tf.float32) resized = tf.image.resize(decoded, size=(32, 32)) rescale = tf.cast(resized / 255.0, tf.float32) return rescale @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def preprocess_fn(bytes_inputs): decoded_images = tf.map_fn( _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False ) return { CONCRETE_INPUT: decoded_images } # User needs to make sure the key matches model's input @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(bytes_inputs): images = preprocess_fn(bytes_inputs) prob = m_call(**images) return prob m_call = tf.function(local_model.call).get_concrete_function( [tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)] ) tf.saved_model.save( local_model, model_path_to_deploy, signatures={ "serving_default": serving_fn, # Required for XAI "xai_preprocess": preprocess_fn, "xai_model": m_call, }, ) ``` ## Get the serving function signature You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer. When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request. 
You also need to know the name of the serving function's input and output layer for constructing the explanation metadata -- which is discussed subsequently.

```
loaded = tf.saved_model.load(model_path_to_deploy)

serving_input = list(
    loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
serving_output = list(loaded.signatures["serving_default"].structured_outputs.keys())[0]
print("Serving function output:", serving_output)

input_name = local_model.input.name
print("Model input name:", input_name)

output_name = local_model.output.name
print("Model output name:", output_name)
```

### Explanation Specification

To get explanations when doing a prediction, you must enable the explanation capability and set corresponding settings when you upload your custom model to a Vertex `Model` resource. These settings are referred to as the explanation metadata, which consists of:

- `parameters`: This is the specification for the explainability algorithm to use for explanations on your model. You can choose between:

  - Shapley - *Note*, not recommended for image data -- can be very long running
  - XRAI
  - Integrated Gradients

- `metadata`: This is the specification for how the algorithm is applied on your custom model.

#### Explanation Parameters

Let's first dive deeper into the settings for the explainability algorithm.

#### Shapley

Assigns credit for the outcome to each feature, and considers different permutations of the features. This method provides a sampling approximation of exact Shapley values.

Use Cases:

- Classification and regression on tabular data.

Parameters:

- `path_count`: This is the number of paths over the features that will be processed by the algorithm. An exact approximation of the Shapley values requires M! paths, where M is the number of features. For the CIFAR10 dataset, this would be 3072 (32*32*3). For any non-trivial number of features, this is too compute expensive.
You can reduce the number of paths over the features to M * `path_count`. #### Integrated Gradients A gradients-based method to efficiently compute feature attributions with the same axiomatic properties as the Shapley value. Use Cases: - Classification and regression on tabular data. - Classification on image data. Parameters: - `step_count`: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time. #### XRAI Based on the integrated gradients method, XRAI assesses overlapping regions of the image to create a saliency map, which highlights relevant regions of the image rather than pixels. Use Cases: - Classification on image data. Parameters: - `step_count`: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time. In the next code cell, set the variable `XAI` to which explainabilty algorithm you will use on your custom model. ``` XAI = "ig" # [ shapley, ig, xrai ] if XAI == "shapley": PARAMETERS = {"sampled_shapley_attribution": {"path_count": 10}} elif XAI == "ig": PARAMETERS = {"integrated_gradients_attribution": {"step_count": 50}} elif XAI == "xrai": PARAMETERS = {"xrai_attribution": {"step_count": 50}} parameters = aip.explain.ExplanationParameters(PARAMETERS) ``` #### Explanation Metadata Let's first dive deeper into the explanation metadata, which consists of: - `outputs`: A scalar value in the output to attribute -- what to explain. For example, in a probability output \[0.1, 0.2, 0.7\] for classification, one wants an explanation for 0.7. Consider the following formulae, where the output is `y` and that is what we want to explain. y = f(x) Consider the following formulae, where the outputs are `y` and `z`. 
Since we can only do attribution for one scalar value, we have to pick whether we want to explain the output `y` or `z`. Assume in this example the model is object detection and y and z are the bounding box and the object classification. You would want to pick which of the two outputs to explain. y, z = f(x) The dictionary format for `outputs` is: { "outputs": { "[your_display_name]": "output_tensor_name": [layer] } } <blockquote> - [your_display_name]: A human readable name you assign to the output to explain. A common example is "probability".<br/> - "output_tensor_name": The key/value field to identify the output layer to explain. <br/> - [layer]: The output layer to explain. In a single task model, like a tabular regressor, it is the last (topmost) layer in the model. </blockquote> - `inputs`: The features for attribution -- how they contributed to the output. Consider the following formulae, where `a` and `b` are the features. We have to pick which features to explain how the contributed. Assume that this model is deployed for A/B testing, where `a` are the data_items for the prediction and `b` identifies whether the model instance is A or B. You would want to pick `a` (or some subset of) for the features, and not `b` since it does not contribute to the prediction. y = f(a,b) The minimum dictionary format for `inputs` is: { "inputs": { "[your_display_name]": "input_tensor_name": [layer] } } <blockquote> - [your_display_name]: A human readable name you assign to the input to explain. A common example is "features".<br/> - "input_tensor_name": The key/value field to identify the input layer for the feature attribution. <br/> - [layer]: The input layer for feature attribution. In a single input tensor model, it is the first (bottom-most) layer in the model. 
</blockquote> Since the inputs to the model are images, you can specify the following additional field as a reporting/visualization aid: <blockquote> - "modality": "image": Indicates the field values are image data. </blockquote> ``` random_baseline = np.random.rand(32, 32, 3) input_baselines = [{"number_value": x} for x in random_baseline] INPUT_METADATA = {"input_tensor_name": CONCRETE_INPUT, "modality": "image"} OUTPUT_METADATA = {"output_tensor_name": serving_output} input_metadata = aip.explain.ExplanationMetadata.InputMetadata(INPUT_METADATA) output_metadata = aip.explain.ExplanationMetadata.OutputMetadata(OUTPUT_METADATA) metadata = aip.explain.ExplanationMetadata( inputs={"image": input_metadata}, outputs={"class": output_metadata} ) ``` ## Upload the model Next, upload your model to a `Model` resource using the `Model.upload()` method, with the following parameters: - `display_name`: The human readable name for the `Model` resource. - `artifact_uri`: The Cloud Storage location of the trained model artifacts. - `serving_container_image_uri`: The serving container image. - `sync`: Whether to execute the upload asynchronously or synchronously. - `explanation_parameters`: Parameters to configure explaining for `Model`'s predictions. - `explanation_metadata`: Metadata describing the `Model`'s input and output for explanation. If the `upload()` method is run asynchronously, you can subsequently block until completion with the `wait()` method. 
``` model = aip.Model.upload( display_name="cifar10_" + TIMESTAMP, artifact_uri=MODEL_DIR, serving_container_image_uri=DEPLOY_IMAGE, explanation_parameters=parameters, explanation_metadata=metadata, sync=False, ) model.wait() ``` ### Get test items You will use examples out of the test (holdout) portion of the dataset as a test items. ``` test_image_1 = x_test[0] test_label_1 = y_test[0] test_image_2 = x_test[1] test_label_2 = y_test[1] print(test_image_1.shape) ``` ### Prepare the request content You are going to send the CIFAR10 images as compressed JPG image, instead of the raw uncompressed bytes: - `cv2.imwrite`: Use openCV to write the uncompressed image to disk as a compressed JPEG image. - Denormalize the image data from \[0,1) range back to [0,255). - Convert the 32-bit floating point values to 8-bit unsigned integers. ``` import cv2 cv2.imwrite("tmp1.jpg", (test_image_1 * 255).astype(np.uint8)) cv2.imwrite("tmp2.jpg", (test_image_2 * 255).astype(np.uint8)) ``` ### Copy test item(s) For the batch prediction, you will copy the test items over to your Cloud Storage bucket. ``` ! gsutil cp tmp1.jpg $BUCKET_NAME/tmp1.jpg ! gsutil cp tmp2.jpg $BUCKET_NAME/tmp2.jpg test_item_1 = BUCKET_NAME + "/tmp1.jpg" test_item_2 = BUCKET_NAME + "/tmp2.jpg" ``` ### Make the batch input file Now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can only be in JSONL format. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs: - `input_name`: the name of the input layer of the underlying model. - `'b64'`: A key that indicates the content is base64 encoded. - `content`: The compressed JPG image bytes as a base64 encoded string. 
Each instance in the prediction request is a dictionary entry of the form: {serving_input: {'b64': content}} To pass the image data to the prediction service you encode the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network. - `tf.io.read_file`: Read the compressed JPG images into memory as raw bytes. - `base64.b64encode`: Encode the raw bytes into a base64 encoded string. ``` import base64 import json gcs_input_uri = BUCKET_NAME + "/" + "test.jsonl" with tf.io.gfile.GFile(gcs_input_uri, "w") as f: bytes = tf.io.read_file(test_item_1) b64str = base64.b64encode(bytes.numpy()).decode("utf-8") data = {serving_input: {"b64": b64str}} f.write(json.dumps(data) + "\n") bytes = tf.io.read_file(test_item_2) b64str = base64.b64encode(bytes.numpy()).decode("utf-8") data = {serving_input: {"b64": b64str}} f.write(json.dumps(data) + "\n") ``` ### Make the batch prediction request Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters: - `job_display_name`: The human readable name for the batch prediction job. - `gcs_source`: A list of one or more batch request input files. - `gcs_destination_prefix`: The Cloud Storage location for storing the batch prediction resuls. - `instances_format`: The format for the input instances, either 'csv' or 'jsonl'. Defaults to 'jsonl'. - `predictions_format`: The format for the output predictions, either 'csv' or 'jsonl'. Defaults to 'jsonl'. - `machine_type`: The type of machine to use for training. - `sync`: If set to True, the call will block while waiting for the asynchronous batch job to complete. 
``` MIN_NODES = 1 MAX_NODES = 1 batch_predict_job = model.batch_predict( job_display_name="cifar10_" + TIMESTAMP, gcs_source=gcs_input_uri, gcs_destination_prefix=BUCKET_NAME, instances_format="jsonl", model_parameters=None, machine_type=DEPLOY_COMPUTE, starting_replica_count=MIN_NODES, max_replica_count=MAX_NODES, generate_explanation=True, sync=False, ) print(batch_predict_job) ``` ### Wait for completion of batch prediction job Next, wait for the batch job to complete. Alternatively, one can set the parameter `sync` to `True` in the `batch_predict()` method to block until the batch prediction job is completed. ``` batch_predict_job.wait() ``` ### Get the explanations Next, get the explanation results from the completed batch prediction job. The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more explanation requests in a CSV format: - CSV header + predicted_label - CSV row + explanation, per prediction request ``` import tensorflow as tf bp_iter_outputs = batch_predict_job.iter_outputs() explanation_results = list() for blob in bp_iter_outputs: if blob.name.split("/")[-1].startswith("explanation"): explanation_results.append(blob.name) tags = list() for explanation_result in explanation_results: gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{explanation_result}" with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile: for line in gfile.readlines(): print(line) ``` # Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. 
Otherwise, you can delete the individual resources you created in this tutorial: - Dataset - Pipeline - Model - Endpoint - AutoML Training Job - Batch Job - Custom Job - Hyperparameter Tuning Job - Cloud Storage Bucket ``` delete_all = True if delete_all: # Delete the dataset using the Vertex dataset object try: if "dataset" in globals(): dataset.delete() except Exception as e: print(e) # Delete the model using the Vertex model object try: if "model" in globals(): model.delete() except Exception as e: print(e) # Delete the endpoint using the Vertex endpoint object try: if "endpoint" in globals(): endpoint.delete() except Exception as e: print(e) # Delete the AutoML or Pipeline trainig job try: if "dag" in globals(): dag.delete() except Exception as e: print(e) # Delete the custom trainig job try: if "job" in globals(): job.delete() except Exception as e: print(e) # Delete the batch prediction job using the Vertex batch prediction object try: if "batch_predict_job" in globals(): batch_predict_job.delete() except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object try: if "hpt_job" in globals(): hpt_job.delete() except Exception as e: print(e) if "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME ```
github_jupyter
## Make 3d model sections ``` import telluricpy, numpy as np, gc import scipy import VTKUtil as pvtkUtil %matplotlib qt def simpeg2vtk(mesh,modDict): from vtk import vtkRectilinearGrid as rectGrid, vtkXMLRectilinearGridWriter as rectWriter, VTK_VERSION from vtk.util.numpy_support import numpy_to_vtk # Deal with dimensionalities if mesh.dim >= 1: vX = mesh.vectorNx xD = mesh.nNx yD,zD = 1,1 vY, vZ = np.array([0,0]) if mesh.dim >= 2: vY = mesh.vectorNy yD = mesh.nNy if mesh.dim == 3: vZ = mesh.vectorNz zD = mesh.nNz # Use rectilinear VTK grid. # Assign the spatial information. vtkObj = rectGrid() vtkObj.SetDimensions(xD,yD,zD) vtkObj.SetXCoordinates(numpy_to_vtk(vX,deep=1)) vtkObj.SetYCoordinates(numpy_to_vtk(vY,deep=1)) vtkObj.SetZCoordinates(numpy_to_vtk(vZ,deep=1)) # Assign the model('s) to the object if modDict is not None: for item in modDict.iteritems(): # Convert numpy array vtkDoubleArr = numpy_to_vtk(item[1],deep=1) vtkDoubleArr.SetName(item[0]) vtkObj.GetCellData().AddArray(vtkDoubleArr) # Set the active scalar vtkObj.GetCellData().SetActiveScalars(modDict.keys()[0]) return vtkObj def plot3dSetion(mesh,mod,figName,lutName = 'Con', camera = None): # Convert the mesh and model fullvtkObj = simpeg2vtk(mesh,mod) import VTKUtil as pvtkUtil # Make scalar bar and lookup table lutRes, scalarBarRes, vecNameRes = pvtkUtil.makeLookupTable(lutName) scalarBarRes.SetPosition(0.01,0.15) # Set some sizes renSize = [1280,800] axesBounds = [556800.0, 557800.0, 7133100.0, 7134100.0, -500.0, 500.0] screenSize = 14.0 xRange = [556.8, 557.8] yRange = [7133.1, 7134.1] zRange = [-0.5, 0.5] # Read the model boxImp = vtk.vtkBox() boxImp.SetBounds(556800.0, 557800.0, 7133100.0, 7134100.0, -500.0, 500.0) extractFilt = vtk.vtkExtractGeometry() extractFilt.SetExtractInside(1) extractFilt.SetExtractBoundaryCells(1) extractFilt.SetInputData(fullvtkObj) extractFilt.SetImplicitFunction(boxImp) # extractFilt.Update() # Remove air thresFilt = vtk.vtkThreshold() 
thresFilt.SetInputConnection(extractFilt.GetOutputPort()) thresFilt.ThresholdByUpper(1e-8) thresFilt.AllScalarsOn() thresFilt.SetInputArrayToProcess(0,0,0,vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS,vecNameRes) # thresFilt.Update() vtkObjIn = thresFilt.GetOutput() # print vtkObjIn.GetNumberOfCells() # Set the camera if camera is None: camera = vtk.vtkCamera() camera.SetClippingRange(85.01306538179968, 85013.06538179968) camera.SetFocalPoint(557170.1657806067, 7133465.5306818895, -8.100855224539828) camera.SetPosition(555046.658444768, 7131550.331097811, 1643.4398007476757) camera.SetViewUp(0.35030191124890564, 0.3579734646304746, 0.8655308022224387) camera.SetParallelScale(1.0) else: camera = camera # Make a renderer ren = vtk.vtkRenderer() ren.SetActiveCamera(camera) # Make renderwindow. renwin = vtk.vtkRenderWindow() # Set to off screen rendering renwin.AddRenderer(ren) renwin.SetSize(renSize) iren = vtk.vtkRenderWindowInteractor() iren.GetInteractorStyle().SetCurrentStyleToTrackballCamera() iren.SetRenderWindow(renwin) iren.Initialize() # Add the axes axes = pvtkUtil.addAxes(screenSize,ren,xRange,yRange,zRange,axesBounds) ## Organize the data # Plane 1 global plane, actor1 plane = vtk.vtkPlane() plane.SetOrigin(557100,7133600,0) plane.SetNormal(0,-1,0) vtkObjClip1 = vtk.vtkClipDataSet() # vtkObjClip1.SetInputData(vtkObjIn) vtkObjClip1.SetInputConnection(thresFilt.GetOutputPort()) vtkObjClip1.SetClipFunction(plane) vtkObjClip1.InsideOutOn() # vtkObjClip1.Update() vtkObj1 = vtkObjClip1.GetOutput() vtkObj1.GetCellData().SetActiveScalars(vecNameRes) # Set the mapper's # Clip 1 mapper1 = vtk.vtkDataSetMapper() mapper1.SetInputData(vtkObj1) mapper1.SetScalarVisibility(1) mapper1.SetLookupTable(lutRes) mapper1.UseLookupTableScalarRangeOn() mapper1.SetInterpolateScalarsBeforeMapping(1) actor1 = vtk.vtkLODActor() actor1.SetMapper(mapper1) actor1.VisibilityOff() # actor1.GetProperty().SetEdgeColor(1,0.5,0) actor1.GetProperty().SetEdgeVisibility(0) # actor1.SetScale(1.01, 
1.01, 1.01) # actor1.GetProperty().SetRepresentationToSurface() if False: # Create the widget, its representation, and callback def MovePlane(widget, event_string): rep.GetPlane(plane) rep = vtk.vtkImplicitPlaneRepresentation() rep.SetPlaceFactor(1.0); rep.PlaceWidget(vtkObjClip1.GetOutput().GetBounds()) rep.DrawPlaneOn() rep.SetOrigin(557100,7133600,0) rep.SetNormal(0,-1,0) # rep.SetPlane(plane) planeWidget = vtk.vtkImplicitPlaneWidget2() planeWidget.SetInteractor(iren) planeWidget.SetRepresentation(rep) planeWidget.SetEnabled(1) # planeWidget.PlaceWidget() planeWidget.AddObserver("InteractionEvent",MovePlane) else: # Callback function def movePlane(obj, event): global plane, actor1 obj.GetPlane(plane) actor1.VisibilityOn() # Associate the line widget with the interactor planeWidget = vtk.vtkImplicitPlaneWidget() # planeWidget.SetInputConnection(thresFilt.GetOutputPort()) planeWidget.SetInputConnection(vtkObjClip1.GetOutputPort()) planeWidget.SetInteractor(iren) planeWidget.SetPlaceFactor(1.05) # Increases the size of the widget bounds # b1,b2,b3 = vtkObj1.GetBounds()[::2] # planeWidget.SetOrigin(b1,b2,b3) planeWidget.SetOutsideBounds(0) # Not allow the widget to move outside the input bounds planeWidget.SetScaleEnabled(0) # Ability to scale with the mouse planeWidget.SetEnabled(1) # Starts the widget planeWidget.SetOutlineTranslation(0) # Abiltiy to move the widget with the mouse planeWidget.GetPlaneProperty().SetOpacity(0.1) planeWidget.PlaceWidget() planeWidget.AddObserver("InteractionEvent",movePlane) # Orientation widget oriWid = pvtkUtil.addDirectionWidget(iren,ren,150,35) # Set renderer options ren.SetBackground(1.0,1.0,1.0) ren.AddActor(actor1) ren.AddActor2D(scalarBarRes) ren.AddViewProp(axes) # Fix the colorbar title title = scalarBarRes.GetTitle() scalarBarRes.SetTitle('') # Add the title at the top of the figure manually. 
titText = vtk.vtkTextActor() titText.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport() titText.GetTextProperty().SetFontSize(35) titText.GetTextProperty().SetColor(0.0,0.0,0.0) titText.SetPosition(0.02,0.87) titText.SetInput(title) ren.AddActor(titText) # Start the render Window renwin.Render() iren.Start() # Save the fig planeWidget.SetEnabled(0) w2i = vtk.vtkWindowToImageFilter() w2i.SetMagnification(1) w2i.SetInput(renwin) w2i.Update() writer = vtk.vtkTIFFWriter() writer.SetCompressionToNoCompression() writer.SetInputConnection(w2i.GetOutputPort()) writer.SetFileName(figName + '.tif') writer.Write() if True: camera = ren.GetActiveCamera() # For playing around with the locations of the figures # For printing out view information. print('camera.GetClippingRange' + str(camera.GetClippingRange())) print('camera.GetFocalPoint' + str(camera.GetFocalPoint())) print('camera.GetPosition' + str(camera.GetPosition())) print('camera.GetViewUp' + str(camera.GetViewUp())) print('camera.GetParallelScale(' + str(camera.GetParallelScale()) +')') # Close the window when exited iren.TerminateApp() renwin.Finalize() del iren, renwin # Gargage collect gc.collect() # Load the model and mesh mesh, modDict = simpeg.Mesh.TensorMesh.readVTK('MTwork/inv3d_HPK1/run_thibaut4_off/recoveredMod_run_thibaut4_off_it10.vtr') # Plot the section plot3dSetion(mesh,modDict,'GeologicalModel') # Plot the background model mesh, modDict = simpeg.Mesh.TensorMesh.readVTK('MTwork/inv3d_HPK1/run_thibaut4_off/nsmesh_CoarseHKPK1_NoExtension.vtr') plot3dSetion(mesh,modDict,'MTwork/OriginalModel') import simpegViz modView = simpegViz.vtkView(mesh,{'C':modDict}) modView.limits = 1e-5, 0.01 modView.range = 1e-5, 0.01 modView.extent = [8,28,8,33,8,55] modView.Show() ```
github_jupyter
# Inference and Validation Now that you have a trained network, you can use it for making predictions. This is typically called **inference**, a term borrowed from statistics. However, neural networks have a tendency to perform *too well* on the training data and aren't able to generalize to data that hasn't been seen before. This is called **overfitting** and it impairs inference performance. To test for overfitting while training, we measure the performance on data not in the training set called the **validation** set. We avoid overfitting through regularization such as dropout while monitoring the validation performance during training. In this notebook, I'll show you how to do this in PyTorch. As usual, let's start by loading the dataset through torchvision. You'll learn more about torchvision and loading data in a later part. This time we'll be taking advantage of the test set which you can get by setting `train=False` here: ```python testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) ``` The test set contains images just like the training set. Typically you'll see 10-20% of the original dataset held out for testing and validation with the rest being used for training. ``` import torch from torchvision import datasets, transforms # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) # Download and load the training data trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download and load the test data testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) ``` Here I'll create a model like normal, using the same one from my solution for part 4. 
``` from torch import nn, optim import torch.nn.functional as F class Classifier(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, 10) def forward(self, x): # make sure input tensor is flattened x = x.view(x.shape[0], -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) x = F.log_softmax(self.fc4(x), dim=1) return x ``` The goal of validation is to measure the model's performance on data that isn't part of the training set. Performance here is up to the developer to define though. Typically this is just accuracy, the percentage of classes the network predicted correctly. Other options are [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall#Definition_(classification_context)) and top-5 error rate. We'll focus on accuracy here. First I'll do a forward pass with one batch from the test set. ``` model = Classifier() images, labels = next(iter(testloader)) # Get the class probabilities ps = torch.exp(model(images)) # Make sure the shape is appropriate, we should get 10 class probabilities for 64 examples print(ps.shape) ``` With the probabilities, we can get the most likely class using the `ps.topk` method. This returns the $k$ highest values. Since we just want the most likely class, we can use `ps.topk(1)`. This returns a tuple of the top-$k$ values and the top-$k$ indices. If the highest value is the fifth element, we'll get back 4 as the index. ``` top_p, top_class = ps.topk(1, dim=1) # Look at the most likely classes for the first 10 examples print(top_class[:10,:]) ``` Now we can check if the predicted classes match the labels. This is simple to do by equating `top_class` and `labels`, but we have to be careful of the shapes. Here `top_class` is a 2D tensor with shape `(64, 1)` while `labels` is 1D with shape `(64)`. 
To get the equality to work out the way we want, `top_class` and `labels` must have the same shape. If we do ```python equals = top_class == labels ``` `equals` will have shape `(64, 64)`, try it yourself. What it's doing is comparing the one element in each row of `top_class` with each element in `labels` which returns 64 True/False boolean values for each row. ``` equals = top_class == labels.view(*top_class.shape) ``` Now we need to calculate the percentage of correct predictions. `equals` has binary values, either 0 or 1. This means that if we just sum up all the values and divide by the number of values, we get the percentage of correct predictions. This is the same operation as taking the mean, so we can get the accuracy with a call to `torch.mean`. If only it was that simple. If you try `torch.mean(equals)`, you'll get an error ``` RuntimeError: mean is not implemented for type torch.ByteTensor ``` This happens because `equals` has type `torch.ByteTensor` but `torch.mean` isn't implemented for tensors with that type. So we'll need to convert `equals` to a float tensor. Note that when we take `torch.mean` it returns a scalar tensor, to get the actual value as a float we'll need to do `accuracy.item()`. ``` accuracy = torch.mean(equals.type(torch.FloatTensor)) print(f'Accuracy: {accuracy.item()*100}%') ``` The network is untrained so it's making random guesses and we should see an accuracy around 10%. Now let's train our network and include our validation pass so we can measure how well the network is performing on the test set. Since we're not updating our parameters in the validation pass, we can speed up the pass by turning off gradients using `torch.no_grad()`: ```python # turn off gradients with torch.no_grad(): # validation pass here for images, labels in testloader: ... ``` >**Exercise:** Implement the validation loop below. 
You can largely copy and paste the code from above, but I suggest typing it in because writing it out yourself is essential for building the skill. In general you'll always learn more by typing it rather than copy-pasting. ``` model = Classifier() criterion = nn.NLLLoss() optimizer = optim.Adam(model.parameters(), lr=0.003) epochs = 30 steps = 0 train_losses, test_losses = [], [] for e in range(epochs): running_loss = 0 for images, labels in trainloader: optimizer.zero_grad() log_ps = model(images) loss = criterion(log_ps, labels) loss.backward() optimizer.step() running_loss += loss.item() else: test_loss = 0 accuracy = 0 # Turn off gradients for validation, saves memory and computations with torch.no_grad(): for images, labels in testloader: log_ps = model(images) test_loss += criterion(log_ps, labels) ps = torch.exp(log_ps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)) train_losses.append(running_loss/len(trainloader)) test_losses.append(test_loss/len(testloader)) print("Epoch: {}/{}.. ".format(e+1, epochs), "Training Loss: {:.3f}.. ".format(running_loss/len(trainloader)), "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)), "Test Accuracy: {:.3f}".format(accuracy/len(testloader))) %matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt plt.plot(train_losses, label='Training loss') plt.plot(test_losses, label='Validation loss') plt.legend(frameon=False) ``` ## Overfitting If we look at the training and validation losses as we train the network, we can see a phenomenon known as overfitting. <img src='assets/overfitting.png' width=450px> The network learns the training set better and better, resulting in lower training losses. However, it starts having problems generalizing to data outside the training set leading to the validation loss increasing. 
The ultimate goal of any deep learning model is to make predictions on new data, so we should strive to get the lowest validation loss possible. One option is to use the version of the model with the lowest validation loss, here the one around 8-10 training epochs. This strategy is called *early-stopping*. In practice, you'd save the model frequently as you're training then later choose the model with the lowest validation loss. The most common method to reduce overfitting (outside of early-stopping) is *dropout*, where we randomly drop input units. This forces the network to share information between weights, increasing it's ability to generalize to new data. Adding dropout in PyTorch is straightforward using the [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout) module. ```python class Classifier(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, 10) # Dropout module with 0.2 drop probability self.dropout = nn.Dropout(p=0.2) def forward(self, x): # make sure input tensor is flattened x = x.view(x.shape[0], -1) # Now with dropout x = self.dropout(F.relu(self.fc1(x))) x = self.dropout(F.relu(self.fc2(x))) x = self.dropout(F.relu(self.fc3(x))) # output so no dropout here x = F.log_softmax(self.fc4(x), dim=1) return x ``` During training we want to use dropout to prevent overfitting, but during inference we want to use the entire network. So, we need to turn off dropout during validation, testing, and whenever we're using the network to make predictions. To do this, you use `model.eval()`. This sets the model to evaluation mode where the dropout probability is 0. You can turn dropout back on by setting the model to train mode with `model.train()`. 
In general, the pattern for the validation loop will look like this, where you turn off gradients, set the model to evaluation mode, calculate the validation loss and metric, then set the model back to train mode. ```python # turn off gradients with torch.no_grad(): # set model to evaluation mode model.eval() # validation pass here for images, labels in testloader: ... # set model back to train mode model.train() ``` > **Exercise:** Add dropout to your model and train it on Fashion-MNIST again. See if you can get a lower validation loss. ``` class Classifier(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, 10) # Dropout module with 0.2 drop probability self.dropout = nn.Dropout(p=0.2) def forward(self, x): # make sure input tensor is flattened x = x.view(x.shape[0], -1) # Now with dropout x = self.dropout(F.relu(self.fc1(x))) x = self.dropout(F.relu(self.fc2(x))) x = self.dropout(F.relu(self.fc3(x))) # output so no dropout here x = F.log_softmax(self.fc4(x), dim=1) return x model = Classifier() criterion = nn.NLLLoss() optimizer = optim.Adam(model.parameters(), lr=0.003) epochs = 30 steps = 0 train_losses, test_losses = [], [] for e in range(epochs): running_loss = 0 for images, labels in trainloader: optimizer.zero_grad() log_ps = model(images) loss = criterion(log_ps, labels) loss.backward() optimizer.step() running_loss += loss.item() else: test_loss = 0 accuracy = 0 # Turn off gradients for validation, saves memory and computations with torch.no_grad(): model.eval() for images, labels in testloader: log_ps = model(images) test_loss += criterion(log_ps, labels) ps = torch.exp(log_ps) top_p, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) accuracy += torch.mean(equals.type(torch.FloatTensor)) model.train() train_losses.append(running_loss/len(trainloader)) test_losses.append(test_loss/len(testloader)) 
print("Epoch: {}/{}.. ".format(e+1, epochs), "Training Loss: {:.3f}.. ".format(train_losses[-1]), "Test Loss: {:.3f}.. ".format(test_losses[-1]), "Test Accuracy: {:.3f}".format(accuracy/len(testloader))) %matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt plt.plot(train_losses, label='Training loss') plt.plot(test_losses, label='Validation loss') plt.legend(frameon=False) ``` ## Inference Now that the model is trained, we can use it for inference. We've done this before, but now we need to remember to set the model in inference mode with `model.eval()`. You'll also want to turn off autograd with the `torch.no_grad()` context. ``` # Import helper module (should be in the repo) import helper # Test out your network! model.eval() dataiter = iter(testloader) images, labels = dataiter.next() img = images[0] # Convert 2D image to 1D vector img = img.view(1, 784) # Calculate the class probabilities (softmax) for img with torch.no_grad(): output = model.forward(img) ps = torch.exp(output) # Plot the image and probabilities helper.view_classify(img.view(1, 28, 28), ps, version='Fashion') ``` ## Next Up! In the next part, I'll show you how to save your trained models. In general, you won't want to train a model every time you need it. Instead, you'll train once, save it, then load the model when you want to train more or use it for inference.
github_jupyter
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/2_transfer_learning_roadmap/5_exploring_model_families/4_resnet/8)%20Comparing%20resnet%20v1%20and%20v2%20variants%20-%20mxnet%20backend.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Goals ### Train a blood cell type classifier using resnet v1 and v2 variants ### Understand what all differences happen when switching between resnets variants ### Understand bigger and deeper network not always means better results #### For this experiment you will be using mxnet backend # What is resnet ## Readings on resnet 1) Points from https://towardsdatascience.com/an-overview-of-resnet-and-its-variants-5281e2f56035 - The core idea of ResNet is introducing a so-called “identity shortcut connection” that skips one or more layers - The deeper model should not produce a training error higher than its shallower counterparts. - solves the problem of vanishing gradiens as network depth increased - https://medium.com/@anishsingh20/the-vanishing-gradient-problem-48ae7f501257 2) Points from https://medium.com/@14prakash/understanding-and-implementing-architectures-of-resnet-and-resnext-for-state-of-the-art-image-cf51669e1624 - Won 1st place in the ILSVRC 2015 classification competition with top-5 error rate of 3.57% (An ensemble model) - Efficiently trained networks with 100 layers and 1000 layers also. - Replacing VGG-16 layers in Faster R-CNN with ResNet-101. 
They observed a relative improvements of 28% 3) Read more here - https://arxiv.org/abs/1512.03385 - https://d2l.ai/chapter_convolutional-modern/resnet.html - https://cv-tricks.com/keras/understand-implement-resnets/ - https://mc.ai/resnet-architecture-explained/ # Table of Contents ## [Install](#0) ## [Train experiment with resnet-50-v1 architecture and validate](#1) ## [Train experiment with resnet-50-v2 architecture and validate](#2) ## [Compare all the experiments](#3) <a id='0'></a> # Install Monk ## Using pip (Recommended) - colab (gpu) - All bakcends: `pip install -U monk-colab` - kaggle (gpu) - All backends: `pip install -U monk-kaggle` - cuda 10.2 - All backends: `pip install -U monk-cuda102` - Gluon bakcned: `pip install -U monk-gluon-cuda102` - Pytorch backend: `pip install -U monk-pytorch-cuda102` - Keras backend: `pip install -U monk-keras-cuda102` - cuda 10.1 - All backend: `pip install -U monk-cuda101` - Gluon bakcned: `pip install -U monk-gluon-cuda101` - Pytorch backend: `pip install -U monk-pytorch-cuda101` - Keras backend: `pip install -U monk-keras-cuda101` - cuda 10.0 - All backend: `pip install -U monk-cuda100` - Gluon bakcned: `pip install -U monk-gluon-cuda100` - Pytorch backend: `pip install -U monk-pytorch-cuda100` - Keras backend: `pip install -U monk-keras-cuda100` - cuda 9.2 - All backend: `pip install -U monk-cuda92` - Gluon bakcned: `pip install -U monk-gluon-cuda92` - Pytorch backend: `pip install -U monk-pytorch-cuda92` - Keras backend: `pip install -U monk-keras-cuda92` - cuda 9.0 - All backend: `pip install -U monk-cuda90` - Gluon bakcned: `pip install -U monk-gluon-cuda90` - Pytorch backend: `pip install -U monk-pytorch-cuda90` - Keras backend: `pip install -U monk-keras-cuda90` - cpu - All backend: `pip install -U monk-cpu` - Gluon bakcned: `pip install -U monk-gluon-cpu` - Pytorch backend: `pip install -U monk-pytorch-cpu` - Keras backend: `pip install -U monk-keras-cpu` ## Install Monk Manually (Not recommended) ### Step 1: 
Clone the library - git clone https://github.com/Tessellate-Imaging/monk_v1.git ### Step 2: Install requirements - Linux - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` - Cuda 10.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` - Windows - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` - Cuda 10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` - Mac - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` - Misc - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` ### Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` ## Dataset - Chest X-ray Pneumonia Dataset - https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia ``` ! 
wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1myx6OZ3l4spNvGrYFgwgeYXX6ebyjLBY' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1myx6OZ3l4spNvGrYFgwgeYXX6ebyjLBY" -O chest-xray-pneumonia.zip && rm -rf /tmp/cookies.txt ! unzip -qq chest-xray-pneumonia.zip ``` # Imports ``` #Using mxnet-gluon backend # When installed using pip from monk.gluon_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.gluon_prototype import prototype ``` <a id='1'></a> # Train experiment with resnet-50-v1 architecture and validate ``` # Load experiment gtf = prototype(verbose=1); gtf.Prototype("Compare-resnet-v1-v2", "resnet50-v1"); # Insert data and set params in default mode gtf.Default(dataset_path="chest-xray-pneumonia/train", model_name="resnet50_v1", freeze_base_network=False, num_epochs=5); #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed # Load for validation gtf = prototype(verbose=1); gtf.Prototype("Compare-resnet-v1-v2", "resnet50-v1", eval_infer=True); # Set dataset gtf.Dataset_Params(dataset_path="chest-xray-pneumonia/test"); gtf.Dataset(); # Validate accuracy, class_based_accuracy = gtf.Evaluate(); ``` <a id='2'></a> # Train experiment with resnet-50-v2 architecture and validate ``` # Load experiment gtf = prototype(verbose=1); gtf.Prototype("Compare-resnet-v1-v2", "resnet50-v2"); # Insert data and set params in default mode gtf.Default(dataset_path="chest-xray-pneumonia/train", model_name="resnet50_v2", freeze_base_network=False, num_epochs=5); #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed # Load for validation gtf = prototype(verbose=1); 
gtf.Prototype("Compare-resnet-v1-v2", "resnet50-v2", eval_infer=True); # Set dataset gtf.Dataset_Params(dataset_path="chest-xray-pneumonia/test"); gtf.Dataset(); # Validate accuracy, class_based_accuracy = gtf.Evaluate(); ``` <a id='11'></a> # Comparing all the experiments ``` # Invoke the comparison class from monk.compare_prototype import compare ``` ### Creating and managing comparison experiments - Provide project name ``` # Create a project gtf = compare(verbose=1); gtf.Comparison("Compare-effect-of-network-versions"); ``` ### This creates files and directories as per the following structure workspace | |--------comparison | | |-----Compare-effect-of-network-versions | |------stats_best_val_acc.png |------stats_max_gpu_usage.png |------stats_training_time.png |------train_accuracy.png |------train_loss.png |------val_accuracy.png |------val_loss.png | |-----comparison.csv (Contains necessary details of all experiments) ### Add the experiments - First argument - Project name - Second argument - Experiment name ``` gtf.Add_Experiment("Compare-resnet-v1-v2", "resnet50-v1"); gtf.Add_Experiment("Compare-resnet-v1-v2", "resnet50-v2"); ``` ## Run Analysis ``` gtf.Generate_Statistics(); ``` ## Visualize and study comparison metrics ### Training Accuracy Curves ``` from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-network-versions/train_accuracy.png") ``` ### Training Loss Curves ``` from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-network-versions/train_loss.png") ``` ### Validation Accuracy Curves ``` from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-network-versions/stats_best_val_acc.png") from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-network-versions/val_accuracy.png") ``` ### Validation loss curves ``` from IPython.display import Image 
Image(filename="workspace/comparison/Compare-effect-of-network-versions/val_loss.png") ``` ### Training Times and max gpu usages ``` from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-network-versions/stats_training_time.png") from IPython.display import Image Image(filename="workspace/comparison/Compare-effect-of-network-versions/stats_max_gpu_usage.png") ``` # Goals ### Train a blood cell type classifier using resnet v1 and v2 variants ### Understand what all differences happen when switching between resnets variants ### Understand bigger and deeper network not always means better results
github_jupyter
``` from PIL import Image import torchvision.transforms as transforms import matplotlib.pyplot as plt import torch import numpy as np import cv2 from samples.CLS2IDX import CLS2IDX ``` # Auxiliary Functions ``` from baselines.ViT.LVViT_LRP import lvvit_small_patch16_224 as vit_LRP from baselines.ViT.ViT_explanation_generator import LRP img_size = 224 output_size = 14 normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([ transforms.Resize((img_size, img_size)), transforms.ToTensor(), normalize, ]) # create heatmap from mask on image def show_cam_on_image(img, mask): heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET) heatmap = np.float32(heatmap) / 255 cam = heatmap + np.float32(img) cam = cam / np.max(cam) return cam # initialize ViT pretrained with DeiT model = vit_LRP(pretrained=True).cuda() model.eval() attribution_generator = LRP(model) def generate_visualization(original_image, class_index=None): transformer_attribution = attribution_generator.generate_LRP(original_image.unsqueeze(0).cuda(), method="transformer_attribution", index=class_index).detach() transformer_attribution = transformer_attribution.reshape(1, 1, output_size, output_size) transformer_attribution = torch.nn.functional.interpolate(transformer_attribution, scale_factor=16, mode='bilinear') transformer_attribution = transformer_attribution.reshape(img_size, img_size).cuda().data.cpu().numpy() transformer_attribution = (transformer_attribution - transformer_attribution.min()) / (transformer_attribution.max() - transformer_attribution.min()) image_transformer_attribution = original_image.permute(1, 2, 0).data.cpu().numpy() image_transformer_attribution = (image_transformer_attribution - image_transformer_attribution.min()) / (image_transformer_attribution.max() - image_transformer_attribution.min()) vis = show_cam_on_image(image_transformer_attribution, transformer_attribution) vis = np.uint8(255 * vis) vis = 
cv2.cvtColor(np.array(vis), cv2.COLOR_RGB2BGR) return vis def print_top_classes(predictions, **kwargs): # Print Top-5 predictions prob = torch.softmax(predictions, dim=1) class_indices = predictions.data.topk(5, dim=1)[1][0].tolist() max_str_len = 0 class_names = [] for cls_idx in class_indices: class_names.append(CLS2IDX[cls_idx]) if len(CLS2IDX[cls_idx]) > max_str_len: max_str_len = len(CLS2IDX[cls_idx]) print('Top 5 classes:') for cls_idx in class_indices: output_string = '\t{} : {}'.format(cls_idx, CLS2IDX[cls_idx]) output_string += ' ' * (max_str_len - len(CLS2IDX[cls_idx])) + '\t\t' output_string += 'value = {:.3f}\t prob = {:.1f}%'.format(predictions[0, cls_idx], 100 * prob[0, cls_idx]) print(output_string) return prob image = Image.open('samples/catdog.png') dog_cat_image = transform(image) fig, axs = plt.subplots(1, 3) axs[0].imshow(image); axs[0].axis('off'); output = model(dog_cat_image.unsqueeze(0).cuda()) print_top_classes(output) # dog # generate visualization for class 243: 'bull mastiff' - the predicted class dog = generate_visualization(dog_cat_image) # cat - generate visualization for class 282 : 'tiger cat' cat = generate_visualization(dog_cat_image, class_index=282) axs[1].imshow(dog); axs[1].axis('off'); axs[2].imshow(cat); axs[2].axis('off'); image = Image.open('samples/dogbird.png') dog_bird_image = transform(image) fig, axs = plt.subplots(1, 3) axs[0].imshow(image); axs[0].axis('off'); output = model(dog_bird_image.unsqueeze(0).cuda()) print_top_classes(output) # basset - the predicted class basset = generate_visualization(dog_bird_image, class_index=161) # generate visualization for class 90: 'lorikeet' parrot = generate_visualization(dog_bird_image, class_index=90) axs[1].imshow(basset); axs[1].axis('off'); axs[2].imshow(parrot); axs[2].axis('off'); image = Image.open('samples/el2.png') tusker_zebra_image = transform(image) fig, axs = plt.subplots(1, 3) axs[0].imshow(image); axs[0].axis('off'); output = 
model(tusker_zebra_image.unsqueeze(0).cuda()) print_top_classes(output) # zebra # zebra- the predicted class zebra = generate_visualization(tusker_zebra_image, class_index=340) # generate visualization for class 101: 'tusker' tusker = generate_visualization(tusker_zebra_image, class_index=101) axs[1].imshow(zebra); axs[1].axis('off'); axs[2].imshow(tusker); axs[2].axis('off'); ```
github_jupyter
# Demo for 2d DOT ``` import chainer from chainer import Variable, optimizers, serializers, utils from chainer import Link, Chain, ChainList import chainer.functions as F import chainer.links as L from chainer import cuda #import numpy as xp gpu_device = 0 cuda.get_device(gpu_device).use() import numpy as np import cupy as xp from model import * import DOT import sklearn.datasets from matplotlib import pyplot as plt %matplotlib inline def show_three_figures(y, ty1, ty2, X_train, xmin, xmax, ymin, ymax): plt.style.use('seaborn-darkgrid') plt.figure(figsize=(20,5)) plt.subplot(1, 4, 1) plt.xlim(xmin, xmax) plt.ylim(ymin, ymax) plt.title("Training samples", fontsize=20) plt.scatter(X_train[:,:1], X_train[:,1:], alpha=0.5, color='gray', marker='o') plt.subplot(1, 4, 2) plt.xlim(xmin, xmax) plt.ylim(ymin, ymax) plt.title("Samples by G", fontsize=20) y_d = y#.data plt.scatter(y_d[:,:1], y_d[:,1:], alpha=0.5, color='blue', marker='o', label='y') plt.subplot(1, 4, 3) plt.xlim(xmin, xmax) plt.ylim(ymin, ymax) plt.title("DOT", fontsize=20) y_d = ty1#.data plt.scatter(y_d[:,:1], y_d[:,1:], alpha=0.5, color='red', marker='o', label='ty') plt.subplot(1, 4, 4) plt.xlim(xmin, xmax) plt.ylim(ymin, ymax) plt.title("Naive", fontsize=20) y_d = ty2#.data plt.scatter(y_d[:,:1], y_d[:,1:], alpha=0.5, color='red', marker='o', label='ty') plt.show() #### data generators are derived from https://github.com/lukovnikov/improved_wgan_training/blob/master/gan_toy.py # Copyright (c) 2017 Ishaan Gulrajani # Released under the MIT license # https://github.com/lukovnikov/improved_wgan_training/blob/master/LICENSE def prepare_swissroll_data(BATCH_SIZE=1000): data = sklearn.datasets.make_swiss_roll( n_samples=BATCH_SIZE, noise=0.25 )[0] data = data.astype('float32')[:, [0, 2]] data /= 7.5 # stdev plus a little return data def prepare_25gaussian_data(BATCH_SIZE=1000): dataset = [] for i in range(BATCH_SIZE//25): for x in range(-2, 3): for y in range(-2, 3): point = np.random.randn(2)*0.05 point[0] += 
2*x point[1] += 2*y dataset.append(point) dataset = np.array(dataset, dtype=np.float32) np.random.shuffle(dataset) dataset /= 2.828 # stdev return dataset ``` # 25 Gaussians ``` G = Generator(n_hidden=2, noize='uni', non_linear=F.leaky_relu, final=F.identity) serializers.load_npz("trained_models/G_25gaussians_WGAN-GP.npz", G) D = Discriminator(non_linear=F.leaky_relu, final=F.identity) serializers.load_npz("trained_models/D_25gaussians_WGAN-GP.npz", D) if gpu_device==0: G.to_gpu() D.to_gpu() X_train = prepare_25gaussian_data(BATCH_SIZE=1000) lcs = [] for i in range(10): lcs.append(DOT.eff_K(G, D, 100).tolist()) K = xp.mean(xp.array(lcs)) K Zy = G.make_hidden(1000) y_xp = G(Zy).data Opt = chainer.optimizers.Adam(0.01, beta1=0., beta2=0.9) T = DOT.Transporter_in_target(G, D, K, Opt, y_xp, mode='dot') DOT.discriminator_optimal_transport_from(y_xp, T, 100) x_va = T.get_x_va().data Opt = chainer.optimizers.Adam(0.01, beta1=0., beta2=0.9) T = DOT.Transporter_in_target(G, D, K, Opt, y_xp, mode='naive') DOT.discriminator_optimal_transport_from(y_xp, T, 100) x_va2 = T.get_x_va().data if gpu_device==0: y_xp = cuda.to_cpu(y_xp) x_va = cuda.to_cpu(x_va) x_va2 = cuda.to_cpu(x_va2) show_three_figures(y_xp, x_va, x_va2, X_train, -2,2,-2,2) ``` # Swissroll ``` G = Generator(n_hidden=2, noize='uni', non_linear=F.leaky_relu, final=F.identity) serializers.load_npz("trained_models/G_swissroll_WGAN-GP.npz", G) D = Discriminator(non_linear=F.leaky_relu, final=F.identity) serializers.load_npz("trained_models/D_swissroll_WGAN-GP.npz", D) if gpu_device==0: G.to_gpu() D.to_gpu() X_train = prepare_swissroll_data(BATCH_SIZE=1000) lcs = [] for i in range(10): lcs.append(DOT.eff_K(G, D, 100).tolist()) K = xp.mean(xp.array(lcs)) K Zy = G.make_hidden(1000) y_xp = G(Zy).data Opt = chainer.optimizers.Adam(0.01, beta1=0., beta2=0.9) T = DOT.Transporter_in_target(G, D, K, Opt, y_xp, mode='dot') DOT.discriminator_optimal_transport_from(y_xp, T, 100) x_va = T.get_x_va().data Opt = 
chainer.optimizers.Adam(0.01, beta1=0., beta2=0.9) T = DOT.Transporter_in_target(G, D, K, Opt, y_xp, mode='naive') DOT.discriminator_optimal_transport_from(y_xp, T, 100) x_va2 = T.get_x_va().data if gpu_device==0: y_xp = cuda.to_cpu(y_xp) x_va = cuda.to_cpu(x_va) x_va2 = cuda.to_cpu(x_va2) show_three_figures(y_xp, x_va, x_va2, X_train, -2,2.5,-2,2.5) ```
github_jupyter
``` import sys sys.path.append('../../pyutils') import numpy as np import matplotlib.pyplot as plt import pandas as pd import metrics import utils ``` # Bernoulli Distribution $$X \sim B(p)$$ $X$ is a single binary random variable. Parameters: - $p \in [0, 1]$: probability that X takes the value $1$ $$P(X=0) = 1-p$$ $$P(X=1) = p$$ $$P(X=x) = p^x(1-p)^{1-x}$$ $$\mathbb{E}[X] = p$$ $$\text{Var}(x) = p(1 - p)$$ ``` N = 1000000 p = 0.4 x = (np.random.rand(N) < p).astype(np.int) print('E[X]:', np.mean(x)) print('Var[X]:', np.var(x), p * (1-p)) ``` # Binomial distribution $$X \sim B(n, p)$$ $X$ is a single discrete value, corresponding to the number of successes when repeating $n$ independant Binomial experiments. Parameters: - $n$: number of trials - $p \in [0, 1]$: success probability for each trial. $p(X = k)$: $k$: number of successes. $$\text{PMF: } f(k) = \binom{n}{k} p^k(1-p)^{n-k}$$ $$\mathbb{E}[X] = np$$ $$\text{Var}(X) = np(1 - p)$$ $$\binom{n}{k} = \frac{n!}{k!(n-k)!}$$ ``` def rand_binomial(n, p): data = (np.random.rand(n) < p).astype(np.int) return np.sum(data) N = 1000000 n = 7 p = 0.4 x = np.array([rand_binomial(n,p) for _ in range(N)]) print('E[X]:', np.mean(x), n*p) print('Var[X]:', np.var(x), n*p * (1-p)) ``` # Multinoulli (Categorical) Distribution $X$ is a single discrete random variable with $k$ different states. Parameters: - $p_i$: probability that $x$ takes the value $i$: $\sum p_i = 1$, $p_i >= 0$ $$p(X=i) = p_i$$ # Multinomial distribution $X$ is a discrete vector of size $k$, corresponding to the number of times each states it obtained when repeating $n$ independant Multinoulli experiments. Parameters: - $n$: number of trials - $p_i$: probability of event $i$: $\sum p_i = 1$, $p_i >= 0$ $X$ discrete vector of size $K$: $X_i$: number of realisations of the event $i$. 
$$\text{PMF: } f(x) = \binom{n}{x_1\text{...} x_k} \prod_{i=1}^K p_i^{x_i}$$ $$\mathbb{E}[X_i] = np_i$$ $$\text{Var}(X_i) = np_i(1 - p_i)$$ $$\text{Cov}(X_i, X_j) = -np_ip_j \space (i \neq j)$$ $$\binom{n}{k_1 \text{...} k_m}= \frac{n!}{\prod_{i=1}^m k_i!}$$ ``` def rand_multinomial(p): s = 0 p2 = np.empty(len(p)) for i in range(len(p)-1): s += p[i] p2[i] = s p2[-1] = 1 u = np.random.rand() k = 0 while u > p2[k]: k += 1 return k N = 1000000 x = np.empty(N).astype(np.int) p = [0.1, 0.6, 0.3] for i in range(N): x[i] = rand_multinomial(p) print('p[0]:', np.mean(x==0)) print('p[1]:', np.mean(x==1)) print('p[2]:', np.mean(x==2)) ``` # Normal (Gaussian) distribution $$X \sim \mathcal{N}(\mu, \sigma^2)$$ Parameters: - $\mu$: mean - $\sigma^2 \geq 0$: variance $$\text{PDF: } f(x) = \frac{1}{\sqrt{2\pi \sigma^2}} \text{exp}(-\frac{(x - \mu)^2}{2\sigma^2})$$ $$\text{CDF: } F(x) = \frac{1}{2}[1 + \text{erf}(\frac{x - \mu}{\sigma \sqrt{2}})]$$ $$\mathbb{E}[X] = \mu$$ $$\text{Var}(X) = \sigma^2$$ $$ \text{erf}(x) = \frac{1}{\sqrt{\pi}} \int_{-x}^{x} e^{-t^2}dt$$ ``` def normal_pdf(mu, v, x): den = np.sqrt(2 * np.pi * v) num = - (x - mu)**2 / (2*v) return np.exp(num) / den x = np.linspace(-3, 3, 1000) plt.plot(x, normal_pdf(0, 0.5, x), c='r', label='N(0, 0.5)') plt.plot(x, normal_pdf(0, 1, x), c='b', label='N(0, 1)') plt.plot(x, normal_pdf(0, 1.5, x), c='y', label='N(0, 1,5)') plt.plot(x, normal_pdf(2, 1, x), c='c', label='N(2, 1)') plt.legend() plt.show() _box_muller = [None] def norm_box_muller(): if _box_muller[0] is not None: res = _box_muller[0] _box_muller[0] = None return res u1, u2 = np.random.rand(2) r = np.sqrt(-2*np.log(u1)) theta = 2*np.pi*u2 x = r * np.cos(theta) y = r * np.sin(theta) _box_muller[0] = x return y _marsagalia_polar = [None] def norm_marsagalia_polar(): if _marsagalia_polar[0] is not None: res = _marsagalia_polar[0] _marsagalia_polar[0] = None return res while True: x, y = 2 * np.random.rand(2) - 1 s = x**2 + y**2 if s < 1 and s>0: break f = 
np.sqrt((-2*np.log(s))/s) a, b = x*f, y*f _marsagalia_polar[0] = a return b N = 1000000 print('mu =', -1.3) print('std =', 4.5) x = np.random.randn(N) * 4.5 - 1.3 print('[NP] mu =', np.mean(x)) print('[NP] std =', np.std(x)) x = np.empty(N) for i in range(N): x[i] = 4.5 * norm_box_muller() - 1.3 print('[BM] mu =', np.mean(x)) print('[BM] std =', np.std(x)) x = np.empty(N) for i in range(N): x[i] = 4.5 * norm_marsagalia_polar() - 1.3 print('[MP] mu =', np.mean(x)) print('[MP] std =', np.std(x)) #Generate from gaussian using quantile function import scipy.stats def norm_cdf(x): return 1/2 * (1 + scipy.special.erf(x / np.sqrt(2))) def norm_quantile(x): def f(v): return norm_cdf(v) - x return scipy.optimize.brentq(f, -10, 10) def randn_qt(size): u = np.random.rand(size) x = np.array([norm_quantile(v) for v in u]) return x v = 0.6 b1 = scipy.stats.norm.ppf(v) b2 = norm_quantile(v) print(b1) print(b2) print(metrics.tdist(b1, b2)) x = randn_qt(100000) * 4.5 - 1.3 print('[QT] mu =', np.mean(x)) print('[QT] std =', np.std(x)) ``` # Multivariate Normal distribution $$X \sim \mathcal{N}(\mu, \Sigma)$$ Parameters: - $\mu \in \mathbb{R}^p$: mean - $\Sigma \in \mathbb{R}^{p*p}$: covariance matrix (positive semi-definite) $$\text{PDF: } f(x) = ((2\pi)^{p} \text{det}(\Sigma))^{-\frac{1}{2}} \exp(-\frac{1}{2} (x - \mu)^T \Sigma^{-1}(x-\mu))$$ $$\mathbb{E}[X] = \mu$$ $$\text{Var}(X) = \Sigma$$ ``` rmu = np.array([0.5, -1.2, 4.6]) rsig = np.array([[0.4, 1.2, -1.8],[2.5,-2.8,-1.9],[-1.4,6.7,2.5]]) rsig = rsig.T @ rsig N = 1000000 print('mu =', rmu) print('sig=') print(rsig) X = np.random.multivariate_normal(rmu, rsig, size=N, check_valid='raise') mu = np.mean(X, axis=0) sig = 1/N * (X - mu.reshape(1,3)).T @ (X - mu.reshape(1,3)) print('[NP] mu =', mu) print('[NP] sig=') print(sig) def normal_multivariate(mu, sig, size): N = size p = len(mu) X = np.empty((N,p)) d, V = np.linalg.eig(sig) Q = np.sqrt(d).reshape(1,p) * V for i in range(N): xn = np.random.randn(p) X[i] = Q @ xn + mu return 
X X = normal_multivariate(rmu, rsig, size=N) mu = np.mean(X, axis=0) sig = 1/N * (X - mu.reshape(1,3)).T @ (X - mu.reshape(1,3)) print('mu =', mu) print('sig=') print(sig) ``` # Exponential distribution X is a positive continuous variable with a sharp peak at $0$ Parameters: - $\lambda \in \mathbb{R}$, $\lambda > 0$: rate or inverse scale $$X \in [0, \infty[$$ $$\text{PDF: } f(x) = \lambda \exp(- \lambda x)$$ $$\text{CDF: } F(x) = 1 - \exp(- \lambda x)$$ $$\mathbb{E}[x] = \lambda^{-1}$$ $$\text{Var}(x) = \lambda^{-2}$$ ``` def exponential_pdf(lbda, x): return lbda * np.exp(-lbda * x) x = np.linspace(0, 5, 1000) plt.plot(x, exponential_pdf(.5, x), c='r', label='lambda = .5') plt.plot(x, exponential_pdf(1, x), c='b', label='lambda = 1') plt.plot(x, exponential_pdf(1.5, x), c='y', label='lambda = 1.5') plt.legend() plt.show() ``` # Laplace Distribution $$X \sim \text{Laplace}(\mu;\gamma)$$ X is a continous variabe with a shark peak at $\mu$ Parameters: - $\mu \in \mathbb{R}$: mean - $\gamma \in \mathbb{R}$, $\gamma > 0$: scale $$\text{PDF: } f(x) = \frac{1}{2 \gamma} \exp(-\frac{|x - \mu|}{\gamma})$$ $$\mathbb{E}[X] = \mu$$ $$\text{Var}(X) = 2\gamma^2$$ ``` def laplace_pdf(mu, b, x): den = 2 * b num = - np.abs(x - mu) / b return np.exp(num) / den x = np.linspace(-3, 3, 1000) plt.plot(x, laplace_pdf(0, 0.5, x), c='r', label='Laplace(0, 0.5)') plt.plot(x, laplace_pdf(0, 1, x), c='b', label='Laplace(0, 1)') plt.plot(x, laplace_pdf(0, 1.5, x), c='y', label='Laplace(0, 1,5)') plt.plot(x, laplace_pdf(2, 1, x), c='c', label='Laplace(2, 1)') plt.legend() plt.show() ``` # Dirac Distribution $X$ is a continous variable with a infitenely high peak at $\mu$, and $0$ everywhere. Parameters: - $\mu \in \mathbb{R}$: mean $$\text{PDF}: f(x) = \delta(x - \mu)$$ with $\delta(x)$ the Dirac delta function, a function that is zero-valued everywhere except at $0$, and yet integrates to $1$. 
# Empirical distribution

X defines an empirical distribution of size $m$ (eg: dataset) over continuous variables

Parameters:
- dataset $\{ x_1, \text{...}, x_m \}$

$$\text{PDF} f(x) = \frac{1}{m} \sum_{i=1}^m \delta(x - x_i)$$

The empirical distribution is the distribution when we sample data from a dataset. This is the probability density that maximizes the likelihood of the training data.

# Beta distribution

$$X \sim \text{Beta}(\alpha, \beta)$$

Parameters:
- $\alpha \in \mathbb{R} > 0$
- $\beta \in \mathbb{R} > 0$

The parameter $x \in \mathbb{R}$ must be in $[0,1]$

$$\text{PDF: } f(x) = \frac{x^{\alpha-1} (1-x)^{\beta - 1}}{B(\alpha,\beta)}$$

$$\text{where } B(\alpha,\beta) = \frac{\Gamma (\alpha) \Gamma(\beta)}{\Gamma (\alpha + \beta)}$$

$$\text{where } \Gamma(z) = \int_{0}^{+\infty} x^{z-1} e^{-x}dx$$

$$E[X] = \frac{\alpha}{\alpha + \beta}$$

$$\text{Var}(X) = \frac{\alpha\beta}{(\alpha+\beta)^2(\alpha+\beta+1)}$$

The beta distribution is the conjugate prior probability distribution of the bernoulli, binomial, and geometric distributions. It is usually used to describe prior knowledge concerning the probability of success of an event.

# Dirichlet distribution

$$X \sim \text{Dir}(\alpha)$$

Parameters: $\alpha \in \mathbb{R}^K$, $K \geq 2$, $\alpha_k > 0$

Input: $x \in \mathbb{R}^K$, with $x_k \in [0,1]$, and $\sum_{k=1}^Kx_k=1$

$$\text{PDF: } \frac{1}{B(\alpha)} \prod_{i=1}^K x_i^{\alpha_i-1}$$

$$\text{where } B(\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}{\Gamma(\sum_{i=1}^K\alpha_i)}$$

$$E[X_i] = \frac{\alpha_i}{\sum_{k=1}^K \alpha_k}$$

$$\text{Var}(X_i) = \frac{\alpha_i(\alpha_0 - \alpha_i)}{\alpha_0^2(\alpha_0 + 1)}$$

$$\text{where } \alpha_0 = \sum_{i=1}^K \alpha_i$$

The dirichlet distribution is a multivariate generalization of the beta distribution. It's the conjugate prior probability distribution of the categorical and multinomial distributions.

# Mixture of distributions

A mixture distribution is made up of several component distributions.
On each trial, the choice of which component distribution generates the sample is determined by a multinoulli distribution: $$P(x) = \sum_i P(c=i) P(x|c=i)$$ with $P(c)$ the multinoulli distribution, and $P(x|c=i)$ the PDF of the $i$-th component distribution. $c$ is a latent variable. A common type on mixture models is the Gaussian mixture, where each component is a multivariate normal distribution. Each component may have a separate $\mu^{(i)}$ and $\Sigma^{(i)}$, or they may have some constraints (eg sharing, special form of covariance). A gaussian mixture model is a universal approximator of disenties, given enough components. ``` def rand_gauss_mixture(means, stds, p): c = np.random.choice(len(p), p=p) x = np.random.randn() * stds[c] + means[c] return x means = np.array([-1, 1, 1]) stds = np.array([0.8, 1.3, 1.1]) p = np.array([0.4, 0.35, 0.25]) N = 100000 x = np.array([rand_gauss_mixture(means, stds, p) for _ in range(N)]) print('mu =', np.mean(x), means @ p) print('std =', np.std(x), stds @ np.sqrt(p * (1-p))) ```
github_jupyter
### Gluon Implementation in Recurrent Neural Networks ``` import sys sys.path.insert(0, '..') import d2l import math from mxnet import autograd, gluon, init, nd from mxnet.gluon import loss as gloss, nn, rnn import time (corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_time_machine() ``` ### Define the Model ``` num_hiddens = 256 rnn_layer = rnn.RNN(num_hiddens) rnn_layer.initialize() ``` Then, we call the `rnn_layer`'s member function `begin_state` to return hidden state list for initialization. It has an element of the shape (number of hidden layers, batch size, number of hidden units). ``` batch_size = 2 state = rnn_layer.begin_state(batch_size=batch_size) state[0].shape ``` ### RNN Layer in Action ``` num_steps = 35 X = nd.random.uniform(shape=(num_steps, batch_size, vocab_size)) Y, state_new = rnn_layer(X, state) print(X.shape, len(state), state[0].shape) print(Y.shape, len(state_new), state_new[0].shape) ``` ### RNN Block ``` # This class has been saved in the d2l package for future use. class RNNModel(nn.Block): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.dense = nn.Dense(vocab_size) def forward(self, inputs, state): # Get the one-hot vector representation by transposing the input # to (num_steps, batch_size). X = nd.one_hot(inputs.T, self.vocab_size) Y, state = self.rnn(X, state) # The fully connected layer will first change the shape of Y to # (num_steps * batch_size, num_hiddens). # Its output shape is (num_steps * batch_size, vocab_size). output = self.dense(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, *args, **kwargs): return self.rnn.begin_state(*args, **kwargs) ``` ### Prediction ``` def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx, idx_to_char, char_to_idx): # Use model's member function to initialize the hidden state. 
state = model.begin_state(batch_size=1, ctx=ctx) output = [char_to_idx[prefix[0]]] for t in range(num_chars + len(prefix) - 1): X = nd.array([output[-1]], ctx=ctx).reshape((1, 1)) (Y, state) = model(X, state) # Forward computation does not require incoming model parameters. if t < len(prefix) - 1: output.append(char_to_idx[prefix[t + 1]]) else: output.append(int(Y.argmax(axis=1).asscalar())) return ''.join([idx_to_char[i] for i in output]) ``` ### Prediction with Garbage Parameters ``` ctx = d2l.try_gpu() model = RNNModel(rnn_layer, vocab_size) model.initialize(force_reinit=True, ctx=ctx) predict_rnn_gluon('traveller', 10, model, vocab_size, ctx, idx_to_char, char_to_idx) ``` Next, implement the training function. Its algorithm is the same as in the previous section, but only random sampling is used here to read the data. ``` # This function is saved in the d2l package for future use. def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, char_to_idx, num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes): loss = gloss.SoftmaxCrossEntropyLoss() model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01)) trainer = gluon.Trainer(model.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0, 'wd': 0}) for epoch in range(num_epochs): l_sum, n, start = 0.0, 0, time.time() data_iter = d2l.data_iter_consecutive( corpus_indices, batch_size, num_steps, ctx) state = model.begin_state(batch_size=batch_size, ctx=ctx) for X, Y in data_iter: for s in state: s.detach() with autograd.record(): (output, state) = model(X, state) y = Y.T.reshape((-1,)) l = loss(output, y).mean() l.backward() # Clip the gradient. params = [p.data() for p in model.collect_params().values()] d2l.grad_clipping(params, clipping_theta, ctx) # Since the error has already taken the mean, the gradient does # not need to be averaged. 
trainer.step(1) l_sum += l.asscalar() * y.size n += y.size if (epoch + 1) % pred_period == 0: print('epoch %d, perplexity %f, time %.2f sec' % ( epoch + 1, math.exp(l_sum / n), time.time() - start)) for prefix in prefixes: print(' -', predict_rnn_gluon( prefix, pred_len, model, vocab_size, ctx, idx_to_char, char_to_idx)) ``` Train the model using the same hyper-parameters as previously. ``` num_epochs, batch_size, lr, clipping_theta = 200, 32, 1e2, 1e-2 pred_period, pred_len, prefixes = 50, 50, ['traveller', 'time traveller'] train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, char_to_idx, num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes) ```
github_jupyter
``` import os os.chdir('/home/enis/projects/nna/src/nna/exp/megan/run-2/') # import run # import nna import torch import torch.nn as nn import torch.nn.functional as F from torchvision import transforms import torchaudio torchaudio.set_audio_backend("sox_io") import numpy as np from pathlib import Path from collections import Counter torch.manual_seed(42) np.random.seed(42) torch.backends.cudnn.benchmark = False loc_per_set = [['45', '38', '48', '39', '11', '44', '46', '17', '20', '50', '13', '25', '21', '29', '19', '16', '24', '37'], ['18', '31', '34', '27', '32', '33', '47', '41', '22', '15'], ['30', '12', '14', '36', '40', '49']] import runconfigs import wandb from ignite.metrics import Accuracy, Loss from ignite.contrib.metrics import ROC_AUC # from nna.exp import augmentations, from nna.exp import modelArchs,runutils # wandb.init(config=runconfigs.default_config, project=runconfigs.PROJECT_NAME) # config = wandb.config config = runconfigs.default_config # wandb.config.update(args) # adds all of the arguments as config variables params = { 'batch_size': config['batch_size'], 'shuffle': True, 'num_workers': 0 } device = torch.device( f"cuda:{config['device']}" if torch.cuda.is_available() else "cpu") # labelsbyhumanpath = Path('/scratch/enis/data/nna/labeling/results/') # sourcePath = Path("/scratch/enis/data/nna/labeling/splits/") CATEGORY_COUNT = 2 !ls # RAW DATA def load_raw_data(sample_count,CATEGORY_COUNT): sample_count = 1200 X = np.empty((sample_count,480000),dtype=np.float32) y_true = np.random.randint(0,CATEGORY_COUNT,(sample_count)) for i,y in enumerate(y_true): X[i,:] = y X = np.interp(X, (X.min(), X.max()), (-32768 , 32767)) n_values = np.max(y_true) + 1 y_true = np.eye(n_values)[y_true] X_train, X_test, y_train, y_test = train_test_split( X, y_true, test_size=0.2, random_state=42) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=0.25,random_state=42) return X_train,X_test,X_val,y_train,y_test,y_val def 
mock_raw_data(sample_count,CATEGORY_COUNT): # sample_count = 30 X = np.empty((sample_count*CATEGORY_COUNT,480000),dtype=np.float32) # y_true = np.random.randint(0,CATEGORY_COUNT,(sample_count)) y_true=[] for i in range(CATEGORY_COUNT): y_true.extend([i]*sample_count) print(len(y_true)) for i,y in enumerate(y_true): X[i,:] = y X = np.interp(X, (X.min(), X.max()), (0 , 3400)) # X = np.interp(X, (X.min(), X.max()), (-32768 , 32767)) n_values = np.max(y_true) + 1 y_true = np.eye(n_values)[y_true] X_train, X_test, y_train, y_test = train_test_split( X, y_true, test_size=0.2, random_state=42) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=0.25,random_state=42) # print(y_train) return X_train,X_test,X_val,y_train,y_test,y_val ## Load real data rather than mock # MVP1: delete parts longer than 10 seconds import run audio_dataset,_ = run.prepare_dataset() output_file_path = '/scratch/enis/data/nna/labeling/megan/AudioSamplesPerSite/files_as_np_filtered_v3_int16.pkl' audio_dataset.load_audio_files(output_file_path) audio_dataset.pick_channel_by_clipping() sound_ins=next(iter(audio_dataset.items())) sound_ins[1].location_id sound_ins[1].taxo_code # sound_ins[1].taxo_code # classA = 1.1.7 #'duck-goose-swan'] # classB = 0.2.0 # other-aircraft # 3.0.0 : 0.48, 0.26, 0.26, 46 # silence # 2.1.0 : 0.22, 0.56, 0.22, 18 # rain # 1.3.0 1.3.0 : 0.52, 0.4, 0.087, 161 # insect # 1.1.8 : 0.49, 0.19, 0.32, 88 # grouse-ptarmigan other_taxo = ['3.0.0','2.1.0','1.3.0','1.1.8'] sampleTest= [] y=[] location_id_info = [] expected_len=10 for sound_ins in audio_dataset.values(): if sound_ins.taxo_code in ['1.1.10','1.1.7'] + other_taxo: y.append(sound_ins.taxo_code) location_id_info.append(sound_ins.location_id) if sound_ins.length<10: tile_reps = (expected_len/(sound_ins.length)+1) repeated_data = np.tile(sound_ins.data,int(tile_reps)) repeated_data = repeated_data[:expected_len*sound_ins.sr] sampleTest.append(repeated_data) else: 
sampleTest.append(sound_ins.data[:expected_len*sound_ins.sr]) len(sampleTest),len(y) # sampleTest=np.array(sampleTest) 313+589,(46+18+161+88)/589 from numpy import argmax # define input string # define universe of possible input values alphabet = ['1.1.10','1.1.7'] # define a mapping of chars to integers char_to_int = dict((c, i) for i, c in enumerate(alphabet)) int_to_char = dict((i, c) for i, c in enumerate(alphabet)) # integer encode input data integer_encoded = [char_to_int.get(char,None) for char in y] # print(integer_encoded) # one hot encode onehot_encoded = list() for value in integer_encoded: letter = [0 for _ in range(len(alphabet))] if value is not None: letter[value] = 1 onehot_encoded.append(letter) # print(onehot_encoded) # invert encoding inverted = int_to_char[argmax(onehot_encoded[0])] # print(inverted) onehot_encoded=np.array(onehot_encoded) X_train,X_test, X_val, y_train, y_test,y_val = [],[],[],[],[],[] loc_id_train=[] loc_id_test=[] loc_id_valid=[] for sample,y_val_ins,loc_id in zip(sampleTest,onehot_encoded,location_id_info): if loc_id in loc_per_set[0]: X_train.append(sample) y_train.append(y_val_ins) loc_id_train.append(loc_id) elif loc_id in loc_per_set[1]: X_test.append(sample) y_test.append(y_val_ins) loc_id_test.append(loc_id) elif loc_id in loc_per_set[2]: X_val.append(sample) y_val.append(y_val_ins) loc_id_valid.append(loc_id) else: print('error') set(loc_id_train),set(loc_id_test),set(loc_id_valid) len(sampleTest) X_train,X_test,X_val=np.array(X_train),np.array(X_test),np.array(X_val) y_train,y_test,y_val=np.array(y_train),np.array(y_test),np.array(y_val) # X_train, X_test, y_train, y_test = train_test_split( # sampleTest, onehot_encoded, test_size=0.2, random_state=42) # X_train, X_val, y_train, y_val = train_test_split( # X_train, y_train, test_size=0.25,random_state=42) X_train,X_test,X_val=torch.from_numpy(X_train).float(),torch.from_numpy(X_test).float(),torch.from_numpy(X_val).float() 
y_train,y_test,y_val=torch.from_numpy(y_train).float(),torch.from_numpy(y_test).float(),torch.from_numpy(y_val).float() X_train.shape,X_test.shape,X_val.shape y_train.shape,y_test.shape,y_val.shape # y_val[0:3] # # X_train,X_test,X_val,y_train,y_test,y_val = load_raw_data('labelsbyhumanpath','sourcePath') # X_train,X_test,X_val,y_train,y_test,y_val = mock_raw_data(4,CATEGORY_COUNT) # X_train,X_test,X_val=torch.from_numpy(X_train).float(),torch.from_numpy(X_test).float(),torch.from_numpy(X_val).float() # y_train,y_test,y_val=torch.from_numpy(y_train).float(),torch.from_numpy(y_test).float(),torch.from_numpy(y_val).float() # # labelsbyhumanpath = Path('/scratch/enis/data/nna/labeling/results/') # # with open(labelsbyhumanpath/"np_array_Ymatrix.npy", 'rb') as f: # # y_true = np.load(f) # Counter(np.argmax(y_train,axis=1).tolist()),Counter(np.argmax(y_val,axis=1).tolist()),Counter(np.argmax(y_test,axis=1).tolist()) # X_train2=np.interp(X_train, (X_train.min(), X_train.max()), (-32768 , 32767)) # torch.from_numpy(X_train2).float() # y_val # smaple_index=10 # X_train[smaple_index,:],y_train[smaple_index,] # X_train[0,:].shape # multiply(torch.ones((1,2)),torch.ones((1,2))) class ToTensor(object): """Convert ndarrays in sample to Tensors.""" def __init__(self, maxMelLen, sampling_rate): # sr = 44100 etc self.maxMelLen = maxMelLen self.sampling_rate = sampling_rate def __call__(self, sample): x, y = sample #https://github.com/PCerles/audio/blob/3803d0b27a4e13efa760227ef6c71d0f3753aa98/test/test_transforms.py#L262 #librosa defaults n_fft = 2048 hop_length = 512 power = 2.0 n_mels = 128 n_mfcc = 40 # htk is false in librosa, no setting in torchaudio -? # norm is 1 in librosa, no setting in torchaudio -? melspect_transform = torchaudio.transforms.MelSpectrogram(sample_rate=self.sampling_rate, window_fn=torch.hann_window, hop_length=hop_length, n_mels=n_mels, n_fft=n_fft) db_transform = torchaudio.transforms.AmplitudeToDB("power", 80.) 
mel = melspect_transform(x.reshape(-1)) an_x = db_transform(mel) #librosa version # mel = librosa.feature.melspectrogram(y=x.reshape(-1), # sr=self.sampling_rate) # an_x = librosa.power_to_db(mel, ref=np.max) # an_x = an_x.astype("float32") # y = y.astype('float32') # print(an_x.shape) an_x = an_x[:, :self.maxMelLen] # 2-d conv # x = an_x.reshape(1, *an_x.shape[:]) # 1-d conv x = an_x.reshape(1, an_x.shape[0]*an_x.shape[1]) return x,y # #test # maxMelLen_test = 850 # SAMPLING_RATE_test = 48000 # sample_len_seconds = 10 # # to_tensor works on single sample # sample_count = 1 # xx_test = torch.ones((sample_count,SAMPLING_RATE_test*sample_len_seconds)) # y_values = torch.ones(sample_count) # # toTensor = ToTensor(maxMelLen_test,SAMPLING_RATE_test) # x_out,y_out=toTensor((xx_test,y_values)) # x_out.shape,y_out.shape # X_train,X_test,X_val,y_train,y_test,y_val X_train[:].shape # toTensor = ToTensor(maxMelLen_test,SAMPLING_RATE_test) # x_out2,y_out=toTensor((X_train[1:2,:],y_train)) # x_out.shape,y_out.shape # torch.mean(x_out[0]),torch.mean(x_out2[0]) # y_train # X_train,y_train # pitch = augmentations.pitch_shift_n_stepsClass( # runconfigs.SAMPLING_RATE, config['pitch_shift_n_steps']) # noise = augmentations.addNoiseClass(config['noise_factor']) # strech = augmentations.time_stretchClass(runconfigs.SAMPLING_RATE*runconfigs.EXCERPT_LENGTH, # config['time_stretch_factor'], # isRandom=True) # shift = augmentations.shiftClass(config['roll_rate'], isRandom=True) maxMelLen = 938 # old 850 # toTensor = augmentations.ToTensor(maxMelLen,runconfigs.SAMPLING_RATE) toTensor = ToTensor(maxMelLen,runconfigs.SAMPLING_RATE) from ignite.metrics import EpochMetric def roc_auc_perClass_compute_fn(y_preds, y_targets): try: from sklearn.metrics import roc_auc_score except ImportError: raise RuntimeError( "This contrib module requires sklearn to be installed.") y_true = y_targets.numpy() y_pred = y_preds.numpy() # print(y_pred,y_true) # res = [] # for y_true_perClass_Index in 
y_true.shape[1]: # res.append( # roc_auc_score(y_true[:, y_true_perClass_Index], # y_pred[:, y_true_perClass_Index])) res = roc_auc_score(y_true, y_pred, average=None) return res #[docs] class ROC_AUC_perClass(EpochMetric): """Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC) accumulating predictions and the ground-truth during an epoch and applying `sklearn.metrics.roc_auc_score <http://scikit-learn.org/stable/modules/generated/ sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ . Args: output_transform (callable, optional): a callable that is used to transform the :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. check_compute_fn (bool): Optional default False. If True, `roc_curve <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html# sklearn.metrics.roc_auc_score>`_ is run on the first batch of data to ensure there are no issues. User will be warned in case there are any issues computing the function. ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below: .. 
code-block:: python def activated_output_transform(output): y_pred, y = output y_pred = torch.sigmoid(y_pred) return y_pred, y roc_auc = ROC_AUC(activated_output_transform) """ def __init__(self, output_transform=lambda x: x, check_compute_fn: bool = False): # print(output_transform) super(ROC_AUC_perClass, self).__init__(roc_auc_perClass_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn) def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1): """ Utility function for computing output of convolutions takes a tuple of (h,w) and returns a tuple of (h,w) """ from math import floor if type(kernel_size) is not tuple: kernel_size = (kernel_size, kernel_size) h = floor(((h_w[0] + (2 * pad) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1) w = floor(((h_w[1] + (2 * pad) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1) return h, w # mel.shape,an_x.shape,X_train.shape class testModel(nn.Module): '''A simple model for testing by overfitting. 
''' def __init__(self, out_channels, h_w, kernel_size, FLAT=False,output_shape=(10,)): # h_w: height will be always one since we use 1d convolution super(testModel, self).__init__() self.out_channels = out_channels #### CONV self.conv1 = nn.Conv1d(in_channels=1, # depth of image == depth of filters out_channels=self.out_channels, # number of filters kernel_size=kernel_size, # size of the filters/kernels padding=1) self.conv1_shape = conv_output_shape(h_w, kernel_size=kernel_size, stride=1, pad=1, dilation=1) # conv is 1d self.conv1_shape = (1,self.conv1_shape[1]) self.fc1 = nn.Linear(self.out_channels * self.conv1_shape[0] *self.conv1_shape[1], 64) # 100 self.fc2 = nn.Linear(64,output_shape[0]) def forward(self, x): # x = x.reshape(1,) # print(x.shape) # 50,1,108800 (850*128) x = F.relu(self.conv1(x)) # x = self.pool(x) # x = self.drop(x) # print(x.shape)# 58, 2, 108801 # print(self.conv1_shape) # print(x.shape) x = x.view(-1, self.out_channels * self.conv1_shape[0] *self.conv1_shape[1]) # batch_norm is missing x = F.relu((self.fc1(x))) x = (self.fc2(x)) # x = self.drop(x) # x = self.fc4(x) # x = torch.sigmoid(x) # x = F.log_softmax(x,dim=1) return x # test # input_shape=(1,(938*128)) # output_shape=(10,) # testModel_ins=adam(out_channels=2,h_w=input_shape,kernel_size=2,output_shape=output_shape) # # a.conv1.weight # a_out=testModel_ins(torch.ones((3,1,input_shape[1]))) # a_out_correct=torch.zeros(a_out.shape) # a_out_correct[0][:]=1 # a_out_correct # a_out.detach().numpy() # torch.exp(a_out),a_out # for y_true_perClass_Index in a_out_correct.shape[1]: # print(y_true_perClass_Index) # from ignite.contrib.metrics import ROC_AUC # from nna.exp.metrics import ROC_AUC_perClass def activated_output_transform(output): y_pred, y = output # y_pred = torch.exp(y_pred) return y_pred, y # asd=ROC_AUC_perClass(activated_output_transform) # asd.update((a_out,a_out_correct)) # asd.compute() transformCompose = transforms.Compose([ # pitch, # strech, # shift, # noise, toTensor, ]) 
sound_datasets = { phase: runutils.audioDataset(XY[0], XY[1], transform=transformCompose) for phase, XY in zip(['train', 'val', 'test'], [[X_train, y_train], [X_val, y_val], [X_test, y_test]]) } dataloaders = { x: torch.utils.data.DataLoader(sound_datasets[x], **params) for x in ['train', 'val', 'test'] } # this will change h_w = [128, 938] kernel_size = (4, 4) # if config['CNNLayer_count'] == 1: # model = modelArchs.NetCNN1(config['CNN_filters_1'], h_w, # kernel_size).float().to(device) # if config['CNNLayer_count'] == 2: # model = modelArchs.NetCNN2(config['CNN_filters_1'], config.CNN_filters_2, # h_w, kernel_size, # kernel_size).float().to(device) #simpler model output_shape=(CATEGORY_COUNT,) model = testModel(out_channels=2,h_w=(1,h_w[0]*h_w[1]),kernel_size=kernel_size[0]*kernel_size[0],output_shape=output_shape) model.float().to(device) # device is defined before model.float().to(device) # Move model before creating optimizer optimizer = torch.optim.AdamW(model.parameters(), # weight_decay=config['weight_decay'], ) criterion = nn.BCEWithLogitsLoss() # statHistory={'valLoss':[],'trainLoss':[],'trainAUC':[],'valAUC':[]} metrics = { 'loss': Loss(criterion), # 'accuracy': Accuracy(), # 'ROC_AUC': ROC_AUC(runutils.activated_output_transform), 'ROC_AUC': ROC_AUC_perClass(activated_output_transform), } model model.conv1.weight model.conv1 model.fc1.weight # no init 16, 64 print('ready ?') runutils.run(model, dataloaders, optimizer, criterion, metrics, device,config, runconfigs.PROJECT_NAME) model.fc1.weight example_input = (torch.ones((1,1,h_w[0]*h_w[1]))*-32767) example_input = example_input.float().to(device) out=model(example_input) out activated_output_transform((out,out)) torch.Tensor([10,10,10]) sum(model.fc1.weight) m = nn.Sigmoid() loss = nn.BCEWithLogitsLoss() input = torch.tensor([100.0,100,100], requires_grad=True) # target = torch.empty(3).random_(2) target = torch.ones(3) output = loss((input), target) input,target,output input target output m(input) ```
github_jupyter
# bqplot https://github.com/bloomberg/bqplot ## A Jupyter - d3.js bridge bqplot is a jupyter interactive widget library bringing d3.js visualization to the Jupyter notebook. - Apache Licensed bqplot implements the abstractions of Wilkinson’s “The Grammar of Graphics” as interactive Jupyter widgets. bqplot provides both - high-level plotting procedures with relevant defaults for common chart types, - lower-level descriptions of data visualizations meant for complex interactive visualization dashboards and applications involving mouse interactions and user-provided Python callbacks. **Installation:** ```bash conda install -c conda-forge bqplot ``` ``` from __future__ import print_function from IPython.display import display from ipywidgets import * from traitlets import * import numpy as np import pandas as pd import bqplot as bq import datetime as dt np.random.seed(0) size = 100 y_data = np.cumsum(np.random.randn(size) * 100.0) y_data_2 = np.cumsum(np.random.randn(size)) y_data_3 = np.cumsum(np.random.randn(size) * 100.) 
x = np.linspace(0.0, 10.0, size) price_data = pd.DataFrame(np.cumsum(np.random.randn(150, 2).dot([[0.5, 0.8], [0.8, 1.0]]), axis=0) + 100, columns=['Security 1', 'Security 2'], index=pd.date_range(start='01-01-2007', periods=150)) symbol = 'Security 1' dates_all = price_data.index.values final_prices = price_data[symbol].values.flatten() ``` # A simple plot with the pyplot API ``` from bqplot import pyplot as plt plt.figure(1) n = 100 plt.plot(np.linspace(0.0, 10.0, n), np.cumsum(np.random.randn(n)), axes_options={'y': {'grid_lines': 'dashed'}}) plt.show() ``` ### Scatter Plot ``` plt.figure(title='Scatter Plot with colors') plt.scatter(y_data_2, y_data_3, color=y_data) plt.show() ``` ### Histogram ``` plt.figure() plt.hist(y_data, colors=['OrangeRed']) plt.show() ``` # Every component of the figure is an independent widget ``` xs = bq.LinearScale() ys = bq.LinearScale() x = np.arange(100) y = np.cumsum(np.random.randn(2, 100), axis=1) #two random walks line = bq.Lines(x=x, y=y, scales={'x': xs, 'y': ys}, colors=['red', 'green']) xax = bq.Axis(scale=xs, label='x', grid_lines='solid') yax = bq.Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid') fig = bq.Figure(marks=[line], axes=[xax, yax], animation_duration=1000) display(fig) # update data of the line mark line.y = np.cumsum(np.random.randn(2, 100), axis=1) xs = bq.LinearScale() ys = bq.LinearScale() x, y = np.random.rand(2, 20) scatt = bq.Scatter(x=x, y=y, scales={'x': xs, 'y': ys}, default_colors=['blue']) xax = bq.Axis(scale=xs, label='x', grid_lines='solid') yax = bq.Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid') fig = bq.Figure(marks=[scatt], axes=[xax, yax], animation_duration=1000) display(fig) #data updates scatt.x = np.random.rand(20) * 10 scatt.y = np.random.rand(20) ``` ## The same holds for the attributes of scales, axes ``` xs.min = 4 xs.min = None xax.label = 'Some label for the x axis' ``` ## Use bqplot figures as input 
widgets ``` xs = bq.LinearScale() ys = bq.LinearScale() x = np.arange(100) y = np.cumsum(np.random.randn(2, 100), axis=1) #two random walks line = bq.Lines(x=x, y=y, scales={'x': xs, 'y': ys}, colors=['red', 'green']) xax = bq.Axis(scale=xs, label='x', grid_lines='solid') yax = bq.Axis(scale=ys, orientation='vertical', tick_format='0.2f', label='y', grid_lines='solid') ``` ## Selections ``` def interval_change_callback(change): db.value = str(change['new']) intsel = bq.interacts.FastIntervalSelector(scale=xs, marks=[line]) intsel.observe(interval_change_callback, names=['selected'] ) db = widgets.Label() db.value = str(intsel.selected) display(db) fig = bq.Figure(marks=[line], axes=[xax, yax], animation_duration=1000, interaction=intsel) display(fig) line.selected ``` # Handdraw ``` handdraw = bq.interacts.HandDraw(lines=line) fig.interaction = handdraw line.y[0] ``` # Moving points around ``` from bqplot import * size = 100 np.random.seed(0) x_data = range(size) y_data = np.cumsum(np.random.randn(size) * 100.0) ## Enabling moving of points in scatter. 
Try to click and drag any of the points in the scatter and ## notice the line representing the mean of the data update sc_x = LinearScale() sc_y = LinearScale() scat = Scatter(x=x_data[:10], y=y_data[:10], scales={'x': sc_x, 'y': sc_y}, default_colors=['blue'], enable_move=True) lin = Lines(scales={'x': sc_x, 'y': sc_y}, stroke_width=4, line_style='dashed', colors=['orange']) m = Label(value='Mean is %s'%np.mean(scat.y)) def update_line(change): with lin.hold_sync(): lin.x = [np.min(scat.x), np.max(scat.x)] lin.y = [np.mean(scat.y), np.mean(scat.y)] m.value='Mean is %s'%np.mean(scat.y) update_line(None) # update line on change of x or y of scatter scat.observe(update_line, names='x') scat.observe(update_line, names='y') ax_x = Axis(scale=sc_x) ax_y = Axis(scale=sc_y, tick_format='0.2f', orientation='vertical') fig = Figure(marks=[scat, lin], axes=[ax_x, ax_y]) ## In this case on drag, the line updates as you move the points. with scat.hold_sync(): scat.enable_move = True scat.update_on_move = True scat.enable_add = False display(m, fig) ```
github_jupyter
# Stochastic gradient descent (SGD) SGD is an incremental gradient descent algorithm which modifies its weights, in an effort to reach a local minimum. The cuML implementation takes only numpy arrays and cuDF datasets as inputs. - In order to convert your dataset into a cuDF dataframe format please refer the [cuDF documentation](https://rapidsai.github.io/projects/cudf/en/latest/) The SGD algorithm implemented in cuML can accept the following parameters: 1. `loss` : 'hinge', 'log', 'squared_loss' (default = 'squared_loss') 2. `penalty`: 'none', 'l1', 'l2', 'elasticnet' (default = 'none') 3. `alpha`: float (default = 0.0001) 4. `fit_intercept` : boolean (default = True) 5. `epochs` : int (default = 1000) 6. `tol` : float (default = 1e-3) 7. `shuffle` : boolean (default = True) 8. `eta0` : float (default = 0.0) 9. `power_t` : float (default = 0.5) 10. `learning_rate` : 'optimal', 'constant', 'invscaling', 'adaptive' (default = 'constant') 11. `n_iter_no_change` : int (default = 5) For additional information on the SGD model please refer to the [cuML documentation](https://rapidsai.github.io/projects/cuml/en/latest/index.html) - this setup may take a few minutes - long output (output display removed) ``` !wget -nc https://github.com/rapidsai/notebooks-extended/raw/master/utils/rapids-colab.sh !bash rapids-colab.sh import sys, os sys.path.append('/usr/local/lib/python3.6/site-packages/') os.environ['NUMBAPRO_NVVM'] = '/usr/local/cuda/nvvm/lib64/libnvvm.so' os.environ['NUMBAPRO_LIBDEVICE'] = '/usr/local/cuda/nvvm/libdevice/' ``` ### Imports ``` import numpy as np import pandas as pd import cudf from cuml.solvers import SGD as cumlSGD from sklearn.linear_model import SGDRegressor ``` # Helper Functions ``` # check if the mortgage dataset is present and then extract the data from it, else just create a random dataset for sgd import gzip # change the path of the mortgage dataset if you have saved it in a different directory def load_data(nrows, ncols, cached = 
'data/mortgage.npy.gz'): if os.path.exists(cached): print('use mortgage data') with gzip.open(cached) as f: X = np.load(f) # the 4th column is 'adj_remaining_months_to_maturity' # used as the label X = X[:,[i for i in range(X.shape[1]) if i!=4]] y = X[:,4:5] rindices = np.random.randint(0,X.shape[0]-1,nrows) X = X[rindices,:ncols] y = y[rindices] else: # create a random dataset print('use random data') X = np.random.rand(nrows,ncols) y = np.random.randint(0,10,size=(nrows,1)) train_rows = int(nrows*0.8) df_X_train = pd.DataFrame({'fea%d'%i:X[0:train_rows,i] for i in range(X.shape[1])}) df_X_test = pd.DataFrame({'fea%d'%i:X[train_rows:,i] for i in range(X.shape[1])}) df_y_train = pd.DataFrame({'fea%d'%i:y[0:train_rows,i] for i in range(y.shape[1])}) df_y_test = pd.DataFrame({'fea%d'%i:y[train_rows:,i] for i in range(y.shape[1])}) return df_X_train, df_X_test, df_y_train, df_y_test # this function checks if the results obtained from two different methods (sklearn and cuml) are the same from sklearn.metrics import mean_squared_error def array_equal(a,b,threshold=2e-3,with_sign=True): a = to_nparray(a).ravel() b = to_nparray(b).ravel() if with_sign == False: a,b = np.abs(a),np.abs(b) error = mean_squared_error(a,b) res = error<threshold return res # the function converts a variable from ndarray or dataframe format to numpy array def to_nparray(x): if isinstance(x,np.ndarray) or isinstance(x,pd.DataFrame): return np.array(x) elif isinstance(x,np.float64): return np.array([x]) elif isinstance(x,cudf.DataFrame) or isinstance(x,cudf.Series): return x.to_pandas().values return x ``` # Run tests ``` %%time # nrows = number of samples # ncols = number of features of each sample nrows = 2**20 ncols = 399 # dataset is split into a ratio of 80:20, # 80% is used as the training data and the remaining 20% is used as the test data X_train, X_test, y_train, y_test = load_data(nrows,ncols) y_train_ser = y_train['fea0'] print('training data',X_train.shape) print('training 
label',y_train.shape)
print('testing data',X_test.shape)
print('testing label',y_test.shape)
```

Here we set the parameters used by both libraries. You can change the number of iterations used by changing the `iterations` variable. Please note that making this too high can cause the functions to take a long time to complete.

```
#set parameters
learning_rate = 'adaptive'
datatype = np.float32
penalty = 'elasticnet'
loss = 'squared_loss'
iterations = 10
```

The `max_iter` parameter controls the maximum number of iterations the model can run for, but it doesn’t guarantee that the model will definitely run for all those epochs; therefore the sklearn model might run for fewer epochs than the cuML model.

```
%%time
# use the sklearn SGD Regressor model to fit the dataset
sk_sgd = SGDRegressor(learning_rate=learning_rate, eta0=0.07,
                      max_iter=iterations, tol=0.0, fit_intercept=True,
                      penalty=penalty, loss=loss)
sk_sgd.fit(X_train, y_train_ser)

%%time
# test the model by predicting its results for the unseen test set
y_sk = sk_sgd.predict(X_test)
# calculate the Mean Squared Error for the model's predictions
error_sk = mean_squared_error(y_test,y_sk)

%%time
# convert the pandas dataframe to cuDF dataframe and series
X_cudf = cudf.DataFrame.from_pandas(X_train)
X_cudf_test = cudf.DataFrame.from_pandas(X_test)
y_cudf = cudf.Series(y_train_ser)

%%time
# fit the training data on cuML's implementation of SGD
cu_sgd = cumlSGD(learning_rate=learning_rate, eta0=0.07, epochs=iterations, #epochs == n_iter
                 batch_size=512, tol=0.0, penalty=penalty, loss=loss)
cu_sgd.fit(X_cudf, y_cudf)

%%time
# test the model by predicting its values for the test set
y_pred = cu_sgd.predict(X_cudf_test)
y_pred = to_nparray(y_pred).ravel()
# calculate the Mean Squared Error for the model's predictions
error_cu = mean_squared_error(y_test,y_pred)

# print the MSE of the sklearn and cuML models to compare them
print("SKL MSE(y):")
print(error_sk)
print("CUML MSE(y):")
print(error_cu)
```
github_jupyter
# Unit 4: Neighborhood-based Collaborative Filtering for Rating Prediction In this section we generate personalized recommendations for the first time. We exploit rating similarities among users and items to identify similar users and items that assist in finding the relevant items to recommend for each user. This describes the fundamental idea behind Collaborative Filtering (CF) and using kNN is a neighborhood-based approach towards CF. In a later unit we will also have a look at model-based approaches. This is also the first time we try to predict user ratings for unknown items using rating predictions to take the top-$N$ items with the highest rating predictions and recommend those to the user. ``` from collections import OrderedDict import itertools from typing import Dict, List, Tuple import numpy as np import pandas as pd from recsys_training.data import Dataset from recsys_training.evaluation import get_relevant_items from recsys_training.utils import get_entity_sim ml100k_ratings_filepath = '../../data/raw/ml-100k/u.data' ``` ## Load Data ``` data = Dataset(ml100k_ratings_filepath) data.rating_split(seed=42) user_ratings = data.get_user_ratings() ``` The idea behind this recommender is to use item ratings of the $k$ most similar users (neighbors). We identify those _nearest neighbors_ with a similarity metric which we apply to the ratings both, root user and possible neighbor, have in common. Similarity thereby means having a similar opinion on movies. The steps are as follows: 1. Compute user-user similarities (we use the Pearson Correlation Coefficient here, but feel free to try other similarity metrics) 2. For each user: 1. Get the k nearest neighbors along with their similarities 2. Collect the neighborhood item ratings and ignore those already rated by the root user 3. Item Rating Prediction: Compute the similarity-weighted sum of neighborhood item ratings 4. 
Recommendations: Get the $N$ items with the highest ratings that have a minimum rating count

### 1. User-User Similarities

```
sim_metric = 'pearson'
user_user_sims = {}
user_pairs = itertools.combinations(data.users, 2)
```

The following takes a few seconds to finish ...

```
for pair in user_pairs:
    # get_entity_sim returns a tuple; element [0] is the similarity value
    user_user_sims[pair] = get_entity_sim(pair[0], pair[1],
                                          user_ratings,
                                          sim_metric)
user_user_sims[(1,4)]
```

## 2. Computing Recommendations

### A. Implement Nearest Neighbors for a given user

![](../Parrot.png)

**Task:** It's your turn again. Complete `get_k_nearest_neighbors` to return a sorted list of the $k$ nearest neighbors - identified by their id - for a given user, each along with its similarity.

```
def get_k_nearest_neighbors(user: int, k: int,
                            user_user_sims: dict) -> List[Tuple[int, float]]:
    """Return the k users most similar to `user`, ordered by descending similarity.

    NOTE(review): reads the notebook-global `data` object for the set of all
    users -- the function is not self-contained.
    User pairs with an undefined (NaN) similarity are skipped.
    """
    # candidate neighbors: every known user except the root user itself
    neighbors = set(data.users)
    neighbors.remove(user)
    nearest_neighbors = dict()
    for neighbor in neighbors:
        # similarities are keyed by the sorted (low_id, high_id) user pair;
        # element [0] of the stored value is the similarity itself
        sim = user_user_sims[tuple(sorted((user, neighbor)))][0]
        if pd.notnull(sim):
            nearest_neighbors[neighbor] = sim
    # sort by similarity (descending) and keep only the top k
    nearest_neighbors = sorted(nearest_neighbors.items(),
                               key=lambda kv: kv[1],
                               reverse=True)
    return nearest_neighbors[:k]

user_neighbors = get_k_nearest_neighbors(1, k=10, user_user_sims=user_user_sims)
user_neighbors
```

### B. Obtain the Neighborhood Ratings

**Task:** Now, use the nearest neighbors and get their ratings, but leave out the items our root user has already rated (known positives). Return a mapping from unknown item to a list of dicts with neighbor similarity and item rating.
``` def get_neighborhood_ratings(user, user_neighbors: List[Tuple[int, float]]) -> Dict[int, List[Dict[str, float]]]: neighborhood_ratings = {} for neighbor, sim in user_neighbors: neighbor_ratings = user_ratings[neighbor].copy() # collect neighbor ratings and items for item, rating in neighbor_ratings.items(): add_item = {'sim': sim, 'rating': rating} if item not in neighborhood_ratings.keys(): neighborhood_ratings[item] = [add_item] else: neighborhood_ratings[item].append(add_item) # remove known items known_items = list(user_ratings[user].keys()) for known_item in known_items: neighborhood_ratings.pop(known_item, None) return neighborhood_ratings neighborhood_ratings = get_neighborhood_ratings(1, user_neighbors) list(neighborhood_ratings.items())[:10] ``` ### C. Compute Rating Predictions from Neighborhood Ratings ![](../Parrot.png) **Task:** In this step, we estimate ratings for the seed user based on the neighborhood ratings. We implement a similarity weighted average of neighbor ratings for that. Return a mapping from item to its prediction and the count of neighbor ratings received. ``` def compute_rating_pred(neighborhood_ratings: dict) -> dict: rating_preds = dict() for item, ratings in neighborhood_ratings.items(): if len(ratings) > 0: sims = np.array([rating['sim'] for rating in ratings]) ratings = np.array([rating['rating'] for rating in ratings]) pred_rating = (sims * ratings).sum() / sims.sum() count = len(sims) rating_preds[item] = {'pred': pred_rating, 'count': count} else: rating_preds[item] = {'pred': None, 'count': 0} return rating_preds rating_preds = compute_rating_pred(neighborhood_ratings) list(rating_preds.items())[:20] ``` ### D. Compute the Top-$N$ Recommendation Items ![](../Parrot.png) **Task:** The last step takes the rating predictions and returns the $N$ highest predictions which have a minimum rating count, i.e. the number of neighbors from the neighborhood that rated this item. 
```
def compute_top_n(rating_preds: dict, min_count: int, N: int) -> OrderedDict:
    """Return the N highest-predicted items among those with at least min_count neighbor ratings."""
    # Drop items rated by too few neighbors to be trusted.
    rating_preds = {key: val for (key, val) in rating_preds.items()
                    if val['count'] >= min_count}
    # assuming more ratings mean higher confidence in the prediction
    # (count is the tie-breaker after the predicted rating)
    sorted_rating_preds = sorted(rating_preds.items(),
                                 key=lambda kv: (kv[1]['pred'], kv[1]['count']),
                                 reverse=True)
    return OrderedDict(sorted_rating_preds[:N])

top_n_recs = compute_top_n(rating_preds, min_count=2, N=10)
top_n_recs
```

### Combine all steps in `get_recommendations`

```
def get_recommendations(user: int, user_user_sims: dict, k: int, C: int, N: int):
    """End-to-end user-based CF pipeline.

    k nearest neighbors -> neighborhood ratings -> weighted predictions ->
    top N items with at least C neighbor ratings.
    """
    user_neighbors = get_k_nearest_neighbors(user, k=k, user_user_sims=user_user_sims)
    neighborhood_ratings = get_neighborhood_ratings(user, user_neighbors)
    rating_preds = compute_rating_pred(neighborhood_ratings)
    top_n_recs = compute_top_n(rating_preds, min_count=C, N=N)
    return top_n_recs

get_recommendations(1, user_user_sims, 10, 2, 10)
```

## Evaluation

Let's check the performance of the neighborhood- and user-based recommender for a neighborhood size of $k = 60$, minimum rating count of $C = 10$ and stay with $N = 10$ recommendations.

```
k = 60
C = 10
N = 10

relevant_items = get_relevant_items(data.test_ratings)
users = relevant_items.keys()

prec_at_N = dict.fromkeys(data.users)
for user in users:
    recommendations = get_recommendations(user, user_user_sims, k, C, N)
    recommendations = list(recommendations.keys())
    # A "hit" is a recommended item that is also relevant in the test set.
    hits = np.intersect1d(recommendations, relevant_items[user])
    prec_at_N[user] = len(hits)/N

# Mean precision@N over the users that actually received recommendations
# (users missing from the test set keep their None placeholder and are excluded).
np.mean([val for val in prec_at_N.values() if val is not None])
```
github_jupyter
``` import pandas as pd %matplotlib inline players = pd.read_csv('players.csv') matches = pd.read_csv('match.csv') heroes = pd.read_csv('hero_names.csv') items = pd.read_csv('item_ids.csv') items.info() hero_lookup = dict(zip(heroes['hero_id'], heroes['localized_name'])) hero_lookup[0] = 'Unknown' players['hero'] = players['hero_id'].apply(lambda _id: hero_lookup[_id]) item_lookup = dict(zip(items['item_id'], items['item_name'])) item_lookup[0] = 'Unknown' def find_item(_id): return item_lookup.get(_id, 'u_' + str(_id)) players['item_0'] = players['item_0'].apply(find_item) players['item_1'] = players['item_1'].apply(find_item) players['item_2'] = players['item_2'].apply(find_item) players['item_3'] = players['item_3'].apply(find_item) players['item_4'] = players['item_4'].apply(find_item) players['item_5'] = players['item_5'].apply(find_item) player_heroes = pd.get_dummies(players['hero']) player_heroes item0 = pd.get_dummies(players['item_0'].fillna(0)) item1 = pd.get_dummies(players['item_1'].fillna(0)) item2 = pd.get_dummies(players['item_2'].fillna(0)) item3 = pd.get_dummies(players['item_3'].fillna(0)) item4 = pd.get_dummies(players['item_4'].fillna(0)) item5 = pd.get_dummies(players['item_5'].fillna(0)) player_items = item0 \ .add(item1, fill_value=0) \ .add(item2, fill_value=0) \ .add(item3, fill_value=0) \ .add(item4, fill_value=0) \ .add(item5, fill_value=0) radiant_cols = list(map(lambda s: 'radiant_' + s, player_heroes.columns.values)) dire_cols = list(map(lambda s: 'dire_' + s, player_heroes.columns.values)) radiant_items_cols = list(map(lambda s: 'radiant_' + str(s), player_items.columns.values)) dire_items_cols = list(map(lambda s: 'dire_' + str(s), player_items.columns.values)) from os.path import isfile X = None radiant_heroes = [] dire_heroes = [] radiant_items = [] dire_items = [] for _id, _index in players.groupby('match_id').groups.items(): radiant_heroes.append(player_heroes.iloc[_index][:5].sum().values) 
dire_heroes.append(player_heroes.iloc[_index][5:].sum().values) radiant_items.append(player_items.iloc[_index][:5].sum().values) dire_items.append(player_items.iloc[_index][5:].sum().values) radiant_heroes = pd.DataFrame(radiant_heroes, columns=radiant_cols) dire_heroes = pd.DataFrame(dire_heroes, columns=dire_cols) radiant_items = pd.DataFrame(radiant_items, columns=radiant_items_cols) dire_items = pd.DataFrame(dire_items, columns=dire_items_cols) X = pd.concat([radiant_heroes, radiant_items, dire_heroes, dire_items], axis=1) # X.to_csv('mapped_match_hero_item.csv', index=False) X.head() y = matches['radiant_win'].apply(lambda win: 1 if win else 0) classes = ['Dire Win', 'Radiant Win'] _ = pd.Series(y).apply(lambda i: classes[i]).value_counts() from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=.1) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) import tensorflow as tf from tensorflow.keras.models import Sequential import numpy as np from tensorflow.keras.layers import Dense, Dropout from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras import regularizers config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True session = tf.compat.v1.Session(config=config) model = tf.keras.Sequential([ Dense(256, activation='relu', input_shape=(612,)), Dense(256, activation='relu'), Dense(128, activation='relu'), Dropout(0.7), Dense(256, activation='relu'), Dense(128, activation='relu'), Dropout(0.5), Dense(1, activation='sigmoid'), ]) optimizer = Adam(lr=0.001) model.compile(optimizer, loss='binary_crossentropy', metrics=['accuracy']) model.summary() es = EarlyStopping(monitor='val_loss', mode='min', verbose=1) history = model.fit(X_train, y_train, batch_size=200, verbose=2, epochs=100, validation_data=(X_test, y_test), callbacks=[es]) import matplotlib.pyplot as plt 
pd.DataFrame(history.history).plot(figsize=(8, 5)) plt.grid(True) plt.gca().set_ylim(0, 1) plt.show() def predict_match(ind, dset=X_train): tst = np.array([dset.iloc[ind]]) chance = model.predict(tst) xt = dset.iloc[ind] rs = xt.to_numpy().nonzero() heroes = xt.iloc[rs] return list(heroes.index), chance np.set_printoptions(formatter={'float': '{: 0.3f}'.format}) for i in range(10,20): h, c = predict_match(i, X_test) print(h) print(c) print('\n') ops = [predict_match(10, X_test), predict_match(12, X_test), predict_match(19, X_test)] rheroes = ops[0][0][:5] ritems = ops[0][0][5:27] dheroes = ops[0][0][27:32] ditems = ops[0][0][32:] win = ops[0][1] rheroes = [x.split('_')[1] for x in rheroes] ritems = [x.split('_')[1] for x in ritems] dheroes = [x.split('_')[1] for x in dheroes] ditems = [x.split('_')[1] for x in ditems] print(rheroes, ritems) print() print(dheroes, ditems) print() print(win) y_test.iloc[10] ```
github_jupyter
``` BRANCH = 'v1.0.2' """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell # install NeMo BRANCH = 'v1.0.2' !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp] import os import wget from nemo.collections import nlp as nemo_nlp from nemo.collections import common as nemo_common from omegaconf import OmegaConf ``` # Tokenizers Background For Natural Language Processing, tokenization is an essential part of data preprocessing. It is the process of splitting a string into a list of tokens. One can think of token as parts like a word is a token in a sentence. Depending on the application, different tokenizers are more suitable than others. For example, a WordTokenizer that splits the string on any whitespace, would tokenize the following string "My first program, Hello World." -> ["My", "first", "program,", "Hello", "World."] To turn the tokens into numerical model input, the standard method is to use a vocabulary and one-hot vectors for [word embeddings](https://en.wikipedia.org/wiki/Word_embedding). If a token appears in the vocabulary, its index is returned, if not the index of the unknown token is returned to mitigate out-of-vocabulary (OOV). # Tokenizers in NeMo In NeMo, we support the most used tokenization algorithms. We offer a wrapper around [Hugging Faces's AutoTokenizer](https://huggingface.co/transformers/model_doc/auto.html#autotokenizer) - a factory class that gives access to all Hugging Face tokenizers. 
This includes particularly all BERT-like model tokenizers, such as BertTokenizer, AlbertTokenizer, RobertaTokenizer, GPT2Tokenizer. Apart from that, we also support other tokenizers such as WordTokenizer, CharTokenizer, and [Google's SentencePieceTokenizer](https://github.com/google/sentencepiece). We make sure that all tokenizers are compatible with BERT-like models, e.g. BERT, Roberta, Albert, and Megatron. For that, we provide a high-level user API `get_tokenizer()`, which allows the user to instantiate a tokenizer model with only four input arguments: * `tokenizer_name: str` * `tokenizer_model: Optional[str] = None` * `vocab_file: Optional[str] = None` * `special_tokens: Optional[Dict[str, str]] = None` Hugging Face and Megatron tokenizers (which uses Hugging Face underneath) can be automatically instantiated by only `tokenizer_name`, which downloads the corresponding `vocab_file` from the internet. For SentencePieceTokenizer, WordTokenizer, and CharTokenizers `tokenizer_model` or/and `vocab_file` can be generated offline in advance using [`scripts/tokenizers/process_asr_text_tokenizer.py`](https://github.com/NVIDIA/NeMo/blob/main/scripts/process_asr_text_tokenizer.py) The tokenizers in NeMo are designed to be used interchangeably, especially when used in combination with a BERT-based model. 
Let's take a look at the list of available tokenizers: ``` nemo_nlp.modules.get_tokenizer_list() ``` # Hugging Face AutoTokenizer ``` # instantiate tokenizer wrapper using pretrained model name only tokenizer1 = nemo_nlp.modules.get_tokenizer(tokenizer_name="bert-base-cased") # the wrapper has a reference to the original HuggingFace tokenizer print(tokenizer1.tokenizer) # check vocabulary (this can be very long) print(tokenizer1.tokenizer.vocab) # show all special tokens if it has any print(tokenizer1.tokenizer.all_special_tokens) # instantiate tokenizer using custom vocabulary vocab_file = "myvocab.txt" vocab = ["he", "llo", "world"] with open(vocab_file, 'w') as vocab_fp: vocab_fp.write("\n".join(vocab)) tokenizer2 = nemo_nlp.modules.get_tokenizer(tokenizer_name="bert-base-cased", vocab_file=vocab_file) # Since we did not overwrite special tokens they should be the same as before print(tokenizer1.tokenizer.all_special_tokens == tokenizer2.tokenizer.all_special_tokens ) ``` ## Adding Special tokens We do not recommend overwriting special tokens for Hugging Face pretrained models, since these are the commonly used default values. If a user still wants to overwrite the special tokens, specify some of the following keys: ``` special_tokens_dict = {"unk_token": "<UNK>", "sep_token": "<SEP>", "pad_token": "<PAD>", "bos_token": "<CLS>", "mask_token": "<MASK>", "eos_token": "<SEP>", "cls_token": "<CLS>"} tokenizer3 = nemo_nlp.modules.get_tokenizer(tokenizer_name="bert-base-cased", vocab_file=vocab_file, special_tokens=special_tokens_dict) # print newly set special tokens print(tokenizer3.tokenizer.all_special_tokens) # the special tokens should be different from the previous special tokens print(tokenizer3.tokenizer.all_special_tokens != tokenizer1.tokenizer.all_special_tokens ) ``` Notice, that if you specify tokens that were not previously included in the tokenizer's vocabulary file, new tokens will be added to the vocabulary file. 
You will see a message like this: `['<MASK>', '<CLS>', '<SEP>', '<PAD>', '<SEP>', '<CLS>', '<UNK>'] will be added to the vocabulary. Please resize your model accordingly` ``` # A safer way to add special tokens is the following: # define your model pretrained_model_name = 'bert-base-uncased' model = nemo_nlp.modules.get_lm_model(pretrained_model_name=pretrained_model_name) # define pretrained tokenizer tokenizer_default = nemo_nlp.modules.get_tokenizer(tokenizer_name=pretrained_model_name) tokenizer_default.text_to_tokens('<MY_NEW_TOKEN> and another word') ``` As you can see in the above, the tokenizer splits `<MY_NEW_TOKEN>` into subtokens. Let's add this to the special tokens to make sure the tokenizer does not split this into subtokens. ``` special_tokens = {'bos_token': '<BOS>', 'cls_token': '<CSL>', 'additional_special_tokens': ['<MY_NEW_TOKEN>', '<ANOTHER_TOKEN>']} tokenizer_default.add_special_tokens(special_tokens_dict=special_tokens) # resize your model so that the embeddings for newly added tokens are updated during training/finetuning model.resize_token_embeddings(tokenizer_default.vocab_size) # let's make sure the tokenizer doesn't split our special tokens into subtokens tokenizer_default.text_to_tokens('<MY_NEW_TOKEN> and another word') ``` Now, the model doesn't break down our special token into the subtokens. ## Megatron model tokenizer ``` # Megatron tokenizers are instances of the Hugging Face BertTokenizer. 
tokenizer4 = nemo_nlp.modules.get_tokenizer(tokenizer_name="megatron-bert-cased") ``` # Train custom tokenizer model and vocabulary from text file We use the [`scripts/tokenizers/process_asr_text_tokenizer.py`](https://github.com/NVIDIA/NeMo/blob/main/scripts/process_asr_text_tokenizer.py) script to create a custom tokenizer model with its own vocabulary from an input file ``` # download tokenizer script script_file = "process_asr_text_tokenizer.py" if not os.path.exists(script_file): print('Downloading script file...') wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/scripts/tokenizers/process_asr_text_tokenizer.py') else: print ('Script already exists') # Let's prepare some small text data for the tokenizer data_text = "NeMo is a toolkit for creating Conversational AI applications. \ NeMo toolkit makes it possible for researchers to easily compose complex neural network architectures \ for conversational AI using reusable components - Neural Modules. \ Neural Modules are conceptual blocks of neural networks that take typed inputs and produce typed outputs. \ Such modules typically represent data layers, encoders, decoders, language models, loss functions, or methods of combining activations. \ The toolkit comes with extendable collections of pre-built modules and ready-to-use models for automatic speech recognition (ASR), \ natural language processing (NLP) and text synthesis (TTS). \ Built for speed, NeMo can utilize NVIDIA's Tensor Cores and scale out training to multiple GPUs and multiple nodes." # Write the text data into a file data_file="data.txt" with open(data_file, 'w') as data_fp: data_fp.write(data_text) # Some additional parameters for the tokenizer # To tokenize at unigram, char or word boundary instead of using bpe, change --spe_type accordingly. # More details see https://github.com/google/sentencepiece#train-sentencepiece-model tokenizer_spe_type = "bpe" # <-- Can be `bpe`, `unigram`, `word` or `char` vocab_size = 32 ! 
python process_asr_text_tokenizer.py --data_file=$data_file --data_root=. --vocab_size=$vocab_size --tokenizer=spe --spe_type=$tokenizer_spe_type # See created tokenizer model and vocabulary spe_model_dir=f"tokenizer_spe_{tokenizer_spe_type}_v{vocab_size}" ! ls $spe_model_dir ``` # Use custom tokenizer for data preprocessing ## Example: SentencePiece for BPE ``` # initialize tokenizer with created tokenizer model, which inherently includes the vocabulary and specify optional special tokens tokenizer_spe = nemo_nlp.modules.get_tokenizer(tokenizer_name="sentencepiece", tokenizer_model=spe_model_dir+"/tokenizer.model", special_tokens=special_tokens_dict) # specified special tokens are added to the vocabuary print(tokenizer_spe.vocab_size) ``` ## Example: WordTokenizer from Vocabulary ``` # If you want to use a simple tokenizer like WordTokenizer without first generating the tokenizer.model first # we provide the alternative class WordTokenizer or CharTokenizer that takes a user vocabulary as input # initialize tokenizer with vocabulary and specify optional special tokens tokenizer_word = nemo_nlp.modules.get_tokenizer(tokenizer_name="word", vocab_file=vocab_file, special_tokens=special_tokens_dict) # specified special tokens are added to the vocabulary print(tokenizer_word.vocab_size) ``` # Using any tokenizer to tokenize text into BERT compatible input ``` text="hello world" # create tokens tokenized = [tokenizer_word.bos_token] + tokenizer_word.text_to_tokens(text) + [tokenizer_word.eos_token] print(tokenized) # turn token into input_ids for a neural model, such as BERTModule print(tokenizer_word.tokens_to_ids(tokenized)) ```
github_jupyter
# The Spinning Effective One-Body Initial Condition Solver ## Author: Tyler Knowles ## This module documents the reduced spinning effective one-body initial condition solver as numerically implemented in LALSuite's SEOBNRv3 gravitational waveform approximant. That is, we follow Section IV A of [Buonanno, Chen, and Damour (2006)](https://arxiv.org/abs/gr-qc/0508067). **Notebook Status:** <font color='red'><b> In progress </b></font> **Validation Notes:** This module is under active development -- do ***not*** use the resulting code for scientific applications. In the future, this module will be validated against the LALSuite [SEOBNRv3/SEOBNRv3_opt code]( https://git.ligo.org/lscsoft/lalsuite.) that was reviewed and approved for LIGO parameter estimation by the LIGO Scientific Collaboration. ## Introduction ### The Physical System of Interest Consider two compact objects (e.g. black holes or neutron stars) with masses $m_{1}$, $m_{2}$ (in solar masses) and spin angular momenta ${\bf S}_{1}$, ${\bf S}_{2}$ in a binary system. The spinning effective one-body ("SEOB") Hamiltonian $H_{\rm real}$ (see [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.69)) describes the dynamics of this system. We seek initial conditions for nonadiabatic evolutions of such a system, and follow [BCD2006](https://arxiv.org/abs/gr-qc/0508067) Section IV A. To compute the initial conditions, we begin with the following system parameters: 1. the mass of each compact object, denoted $m_{1}$, $m_{2}$, 1. the spin vector of each compact object, denoted ${\bf S}_{1}$, ${\bf S}_{2}$, and 1. initial orbital frequency $f$. We choose a right-handed spatial coordinate basis $\left\{ {\bf e}_{0}, {\bf e}_{1}, {\bf e}_{2} \right\}$ so that the initial separation vector ${\bf r}$ between the compact objects lies along the ${\bf e}_{0}$-axis and the orbital plane coincides with the ${\bf e}_{0}$, ${\bf e}_{1}$-plane. Assume that ${\bf S}_{1}$, ${\bf S}_{2}$ are written in this basis. 
Our goal is to produce initial dynamical variables

1. ${\bf x} = \left( x, y, z \right)$, and
1. ${\bf p} = \left( p_{x}, p_{y}, p_{z} \right)$.

We include below the physical parameters necessary to compute the initial conditions. Besides the physical parameters, we also need the [Euler–Mascheroni constant](https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant) $\gamma$ and the [geometrized](https://en.wikipedia.org/wiki/Geometrized_unit_system) solar mass $\mathcal{M}_{\odot}$, both hard-coded in LALSuite with the significant digits shown below. (The following links point directly to the appropriate LALSuite documentation: [$\gamma$](https://lscsoft.docs.ligo.org/lalsuite/lal/group___l_a_l_constants__h.html#gac6af32574ff8acaeeafe8bf422281e98) and [$\mathcal{M}_{\odot}$](https://lscsoft.docs.ligo.org/lalsuite/lal/group___l_a_l_constants__h.html#gab83f8c705dda3fd0bb2d5f2470bb9cdd).)

Please note that throughout this notebook we adopt the following conventions:

1. $c = G = 1$ where $c$ is the speed of light in a vacuum and $G$ is Newton's gravitational constant,
1. $m_{1} \ge m_{2}$,
1. hatted vectors (e.g. $\hat{\bf L}_{N}$) usually denote scaled or unit vectors, and
1. the initial inclination angle $\iota$ of the system relative to some observer is chosen to be zero.

<font color='red'>Please note that in [BCD2006](https://arxiv.org/abs/gr-qc/0508067) the initial conditions are solved for given an initial separation; here we use a given initial frequency instead. The difference is in our approach to solving Equation (4.8).
Our approach also differs from that found in LALSuite's SEOBNRv3 code XLALSimIMRSpinEOBInitialConditionsPrec() function (file: LALSimIMRSpinEOBInitialConditionsPrec.c) because we choose our intial coordinate system so that the inclination angle $\iota$ is zero and $m_{1} \ge m_{2}$.</font> ### Citations Throughout this module, we refer to * [Buonanno, Chen, and Damour (2006)](https://arxiv.org/abs/gr-qc/0508067) as BCD2006, * [Barausse and Buonanno (2010)](https://arxiv.org/abs/0912.3517) as BB2010, * [Taracchini, et. al. (2012)](https://arxiv.org/abs/1202.0790) as T2012, * [Damour, et. al. (2009)](https://arxiv.org/abs/0811.2069) as DIN2009, and * [Pan, et. al. (2014)](https://arxiv.org/abs/1307.6232) as P2014. LALSuite line numbers are taken from Git commit bba40f2 (see [LALSuite's GitLab page](https://git.ligo.org/lscsoft/lalsuite)). ``` # Initial condition solver for the spinning effective one-body formulation # See https://arxiv.org/abs/gr-qc/0508067 Section IV A, which we refer to as BCD2006 # Import necessary NumPy, SymPy, and SEOBNR modules import numpy as np import os.path from scipy.optimize import root from scipy.interpolate import interp1d, interp2d from numpy.linalg import norm import SEOBNR.NQC_corrections as nqc import SEOBNR.nqc_interp as nqi # For testing, remove numpy and sympy expression files # For now, do NOT regenerate CSE expressions import shutil, os import sys#TylerK: Add sys to get cmdline_helper from NRPy top directory; remove this line and next when debugged sys.path.append('../') !rm -r SEOBNR_Playground_Pycodes outdir = os.path.join("SEOBNR_Playground_Pycodes/") import cmdline_helper as cmd cmd.mkdir(outdir) with open(outdir+"__init__.py", "w") as file: file.write("") # Input variables: will eventually structure this module as a function with the following input parameters # m1, m2 given in solar masses, f in Hz, and spin in m1 = 23. m2 = 10. f = 20. 
S1 = np.array([0.01, 0.02, -0.03]) S2 = np.array([0.04, -0.05, 0.06]) # Initial conditions are computed with tortoise = 0; we later convert momentum if necessary # See LALSuite's LALSimIMRSpinEOBInitialConditionsPrec.c Line 775 and the discussion # preceeding Equation (14) of Taracchini, et. al. 2012 (https://arxiv.org/abs/1202.0790) tortoise = 0.0 # The values of the following constants are from LALSuite (see LALSuite documentation at # https://lscsoft.docs.ligo.org/lalsuite/lal/group___l_a_l_constants__h.html). # Euler–Mascheroni constant $\gamma$ EMgamma = 0.577215664901532860606512090082402431 # Geomtrized solar mass $\mathcal{M}_{\odot}$ Msol = 4.925491025543575903411922162094833998e-6 #Convert the spins to dimensionless quantities S1 *= m1*m1 S2 *= m2*m2 ``` <a id='toc'></a> # Table of Contents $$\label{toc}$$ This notebook is organized as follows, matching the "steps" listed in [BCD2006](https://arxiv.org/abs/gr-qc/0508067): 1. [Step 1:](#step1) Initial Coordinate Choice * [Step 1.a:](#massterms) Mass terms * [Step 1.b:](#spinterms) Spin terms * [Step 1.c:](#ln) Normalized Orbital Angular Momenutm $\hat{\bf L}_{N}$ * [Step 1.d:](#rhat) Normalized Position $\hat{\bf r}$ * [Step 1.e:](#vhat) Normalized Velocity $\hat{\bf v}$ * [Note](#step1note) 1. [Step 2:](#step2) Compute ${\bf r}$, ${\bf p}_{r}$, ${\bf p}_{\theta}$, and ${\bf p}_{\phi}$ * [Step 2.a:](#omega) $\omega$ * [Step 2.b:](#velocity) Initial Velocity $v$ * [Step 2.c:](#skerr) ${\bf S}_{\rm Kerr}$ * [Step 2.d:](#rootfinding) Root finding 1. 
[Step 3:](#step3) Rotate $\hat{\bf L} \to {\bf e}_{z}$
    * [Note](#step3not3)
    * [Step 3.a:](#phat) Normalize ${\bf q}$ and ${\bf p}$
    * [Step 3.b:](#lhat) $\hat{\bf L}$
    * [Step 3.c:](#rotate) Rotation matrix
    * [Step 3.d:](#rotaterhat) Rotate $\hat{\bf r}$
    * [Step 3.e:](#rotatevhat) Rotate $\hat{\bf v}$
    * [Step 3.f:](#rotatelnhat) Rotate $\hat{\bf L}_{N}$
    * [Step 3.g:](#rotates1) Rotate ${\bf S}_{1}$
    * [Step 3.h:](#rotates2) Rotate ${\bf S}_{2}$
    * [Step 3.i:](#rotateshat1) Rotate $\hat{\bf S}_{1}$
    * [Step 3.j:](#rotateshat2) Rotate $\hat{\bf S}_{2}$
    * [Step 3.k:](#rotateq) Rotate ${\bf q}$
    * [Step 3.l:](#rotatep) Rotate ${\bf p}$
1. [Step 4:](#step4) Compute $\dot{\bf r}$
    * [Step 4.a:](#carttosph) Convert from Cartesian to Spherical Coordinates
    * [Step 4.b:](#secondderiv) Second partial derivatives of $H_{\rm real}$
    * [Step 4.c:](#dedr) $\frac{ \partial E }{ \partial r }$
    * [Step 4.e:](#sigmastar) $\boldsymbol{\sigma}^{*}$
    * [Step 4.f:](#hreal) $H_{\rm real}$
1. [Step 5:](#step5) Invert the rotation of Step 3
1. [Output](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
1. [Validation](#validation): Perform validation checks against LALSuite's SEOBNRv3 code (commit bba40f2)

<a id='step1'></a>

# Step 1: Initial Coordinate Choice \[Back to [top](#toc)\]
$$\label{step1}$$

<a id='massterms'></a>

## Step 1.a: Mass terms \[Back to [top](#toc)\]
$$\label{massterms}$$

Following the notation preceding [BCD2006](https://arxiv.org/abs/gr-qc/0508067) Equation (2.2), we define the total mass of the system $M$ and the symmetric mass ratio $\eta$:

\begin{align*}
M &= m_{1} + m_{2} \\
\eta &= \frac{ m_{1} m_{2} }{ M^{2} }
\end{align*}

See LALSimIMRSpinEOBInitialConditionsPrec.c Lines 762--763.
``` # Binary system total mass $M$ M = m1 + m2 # Inverse mass terms used repeatedly when computing initial conditions Minv = np.divide(1,M) Msqinv = Minv*Minv # Symmetric mass ratio $\eta$ eta = m1*m2*Msqinv ``` <a id='spinterms'></a> ## Step 1.b: Spin terms \[Back to [top](#toc)\] $$\label{spinterms}$$ Since we assumed $G = c = 1$, we normalize and make the spin angular momenta dimensionless via: \begin{align*} \hat{\bf S}_{1} &= \frac{ 1 }{ M^{2} } {\bf S}_{1} \\ \hat{\bf S}_{2} &= \frac{ 1 }{ M^{2} } {\bf S}_{2} \end{align*} See LALSimIMRSpinEOBInitialConditionsPrec.c Lines 768--771. ``` # Normalized, dimensionless spin vectors S1hat = Msqinv*S1 S2hat = Msqinv*S2 ``` <a id='ln'></a> ## Step 1.c: Normalized Orbital Angular Momenutm $\hat{\bf L}_{N}$ \[Back to [top](#toc)\] $$\label{ln}$$ Since we assume that the initial separation vector ${\bf r}$ between $m_{1}$ and $m_{2}$ lies along the ${\bf e}_{0}$-axis and the initial orbital plane coincides with the ${\bf e}_{0},{\bf e}_{1}$-plane, the normalized inital orbital angular momentum vector $\hat{\bf L}_{N}$ is given by \begin{equation*} \hat{\bf L}_{N} = \begin{bmatrix} 0 \\ 0 \\ 1 \end{bmatrix} \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Lines 787--789. ``` # Normalized orbital angular momentum LNhat = np.array([0., 0., 1.]) ``` <a id='rhat'></a> ## Step 1.d: Normalized Position $\hat{\bf r}$ \[Back to [top](#toc)\] $$\label{rhat}$$ We assumed that the initial separation vector ${\bf r}$ lies along the ${\bf e}_{0}$-axis, so the normalized initial separation vector $\hat{\bf r}$ is given by \begin{equation*} \hat{\bf r} = \begin{bmatrix} 1 \\ 0 \\ 0 \end{bmatrix}. \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Lines 801--803. 
``` # Normalized position vector rhat = np.array([1., 0., 0.]) ``` <a id='vhat'></a> ## Step 1.e: Normalized Velocity $\hat{\bf v}$ \[Back to [top](#toc)\] $$\label{vhat}$$ Given normalized orbital angular momentum ($\hat{\bf L}_{N}$) and normalized position ($\hat{\bf r}$), the normalized velocity vector ($\hat{\bf v}$) is given by \begin{equation*} \hat{\bf v} = \frac{ \hat{\bf L}_{N} \times \hat{\bf r} }{ \left\lvert \hat{\bf L}_{N} \times \hat{\bf r} \right\rvert }. \end{equation*} Given $\hat{\bf L}_{N} = \begin{bmatrix} 0 \\ 0 \\ 1 \end{bmatrix}$ and $\hat{\bf r} = \begin{bmatrix} 1 \\ 0 \\ 0 \end{bmatrix}$ it is clear that $\hat{\bf v} = \begin{bmatrix} 0 \\ 1 \\ 0 \end{bmatrix}$. See LALSimIMRSpinEOBInitialConditionsPrec.c Lines 807--811. ``` # Normalized velocity vector vhat = np.array([0., 1., 0.]) ``` <a id='step1note'></a> ## Note \[Back to [top](#toc)\] $$\label{step1note}$$ Since we began assuming $\iota = 0$, we do not need to rotate $\hat{\bf r}$, $\hat{\bf v}$, $\hat{\bf L}_{N}$, ${\bf S}_{1}$, ${\bf S}_{2}$, $\hat{\bf S}_{1}$, or $\hat{\bf S}_{2}$ as is done at LALSimIMRSpinEOBInitialConditionsPrec.c Lines 840-847 (Step 1 of [BCD2006](https://arxiv.org/abs/gr-qc/0508067) Section IV A). In particular, the rotation matrix in this case is the $3\times3$ identity matrix. <a id='step2'></a> # Step 2: Compute ${\bf r}$ and ${\bf p}$ in spherical coordinates \[Back to [top](#toc)\] $$\label{step2}$$ We seek postion vector ${\bf r}$ and ${\bf p}$ assuming a spherical orbit without radiation reaction. <a id='omega'></a> ## Step 2.a: Initial orbital frequency $\omega$ \[Back to [top](#toc)\] $$\label{omega}$$ Noting that the plane of the polarization of the gravitational wave "rotates at twice the orbital rate" (see the "Effects of passing" section of [this Wikipedia article](https://en.wikipedia.org/wiki/Gravitational_wave#Effects_of_passing)), the initial orbital frequency is \begin{equation*} \omega = M \mathcal{M}_{\odot} \pi f. 
\end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 893. ``` # Omega: initial orbital angular frequency omega = M*Msol*np.pi*f ``` <a id='velocity'></a> ## Step 2.b: Initial Velocity $v$ \[Back to [top](#toc)\] $$\label{velocity}$$ <font color='red'>Is there a paper reference for this formula? Zach suggested Kepler's Laws, but a cursory look didn't reveal a convincing link.</font> \begin{equation*} v = \sqrt[3]{ \omega }. \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 894. ``` # v: initial velocity and velocity squared, since we use that quantity often v = np.cbrt(omega) vsq = v*v ``` <a id='skerr'></a> ## Step 2.c: ${\bf S}_{\rm Kerr}$ \[Back to [top](#toc)\] $$\label{skerr}$$ <font color='red'>This cell may be unecessary because we compute a in the derivatives (and spins depned on time so $a$ is time-dependent!).</font> From [BB2010](https://arxiv.org/abs/0912.3517) Equations (5.2), (5.64), and (5.67) we have \begin{equation*} {\bf S}_{\rm Kerr} = {\bf S}_{1} + {\bf S}_{2}. \end{equation*} Taking the square of [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.9), \begin{equation*} a^{2} = \frac{ {\bf S}_{\rm Kerr} \cdot {\bf S}_{\rm Kerr} }{ M^{2} } \end{equation*} so that \begin{equation*} a = \sqrt{ a^{2} }. \end{equation*} ``` # Compute S_Kerr, the spin of the deformed Kerr background # See https://arxiv.org/abs/0912.3517 Equations (5.2), (5.64), and (5.67) SKerr = np.add(S1, S2) # Normalize S_Kerr by total mass SKerr *= Msqinv # Compute a, which is a parameter in metric potentials of a Kerr spacetime # See https://arxiv.org/abs/0912.3517 Equation (4.9) asq = np.dot(SKerr,SKerr) a = np.sqrt(asq) ``` <a id='rootfinding'></a> ## Step 2.d: Root-finding \[Back to [top](#toc)\] $$\label{rootfinding}$$ We will write components of the momentum vector ${\bf p}$ in spherical coordinates with components ${\bf p}_{r}$, ${\bf p}_{\theta}$, and ${\bf p}_{\phi}$. 
In the special case in which we find ourselves, we have (see [BCD2006](https://arxiv.org/abs/gr-qc/0508067) Equations (4.7) and (4.9)): \begin{align*} {\bf r}^{\theta} &= \frac{ \pi }{ 2 } \\ {\bf r}^{\phi} &= 0 \\ {\bf p}_{r} &= 0. \end{align*} From [BCD2006](https://arxiv.org/abs/gr-qc/0508067) Equations (4.8)--(4.9), we seek to solve \begin{equation*} \begin{bmatrix} \frac{ \partial H }{ \partial {\bf r}^{r} } \\ \frac{ \partial H }{ \partial {\bf p}^{\theta} } \\ \frac{ \partial H }{ \partial {\bf p}^{\phi} } - \omega \end{bmatrix} = \begin{bmatrix} 0 \\ 0 \\ 0 \end{bmatrix}. \end{equation*} As the Hamiltonian is given in Cartesian coordinates, this requires computing $\frac{ \partial H }{ \partial {\bf r}^{0} }$, $\frac{ \partial H }{ \partial {\bf p}^{1} }$, and $\frac{ \partial H }{ \partial {\bf p}^{2} }$ and then converting to spherical coordinates. That is, using the chain rule and recalling $\phi = 0$ and $\theta = \frac{ \pi }{ 2 }$, we find \begin{align*} \frac{\partial H}{\partial {\bf r}^{r}} &= \frac{\partial H}{\partial {\bf r}^{0}} - \frac{\frac{\partial H}{\partial {\bf p}^{1}}{\bf p}^{\phi}}{\left({\bf r}^{r}\right)^{2}} + \frac{\frac{\partial H}{\partial {\bf p}^{2}}{\bf p}^{\theta}}{\left({\bf r}^{r}\right)^{2}} \\ \frac{\partial H}{\partial {\bf p}^{\theta}} &= -\frac{\frac{\partial H}{\partial {\bf p}^{2}}}{{\bf r}^{r}} \\ \frac{\partial H}{\partial {\bf p}^{\phi}} &= \frac{\frac{\partial H}{\partial {\bf p}^{1}}}{{\bf r}^{r}}. \end{align*} <font color='red'>Validation note:</font> Using input parameters \begin{equation*} {\rm -M\ 23\ -m\ 10\ -f\ 20\ -X\ 0.01\ -Y\ 0.02\ -Z\ -0.03\ -x\ 0.04\ -y\ -0.05\ -z\ 0.06} \end{equation*} in LALSuite (see LALSimIMRSpinEOBInitialConditionsPrec.c lines 1075--1077), we find that the output of the root finder is \begin{align*} {\bf q}[0] &= 2.129681018601393{\rm e}+01,\\ {\bf p}[1] &= 2.335391115414913{\rm e}-01,\\ {\bf p}[2] &= 2.780558832447243{\rm e}-06. 
\end{align*}

SEOBNRv3_pert gives

\begin{align*}
{\bf q}[0] &= 2.129680598646680{\rm e}+01,\\
{\bf p}[1] &= 2.335390056385869{\rm e}-01,\\
{\bf p}[2] &= 2.646198198972773{\rm e}-06.
\end{align*}

Note that LALSuite therefore preserves 6, 6, and 1 significant digits, respectively. This Jupyter notebook, using those same inputs, gives roots

\begin{align*}
{\bf q}[0] &= 21.29680702671223,\\
{\bf p}[1] &= 0.2335390343179406,\\
{\bf p}[2] &= 2.646124517885955{\rm e}-06.
\end{align*}

The perturbed inputs give

\begin{align*}
{\bf q}[0] &= 21.2968070267122,\\
{\bf p}[1] &= 0.23353903431794046,\\
{\bf p}[2] &= 2.6461245179212286{\rm e}-06.
\end{align*}

That is, we preserve 15, 15, and 10 significant digits, respectively. We therefore conclude that the root-finders agree to as many significant digits as possible.

```
# First run the notebook Tutorial-SEOBNR_Documentation, if that hasn't already been done
if not os.path.isfile("SEOBNR/Hamiltonian-Hreal_one_line_expressions.txt"):
    %run Tutorial-SEOBNR_Documentation.ipynb

# We need to reverse the expressions from the Hamiltonian notebook and perform CSE
with open('SEOBNR/SymPy_Hreal_on_bottom.txt', 'w') as output:
    for line in reversed(list(open("SEOBNR/Hamiltonian-Hreal_one_line_expressions.txt"))):
        output.write("%s\n" % line.rstrip())

# Check if a file of partial derivative expressions has already been generated.
# If not, generate them!
# TYLERK: revert Hamiltonian_and_derivs_playground to Hamiltonian_and_derivs after validation is complete #if not os.path.isfile("SEOBNR_Playground_Pycodes/numpy_expressions.py"): if not os.path.isfile("SEOBNR_Playground_Pycodes/newdHdx.py"): import SEOBNR.Hamiltonian_and_derivs_playground as Had Had.output_H_and_derivs() # TYLERK: For now, skip CSE (it takes a long time and we just want to validate derivatives) #import SEOBNR_Playground_Pycodes.sympy_expression as se #se.sympy_cse() from SEOBNR_Playground_Pycodes.new_dHdx import new_compute_dHdx # For testing from SEOBNR_Playground_Pycodes.new_dHdp2 import new_compute_dHdp2 # For testing from SEOBNR_Playground_Pycodes.new_dHdp3 import new_compute_dHdp3 # For testing from SEOBNR.constant_coeffs import compute_const_coeffs KK, k0, k1, k2, k3, k4, k5, k5l, dSO, dSS = compute_const_coeffs(eta,EMgamma,a) #The coefficients do agree with LALSuite! # Inital root guess root_guess = [np.divide(1,v*v), v*2, 0.001*200] # This is the same initial guess given to GSL in LALSuite, but you won't know it unless you're # careful about their scale factors (which are done and undone and done and undone...) 
# Define the function of which we want to find the roots def root_func(F): #Recompute Hamiltonian derivatives using latest minimization guess dHdx = new_compute_dHdx(m1, m2, eta, F[0], 0.0, 0.0, 0.0, F[1], F[2], S1hat[0], S1hat[1], S1hat[2], S2hat[0], S2hat[1], S2hat[2], KK, k0, k1, dSO, dSS, 2, EMgamma) dHdp2 = new_compute_dHdp2(m1, m2, eta, F[0], 0.0, 0.0, 0.0, F[1], F[2], S1hat[0], S1hat[1], S1hat[2], S2hat[0], S2hat[1], S2hat[2], KK, k0, k1, dSO, dSS, 1, EMgamma) dHdp3 = new_compute_dHdp3(m1, m2, eta, F[0], 0.0, 0.0, 0.0, F[1], F[2], S1hat[0], S1hat[1], S1hat[2], S2hat[0], S2hat[1], S2hat[2], KK, k0, k1, dSO, dSS, 1, EMgamma) return [dHdx[0]/eta+(-dHdp3[0]*F[2]/eta-dHdp2[0]*F[1]/eta)/F[0], -dHdp3[0]/F[0]/eta, dHdp2[0]/F[0]/eta-omega] # Find the roots of root_func soln = root(root_func, root_guess, args=(), method='hybr', jac=None, tol=None, callback=None) if not(soln.success): print("The root finder failed with error message: %s" % soln.message) sys.exit(1) # Populate separation (q) and momentum (p) vectors with the results of root() q = np.array([soln.x[0], 0., 0.]) p = np.array([0., soln.x[1], soln.x[2]]) ``` <a id='step3'></a> # Step 3: Rotate $\hat{\bf L} \to {\bf e}_{z}$ \[Back to [top](#toc)\] $$\label{step3}$$ <a id='step3note'></a> ## Note \[Back to [top](#toc)\] $$\label{step3note}$$ At this point, LALSimIMRSpinEOBInitialConditionsPrec.c normalizes the Cartesian separation and momentum vectors constructed in [Step 2](#step2). We already have a normalized separation vector $\hat{\bf r}$, so we skip that step. <a id='phat'></a> ## Step 3.a: Normalize ${\bf q}$ and ${\bf p}$ \[Back to [top](#toc)\] $$\label{phat}$$ Next we normalize the separation vector ${\bf q}$ and the position vector ${\bf p}$ we found in [Step 2](#step2): \begin{align*} \hat{\bf q} &= \frac{ {\bf q} }{ \left\lvert {\bf q} \right\rvert} \\ \hat{\bf p} &= \frac{ {\bf p} }{ \left\lvert {\bf p} \right\rvert}. \end{align*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1101. 
``` # Normalize the separation and momentum vectors qhat = q/norm(q) phat = p/norm(p) ``` <a id='lhat'></a> ## Step 3.b: $\hat{\bf L}$ \[Back to [top](#toc)\] $$\label{lhat}$$ We compute the normalized relativistic angular momentum vector $\hat{\bf L}$: \begin{equation*} \hat{\bf L} = \frac{ \hat{\bf r} \times \hat{\bf p} }{ \left\lvert \hat{\bf r} \times \hat{\bf p} \right\rvert }. \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Lines 1098--1100. ``` # Normalize the relativistic angular momentum vector Lhat = np.cross(rhat,phat) Lhat /= norm(Lhat) ``` <a id='rotate'></a> ## Step 3.c: Rotation matrix \[Back to [top](#toc)\] $$\label{rotate}$$ The rotation matrix from the $\left\{ \hat{\bf r}, {\bf v}, \hat{\bf L}_{N} \right\}$ frame to the $\left\{ \hat{\bf r}, {\bf p}, \hat{\bf L} \right\}$ frame is given by \begin{equation*} \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\ \hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\ \hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix}. \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1107. ``` # Rotation matrix rotate = np.array([rhat, phat, Lhat]) ``` <a id='rotaterhat'></a> ## Step 3.d: Rotate $\hat{\bf r}$ \[Back to [top](#toc)\] $$\label{rotatesrhat}$$ We now rotate $\hat{\bf r}$. We'll use primes to denote the rotated vector. \begin{equation*} \hat{\bf r}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\ \hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\ \hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} \hat{\bf r}^{0} \\ \hat{\bf r}^{1} \\ \hat{\bf r}^{2} \end{bmatrix} \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1112. ``` # Rotate the normalized separation vector rhatprm = np.dot(rotate,rhat) ``` <a id='rotatevhat'></a> ## Step 3.e: Rotate $\hat{\bf v}$ \[Back to [top](#toc)\] $$\label{rotatevhat}$$ We rotate $\hat{\bf v}$. 
We'll use primes to denote the rotated vector. \begin{equation*} \hat{\bf v}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\ \hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\ \hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} \hat{\bf v}^{0} \\ \hat{\bf v}^{1} \\ \hat{\bf v}^{2} \end{bmatrix} \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1113. ``` # Rotate the normalized velocity vector vhatprm = np.dot(rotate, vhat) ``` <a id='rotatelnhat'></a> ## Step 3.f: Rotate $\hat{\bf L}_{N}$ \[Back to [top](#toc)\] $$\label{rotatelnhat}$$ We rotate $\hat{\bf L}_{N}$. We'll use primes to denote the rotated vector. \begin{equation*} \hat{\bf L}_{N}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\ \hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\ \hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} \hat{\bf L}_{N}^{0} \\ \hat{\bf L}_{N}^{1} \\ \hat{\bf L}_{N}^{2} \end{bmatrix} \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1114. ``` # Rotate the normalized angular momentum vector LNhatprm = np.dot(rotate, LNhat) ``` <a id='rotates1'></a> ## Step 3.g: Rotate ${\bf S}_{1}$ \[Back to [top](#toc)\] $$\label{rotates1}$$ We rotate ${\bf S}_{1}$. We'll use primes to denote the rotated vector. \begin{equation*} {\bf S}_{1}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\ \hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\ \hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} {\bf S}_{1}^{0} \\ {\bf S}_{1}^{1} \\ {\bf S}_{1}^{2} \end{bmatrix} \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1115. ``` # Rotate the S1 vector S1prm = np.dot(rotate, S1) ``` <a id='rotates2'></a> ## Step 3.h: Rotate ${\bf S}_{2}$ \[Back to [top](#toc)\] $$\label{rotates2}$$ We rotate ${\bf S}_{2}$. We'll use primes to denote the rotated vector. 
\begin{equation*}
{\bf S}_{2}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\
\hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\
\hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} {\bf S}_{2}^{0} \\ {\bf S}_{2}^{1} \\ {\bf S}_{2}^{2} \end{bmatrix}
\end{equation*}

See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1116.

```
# Rotate the S2 vector
S2prm = np.dot(rotate, S2)
```

<a id='rotates1hat'></a>

## Step 3.i: Rotate $\hat{\bf S}_{1}$ \[Back to [top](#toc)\]
$$\label{rotates1hat}$$

We rotate $\hat{\bf S}_{1}$. We'll use primes to denote the rotated vector.

\begin{equation*}
\hat{\bf S}_{1}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\
\hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\
\hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} \hat{\bf S}_{1}^{0} \\ \hat{\bf S}_{1}^{1} \\ \hat{\bf S}_{1}^{2} \end{bmatrix}
\end{equation*}

See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1117.

```
# Rotate the normalized S1 vector
S1hatprm = np.dot(rotate, S1hat)
```

<a id='rotates2hat'></a>

## Step 3.j: Rotate $\hat{\bf S}_{2}$ \[Back to [top](#toc)\]
$$\label{rotates2hat}$$

We rotate $\hat{\bf S}_{2}$. We'll use primes to denote the rotated vector.

\begin{equation*}
\hat{\bf S}_{2}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\
\hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\
\hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} \hat{\bf S}_{2}^{0} \\ \hat{\bf S}_{2}^{1} \\ \hat{\bf S}_{2}^{2} \end{bmatrix}
\end{equation*}

See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1118.

```
# Rotate the normalized S2 vector
S2hatprm = np.dot(rotate, S2hat)
```

<a id='rotateq'></a>

## Step 3.k: Rotate ${\bf q}$ \[Back to [top](#toc)\]
$$\label{rotateq}$$

We rotate ${\bf q}$. We'll use primes to denote the rotated vector.
\begin{equation*} {\bf r}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\ \hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\ \hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} {\bf q}^{0} \\ {\bf q}^{1} \\ {\bf q}^{2} \end{bmatrix} \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1119. ``` # Rotate the separation vector rprm = np.dot(rotate,q) ``` <a id='rotatep'></a> ## Step 3.l: Rotate ${\bf p}$ \[Back to [top](#toc)\] $$\label{rotatep}$$ We rotate ${\bf p}$. We'll use primes to denote the rotated vector. \begin{equation*} {\bf p}^{\prime} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf r}^{1} & \hat{\bf r}^{2} \\ \hat{\bf p}^{0} & \hat{\bf p}^{1} & \hat{\bf p}^{2} \\ \hat{\bf L}^{0} & \hat{\bf L}^{1} & \hat{\bf L}^{2}\end{bmatrix} \begin{bmatrix} {\bf p}^{0} \\ {\bf p}^{1} \\ {\bf p}^{2} \end{bmatrix} \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1120. ``` # Rotate the momentum vector pprm = np.dot(rotate, p) ``` <a id='step4'></a> # Step 4: Compute $\dot{\bf r}$ \[Back to [top](#toc)\] $$\label{step4}$$ <a id='carttosph'></a> ## Step 4.a: Convert from Cartesian to Spherical Coordinates \[Back to [top](#toc)\] $$\label{carttosph}$$ We convert position and momentum into spherical coordinates. 
In the special case where $\theta = \frac{ \pi }{ 2 }$ and $\phi = 0$, the spherical position vector ${\bf r} = \left( {\bf r}^{r}, {\bf r}^{\theta}, {\bf r}^{\phi} \right)$ is given by \begin{align*} {\bf r}^{r} &= {\bf r}^{0} \\ {\bf r}^{\theta} &= \frac{ \pi }{ 2 } \\ {\bf r}^{\phi} &= 0 \end{align*} and the spherical momentum vector ${\bf p} = \left( {\bf p}^{r}, {\bf p}^{\theta}, {\bf p}^{\phi} \right)$ is given by \begin{align*} {\bf p}^{r} &= {\bf p}^{0} \\ {\bf p}^{\theta} &= - {\bf r}^{0}{\bf p}^{2} \\ {\bf p}^{\phi} &= {\bf r}^{0}{\bf p}^{1} \\ \end{align*} LALSuite calls a Cartesian to spherical routine at LALSimIMRSpinEOBInitialConditionsPrec.c Line 1139, and the function itself is defined on Lines 243--285. ``` # Convert the separation vector from Cartesian to spherical coordinates r = np.array([rprm[0], np.divide(np.pi,2.), 0.]) psph = np.array([pprm[0], -rprm[0]*pprm[2], rprm[0]*pprm[1]]) ``` <a id='secondderiv'></a> ## Step 4.b: Second partial derivatives of $H_{\rm real}$ \[Back to [top](#toc)\] $$\label{seconderiv}$$ In order to include effects of radiation reaction, we need to compute [BCD2006](https://arxiv.org/abs/gr-qc/0508067) Equation (4.14). This requires that we compute $\frac{ \partial H }{ \partial {\bf p}^{\phi} }$, $\frac{ \partial^{2} H_{\rm real} }{ \partial r^{2} }$, and $\frac{ \partial^{2} H_{\rm real} }{ \partial r \partial {\bf p}^{\phi} }$. Recall that we are in the special case ${\bf r}^{\theta} = \frac{\pi}{2}$, ${\bf r}^{\phi} = 0$, so that conversion from Cartesian to spherical coordinates is given by the relations in [Step 4.a](#carttosph). 
Then \begin{align*} \partial {\bf r}^{r} &= \partial {\bf r}^{0} \\ \partial {\bf p}^{r} &= \partial {\bf p}^{0} \\ \frac{ \partial {\bf p}^{z} }{ \partial {\bf r}^{r} } &= \frac{ {\bf p}^{\theta} }{ \left( {\bf r}^{r} \right)^{2} } \\ \frac{ \partial {\bf p}^{z} }{ \partial {\bf p}^{\theta} } &= -\frac{ 1 }{ {\bf r}^{r} } \\ \frac{ \partial {\bf p}^{y} }{ \partial {\bf r}^{r} } &= -\frac{ {\bf p}^{\phi} }{ \left( {\bf r}^{r} \right)^{2} } \\ \frac{ \partial {\bf p}^{y} }{ \partial {\bf p}^{\phi} } &= \frac{ 1 }{ {\bf r}^{r} }. \end{align*} It follows that \begin{equation*} \frac{ \partial H }{ \partial {\bf r}^{r} } = \frac{ \partial H }{ \partial {\bf r}^{x} } \frac{ \partial {\bf r}^{0} }{ \partial {\bf r}^{r} } + \frac{ \partial H }{ \partial {\bf p}^{1} } \frac{ \partial {\bf p}^{1} }{ \partial {\bf r}^{r} } + \frac{ \partial H }{ \partial {\bf p}^{2} } \frac{ \partial {\bf p}^{2} }{ \partial {\bf r}^{r} } = \frac{ \partial H }{ \partial {\bf r}^{0} } - \frac{ \partial H }{ \partial {\bf p}^{1} } \frac{ {\bf p}^{\phi} }{ \left( {\bf r}^{r} \right)^{2} } + \frac{ \partial H }{ \partial {\bf p}^{2} } \frac{ {\bf p}^{\theta} }{ \left( {\bf r}^{r} \right)^{2} }, \end{equation*} from which it follows that \begin{align*} \frac{\partial^2 H}{\partial \left( {\bf r}^r \right)^2 } &= \frac{\partial^2 H}{\partial \left( {\bf r}^x \right)^2 } + \frac{\partial^2 H}{\partial \left( {\bf p}^z \right)^2 } \cdot \frac{ \left( {\bf p^\theta } \right)^2 }{ \left( {\bf r}^r \right)^4 } - \frac{\partial^2 H}{\partial {\bf p}^y \partial {\bf p}^z } \cdot \frac{ 2 {\bf p}^\theta * {\bf p}^\phi }{ {\bf r}^4 } + \frac{\partial^2 H}{\partial \left( {\bf p}^y \right)^2 } \cdot \frac{ \left( {\bf p^\phi } \right)^2 }{ \left( {\bf r}^r \right)^4 } + \frac{\partial^2 H}{\partial {\bf r}^x \partial {\bf p}^y } \cdot \frac{ 2 {\bf p}^\phi }{ {\bf r}^2 } \\ \frac{\partial^2 H}{\partial {\bf r}^r \partial {\bf p}^\phi } &= \frac{\partial^2 H}{\partial {\bf p}^y \partial {\bf p}^z } \cdot 
\frac{ {\bf p}^\theta }{ \left( {\bf r}^r \right)^3 } - \frac{\partial^2 H}{\partial \left( {\bf p}^y \right)^2 } \cdot \frac{ {\bf p}^\phi }{ \left( {\bf r}^r \right)^3 } + \frac{\partial^2 H}{\partial {\bf r}^x \partial {\bf p}^y } \cdot \frac{ 1 }{ {\bf r}^r } \end{align*} <font color='red'>Note: be sure that, following this, we use normalized spins.</font> ``` ## Import second derivatives of H from another function/routine #if not os.path.isfile("SEOBNR_Playground_Pycodes/d2Hdx2.py"): # print("I'M BEING BAD") ## import SEOBNR.Hamiltonian_second_derivs_playground as Hsd ## Hsd.output_H_sec_derivs() # import SEOBNR.Hamiltonian_second_derivs_x_playground as Hsd # Hsd.output_H_sec_derivs() #from SEOBNR_Playground_Pycodes.d2Hdx2 import compute_d2Hdx2 # For testing ##d2Hdx2 = compute_d2Hdx2(m1, m2, eta, 2.129681018601393e+01, 0.0, 0.0, ## 0.0, 2.335391115580442e-01, -4.235164736271502e-22, ## S1[0], S1[1], S1[2], ## S2[0], S2[1], S2[2], KK, k0, k1, dSO, dSS, 2, EMgamma) ##d2Hdx2 = compute_d2Hdx2(m1, m2, eta, 2.129681018601393e+01, 0.0, 0.0, ## 0.0, 2.335391115580442e-01, -4.235164736271502e-22, ## S1hat[0], S1hat[1], S1hat[2], ## S2hat[0], S2hat[1], S2hat[2], KK, k0, k1, dSO, dSS, 0, EMgamma) #d2Hdx2 = compute_d2Hdx2(m1, m2, eta, 2.129680018601393e+01, 0.000000000000000e+00, 0.000000000000000e+00, # 0.000000000000000e+00, 2.335392212172933e-01, -4.235166724910499e-22, # 4.857667584940312e-03, 9.715161660389764e-03, -1.457311842632286e-02, # 3.673094582185491e-03, -4.591302628615413e-03, 5.509696538546906e-03, KK, k0, k1, dSO, dSS, 1, EMgamma) #dHdx = new_compute_dHdx(m1, m2, eta, 2.129680018601393e+01, 0.000000000000000e+00, 0.000000000000000e+00, # 0.000000000000000e+00, 2.335392212172933e-01, -4.235166724910499e-22, # 4.857667584940312e-03, 9.715161660389764e-03, -1.457311842632286e-02, # 3.673094582185491e-03, -4.591302628615413e-03, 5.509696538546906e-03, KK, k0, k1, dSO, dSS, 1, EMgamma) #dHdpy = new_compute_dHdp2(m1, m2, eta, 2.129680018601393e+01, 
0.000000000000000e+00, 0.000000000000000e+00, # 0.000000000000000e+00, 2.335392212172933e-01, -4.235166724910499e-22, # 4.857667584940312e-03, 9.715161660389764e-03, -1.457311842632286e-02, # 3.673094582185491e-03, -4.591302628615413e-03, 5.509696538546906e-03, KK, k0, k1, dSO, dSS, 1, EMgamma) #dHdpz = new_compute_dHdp3(m1, m2, eta, 2.129680018601393e+01, 0.000000000000000e+00, 0.000000000000000e+00, # 0.000000000000000e+00, 2.335392212172933e-01, -4.235166724910499e-22, # 4.857667584940312e-03, 9.715161660389764e-03, -1.457311842632286e-02, # 3.673094582185491e-03, -4.591302628615413e-03, 5.509696538546906e-03, KK, k0, k1, dSO, dSS, 1, EMgamma) #print("dHdx = %.15e" % (dHdx/eta)) #print("dHdpy = %.15e" % (dHdpy/eta)) #print("dHdpz = %.15e" % (dHdpz/eta)) #print("r = %.15e" % r[0]) #print("ptheta = %.15e" % psph[1]) #print("pphi = %.15e" % psph[2]) #print("dHdr is %.15e" % (dHdx - dHdpy*psph[2]/r[0]/r[0] + dHdpz*psph[1]/r[0]/r[0])) #print("dHdr term2 is %.15e" % (dHdpy*psph[2]/r[0]/r[0])) #print("dHdr term3 is %.15e" % (dHdpz*psph[1]/r[0]/r[0])) #print("d2Hdx2 = %.15e" % (d2Hdx2/eta)) #sys.exit(1) # Might as well exit here for now. ``` <a id='dedr'></a> ## Step 4.c: $\frac{ \partial E }{ \partial r }$ \[Back to [top](#toc)\] $$\label{dedr}$$ We seek to compute $\frac{ \partial H }{\partial r}$, and [BCD2006](https://arxiv.org/abs/gr-qc/0508067) uses the convention $H \equiv E$. (see [BCD2006](https://arxiv.org/abs/gr-qc/0508067) Equation (3.7)). From [BCD2006](https://arxiv.org/abs/gr-qc/0508067) Equation Equation (4.14) (noting that this equation applies in spherical coordinates when ${\bf r}$ is directed along the ${\bf e}_{0}$ axis), \begin{equation*} \frac{ \partial E }{ \partial r } = -\frac{ \frac{ \partial H }{ \partial {\bf p}^{\phi} } \frac{ \partial^{2} H }{ \left(\partial {\bf r}^{r} \right)^{2} } }{ \frac{ \partial^{2} H }{ \partial {\bf r}^{r} \partial {\bf p}^{\phi} } }. 
\end{equation*} ``` ## Time derivative of Hamiltonain with respect to separation magnitude r #dEdr = -dHdpphi*d2Hdr2/d2Hdrdpphi ``` <a id='sigmastar'></a> ## Step 4.e: $\boldsymbol{\sigma}^{*}$ \[Back to [top](#toc)\] $$\label{sigmastar}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.3), \begin{equation*} \boldsymbol{\sigma}^{*} = \frac{ m_{2} }{ m_{1} } {\bf S}_{1} + \frac{ m_{1} }{ m_{2} }{\bf S}_{2}. \end{equation*} ``` ## Spin combination sigmastar #sigmastar = np.add(np.divide(m2,m1)*S1, np.divide(m1,m2)*S2) ``` <a id='hreal'></a> ## Step 4.f: $H_{\rm real}$ \[Back to [top](#toc)\] $$\label{hreal}$$ We now compute $H_{\rm real}$ (LALSimIMRSpinEOBInitialConditionsPrec.c Line 1217). To do so, we need to restructure the output of Tutorial-SEOBNR_Documentation by first making sure each expression is on a single line and then reversing the lines. ``` #import SEOBNR_Playground_Pycodes.Hreal_on_bottom as Ham ##All inputs agree with LALSuite ##eta, KK, tortoise, and dSO all agree with LALSuite to 16 significant digits ##Hard-code other inputs so we know they agree exactly with LALSuite ##LALSuite command used: ./lalsimulation/src/lalsim-inspiral -a SEOBNRv3 -M 23 -m 10 -f 20 -X 0.01 -Y 0.02 -Z -0.03 -x 0.04 -y -0.05 -z 0.06 #Hreal = Ham.compute_Hreal(m1=m1, m2=m2, EMgamma=EMgamma, tortoise=1, dSO=dSO, dSS=dSS, # x=rprm[0], y=rprm[1], z=rprm[2], p1=pprm[0], p2=pprm[1], p3=pprm[2], # S1x=S1hatprm[0], S1y=S1hatprm[1], S1z=S1hatprm[2], # S2x=S2hatprm[0], S2y=S2hatprm[1], S2z=S2hatprm[2]) #print(Hreal)#TylerK ##Hreal = Ham.compute_Hreal(m1, m2, EMgamma, 1, dSO, dSS, ## 2.129681018601393e+01, 0.000000000000000e+00, 0.000000000000000e+00, ## 0.000000000000000e+00, 2.335391115580442e-01, -4.235164736271502e-22, ## 4.857667584940312e-03, 9.715161660389764e-03, -1.457311842632286e-02, ## 3.673094582185491e-03, -4.591302628615413e-03, 5.509696538546906e-03) ##Temporary validation code block: all hard-coded values from LALSuite! 
#Hreal_valid = Ham.compute_Hreal(m1=23., m2=10., EMgamma=EMgamma, tortoise=1, # dSO=-7.966696593617955e+01, dSS=1.261873764525631e+01, # x=2.129681018601393e+01, y=0.000000000000000e+00, z=0.000000000000000e+00, # p1=0.000000000000000e+00, p2=2.335391115580442e-01, p3=-4.235164736271502e-22, # S1x=4.857667584940312e-03, S1y=9.715161660389764e-03, S1z=-1.457311842632286e-02, # S2x=3.673094582185491e-03, S2y=-4.591302628615413e-03, S2z=5.509696538546906e-03) #print(Hreal_valid)#TylerK #Hreal_valid = Ham.compute_Hreal() #print(Hreal_valid)#TylerK #if(np.abs(Hreal_valid-9.952429072947245e-01)>1e-14): # print("ERROR. You have broken the Hamiltonian computation!") # sys.exit(1) ``` <a id='polardata'></a> ## Polar data \[Back to [top](#toc)\] $$\label{polardata}$$ At LALSimIMRSpinEOBInitialConditionsPrec.c Lines 1234--1238, we set the following polar data ${\bf P}$: \begin{align*} {\bf P}^{0} &= {\bf r}^{r} \\ {\bf P}^{1} &= 0 \\ {\bf P}^{2} &= {\bf p}^{r} \\ {\bf P}^{3} &= {\bf p}^{\phi} \end{align*} ``` ## Populate a vector of polar coordinate values #polar = np.array([r[0], 0., psph[0], psph[1]]) ``` <a id='vphikepler'></a> ## vPhiKepler \[Back to [top](#toc)\] $$\label{vphikepler}$$ From [T2012](https://arxiv.org/abs/1202.0790) Equation (A2), \begin{equation*} {\rm vPhiKepler} = \frac{ 1 }{ \omega^{2} \left( {\bf r}^{r} \right)^{3} }. \end{equation*} See LALSimIMRSpinEOBFactorizedWaveformPrec_v3opt.c Lines 113 and 1271--1315. <font color='red'>Note that SEOBNRv3_opt recalculates $\omega$, but I think the $\omega$ above is based on a circular orbit and therefore the recalcuation is unnecessary.</font> ``` ## Keplarian velocity #vPhiKepler = 1./(omega*omega*r[0]*r[0]*r[0]) ``` <a id='rcrossp'></a> ## ${\bf r} \times {\bf p}$ \[Back to [top](#toc)\] $$\label{rcrossp}$$ We'll use the notation \begin{equation*} {\rm rcrossp} = {\bf r}^{\prime} \times {\bf p}^{\prime}. \end{equation*} See LALSimIMRSpinEOBFactorizedWaveformPrec_v3opt.c Lines 170--172. 
``` ## r cross p #rcrossp = np.cross(rprm,pprm) ``` <a id='vphi'></a> ## vPhi \[Back to [top](#toc)\] $$\label{vphi}$$ We'll use the notation (<font color='red'> paper reference?</font>) \begin{equation*} {\rm vPhi} = \omega {\bf r}^{r} \sqrt[3]{\rm vPhiKepler}. \end{equation*} See LALSimIMRSpinEOBFactorizedWaveformPrec_v3opt.c Lines 185 and 190. ``` ## Keplarian velocity #vPhi = omega*r[0]*np.cbrt(vPhiKepler) ``` <a id='sidot'></a> ## ${\bf S}_{i} \cdot \hat{\bf L}$ \[Back to [top](#toc)\] $$\label{sidotl}$$ We compute ${\bf S}_{1} \cdot \hat{\bf L}$ and ${\bf S}_{2} \cdot \hat{\bf L}$. See LALSimIMRSpinEOBFactorizedFluxPrec_v3opt.c lines 131--134. ``` ## S dot L #s1dotL = np.dot(S1,Lhat) #s2dotL = np.dot(S2,Lhat) ``` <a id='chii'></a> ## $\boldsymbol{\chi}_{\rm S}$, $\boldsymbol{\chi}_{\rm A}$ \[Back to [top](#toc)\] $$\label{chii}$$ From [P2014](https://arxiv.org/abs/1307.6232) Equation 17, we have \begin{align*} \chi_{\rm S} = \frac{1}{2} \left( {\bf S}_{1} + {\bf S}_{2} \right) \cdot \hat{\bf L} \\ \chi_{\rm A} = \frac{1}{2} \left( {\bf S}_{1} - {\bf S}_{2} \right) \cdot \hat{\bf L} \end{align*} ``` ## Spin combinations chiS and chiA #chiS = 0.5*(s1dotL + s2dotL) #chiA = 0.5*(s1dotL - s2dotL) ``` <a id='mihat'></a> ## $\hat{m}_{i}$ \[Back to [top](#toc)\] $$\label{mihat}$$ We scale the masses $m_{1}$, $m_{2}$ by total mass. See LALSimIMREOBNewtonianMultipole.c Lines 540--541. \begin{align*} \hat{m}_{1} = \frac{ m_{1} }{ M } \\ \hat{m}_{2} = \frac{ m_{2} }{ M } \\ \end{align*} ``` ## Normalized mass #mhat1 = m1*Minv #mhat2 = m2*Minv ``` <a id='newtonianmultipole'></a> ## Newtonian multipole \[Back to [top](#toc)\] $$\label{newtonianmultipole}$$ The Newtonian multipolar waveform is given in [DIN2009](https://arxiv.org/abs/0811.2069) Equation (4). For a given $(\ell, m)$ we define \begin{align*} \epsilon &= \left( \ell + m \right) {\rm mod } 2 \\ n &= \left( i m \right)^{\ell} \frac{ 8 \pi }{ \left( 2 \ell + 1 \right)!! 
} \sqrt{ \frac{ \left( \ell + 1 \right) \left( \ell + 2 \right) }{ \ell \left( \ell - 1 \right) } } \end{align*} along with the associated Legendre function evaluated at zero. See LALSimIMRSpinEOBFactorizedWaveformPrec_v3opt.c Line 206 and LALSimIMREOBNewtonianMultipole.c Lines 205, 210, 290, and 309--506. ``` ## Compute Newtonian multipole ## Compute the associated Legendre function of degree l and order m at x=0 #def AssociatedLegendre(l,m): # if l==1: # if m==1: # return -1. # else: # print("You used a bad (l,m)") # if l==2: # if m==2: # return 3. # elif m==1: # return 0. # else: # print("You used a bad (l,m)") # if l==3: # if m==3: # return 15. # elif m==2: # return 0. # elif m==1: # return 1.5 # else: # print("You used a bad (l,m)") # if l==4: # if m==4: # return 105. # elif m==3: # return 0. # elif m==2: # return -7.5 # elif m==1: # return 0. # else: # print("You used a bad (l,m)") # if l==5: # if m==5: # return -945. # elif m==4: # return 0. # elif m==3: # return 52.5 # elif m==2: # return 0. # elif m==1: # return -1.875 # else: # print("You used a bad (l,m)") # if l==6: # if m==6: # return 10395. # elif m==5: # return 0. # elif m==4: # return -472.5 # elif m==3: # return 0. # elif m==2: # return 13.125 # elif m==1: # return 0. # else: # print("You used a bad (l,m)") # if l==7: # if m==7: # return -135135. # elif m==6: # return 0. # elif m==5: # return 5197.5 # elif m==4: # return 0. # elif m==3: # return -118.125 # elif m==2: # return 0. # elif m==1: # return 2.1875 # else: # print("You used a bad (l,m)") # if l==8: # if m==8: # return 2027025. # elif m==7: # return 0. # elif m==6: # return -67567.5 # elif m==5: # return 0. # elif m==4: # return 1299.375 # elif m==3: # return 0. # elif m==2: # return -19.6875 # elif m==1: # return 0. 
# else: # print("You used a bad (l,m)") ## Compute the prefix for the Newtonian multipole #def NewtonianPrefix(m1,m2,l,m,epsilon,eta): # Mtot = m1 + m2 # m1hat = np.divide(m1,Mtot) # m2hat = np.divide(m2,Mtot) # if (m%2)==0: # sign = 1 # else: # sign = -1 # lpepm1 = l + epsilon - 1 # if (m1!=m2) or sign==1: # c = np.power(m2hat,lpepm1) + sign*np.power(m1hat,lpepm1) # else: # if l==2 or l==3: # c = -1. # elif l==4 or l==5: # c = -0.5 # else: # c = 0. # n = np.power(complex(0,m), l) # doubfact = doublefactorial(2*l+1) # if epsilon==0: # n *= 8.*np.divide(np.pi,doubfact) # n *= np.sqrt(np.divide((l+1)*(l+2),l*(l-1))) # elif epsilon==1: # n = -n # n *= 16.j*np.divide(np.pi,doubfact) # n *= np.sqrt( np.divide((2*l+1)* (l+2) * (l*l - m*m),(2*l - 1) * (l+1) * l * (l-1)) ) # else: # print("Epsilon must be 0 or 1") # exit() # return n*eta*c ## Function to compute a double factorial; see https://en.wikipedia.org/wiki/Double_factorial #def doublefactorial(n): # if n <= 0: # return 1 # else: # return n * doublefactorial(n-2) ``` <a id='hlmtab'></a> ## hLMTab \[Back to [top](#toc)\] $$\label{hlmtab}$$ In order to compute flux, we need to build the matrix "hLMTab". See [T2012](https://arxiv.org/abs/1202.0790) Equation (17) and the Appendix, along with [this private LIGO doc](https://dcc.ligo.org/T1400476). ``` ## The following populates a matrix T_{lm} of resummed leading-order logarithms of tail effects #deltam = np.divide(m1 - m2,m1 + m2) #flux = 0. 
#fa1 = interp1d(nqi.domain, nqi.a1Range, kind='cubic') #fa2 = interp1d(nqi.domain, nqi.a2Range, kind='cubic') #fa3 = interp1d(nqi.domain, nqi.a3Range, kind='cubic') #fb1 = interp1d(nqi.domain, nqi.b1Range, kind='cubic') #fb2 = interp1d(nqi.domain, nqi.b2Range, kind='cubic') #a1 = fa1(eta) #a2 = fa2(eta) #a3 = fa3(eta) #b1 = -fb1(eta) #b2 = -fb2(eta) #fa3sAmax = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amaxa3sVal, kind='cubic') #fa4Amax = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amaxa4Val, kind='cubic') #fa5Amax = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amaxa5Val, kind='cubic') #fb3Amax = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amaxb3Val, kind='cubic') #fb4Amax = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amaxb4Val, kind='cubic') #fa3sAmed = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Ameda3sVal, kind='cubic') #fa4Amed = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Ameda4Val, kind='cubic') #fa5Amed = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Ameda5Val, kind='cubic') #fb3Amed = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amedb3Val, kind='cubic') #fb4Amed = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amedb4Val, kind='cubic') #fa3sAmin = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amina3sVal, kind='cubic') #fa4Amin = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amina4Val, kind='cubic') #fa5Amin = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Amina5Val, kind='cubic') #fb3Amin = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Aminb3Val, kind='cubic') #fb4Amin = interp2d(nqi.aDomain, nqi.etaDomain, nqi.Aminb4Val, kind='cubic') #chiAmaxCoeffs = [fa3sAmax(a,eta), fa4Amax(a,eta), fa5Amax(a,eta), fb3Amax(a,eta), fb4Amax(a,eta)] #chiAmedCoeffs = [fa3sAmed(a,eta), fa4Amed(a,eta), fa5Amed(a,eta), fb3Amed(a,eta), fb4Amed(a,eta)] #chiAminCoeffs = [fa3sAmin(a,eta), fa4Amin(a,eta), fa5Amin(a,eta), fb3Amin(a,eta), fb4Amin(a,eta)] #chi = a/(1. - 2.*eta) #if eta < 1.0e-15: # chiAmax = np.divide(chi + 1.,2.) # chiAmin = np.divide(chi - 1.,2.) #else: # if chi <= 0: # chiAmax = (1. + chi)*(1. 
- 2.*eta)/(1.+ deltam - 2.*eta) # if (1. + deltam - 2.*eta + 2.*chi*(1. - 2.*eta))/(1. - deltam - 2.*eta) < 1.: # chiAmin = -(1. + chi)*(1. - 2.*eta)/(1. - deltam - 2.*eta) # else: # chiAmin = -(1. - chi)*(1. - 2.*eta)/(1. + deltam - 2.*eta) # else: # chiAmin = -(1. - chi)*(1. - 2.*eta)/(1. + deltam - 2.*eta) # if -(1. + deltam - 2.*eta - 2.*chi*(1. - 2.*eta))/(1. - deltam - 2.*eta) > -1.: # chiAmax = (1. - chi)*(1. - 2.*eta)/(1. - deltam - 2.*eta) # else: # chiAmax = (1. + chi)*(1. - 2.*eta)/(1. + deltam - 2.*eta) #chiAmed = np.divide(chiAmax + chiAmin,2.) #if chiAmax < 1.0e-15: # cmax = 1.0 # cmed = 0.0 # cmin = 0.0 #else: # cmax = (chiA - chiAmed)*(chiA - chiAmin)/(chiAmax - chiAmed)/(chiAmax - chiAmin) # cmed = -(chiA - chiAmax)*(chiA - chiAmin)/(chiAmax - chiAmed)/(chiAmed - chiAmin) # cmin = (chiA - chiAmax)*(chiA - chiAmed)/(chiAmax - chiAmin)/(chiAmed - chiAmin) #nqcmax = chiAmaxCoeffs[0] #nqcmed = chiAmedCoeffs[0] #nqcmin = chiAminCoeffs[0] #a3S = cmax*nqcmax + cmed*nqcmed + cmin*nqcmin #nqcmax = chiAmaxCoeffs[1] #nqcmed = chiAmedCoeffs[1] #nqcmin = chiAminCoeffs[1] #a4 = cmax*nqcmax + cmed*nqcmed + cmin*nqcmin #nqcmax = chiAmaxCoeffs[2] #nqcmed = chiAmedCoeffs[2] #nqcmin = chiAminCoeffs[2] #a5 = cmax*nqcmax + cmed*nqcmed + cmin*nqcmin #nqcmax = chiAmaxCoeffs[3] #nqcmed = chiAmedCoeffs[3] #nqcmin = chiAminCoeffs[3] #b3 = cmax*nqcmax + cmed*nqcmed + cmin*nqcmin #nqcmax = chiAmaxCoeffs[4] #nqcmed = chiAmedCoeffs[4] #nqcmin = chiAminCoeffs[4] #b4 = cmax*nqcmax + cmed*nqcmed + cmin*nqcmin #rsq = polar[0]*polar[0] #sqrtr = np.sqrt(polar[0]) #prsq = polar[2]*polar[2] #mag = 1. 
+ (prsq/(rsq*omega*omega))*(a1 + a2/polar[0] + (a3 + a3S)/(polar[0]*sqrtr) + a4/rsq + a5/(rsq*sqrtr)) #phase = b1*polar[2]/(polar[0]*omega) + prsq*polar[2]/(polar[0]*omega)*(b2 + b3/sqrtr + b4/polar[0]) #nqc = complex(mag*np.cos(phase),0) #nqc += complex(0,mag*np.sin(phase)) #import factorized_modes as fm #for l in range(2, 9): # for m in range(1, l+1): # epsilon = (l + m) % 2 # legendre = AssociatedLegendre(l-epsilon,m)*np.sqrt(2*l+1*np.divide(np.math.factorial(l-m),4)*np.pi*np.math.factorial(l+m)) # #Note that LALSimIMREOBNewtonianMultipole.c Line 74 atrributes the # #Newtonian prefix calculations to https://arxiv.org/abs/1106.1021v2 # prefix = NewtonianPrefix(m1,m2,l,m,epsilon,eta) # multipole = prefix*legendre*np.power(vPhi*vPhi,(l+epsilon)/2.) # if ((l+m)%2)==0: # Slm = (Hreal*Hreal - 1.)/(2.*eta) + 1. # else: # Slm = v*psph[2] # eulerlog = EMgamma + np.log(2.*m*v) # k = m*omega # Hrealk = Hreal * k # Hrealksq4 = 4. * Hrealk*Hrealk # Hrealk4pi = 4. * np.pi *Hrealk # Tlmprefac = np.sqrt(Hrealk4pi/(1.-np.exp(-Hrealk4pi)))/np.math.factorial(l) # Tlmprodfac = 1. # for i in range(1,l+1): # Tlmprodfac *= Hrealksq4 + (i*i) # Tlm = Tlmprefac*np.sqrt(Tlmprodfac) # auxflm = 0. # if l==2: # if m==2: # rholm = 1 + vsq * (fm.rho22v2 + v*(fm.rho22v3 + v*(fm.rho22v4 + v*(fm.rho22v5 + v*(fm.rho22v6 # + fm.rho22v6l*eulerlog + v*(fm.rho22v7 + v*(fm.rho22v8 + fm.rho22v8l*eulerlog # + (fm.rho22v10 + fm.rho22v10l*eulerlog)*vsq))))))) # elif m==1: # rholm = 1. + v * (fm.rho21v1 + v*(fm.rho21v2 + v*(fm.rho21v3 + v*(fm.rho21v4 + v*(fm.rho21v5 # + v*(fm.rho21v6 + fm.rho21v6l*eulerlog + v*(fm.rho21v7 + fm.rho21v7l*eulerlog # + v*(fm.rho21v8 + fm.rho21v8l*eulerlog + (fm.rho21v10 + fm.rho21v10l*eulerlog)*vsq)))))))) # auxflm = v*fm.f21v1 + vsq*v*fm.f21v3 # else: # print("You used a bad (l,m)") # elif l==3: # if m==3: # rholm = 1. 
+ vsq*(fm.rho33v2 + v*(fm.rho33v3 + v*(fm.rho33v4 + v*(fm.rho33v5 + v*(fm.rho33v6 # + fm.rho33v6l*eulerlog + v*(fm.rho33v7 + (fm.rho33v8 + fm.rho33v8l*eulerlog)*v)))))) # auxflm = v*vsq*fm.f33v3; # elif m==2: # rholm = 1. + v*(fm.rho32v + v*(fm.rho32v2 + v*(fm.rho32v3 + v*(fm.rho32v4 + v*(fm.rho32v5 # + v*(fm.rho32v6 + fm.rho32v6l*eulerlog + (fm.rho32v8 + fm.rho32v8l*eulerlog)*vsq)))))) # elif m==1: # rholm = 1. + vsq*(fm.rho31v2 + v*(fm.rho31v3 + v*(fm.rho31v4 + v*(fm.rho31v5 + v*(fm.rho31v6 # + fm.rho31v6l*eulerlog + v*(fm.rho31v7 + (fm.rho31v8 + fm.rho31v8l*eulerlog)*v)))))) # auxflm = v*vsq*fm.f31v3 # else: # print("You used a bad (l,m)") # elif l==4: # if m==4: # rholm = 1. + vsq*(fm.rho44v2 + v*(fm.rho44v3 + v*(fm.rho44v4 + v*(fm.rho44v5 + (fm.rho44v6 # + fm.rho44v6l*eulerlog)*v)))) # elif m==3: # rholm = 1. + v*(fm.rho43v + v*(fm.rho43v2 + vsq*(fm.rho43v4 + v*(fm.rho43v5 + (fm.rho43v6 # + fm.rho43v6l*eulerlog)*v)))) # auxflm = v*fm.f43v # elif m==2: # rholm = 1. + vsq*(fm.rho42v2 + v*(fm.rho42v3 + v*(fm.rho42v4 + v*(fm.rho42v5 + (fm.rho42v6 # + fm.rho42v6l*eulerlog)*v)))) # elif m==1: # rholm = 1. + v*(fm.rho41v + v*(fm.rho41v2 + vsq*(fm.rho41v4 + v*(fm.rho41v5 + (fm.rho41v6 # + fm.rho41v6l*eulerlog)*v)))) # auxflm = v*fm.f41v # else: # print("You used a bad (l,m)") # elif l==5: # if m==5: # rholm = 1. + vsq*(fm.rho55v2 + v*(fm.rho55v3 + v*(fm.rho55v4 + v*(fm.rho55v5 + fm.rho55v6*v)))) # elif m==4: # rholm = 1. + vsq*(fm.rho54v2 + v*(fm.rho54v3 + fm.rho54v4*v)) # elif m==3: # rholm = 1. + vsq*(fm.rho53v2 + v*(fm.rho53v3 + v*(fm.rho53v4 + fm.rho53v5*v))) # elif m==2: # rholm = 1. + vsq*(fm.rho52v2 + v*(fm.rho52v3 + fm.rho52v4*v)) # elif m==1: # rholm = 1. + vsq*(fm.rho51v2 + v*(fm.rho51v3 + v*(fm.rho51v4 + fm.rho51v5*v))) # else: # print("You used a bad (l,m)") # elif l==6: # if m==6: # rholm = 1. + vsq*(fm.rho66v2 + v*(fm.rho66v3 + fm.rho66v4*v)) # elif m==5: # rholm = 1. + vsq*(fm.rho65v2 + fm.rho65v3*v) # elif m==4: # rholm = 1. 
+ vsq*(fm.rho64v2 + v*(fm.rho64v3 + fm.rho64v4*v)) # elif m==3: # rholm = 1. + vsq*(fm.rho63v2 + fm.rho63v3*v) # elif m==2: # rholm = 1. + vsq*(fm.rho62v2 + v*(fm.rho62v3 + fm.rho62v4*v)) # elif m==1: # rholm = 1. + vsq*(fm.rho61v2 + fm.rho61v3*v) # else: # print("You used a bad (l,m)") # elif l==7: # if m==7: # rholm = 1. + vsq*(fm.rho77v2 + fm.rho77v3*v) # elif m==6: # rholm = 1. + fm.rho76v2*vsq # elif m==5: # rholm = 1. + vsq*(fm.rho75v2 + fm.rho75v3*v) # elif m==4: # rholm = 1. + fm.rho74v2*vsq # elif m==3: # rholm = 1. + vsq*(fm.rho73v2 + fm.rho73v3*v) # elif m==2: # rholm = 1. + fm.rho72v2*vsq # elif m==1: # rholm = 1. + vsq*(fm.rho71v2 + fm.rho71v3*v) # else: # print("You used a bad (l,m)") # elif l==8: # if m==8: # rholm = 1. + fm.rho88v2*vsq # elif m==7: # rholm = 1. + fm.rho87v2*vsq # elif m==6: # rholm = 1. + fm.rho86v2*vsq # elif m==5: # rholm = 1. + fm.rho85v2*vsq # elif m==4: # rholm = 1. + fm.rho84v2*vsq # elif m==3: # rholm = 1. + fm.rho83v2*vsq # elif m==2: # rholm = 1. + fm.rho82v2*vsq # elif m==1: # rholm = 1. + fm.rho81v2*vsq # else: # print("You used a bad (l,m)") # else: # print("You used a bad (l,m)") # rholmPowl = np.power(rholm,l) # if eta==0.25 and (m % 2): # rholmPowl = auxflm # else: # rholmPowl += auxflm # hlm = Tlm*Slm*rholmPowl*multipole # if (m*m*omega*omega*hlm*hlm) > 5.: # hlm *= nqc # flux += m*m*omega*omega*hlm*hlm # if omega*omega > 1 or flux > 5: # flux = 0. # flux *= np.divide(8.,np.pi) #flux /= eta #rdot = -flux/dEdr #pr = rdot/(dHdpr/px) ``` <a id='step5'></a> # Step 5: Invert the rotation of Step 3 \[Back to [top](#toc)\] $$\label{step5}$$ <a id='invrotationmatrix'></a> ## Inverse Rotation Matrix \[Back to [top](#toc)\] $$\label{invrotationmatrix}$$ The matrix to invert the rotation applied in [Step 3](#step3) is: \begin{equation*} \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf p}^{0} & \hat{\bf L}^{0} \\ \hat{\bf r}^{1} & \hat{\bf p}^{1} & \hat{\bf L}^{1} \\ \hat{\bf r}^{2} & \hat{\bf p}^{2} & \hat{\bf L}^{2}\end{bmatrix}. 
\end{equation*} To see that this is indeed the correct matrix inverse, note that by construction $\hat{\bf q}$, $\hat{\bf p}$, and $\hat{\bf L}$ are all unit vectors orthogonal to one another. See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1107.

```
#invert00 = rhat0
#invert01 = phat0
#invert02 = Lhat0
#invert10 = rhat1
#invert11 = phat1
#invert12 = Lhat1
#invert20 = rhat2
#invert21 = phat2
#invert22 = Lhat2
```

<a id='invrotaterhat'></a>

## Rotate $\hat{\bf r}^{\prime}$ \[Back to [top](#toc)\]
$$\label{invrotaterhat}$$

We rotate $\hat{\bf r}^{\prime}$ and call the new separation vector ${\bf r}$.

\begin{equation*} \hat{\bf r} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf p}^{0} & \hat{\bf L}^{0} \\ \hat{\bf r}^{1} & \hat{\bf p}^{1} & \hat{\bf L}^{1} \\ \hat{\bf r}^{2} & \hat{\bf p}^{2} & \hat{\bf L}^{2} \end{bmatrix} \begin{bmatrix} \hat{\bf r}^{\prime 0} \\ \hat{\bf r}^{\prime 1} \\ \hat{\bf r}^{\prime 2} \end{bmatrix} \end{equation*}

See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1315.

```
#rhat0 = rhat0*rhatprm0 + phat0*rhatprm1 + Lhat0*rhatprm2
#rhat1 = rhat1*rhatprm0 + phat1*rhatprm1 + Lhat1*rhatprm2
#rhat2 = rhat2*rhatprm0 + phat2*rhatprm1 + Lhat2*rhatprm2
```

<a id='invrotatevhat'></a>

## Rotate $\hat{\bf v}^{\prime}$ \[Back to [top](#toc)\]
$$\label{invrotatevhat}$$

We rotate $\hat{\bf v}^{\prime}$ and call the new velocity vector ${\bf v}$.

\begin{equation*} \hat{\bf v} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf p}^{0} & \hat{\bf L}^{0} \\ \hat{\bf r}^{1} & \hat{\bf p}^{1} & \hat{\bf L}^{1} \\ \hat{\bf r}^{2} & \hat{\bf p}^{2} & \hat{\bf L}^{2} \end{bmatrix} \begin{bmatrix} \hat{\bf v}^{\prime 0} \\ \hat{\bf v}^{\prime 1} \\ \hat{\bf v}^{\prime 2} \end{bmatrix} \end{equation*}

See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1316.
``` #vhat0 = rhat0*vhatprm0 + phat0*vhatprm1 + Lhat0*vhatprm2 #vhat1 = rhat1*vhatprm0 + phat1*vhatprm1 + Lhat1*vhatprm2 #vhat2 = rhat2*vhatprm0 + phat2*vhatprm1 + Lhat2*vhatprm2 ``` <a id='invrotatelnhat'></a> ## Rotate $\hat{\bf L}_{N}^{\prime}$ \[Back to [top](#toc)\] $$\label{invrotatelnhat}$$ We rotate $\hat{\bf L}_{N}^{\prime}$ and call the new separation vector ${\bf L}_{N}$. \begin{equation*} \hat{\bf L}_{N} = \begin{bmatrix} \hat{\bf r}^{0} & \hat{\bf p}^{0} & \hat{\bf L}^{0} \\ \hat{\bf r}^{1} & \hat{\bf p}^{1} & \hat{\bf L}^{1} \\ \hat{\bf r}^{2} & \hat{\bf p}^{2} & \hat{\bf L}^{2} \end{bmatrix} \begin{bmatrix} \hat{\bf L}_{N}^{\prime 0} \\ \hat{\bf L}_{N}^{\prime 1} \\ \hat{\bf L}_{N}^{\prime 2} \end{bmatrix} \end{equation*} See LALSimIMRSpinEOBInitialConditionsPrec.c Line 1317. ``` #LNhat0 = rhat0*LNhatprm0 + phat0*LNhatprm1 + Lhat0*LNhatprm2 #LNhat1 = rhat1*LNhatprm0 + phat1*LNhatprm1 + Lhat1*LNhatprm2 #LNhat2 = rhat2*LNhatprm0 + phat2*LNhatprm1 + Lhat2*LNhatprm2 ``` <a id='tortoise_matrix'></a> # Tortoise Conversion Matrix \[Back to [top](#toc)\] $$\label{tortoise_matrix}$$ <font color='red'>We're now back to LALSpinPrecHcapRvecDerivative_v3opt.c, Lines 92--96.</font> From [Pan, Buonanno, Buchman, et. al. 
(2010)](https://arxiv.org/abs/0912.3466v2) Equation (A3) the matrix for the coordinate conversion to tortoise coordinates is \begin{align*} \begin{pmatrix} 1 + \frac{ x^{2} }{ r^{2} } \left( \xi - 1 \right) & \frac{ x y }{ r^{2} } \left( \xi - 1 \right) & \frac{ x z }{ r^{2} } \left( \xi - 1 \right) \\ \frac{ x y }{ r^{2} } \left( \xi - 1 \right) & 1 + \frac{ y^{2} }{ r^{2} } \left( \xi - 1 \right) & \frac{ y z }{ r^{2} } \left( \xi - 1 \right) \\ \frac{ x z }{ r^{2} } \left( \xi - 1 \right) & \frac{ y z }{ r^{2} } \left( \xi - 1 \right) & 1 + \frac{ z^{2} }{ r^{2} } \left( \xi - 1 \right) \end{pmatrix} \end{align*} ``` #ximinus1 = xi - 1 #toTort = sp.Array([[1 + x*x*ximinus1/(r*r), x*y*ximinus1/(r*r), x*z*ximinus1/(r*r)], # [x*y*ximinus1/(r*r), 1 + y*y*ximinus1/(r*r), y*z*ximinus1/(r*r)], # [x*z*ximinus1/(r*r), y*z*ximinus1/(r*r), 1 + z*z*ximinus1/(r*r)]]) ``` <a id='latex_pdf_output'></a> # Output: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] $$\label{latex_pdf_output}$$ The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-SEOBNR_Initial_Conditions.pdf](Tutorial-SEOBNR_Initial_Conditions.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) ``` cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-SEOBNR_Initial_Conditions") ``` <a id='validation'></a> # Validation: Perform validation checks against LALSuite's SEOBNRv3 code (commit bba40f2) \[Back to [top](#toc)\] $$\label{validation}$$ ``` ## Validation Cell ## Here we perform a validation check by comparing the derivative values to hard-coded values produced by SEOBNRv3 ## in LALSuite. If this check fails, y'all done sump'tin wrong! 
#derivative_list = [dHdx,dHdy,dHdz,dHdpx,dHdpy,dHdpz,dHds1x,dHds1y,dHds1z,dHds2x,dHds2y,dHds2z] #for q in derivative_list: # from SEOBNR_Playground_Pycodes.new_q import new_compute_q #from SEOBNR.constant_coeffs import compute_const_coeffs #KK, k0, k1, k2, k3, k4, k5, k5l, dSO, dSS = compute_const_coeffs(eta,EMgamma,a) ## The coefficients do agree with LALSuite! #tortoise = 1 #Only for testing #Hreal = compute_Hreal(m1, m2, eta, 10.0, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, tortoise, EMgamma) #Hreal_pert = compute_Hreal(m1, m2, eta, 10.0*(1.+1e-15), 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, tortoise, EMgamma) # #termbyterm_dHdx = new_compute_dHdx(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 2, EMgamma) #termbyterm_dHdy = new_compute_dHdy(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 2, EMgamma) #termbyterm_dHdz = new_compute_dHdz(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 2, EMgamma) #termbyterm_dHdpx = new_compute_dHdpx(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #termbyterm_dHdpy = new_compute_dHdpy(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #termbyterm_dHdpz = new_compute_dHdpz(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #termbyterm_dHds1x = new_compute_dHds1x(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #termbyterm_dHds1y = new_compute_dHds1y(m1, m2, eta, 10, 
11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #termbyterm_dHds1z = new_compute_dHds1z(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #termbyterm_dHds2x = new_compute_dHds2x(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #termbyterm_dHds2y = new_compute_dHds2y(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #termbyterm_dHds2z = new_compute_dHds2z(m1, m2, eta, 10, 11.0, 12.0, # 0.01, 0.02, 0.03, # 0.004, 0.005, -0.006, # 0.007, -0.008, 0.009, # KK, k0, k1, dSO, dSS, 1, EMgamma) #print("exact Hreal = %.15e" % Hreal) #print("pertd Hreal = %.15e" % Hreal_pert) #print("relative diff in Hreal = %.15e\n" % (np.abs(Hreal - Hreal_pert)/np.abs(Hreal))) #print("new term-by-term computation of dHdx = %.15e\n" % (termbyterm_dHdx[0])) #print("new term-by-term computation of dHdy = %.15e\n" % termbyterm_dHdy[0]) #print("new term-by-term computation of dHdz = %.15e\n" % termbyterm_dHdz[0]) #print("new term-by-term computation of dHdpx = %.15e\n" % termbyterm_dHdpx[0]) #print("new term-by-term computation of dHdpy = %.15e\n" % termbyterm_dHdpy[0]) #print("new term-by-term computation of dHdpz = %.15e\n" % termbyterm_dHdpz[0]) #print("new term-by-term computation of dHds1x = %.15e\n" % termbyterm_dHds1x[0]) #print("new term-by-term computation of dHds1y = %.15e\n" % termbyterm_dHds1y[0]) #print("new term-by-term computation of dHds1z = %.15e\n" % termbyterm_dHds1z[0]) #print("new term-by-term computation of dHds2x = %.15e\n" % termbyterm_dHds2x[0]) #print("new term-by-term computation of dHds2y = %.15e\n" % termbyterm_dHds2y[0]) #print("new term-by-term computation of dHds2z = %.15e\n" % termbyterm_dHds2z[0]) ```
github_jupyter
## Analysis of Gene Expression Data via Arrays using Bioconductor/R - I Today, this notebook constitutes your in-class activity and homework. Over the next 3 days, we will be constructing your own gene expression analysis pipeline, using available tools in R, and available data from the gene expression omnibus (GEO): https://www.ncbi.nlm.nih.gov/geo/ Let's take a look at a new data set to analyze: **GSE35961** **Q1.** Describe in your own words, the treatment groups, number of samples, and mice that were characterized in this experiment. **Q2.** What is the citation attached to this paper? **Q3.** What is NASH? What is Metformin? What is the hypothesis that this experiment was designed to test? **Let's use R to download this data set, use UNIX to prepare our associated input files and organize our directory.** **Q4.** Using R, load the libraries we will use for our analysis here. Provide/Execute your code below. **Q5.** Using R, download the data set GSE35961 from GEO. Provide/Execute your code below. **Q6.** Now, we need to process the data that we downloaded. Using a terminal in UNIX perform the following tasks, and provide your code below (but execute them in a terminal). - Navigate to the newly created directory GSE35961 - Expand the GSE35961_RAW.tar archive [UNIX] - Uncompress all of the .gz files [UNIX] - Delete the GSE35961_RAW.tar file [UNIX] **Q7.** We need to prepare our phenotype file for analysis. For this assignment, we will compare the samples of NASH to the NASH treated with metformin. - In UNIX, prepare a phenotype.csv file which references the appropriate CEL files for analysis (use the example phenotype file from the prelab as a reference). **How many samples are there?** **Then, using R:** - load this phenotype file into R and store it into a variable called phenoData. - and return a summary for the variable phenoData using the pData command. 
**Provide and execute your R code below.** **There should be only 8 files!**

**Q8.** Next, we need to load our CEL file data into our R pipeline.
- read a list of cel files into an object called "celFileList" (using list.celfiles(), where you give the name of the directory that contains the cel files)
- create a new variable called "celFiles" which contains only the CEL files that you want to analyze (i.e., present in the phenoData variable in Q7, above) using brackets [] on celFileList
- report the contents of the celFiles variable using print()
- read intensity data from the list of celFiles into a variable called "affyRaw" using read.celfiles()

**Provide and execute your R code below.**

### Homework Problems

**Q9.** Now let's take a look at the data.
- Create a boxplot of the intensity data for all CEL files loaded.

**Provide and execute your R code below.**

**Q10.** Next, create a histogram of the intensity data for all CEL files loaded. **Provide and execute your R code below.**

**Congrats! You have completed Part 1. Feel free to continue on to Part 2, so that you can get ahead! :)**
github_jupyter
postcode strings can be converted to the following formats via the `output_format` parameter:

* `compact`: only number strings without any separators or whitespace, like "2611ET"
* `standard`: postcode strings with proper whitespace in the proper places. Note that in the case of postcode, the compact format is the same as the standard one.

Invalid parsing is handled with the `errors` parameter:

* `coerce` (default): invalid parsing will be set to NaN
* `ignore`: invalid parsing will return the input
* `raise`: invalid parsing will raise an exception

The following sections demonstrate the functionality of `clean_nl_postcode()` and `validate_nl_postcode()`.

### An example dataset containing postcode strings

```
import pandas as pd
import numpy as np
df = pd.DataFrame(
    {
        "postcode": [
            'NL-2611ET',
            '26112 ET',
            'BE 428759497',
            'BE431150351',
            "002 724 334",
            "hello",
            np.nan,
            "NULL",
        ],
        "address": [
            "123 Pine Ave.",
            "main st",
            "1234 west main heights 57033",
            "apt 1 789 s maple rd manhattan",
            "robie house, 789 north main street",
            "1111 S Figueroa St, Los Angeles, CA 90015",
            "(staples center) 1111 S Figueroa St, Los Angeles",
            "hello",
        ]
    }
)
df
```

## 1. Default `clean_nl_postcode`

By default, `clean_nl_postcode` will clean postcode strings and output them in the standard format with proper separators.

```
from dataprep.clean import clean_nl_postcode
clean_nl_postcode(df, column = "postcode")
```

## 2. Output formats

This section demonstrates the `output_format` parameter.

### `standard` (default)

```
clean_nl_postcode(df, column = "postcode", output_format="standard")
```

### `compact`

```
clean_nl_postcode(df, column = "postcode", output_format="compact")
```

## 3. `inplace` parameter

This deletes the given column from the returned DataFrame. A new column containing cleaned postcode strings is added with a title in the format `"{original title}_clean"`.

```
clean_nl_postcode(df, column="postcode", inplace=True)
```

## 4.
`errors` parameter

### `coerce` (default)

```
clean_nl_postcode(df, "postcode", errors="coerce")
```

### `ignore`

```
clean_nl_postcode(df, "postcode", errors="ignore")
```

## 5. `validate_nl_postcode()`

`validate_nl_postcode()` returns `True` when the input is a valid postcode. Otherwise it returns `False`.

The input of `validate_nl_postcode()` can be a string, a Pandas Series, a Dask Series, a Pandas DataFrame or a Dask DataFrame. When the input is a string, a Pandas Series or a Dask Series, the user does not need to specify a column name to be validated. When the input is a Pandas DataFrame or a Dask DataFrame, the user may optionally specify a column name to be validated. If the user specifies the column name, `validate_nl_postcode()` only returns the validation result for the specified column. If the user does not specify the column name, `validate_nl_postcode()` returns the validation result for the whole DataFrame.

```
from dataprep.clean import validate_nl_postcode
print(validate_nl_postcode("NL-2611ET"))
print(validate_nl_postcode("26112 ET"))
print(validate_nl_postcode('BE 428759497'))
print(validate_nl_postcode('BE431150351'))
print(validate_nl_postcode("004085616"))
print(validate_nl_postcode("hello"))
print(validate_nl_postcode(np.nan))
print(validate_nl_postcode("NULL"))
```

### Series

```
validate_nl_postcode(df["postcode"])
```

### DataFrame + Specify Column

```
validate_nl_postcode(df, column="postcode")
```

### Only DataFrame

```
validate_nl_postcode(df)
```
github_jupyter
# Business and Data Understanding ## Airports Weather Data 2016 ### Import Airports and their latitude/longitude. 10 US airports with the most weather related delays ``` from pyspark.sql import SQLContext import numpy as np from io import StringIO import requests import json import pandas as pd # @hidden_cell # This function accesses a file in your Object Storage. The definition contains your credentials. # You might want to remove those credentials before you share your notebook. def get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375(container, filename): """This functions returns a StringIO object containing the file content from Bluemix Object Storage.""" url1 = ''.join(['https://identity.open.softlayer.com', '/v3/auth/tokens']) data = {'auth': {'identity': {'methods': ['password'], 'password': {'user': {'name': 'member_fa75ff3d05c0b00bdf62f0536608f1ca7c52af71','domain': {'id': 'daf5d7dceca34848ae07708c68826bb2'}, 'password': 'ng[~3U24rFuL)UUm'}}}}} headers1 = {'Content-Type': 'application/json'} resp1 = requests.post(url=url1, data=json.dumps(data), headers=headers1) resp1_body = resp1.json() for e1 in resp1_body['token']['catalog']: if(e1['type']=='object-store'): for e2 in e1['endpoints']: if(e2['interface']=='public'and e2['region']=='dallas'): url2 = ''.join([e2['url'],'/', container, '/', filename]) s_subject_token = resp1.headers['x-subject-token'] headers2 = {'X-Auth-Token': s_subject_token, 'accept': 'application/json'} resp2 = requests.get(url=url2, headers=headers2) return StringIO(resp2.text) airport_lat_long_data = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'airports lat long.csv')) airport_lat_long_data.head() airport_lat_long_data.dtypes ``` ### Change column names for merging with weather data ``` airport_lat_long_data=airport_lat_long_data.rename(columns = {'Lat':'inputLatitude','Lon':'inputLongitude'}) airport_lat_long_data.head(1) ``` ### Import weather 
data. Historical hourly weather data for 10 airports over the year 2016 ``` weather_exported = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'weather_exported.csv')) weather_exported.dtypes weather_exported.head(2) ``` ### Derive new date and hour columns ``` weather_exported['FL_DATE'] = weather_exported.DateHrLwt.str.slice(6, 10) + weather_exported.DateHrLwt.str.slice(0, 2) + weather_exported.DateHrLwt.str.slice(3, 5) weather_exported['Hour'] = weather_exported.DateHrLwt.str.slice(11, 13) weather_exported['Hour'] = weather_exported.Hour.astype('int') weather_exported.head(1) airports_weather_df = pd.merge(weather_exported, airport_lat_long_data, how='left', left_on=['inputLatitude','inputLongitude'], right_on = ['inputLatitude','inputLongitude']) ``` #### Drop columns that aren't features ``` airports_weather_df.drop(['SiteId','Latitude','Longitude','inputLatitude','inputLongitude','DateHrGmt','DateHrLwt','SurfaceDewpointTemperatureFahrenheit','SurfaceWetBulbTemperatureFahrenheit','RelativeHumidityPercent', 'SurfaceAirPressureMillibars','WindChillTemperatureFahrenheit','ApparentTemperatureFahrenheit','WindDirectionDegrees', 'DownwardSolarRadiationWsqm','DiffuseHorizontalRadiationWsqm','DirectNormalIrradianceWsqm','MslPressureMillibars', 'HeatIndexFahrenheit','PotentialEvapotranspirationMicrometersPerHour','TenToFortyLiquidSoilMoisturePercent', 'TenToFortySoilTemperatureFahrenheit','ZeroToTenLiquidSoilMoisturePercent','ZeroToTenSoilTemperatureFahrenheit'], axis=1,inplace='True') airports_weather_df.head(1) ``` ## Airports Flight Data 2016 ### Retrieve historical flights data for all US airports over the year 2016 ``` Jan2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Jan 2016 Flights.csv')) Jan2016_df.head(1) Feb2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Feb 
2016 Flights.csv')) Feb2016_df.head(1) Mar2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Mar 2016 Flights.csv')) Mar2016_df.head(1) Apr2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Apr 2016 Flights.csv')) Apr2016_df.head(1) May2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'May 2016 Flights.csv')) May2016_df.head(1) Jun2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Jun 2016 Flights.csv')) Jun2016_df.head(1) Jul2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Jul 2016 Flights.csv')) Jul2016_df.head(1) Aug2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Aug 2016 Flights.csv')) Aug2016_df.head(1) Sep2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Sep 2016 Flights.csv')) Sep2016_df.head(1) Oct2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Oct 2016 Flights.csv')) Oct2016_df.head(1) Nov2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Nov 2016 Flights.csv')) Nov2016_df.head(1) Dec2016_df = pd.read_csv(get_object_storage_file_with_credentials_b12280b012b94907ad8decb8341d8375('FlightCancellation', 'Dec 2016 Flights.csv')) Dec2016_df.head(1) frames = [Jan2016_df, Feb2016_df, Mar2016_df, Apr2016_df, May2016_df, Jun2016_df, Jul2016_df, Aug2016_df, Sep2016_df, Oct2016_df, Nov2016_df, Dec2016_df] airport_flights_df = pd.concat(frames) airport_flights_df.head(1) airport_flights_df.shape ``` ### Select 10 airports ``` airport_flights_df = 
airport_flights_df[(airport_flights_df.ORIGIN == "BOS") | (airport_flights_df.ORIGIN == "EWR") | (airport_flights_df.ORIGIN == "JFK") | (airport_flights_df.ORIGIN == "LGA") | (airport_flights_df.ORIGIN == "ORD") | (airport_flights_df.ORIGIN == "DEN") | (airport_flights_df.ORIGIN == "DFW") | (airport_flights_df.ORIGIN == "IAH") | (airport_flights_df.ORIGIN == "PHL") | (airport_flights_df.ORIGIN == "SFO")] airport_flights_df = airport_flights_df[(airport_flights_df.DEST == "BOS") | (airport_flights_df.DEST == "EWR") | (airport_flights_df.DEST == "JFK") | (airport_flights_df.DEST == "LGA") | (airport_flights_df.DEST == "ORD") | (airport_flights_df.DEST == "DEN") | (airport_flights_df.DEST == "DFW") | (airport_flights_df.DEST == "IAH") | (airport_flights_df.DEST == "PHL") | (airport_flights_df.DEST == "SFO")] airport_flights_df.shape ``` #### Derive the Hour from the scheduled departure time ``` airport_flights_df['Hour'] = airport_flights_df.CRS_DEP_TIME / 100 airport_flights_df['Hour'] = airport_flights_df.Hour.astype(int) airport_flights_df.head(2) ``` #### Change FL_DATE format to match airport_weather_df FL_DATE format ``` airport_flights_df.FL_DATE = airport_flights_df.FL_DATE.str.replace('-', '') airport_flights_df.head(1) ``` #### Drop columns that aren't needed as features ``` airport_flights_df.drop(['DEP_TIME','DEP_DELAY','TAXI_OUT','WHEELS_ON','TAXI_IN','ARR_TIME','ARR_DELAY','ACTUAL_ELAPSED_TIME', 'AIR_TIME','CARRIER_DELAY','WEATHER_DELAY','NAS_DELAY','SECURITY_DELAY','LATE_AIRCRAFT_DELAY', 'CRS_ELAPSED_TIME', 'CRS_DEP_TIME','CRS_ARR_TIME'], axis=1,inplace='True') airport_flights_df.dtypes airport_flights_df.shape ``` ### Rename feature columns in the weather dataframe for merging with flight dataframe. 
This will add weather data for the ORIGIN airport ``` airports_weather_df.rename(columns={'Airport':'ORIGIN','SurfaceTemperatureFahrenheit':'O_SurfaceTemperatureFahrenheit', 'CloudCoveragePercent':'O_CloudCoveragePercent','WindSpeedMph':'O_WindSpeedMph', 'PrecipitationPreviousHourInches':'O_PrecipitationPreviousHourInches','SnowfallInches':'O_SnowfallInches', 'SurfaceWindGustsMph':'O_SurfaceWindGustsMph','SurfaceWaterRunOffMillimeters':'O_SurfaceWaterRunOffMillimeters'}, inplace=True) airports_weather_df.dtypes airports_weather_df.head(1) flights_with_weather_df = pd.merge(airport_flights_df,airports_weather_df,on=['FL_DATE','Hour','ORIGIN']) flights_with_weather_df.dtypes flights_with_weather_df.shape ``` ### Rename feature columns in the weather dataframe for merging with flight dataframe. This will add weather data for the DESTINATION airport ``` airports_weather_df.rename(columns={'ORIGIN':'DEST','O_SurfaceTemperatureFahrenheit':'D_SurfaceTemperatureFahrenheit', 'O_CloudCoveragePercent':'D_CloudCoveragePercent','O_WindSpeedMph':'D_WindSpeedMph', 'O_PrecipitationPreviousHourInches':'D_PrecipitationPreviousHourInches','O_SnowfallInches':'D_SnowfallInches', 'O_SurfaceWindGustsMph':'D_SurfaceWindGustsMph','O_SurfaceWaterRunOffMillimeters':'D_SurfaceWaterRunOffMillimeters'}, inplace=True) airports_weather_df.dtypes flights_with_weather_df = pd.merge(flights_with_weather_df,airports_weather_df,on=['FL_DATE','Hour','DEST']) ``` ### Let's use pixiedust to see what it can do ``` import pixiedust display(flights_with_weather_df) ``` # Data Preparation - first iteration ``` %matplotlib inline flights_with_weather_df.CANCELLATION_CODE.value_counts(sort=False, dropna=False) ``` #### Clean up null values ``` flights_with_weather_df.isnull().sum() ``` #### Replace nulls with 0's ``` flights_with_weather_df['O_SurfaceWaterRunOffMillimeters'] = flights_with_weather_df['O_SurfaceWaterRunOffMillimeters'].replace(['$null$'], 0.000) 
flights_with_weather_df['D_SurfaceWaterRunOffMillimeters'] = flights_with_weather_df['D_SurfaceWaterRunOffMillimeters'].replace(['$null$'], 0.000) flights_with_weather_df['O_SurfaceWaterRunOffMillimeters'] = pd.to_numeric(flights_with_weather_df['O_SurfaceWaterRunOffMillimeters'], errors='coerce') flights_with_weather_df['D_SurfaceWaterRunOffMillimeters'] = pd.to_numeric(flights_with_weather_df['D_SurfaceWaterRunOffMillimeters'], errors='coerce') ``` #### Filter out "non-weather" cancellations ``` calcel_code_list = ['A', 'C', 'D'] flights_with_weather_df = flights_with_weather_df.loc[~flights_with_weather_df['CANCELLATION_CODE'].isin(calcel_code_list)] ``` #### Drop columns not needed ``` flights_with_weather_df = flights_with_weather_df.drop(['FL_NUM','CANCELLATION_CODE'], axis=1) ``` #### The data is very imbalanced ``` flights_with_weather_df.CANCELLED.value_counts() ``` #### Install libraries to use SMOTE ``` ! pip install imbalanced-learn import pandas as pd from sklearn import datasets, metrics #from sklearn.cross_validation import train_test_split from sklearn.model_selection import train_test_split from sklearn.feature_selection import SelectFromModel from sklearn.metrics import accuracy_score, roc_curve, roc_auc_score, f1_score from sklearn.metrics import recall_score, precision_score, confusion_matrix from sklearn.ensemble import RandomForestClassifier #import balancing techniques import imblearn from imblearn.over_sampling import SMOTE ``` ### Four feature columns will need to be converted ``` flights_with_weather_df.dtypes ``` #### Create features df and target ``` x = flights_with_weather_df y = flights_with_weather_df['CANCELLED'] del x['CANCELLED'] print x.shape, y.shape ``` #### Run One Hot Encoding on the four string /object features ``` cols_to_transform = [ 'FL_DATE', 'UNIQUE_CARRIER', 'ORIGIN', 'DEST'] df_with_dummies = pd.get_dummies(x, columns = cols_to_transform ) ``` #### One Hot Encoding expands the 4 feature columns into many more ``` 
print x.shape print df_with_dummies.shape ``` ### Creating the Training and Test Sets ref: https://beckernick.github.io/oversampling-modeling/ ``` training_features, test_features, training_target, test_target, = train_test_split(df_with_dummies, y, test_size=0.15, random_state=12) print training_features.shape, test_features.shape print training_target.shape, test_target.shape ``` #### Oversample only on the training data ``` x_train, x_val, y_train, y_val = train_test_split(training_features, training_target, test_size = .15, random_state=12) sm = SMOTE(k=5, kind = 'regular', ratio='auto') x_train_res, y_train_res = sm.fit_sample(x_train, y_train) print training_target.value_counts(), np.bincount(y_train_res) clf_rf = RandomForestClassifier(n_estimators=100, random_state=12) clf_rf.fit(x_train_res, y_train_res) ``` Accuracy = (TP+TN) / (TP+TN+FP+FN) Precision = TP / (TP+FP) Recall = TP / (TP+FN) ``` print 'Validation Results' print 'Accuracy: ', clf_rf.score(x_val, y_val) print 'Precision:', precision_score(y_val, clf_rf.predict(x_val)) print 'Recall:', recall_score(y_val, clf_rf.predict(x_val)) print 'F1 score:', f1_score(y_val, clf_rf.predict(x_val),average='weighted') print 'Confusion Matrix:\n', confusion_matrix(y_val, clf_rf.predict(x_val)) print '\nTest Results' print 'Accuracy: ', clf_rf.score(test_features, test_target) print 'Precision:', precision_score(test_target, clf_rf.predict(test_features)) print 'Recall: ', recall_score(test_target, clf_rf.predict(test_features)) print 'F1 score:', f1_score(test_target, clf_rf.predict(test_features),average='weighted') print 'Confusion Matrix:\n', confusion_matrix(test_target, clf_rf.predict(test_features)) #!pip install --user ggplot from ggplot import * # from http://blog.yhat.com/posts/roc-curves.html preds = clf_rf.predict_proba(test_features)[:,1] fpr, tpr, _ = roc_curve(test_target, preds) auc = metrics.auc(fpr,tpr) df = pd.DataFrame(dict(fpr=fpr, tpr=tpr)) ggplot(df, aes(x='fpr', y='tpr')) +\ geom_line() 
+\ geom_abline(linetype='dashed') +\ ggtitle("ROC Curve w/ AUC=%s" % str(auc)) ```
github_jupyter
``` import bert from bert import run_classifier from bert import optimization from bert import tokenization from bert import modeling import numpy as np import json import tensorflow as tf import itertools from unidecode import unidecode import re import sentencepiece as spm # !git clone https://github.com/huseinzol05/Malaya-Dataset.git # Change to your local Malaya-Dataset import glob left, right, label = [], [], [] for file in glob.glob('../../Malaya-Dataset/text-similarity/quora/*.json'): with open(file) as fopen: x = json.load(fopen) for i in x: splitted = i[0].split(' <> ') if len(splitted) != 2: continue left.append(splitted[0]) right.append(splitted[1]) label.append(i[1]) np.unique(label, return_counts = True) from prepro_utils import preprocess_text, encode_ids, encode_pieces sp_model = spm.SentencePieceProcessor() sp_model.Load('sp10m.cased.v4.model') with open('sp10m.cased.v4.vocab') as fopen: v = fopen.read().split('\n')[:-1] v = [i.split('\t') for i in v] v = {i[0]: i[1] for i in v} class Tokenizer: def __init__(self, v): self.vocab = v pass def tokenize(self, string): return encode_pieces(sp_model, string, return_unicode=False, sample=False) def convert_tokens_to_ids(self, tokens): return [sp_model.PieceToId(piece) for piece in tokens] def convert_ids_to_tokens(self, ids): return [sp_model.IdToPiece(i) for i in ids] tokenizer = Tokenizer(v) BERT_INIT_CHKPNT = 'pretraining_output3/model.ckpt-1000000' BERT_CONFIG = 'checkpoint/small_config.json' MAX_SEQ_LENGTH = 100 tokenizer.tokenize(left[1]) list(v.keys())[:10] from tqdm import tqdm def _truncate_seq_pair(tokens_a, tokens_b, max_length): while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def get_inputs(left, right): input_ids, input_masks, segment_ids = [], [], [] for i in tqdm(range(len(left))): tokens_a = tokenizer.tokenize(' '.join(left[i])) tokens_b = tokenizer.tokenize(' 
'.join(right[i])) _truncate_seq_pair(tokens_a, tokens_b, MAX_SEQ_LENGTH - 3) tokens = [] segment_id = [] tokens.append("<cls>") segment_id.append(0) for token in tokens_a: tokens.append(token) segment_id.append(0) tokens.append("<sep>") segment_id.append(0) for token in tokens_b: tokens.append(token) segment_id.append(1) tokens.append("<sep>") segment_id.append(1) input_id = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_id) while len(input_id) < MAX_SEQ_LENGTH: input_id.append(0) input_mask.append(0) segment_id.append(0) input_ids.append(input_id) input_masks.append(input_mask) segment_ids.append(segment_id) return input_ids, input_masks, segment_ids input_ids, input_masks, segment_ids = get_inputs(left, right) bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG) epoch = 10 batch_size = 120 warmup_proportion = 0.1 num_train_steps = int(len(left) / batch_size * epoch) num_warmup_steps = int(num_train_steps * warmup_proportion) class Model: def __init__( self, dimension_output, learning_rate = 2e-5, ): self.X = tf.placeholder(tf.int32, [None, None]) self.segment_ids = tf.placeholder(tf.int32, [None, None]) self.input_masks = tf.placeholder(tf.int32, [None, None]) self.Y = tf.placeholder(tf.int32, [None]) model = modeling.BertModel( config=bert_config, is_training=True, input_ids=self.X, input_mask=self.input_masks, token_type_ids=self.segment_ids, use_one_hot_embeddings=False) output_layer = model.get_pooled_output() self.logits = tf.layers.dense(output_layer, dimension_output) self.cost = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( logits = self.logits, labels = self.Y ) ) self.optimizer = optimization.create_optimizer(self.cost, learning_rate, num_train_steps, num_warmup_steps, False) correct_pred = tf.equal( tf.argmax(self.logits, 1, output_type = tf.int32), self.Y ) self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) dimension_output = 2 learning_rate = 2e-5 tf.reset_default_graph() sess = 
tf.InteractiveSession() model = Model( dimension_output, learning_rate ) sess.run(tf.global_variables_initializer()) var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'bert') saver = tf.train.Saver(var_list = var_lists) saver.restore(sess, BERT_INIT_CHKPNT) from sklearn.cross_validation import train_test_split train_input_ids, test_input_ids, train_input_masks, test_input_masks, train_segment_ids, test_segment_ids, train_Y, test_Y = train_test_split( input_ids, input_masks, segment_ids, label, test_size = 0.2 ) import time EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 3, 0, 0, 0 while True: lasttime = time.time() if CURRENT_CHECKPOINT == EARLY_STOPPING: print('break epoch:%d\n' % (EPOCH)) break train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0 pbar = tqdm( range(0, len(train_input_ids), batch_size), desc = 'train minibatch loop' ) for i in pbar: index = min(i + batch_size, len(train_input_ids)) batch_x = train_input_ids[i: index] batch_masks = train_input_masks[i: index] batch_segment = train_segment_ids[i: index] batch_y = train_Y[i: index] acc, cost, _ = sess.run( [model.accuracy, model.cost, model.optimizer], feed_dict = { model.Y: batch_y, model.X: batch_x, model.segment_ids: batch_segment, model.input_masks: batch_masks }, ) assert not np.isnan(cost) train_loss += cost train_acc += acc pbar.set_postfix(cost = cost, accuracy = acc) pbar = tqdm(range(0, len(test_input_ids), batch_size), desc = 'test minibatch loop') for i in pbar: index = min(i + batch_size, len(test_input_ids)) batch_x = test_input_ids[i: index] batch_masks = test_input_masks[i: index] batch_segment = test_segment_ids[i: index] batch_y = test_Y[i: index] acc, cost = sess.run( [model.accuracy, model.cost], feed_dict = { model.Y: batch_y, model.X: batch_x, model.segment_ids: batch_segment, model.input_masks: batch_masks }, ) test_loss += cost test_acc += acc pbar.set_postfix(cost = cost, accuracy = acc) train_loss /= len(train_input_ids) / batch_size 
train_acc /= len(train_input_ids) / batch_size test_loss /= len(test_input_ids) / batch_size test_acc /= len(test_input_ids) / batch_size if test_acc > CURRENT_ACC: print( 'epoch: %d, pass acc: %f, current acc: %f' % (EPOCH, CURRENT_ACC, test_acc) ) CURRENT_ACC = test_acc CURRENT_CHECKPOINT = 0 else: CURRENT_CHECKPOINT += 1 print('time taken:', time.time() - lasttime) print( 'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n' % (EPOCH, train_loss, train_acc, test_loss, test_acc) ) EPOCH += 1 real_Y, predict_Y = [], [] pbar = tqdm( range(0, len(test_input_ids), batch_size), desc = 'validation minibatch loop' ) for i in pbar: index = min(i + batch_size, len(test_input_ids)) batch_x = test_input_ids[i: index] batch_masks = test_input_masks[i: index] batch_segment = test_segment_ids[i: index] batch_y = test_Y[i: index] predict_Y += np.argmax(sess.run(model.logits, feed_dict = { model.Y: batch_y, model.X: batch_x, model.segment_ids: batch_segment, model.input_masks: batch_masks }, ), 1, ).tolist() real_Y += batch_y from sklearn import metrics print( metrics.classification_report( real_Y, predict_Y, target_names = ['not similar', 'similar'],digits=5 ) ) ```
github_jupyter
``` import re import numpy as np import pickle import import_ipynb import import_ipynb from normalizing import normalize from gensim.models.keyedvectors import KeyedVectors from gensim.test.utils import get_tmpfile from gensim.scripts.glove2word2vec import glove2word2vec import collections from collections import defaultdict import csv from konlpy.tag import Twitter;t=Twitter()#tokens_ko=t.morphs(doc_ko) #각 파일 불러오기 train_content_path= '../content.csv' train_title_path = '../title.csv' valid_content_path ='../testing_data.csv' def clean_str(sentence): sentence = re.sub("[#.]+", "#", sentence) return sentence def get_text_list(data_path, toy): with open (data_path, "r", encoding="utf-8") as f: if not toy: return [clean_str(x.strip()) for x in f.readlines()] else: return [clean_str(x.strip()) for x in f.readlines()] def get_text_list(data_path,title=False,content=False,toy): with open (data_path, "r", encoding="utf-8") as f: if not toy: if content: return [clean_str(x.strip()) for x in f.readlines()] elif title: return [clean_str(x.strip()) for x in f.readlines()] else: return [clean_str(x.strip()) for x in line.readlines()] def build_dict(step, toy=False): if step == "train": train_article_list = get_text_list(train_article_path,title=False,content=True, toy) train_title_list = get_text_list(train_title_path,title=True,content=False, toy) words = list() count = 0 dict = defaultdict(lambda:[]) for sentence in train_article_list + train_title_list: sentence = normalizing(sentence, punctuation=True) for idx,word in enumerate(sentence.split()): if len(word) > 0: normalizedword=word[:3] tmp=[] for char in normalizedword: if ord(char) < 12593 and ord(char) > 12643: tmp.append(char) normalizedword = ''.join(char for char in tmp) if word not in dict[normalizedword].append(word) dict[normalizedword].append(word) dict = sorted(dict.items(), key=operator.itemgetter(0))[1:] words=[] fpr i in rnage(len(dict)): word=[] word.append(dict[i][0]) for w in dict[i][1]: if w not in word: 
word.append(w) words.append(word) words.append(['<padding>']) words.append(['<unk>']) words.append(['<s>']) words.append(['</s>']) reversed_dict = {i:ch[0] for i,ch in enumerate(words)} word_dict={} for idx,words in enumerate(words): for word in words: word_dict[word]=idx with open("word_dict.pickle", "wb") as f: pickle.dump(word_dict, f) with open("ix_to_dict.pickle", "wb") as t: pickle.dump(word_dict, t) elif step == "valid": with open("word_dict.pickle", "rb") as f: word_dict = pickle.load(f) reversed_dict = dict(zip(word_dict.values(), word_dict.keys())) article_max_len = 120 summary_max_len = 18 print("reversed dict:",len(reversed_dict),"word dict:",len(word_dict)) return word_dict, reversed_dict, article_max_len, summary_max_len def build_dataset(step, word_dict, article_max_len, summary_max_len, toy=False): if step == "train": article_list = get_text_list(train_article_path,title=False,content=True, toy) title_list = get_text_list(train_title_path,title=True,content=False, toy) elif step == "valid": article_list = get_text_list(valid_article_path,title=False,content=True, toy) else: raise NotImplementedError x = [normalize(d) for d in article_list] x = [[word_dict.get(w, word_dict["<unk>"]) for w in d] for d in x] x = [d[:article_max_len] for d in x] x = [d + (article_max_len - len(d)) * [word_dict["<padding>"]] for d in x] if step == "valid": return x else: y = [normalize(d) for d in title_list] y = [[word_dict.get(w, word_dict["<unk>"]) for w in d] for d in y] y = [d[:(summary_max_len - 1)] for d in y] return x, y def batch_iter(inputs, outputs, batch_size, num_epochs): inputs = np.array(inputs) outputs = np.array(outputs) num_batches_per_epoch = (len(inputs) - 1) // batch_size + 1 for epoch in range(num_epochs): for batch_num in range(num_batches_per_epoch): start_index = batch_num * batch_size end_index = min((batch_num + 1) * batch_size, len(inputs)) yield inputs[start_index:end_index], outputs[start_index:end_index] def 
get_init_embedding(reversed_dict, embedding_size): glove_file = "glove/glove.42B.300d.txt" word2vec_file = get_tmpfile("word2vec_format.vec") glove2word2vec(glove_file, word2vec_file) print("Loading Glove vectors...") word_vectors = KeyedVectors.load_word2vec_format(word2vec_file) word_vec_list = list() for _, word in sorted(reversed_dict.items()): try: word_vec = word_vectors.word_vec(word) except KeyError: word_vec = np.zeros([embedding_size], dtype=np.float32) word_vec_list.append(word_vec) # Assign random vector to <s>, </s> token word_vec_list[2] = np.random.normal(0, 1, embedding_size) word_vec_list[3] = np.random.normal(0, 1, embedding_size) return np.array(word_vec_list) ```
github_jupyter
## Importance Sampling and Particle filter ``` import numpy as np import matplotlib.pyplot as plt from scipy.stats import norm from scipy.stats import poisson ``` ## Importance Sampling and resampling Before we dive into the vast universe of nonlinear filtering, let us take a step back and review importance sampling. The idea of importance is to empirically approximate a probability distribution, which we can evaluate but cannot directly sample from, by weighted samples from _another_ probability distribution. As an example, let us consider a mysterious parabola-shaped probability function $p(x) = 3/4 * (1-x^2) $, defined on $x $ in $ [-1,1]$. We cannot sample from this distribution, but we can sample from the uniform distribution between -1 and 1. Let us now generate weighted samples to approximate the parabola distribution (algorithm 2) ``` # Approximate beta distribution with weighted samples from the uniform distribution np.random.seed(42) N = 1000 # draw samples from proposal # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% # compute weights # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% # normalize the weights such that they sum to 1 # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% # histogram of original samples fig, (ax1,ax2) = plt.subplots(2,1,figsize=(10,8)) fig.tight_layout() ax1.hist(x) ax1.set_title('Original samples') # histogram of weighted samples, together with probability distribution bins = np.linspace(-1, 1, int(np.sqrt(N))) ax2.hist(x,weights=w,density = True,alpha=0.5) xaxis = np.linspace(-1,1,50) ax2.plot(xaxis,f(xaxis)/(4/3)) ax2.legend([r'$p(x)$','weighted histogram']) ax2.set_title('Weighted samples') plt.show() ``` Assume you don't want to store the weights for some reason, but want to have samples that represent the parabola distribution. 
In this case, you can use the resampling algorithm (algorithm 4) to generate equally weighted samples from the weight distribution: ``` def resample(x,w,N): # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% return x_r,w_r #resample to produce equally weighted samples, which is equivalent to samples without the need for a weight x_r,_ = resample(x,w,N) # histogram of original samples fig, ax2 = plt.subplots(1,1,figsize=(10,4)) fig.tight_layout() # histogram of weighted samples bins = np.linspace(-1, 1, int(np.sqrt(N))) ax2.hist(x,weights=w,density = True,alpha=0.5) ax2.hist(x_r,density = True,alpha=0.5) xaxis = np.linspace(-1,1,50) ax2.plot(xaxis,f(xaxis)/(4/3)) ax2.legend([r'$p(x)$','weighted histogram','resampled histogram']) plt.show() ``` Note that even though the idea of resampling is intruguingly simple, it will increase the variance of the samples. ## Getting started with particle filters: Revisit the random walk In order to compare the PF (and to benchmark it), let us use it on the simple random walk model that we have encountered in the KF section. We already have an optimal solution to this problem (the Kalman filter), and we will now compare the PF to this. ``` def KF1D_generateData(params): x = np.zeros(params["T"]+1) y = np.zeros(params["T"]+1) # initialization x[0] = np.random.normal(params["mu0"],np.sqrt(params["Sigma0"])) y[0] = np.random.normal(params["H"] * x[0], np.sqrt(params["R"])) for i in range(1,params["T"]+1): x[i] = np.random.normal(params["F"] * x[i-1],np.sqrt(params["Q"])) y[i] = np.random.normal(params["H"] * x[i], np.sqrt(params["R"])) return x, y def KF1D(y,params): # %%%%%%%%%%%%%%% COPY CODE HERE %%%%%%%%%%%%%%%%%%% return mu, Sigma np.random.seed(42) N = 2000 c = 0.5 params = { "F": 1, "Q": 1, "H": 1, "R": 10, "mu0": 10, "Sigma0": 2, "T": 100 } # generate the data x, y = KF1D_generateData(params) # compute the KF estimate mu, Sigma = KF1D(y,params) ``` Let's code up the Boostrap Particle filter (Algorithm 5). 
You may use the resampling procedure that you have already defined above. ``` def BootstrapPF(y,N,c,params): # initialization # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% # filtering recursion for t in range(1,params["T"]+1): # draw from proposal (transition density) # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% #compute weights # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% # normalize weights # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% # resample if necessary # %%%%%%%%%%%%%%% ENTER CODE HERE %%%%%%%%%%%%%%%%%%% return x, w np.random.seed(42) # compute the Bootstrap PF estimate x_PF,w_PF = BootstrapPF(y,N,c,params) # compute mean and variance mu_PF = np.sum(w_PF * x_PF,1) Sigma_PF = np.sum(w_PF * ((x_PF.T-mu_PF)**2).T,1) # Plot the trajectory and the observations # (assume no observation at y = 0 ) t = np.arange(params["T"]+1) fig, (ax1,ax2) = plt.subplots(2,1,figsize=(10,8)) ax1.scatter(t[1:],y[1:],color='grey',facecolors='none') ax1.scatter(t,x,color='black') ax1.plot(t,mu,linewidth=2) ax1.plot(t,mu_PF,'--',linewidth=2) ax1.legend([r'$\mu_t$ (KF)',r'$\mu_t$ (PF)',r'$y_t$',r'$x_t$']) ax1.grid(True) ax1.set_xlim(0,params["T"]) ax1.set_title('Mean') ax2.plot(t,Sigma) ax2.plot(t,Sigma_PF) ax2.set_xlim(0,params["T"]) ax2.set_title('Variance') ax2.legend([r'$\Sigma_t$ (KF)',r'$\Sigma_t$ (PF)']) plt.show() ``` ## Nonlinear Gaussian models Here, we consider a nonlinear filtering task, where the hidden state $X_t$ evolves according to a drift-diffusion with nonlinear drift function. Further, the observations $Y_t$ are linear and corrupted by Gaussian noise. Specifically, the generative model in terms of stochastic differential equations (SDE) reads: \begin{eqnarray} d X_t & = & \tilde{f}(X_t) \, dt + \sigma_x \, dW_t \\ d Z_t & = & \tilde{h}(X_t) \, dt + \sigma_y \, dV_t, \end{eqnarray} with $ \tilde{f}(x) = -4x(x^2-1) $ and $h(x) = x $. Don't worry if you have never worked with SDE's before. 
It's actually nothing other than the dynamical system we looked at in the KF notebook, just written in slightly unusual notation.
``` def NLGauss_BPF(y,N,c,params): # %%%%%%%%%%%%%%% COPY CODE HERE, ADJUST %%%%%%%%%%%%%%%%%%% return x, w np.random.seed(42) x_PF,w_PF = NLGauss_BPF(y,N,c,params) ``` Let us now visualize the results: ``` # produces weighted histogram images def histImage(x, bins, rang, w=0): image = np.zeros((x.shape[0],bins)) if np.isscalar(w): for i in range(x.shape[0]): image[i,:] = np.histogram(x[i,:],bins,rang,density=True)[0] else: for i in range(x.shape[0]): image[i,:] = np.histogram(x[i,:],bins,rang,weights=w[i,:],density=True)[0] return image T = params["T"] plotrange = [0,T,-2.5,2.5] t = np.arange(T+1)*dt fig, (ax2) = plt.subplots(1,1,figsize=(10,6)) hist = np.transpose(histImage(x_PF,int(np.sqrt(N)),(-3,3),w=w_PF)) ax2.imshow(np.flipud(hist), cmap='Oranges', interpolation='nearest', extent=[0,T,-3,3],aspect='auto',vmax=0.7) ax2.plot(t,x,color='xkcd:moss') ax2.plot(t,np.average(x_PF,1,w_PF), linewidth=3,color = 'xkcd:azure') ax2.axis(plotrange) ax2.legend(['hidden state','BPF'],fontsize=16) ax2.legend([r'$x_t$',r'$\mu_t$ (BPF)']) plt.subplots_adjust(hspace=0.3) plt.savefig('63 - particle filters - gauss.pdf') plt.show() ``` ## Nonlinear filtering with Poisson noise As an alternative to Gaussian-type observation noise, we consider here point-process observations, with the intensity $ g(x_t)$ being a function of the latent state $x_t$. \begin{eqnarray} y_t &\sim & Poisson(g(x_t)). \end{eqnarray} As a concrete example, we consider a Gaussian-shaped rate function $ g(x) = g_0 \exp(\frac{x-m_o}{2 s_0^2}) dt $ for two sensors with peaks at $ m_0 = \pm 1 $ and width $ s_0 $ (i.e. conditionally independent two-dimensional observations). The hidden dynamics is the same as in the previous example. If you want to draw the link to neuroscience, you might consider those "sensors" to be two place cells that fire with a higher rate once the animal (the latent state) is close to their respective place fields. 
``` def NLPoisson_generateData(params): # unpack some parameters for readibility f = params["f"] g = params["g"] x = np.zeros(params["T"]+1) y = np.zeros((params["T"]+1,2)) # initialization (draw from Gauss with mean mu0 and variance Sigma0) x[0] = np.random.normal(params["mu0"],np.sqrt(params["Sigma0"])) for t in range(1,params["T"]+1): x[t] = np.random.normal( f(x[t-1]) , np.sqrt(params["Q"]) ) y[t] = np.random.poisson(g(x[t])) return x, y np.random.seed(42) N = 2000 c = 0.2 dt = 0.001 g0 = 50 s0 = 0.05 m0 = np.array([-1,1]) params = { "f": lambda x: x-4*x*(x**2-1)*dt, "Q": 2 * dt, "g": lambda x: g0 * np.transpose(np.exp( - np.array([x-m0[0],x-m0[1]])**2/(2 * s0**2) ))*dt, "mu0": 0, "Sigma0": 1, "T": int(5/dt), } x,y = NLPoisson_generateData(params) ``` Same as before, just code up the BPF. Careful: The weighting step requires a bit of thinking... ``` def NLPoisson_BPF(y,N,c,params): # %%%%%%%%%%%%%%% COPY CODE HERE, ADJUST %%%%%%%%%%%%%%%%%%% return x, w x_PF_PP,w_PF_PP = NLPoisson_BPF(y,N,c,params) t_minus = np.where(y[:,0]>=1)[0] t_plus = np.where(y[:,1]>=1)[0] T = params["T"]*dt t = np.arange(T/dt+1)*dt plotrange = [0, T, -2.5, 2.5] fig, (ax1,ax2) = plt.subplots(2,1,figsize=(7.5,8)) plt.subplots_adjust(hspace=0.5) ax1.plot(t,x,color = 'xkcd:moss') ax1.plot(t,np.average(x_PF_PP,1,w_PF_PP),color = 'xkcd:azure',linewidth=3) for spikepos in t[np.where(y>=1)[0]]: ax1.axvline(x=spikepos,linestyle='--',color = 'xkcd:light grey',linewidth=1) ax1.scatter(t[t_plus],2.2*np.ones(t_plus.size),marker="^",c='xkcd:eggplant purple',s=100) ax1.scatter(t[t_minus],-2.2*np.ones(t_minus.size),marker="v",c='xkcd:eggplant purple',s=100) ax1.axis(plotrange) ax1.legend(['hidden state','PF'],fontsize=16) hist = np.transpose(histImage(x_PF_PP,int(np.sqrt(N)),(-3,3),w=w_PF_PP)) ax2.imshow(np.flipud(hist), cmap='Oranges', interpolation='nearest', extent=[0,T,-3,3],aspect='auto',vmax=0.7) for spikepos in t[np.where(y>=1)[0]]: ax2.axvline(x=spikepos,linestyle='--',color = 'xkcd:light 
grey',linewidth=1) ax2.scatter(t[t_plus],2.2*np.ones(t_plus.size),marker="^",c='xkcd:eggplant purple',s=100) ax2.scatter(t[t_minus],-2.2*np.ones(t_minus.size),marker="v",c='xkcd:eggplant purple',s=100) ax2.plot(t,x,color='xkcd:moss') ax2.plot(t,np.average(x_PF,1,w_PF), linewidth=3,color = 'xkcd:azure') ax2.axis(plotrange) plt.show() ```
github_jupyter
``` #default_exp test #export from fastcore.imports import * from collections import Counter from contextlib import redirect_stdout from nbdev.showdoc import * from fastcore.nb_imports import * ``` # Test > Helper functions to quickly write tests in notebooks ## Simple test functions We can check that code raises an exception when that's expected (`test_fail`). To test for equality or inequality (with different types of things) we define a simple function `test` that compares two object with a given `cmp` operator. ``` #export def test_fail(f, msg='', contains='', args=None, kwargs=None): args, kwargs = args or [], kwargs or {} "Fails with `msg` unless `f()` raises an exception and (optionally) has `contains` in `e.args`" try: f(*args, **kwargs) except Exception as e: assert not contains or contains in str(e) return assert False,f"Expected exception but none raised. {msg}" def _fail(): raise Exception("foobar") test_fail(_fail, contains="foo") def _fail(): raise Exception() test_fail(_fail) ``` We can also pass `args` and `kwargs` to function to check if it fails with special inputs. ``` def _fail_args(a): if a == 5: raise ValueError test_fail(_fail_args, args=(5,)) test_fail(_fail_args, kwargs=dict(a=5)) #export def test(a, b, cmp,cname=None): "`assert` that `cmp(a,b)`; display inputs and `cname or cmp.__name__` if it fails" if cname is None: cname=cmp.__name__ assert cmp(a,b),f"{cname}:\n{a}\n{b}" test([1,2],[1,2], operator.eq) test_fail(lambda: test([1,2],[1], operator.eq)) test([1,2],[1], operator.ne) test_fail(lambda: test([1,2],[1,2], operator.ne)) show_doc(all_equal) test(['abc'], ['abc'], all_equal) show_doc(equals) test([['abc'],['a']], [['abc'],['a']], equals) #export def nequals(a,b): "Compares `a` and `b` for `not equals`" return not equals(a,b) test(['abc'], ['ab' ], nequals) ``` ## test_eq test_ne, etc... Just use `test_eq`/`test_ne` to test for `==`/`!=`. `test_eq_type` check things are equals and of the same type. 
We define them using `test`: ``` #export def test_eq(a,b): "`test` that `a==b`" test(a,b,equals, '==') test_eq([1,2],[1,2]) test_eq([1,2],map(int,[1,2])) test_eq(array([1,2]),array([1,2])) test_eq(array([1,2]),array([1,2])) test_eq([array([1,2]),3],[array([1,2]),3]) test_eq(dict(a=1,b=2), dict(b=2,a=1)) test_fail(lambda: test_eq([1,2], 1), contains="==") test_fail(lambda: test_eq(None, np.array([1,2])), contains="==") test_eq({'a', 'b', 'c'}, {'c', 'a', 'b'}) #hide import pandas as pd import torch df1 = pd.DataFrame(dict(a=[1,2],b=['a','b'])) df2 = pd.DataFrame(dict(a=[1,2],b=['a','b'])) df3 = pd.DataFrame(dict(a=[1,2],b=['a','c'])) test_eq(df1,df2) test_eq(df1.a,df2.a) test_fail(lambda: test_eq(df1,df3), contains='==') class T(pd.Series): pass test_eq(df1.iloc[0], T(df2.iloc[0])) test_eq(torch.zeros(10), torch.zeros(10, dtype=torch.float64)) test_eq(torch.zeros(10), torch.ones(10)-1) test_fail(lambda:test_eq(torch.zeros(10), torch.ones(1, 10)), contains='==') test_eq(torch.zeros(3), [0,0,0]) #export def test_eq_type(a,b): "`test` that `a==b` and are same type" test_eq(a,b) test_eq(type(a),type(b)) if isinstance(a,(list,tuple)): test_eq(map(type,a),map(type,b)) test_eq_type(1,1) test_fail(lambda: test_eq_type(1,1.)) test_eq_type([1,1],[1,1]) test_fail(lambda: test_eq_type([1,1],(1,1))) test_fail(lambda: test_eq_type([1,1],[1,1.])) #export def test_ne(a,b): "`test` that `a!=b`" test(a,b,nequals,'!=') test_ne([1,2],[1]) test_ne([1,2],[1,3]) test_ne(array([1,2]),array([1,1])) test_ne(array([1,2]),array([1,1])) test_ne([array([1,2]),3],[array([1,2])]) test_ne([3,4],array([3])) test_ne([3,4],array([3,5])) test_ne(dict(a=1,b=2), ['a', 'b']) test_ne(['a', 'b'], dict(a=1,b=2)) #export def is_close(a,b,eps=1e-5): "Is `a` within `eps` of `b`" if hasattr(a, '__array__') or hasattr(b,'__array__'): return (abs(a-b)<eps).all() if isinstance(a, (Iterable,Generator)) or isinstance(b, (Iterable,Generator)): return all(abs(a_-b_)<eps for a_,b_ in zip(a,b)) return abs(a-b)<eps 
#export def test_close(a,b,eps=1e-5): "`test` that `a` is within `eps` of `b`" test(a,b,partial(is_close,eps=eps),'close') test_close(1,1.001,eps=1e-2) test_fail(lambda: test_close(1,1.001)) test_close([-0.001,1.001], [0.,1.], eps=1e-2) test_close(np.array([-0.001,1.001]), np.array([0.,1.]), eps=1e-2) test_close(array([-0.001,1.001]), array([0.,1.]), eps=1e-2) #export def test_is(a,b): "`test` that `a is b`" test(a,b,operator.is_, 'is') test_fail(lambda: test_is([1], [1])) a = [1] test_is(a, a) #export def test_shuffled(a,b): "`test` that `a` and `b` are shuffled versions of the same sequence of items" test_ne(a, b) test_eq(Counter(a), Counter(b)) a = list(range(50)) b = copy(a) random.shuffle(b) test_shuffled(a,b) test_fail(lambda:test_shuffled(a,a)) a = 'abc' b = 'abcabc' test_fail(lambda:test_shuffled(a,b)) a = ['a', 42, True] b = [42, True, 'a'] test_shuffled(a,b) #export def test_stdout(f, exp, regex=False): "Test that `f` prints `exp` to stdout, optionally checking as `regex`" s = io.StringIO() with redirect_stdout(s): f() if regex: assert re.search(exp, s.getvalue()) is not None else: test_eq(s.getvalue(), f'{exp}\n' if len(exp) > 0 else '') test_stdout(lambda: print('hi'), 'hi') test_fail(lambda: test_stdout(lambda: print('hi'), 'ho')) test_stdout(lambda: 1+1, '') test_stdout(lambda: print('hi there!'), r'^hi.*!$', regex=True) #export def test_warns(f, show=False): with warnings.catch_warnings(record=True) as w: f() test_ne(len(w), 0) if show: for e in w: print(f"{e.category}: {e.message}") test_warns(lambda: warnings.warn("Oh no!"), {}) test_fail(lambda: test_warns(lambda: 2+2)) test_warns(lambda: warnings.warn("Oh no!"), show=True) #export TEST_IMAGE = 'images/puppy.jpg' im = Image.open(TEST_IMAGE).resize((128,128)); im #export TEST_IMAGE_BW = 'images/mnist3.png' im = Image.open(TEST_IMAGE_BW).resize((128,128)); im #export def test_fig_exists(ax): "Test there is a figure displayed in `ax`" assert ax and len(ax.figure.canvas.tostring_argb()) fig,ax = 
plt.subplots() ax.imshow(array(im)); test_fig_exists(ax) #export class ExceptionExpected: "Context manager that tests if an exception is raised" def __init__(self, ex=Exception, regex=''): self.ex,self.regex = ex,regex def __enter__(self): pass def __exit__(self, type, value, traceback): if not isinstance(value, self.ex) or (self.regex and not re.search(self.regex, f'{value.args}')): raise TypeError(f"Expected {self.ex.__name__}({self.regex}) not raised.") return True def _tst_1(): assert False, "This is a test" def _tst_2(): raise SyntaxError with ExceptionExpected(): _tst_1() with ExceptionExpected(ex=AssertionError, regex="This is a test"): _tst_1() with ExceptionExpected(ex=SyntaxError): _tst_2() ``` `exception` is an abbreviation for `ExceptionExpected()`. ``` #export exception = ExceptionExpected() with exception: _tst_1() #hide def _f(): with ExceptionExpected(): 1 test_fail(partial(_f)) def _f(): with ExceptionExpected(SyntaxError): assert False test_fail(partial(_f)) def _f(): with ExceptionExpected(AssertionError, "Yes"): assert False, "No" test_fail(partial(_f)) ``` ## Export - ``` #hide from nbdev.export import notebook2script notebook2script() ```
github_jupyter
## <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Confidence-Intervals" data-toc-modified-id="Confidence-Intervals-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Confidence Intervals</a></span><ul class="toc-item"><li><span><a href="#Agenda" data-toc-modified-id="Agenda-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Agenda</a></span></li></ul></li><li><span><a href="#Motivation-&amp;-Intuition" data-toc-modified-id="Motivation-&amp;-Intuition-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Motivation &amp; Intuition</a></span><ul class="toc-item"><li><span><a href="#Balancing-Precision-and-Uncertainty" data-toc-modified-id="Balancing-Precision-and-Uncertainty-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Balancing Precision and Uncertainty</a></span><ul class="toc-item"><li><span><a href="#🧠-Knowledge-Check" data-toc-modified-id="🧠-Knowledge-Check-2.1.1"><span class="toc-item-num">2.1.1&nbsp;&nbsp;</span>🧠 Knowledge Check</a></span></li><li><span><a href="#Using-Confidence-Intervals-to-Drive-that-Balance" data-toc-modified-id="Using-Confidence-Intervals-to-Drive-that-Balance-2.1.2"><span class="toc-item-num">2.1.2&nbsp;&nbsp;</span>Using Confidence Intervals to Drive that Balance</a></span></li><li><span><a href="#Scenario:-I-Have-a-Sweet-Tooth-🦷" data-toc-modified-id="Scenario:-I-Have-a-Sweet-Tooth-🦷-2.1.3"><span class="toc-item-num">2.1.3&nbsp;&nbsp;</span>Scenario: I Have a Sweet Tooth 🦷</a></span><ul class="toc-item"><li><span><a href="#Solution" data-toc-modified-id="Solution-2.1.3.1"><span class="toc-item-num">2.1.3.1&nbsp;&nbsp;</span>Solution</a></span></li></ul></li></ul></li><li><span><a href="#Key-Points" data-toc-modified-id="Key-Points-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Key Points</a></span></li></ul></li><li><span><a href="#Constructing-Confidence-Intervals" data-toc-modified-id="Constructing-Confidence-Intervals-3"><span 
class="toc-item-num">3&nbsp;&nbsp;</span>Constructing Confidence Intervals</a></span><ul class="toc-item"><li><span><a href="#Gaussian-Method:-Compare-Sample-with-the-Normal-Curve" data-toc-modified-id="Gaussian-Method:-Compare-Sample-with-the-Normal-Curve-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Gaussian Method: Compare Sample with the Normal Curve</a></span><ul class="toc-item"><li><span><a href="#Confidence-Intervals-for-Normally-Distributed-Data" data-toc-modified-id="Confidence-Intervals-for-Normally-Distributed-Data-3.1.1"><span class="toc-item-num">3.1.1&nbsp;&nbsp;</span>Confidence Intervals for Normally Distributed Data</a></span><ul class="toc-item"><li><span><a href="#🧠-Knowledge-Check" data-toc-modified-id="🧠-Knowledge-Check-3.1.1.1"><span class="toc-item-num">3.1.1.1&nbsp;&nbsp;</span>🧠 Knowledge Check</a></span></li></ul></li></ul></li><li><span><a href="#$t$-Distribution-Method:-When-Sample-isn't-Normal" data-toc-modified-id="$t$-Distribution-Method:-When-Sample-isn't-Normal-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>$t$-Distribution Method: When Sample isn't Normal</a></span><ul class="toc-item"><li><span><a href="#Confidence-Intervals-for-$t$-Distribution" data-toc-modified-id="Confidence-Intervals-for-$t$-Distribution-3.2.1"><span class="toc-item-num">3.2.1&nbsp;&nbsp;</span>Confidence Intervals for $t$-Distribution</a></span></li><li><span><a href="#$t$-Distribution-Examples" data-toc-modified-id="$t$-Distribution-Examples-3.2.2"><span class="toc-item-num">3.2.2&nbsp;&nbsp;</span>$t$-Distribution Examples</a></span><ul class="toc-item"><li><span><a href="#Generated-Normal-Samples" data-toc-modified-id="Generated-Normal-Samples-3.2.2.1"><span class="toc-item-num">3.2.2.1&nbsp;&nbsp;</span>Generated Normal Samples</a></span></li><li><span><a href="#Scenario" data-toc-modified-id="Scenario-3.2.2.2"><span class="toc-item-num">3.2.2.2&nbsp;&nbsp;</span>Scenario</a></span></li></ul></li></ul></li></ul></li><li><span><a 
href="#Interpreting-Confidence-Intervals" data-toc-modified-id="Interpreting-Confidence-Intervals-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Interpreting Confidence Intervals</a></span><ul class="toc-item"><li><span><a href="#Note-on-Notation:" data-toc-modified-id="Note-on-Notation:-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Note on Notation:</a></span></li></ul></li><li><span><a href="#Exercise-Time-💪🏼" data-toc-modified-id="Exercise-Time-💪🏼-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Exercise Time 💪🏼</a></span></li><li><span><a href="#Level-Up:-Confidence-Intervals-for-Non-Normally-Distributed-Data" data-toc-modified-id="Level-Up:-Confidence-Intervals-for-Non-Normally-Distributed-Data-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Level Up: Confidence Intervals for Non-Normally Distributed Data</a></span></li><li><span><a href="#Level-Up:-Bootstrap" data-toc-modified-id="Level-Up:-Bootstrap-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Level Up: Bootstrap</a></span><ul class="toc-item"><li><span><a href="#Example" data-toc-modified-id="Example-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>Example</a></span><ul class="toc-item"><li><span><a href="#Finding-the-Confidence-Interval" data-toc-modified-id="Finding-the-Confidence-Interval-7.1.1"><span class="toc-item-num">7.1.1&nbsp;&nbsp;</span>Finding the Confidence Interval</a></span></li></ul></li></ul></li></ul></div> # Confidence Intervals ## Agenda SWBAT: - Describe the use of confidence intervals - Construct confidence intervals for different types of distributions: - normal - $t$ ``` import numpy as np from scipy import stats from matplotlib import pyplot as plt import seaborn as sns import pandas as pd from math import gamma %matplotlib inline ``` # Motivation & Intuition Ever have an appointment with the cable company to setup your cable and/or internet? 
![xfinity logo](https://upload.wikimedia.org/wikipedia/en/6/65/Xfinity_2017.svg)

What did they tell you when they scheduled that appointment? Something like

> We'll be at your home between 7am to 8pm

And they're usually right. This is close to how we use **confidence intervals**

## Balancing Precision and Uncertainty

When we sample a population, we'd like to use the sample to tell us something about the population. But we're never 100% sure that the sample statistic (like the mean) is reflective of the population's true value. Maybe we sampled a bunch of weird data points.

So like the cable company, we can adjust how much *uncertainty* we're willing to accept in exchange for the *precision* of our estimate.

### 🧠 Knowledge Check

> Say we get a sample from our population and measure the mean. What factors would make you more or less confident in drawing conclusions about the population?

### Using Confidence Intervals to Drive that Balance

> Because sample statistics are imperfect representations of the true population values, it is often appropriate to state these estimates with **confidence intervals**.

### Scenario: I Have a Sweet Tooth 🦷

<img src="https://upload.wikimedia.org/wikipedia/commons/e/e4/JellyBellyPile.JPG" width=50%/>

> * Big bag of jelly beans (Population): $10,000$ jelly beans
> * Weigh a sample of $50$ jelly beans
>     - Average weight is $1.25$ grams

Can I take this data as a **good estimate** of the average weight over the whole *population* of jelly beans?

In a word, yes. (What else do I have to go on!?) But how do I express how close this estimate is to the population?
![](img/heres_your_estimate.gif) Notice: - Larger samples → more confident estimate represents population - Larger intervals → more confident true population value falling within interval ## Key Points - **Sample statistics** are _supplemented_ with **confidence intervals** to approximate the population - Generally believe **sample statistic** is in the _neighborhood_ of true population's statistic - The larger the sample, the less likely we got all the "weirdo" data points from the population - We **trade certainty with precision** by expanding our interval - Taking multiple samples (experiments) allows us more examples of where the true population statistic lies # Constructing Confidence Intervals > **Confidence interval** will depend on the statistics of our sample: > * Our measured/observed **sample mean** > * Our **sample size** > * Also depends on the underlying distribution of our data > - _Is it a normal distribution?_ There are a few methods in constructing a confidence interval. We'll go over two common methods, involving the normal curve (Gaussian method) and a method involving a new distribution called the _$t$-distribution_. z-score, t statistic, t distribution. Basically same thing. How far something is from the norm? From the mean? But basically same thing > **ASIDE: Bootstrap Method** > > There are a few ways to get a confidence interval and one way is to use a **bootstrap** method. We essentially simulate sampling the population with the sample we actually made. > > There is a [_Level Up section on this method_](#Level-Up:-Bootstrap) if you're interested in what this might look like. 
## Gaussian Method: Compare Sample with the Normal Curve > If our data are **normally** distributed, then we can use the shape of the normal curve to help us ![Gaussian distribution with percent](https://upload.wikimedia.org/wikipedia/commons/8/8c/Standard_deviation_diagram.svg) * Assume confidence interval will be **centered on our sample mean** * Endpoints are based on number of $z$-scores left & right of mean given by the **standard error** > **Standard Error** ($s_\bar{x}$ or $\hat{\sigma_\bar{x}}$): The estimate of the population variance changes by the overall variance of the sample and the size of the sample > > $$ \large \sigma \approx \hat{\sigma_\bar{x}} = \frac{\sigma_x}{\sqrt{n}} \\ \text{or} \\ \large \sigma \approx s_\bar{x} = \frac{s_x}{\sqrt{n}} $$ > > Note that the $x$ subscript reminds us this is derived from the sample The number of steps we take from the mean is determined by which **level of confidence** we want attached to our interval. In particular, we take $z$-many steps, where $z$ is the (two-tailed) $z$-score that corresponds to our chosen level of confidence. If our data are **not** normally distributed, then there are several strategies we might try, some of which ultimately depend on some connection to the normal distribution, like a strategy that appeals to the Central Limit Theorem. ### Confidence Intervals for Normally Distributed Data Let's look at an example with data we assume to be normally distributed: ``` # A normally distributed variable with a population size of 1000 population = list(stats.norm.rvs(size=1000, random_state=42)) # Let's calculate the population mean. pop_mean = np.mean(population) pop_mean # And the population standard deviation. 
pop_std = np.std(population) pop_std # Visualizing population (approximately normal) f, ax = plt.subplots() ax = sns.kdeplot(population, ax=ax, label='Population PDF') plt.axvline(pop_mean, ls='-.', c='r', label='$\mu$') std_label = f'$\mu \pm\sigma$: ({pop_mean-pop_std:.3f},{pop_mean+pop_std:.3f})' plt.axvline(pop_mean-pop_std, ls='--', c='pink') plt.axvline(pop_mean+pop_std, ls='--', c='pink', label=std_label) ax.legend() plt.tight_layout() ``` Let's say we take a sample of 50 from our population and that we want an 90%-confidence interval for our estimate of the population mean. ``` np.random.seed(42) sample = np.random.choice(a=population, size=50) np.mean(sample) ``` The $z$-score that corresponds to an 90%-confidence interval can be calculated. (*****Z-score is: How wrong are you willing to be to narrow the interval) In this case, we want 5% on the left, 5% on the right ![](https://upload.wikimedia.org/wikipedia/commons/7/7f/Confidence_Interval_90P.png) ``` # This gives the z-score for the 95th-percentile z = stats.norm.ppf(0.95) z stats.norm.ppf(0.05)#This is just the negative of the 95th percentile # Converting our z-score to standard_error = pop_std / np.sqrt(50) standard_error * z #This is to know how far to step out from mean sample_mean = np.mean(sample) (sample_mean - standard_error * z, sample_mean + standard_error * z) #this is the values for the confidence intervals ``` Thus we'd report our estimate of the population mean as $0.177 \pm 0.228$, or, equivalently, as $(-0.051, 0.405)$. Note that the true population mean of $0.0193$ is in fact in this range. #### 🧠 Knowledge Check > What if I wanted a 80%-confidence interval? 
``` z_80 = stats.norm.ppf(0.9) (sample_mean - standard_error * z_80, sample_mean + standard_error * z_80) ``` ## $t$-Distribution Method: When Sample isn't Normal Formally called the "Student's $t$-distribution" by William Gosset in 1908 ![imgguiness](./img/guiness.png) Similar to normal curve, but drops off less quickly at the tails (less certain that the mean is the "actual" mean) More **degrees of freedom** (essentially more data; more independent ways to vary) --> closer to a normal curve PDF of $t$-distribution: ${\frac {\Gamma \left({\frac {\nu +1}{2}}\right)}{{\sqrt {\nu \pi }}\,\Gamma \left({\frac {\nu }{2}}\right)}}\left(1+{\frac {x^{2}}{\nu }}\right)^{-{\frac {\nu +1}{2}}}\!$, where $\Gamma$ denotes the [Gamma Function](https://en.wikipedia.org/wiki/Gamma_function). parameter: $\nu > 0$ where $\nu$ is degrees of freedom (n-1) **$t$ distribution becomes closer to the normal distribution ($z$ distribution) as n increases** ![zvt](./img/z_vs_t.png) We can use the normal distribution when either: * the population standard deviation is known * the sample size is greater than 30. If **neither** of these holds true, we need to use the **$t$-distribution**. The $t$-distribution is wider and has different critical values for different sample sizes. ``` fig, ax = plt.subplots(7, figsize=(10, 20)) X = np.linspace(-10, 10, 201) nus = np.arange(2, 9) y_norm = 1 / np.sqrt(2*np.pi) * np.exp(-0.5 * X**2) for j in range(7): y = gamma((nus[j]+1) / 2) / (np.sqrt(np.pi*nus[j]) * gamma(nus[j] / 2)) *\ (1 + X**2/nus[j])**((-nus[j]+1) / 2) ax[j].plot(X, y, label=fr't-Distribution, $\nu$ = {nus[j]}') ax[j].plot(X, y_norm, label='Normal Distribution') ax[j].legend(); ``` ### Confidence Intervals for $t$-Distribution The construction of confidence intervals for the $t$-distribution is similar to how they are made for the normal distribution. But instead of $z$-scores, we'll have $t$-scores. 
And since we don't have access to the population standard deviation, we'll make use of the sample standard deviation instead. left endpt.: $\bar{x} - t\times\frac{s}{\sqrt{n}}$ <br/> right endpt.: $\bar{x} + t\times\frac{s}{\sqrt{n}}$ ``` conf_int = 0.95 interval_start, interval_end = stats.t.interval( alpha = conf_int, # Confidence level df = 99, # Degrees of freedom loc = 65, # Sample mean scale = 18) # Standard deviation estimate print(f'To get {conf_int*100}%: {interval_start} to {interval_end}') ``` stats.t.ppf will gather the t interval as well t score creates a wider range, so you just need to report what kind of test you used. ### $t$-Distribution Examples #### Generated Normal Samples Let's play around with making different confidence intervals for different sample sizes ``` # samples = np.random.normal(54, 17, size=100000) samples = np.random.normal(54, 17, size=500) # samples = np.random.normal(54, 17, size=5) mean = samples.mean() pop_ages = pd.DataFrame(samples) pop_ages.hist(bins=100,range=(-20,120),figsize=(9,9)) plt.axvline(mean, linewidth=3, ls='--', c='r', label=f'$\mu$:{mean:.1f}') plt.legend() plt.show() pop_ages.describe() #Min and Max of Confidence Interval stats.t.interval(alpha = 0.95, df = len(samples)-1, loc = samples.mean(), scale = stats.sem(samples)) ``` #### Scenario You are inspecting a hardware factory and want to construct a 90% confidence interval of acceptable screw lengths. You draw a sample of 30 screws and calculate their mean length as 4.8 centimeters and the standard deviation as 0.4 centimeters. What are the bounds of your confidence interval? 
``` n = 30 mean = 4.8 t_value = stats.t.ppf(0.95, n-1) margin_error = t_value * 0.4/(n**0.5) confidence_interval = (mean - margin_error, mean + margin_error) confidence_interval stats.t.interval(.05, df = n-1, loc = mean, scale = 0.4) ``` # Interpreting Confidence Intervals ``` # This is from standdowns # z= -1.28 and 1.28 on upper side import math mean = 12.5 sigma = 8 n = 400 lower = mean - (1.28*(sigma/math.sqrt(n))) upper = mean + (1.28*(sigma/math.sqrt(n))) lower, upper 660/1500 p = .44 z = 1.96 n = 1500 lower = p - (z * math.sqrt((.44*(1-.44))/1500)) upper = p + (z * math.sqrt((.44*(1-.44))/1500)) lower, upper #Z test practice from slides. 2 sample proportional test, large n size. Normal distribution. x1 = 130 n1 = 750 p1 = 130/750 x2 = 160 n2 = 700 p2 = 160/700 pstar = (x1+x2)/(n1+n2) z = (p1 - p2)/math.sqrt(pstar*(1-pstar)*((1/n1)+(1/n2))) #Null: The data does not provide sufficient evidence to concluce that for men 20-34 years old, #a higher percentage were overweight in 1990 than in 1980. #H0 = P90 = P80 #Alternative: The data does provide sufficient evidence to concluce that for men 20-34 years old, #a higher percentage were overweight in 1990 than in 1980. #Ha = P90 != P80 # level of significance is 95%, alpha being .05 #Identify sampling distribution: 2 sample proportional test and large n size so z test. So normal distribution. # difference of two proportions in the graphic #Calculate test statistic: this is z (-2.627691364061217) #z #this is saying, these two groups are 2.63 standard distributions away from each other #pstar is saying if they're from the same group. avg should be .2, but p1 and p2 #are spread out from that pstar, or .2. So what is the probability that if we #pulled the same from the same population what is the chance that I'd get #one sample of p1 mean, and one sample of p2 mean if I had pulled them from the same population #so now we can go to our z table and check it out. #the probability is .0043 or .43%. 
#so now based on the alpha level, we choose to reject the null hypothese because .43% is so much smaller #calculate p value, or find rejection region: #p value is .0043. from the z table. if want to use the crit val way, after this threshold, we reject values after this. #1.96 because it's a 95% confidence level. #side note: this is for two tailed test. If it were one, like just larger or just smaller, all the area #would go to one side so instead of splitting the 5% into 2.5 and 2.5(which makes it 1.96) #it would be 1.645 because it all goes to one side. since the full 5% goes to one side. #make test decision about null hypothesis: #as stated above, we'd reject. because p_val is so much smaller than alpha #state overall condition: # This ends standdowns ``` Wikipedia has this right, many others misinterpret it Wrong: > There is a 95% probability that the mean age is between 26.3 and 28.3 Correct: > If we find 100 (random) samples and create confidence intervals, we expect 95 intervals would contain the true mean of population age. ## Note on Notation: "we found our 95% confidence interval for ages to be from 26.3 and 28.3" OR "we are 95% confident that the average age falls between 26.3 and 28.3" # Exercise Time 💪🏼 ``` # Your turn! # You're weighing walruses in the Arctic in the attempt to estimate # the mean weight of the Canadian walrus population. You have a sample # of 30 walrus weights. The mean of the sample is 2000 lbs. and the # standard deviation is 200 lbs. Calculate the 80%-confidence interval. # Calculate the 70%-confidence interval. How do they compare to the # normal-distribution CIs? (To calculate the latter, just use the # sample standard deviation.) ``` # Level Up: Confidence Intervals for Non-Normally Distributed Data One of the most commonly used strategies for dealing with non-normally distributed data is to find a way to reduce the problem to one that involves normally distributed data! 
[Here](https://file.scirp.org/Html/3-1240887_76758.htm) is a review article that compares several different strategies. (Note that it ultimately recommends a sort of Bayesian method. We'll get to Bayesian reasoning in a later lesson.) # Level Up: Bootstrap One method of getting the confidence interval is to use **bootstrap** sampling. The idea is that we sample our sample to get an idea of what extreme values we could have got when sampling the population. With this, we can construct a confidence interval for our sample of the population The important to realize is we _only use our sample_ to determine the confidence interval. ![Sampling from population](https://github.com/flatiron-school/ds-central_limit_theorem/raw/008cecd491af6cf1df358903b6df622aac54c999/img/sample_pop.png) Below is an example of how this can be done. ## Example ``` # External file titanic_file = 'https://github.com/MrGeislinger/flatiron-school-data-science-curriculum-resources/raw/main/ProbabilityAndStats/StatisticalDistributions/data/titanic.csv' df_titanic = pd.read_csv(titanic_file) ages = df_titanic.Age.dropna() sns.distplot(ages) display(np.min(ages), np.max(ages), np.std(ages)) # Get an example sample sample = ages.sample(10, replace=True) print(sample.mean()) display(sample) def get_all_sample_means(data, n=10, n_samples=100): ''' ''' # samples = np.random.choice(data,size=(n_samples,n)) means = np.mean(samples, axis=1) # return means # Get many samples and their respective means samples = get_all_sample_means(ages,n=10, n_samples=30) samples # See the sample and the full data compared sns.distplot( samples, # Shows sample means kde=False, hist=False, rug=True ) ax = sns.distplot(ages) ax.vlines(ages.mean(), color='red',ymin=0,ymax=0.05) ``` ### Finding the Confidence Interval ``` def bootstrap_sample(sample, n_samples=10**4): ''' ''' # bs_sample_means = get_all_sample_means( sample, n=len(sample), n_samples=n_samples ) return bs_sample_means b_sample_means = bootstrap_sample(sample) 
display(np.mean(sample)) display(b_sample_means) sns.distplot(b_sample_means) plt.axvline(b_sample_means.mean(), color='red') np.mean(b_sample_means) two_std = np.std(b_sample_means)*2 (np.mean(sample)-two_std, np.mean(sample)+two_std) ```
github_jupyter
# main function for decomposition ### Author: Yiming Fang ``` import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn from torch.optim.lr_scheduler import StepLR import torchvision import torchvision.transforms as transforms from torchvision import models import tensorly as tl import tensorly from itertools import chain from tensorly.decomposition import parafac, partial_tucker import os import matplotlib.pyplot as plt import numpy as np import time from nets import * from decomp import * # load data def load_mnist(): print('==> Loading data..') transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform_train) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0) testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0) return trainloader, testloader def load_cifar10(): print('==> Loading data..') transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test) testloader 
= torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0) return trainloader, testloader # ImageNet is no longer publically available def load_imagenet(): print('==> Loading data..') transform_train = transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) transform_test = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) trainset = torchvision.datasets.ImageNet(root='./data', train=True, download=True, transform=transform_train) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0) testset = torchvision.datasets.ImageNet(root='./data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0) return trainloader, testloader def load_cifar100(): print('==> Loading data..') transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0) testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0) return trainloader, testloader # build model def build(model, decomp='cp'): print('==> Building model..') tl.set_backend('pytorch') full_net = model 
full_net = full_net.to(device) torch.save(full_net, 'models/model') if decomp: decompose(decomp) net = torch.load("models/model").cuda() print(net) print('==> Done') return net # training def train(epoch, train_acc, model): print('\nEpoch: ', epoch) model.train() criterion = nn.CrossEntropyLoss() train_loss = 0 correct = 0 total = 0 print('|', end='') for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() if batch_idx % 10 == 0: print('=', end='') print('|', 'Accuracy:', 100. * correct / total,'% ', correct, '/', total) train_acc.append(correct / total) return train_acc # testing def test(test_acc, model): model.eval() test_loss = 0 correct = 0 total = 0 criterion = nn.CrossEntropyLoss() with torch.no_grad(): print('|', end='') for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs = model(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() if batch_idx % 10 == 0: print('=', end='') acc = 100. 
* correct / total print('|', 'Accuracy:', acc, '% ', correct, '/', total) test_acc.append(correct / total) return test_acc # decompose def decompose(decomp): model = torch.load("models/model").cuda() model.eval() model.cpu() for i, key in enumerate(model.features._modules.keys()): if i >= len(model.features._modules.keys()) - 2: break conv_layer = model.features._modules[key] if isinstance(conv_layer, torch.nn.modules.conv.Conv2d): rank = max(conv_layer.weight.data.numpy().shape) // 10 if decomp == 'cp': model.features._modules[key] = cp_decomposition_conv_layer(conv_layer, rank) if decomp == 'tucker': ranks = [int(np.ceil(conv_layer.weight.data.numpy().shape[0] / 3)), int(np.ceil(conv_layer.weight.data.numpy().shape[1] / 3))] model.features._modules[key] = tucker_decomposition_conv_layer(conv_layer, ranks) if decomp == 'tt': model.features._modules[key] = tt_decomposition_conv_layer(conv_layer, rank) torch.save(model, 'models/model') return model # Run functions def run_train(i, model): train_acc = [] test_acc = [] for epoch in range(i): s = time.time() train_acc = train(epoch, train_acc, model) test_acc = test(test_acc, model) scheduler.step() e = time.time() print('This epoch took', e - s, 'seconds') print('Current learning rate: ', scheduler.get_lr()[0]) print('Best training accuracy overall: ', max(test_acc)) return train_acc, test_acc # main function def run_all(dataset, decomp=None, iterations=100, rate=0.05): global trainloader, testloader, device, optimizer, scheduler # choose an appropriate learning rate rate = rate # choose dataset from (MNIST, CIFAR10, ImageNet) if dataset == 'mnist': trainloader, testloader = load_mnist() model = Net() if dataset == 'cifar10': trainloader, testloader = load_cifar10() model = VGG('VGG19') if dataset == 'cifar100': trainloader, testloader = load_cifar100() model = VGG('VGG19') # check GPU availability device = 'cuda:0' if torch.cuda.is_available() else 'cpu' # choose decomposition algorithm from (CP, Tucker, TT) net = 
build(model, decomp) optimizer = optim.SGD(net.parameters(), lr=rate, momentum=0.9, weight_decay=5e-4) scheduler = StepLR(optimizer, step_size=5, gamma=0.9) train_acc, test_acc = run_train(iterations, net) if not decomp: decomp = 'full' filename = dataset + '_' + decomp torch.save(net, 'models/' + filename) np.save('curves/' + filename + '_train', train_acc) np.save('curves/' + filename + '_test', test_acc) %%time run_all('mnist') %%time run_all('mnist', 'cp', rate=0.01) %%time run_all('mnist', 'tucker') %%time run_all('mnist', 'tt') %%time run_all('cifar10', iterations=200) %%time run_all('cifar10', 'tucker', iterations=200) %%time run_all('cifar100', 'tt', iterations=200) ```
github_jupyter
``` from urllib.request import Request, urlopen import urllib import requests import pandas as pd from xlwt import Workbook from bs4 import BeautifulSoup import sys import time import random url_list = ["https://www.google.com/search?q=Aachen+Hbf", "https://www.google.com/search?q=Aalen+Hbf", "https://www.google.com/search?q=Aschaffenburg+Hbf", "https://www.google.com/search?q=Augsburg+Hbf", "https://www.google.com/search?q=Bad+Friedrichshall+Hbf", "https://www.google.com/search?q=Bayreuth+Hbf", "https://www.google.com/search?q=Schweinfurt+Hbf", "https://www.google.com/search?q=Bielefeld+Hbf", "https://www.google.com/search?q=Bingen(Rhein)+Hbf", "https://www.google.com/search?q=Bochum+Hbf", ] user_agent_list = [ #Chrome 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36', 'Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36', 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36', 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36', #Firefox 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)', 'Mozilla/5.0 (Windows NT 6.1; WOW64; 
Trident/7.0; rv:11.0) like Gecko', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)', 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko', 'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko', 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko', 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)', 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)' ] def soups(): for url in url_list: hdr = {'User-Agent': random.choice(user_agent_list)} #print(hdr) req = requests.get(url, headers = hdr) #page = urlopen(req) soup = BeautifulSoup(req.text, 'html.parser') yield soup # Scraping def getPropNames(soup): try: names.append(soup.find('div', class_='SPZz6b').find_next('span').text) except: names.append("PROSPECT") pass #print(elm.text) def getPropAdress(soup): try: addresses.append(soup.find('div', class_='i4J0ge').text) except: addresses.append("PROSPECT") pass def GetTime(time_remaining): return time_remaining names = [] addresses= [] totalNumbUrl = len(url_list) i = 0 time_remaining = totalNumbUrl for soup in soups(): getPropNames(soup) getPropAdress(soup) i+=1 time_remaining = (time_remaining - 1) if i%1 == 0: time.sleep(.5) sys.stdout.write('\r' + 'Current Url: ' + str(i) + ' Percentage: '+str(round((i/totalNumbUrl)*100))+ '%' + ' time remaining: ' + str(round(time_remaining/60))+" minutes ") #sys.stdout.flush() #print('\r' +str(round(i/totalNumbUrl)*100)+ '%') Data = {'names': names, 'addresses': addresses} print('') print('result:') print(Data) # Create a 
Pandas dataframe from the data. df = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in Data.items() ])) # Create a Pandas Excel writer using XlsxWriter as the engine. writer = pd.ExcelWriter('pandas_simple.xlsx', engine='xlsxwriter') # Convert the dataframe to an XlsxWriter Excel object. df.to_excel(writer, sheet_name='Sheet1') # Close the Pandas Excel writer and output the Excel file. writer.save() df.head() ```
github_jupyter
``` %load_ext autoreload %autoreload 2 import sys #sys.path.insert(1, '/home/ximo/Documents/GitHub/skforecast') %config Completer.use_jedi = False # Libraries # ============================================================================== import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.linear_model import Ridge from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline from skforecast.ForecasterAutoreg import ForecasterAutoreg from skforecast.model_selection import grid_search_forecaster ``` Since version 0.4.0, skforecast allows using sklearn pipelines as regressors. This is useful since, many machine learning models, need specific data preprocessing transformations. For example, linear models with Ridge or Lasso regularization benefits from features been scaled. > **⚠ WARNING:** > Version 0.4 of the skforecast library does not allow including ColumnTransformer in the pipeline used as regressor, so if the preprocessing transformations only apply to some specific columns, they have to be applied on the data set before training the model. A more detailed example can be found [here](https://www.cienciadedatos.net/documentos/py39-forecasting-time-series-with-skforecast-xgboost-lightgbm-catboost.html#Exogenous-variables). 
``` # Download data # ============================================================================== url = ('https://raw.githubusercontent.com/JoaquinAmatRodrigo/skforecast/master/data/h2o_exog.csv') data = pd.read_csv(url, sep=',', header=0, names=['date', 'y', 'exog_1', 'exog_2']) # Data preprocessing # ============================================================================== data['date'] = pd.to_datetime(data['date'], format='%Y/%m/%d') data = data.set_index('date') data = data.asfreq('MS') pipe = make_pipeline(StandardScaler(), Ridge()) pipe # Create and fit forecaster # ============================================================================== forecaster = ForecasterAutoreg( regressor = pipe, lags = 10 ) forecaster.fit(y=data['y'], exog=data[['exog_1', 'exog_2']]) forecaster ``` When performing grid search over a sklearn pipeline, the name of the parameters is preceded by the name of the model. ``` # Hyperparameter Grid search # ============================================================================== pipe = make_pipeline(StandardScaler(), Ridge()) forecaster = ForecasterAutoreg( regressor = pipe, lags = 10 # This value will be replaced in the grid search ) # Regressor's hyperparameters param_grid = {'ridge__alpha': np.logspace(-3, 5, 10)} # Lags used as predictors lags_grid = [5, 24, [1, 2, 3, 23, 24]] results_grid = grid_search_forecaster( forecaster = forecaster, y = data['y'], exog = data[['exog_1', 'exog_2']], param_grid = param_grid, lags_grid = lags_grid, steps = 5, metric = 'mean_absolute_error', refit = False, initial_train_size = len(data.loc[:'2000-04-01']), return_best = True, verbose = False ) print(results_grid.to_markdown(tablefmt="github")) ```
github_jupyter
``` %load_ext autoreload %autoreload 2 import nlppln with nlppln.WorkflowGenerator(working_dir='/home/jvdzwaan/cwl-working-dir/') as wf: wf.load(steps_dir='../ochre/cwl/') print wf.list_steps() in_dir = wf.add_input(in_dir='Directory') ocr_dir_name = wf.add_input(ocr_dir_name='string') gs_dir_name = wf.add_input(gs_dir_name='string') aligned_dir_name = wf.add_input(aligned_dir_name='string') files = wf.ls(in_dir=in_dir) aligned, gs, ocr = wf.icdar2017st_extract_text(in_file=files, scatter=['in_file'], scatter_method='dotproduct') gs_dir = wf.save_files_to_dir(dir_name=gs_dir_name, in_files=gs) ocr_dir = wf.save_files_to_dir(dir_name=ocr_dir_name, in_files=ocr) aligned_dir = wf.save_files_to_dir(dir_name=aligned_dir_name, in_files=aligned) wf.add_outputs(gs_dir=gs_dir) wf.add_outputs(ocr_dir=ocr_dir) wf.add_outputs(aligned_dir=aligned_dir) wf.save('../ochre/cwl/icdar2017st-extract-data.cwl', wd=True) import nlppln with nlppln.WorkflowGenerator(working_dir='/home/jvdzwaan/cwl-working-dir/') as wf: wf.load(steps_dir='../ochre/cwl/') print wf.list_steps() in_dir1 = wf.add_input(in_dir1='Directory') in_dir2 = wf.add_input(in_dir2='Directory') in_dir3 = wf.add_input(in_dir3='Directory') in_dir4 = wf.add_input(in_dir4='Directory') ocr_dir_name = wf.add_input(ocr_dir_name='string', default='ocr') gs_dir_name = wf.add_input(gs_dir_name='string', default='gs') aligned_dir_name = wf.add_input(aligned_dir_name='string', default='aligned') aligned_dir1, gs_dir1, ocr_dir1 = wf.icdar2017st_extract_data(aligned_dir_name=aligned_dir_name, gs_dir_name=gs_dir_name, ocr_dir_name=ocr_dir_name, in_dir=in_dir1) gs1 = wf.save_dir_to_subdir(inner_dir=gs_dir1, outer_dir=in_dir1) ocr1 = wf.save_dir_to_subdir(inner_dir=ocr_dir1, outer_dir=in_dir1) aligned1 = wf.save_dir_to_subdir(inner_dir=aligned_dir1, outer_dir=in_dir1) aligned_dir2, gs_dir2, ocr_dir2 = wf.icdar2017st_extract_data(aligned_dir_name=aligned_dir_name, gs_dir_name=gs_dir_name, ocr_dir_name=ocr_dir_name, in_dir=in_dir2) gs2 = 
wf.save_dir_to_subdir(inner_dir=gs_dir2, outer_dir=in_dir2) ocr2 = wf.save_dir_to_subdir(inner_dir=ocr_dir2, outer_dir=in_dir2) aligned2 = wf.save_dir_to_subdir(inner_dir=aligned_dir2, outer_dir=in_dir2) aligned_dir3, gs_dir3, ocr_dir3 = wf.icdar2017st_extract_data(aligned_dir_name=aligned_dir_name, gs_dir_name=gs_dir_name, ocr_dir_name=ocr_dir_name, in_dir=in_dir3) gs3 = wf.save_dir_to_subdir(inner_dir=gs_dir3, outer_dir=in_dir3) ocr3 = wf.save_dir_to_subdir(inner_dir=ocr_dir3, outer_dir=in_dir3) aligned3 = wf.save_dir_to_subdir(inner_dir=aligned_dir3, outer_dir=in_dir3) aligned_dir4, gs_dir4, ocr_dir4 = wf.icdar2017st_extract_data(aligned_dir_name=aligned_dir_name, gs_dir_name=gs_dir_name, ocr_dir_name=ocr_dir_name, in_dir=in_dir4) gs4 = wf.save_dir_to_subdir(inner_dir=gs_dir4, outer_dir=in_dir4) ocr4 = wf.save_dir_to_subdir(inner_dir=ocr_dir4, outer_dir=in_dir4) aligned4 = wf.save_dir_to_subdir(inner_dir=aligned_dir4, outer_dir=in_dir4) wf.add_outputs(gs1=gs1) wf.add_outputs(gs2=gs2) wf.add_outputs(gs3=gs3) wf.add_outputs(gs4=gs4) wf.add_outputs(ocr1=ocr1) wf.add_outputs(ocr2=ocr2) wf.add_outputs(ocr3=ocr3) wf.add_outputs(ocr4=ocr4) wf.add_outputs(aligned1=aligned4) wf.add_outputs(aligned2=aligned3) wf.add_outputs(aligned3=aligned2) wf.add_outputs(aligned4=aligned1) wf.save('../ochre/cwl/icdar2017st-extract-data-all.cwl', pack=True) ```
github_jupyter
# Joining all processed data This notebook joins all processed data and then saves it in a file for subsequent modeling. ``` # Last amended: 24th October, 2020 # Myfolder: C:\Users\Administrator\OneDrive\Documents\home_credit_default_risk # Objective: # Solving Kaggle problem: Home Credit Default Risk # Joining all processed datasets # # Data Source: https://www.kaggle.com/c/home-credit-default-risk/data # Ref: https://www.kaggle.com/jsaguiar/lightgbm-with-simple-features # 1.0 Libraries # (Some of these may not be needed here.) %reset -f import numpy as np import pandas as pd import gc # 1.1 Reduce read data size # There is a file reducing.py # in this folder. A class # in it is used to reduce # dataframe size # (Code modified by me to # exclude 'category' dtype) import reducing # 1.2 Misc import warnings import os warnings.simplefilter(action='ignore', category=FutureWarning) # 1.3 pd.set_option('display.max_colwidth', -1) pd.set_option('display.max_columns', 100) pd.set_option('display.max_rows', 100) # 1.4 Display multiple commands outputs from a cell from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # 2.0 Prepare to read data pathToData = "C:\\Users\\Administrator\\OneDrive\\Documents\\home_credit_default_risk" os.chdir(pathToData) # 2.1 Some constants num_rows=None # Implies read all rows nan_as_category = True # While transforming # 'object' columns to dummies # 3.0 Read previous application data first df = pd.read_csv( 'processed_df.csv.zip', nrows = num_rows ) # 3.0.1 Reduce memory usage by appropriately # changing data-types per feature: df = reducing.Reducer().reduce(df) # 3.1 df.shape # (356251, 262) df.head(2) # 3.2 df.columns df.drop(columns = ['Unnamed: 0', 'index'], inplace = True) df.columns # 3.3 df.head(2) # 3.4 Set SK_ID_CURR as Index df = df.set_index('SK_ID_CURR') df.head(2) df.shape # (356251, 259) # 4.0 Read bureau_agg bureau_agg = pd.read_csv( 'processed_bureau_agg.csv.zip', nrows = 
num_rows ) # 4.0.1 Reduce memory usage by appropriately # changing data-types per feature: bureau_agg = reducing.Reducer().reduce(bureau_agg) # 4.1 Set index bureau_agg.head(2) bureau_agg = bureau_agg.set_index("SK_ID_CURR") bureau_agg.head(2) bureau_agg.shape # (305811, 116) # 5.0 Join bureau_agg with df df = df.join( bureau_agg, how='left', on='SK_ID_CURR' ) # 5.1 df.shape # (356251, 375) df.head(2) # 5.2 Read previous application data prev_agg = pd.read_csv( 'processed_prev_agg.csv.zip', nrows = num_rows ) # 5.3 Reduce memory usage by appropriately # changing data-types per feature: prev_agg = reducing.Reducer().reduce(prev_agg) # 5.3 Set Index prev_agg.shape # (338857, 250) prev_agg.head(2) prev_agg = prev_agg.set_index("SK_ID_CURR") prev_agg.head(2) prev_agg.shape # (338857, 250) # 6.0 Join prev_agg with df df = df.join(prev_agg, how='left', on='SK_ID_CURR') df.shape # (356251, 624) df.head(2) # 7.0 Read processed POS data pos_agg = pd.read_csv( 'processed_pos_agg.csv.zip', nrows = num_rows ) # 7.0.1 Reduce memory usage by appropriately # changing data-types per feature: pos_agg = reducing.Reducer().reduce(pos_agg) # 7.1 pos_agg.shape # (337252, 19) pos_agg.head(2) pos_agg = pos_agg.set_index("SK_ID_CURR") pos_agg.head(2) pos_agg.shape # (337252, 18) # 7.2 Join POS with df df = df.join( pos_agg, how='left', on='SK_ID_CURR' ) df.shape # (356251, 642) df.head(2) # 8.0 Read processed installments data ins_agg = pd.read_csv( 'processed_ins_agg.csv.zip', nrows = num_rows ) # 8.0.1 Reduce memory usage by appropriately # changing data-types per feature: ins_agg = reducing.Reducer().reduce(ins_agg) # 8.1 Set index ins_agg.shape # (339587, 26) ins_agg.head(2) ins_agg = ins_agg.set_index("SK_ID_CURR") ins_agg.head(2) ins_agg.shape # (339587, 25) # 9.0 Join Installments data with df df = df.join(ins_agg, how='left', on='SK_ID_CURR') df.shape # (356251, 667) df.head(2) # 10.0 Read Credit card data cc_agg = pd.read_csv( 'processed_creditCard_agg.csv.zip', nrows = num_rows 
) # 10.0.1 Reduce memory usage by appropriately # changing data-types per feature: cc_agg = reducing.Reducer().reduce(cc_agg) # 10.1 Set Index cc_agg.shape # (103558, 142) cc_agg.head(2) cc_agg = cc_agg.set_index("SK_ID_CURR") cc_agg.head(2) cc_agg.shape # (103558, 141) # 11. Join Credit card data with df df = df.join(cc_agg, how='left', on='SK_ID_CURR') df.shape # (356251, 808) df.head(2) # 11.1 Save the results for subsequent use: df.to_csv("processed_df_joined.csv.zip", compression = "zip") ################## ```
github_jupyter
``` from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.models import Sequential from tensorflow.keras.optimizers import Adam ### YOUR CODE HERE from tensorflow.keras import regularizers ### import tensorflow.keras.utils as ku import numpy as np tokenizer = Tokenizer() #!wget --no-check-certificate \ # https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sonnets.txt \ # -O ./tmp/sonnets.txt data = open('./tmp/nlp_w4/sonnets.txt', encoding="utf8").read() corpus = data.lower().split("\n") tokenizer.fit_on_texts(corpus) total_words = len(tokenizer.word_index) + 1 # create input sequences using list of tokens input_sequences = [] for line in corpus: token_list = tokenizer.texts_to_sequences([line])[0] for i in range(1, len(token_list)): n_gram_sequence = token_list[:i+1] input_sequences.append(n_gram_sequence) # pad sequences max_sequence_len = max([len(x) for x in input_sequences]) input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre')) # create predictors and label predictors, label = input_sequences[:,:-1],input_sequences[:,-1] label = ku.to_categorical(label, num_classes=total_words) model = Sequential() model.add(Embedding(total_words, 100, input_length=max_sequence_len-1)) model.add(Bidirectional(LSTM(150, return_sequences = True))) model.add(Dropout(0.2)) model.add(LSTM(100)) model.add(Dense(total_words/2, activation='relu', kernel_regularizer=regularizers.l2(0.01))) model.add(Dense(total_words, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) history = model.fit(predictors, label, epochs=200, verbose=1) import matplotlib.pyplot as plt %matplotlib inline acc = history.history['accuracy'] loss = history.history['loss'] epochs = range(len(acc)) 
plt.plot(epochs, acc, 'b', label='Training accuracy') plt.title('Training accuracy') plt.figure() plt.plot(epochs, loss, 'b', label='Training Loss') plt.title('Training loss') plt.legend() plt.show() seed_text = "Help me Obi Wan Kenobi, you're my only hope" next_words = 100 for _ in range(next_words): token_list = tokenizer.texts_to_sequences([seed_text])[0] token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre') predicted = model.predict_classes(token_list, verbose=0) output_word = "" for word, index in tokenizer.word_index.items(): if index == predicted: output_word = word break seed_text += " " + output_word print(seed_text) ```
github_jupyter
# Implementing a Neural Network In this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset. ``` # A bit of setup import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.neural_net import TwoLayerNet %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) ``` We will use the class `TwoLayerNet` in the file `cs231n/classifiers/neural_net.py` to represent instances of our network. The network parameters are stored in the instance variable `self.params` where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation. ``` # Create a small net and some toy data to check your implementations. # Note that we set the random seed for repeatable experiments. input_size = 4 hidden_size = 10 num_classes = 3 num_inputs = 5 def init_toy_model(): np.random.seed(0) return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1) def init_toy_data(): np.random.seed(1) X = 10 * np.random.randn(num_inputs, input_size) y = np.array([0, 1, 2, 2, 1]) return X, y net = init_toy_model() X, y = init_toy_data() ``` # Forward pass: compute scores Open the file `cs231n/classifiers/neural_net.py` and look at the method `TwoLayerNet.loss`. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters. 
Implement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs. ``` scores = net.loss(X) print('Your scores:') print(scores) print() print('correct scores:') correct_scores = np.asarray([ [-0.81233741, -1.27654624, -0.70335995], [-0.17129677, -1.18803311, -0.47310444], [-0.51590475, -1.01354314, -0.8504215 ], [-0.15419291, -0.48629638, -0.52901952], [-0.00618733, -0.12435261, -0.15226949]]) print(correct_scores) print() # The difference should be very small. We get < 1e-7 print('Difference between your scores and correct scores:') print(np.sum(np.abs(scores - correct_scores))) ``` # Forward pass: compute loss In the same function, implement the second part that computes the data and regularization loss. ``` loss, _ = net.loss(X, y, reg=0.05) correct_loss = 1.30378789133 # should be very small, we get < 1e-12 print('Difference between your loss and correct loss:') print(np.sum(np.abs(loss - correct_loss))) ``` # Backward pass Implement the rest of the function. This will compute the gradient of the loss with respect to the variables `W1`, `b1`, `W2`, and `b2`. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check: ``` from cs231n.gradient_check import eval_numerical_gradient # Use numeric gradient checking to check your implementation of the backward pass. # If your implementation is correct, the difference between the numeric and # analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2. 
loss, grads = net.loss(X, y, reg=0.05) # these should all be less than 1e-8 or so for param_name in grads: f = lambda W: net.loss(X, y, reg=0.05)[0] param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False) print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))) ``` # Train the network To train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function `TwoLayerNet.train` and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement `TwoLayerNet.predict`, as the training process periodically performs prediction to keep track of accuracy over time while the network trains. Once you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.02. ``` net = init_toy_model() stats = net.train(X, y, X, y, learning_rate=1e-1, reg=5e-6, num_iters=100, verbose=False) print('Final training loss: ', stats['loss_history'][-1]) # plot the loss history plt.plot(stats['loss_history']) plt.xlabel('iteration') plt.ylabel('training loss') plt.title('Training Loss history') plt.show() ``` # Load the data Now that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset. ``` from cs231n.data_utils import load_CIFAR10 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the two-layer neural net classifier. These are the same steps as we used for the SVM, but condensed to a single function. 
""" # Load the raw CIFAR-10 data # cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' cifar10_dir = os.path.expanduser("~/.keras/datasets/cifar-10-batches-py") # Cleaning up variables to prevent loading data multiple times (which may cause memory issue) try: del X_train, y_train del X_test, y_test print('Clear previously loaded data.') except: pass X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # Subsample the data mask = list(range(num_training, num_training + num_validation)) X_val = X_train[mask] y_val = y_train[mask] mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis=0) X_train -= mean_image X_val -= mean_image X_test -= mean_image # Reshape data to rows X_train = X_train.reshape(num_training, -1) X_val = X_val.reshape(num_validation, -1) X_test = X_test.reshape(num_test, -1) return X_train, y_train, X_val, y_val, X_test, y_test # Invoke the above function to get our data. X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data() print('Train data shape: ', X_train.shape) print('Train labels shape: ', y_train.shape) print('Validation data shape: ', X_val.shape) print('Validation labels shape: ', y_val.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) ``` # Train a network To train our network we will use SGD. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate. 
``` input_size = 32 * 32 * 3 hidden_size = 50 num_classes = 10 net = TwoLayerNet(input_size, hidden_size, num_classes) # Train the network stats = net.train(X_train, y_train, X_val, y_val, num_iters=1000, batch_size=200, learning_rate=1e-4, learning_rate_decay=0.95, reg=0.25, verbose=True) # Predict on the validation set val_acc = (net.predict(X_val) == y_val).mean() print('Validation accuracy: ', val_acc) ``` # Debug the training With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good. One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization. Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized. ``` # Plot the loss function and train / validation accuracies plt.subplot(2, 1, 1) plt.plot(stats['loss_history']) plt.title('Loss history, ') plt.xlabel('Iteration') plt.ylabel('Loss') plt.subplot(2, 1, 2) plt.plot(stats['train_acc_history'], label='train') plt.plot(stats['val_acc_history'], label='val') plt.title('Classification accuracy history') plt.xlabel('Epoch') plt.ylabel('Classification accuracy') plt.legend() plt.show() from cs231n.vis_utils import visualize_grid # Visualize the weights of the network def show_net_weights(net): W1 = net.params['W1'] W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2) plt.imshow(visualize_grid(W1, padding=3).astype('uint8')) plt.gca().axis('off') plt.show() show_net_weights(net) ``` # Tune your hyperparameters **What's wrong?**. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. 
Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy. **Tuning**. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, numer of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value. **Approximate results**. You should be aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set. **Experiment**: You goal in this exercise is to get as good of a result on CIFAR-10 as you can (52% could serve as a reference), with a fully-connected Neural Network. Feel free implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.). **Explain your hyperparameter tuning process below.** $\color{blue}{\textit Your Answer:}$ ``` best_val = -1 best_net = None # store the best model into this ################################################################################# # TODO: Tune hyperparameters using the validation set. Store your best trained # # model in best_net. # # # # To help debug your network, it may help to use visualizations similar to the # # ones we used above; these visualizations will have significant qualitative # # differences from the ones we saw above for the poorly tuned network. 
# # # # Tweaking hyperparameters by hand can be fun, but you might find it useful to # # write code to sweep through possible combinations of hyperparameters # # automatically like we did on the previous exercises. # ################################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** input_size = 32 * 32 * 3 hidden_size = 50 num_classes = 10 results = {} # dict hparams: train_accuracy, val_accuracy learning_rates = [1e-3, 1e-4, 1e-5] regularization_strengths = [5e-6] import itertools # 调节超参数 experiments = itertools.product(learning_rates, regularization_strengths) for lr, reg in experiments: net = TwoLayerNet(input_dim, hidden_dim, num_classes) # Train the network stats = net.train(X_train_feats, y_train, X_val_feats, y_val, learning_rate=lr, learning_rate_decay=0.95, reg=reg, num_iters=1500//3, batch_size=200, verbose=True) y_train_pred = net.predict(X_train_feats) train_accuracy = np.mean(y_train == y_train_pred) print('Training accuracy: %f' % train_accuracy) # Predict on the validation set y_val_pred = net.predict(X_val_feats) val_accuracy = np.mean(y_val == y_val_pred) print('Validation accuracy: %f' % val_accuracy) results[(lr, reg)] = train_accuracy, val_accuracy, stats if val_accuracy > best_val: best_val = val_accuracy best_stats = stats best_net = net # Print out results. for lr, reg in sorted(results): train_accuracy, val_accuracy = results[(lr, reg)] print('lr %e reg %e train accuracy: %f val accuracy: %f' % ( lr, reg, train_accuracy, val_accuracy)) print('best validation accuracy achieved during cross-validation: %f' % best_val) # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # visualize the weights of the best network show_net_weights(best_net) ``` # Run on the test set When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%. 
``` test_acc = (best_net.predict(X_test) == y_test).mean() print('Test accuracy: ', test_acc) ``` **Inline Question** Now that you have trained a Neural Network classifier, you may find that your testing accuracy is much lower than the training accuracy. In what ways can we decrease this gap? Select all that apply. 1. Train on a larger dataset. 2. Add more hidden units. 3. Increase the regularization strength. 4. None of the above. $\color{blue}{\textit Your Answer:}$ $\color{blue}{\textit Your Explanation:}$
github_jupyter
--- _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._ --- # The Series Data Structure ``` import pandas as pd pd.Series? animals = ['Tiger', 'Bear', 'Moose'] pd.Series(animals) numbers = [1, 2, 3] pd.Series(numbers) animals = ['Tiger', 'Bear', None] pd.Series(animals) numbers = [1, 2, None] pd.Series(numbers) import numpy as np np.nan == None np.nan == np.nan np.isnan(np.nan) sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports) s s.index s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada']) s sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey']) s ``` # Querying a Series ``` sports = {'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'} s = pd.Series(sports) s s.iloc[3] s.loc['Golf'] s[3] s['Golf'] sports = {99: 'Bhutan', 100: 'Scotland', 101: 'Japan', 102: 'South Korea'} s = pd.Series(sports) s[0] #This won't call s.iloc[0] as one might expect, it generates an error instead s = pd.Series([100.00, 120.00, 101.00, 3.00]) s total = 0 for item in s: total+=item print(total) import numpy as np total = np.sum(s) print(total) #this creates a big series of random numbers s = pd.Series(np.random.randint(0,1000,10000)) s.head() len(s) %%timeit -n 100 summary = 0 for item in s: summary+=item %%timeit -n 100 summary = np.sum(s) s+=2 #adds two to each item in s using broadcasting s.head() for label, value in s.iteritems(): s.set_value(label, value+2) s.head() %%timeit -n 10 s = pd.Series(np.random.randint(0,1000,10000)) for label, value in s.iteritems(): s.loc[label]= value+2 %%timeit -n 10 s = 
pd.Series(np.random.randint(0,1000,10000)) s+=2 s = pd.Series([1, 2, 3]) s.loc['Animal'] = 'Bears' s original_sports = pd.Series({'Archery': 'Bhutan', 'Golf': 'Scotland', 'Sumo': 'Japan', 'Taekwondo': 'South Korea'}) cricket_loving_countries = pd.Series(['Australia', 'Barbados', 'Pakistan', 'England'], index=['Cricket', 'Cricket', 'Cricket', 'Cricket']) all_countries = original_sports.append(cricket_loving_countries) original_sports cricket_loving_countries all_countries all_countries.loc['Cricket'] ``` # The DataFrame Data Structure ``` import pandas as pd purchase_1 = pd.Series({'Name': 'Chris', 'Item Purchased': 'Dog Food', 'Cost': 22.50}) purchase_2 = pd.Series({'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50}) purchase_3 = pd.Series({'Name': 'Vinod', 'Item Purchased': 'Bird Seed', 'Cost': 5.00}) df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2']) df.head() df.loc['Store 2'] type(df.loc['Store 2']) df.loc['Store 1'] df.loc['Store 1', 'Cost'] df.T df.T.loc['Cost'] df['Cost'] df.loc['Store 1']['Cost'] df.loc[:,['Name', 'Cost']] df.drop('Store 1') df copy_df = df.copy() copy_df = copy_df.drop('Store 1') copy_df copy_df.drop? 
del copy_df['Name'] copy_df df['Location'] = None df ``` # Dataframe Indexing and Loading ``` costs = df['Cost'] costs costs+=2 costs df import pandas as pd !cat olympics.csv df = pd.read_csv('olympics.csv') df.head() df = pd.read_csv('olympics.csv', index_col = 0, skiprows=1) df.head() df.columns for col in df.columns: if col[:2]=='01': df.rename(columns={col:'Gold' + col[4:]}, inplace=True) if col[:2]=='02': df.rename(columns={col:'Silver' + col[4:]}, inplace=True) if col[:2]=='03': df.rename(columns={col:'Bronze' + col[4:]}, inplace=True) if col[:1]=='№': df.rename(columns={col:'#' + col[1:]}, inplace=True) df.head() ``` # Querying a DataFrame ``` df['Gold'] > 0 only_gold = df.where(df['Gold'] > 0) only_gold.head() only_gold['Gold'].count() df['Gold'].count() only_gold = only_gold.dropna() only_gold.head() only_gold = df[df['Gold'] > 0] only_gold.head() len(df[(df['Gold'] > 0) | (df['Gold.1'] > 0)]) df[(df['Gold.1'] > 0) & (df['Gold'] == 0)] ``` # Indexing Dataframes ``` df.head() df['country'] = df.index df = df.set_index('Gold') df.head() df = df.reset_index() df.head() df = pd.read_csv('census.csv') df.head() df['SUMLEV'].unique() df=df[df['SUMLEV'] == 50] df.head() columns_to_keep = ['STNAME', 'CTYNAME', 'BIRTHS2010', 'BIRTHS2011', 'BIRTHS2012', 'BIRTHS2013', 'BIRTHS2014', 'BIRTHS2015', 'POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015'] df = df[columns_to_keep] df.head() df = df.set_index(['STNAME', 'CTYNAME']) df.head() df.loc['Michigan', 'Washtenaw County'] df.loc[ [('Michigan', 'Washtenaw County'), ('Michigan', 'Wayne County')] ] ``` # Missing values ``` df = pd.read_csv('log.csv') df df.fillna? df = df.set_index('time') df = df.sort_index() df df = df.reset_index() df = df.set_index(['time', 'user']) df df = df.fillna(method='ffill') df.head() ```
github_jupyter
``` import traytable as tt import matplotlib.pyplot as plt ``` Download this notebook and try it out yourself [here](https://github.com/dennisbrookner/traytable/blob/main/docs/examples/0_simple_example.ipynb) ## Making a screen First, initialize the screen with `screen()`. This function requires that you specify * the parameter that varies by row * the parameter that varies by column * the plate shape, in the form of a "max well", e.g. the well in the bottom right corner of the plate. Note that `row` refers to the parameter encoded by the row name; this is the parameter that is the same within a row, rather than the parameter that varies across the row. Likewise for columns. Finally, whatever additional named arguments you pass to `screen()` become "screen static" global parameters that apply to all wells in all trays in the screen. Perhaps you include the protein construct, a nickname for the screen, or the type of plate you're using. ``` myscreen = tt.screen(row = 'protein', col = 'PEG', maxwell = 'H6', construct = 'HEWL', buffer = 'imidazole 20mM') ``` Now let's make a tray. Like with `screen()`, `tray()` will parse any additional named arguments as "tray static" parameters that apply to all wells in the tray. A common example might be the date the tray was set, or a buffer or additive that is the same across the plate. Most importantly, `tray()` accepts arguments `rows` and `cols` to specify the values of the parameters varying across the plate. These can be set in three ways: * with a list of two numbers, e.g. 
`row = [4, 18]` which would evenly space values across the rows (with number of rows determined via the `maxwell` parameter for the screen)
* with a list of numbers equal in length to the number of rows/columns, which get mapped to rows/columns explicitly
* with a single number, which will be used for all rows/columns

```
tray1 = tt.tray(myscreen, date = '2021-01-01', pH = 5.8, rows = [4,18], cols = [20,25])
```

The `clonetray()` method clones a tray with usage `newtray = clonetray(oldtray, **kwargs)` where you can override specific parameters of the tray being cloned. When trays are similar (or identical) this saves some typing.

```
tray2 = tt.clonetray(tray1, date = '2021-01-03', rows = [4, 5, 6, 7, 8, 10, 12, 14])
```

In this case, using `clonetray()` instead of `tray()` saves you from having to re-specify the pH and the column values, which haven't changed from the previous tray.

## Logging hits!

Our two trays have some crystals! We can log wells with good (or bad!) crystals via the `well()` function. `well()` requires the tray, well, and a short string to describe crystal quality; any other named parameters (perhaps a more verbose description, or a number of crystals) are accepted and get their own column in the resulting dataframe. For all but the first call to `well()`, don't forget `old_df=df` to concatenate the new results with the old results.

```
df = tt.well(tray1, 'A6', 'good', quantity = 3)
df = tt.well(tray1, 'B6', 'good', quantity = 2, note = "chunkier than usual", old_df=df)
df = tt.well(tray1, 'C6', 'needles', old_df=df)
df
```

The `well()` function uses the tray and well to look up all the data you've logged in your screens. If you have many wells, all of the same quality, you can log them all at once:

```
df = tt.well(tray2, ['B3', 'C3', 'D3', 'E3'], 'needles', old_df=df)
df = tt.well(tray2, ['A5', 'A6', 'B5'], 'good', old_df=df, note='borderline')
df
```

Finally, let's visualize which conditions are giving good crystals vs. needles.
``` colordict= {'good':'green', 'needles':'gray'} df.plot.scatter('protein', 'PEG', alpha=0.6, c=df.quality.map(colordict)) plt.title('What [protein] vs. %PEG gives the best crystals?') plt.show() ``` Looks like we should optimize with high PEG, low protein conditions. With `traytable`, no matter how many trays you've set with slightly varied screens, you can always consolidate your results in a single table or plot. ### Other things of note * You may have noticed that optional parameters present in some calls to `well()`, but not others, are harmlessly treated as `NaN` where missing. * The `setrows()` and `setcols()` methods are called behind the scenes by `tray()` and `clonetray()` via the `rows` and `cols` keyword arguments, respectively, but are also available as stand-alone functions with usage `tray = setrows(tray, rows)` and likewise for columns. ## Just a code chunk ``` import traytable as tt import matplotlib.pyplot as plt # make trays myscreen = tt.screen(row = 'protein', col = 'PEG', maxwell = 'H6', construct = 'HEWL', buffer = 'imidazole 20mM') tray1 = tt.tray(myscreen, date = '2021-01-01', pH = 5.8, rows = [4,18], cols = [20,25]) tray2 = tt.clonetray(tray1, date = '2021-01-03', rows = [4, 5, 6, 7, 8, 10, 12, 14]) # log results df = tt.well(tray1, 'A6', 'good', quantity = 3) df = tt.well(tray1, 'B6', 'good', quantity = 2, note = "chunkier than usual", old_df=df) df = tt.well(tray1, 'C6', 'needles', old_df=df) df = tt.well(tray2, ['B3', 'C3', 'D3', 'E3'], 'needles', old_df=df) df = tt.well(tray2, ['A5', 'A6', 'B5'], 'good', old_df=df, note='borderline') # plot results colordict= {'good':'green', 'needles':'gray'} df.plot.scatter('protein', 'PEG', alpha=0.6, c=df.quality.map(colordict)) plt.title('What [protein] vs. %PEG gives the best crystals?') plt.show() ```
github_jupyter
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!

#### Version Check
Plotly's python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.

```
import plotly
plotly.__version__
```

#### Basic Dot Plot
Dot plots show changes between two points in time or between two conditions.

```
import plotly.plotly as py
import plotly.graph_objs as go

trace1 = {"x": [72, 67, 73, 80, 76, 79, 84, 78, 86, 93, 94, 90, 92, 96, 94, 112],
          "y": ["Brown", "NYU", "Notre Dame", "Cornell", "Tufts", "Yale",
                "Dartmouth", "Chicago", "Columbia", "Duke", "Georgetown",
                "Princeton", "U.Penn", "Stanford", "MIT", "Harvard"],
          "marker": {"color": "pink", "size": 12},
          "mode": "markers",
          "name": "Women",
          "type": "scatter"
}

trace2 = {"x": [92, 94, 100, 107, 112, 114, 114, 118, 119, 124, 131, 137, 141, 151, 152, 165],
          "y": ["Brown", "NYU", "Notre Dame", "Cornell", "Tufts", "Yale",
                "Dartmouth", "Chicago", "Columbia", "Duke", "Georgetown",
                "Princeton", "U.Penn", "Stanford", "MIT", "Harvard"],
          "marker": {"color": "blue", "size": 12},
          "mode": "markers",
          "name": "Men",
          "type": "scatter",
}

data = [trace1, trace2]
layout = {"title": "Gender Earnings Disparity",
          "xaxis": {"title": "Annual Salary (in thousands)", },
          "yaxis": {"title": "School"}}

fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='basic_dot-plot')
```

#### Styled Categorical
Dot Plot ``` import plotly.plotly as py import plotly.graph_objs as go country = ['Switzerland (2011)', 'Chile (2013)', 'Japan (2014)', 'United States (2012)', 'Slovenia (2014)', 'Canada (2011)', 'Poland (2010)', 'Estonia (2015)', 'Luxembourg (2013)', 'Portugal (2011)'] voting_pop = [40, 45.7, 52, 53.6, 54.1, 54.2, 54.5, 54.7, 55.1, 56.6] reg_voters = [49.1, 42, 52.7, 84.3, 51.7, 61.1, 55.3, 64.2, 91.1, 58.9] trace0 = go.Scatter( x=voting_pop, y=country, mode='markers', name='Percent of estimated voting age population', marker=dict( color='rgba(156, 165, 196, 0.95)', line=dict( color='rgba(156, 165, 196, 1.0)', width=1, ), symbol='circle', size=16, ) ) trace1 = go.Scatter( x=reg_voters, y=country, mode='markers', name='Percent of estimated registered voters', marker=dict( color='rgba(204, 204, 204, 0.95)', line=dict( color='rgba(217, 217, 217, 1.0)', width=1, ), symbol='circle', size=16, ) ) data = [trace0, trace1] layout = go.Layout( title="Votes cast for ten lowest voting age population in OECD countries", xaxis=dict( showgrid=False, showline=True, linecolor='rgb(102, 102, 102)', titlefont=dict( color='rgb(204, 204, 204)' ), tickfont=dict( color='rgb(102, 102, 102)', ), showticklabels=True, dtick=10, ticks='outside', tickcolor='rgb(102, 102, 102)', ), margin=dict( l=140, r=40, b=50, t=80 ), legend=dict( font=dict( size=10, ), yanchor='middle', xanchor='right', ), width=800, height=600, paper_bgcolor='rgb(254, 247, 234)', plot_bgcolor='rgb(254, 247, 234)', hovermode='closest', ) fig = go.Figure(data=data, layout=layout) py.iplot(fig, filename='lowest-oecd-votes-cast') ``` ### Reference See https://plot.ly/python/reference/#scatter for more information and chart attribute options! 
``` from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'dot.ipynb', 'python/dot-plots/', 'Dot Plots', 'How to make dot plots in Python with Plotly.', title = 'Python Dot Plots | plotly', has_thumbnail='true', thumbnail='thumbnail/dot-plot.jpg', language='python', display_as='basic', order=3.1, ipynb= '~notebook_demo/2') ```
github_jupyter
## SASPy Tabulation for Descriptive Statistics This notebook demonstrates the usage of a powerful set of tools for descriptive statistics and nesting data in SASPy, powered by the TABULATE procedure. ``` import saspy sas = saspy.SASsession(cfgname='default') saspy.__version__ cars = sas.sasdata('cars', 'sashelp') cars.head() ``` ## Basic usage Like the TABULATE procedure on which it relies, using the **tabulate** methods attached to your SASPy data sets means specifying three things: 1. **class columns**, by which to group your data; 2. **var columns**, which contain data to be calculated; and 3. **statistics**, to be calculated on the var columns within groupings of data. Then you compose the table using a simple syntax, in which `*` indicates a nesting and `|` indicates elements at the same level (this is made possible by Python's operator overloading). ``` # define columns to use as classes or computational vars by_origin, by_type = cars.tabulate.classes('origin', 'type') horsepower, cylinders = cars.tabulate.vars('horsepower', 'cylinders') # grab statistics of interest mean, n = cars.tabulate.stats('mean', 'n') # compose these elements into a table cars.tabulate.table( left = by_origin * by_type, top = (horsepower | cylinders) * (mean | n) ) # alternatively, you can output pure-text tables using .text_table() cars.tabulate.text_table( left = by_origin * by_type, top = (horsepower | cylinders) * (mean | n) ) ``` ### Adding options Labels, formats (for aggregate statistics), and total groupings ('all' keyword) may also be specified, either when creating a placeholder or during composition using .with_() ``` # assign labels, formats by_origin, by_type = cars.tabulate.classes('origin', 'type', labels=False) horsepower, cylinders = cars.tabulate.vars('horsepower', 'cylinders') mean, n = cars.tabulate.stats('mean', 'n', formats=['6.2', '3.'], labels=['Average', 'Count']) # you can override or add options in composition by using .with_() cars.tabulate.table( left = 
by_origin.with_(all='Total') * by_type, top = (horsepower.with_(label='Power!') | cylinders) * (mean.with_(format='6.2') | n) ) ``` ### Alternative: Create class, var, and stat elements individually ``` by_origin = cars.tabulate.as_class('origin', label='', all='All Origins') by_type = cars.tabulate.as_class('type', label='') horsepower = cars.tabulate.as_var('horsepower') average = cars.tabulate.stat('mean', format='8.2', label='Mean HP') n = cars.tabulate.stat('n', label='Count', format='6.') cars.tabulate.table( left = by_origin, top = by_type * (n | horsepower.with_(label='') * average) ) ``` ## Composition and Re-use The real advantage of a Python interface to PROC TABULATE comes with the ability to compose fragments of interest and then recompose those into various tables at will. ``` # create some elements for reuse by_origin = cars.tabulate.as_class('origin', label='') by_type = cars.tabulate.as_class('type', label='') horsepower = cars.tabulate.as_var('horsepower', label='Horses') cylinders = cars.tabulate.as_var('cylinders', label='Cyls.') enginesize = cars.tabulate.as_var('enginesize', label='Engine Size') average = cars.tabulate.stat('mean', label='Avg', format='8.2') stdev = cars.tabulate.stat('std', label='Std. 
Dev.', format='5.2') n = cars.tabulate.stat('n', label='Count', format='6.0') # create some compositional fragments by_origin_and_type = by_origin.with_(all='All') * by_type.with_(all='All') hpstats = horsepower * (average | stdev | n) cylstats = cylinders * (average | stdev | n) enginestats = enginesize * (average | stdev) # draw a table cars.tabulate.table( left = by_origin_and_type, top = hpstats ) # draw another table cars.tabulate.table( left = by_type, top = cylstats | enginestats ) # grab another class when needed, draw another table drivetrain = cars.tabulate.as_class('drivetrain', label='Drive Train') cars.tabulate.table( left = by_type * drivetrain.with_(all='All'), top = cylstats | enginestats ) ``` ### Retrieve an indexed DataFrame One of SASPy's best features is the integration with Pandas DataFrames. Instead of drawing a presentational table in HTML or plain text, you can have the resulting nested values converted to a DataFrame using nested indices. Note that certain presentational elements (labels, formats, etc) aren't represented, nor is the exact visual arrangement of your groupings. However, all computations are in the DataFrame and can be further accessed or sliced easily in Python. ``` # invoke to_dataframe() instead of table() my_frame = cars.tabulate.to_dataframe( left = by_type * drivetrain * by_origin, top = cylstats | enginestats ) # my_frame is now a DataFrame; showing an excerpted slice my_frame[:10] # you can directly access portions of that nested frame by indices, from left to right my_frame.loc[('Sedan','Front','Asia')] my_frame.loc[('SUV')] ``` ### View the generated code This can also serve as an excellent tool for teaching the complex syntax of PROC TABULATE statements. ``` sas.teach_me_SAS(True) cars.tabulate.table( left = by_type * drivetrain.with_(all='All'), top = cylstats | enginestats ) ```
github_jupyter
# High-performance simulations with TFF This tutorial will describe how to setup high-performance simulations with TFF in a variety of common scenarios. TODO(b/134543154): Populate the content, some of the things to cover here: - using GPUs in a single-machine setup, - multi-machine setup on GCP/GKE, with and without TPUs, - interfacing MapReduce-like backends, - current limitations and when/how they will be relaxed. <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/federated/tutorials/simulations"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/master/docs/tutorials/simulations.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/federated/blob/master/docs/tutorials/simulations.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/simulations.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Before we begin First, make sure your notebook is connected to a backend that has the relevant components (including gRPC dependencies for multi-machine scenarios) compiled. Now, let's start by loading the MNIST example from the TFF website, and declaring the Python function that will run a small experiment loop over a group of 10 clients. 
``` #@test {"skip": true} !pip install --quiet --upgrade tensorflow-federated-nightly !pip install --quiet --upgrade nest-asyncio import nest_asyncio nest_asyncio.apply() import collections import time import tensorflow as tf import tensorflow_federated as tff source, _ = tff.simulation.datasets.emnist.load_data() def map_fn(example): return collections.OrderedDict( x=tf.reshape(example['pixels'], [-1, 784]), y=example['label']) def client_data(n): ds = source.create_tf_dataset_for_client(source.client_ids[n]) return ds.repeat(10).shuffle(500).batch(20).map(map_fn) train_data = [client_data(n) for n in range(10)] element_spec = train_data[0].element_spec def model_fn(): model = tf.keras.models.Sequential([ tf.keras.layers.InputLayer(input_shape=(784,)), tf.keras.layers.Dense(units=10, kernel_initializer='zeros'), tf.keras.layers.Softmax(), ]) return tff.learning.from_keras_model( model, input_spec=element_spec, loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) trainer = tff.learning.build_federated_averaging_process( model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.02)) def evaluate(num_rounds=10): state = trainer.initialize() for _ in range(num_rounds): t1 = time.time() state, metrics = trainer.next(state, train_data) t2 = time.time() print('metrics {m}, round time {t:.2f} seconds'.format( m=metrics, t=t2 - t1)) ``` ## Single-machine simulations Now on by default. ``` evaluate() ``` ## Multi-machine simulations on GCP/GKE, GPUs, TPUs, and beyond... Coming very soon.
github_jupyter
# Pure API Demonstration: Research Software These notebooks demonstrate some uses of the API of Elsevier's *Pure* Current Research Information System (CRIS). This notebook demonstrates some requests for research software. Research Software is currently recorded in Pure as a type of Research Output. **Enter API details - including an API key which gives access to the `research-outputs` endpoint - in [`_Config_DO_THIS_FIRST.ipynb`](./_Config_DO_THIS_FIRST.ipynb) and execute that notebook before executing this notebook. Additionally, the section looking at software across Schools requires access to the `organisational-units` endpoint.** ``` # We're using the requests library to talk to the API import requests # The display, HTML and Markdown libraries will help render HTML and Markdown from IPython.core.display import display, HTML, Markdown # The utility_functions.py script includes: # - pretty_print_json(json_object, ind=4) - prints json with indentation and colours import utility_functions as uf # Retrieve the api_url and headers set in the config notebook %store -r api_url %store -r headers # We'll be making requests to /research-outputs request_url = "/".join([api_url,"research-outputs"]) ``` Research Software has a type URI of `/dk/atira/pure/researchoutput/researchoutputtypes/nontextual/software`. We could use this as the value of the general-purpose 'q' parameter in a GET request, but to be sure we getting what we expect it's better to POST the request using JSON (or XML). 
``` # We need the json library to create the POST body import json # Create the JSON structure using dicts/lists request_body = { "typeUris": [ "/dk/atira/pure/researchoutput/researchoutputtypes/nontextual/software", ] } # Serialize as JSON request_json = json.dumps(request_body) # We need to modify the headers to specify the type of data we're submitting post_headers = headers.copy() post_headers["Content-Type"] = 'application/json' ``` ## Get all Research Software ``` # Make the request response = requests.post(url=request_url, headers=post_headers, data=request_json) research_software_json = response.json() # Display raw output uf.pretty_print_json(research_software_json) ``` ## Count Software by publication year Let's count number of software items published each year. ``` # We're going to get a count of research software items published each year from 1989 to 2018 pub_counts = {} for year in range(1989, 2019): # Add additional parameters to the request JSON request_body_for_count = request_body.copy() # We don't need the individual records - just the summary information request_body_for_count['size'] = 0 # Specify the date range request_body_for_count['publishedAfterDate'] = f'{year}-01-01' request_body_for_count['publishedBeforeDate'] = f'{year + 1}-01-01' request_json = json.dumps(request_body_for_count) # Make the request response = requests.post(url=request_url, headers=post_headers, data=request_json) research_software_json = response.json() # Add the result for this year to the results dictionary pub_counts[year] = research_software_json["count"] print(pub_counts) ``` ### Visualising this data We're going to use the Bokeh library to visualise this data. We won't go into the details of using Bokeh here; it's presented as an example of what can be done with data from the API. 
``` from bokeh.io import show, output_notebook from bokeh.plotting import figure from bokeh.palettes import PuBu import math output_notebook() # x labels need to be strings x_labels = list(map(str, list(pub_counts.keys()))) y_values = list(pub_counts.values()) p = figure(x_range=x_labels, plot_height=500, title="Publication Counts by Year") p.vbar(x=x_labels, top=y_values, width=0.9, color=PuBu[7][2]) p.xgrid.grid_line_color = None p.y_range.start = 0 p.xaxis.major_label_orientation = math.pi/2 show(p) ``` ## Count Software items by School ### Identify the schools First, we need to use the `organisational-units` endpoint to get a list of schools. ``` # We'll be making a request to /organisational-units org_request_url = "/".join([api_url,"organisational-units"]) # Create the JSON structure using dicts/lists # We only need the identifiers and names # Ordering alphabetically by name org_request_body = { "organisationalUnitTypeUris": [ "/dk/atira/pure/organisation/organisationtypes/organisation/school" ], "orderings": [ "name" ], "fields": [ "uuid", "name.text.value" ], "size": 50 } # Serialize as JSON org_request_json = json.dumps(org_request_body) # Make the request response = requests.post(url=org_request_url, headers=post_headers, data=org_request_json) schools_json = response.json() # Display details for item in schools_json["items"]: print(item["uuid"],item["name"]["text"][0]["value"]) ``` ### Get software counts by school ``` pub_counts = {} for item in schools_json["items"]: # Add additional parameters to the request JSON request_body_for_count = request_body.copy() # We don't need the individual records - just the summary information request_body_for_count['size'] = 0 # Specify the school request_body_for_count['forPersons'] = { "forOrganisations": { "uuids": [item["uuid"],] } } request_json = json.dumps(request_body_for_count) # Make the request response = requests.post(url=request_url, headers=post_headers, data=request_json) research_software_json = 
response.json() # Add the result for this year to the results dictionary pub_counts[item["name"]["text"][0]["value"]] = research_software_json["count"] print(pub_counts) output_notebook() # x labels need to be strings x_labels = list(map(str, list(pub_counts.keys()))) y_values = list(pub_counts.values()) p = figure(x_range=x_labels, plot_height=500, title="Software items by School") p.vbar(x=x_labels, top=y_values, width=0.9, color=PuBu[7][2]) p.xgrid.grid_line_color = None p.y_range.start = 0 p.xaxis.major_label_orientation = math.pi/2 show(p) ```
github_jupyter
``` from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import numpy as np import matplotlib.pyplot as plt from PIL import Image !ls Dataset2/MURA-v1.1/ PATH = 'Dataset2/MURA-v1.1/' train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'valid') ############## #cat = positive #dog = negative ############## train_cats_dir = os.path.join(train_dir, 'elbow_positive') # directory with our training cat pictures train_dogs_dir = os.path.join(train_dir, 'elbow_negative') # directory with our training dog pictures validation_cats_dir = os.path.join(validation_dir, 'elbow_positive') # directory with our validation cat pictures validation_dogs_dir = os.path.join(validation_dir, 'elbow_negative') # directory with our validation dog pictures num_cats_tr = len(os.listdir(train_cats_dir)) num_dogs_tr = len(os.listdir(train_dogs_dir)) num_cats_val = len(os.listdir(validation_cats_dir)) num_dogs_val = len(os.listdir(validation_dogs_dir)) total_train = num_cats_tr + num_dogs_tr total_val = num_cats_val + num_dogs_val print('total training cat images:', num_cats_tr) print('total training dog images:', num_dogs_tr) print('total validation cat images:', num_cats_val) print('total validation dog images:', num_dogs_val) print("--") print("Total training images:", total_train) print("Total validation images:", total_val) batch_size = 128 epochs = 1 IMG_HEIGHT = 150 IMG_WIDTH = 150 train_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our training data validation_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our validation data image_gen_train = ImageDataGenerator( rescale=1./255, rotation_range=45, width_shift_range=.15, height_shift_range=.15, horizontal_flip=True, 
zoom_range=0.5 ) train_data_gen = image_gen_train.flow_from_directory(batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary') augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) image_gen_val = ImageDataGenerator(rescale=1./255) val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size, directory=validation_dir, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary') sample_training_images, _ = next(train_data_gen) # This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column. def plotImages(images_arr): fig, axes = plt.subplots(1, 5, figsize=(20,20)) axes = axes.flatten() for img, ax in zip( images_arr, axes): ax.imshow(img) ax.axis('off') plt.tight_layout() plt.show() plotImages(sample_training_images[:5]) # model_new = Sequential([ # Conv2D(16, 3, padding='same', activation='relu', # input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)), # MaxPooling2D(), # Dropout(0.2), # Conv2D(32, 3, padding='same', activation='relu'), # MaxPooling2D(), # Conv2D(64, 3, padding='same', activation='relu'), # MaxPooling2D(), # Conv2D(128, 3, padding='same', activation='relu'), # MaxPooling2D(), # Conv2D(512, 3, padding='same', activation='relu'), # MaxPooling2D(), # Dropout(0.2), # Flatten(), # Dense(512, activation='relu'), # Dense(1) # ]) model = tf.keras.applications.DenseNet169(input_shape=(IMG_HEIGHT, IMG_WIDTH,3), include_top=False, weights='imagenet') model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=['accuracy']) model.summary() history = model.fit_generator( train_data_gen, steps_per_epoch=total_train // batch_size, epochs=epochs, validation_data=val_data_gen, validation_steps=total_val // batch_size ) acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = 
range(epochs) plt.figure() plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() ```
github_jupyter
# Introduction to Scientific Python # ## Purpose of this tutorial ## This tutorial aims to be a not-so-gentle but useful introduction to the python programming language. The intended audience is anyone with some familiarity with programming but limited experience with python for scientific work. Good computational skills are an increasingly important part of the skillset for graduate students and researchers in any field. Unfortunately the education and tools available can be very patchy. There's nothing wrong with learning as you go but it WILL make more work for you down the road. You will run into a problem you can't solve with your specific toolkit, your dataset will get too big, you won't be able to figure out why your analysis takes hours to run and you'll spend days making graphs for a paper that could have been finished in half an hour. Particularly for people who are just starting out it will pay off to put in some hours learning good programming and data analysis practices up front. ## Why Python? ## Python is an excellent language for dealing with data. Its libraries for statistics, graphing, data management and scientific programming are extremely good and constantly improving. Python is also a language that can grow with you. Languages like R are also excellent for data but are fairly domain specific and will not give you the tools to solve a wider range of problems. You may not be planning to write a web application, machine learning system, or gui today but if you start in Python those tools will be there when you need them. ## Table of Contents ## 1. [Introduction](00_scipy_introduction.ipynb) 1. Setting Up Your Environment 2. The Scientific Python Stack 2. [Programming in Python](01_scipy_programming.ipynb) 1. Types and data structures 2. Branching and conditonals 3. Loops and itteration 4. Functions and Functional 5. Classes 6. Input & Output 3. [Essential Tasks and the Libraries to do them](02_scipy_essentials.ipynb) 1. Numeric data with Numpy 2. 
Everything data with Pandas 3. Graphing with matplotlib 4. Statistics with Numpy & Scipy ## Setting Up Your Environment ## For scientific programming I highly reccomend using one of the prebuilt distributions for science and data analysis. Either Anaconda, Canopy or Python(x,y) is good but for this tutorial we will be using Anaconda. There are a couple reasons for using these distributions over a vanilla python distribution and then downloading the necessary libraries. Anaconda is batteries included and will provide a python environment that already has all the vital libraries and manages them for you. Particularly on Windows manually installing some of the math libraries is a hassle because they contain C, Fortran and other non-Python code that must be compiled every time you update them. All of these distributions will also give you an environment for interactive computing (rapidly modifying and seeing the results of your code) which you will find extremely handy for common tasks such as tweaking graphs for publications. 1. Go to this link and download the distribution for your system [Anaconda Download](https://www.continuum.io/downloads) 2. Click open and follow the installation instructions 3. That's it ## Familiarize yourself with your tools ## Anaconda comes with several tools and interfaces that you will want to get to know. Many of these such as Conda, are direct clones of industry standard python tools (pip) so skills learned with these will easily transfer. ### 1. [Ipython Command Prompt](http://ipython.readthedocs.io/en/stable/) ### ![Ipython Shell](files/img/ipythonshell.png) The Ipython command prompt is an enhanced version of the python shell for interactively running Python code. Any code you type in will be immediately run within the shell program. As you are learning I suggest you keep an ipython shell open to try out lines of code and experiment. 
Open one by typing the command 'ipython' into your systems command prompt or opening the Ipython QtConsole. #### Magic Commands ( % ) #### Commands that start with `%` or `%%` are magic commands that interact with the Ipython environment. Try this command to start logging your command history to a file. When you are finished just type `%logstop`. When invoked in Jupyter notebooks `%%` will apply the magic command to the entire cell. [Full list of Magics](http://ipython.readthedocs.io/en/stable/interactive/magics.html) ~~~~ %logstart mydirectory/logfile.py x = 'this command will be recorded in our logfile' %logstop ~~~~ #### System Commands ( ! ) #### Commands that start with `!` are passed to your system shell, these will act as though you typed them into your systems normal command prompt but they can interact directly with python code and the results can be saved as python variables. ~~~~ !ping bbc.com results = !ping bbc.com ~~~~ #### Enhanced Object Information ( ? ) #### Commands that start or end with `?` or `??` provide information about an object, for now just know that this can be used to print useful information about variables. ~~~~ myvariable = 125 ?myvariable ??myvariable ~~~~ Everything else you type in will be interpreted as Python code ~~~~ x = 'hello world' print x ~~~~ ### 2. [Jupyter/Ipython Notebooks](https://jupyter.readthedocs.io/en/latest/index.html) ### ![Jupyter Notebooks](files/img/jupyterpreview.png) Jupyter notebooks are a web server/browser interface for running python code and displaying the results as well as including text, graphics, mathematical formulas and exporting to a variety of formats. Jupyter notebooks were created with the goal of enabling shareable and reproducible analysis for research. They are excellent for this purpose. 
A well documented Jupyter notebook can be used to clean your data, run your analyses, generate publication quality graphics, explain what you did and then can be exported to make a manuscript, a slideshow, a blogpost and etc. #### Cells #### A Jupyter notebook is made up of a series of 'cells'. Cells are independent code or text blocks that can be executed independently of one another. (while the cells execute independently they are not in seperate namespaces, a variable declared in one cell can be accessed or overwritten by another cell be careful as this can introduce unexpected behavior) #### Kernel #### The 'heart' of the Jupyter notebook is the kernel which is the program that interprets and executes the code you write in your notebook. The main kernel is of course python, but notebooks give you the ability to swap out the kernel and use different versions of Python or other languages such as R, Julia, Octave, Matlab, Haskell and many others. #### Interface #### Open up a new Jupyter Notebook now and you should see some familiar menu items such as File -> save and some unfamiliar ones such as File -> Revert to Checkpoint. Take a few minutes to play with these and familiarize yourself with the environment. I suggest hitting help->User Interface Tour. Make sure you can. 1. Run a cell 2. Create and delete cells 3. Change the content type of cells from code to markdown and back 4. Export your notebook to different formats. 5. Open and close new notebooks 6. Save!! 7. Revert to a checkpoint ### 3. [Conda Package and Environment Manager](https://conda.io/docs/get-started.html) ### #### Packages #### The Conda package manager is an open source package management program that comes with Anaconda. Conda is a direct clone of the default pythong package manager(pip) so all syntax is interchangeable. Packages are libraries of python code that are not included in the basic Python installation and add some additional functionality. 
By default Anaconda comes with a number of packages preinstalled. You can see these by opening up a command prompt and typing. ~~~~ conda list ~~~~ Conda can be used to install any package found in the Conda repository. Conda should contain most any package you'll need for now. The following commands will search for and install packages. To install a package to a particular environment simply specify the name of the environment with the install command. ~~~~ conda search beautifulsoup4 conda install beautifulsoup4 ~~~~ or ~~~~ conda install --name environmentName beautifulsoup4 ~~~~ #### Virtual Environments #### Conda also manages what are called virtual environments. A virtual environment(ve) is an indepenedent installation of Python which can include configuration information and a set of packages. Because Python is an interpreted language all Python scripts are executed in some environment. Anaconda comes with a default environment but you can also create your own. This can be useful if you have scripts that require out of date packages to run, or if you want to distribute a program along with its dependencies to colleagues or for simply keeping your particular setup and package list all in one place. Using virtual environments is good practice and standard in industry so its a good habit to pick up. The following code creates an environment using python 2.7 called franksLanguagePython and installs two useful packages for dealing with text: natural language toolkit, and beautifulsoup a webscraping package. We then list information about all available environments and use the activate command to switch to using our newly created environment. ~~~~ conda create --name myLanguageVenv python=2.7 NLTK beautifulsoup4 conda info --envs activate myLanguagVenv ~~~~ ### 4. Text Editor ### The old ways are still strong. We will mostly be using Jupyter Notebooks for this tutorial but you should make sure you have access to a good programming focused text editor. 
Atom, Notepad++, SublimeText, Vim, Emacs and etc. are all good options. ### 5. IDE ### For scientific data analysis integrated development environments are not particularly useful, however if you find yourself doing web development, gui programming or application development I highly recommend [pyCharm](https://www.jetbrains.com/pycharm/download) which is free for educational institutions. IDEs include advanced tools for programming, builds, interacting with external tools and can be essential for certain kinds of programming. ### 6. A Web Browser ### Never code alone. The best part of the Python ecosystem for scientific computing is the incredible range of resources, tutorials and community available. My entirely subjective opinion is that Python has the best community by far and their official documentation is a close second to industry giants like Matlab. Here are some of my favorite resources. - [Python Official Docs](https://www.python.org/doc/) - [Scientific Python Hub](http://www.scipy.org/) - [Scipy Lecture Notes (very good) ](http://www.scipy-lectures.org/) - [Software Carpentry Lessons](https://software-carpentry.org/lessons/) - [Data Carpentry Lessons](http://www.datacarpentry.org/lessons/) - [NBViewer (view/share interesting notebooks online)](https://nbviewer.jupyter.org/) - [Curated Collection of Interesting Jupyter Notebooks](https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks) - [Collection of Machine Learning notebooks/resources](https://sebastianraschka.com/notebooks/python-notebooks.html) - [Pandas Cookbook](https://github.com/jvns/pandas-cookbook) - [Pystats Subreddit](https://www.reddit.com/r/pystats/) - [PyData Confrence (videos of all talks)](https://www.youtube.com/user/PyDataTV/videos) ## The Scientific Python Stack ## The type of programming you will be doing as a graduate student or scientist is a bit different from the work done in the world of professional developers. 
Scientific programming is almost always data focused work with the goal of using statistics and analysis to demonstrate and document some interesting effect. The main challenges all programmers working with data face are reliably importing data from various formats, getting it into a useful data structure and then producing relevant analyses and graphs from the data. In the last few years a set of python libraries for working with data have emerged as the defacto standard for this kind of work. Taken together these libraries cover most of the workflow for scientific work and are often called the scientific python stack. ### IPython ### An enhanced environment for doing scientific work in python. Provides a number of enhancements for workflow such as embedding markdown and mathjax formulas for documentation, inline graphing and etc. ### Numpy ### The backbone library for scientific work in python. Numpy provides fixed-type numeric array and matrix data structures for python and fast matrix and linear algebra functions. Numpy arrays should always be used over native python lists for numeric data. ### Pandas ### A data analysis library which provides a spreadsheet like DataFrame object for efficiently storing and manipulating data as well as functions for importing and exporting data. Acts as a powerful wrapper for using numpy objects. ### SciPy ### The python library containing most of Pythons scientific and statistical tools. Contains functions for statistics, signal processing, integration, linear algebra, clustering and many others. ### Matplotlib ### The main graphing library for python. Provides an interface for creating and manipulating graphics. Everything from simple scatter plots to animated heat maps. #### Example #### Here's a high level overview of how you would use these libraries to complete a common scientific task. 
You've been sent an excel spreadsheet by a collaborator containing the data from an experiment testing the effect of an intervention on a number of variables. Columns contain different variables and each row is an independent data point. You've been asked to take a look at the data and see which if any variables the treatement had an effect on. How would we go about this with Python? 1. Open an Ipython Notebook 2. Use Pandas import functionality to load the spreadsheet into a Pandas dataFrame object. The numeric data will automatically be converted to Numpy arrays and the column names will be preserved. 3. Using Pandas grouping functionality split the data into the control and treatement groups. 4. Loop through all the variables of interest and using Scipy run the appropriate statistical tests between control and treatement for each variable. 5. Use MatplotLib to produce graphs of the data for each 6. Export your Ipython Notebook containing the results and graphs to a pdf and email it to your collaborator
github_jupyter
``` import numpy as np from keras.models import Model from keras.layers import Input, Dense, RepeatVector from keras.layers.merge import Add, Subtract, Multiply, Average, Maximum, Minimum, Concatenate, Dot from keras import backend as K import json from collections import OrderedDict def format_decimal(arr, places=6): return [round(x * 10**places) / 10**places for x in arr] DATA = OrderedDict() ``` **[merge.Multiply.0]** ``` random_seed = 100 data_in_shape = (6,) layer_0 = Input(shape=data_in_shape) layer_1a = Dense(2, activation='linear')(layer_0) layer_1b = Dense(2, activation='linear')(layer_0) layer_2 = Multiply()([layer_1a, layer_1b]) model = Model(inputs=layer_0, outputs=layer_2) np.random.seed(random_seed) data_in = np.expand_dims(2 * np.random.random(data_in_shape) - 1, axis=0) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(random_seed + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) result = model.predict(data_in) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) DATA['merge.Multiply.0'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[merge.Multiply.1]** ``` random_seed = 100 data_in_shape = (6,) layer_0 = Input(shape=data_in_shape) layer_1a = Dense(2, activation='linear')(layer_0) layer_1b = Dense(2, activation='linear')(layer_0) layer_1c = Dense(2, activation='linear')(layer_0) layer_2 = Multiply()([layer_1a, layer_1b, layer_1c]) model = Model(inputs=layer_0, outputs=layer_2) np.random.seed(random_seed) data_in = np.expand_dims(2 * np.random.random(data_in_shape) - 1, axis=0) # set weights to random (use seed for reproducibility) weights = [] for i, w in 
enumerate(model.get_weights()): np.random.seed(random_seed + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) result = model.predict(data_in) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) DATA['merge.Multiply.1'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` **[merge.Multiply.2]** ``` random_seed = 100 data_in_shape = (6,) layer_0 = Input(shape=data_in_shape) layer_1a = Dense(2, activation='linear')(layer_0) layer_1b = Dense(2, activation='linear')(layer_0) layer_1c = Dense(2, activation='linear')(layer_0) layer_1d = Dense(2, activation='linear')(layer_0) layer_2 = Multiply()([layer_1a, layer_1b, layer_1c, layer_1d]) model = Model(inputs=layer_0, outputs=layer_2) np.random.seed(random_seed) data_in = np.expand_dims(2 * np.random.random(data_in_shape) - 1, axis=0) # set weights to random (use seed for reproducibility) weights = [] for i, w in enumerate(model.get_weights()): np.random.seed(random_seed + i) weights.append(2 * np.random.random(w.shape) - 1) model.set_weights(weights) result = model.predict(data_in) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) DATA['merge.Multiply.2'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights], 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } ``` ### export for Keras.js tests ``` import os filename = '../../../test/data/layers/merge/Multiply.json' if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) with open(filename, 'w') as f: 
json.dump(DATA, f) print(json.dumps(DATA)) ```
github_jupyter
``` import numpy as np import os,sys sys.path.append('.') sys.path.append('../RL_lib/Utils') %load_ext autoreload %load_ext autoreload %autoreload 2 %matplotlib nbagg import os print(os.getcwd()) %%html <style> .output_wrapper, .output { height:auto !important; max-height:1000px; /* your desired max-height here */ } .output_scroll { box-shadow:none !important; webkit-box-shadow:none !important; } </style> ``` # Image a cube's vertices ## grayscale: 0 is black, higher intensities are lighter ## here intensities are ranges. so foreground should have smaller range, and therefore be darker ## Optical axis +Z (default) # Centered ``` from camera_model_cont import Camera_model import attitude_utils as attu import optics_utils as optu import itertools from time import time ap = attu.Euler_attitude() object_locations = optu.make_cube(10.,1.0*np.asarray([0,0,200])) agent_location = 1.0*np.asarray([0,0,100]) object_intensities = np.linalg.norm(object_locations-agent_location,axis=1)-50 fov=np.pi/4 yaw = 0.0 pitch = 0.0 roll = 0.0 agent_q = np.asarray([yaw,pitch,roll]) C_cb = optu.rotate_optical_axis(0.0, 0.0, 0.0) r_cb = np.asarray([0,0,0]) cm = Camera_model(attitude_parameterization=ap, C_cb=C_cb, r_cb=r_cb, fov=fov, debug=False) t0 = time() pix1 = cm.get_pixel_coords(agent_location, agent_q, object_locations, object_intensities) t1 = time() print('ET: ',t1-t0) cm.render(agent_location, agent_q, object_locations, object_intensities) ``` # Positive Roll, Image should move down in FOV ``` object_locations = optu.make_cube(10.,1.0*np.asarray([0,0,200])) agent_location = 1.0*np.asarray([0,0,100]) object_intensities = np.linalg.norm(object_locations-agent_location,axis=1)-50 fov=np.pi/4 yaw = 0.0 pitch = 0.0 roll = np.pi/16 agent_q = np.asarray([yaw,pitch,roll]) C_cb = optu.rotate_optical_axis(0.0, 0.0, 0.0) r_cb = np.asarray([0,0,0]) cm = Camera_model(attitude_parameterization=ap, C_cb=C_cb, r_cb=r_cb, fov=fov, debug=False) t0 = time() pix1 = cm.get_pixel_coords(agent_location, 
agent_q, object_locations, object_intensities) t1 = time() print('ET: ',t1-t0) cm.render(agent_location, agent_q, object_locations, object_intensities) ``` # Negative Pitch, Image should move right ``` object_locations = optu.make_cube(10.,1.0*np.asarray([0,0,200])) agent_location = 1.0*np.asarray([0,0,100]) object_intensities = np.linalg.norm(object_locations-agent_location,axis=1)-50 fov=np.pi/4 yaw = 0.0 pitch = -np.pi/16 roll = 0.0 agent_q = np.asarray([yaw,pitch,roll]) C_cb = optu.rotate_optical_axis(0.0, 0.0, 0.0) r_cb = np.asarray([0,0,0]) cm = Camera_model(attitude_parameterization=ap, C_cb=C_cb, r_cb=r_cb, fov=fov, debug=False) t0 = time() pix1 = cm.get_pixel_coords(agent_location, agent_q, object_locations, object_intensities) t1 = time() print('ET: ',t1-t0) cm.render(agent_location, agent_q, object_locations, object_intensities) ``` # Positive Yaw should rotate image ``` object_locations = optu.make_cube(10.,1.0*np.asarray([0,0,200])) agent_location = 1.0*np.asarray([0,0,100]) object_intensities = np.linalg.norm(object_locations-agent_location,axis=1)-50 fov=np.pi/4 yaw = np.pi/8 pitch = 0.0 roll = 0.0 agent_q = np.asarray([yaw,pitch,roll]) C_cb = optu.rotate_optical_axis(0.0, 0.0, 0.0) r_cb = np.asarray([0,0,0]) cm = Camera_model(attitude_parameterization=ap, C_cb=C_cb, r_cb=r_cb, fov=fov, debug=False) t0 = time() pix1 = cm.get_pixel_coords(agent_location, agent_q, object_locations, object_intensities) t1 = time() print('ET: ',t1-t0) cm.render(agent_location, agent_q, object_locations, object_intensities) ```
github_jupyter
```
%matplotlib inline
import pandas
import geopandas
import numpy as np
import matplotlib.pyplot as plt
import sys
from esda.adbscan import ADBSCAN, get_cluster_boundary, remap_lbls
```

- Set up three clusters

```
# Three Gaussian blobs of n points each, centred at (1,1), (6,6) and (12,12).
n = 100
np.random.seed(12345)
c1 = np.random.normal(1, 1, (n, 2))
c2 = np.random.normal(6, 1, (n, 2))
c3 = np.random.normal(12, 1, (n, 2))
# Stack the blobs into one (3n, 2) table with X/Y columns.
db = pandas.concat(map(pandas.DataFrame, [c1, c2, c3])
                   ).rename(columns={0: "X", 1: "Y"})\
                    .reset_index()\
                    .drop("index", axis=1)
db.plot.scatter("X", "Y")
```

- Run A-DBSCAN

```
# Minimum cluster size: 10% of one blob.
ms = int(n/10)
print(f"Min. samples: {ms}")
# keep_solus=True retains the per-replication labellings for later inspection.
ad = ADBSCAN(2.5, ms, reps=100, keep_solus=True)
np.random.seed(1234)
ad.fit(db)
# Align cluster labels across the bootstrap replications.
solus_rl = remap_lbls(ad.solus, db)
# Normalise labels into [0, 1] for the colour map.
ls = list(map(int, ad.labels_))
ls = pandas.Series(ls) / max(ls)
print(ad.votes["lbls"].unique())
db.assign(lbls=ls).plot.scatter("X", "Y", c="lbls", cmap="viridis")
```

* Create boundary of clusters

```
# Boundary polygon of each voted cluster, drawn underneath the points.
polys = get_cluster_boundary(ad.votes["lbls"], db)
ax = polys.plot(color='0.5')
db.assign(lbls=ls).plot.scatter("X", "Y",
                                c="lbls",
                                cmap="viridis",
                                s=2,
                                ax=ax
                               )
```

- Explore boundary stability

```
%%time
# Build one boundary GeoDataFrame per replication, tagged with its rep id,
# so the variability of the cluster outlines can be visualised.
lines = []
for rep in solus_rl:
    line = get_cluster_boundary(solus_rl[rep],
                                db,
                                n_jobs=-1
                               )
    line = line.boundary
    line = line.reset_index()\
               .rename(columns={0: "geometry",
                                "index": "cluster_id"}
                      )\
               .assign(rep=rep)
    lines.append(line)
lines = pandas.concat(lines)
lines = geopandas.GeoDataFrame(lines)
lines.plot()
```

- Interactive widget to explore different solutions across replications

```
from ipywidgets import interact, IntSlider


def plot_rep(rep):
    # Draw all points plus the cluster boundaries of replication `rep`.
    f, ax = plt.subplots(1, figsize=(9, 9))
    ax.set_facecolor("k")
    # Background points
    db[["X", "Y"]].plot.scatter("X", "Y", ax=ax, color="0.25", s=0.5)
    # Boundaries (rep ids are stored as zero-padded 'rep-XXX' strings)
    cs = lines.query(f"rep == 'rep-{str(rep).zfill(3)}'")
    cs.plot(ax=ax, color="red")
    # Cluster IDs
    for s, row in cs.iterrows():
        ax.text(row.geometry.centroid.x,
                row.geometry.centroid.y,
                s,
                size=20,
                c="w"
               )
    return None


reps = range(len(lines["rep"].unique()))
slider = IntSlider(min=min(reps), max=max(reps), step=1)
interact(plot_rep, rep=slider);
```
github_jupyter
# __DATA 5600: Introduction to Regression and Machine Learning for Analytics__ ## __Review of Basic Concepts in Asymptotic Theory__ <br> <br> ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` <br> ## __The Law of Large Numbers__ ***Definition*** The law which states that the larger a sample, the nearer its mean is to that of the parent population from which the sample is drawn. More formally: for every $\varepsilon > 0$, the probability $$\{|\bar{Y} - Y| > \varepsilon \} \rightarrow 0 \quad \mbox{as} \quad n \rightarrow \infty$$ where $n$ is the sample size, $\bar{Y}$ is the sample mean, and $\mu$ is the population mean. More rigorous definitions are the following: For i.i.d sequences of one-dimensional random variables $Y_{1}, Y_{2}, \ldots$, let $\bar{Y}_{n} = \frac{1}{n} \sum\limits_{i=1}^{n} Y_{i}$. The *weak law of large numbers* states that $\bar{Y}_{n}$ converges in probability to $\mu = E\{Y_{i}\}$ if $E\{|Y_{i}|\} < \infty$. The *strong law of large numbers* states that $\bar{Y}_{n}$ converges almost surely to $\mu$ if $E\{|Y_{i}|\} < \infty$. Both results hold under the more stringent but easily checked condition that $var\{Y_{i}\} = \sigma^{2} < \infty$. <br> <br> ### Using Simulation to Check the Law of Large Numbers We can use simulation to check the Law of Large Numbers. Consider a fair die with six sides and outcomes $Y = \{1, 2, 3, 4, 5, 6\}$, each with $P[Y_{i} = y] = \frac{1}{6}$. The true mean is $$ \mu = E\{Y\} = \frac{1}{6}[1 + 2 + 3 + 4 + 5 + 6] = 3.5$$ We can verify this in `Python`: ``` x = np.arange(1,7) mu = (1/6) * x.sum() x mu ``` <br> Now let's simulate some rolls of the die and collect some data. We will let our sample size increase and plot the estimated mean. 
We can simulate a single roll of the die as follows: <br> ```
## roll the dice a single time and observe the outcome
np.random.randint(1, 7)
``` <br> We can also simulate many draws at once as follows: <br> ```
## roll the dice 100 times and observe the outcomes
np.random.randint(1, 7, size=100)

## estimate the mean at every sample size 1..m and watch convergence to 3.5
m = 10000
sizes = np.arange(1,m + 1)
means = np.zeros((m,))
for i in range(len(sizes)):
    y = np.random.randint(1,7, size=sizes[i])
    means[i] = y.mean()

plt.plot(means, 'g', lw = 2.5)
plt.grid(True)
plt.title("Simulating the Toss of a Fair Die to Demonstrate the Law of Large Numbers")
plt.xlabel("Sample Size")
plt.ylabel("Estimated Mean")

## peek at the first 10 estimated means
means[:10]

## peek at the last 10 estimated means
means[-10:]
``` <br> We can do a similar simulation for the flipping of a fair coin. We can simulate the flip of a coin with the Binomial distribution as follows: <br> ```
## same convergence experiment for Bernoulli(0.5) coin flips; mean tends to 0.5
m = 10000
sizes = np.arange(1,m + 1)
means = np.zeros((m,))
for i in range(len(sizes)):
    y = np.random.binomial(1, 0.5, sizes[i])
    means[i] = y.mean()

plt.plot(means, 'g', lw = 2.5)
plt.grid(True)
plt.title("Simulating Flipping a Fair Coin")
plt.xlabel("Sample Size")
plt.ylabel("Estimated Mean")

## peek at the first 10 estimated means
means[:10]

## peek at the last 10 estimated means
means[-10:]
``` <br> #### Example: The Exponential Distribution Let's do a simulation for the ___exponential distribution___ together. <br> See the wikipedia article for details: https://en.wikipedia.org/wiki/Exponential_distribution <br> ```
## let's take a single draw from the exponential distribution
## (IPython's trailing ? shows the docstring for the sampler)
np.random.exponential?
``` <br> #### Example: The Poisson Distribution Let's do a simulation for the ___Poisson distribution___ together. <br> See the wikipedia article for details: https://en.wikipedia.org/wiki/Poisson_distribution <br> <br> #### More Examples: - Normal distribition - Binomial experiment (number of heads after n tosses) <br>
github_jupyter
For many users, it may be valuable to pull together some particular properties that can be summarized to a simple value per each species record and view them in a spreadsheet program of one kind or another in order to slice the data in various ways or run reports. This notebook runs through all of the data generated, making a number of key decisions about how to summarize, and generates a flat CSV output in the cache folder for this type of use. ```
import pandas as pd
import json
from IPython.display import display

# Merged taxonomy per species, keyed by ITIS TSN or scientific (lookup) name.
taxonomy_lookup = dict()

# --- ITIS: pull the valid/accepted taxonomy for each cached ITIS result ---
with open('../cache/itis.json', 'r') as f:
    itis_species = json.loads(f.read())
    f.close()  # NOTE(review): redundant — the with-block already closes f

for record in [i for i in itis_species if "data" in i.keys()]:
    valid_itis_doc = next((i for i in record["data"] if i["usage"] in ["valid","accepted"]), None)
    if valid_itis_doc is not None:
        # "parameters" appears to hold a single entry; the loop keeps its
        # value (TSN or name) as the lookup key — TODO confirm single-entry
        for k, v in record["parameters"].items():
            itis_lookup_identifier = v
        taxonomy_lookup[itis_lookup_identifier] = {
            "taxonomic_reference": "ITIS",
            "taxonomic_rank": valid_itis_doc["rank"]
        }
        # One key per rank (Kingdom, Phylum, ...) with the taxon name.
        for i in valid_itis_doc["biological_taxonomy"]:
            taxonomy_lookup[itis_lookup_identifier][i["rank"]] = i["name"]

# --- WoRMS: same idea for marine species not resolved through ITIS ---
with open('../cache/worms.json', 'r') as f:
    worms_species = json.loads(f.read())
    f.close()

for w in [i for i in worms_species if i["processing_metadata"]["status"] != "error"]:
    worms_accepted = next((i for i in w["data"] if i["status"] == "accepted"), None)
    if worms_accepted is not None:
        # The queried name is the last path segment of the API URL.
        lookup_name = w["processing_metadata"]["api"].split("/")[-1].split("?")[0]
        taxonomy_lookup[lookup_name] = {
            "taxonomic_reference": "WoRMS",
            "taxonomic_rank": worms_accepted["rank"]
        }
        for i in worms_accepted["biological_taxonomy"]:
            taxonomy_lookup[lookup_name][i["rank"]] = i["name"]

# --- Base list of workplan species; taxonomy is merged onto each record ---
with open('../cache/workplan_species.json', 'r') as f:
    workplan_species = json.loads(f.read())
    f.close()

for spp in workplan_species:
    # Prefer a TSN match, fall back to the scientific (lookup) name.
    if spp["ITIS TSN"] in taxonomy_lookup.keys():
        taxonomy_record = taxonomy_lookup[spp["ITIS TSN"]]
    elif spp["Lookup Name"] in taxonomy_lookup.keys():
        taxonomy_record = taxonomy_lookup[spp["Lookup Name"]]
    else:
        taxonomy_record = None
    if taxonomy_record is not None:
        for k, v in taxonomy_record.items():
            spp[k] = v

# --- TESS (FWS listing status) ---
with open('../cache/tess.json', 'r') as f:
    tess_data = json.loads(f.read())
    f.close  # NOTE(review): missing parentheses — a no-op; with-block closes f anyway

tess_lookup = dict()
for r in [i for i in tess_data if "data" in i.keys()]:
    # Key is the query value embedded at the end of the API URL.
    lookup_id = r["processing_metadata"]["api"].split("=")[-1].replace("]","").replace('"','')
    sp_records = r["data"]["SPECIES_DETAIL"]
    # SPECIES_DETAIL is a dict for a single hit, a list for multiple.
    if not isinstance(sp_records, list):
        sp_records = [sp_records]
    # Collapse possibly multiple listings into comma-joined unique values.
    tess_lookup[lookup_id] = {
        "STATUS": ",".join(list(set([i["STATUS"] for i in sp_records]))),
        "STATUS_TEXT": ",".join(list(set([i["STATUS_TEXT"] for i in sp_records]))),
        "ECOS_SPCODE": ",".join(list(set([i["SPCODE"] for i in sp_records])))
    }

for spp in workplan_species:
    if spp["ITIS TSN"] in tess_lookup.keys():
        tess_record = tess_lookup[spp["ITIS TSN"]]
    elif spp["Lookup Name"] in tess_lookup.keys():
        tess_record = tess_lookup[spp["Lookup Name"]]
    else:
        tess_record = None
    if tess_record is not None:
        for k, v in tess_record.items():
            spp[k] = v

# --- IUCN Red List summary values ---
with open('../cache/iucn.json', 'r') as f:
    iucn_data = json.loads(f.read())
    f.close

iucn_lookup = dict()
for r in [i for i in iucn_data if i["processing_metadata"]["status"] == "success"]:
    iucn_lookup[r["parameters"]["Scientific Name"]] = {
        "iucn_reference": r["data"]["doi"],
        "iucn_status_code": r["data"]["iucn_status_code"],
        "iucn_status_name": r["data"]["iucn_status_name"],
        "iucn_population_trend": r["data"]["iucn_population_trend"],
        "iucn_record_date": r["data"]["record_date"]
    }

for spp in [i for i in workplan_species if i["Lookup Name"] in iucn_lookup.keys()]:
    iucn_record = iucn_lookup[spp["Lookup Name"]]
    for k, v in iucn_record.items():
        spp[k] = v

# --- NatureServe conservation status ---
with open('../cache/natureserve.json', 'r') as f:
    natureserve_data = json.loads(f.read())
    f.close

natureserve_lookup = dict()
for r in [i for i in natureserve_data if i["processing_metadata"]["status"] == "success"]:
    natureserve_lookup[r["parameters"]["Scientific Name"]] = {
        "natureserve_reference": r["data"]["natureServeGlobalConcept"]["natureServeExplorerURI"],
        "natureserve_status": r["data"]["roundedNationalConservationStatus"]
    }
    # NOTE(review): bare except hides all errors; a KeyError guard would be
    # safer, but behavior is preserved here (review date defaults to "unknown").
    try:
        natureserve_lookup[r["parameters"]["Scientific Name"]]["natureserve_last_review_date"] = r["NatureServe Species"]["nationalConservationStatus"]["@lastReviewedDate"]
    except:
        natureserve_lookup[r["parameters"]["Scientific Name"]]["natureserve_last_review_date"] = "unknown"

for spp in [i for i in workplan_species if i["Lookup Name"] in natureserve_lookup.keys()]:
    natureserve_record = natureserve_lookup[spp["Lookup Name"]]
    for k, v in natureserve_record.items():
        spp[k] = v

# Free the intermediate structures (memory hygiene between sources).
natureserve_data = None
natureserve_lookup = None

# --- SGCN (State Species of Greatest Conservation Need) state lists ---
with open('../cache/sgcn.json', 'r') as f:
    sgcn_data = json.loads(f.read())
    f.close()

sgcn_lookup = dict()
for r in [i for i in sgcn_data if i["processing_metadata"]["status"] == "success"]:
    sgcn_lookup[r["parameters"]["Scientific Name"]] = {
        "sgcn_statelist_2005": r["data"]["statelist_2005"],
        "sgcn_statelist_2015": r["data"]["statelist_2015"]
    }

for spp in [i for i in workplan_species if i["Lookup Name"] in sgcn_lookup.keys()]:
    for k, v in sgcn_lookup[spp["Lookup Name"]].items():
        spp[k] = v

sgcn_data = None
sgcn_lookup = None

# --- GBIF reference and occurrence-record count ---
with open('../cache/gbif.json', 'r') as f:
    gbif_data = json.loads(f.read())
    f.close()

gbif_lookup = dict()
for r in [i for i in gbif_data if i["processing_metadata"]["status"] == "success"]:
    gbif_lookup[r["parameters"]["Scientific Name"]] = {
        "gbif_reference": r["data"]["resolvable_identifier"],
        "gbif_taxonomic_status": r["data"]["TaxonomicStatus"]
    }
    # Occurrence count is absent for some records; default to 0.
    if "count" in r["data"]["Occurrence Summary"].keys():
        gbif_lookup[r["parameters"]["Scientific Name"]]["gbif_occurrence_records"] = r["data"]["Occurrence Summary"]["count"]
    else:
        gbif_lookup[r["parameters"]["Scientific Name"]]["gbif_occurrence_records"] = 0

for spp in [i for i in workplan_species if i["Lookup Name"] in gbif_lookup.keys()]:
    for k, v in gbif_lookup[spp["Lookup Name"]].items():
        spp[k] = v

gbif_data = None
gbif_lookup = None

# --- xDD literature search: number of documents mentioning the species ---
with open('../cache/xdd.json', 'r') as f:
    xdd_data = json.loads(f.read())
    f.close()

xdd_lookup = dict()
for r in [i for i in xdd_data if i["processing_metadata"]["status"] == "success"]:
    xdd_lookup[r["parameters"]["Search Term"]] = {
        "xdd_reference": r["processing_metadata"]["api"],
        "xdd_number_docs": len(r["data"])
    }

for spp in [i for i in workplan_species if i["Lookup Name"] in xdd_lookup.keys()]:
    for k, v in xdd_lookup[spp["Lookup Name"]].items():
        spp[k] = v

xdd_data = None
xdd_lookup = None

# --- ScienceBase data releases: number of matching items ---
with open('../cache/sb_datarelease.json', 'r') as f:
    sb_data = json.loads(f.read())
    f.close()

sb_lookup = dict()
for r in [i for i in sb_data if "data" in i.keys()]:
    sb_lookup[r["parameters"]["q"]] = {
        "sb_reference": r["processing_metadata"]["api"],
        "sb_number_items": len(r["data"])
    }

for spp in [i for i in workplan_species if i["Lookup Name"] in sb_lookup.keys()]:
    for k, v in sb_lookup[spp["Lookup Name"]].items():
        spp[k] = v

sb_data = None
sb_lookup = None

# --- GAP species codes and states with mapped habitat ---
with open('../cache/gap.json', 'r') as f:
    gap_data = json.loads(f.read())
    f.close()

with open('../cache/gap_metrics.json', 'r') as f:
    gap_metrics = json.loads(f.read())
    f.close()

gap_lookup = list()
for r in [i for i in gap_data if "data" in i.keys()]:
    gap_lookup.append({
        "scientific_name_source": r["parameters"]["Name Source"],
        "scientific_name": r["parameters"]["Scientific Name"],
        "GAP_SpeciesCode": r["data"]["GAP_SpeciesCode"],
        # Comma-joined names of states where the GAP model maps habitat,
        # pulled from the matching gap_metrics record.
        "GAP_StatesWithHabitat": ",".join([st["state_name"] for st in next(s for s in gap_metrics if s["GAP_SpeciesCode"] == r["data"]["GAP_SpeciesCode"])["State Metrics"]])
    })

for spp in workplan_species:
    # Match by lookup name or, when present, the plain species name.
    check_list = [spp["Lookup Name"]]
    if "Species" in spp.keys():
        check_list.append(spp["Species"])
    gap_spp = next((i for i in gap_lookup if i["scientific_name"] in check_list), None)
    if gap_spp is not None:
        spp["GAP_SpeciesCode"] = gap_spp["GAP_SpeciesCode"]
        spp["GAP_StatesWithHabitat"] = gap_spp["GAP_StatesWithHabitat"]

# Show the records that received a GAP code (notebook cell output).
[i for i in workplan_species if "GAP_SpeciesCode" in i.keys()]
df_wp_spp = pd.DataFrame(workplan_species).to_csv('../cache/summarized_data.csv', index=False) ```
github_jupyter