text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
<a href="https://colab.research.google.com/github/Tixonmavrin/Zindi-Zimnat-Insurance-Recommendation-Challenge/blob/master/Baseline2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !pip install catboost import pandas as pd import numpy as np from tqdm import tqdm import copy train = pd.read_csv('Train.csv') test = pd.read_csv('Test.csv') submission = pd.read_csv('SampleSubmission.csv') print(train.shape) train.head() print(test.shape) test.head() print(submission.shape) submission.head() np_data = [] train_columns = train.columns for v in tqdm(train.values): info = v[:8] binary = v[8:] index_n = [k for k, i in enumerate(binary) if i == 1] for i in index_n: for k in range(len(binary)): if (k not in index_n) or (k == i): binary_0 = list(copy.copy(binary)) binary_0[i] = 0 if k == i: np_data.append(list(info) + binary_0 + [train_columns[8+k]] + [1]) else: np_data.append(list(info) + binary_0 + [train_columns[8+k]] + [0]) df_data = pd.DataFrame(np_data) df_data.columns = ['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code', 'occupation_code', 'occupation_category_code', 'P5DA', 'RIBP', '8NN1', '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO', 'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3', 'product_pred', 'target'] np_data_test = [] answ_test = [] test_columns = test.columns for v in tqdm(test.values): info = v[:8] binary = v[8:] index_n = [k for k, i in enumerate(binary) if i == 1] for k in range(len(binary)): if k not in index_n: np_data_test.append(list(info) + list(binary) + [test_columns[8+k]]) df_data_test = pd.DataFrame(np_data_test) df_data_test.columns = ['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code', 'occupation_code', 'occupation_category_code', 'P5DA', 'RIBP', '8NN1', '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO', 'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 
'GHYX', 'ECY3', 'product_pred'] df_data['date1'] = df_data['join_date'].apply(lambda x: x.split('/')[0] if (x == x) else np.nan) df_data['date2'] = df_data['join_date'].apply(lambda x: x.split('/')[1] if (x == x) else np.nan) df_data['date3'] = df_data['join_date'].apply(lambda x: x.split('/')[2] if (x == x) else np.nan) df_data.drop('join_date', axis=1, inplace=True) df_data_test['date1'] = df_data_test['join_date'].apply(lambda x: x.split('/')[0] if (x == x) else np.nan) df_data_test['date2'] = df_data_test['join_date'].apply(lambda x: x.split('/')[1] if (x == x) else np.nan) df_data_test['date3'] = df_data_test['join_date'].apply(lambda x: x.split('/')[2] if (x == x) else np.nan) df_data_test.drop('join_date', axis=1, inplace=True) from catboost import CatBoostClassifier cat_features = ['sex', 'marital_status', 'branch_code', 'occupation_code', 'occupation_category_code', 'product_pred'] model = CatBoostClassifier() model.fit(df_data.drop(['ID', 'target'], axis=1), df_data['target'], cat_features) preds_proba = model.predict_proba(df_data_test.drop(['ID',], axis=1)) df_answer = df_data_test[['ID', 'product_pred']] df_answer['target'] = preds_proba[:,1] df_answer['ID X PCODE'] = df_answer['ID'] + ' X ' + df_answer['product_pred'] df_answer.drop(['ID', 'product_pred'], axis=1, inplace=True) df_answer.rename(columns={'target':'Label'}, inplace=True) df_answer = submission[submission['ID X PCODE'].isin(list(set(list(submission['ID X PCODE'])) - set(list(df_answer['ID X PCODE']))))].append(df_answer) df_answer.reset_index(drop=True, inplace=True) df_answer.to_csv('submission2.csv', index=False) ```
github_jupyter
# Tutorial: Learn how to use Datasets in Azure ML In this tutorial, you learn how to use Azure ML Datasets to train a regression model with the Azure Machine Learning SDK for Python. You will * Explore and prepare data for training the model * Register the Dataset in your workspace to share it with others * Take snapshots of data to ensure models can be trained with the same data every time * Create and use multiple Dataset definitions to ensure that updates to the definition don't break existing pipelines/scripts In this tutorial, you: &#x2611; Setup a Python environment and import packages &#x2611; Load the Titanic data from your Azure Blob Storage. (The [original data](https://www.kaggle.com/c/titanic/data) can be found on Kaggle) &#x2611; Explore and cleanse the data to remove anomalies &#x2611; Register the Dataset in your workspace, allowing you to use it in model training &#x2611; Take a Dataset snapshot for repeatability and train a model with the snapshot &#x2611; Make changes to the dataset's definition without breaking the production model or the daily data pipeline ## Pre-requisites: Skip to Set up your development environment to read through the notebook steps, or use the instructions below to get the notebook and run it on Azure Notebooks or your own notebook server. To run the notebook you will need: A Python 3.6 notebook server with the following installed: * The Azure Machine Learning SDK for Python * The Azure Machine Learning Data Prep SDK for Python * The tutorial notebook Data and train.py script to store in your Azure Blob Storage Account. * [Titanic data](./train-dataset/Titanic.csv) * [train.py](./train-dataset/train.py) To create and register Datasets you need: * An Azure subscription. If you don’t have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning service](https://aka.ms/AMLFree) today. * An Azure Machine Learning service workspace. 
See the [Create an Azure Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/setup-create-workspace?branch=release-build-amls). * The Azure Machine Learning SDK for Python (version 1.0.21 or later). To install or update to the latest version of the SDK, see [Install or update the SDK](https://docs.microsoft.com/python/api/overview/azure/ml/install?view=azure-ml-py). For more information on how to set up your workspace, see the [Create an Azure Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/setup-create-workspace?branch=release-build-amls). The first part that needs to be done is setting up your python environment. You will need to import all of your python packages including `azureml.dataprep` and `azureml.core.dataset`. Then access your workspace through your Azure subscription and set up your compute target. ``` import azureml.dataprep as dprep import azureml.core import pandas as pd import logging import os import shutil from azureml.core import Workspace, Datastore, Dataset # Get existing workspace from config.json file in the same folder as the tutorial notebook # You can download the config file from your workspace workspace = Workspace.from_config() print("Workspace") print(workspace) print("Compute targets") print(workspace.compute_targets) # Get compute target that has already been attached to the workspace # Pick the right compute target from the list of computes attached to your workspace compute_target_name = 'dataset-test' remote_compute_target = workspace.compute_targets[compute_target_name] ``` To load data to your dataset, you will access the data through your datastore. After you create your dataset, you can use `get_profile()` to see your data's statistics. We will now upload the [original data](https://www.kaggle.com/c/titanic/data) to the default datastore(blob) within your workspace.. 
``` datastore = workspace.get_default_datastore() datastore.upload_files(files=['./train-dataset/Titanic.csv'], target_path='train-dataset/', overwrite=True, show_progress=True) dataset = Dataset.auto_read_files(path=datastore.path('train-dataset/Titanic.csv')) #Display Dataset Profile of the Titanic Dataset dataset.get_profile() ``` To predict if a person survived the Titanic's sinking or not, the columns that are relevant to train the model are 'Survived','Pclass', 'Sex','SibSp', and 'Parch'. You can update your dataset's deinition and only keep these columns you will need. You will also need to convert values ("male","female") in the "Sex" column to 0 or 1, because the algorithm in the train.py file will be using numeric values instead of strings. For more examples of preparing data with Datasets, see [Explore and prepare data with the Dataset class](aka.ms/azureml/howto/exploreandpreparedata). ``` ds_def = dataset.get_definition() ds_def = ds_def.keep_columns(['Survived','Pclass', 'Sex','SibSp', 'Parch', 'Fare']) ds_def = ds_def.replace('Sex','male', 0) ds_def = ds_def.replace('Sex','female', 1) ds_def.head(5) ``` Once you have cleaned your data, you can register your dataset in your workspace. Registering your dataset allows you to easily have access to your processed data and share it with other people in your organization using the same workspace. It can be accessed in any notebook or script that is connected to your workspace. ``` dataset = dataset.update_definition(ds_def, 'Cleaned Data') dataset.generate_profile(compute_target='local').get_result() dataset_name = 'clean_Titanic_tutorial' dataset = dataset.register(workspace=workspace, name=dataset_name, description='training dataset', tags = {'year':'2019', 'month':'Apr'}, exist_ok=True) workspace.datasets ``` You can also take a snapshot of your dataset. This makes for easily reproducing your data as it is in that moment. 
Even if you changed the definition of your dataset, or have data that refreshes regularly, you can always go back to your snapshot to compare. Since this snapshot is being created on a compute in your workspace, it may take a signficant amount of time to provision the compute before running the action itself. ``` print(dataset.get_all_snapshots()) snapshot_name = 'train_snapshot' print("Compute target status") print(remote_compute_target.get_status().provisioning_state) snapshot = dataset.create_snapshot(snapshot_name=snapshot_name, compute_target=remote_compute_target, create_data_snapshot=True) snapshot.wait_for_completion() ``` Now that you have registered your dataset and created a snapshot, you can call up the dataset and it's snapshot to use it in your train.py script. The following code snippit will train your model locally using the train.py script. ``` from azureml.core import Experiment, RunConfiguration experiment_name = 'training-datasets' experiment = Experiment(workspace = workspace, name = experiment_name) project_folder = './train-dataset/' # create a new RunConfig object run_config = RunConfiguration() run_config.environment.python.user_managed_dependencies = True from azureml.core import Run from azureml.core import ScriptRunConfig src = ScriptRunConfig(source_directory=project_folder, script='train.py', run_config=run_config) run = experiment.submit(config=src) run.wait_for_completion(show_output=True) ``` You can also use the same script with your dataset snapshot for your Pipeline's Python Script Step. 
``` from azureml.pipeline.core import Pipeline, PipelineData from azureml.pipeline.steps import PythonScriptStep from azureml.data.data_reference import DataReference trainStep = PythonScriptStep(script_name="train.py", compute_target=remote_compute_target, source_directory=project_folder) pipeline = Pipeline(workspace=workspace, steps=trainStep) pipeline_run = experiment.submit(pipeline) pipeline_run.wait_for_completion() ``` During any point of your workflow, you can get a previous snapshot of your dataset and use that version in your pipeline to quickly see how different versions of your data can effect your model. ``` snapshot = dataset.get_snapshot(snapshot_name=snapshot_name) snapshot.to_pandas_dataframe().head(5) ``` You can make changes to the dataset's definition without breaking the production model or the daily data pipeline. You can call get_definitions to see that there are several versions. After each change to a dataset's version, another one is added. ``` dataset.get_definitions() dataset = Dataset.get(workspace=workspace, name=dataset_name) ds_def = dataset.get_definition() ds_def = ds_def.drop_columns(['Fare']) dataset = dataset.update_definition(ds_def, 'Dropping Fare as PClass and Fare are strongly correlated') dataset.generate_profile(compute_target='local').get_result() ``` Dataset definitions can be deprecated when usage is no longer recommended and a replacement is available. When a deprecated dataset definition is used in an AML Experimentation/Pipeline scenario, a warning message gets returned but execution will not be blocked. ``` # Deprecate dataset definition 1 by the 2nd definition ds_def = dataset.get_definition('1') ds_def.deprecate(deprecate_by_dataset_id=dataset._id, deprecated_by_definition_version='2') dataset.get_definitions() ``` Dataset definitions can be archived when definitions are not supposed to be used for any reasons (such as underlying data no longer available). 
When an archived dataset definition is used in an AML Experimentation/Pipeline scenario, execution will be blocked with error. No further actions can be performed on archived Dataset definitions, but the references will be kept intact. ``` # Archive the deprecated dataset definition #1 ds_def = dataset.get_definition('1') ds_def.archive() dataset.get_definitions() ``` You can also reactivate any defition that you archived for later use. ``` ds_def = dataset.get_definition('1') ds_def.reactivate() dataset.get_definitions() ``` Now delete the current snapshot name to clean up your resource's space. ``` dataset.delete_snapshot(snapshot_name) ``` You have now finished using a dataset from start to finish of your experiment!
github_jupyter
# Image classification training with image format demo 1. [Introduction](#Introduction) 2. [Prerequisites and Preprocessing](#Prequisites-and-Preprocessing) 1. [Permissions and environment variables](#Permissions-and-environment-variables) 2. [Prepare the data](#Prepare-the-data) 3. [Fine-tuning The Image Classification Model](#Fine-tuning-the-Image-classification-model) 1. [Training parameters](#Training-parameters) 2. [Start the training](#Start-the-training) 4. [Inference](#Inference) ## Introduction Welcome to our end-to-end example of the image classification algorithm training with image format. In this demo, we will use the Amazon sagemaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on imagenet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. ## Prequisites and Preprocessing This notebook uses *MXNet GPU Optimized* kernel in SageMaker Studio. ### Permissions and environment variables Here we set up the linkage and authentication to AWS services. There are three parts to this: * The roles used to give learning and hosting access to your data. 
This will automatically be obtained from the role used to start the notebook * The S3 bucket that you want to use for training and model data * The Amazon sagemaker image classification docker image which need not be changed ``` %%time import sagemaker from sagemaker import get_execution_role role = get_execution_role() print(role) sess = sagemaker.Session() bucket=sess.default_bucket() prefix = 'ic-lstformat' from sagemaker.amazon.amazon_estimator import get_image_uri training_image = get_image_uri(sess.boto_region_name, 'image-classification', repo_version="latest") print (training_image) ``` ### Prepare the data The caltech 256 dataset consist of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. The image classification algorithm can take two types of input formats. The first is a [RecordIO format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) (content type: application/x-recordio) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec) (content type: application/x-image). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the lst format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/). 
``` import os import urllib.request def download(url): filename = url.split("/")[-1] if not os.path.exists(filename): urllib.request.urlretrieve(url, filename) # Caltech-256 image files download('http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar') !tar -xf 256_ObjectCategories.tar --no-same-owner # Tool for creating lst file download('https://raw.githubusercontent.com/apache/incubator-mxnet/master/tools/im2rec.py') !ls -lha !pwd !pip install opencv-python %%bash mkdir -p caltech_256_train_60 for i in 256_ObjectCategories/*; do c=`basename $i` mkdir -p caltech_256_train_60/$c for j in `ls $i/*.jpg | shuf | head -n 60`; do mv $j caltech_256_train_60/$c/ done done python im2rec.py --list --recursive caltech-256-60-train caltech_256_train_60/ python im2rec.py --list --recursive caltech-256-60-val 256_ObjectCategories/ ``` A .lst file is a tab-separated file with three columns that contains a list of image files. The first column specifies the image index, the second column specifies the class label index for the image, and the third column specifies the relative path of the image file. The image index in the first column should be unique across all of the images. Here we make an image list file using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool from MXNet. You can also create the .lst file in your own way. An example of .lst file is shown as follows. ``` !head -n 3 ./caltech-256-60-train.lst > example.lst f = open('example.lst','r') lst_content = f.read() print(lst_content) ``` When you are bringing your own image files to train, please ensure that the .lst file follows the same format as described above. In order to train with the lst format interface, passing the lst file for both training and validation in the appropriate format is mandatory. Once we have the data available in the correct format for training, the next step is to upload the image and .lst file to S3 bucket. 
``` # Four channels: train, validation, train_lst, and validation_lst s3train = 's3://{}/{}/train/'.format(bucket, prefix) s3validation = 's3://{}/{}/validation/'.format(bucket, prefix) s3train_lst = 's3://{}/{}/train_lst/'.format(bucket, prefix) s3validation_lst = 's3://{}/{}/validation_lst/'.format(bucket, prefix) # upload the image files to train and validation channels !aws s3 cp caltech_256_train_60 $s3train --recursive --quiet !aws s3 cp 256_ObjectCategories $s3validation --recursive --quiet # upload the lst files to train_lst and validation_lst channels !aws s3 cp caltech-256-60-train.lst $s3train_lst --quiet !aws s3 cp caltech-256-60-val.lst $s3validation_lst --quiet ``` Now we have all the data stored in S3 bucket. The image and lst files will be converted to RecordIO file internelly by the image classification algorithm. But if you want do the conversion, the following cell shows how to do it using the [im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) tool. Note that this is just an example of creating RecordIO files. We are **_not_** using them for training in this notebook. More details on creating RecordIO files can be found in this [tutorial](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec). ``` %%bash python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-val 256_ObjectCategories/ python im2rec.py --resize 256 --quality 90 --num-thread 16 caltech-256-60-train caltech_256_train_60/ ``` After you created the RecordIO files, you can upload them to the train and validation channels for training. 
To train with RecordIO format, you can follow "[Image-classification-fulltraining.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-fulltraining.ipynb)" and "[Image-classification-transfer-learning.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb)". Again, we will **_not_** use the RecordIO file for the training. The following sections will only show you how to train a model with images and list files. Before training the model, we need to setup the training parameters. The next section will explain the parameters in detail. ## Fine-tuning the Image Classification Model Now that we are done with all the setup that is needed, we are ready to train our object detector. To begin, let us create a ``sageMaker.estimator.Estimator`` object. This estimator will launch the training job. ### Training parameters There are two kinds of parameters that need to be set for training. The first one are the parameters for the training job. These include: * **Training instance count**: This is the number of instances on which to run the training. When the number of instances is greater than one, then the image classification algorithm will run in distributed settings. * **Training instance type**: This indicates the type of machine on which to run the training. 
Typically, we use GPU instances for these training * **Output path**: This the s3 folder in which the training output is stored ``` s3_output_location = 's3://{}/{}/output'.format(bucket, prefix) ic = sagemaker.estimator.Estimator(training_image, role, train_instance_count=1, train_instance_type='ml.p2.xlarge', train_volume_size = 50, train_max_run = 360000, input_mode= 'File', output_path=s3_output_location, sagemaker_session=sess) ``` Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are: * **num_layers**: The number of layers (depth) for the network. We use 18 in this samples but other values such as 50, 152 can be used. * **use_pretrained_model**: Set to 1 to use pretrained model for transfer learning. * **image_shape**: The input image dimensions,'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be same as the actual image. * **num_classes**: This is the number of output classes for the new dataset. Imagenet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For caltech, we use 257 because it has 256 object categories + 1 clutter class. * **num_training_samples**: This is the total number of training samples. It is set to 15240 for caltech dataset with the current split. * **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run. * **epochs**: Number of training epochs. * **learning_rate**: Learning rate for training. * **top_k**: Report the top-k accuracy during training. * **resize**: Resize the image before using it for training. The images are resized so that the shortest side is of this parameter. If the parameter is not set, then the training data is used as such without resizing. 
* **precision_dtype**: Training datatype precision (default: float32). If set to 'float16', the training will be done in mixed_precision mode and will be faster than float32 mode ``` ic.set_hyperparameters(num_layers=18, use_pretrained_model=1, image_shape = "3,224,224", num_classes=257, mini_batch_size=128, epochs=2, learning_rate=0.01, top_k=2, num_training_samples=15420, resize = 256, precision_dtype='float32') ``` ## Input data specification Set the data type and channels used for training ``` train_data = sagemaker.session.s3_input(s3train, distribution='FullyReplicated', content_type='application/x-image', s3_data_type='S3Prefix') validation_data = sagemaker.session.s3_input(s3validation, distribution='FullyReplicated', content_type='application/x-image', s3_data_type='S3Prefix') train_data_lst = sagemaker.session.s3_input(s3train_lst, distribution='FullyReplicated', content_type='application/x-image', s3_data_type='S3Prefix') validation_data_lst = sagemaker.session.s3_input(s3validation_lst, distribution='FullyReplicated', content_type='application/x-image', s3_data_type='S3Prefix') data_channels = {'train': train_data, 'validation': validation_data, 'train_lst': train_data_lst, 'validation_lst': validation_data_lst} ``` ## Start the training Start training by calling the fit method in the estimator ``` ic.fit(inputs=data_channels, logs=True) ``` # Inference *** A trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the topic mixture representing a given document. 
You can deploy the created model by using the deploy method in the estimator ``` ic_classifier = ic.deploy(initial_instance_count = 1, instance_type = 'ml.m4.xlarge') ``` #### Download test image ``` !wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg file_name = '/tmp/test.jpg' # test image from IPython.display import Image Image(file_name) import json import numpy as np with open(file_name, 'rb') as f: payload = f.read() payload = bytearray(payload) ic_classifier.content_type = 'application/x-image' result = json.loads(ic_classifier.predict(payload)) # the result will output the probabilities for all classes # find the class with maximum probability and print the class index index = np.argmax(result) object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 
'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 
'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter'] print("Result: label - " + object_categories[index] + ", probability - " + str(result[index])) ``` #### Clean up When we're done with the endpoint, we can just delete it and the backing instances will be released. ``` ic_classifier.delete_endpoint() ```
github_jupyter
# Silver per m2 Calculations This journal documents the calculations and assumptions for the silver baseline file used in the calculator. ``` import numpy as np import pandas as pd import os,sys import matplotlib.pyplot as plt plt.rcParams.update({'font.size': 22}) plt.rcParams['figure.figsize'] = (12, 8) density_Ag = 10.49 #g/cm3, source Wikipedia ``` ### Pre-Journal Calculations From the ITRPVs, we have grams of Ag per cell from 2009 through 2019, with projections through 2030. Data for silver per cell for 4 different types of cell were extracted from ITRPV graphs with "webplotdigitizer" then rounded to ~2 significant figures. The 4 types of cell noted in ITRPV 2019 and 2020 are Monofacial p-type, Bifacial p-type, HJT n-type, and n-type. Some mathmatical assumptions: 1) n-type cells account for only 5% of the world market share and have for the last decade. While the amount of silver in the two different n-type cells is noteably different, because their marketshare is so small, these two n-type cell silver quantities were averaged together. 2) The difference in silver per cell between bifacial and monofacial cells is not significant, and were therefore averaged together. Therefore the process for determining the average silver per cell across the different technologies was: average silver per cell = 0.95*(average of monofacial and bifacial p-type) + 0.05*(average of n-type) This math was completed in the google spreadsheet of raw data <https://docs.google.com/spreadsheets/d/1WV54lNAdA2uP6a0g5wMOOE9bu8nbwvnQDgLj3GuGojE/edit?usp=sharing> then copied to a csv and is uploaded here. ``` #read in the csv of 2009 through 2030 data for silver per cell. 
cwd = os.getcwd() #grabs current working directory skipcols = ['Source'] itrpv_ag_gpc = pd.read_csv(cwd+"/../../PV_ICE/baselines/SupportingMaterial/ag_g_per_cell.csv", index_col='Year', usecols=lambda x: x not in skipcols) itrpv_ag_gpc #plot the raw data plt.plot(itrpv_ag_gpc, marker="o") plt.title("Silver mass per cell over time") plt.ylabel("Silver, grams/cell") ``` Based on looking at the plot of original data, it doesn't seem crazy to linearly interpolate for missing data ``` ag_gpc = itrpv_ag_gpc.interpolate() plt.plot(ag_gpc, marker="o") plt.title("Silver mass per cell over time") plt.ylabel("Silver, grams/cell") ``` ## Convert to a per module area basis (not per cell) ``` #import cell per m2 from the silicon baseline cpm2 = pd.read_csv(cwd+"/../../PV_ICE/baselines/SupportingMaterial/output_cell_per_m2.csv", index_col='Year', usecols=lambda x: x not in skipcols) #print(cpm2) #convert silver per cell to silver per m^2 of module, based on output from silicon baseline ag_gpc.columns = cpm2.columns = ['ag_g_per_m2'] #rename to a common name ag_gpm2 = ag_gpc.mul(cpm2, 'columns') #multiply plt.plot(ag_gpm2) plt.title("Silver mass per module m2 over time") plt.ylabel("Silver, grams/module m2") ``` ### Extend projection through 2050 It appears that the silver per cell is expected to level out by 2025 or so. We will extend 2030 values through 2050 as a "lower limit" or minimal further improvement. 
```
# Create an empty placeholder dataframe for the projection years.
# NOTE: range(2031,2050) stops at 2049; the 2050 row is added explicitly below.
yrs = pd.Series(index=range(2031,2050), dtype='float64')
tempdf = pd.DataFrame(yrs, columns=['ag_g_per_m2'])
fulldf = pd.concat([ag_gpm2,tempdf]) # attach the placeholder years to the rest of the data

# Set the 2050 value to the same as 2030 — i.e. assume no further reduction in
# silver usage after 2030, the "lower limit" / minimal-improvement scenario
# described above.
fulldf.loc[2050] = fulldf.loc[2030]

# Linearly interpolate the missing 2031-2049 values (flat, since the
# endpoints 2030 and 2050 are equal).
ag_gpm2_full = fulldf.interpolate()
#print(ag_gpm2_full)

# Plot the full 2009-2050 series as a sanity check.
plt.plot(ag_gpm2_full)
plt.title("Silver mass per module area over time")
plt.ylabel("Silver, grams/module m2")

# Write the result out to the supporting-material csv used by the calculator.
ag_gpm2_full.to_csv(cwd+'/../../PV_ICE/baselines/SupportingMaterial/output_ag_g_per_m2.csv', index=True)
```
github_jupyter
# Image Captioning ## Part 2: Train a CNN-RNN Model --- In this notebook, we will train our CNN-RNN model. - [Step 1](#step1): Training Setup - [1a](#1a): CNN-RNN architecture - [1b](#1b): Hyperparameters and other variables - [1c](#1c): Image transform - [1d](#1d): Data loader - [1e](#1e): Loss function, learnable parameters and optimizer - [Step 2](#step2): Train and Validate the Model - [2a](#2a): Train for the first time - [2b](#2b): Resume training - [2c](#2c): Validation - [2d](#2d): Notes regarding model validation <a id='step1'></a> ## Step 1: Training Setup We will describe the model architecture and specify hyperparameters and set other options that are important to the training procedure. We will refer to [this paper](https://arxiv.org/pdf/1502.03044.pdf) and [this paper](https://arxiv.org/pdf/1411.4555.pdf) for useful guidance. <a id='1a'></a> ### CNN-RNN architecture For the complete CNN-RNN model, see **model.py**. - For the encoder model, we use a pre-trained ResNet which has been known to achieve great success in image classification. We use batch normalization because according to [this paper](https://arxiv.org/abs/1502.03167) it "allows us to use much higher learning rates and be less careful about initialization. It also acts as a regularizer, in some cases eliminating the need for Dropout." - The decoder is an RNN which has an Embedding layer, a LSTM layer and a fully-connected layer. LSTM has been shown to be successful in sequence generation. <a id='1b'></a> ### Hyperparameters and other variables In the next code cell, we will set the values for: - `batch_size` - the batch size of each training batch. It is the number of image-caption pairs used to amend the model weights in each training step. We will set it to `32`. - `vocab_threshold` - the minimum word count threshold. A larger threshold will result in a smaller vocabulary, whereas a smaller threshold will include rarer words and result in a larger vocabulary. 
We will set it to `5` just like [this paper](https://arxiv.org/pdf/1411.4555.pdf) - `vocab_from_file` - a Boolean that decides whether to load the vocabulary from file. This will be changed to `True` once we are done setting `vocab_threshold` and generating a `vocab.pkl` file. - `embed_size` - the dimensionality of the image and word embeddings. We have tried `512` as done in [this paper](https://arxiv.org/pdf/1411.4555.pdf) but it took a long time to train, so I will set it to `256`. - `hidden_size` - the number of features in the hidden state of the RNN decoder. We will use `512` based on [this paper](https://arxiv.org/pdf/1411.4555.pdf). The larger the number, the better the RNN model can memorize sequences. However, larger numbers can significantly slow down the training process. - `num_epochs` - the number of epochs to train the model. We are dealing with a huge amount of data so it will take a long time to complete even 1 epoch. Therefore, we will set `num_epochs` to `1`. We will save the model AND the optimizer every 100 training steps, and to resume training from the last step. ``` # Watch for any changes in vocabulary.py, data_loader.py, utils.py or model.py, and re-load it automatically. 
%load_ext autoreload %autoreload 2 import torch import torch.nn as nn from torch.autograd import Variable from torchvision import transforms import sys from pycocotools.coco import COCO import math import torch.utils.data as data import numpy as np import os import requests import time from utils import train, validate, save_epoch, early_stopping from data_loader import get_loader from model import EncoderCNN, DecoderRNN # Set values for the training variables batch_size = 32 # batch size vocab_threshold = 5 # minimum word count threshold vocab_from_file = True # if True, load existing vocab file embed_size = 256 # dimensionality of image and word embeddings hidden_size = 512 # number of features in hidden state of the RNN decoder num_epochs = 10 # number of training epochs ``` <a id='1c'></a> ### Image transform When setting this transform, we keep two things in mind: - the images in the dataset have varying heights and widths, and - since we are using a pre-trained model, we must perform the corresponding appropriate normalization. **Training set**: As seen in the following code cell, we will set the transform for training set as follows: ```python transform_train = transforms.Compose([ transforms.Resize(256), # smaller edge of image resized to 256 transforms.RandomCrop(224), # get 224x224 crop from random location transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5 transforms.ToTensor(), # convert the PIL Image to a tensor transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model (0.229, 0.224, 0.225))]) ``` According to [this page](https://pytorch.org/docs/master/torchvision/models.html), like other pre-trained models, ResNet expects input images normalized as follows: - The images are expected to have width and height of at least 224. 
The first and second transformations resize and crop the images to 224 x 224: ```python transforms.Resize(256), # smaller edge of image resized to 256 transforms.RandomCrop(224), # get 224x224 crop from random location ``` - The images have to be converted from numpy.ndarray (H x W x C) in the range [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]: ```python transforms.ToTensor(), # convert the PIL Image to a tensor ``` - Then they are normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225]. This is achieved using the last transformation step: ```python transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model (0.229, 0.224, 0.225)) ``` The data augmentation step `transforms.RandomHorizontalFlip()` improves the accuracy of the image classification task as mentioned in [this paper](http://cs231n.stanford.edu/reports/2017/pdfs/300.pdf). **Validation set**: We won't use the image augmentation step, i.e. RandomHorizontalFlip(), and will use CenterCrop() instead of RandomCrop(). 
``` # Define a transform to pre-process the training images transform_train = transforms.Compose([ transforms.Resize(256), # smaller edge of image resized to 256 transforms.RandomCrop(224), # get 224x224 crop from random location transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5 transforms.ToTensor(), # convert the PIL Image to a tensor transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model (0.229, 0.224, 0.225))]) # Define a transform to pre-process the validation images transform_val = transforms.Compose([ transforms.Resize(256), # smaller edge of image resized to 256 transforms.CenterCrop(224), # get 224x224 crop from the center transforms.ToTensor(), # convert the PIL Image to a tensor transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model (0.229, 0.224, 0.225))]) ``` <a id='1d'></a> ### Data loader We will build data loaders for training and validation sets, applying the above image transforms. We will then get the size of the vocabulary from the `train_loader`, and use it to initialize our `encoder` and `decoder`. ``` # Build data loader, applying the transforms train_loader = get_loader(transform=transform_train, mode='train', batch_size=batch_size, vocab_threshold=vocab_threshold, vocab_from_file=vocab_from_file) val_loader = get_loader(transform=transform_val, mode='val', batch_size=batch_size, vocab_threshold=vocab_threshold, vocab_from_file=vocab_from_file) # The size of the vocabulary vocab_size = len(train_loader.dataset.vocab) # Initialize the encoder and decoder encoder = EncoderCNN(embed_size) decoder = DecoderRNN(embed_size, hidden_size, vocab_size) # Move models to GPU if CUDA is available if torch.cuda.is_available(): encoder.cuda() decoder.cuda() ``` <a id='1e'></a> ### Loss function, learnable parameters and optimizer **Loss function**: We will use `CrossEntropyLoss()`. 
**Learnable parameters**: According to [this paper](https://arxiv.org/pdf/1411.4555.pdf), the "loss is minimized w.r.t. all the parameters of the LSTM, the top layer of the image embedder CNN and word embeddings." We will follow this strategy and choose the parameters accordingly. Since we also added a Batch Normalization layer, we will optimize its parameters too. This makes sense for two reasons: - the EncoderCNN in this project uses ResNet which has been pre-trained on an image classification task. So we don't have to optimize the parameters of the entire network again for a similar image classification task. We only need to optimize the top layer whose outputs are fed into the DecoderRNN. - the DecoderRNN is not a pre-trained network, so we have to optimize all its parameters. **Optimizer**: According to [this paper](https://arxiv.org/pdf/1502.03044.pdf), Adam optimizer works best on the MS COCO Dataset. Therefore, we will use it. ``` # Define the loss function criterion = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss() # Specify the learnable parameters of the model params = list(decoder.parameters()) + list(encoder.embed.parameters()) + list(encoder.bn.parameters()) # Define the optimizer optimizer = torch.optim.Adam(params=params, lr=0.001) ``` <a id='step2'></a> ## Step 2: Train and Validate the Model At the beginning of this notebook, we have imported the `train` fuction and the `validate` function from `utils.py`. To figure out how well our model is doing, we will print out the training loss and perplexity during training. We will try to minimize overfitting by assessing the model's performance, i.e. the Bleu-4 score, on the validation dataset. It will take a long time to train and validate the model. 
Therefore we will split the training procedure into two parts: first, we will train the model for the first time and save the it every 100 steps; then we will resume, as many times as we would like or until the early stopping criterion is satisfied. We will save the model and optimizer weights in the `models` subdirectory. We will do the same for the validation procedure. First, let's calculate the total number of training and validation steps per epoch. ``` # Set the total number of training and validation steps per epoch total_train_step = math.ceil(len(train_loader.dataset.caption_lengths) / train_loader.batch_sampler.batch_size) total_val_step = math.ceil(len(val_loader.dataset.caption_lengths) / val_loader.batch_sampler.batch_size) print ("Number of training steps:", total_train_step) print ("Number of validation steps:", total_val_step) ``` <a id='2a'></a> ### Train for the first time Run the below cell if training for the first time or training continously without break. To resume training, skip this cell and run the one below it. ``` # Keep track of train and validation losses and validation Bleu-4 scores by epoch train_losses = [] val_losses = [] val_bleus = [] # Keep track of the current best validation Bleu score best_val_bleu = float("-INF") start_time = time.time() for epoch in range(1, num_epochs + 1): train_loss = train(train_loader, encoder, decoder, criterion, optimizer, vocab_size, epoch, total_train_step) train_losses.append(train_loss) val_loss, val_bleu = validate(val_loader, encoder, decoder, criterion, train_loader.dataset.vocab, epoch, total_val_step) val_losses.append(val_loss) val_bleus.append(val_bleu) if val_bleu > best_val_bleu: print ("Validation Bleu-4 improved from {:0.4f} to {:0.4f}, saving model to best-model.pkl". 
format(best_val_bleu, val_bleu)) best_val_bleu = val_bleu filename = os.path.join("./models", "best-model.pkl") save_epoch(filename, encoder, decoder, optimizer, train_losses, val_losses, val_bleu, val_bleus, epoch) else: print ("Validation Bleu-4 did not improve, saving model to model-{}.pkl".format(epoch)) # Save the entire model anyway, regardless of being the best model so far or not filename = os.path.join("./models", "model-{}.pkl".format(epoch)) save_epoch(filename, encoder, decoder, optimizer, train_losses, val_losses, val_bleu, val_bleus, epoch) print ("Epoch [%d/%d] took %ds" % (epoch, num_epochs, time.time() - start_time)) if epoch > 5: # Stop if the validation Bleu doesn't improve for 3 epochs if early_stopping(val_bleus, 3): break start_time = time.time() ``` <a id='2b'></a> ### Resume training Resume training if having trained and saved the model. There are two types of data loading for training depending on where we are in the process: 1. We will load a model from the latest training step if we are in the middle of the process and have previously saved a model, e.g. train-model-14000.pkl which means model was saved for epoch 1 at training step 4000. 2. We will load a model saved by the below validation process after completing validating one epoch. This is when we start to train the next epoch. Therefore, we need to reset `start_loss` and `start_step` to 0.0 and 1 respectively. We will modify the code cell below depending on where we are in the training process. 
``` # Load the last checkpoints checkpoint = torch.load(os.path.join('./models', 'train-model-76500.pkl')) # Load the pre-trained weights encoder.load_state_dict(checkpoint['encoder']) decoder.load_state_dict(checkpoint['decoder']) optimizer.load_state_dict(checkpoint['optimizer']) # Load start_loss from checkpoint if in the middle of training process; otherwise, comment it out start_loss = checkpoint['total_loss'] # Reset start_loss to 0.0 if starting a new epoch; otherwise comment it out #start_loss = 0.0 # Load epoch. Add 1 if we start a new epoch epoch = checkpoint['epoch'] # Load start_step from checkpoint if in the middle of training process; otherwise, comment it out start_step = checkpoint['train_step'] + 1 # Reset start_step to 1 if starting a new epoch; otherwise comment it out #start_step = 1 # Train 1 epoch at a time due to very long training time train_loss = train(train_loader, encoder, decoder, criterion, optimizer, vocab_size, epoch, total_train_step, start_step, start_loss) ``` Now that we have completed training an entire epoch, we will save the necessary information. We will load pre-trained weights from the last train step `train-model-{epoch}12900.pkl`, `best_val_bleu` from `best-model.pkl` and the rest from `model-{epoch}.pkl`). We will append `train_loss` to the list `train_losses`. Then we will save the information needed for the epoch. 
``` # Load checkpoints train_checkpoint = torch.load(os.path.join('./models', 'train-model-712900.pkl')) epoch_checkpoint = torch.load(os.path.join('./models', 'model-6.pkl')) best_checkpoint = torch.load(os.path.join('./models', 'best-model.pkl')) # Load the pre-trained weights and epoch from the last train step encoder.load_state_dict(train_checkpoint['encoder']) decoder.load_state_dict(train_checkpoint['decoder']) optimizer.load_state_dict(train_checkpoint['optimizer']) epoch = train_checkpoint['epoch'] # Load from the previous epoch train_losses = epoch_checkpoint['train_losses'] val_losses = epoch_checkpoint['val_losses'] val_bleus = epoch_checkpoint['val_bleus'] # Load from the best model best_val_bleu = best_checkpoint['val_bleu'] train_losses.append(train_loss) print (train_losses, val_losses, val_bleus, best_val_bleu) print ("Training completed for epoch {}, saving model to train-model-{}.pkl".format(epoch, epoch)) filename = os.path.join("./models", "train-model-{}.pkl".format(epoch)) save_epoch(filename, encoder, decoder, optimizer, train_losses, val_losses, best_val_bleu, val_bleus, epoch) ``` <a id='2c'></a> ### Validation We will do validation for an epoch once we have trained and saved the model for that epoch. There are two types of data loading for validation depending on where we are in the process: 1. We will load a model from the latest validation step if we are in the middle of the process and have previously saved a model, e.g. val-model-14000.pkl which means the model was saved for epoch 1 at val step 4000. 2. We will load a model saved by the above training process after completing training one epoch. This is when we just start to do validation, i.e. at validation step \#1. Therefore, we need to reset `start_loss`, `start_bleu` and `start_step` to 0.0, 0.0 and 1 respectively. We will modify the code cell below depending on where we are in the validation process. 
``` # Load the last checkpoint checkpoint = torch.load(os.path.join('./models', 'val-model-75500.pkl')) # Load the pre-trained weights encoder.load_state_dict(checkpoint['encoder']) decoder.load_state_dict(checkpoint['decoder']) # Load these from checkpoint if in the middle of validation process; otherwise, comment them out start_loss = checkpoint['total_loss'] start_bleu = checkpoint['total_bleu_4'] # Reset these to 0.0 if starting validation for an epoch; otherwise comment them out #start_loss = 0.0 #start_bleu = 0.0 # Load epoch epoch = checkpoint['epoch'] # Load start_step from checkpoint if in the middle of training process; otherwise, comment it out start_step = checkpoint['val_step'] + 1 # Reset start_step to 1 if starting a new epoch; otherwise comment it out #start_step = 1 # Validate 1 epoch at a time due to very long validation time val_loss, val_bleu = validate(val_loader, encoder, decoder, criterion, train_loader.dataset.vocab, epoch, total_val_step, start_step, start_loss, start_bleu) ``` Now that we have completed training and validation for an entire epoch, we will save all the necessary information. We will load most information from `train-model-{epoch}.pkl` and `best_val_bleu` from `best-model.pkl`. We will then do the following updates: - appending `val_bleu` and `val_loss` to the lists `val_bleus` and `val_losses` respectively - updating `best_val_bleu` if it is not as good as `val_bleu` we just got in the above cell Then we will save the information needed for the epoch. 
``` # Load checkpoints checkpoint = torch.load(os.path.join('./models', 'train-model-7.pkl')) best_checkpoint = torch.load(os.path.join('./models', 'best-model.pkl')) # Load the pre-trained weights encoder.load_state_dict(checkpoint['encoder']) decoder.load_state_dict(checkpoint['decoder']) optimizer.load_state_dict(checkpoint['optimizer']) # Load train and validation losses and validation Bleu-4 scores train_losses = checkpoint['train_losses'] val_losses = checkpoint['val_losses'] val_bleus = checkpoint['val_bleus'] best_val_bleu = best_checkpoint['val_bleu'] # Load epoch epoch = checkpoint['epoch'] val_losses.append(val_loss) val_bleus.append(val_bleu) print (train_losses, val_losses, val_bleus, best_val_bleu) if val_bleu > best_val_bleu: print ("Validation Bleu-4 improved from {:0.4f} to {:0.4f}, saving model to best-model.pkl". format(best_val_bleu, val_bleu)) best_val_bleu = val_bleu print (best_val_bleu) filename = os.path.join("./models", "best-model.pkl") save_epoch(filename, encoder, decoder, optimizer, train_losses, val_losses, val_bleu, val_bleus, epoch) else: print ("Validation Bleu-4 did not improve, saving model to model-{}.pkl".format(epoch)) # Save the entire model anyway, regardless of being the best model so far or not filename = os.path.join("./models", "model-{}.pkl".format(epoch)) save_epoch(filename, encoder, decoder, optimizer, train_losses, val_losses, val_bleu, val_bleus, epoch) if epoch > 5: # Stop if the validation Bleu doesn't improve for 3 epochs if early_stopping(val_bleus, 3): print ("Val Bleu-4 doesn't improve anymore. Early stopping") ``` <a id='2d'></a> ### Notes regarding model validation - Another way to validate a model involves creating a json file such as [this one](https://github.com/cocodataset/cocoapi/blob/master/results/captions_val2014_fakecap_results.json) containing the model's predicted captions for the validation images. 
Then, write up a script or use one [available online](https://github.com/tylin/coco-caption) to calculate the BLEU score of the model. - Other evaluation metrics (such as TEOR and Cider) are mentioned in section 4.1 of [this paper](https://arxiv.org/pdf/1411.4555.pdf). # Next steps A few things that we may try in the future to improve model performance: - Adjust learning rate: make it decay over time, as in [this example](https://github.com/pytorch/examples/blob/master/imagenet/main.py). - Run the code on a GPU to so that we can train the model more. Tried AWS p2.xlarge; however, the datasets exceeded the storage limit.
github_jupyter
![header](assets/header.png) # Link Prediction - Introduction In this Notebook we are going show to how to develop and train user-provided custom models with Amazon Neptune ML to perform node regression in a property graph. <div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Note</b>: Make sure you're running the notebook with the <b>conda_neptune_ml_p36</b> jupyter kernel. This notebook take approximately 1 hour to complete</div> [Neptune ML](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning.html#machine-learning-overview) is a feature of Amazon Neptune that enables users to automate the creation, management, and usage of Graph Neural Network (GNN) machine learning models within Amazon Neptune. Neptune ML is built using [Amazon SageMaker](https://aws.amazon.com/sagemaker/) and [Deep Graph Library](https://www.dgl.ai/) and provides a simple and easy to use mechanism to build/train/maintain these models and then use the predictive capabilities of these models within a Gremlin query to predict elements or property values in the graph. This notebook follows the structure of the default [Neptune ML Link Prediction tutorial](https://github.com/aws/graph-notebook/blob/main/src/graph_notebook/notebooks/04-Machine-Learning/Neptune-ML-03-Introduction-to-Link-Prediction-Gremlin.ipynb) to make it easy to follow along and highligh the key differences when using a custom model with Neptune ML. Link prediction is a unsupervised machine learning task where a model built using nodes and edges in the graph to predict whether an edge exists between two particular nodes. This task can be handled by Neptune ML's built in model, however, if you need to add customization to the model training step or want more granular control over the model used then you can implement your own custom model for the task. 
Link prediction is commonly used to solve many common business problems such as: * Predicting group membership in a social or identity network * [Entity Resolution in an identity graph](https://github.com/awslabs/sagemaker-graph-entity-resolution/blob/master/source/sagemaker/dgl-entity-resolution.ipynb) * Knowledge graph completion * Product recommendation Neptune ML uses a four step process to automate the process of creating production ready GNN models: 1. **Load Data** - Data is loaded into a Neptune cluster using any of the normal methods such as the Gremlin drivers or using the Neptune Bulk Loader. 2. **Export Data** - A service call is made specifying the machine learning model type and model configuration parameters. The data and model configuration parameters are then exported from a Neptune cluster to an S3 bucket. 3. **Model Training** - A set of service calls are made to pre-process the exported data, train the machine learning model, and then generate an Amazon SageMaker endpoint that exposes the model. 4. **Run Queries** - The final step is to use this inference endpoint within our Gremlin queries to infer data using the machine learning model. The **Model Training** part of the workflow when using Neptune ML's built-in models consist of: * Data Processing API call * Model Training API call * Inference Endpoint API call During the experimentation and development phase, when using custom models, there's an extra step between Data Processing API call and the Model Training API call for model implementation and local model testing. So the overall **Model Training** part consists of: * Data Processing API call * ***Model development and local testing*** * Model Training API call * Inference Endpoint API call We'll use the [MovieLens 100k dataset](https://grouplens.org/datasets/movielens/100k/) provided by [GroupLens Research](https://grouplens.org/datasets/movielens/). This dataset consists of movies, users, and ratings of those movies by users. 
For this notebook we'll walk through how Neptune ML can predict product recommendations in a product knowledge graph. To demonstrate this we'll predict the movies a user would be most likely to rate as well as which users are most likely to rate a given movie. We'll walk through each step of loading and exporting the data, processing the data, developing and locally testing the model, training the model, and finally we'll show how to use that model to infer the movies a user has rated using Gremlin traversals. ## Checking that we are ready to run Neptune ML Run the code below to check that this cluster is configured to run Neptune ML. ``` import neptune_ml_utils as neptune_ml neptune_ml.check_ml_enabled() ``` If the check above did not say that this cluster is ready to run Neptune ML jobs then please check that the cluster meets all the pre-requisites defined [here](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning.html#machine-learning-overview). # Load the data The first step in building a Neptune ML model is to load data into the Neptune cluster. Loading data for Neptune ML follows the standard process of ingesting data into Amazon Neptune, for this example we'll be using the Bulk Loader. We have written a script that automates the process of downloading the data from the MovieLens websites and formatting it to load into Neptune. All you need to provide is an S3 bucket URI that is located in the same region as the cluster. 
<div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Note</b>: This is the only step that requires any specific input from the user, all remaining cells will automatically propogate the required values.</div> ``` s3_bucket_uri="s3://<INSERT S3 BUCKET OR PATH>" # remove trailing slashes s3_bucket_uri = s3_bucket_uri[:-1] if s3_bucket_uri.endswith('/') else s3_bucket_uri ``` Now that you have provided an S3 bucket, run the cell below which will download and format the MovieLens data into a format compatible with Neptune's bulk loader. ``` response = neptune_ml.prepare_movielens_data(s3_bucket_uri) ``` This process only takes a few minutes and once it has completed you can load the data using the `%load` command in the cell below. ``` %load -s {response} -f csv -p OVERSUBSCRIBE --run ``` ## Check to make sure the data is loaded Once the cell has completed, the data has been loaded into the cluster. We verify the data loaded correctly by running the traversals below to see the count of nodes by label: <div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Note</b>: The numbers below assume no other data is in the cluster</div> ``` %%gremlin g.V().groupCount().by(label).unfold() ``` If our nodes loaded correctly then the output is: * 19 genres * 1682 movies * 100000 rating * 943 users To check that our edges loaded correctly we check the edge counts: ``` %%gremlin g.E().groupCount().by(label).unfold() ``` If our edges loaded correctly then the output is: * 100000 about * 2893 included_in * 100000 rated * 100000 wrote ## Preparing for export With our data validated let's remove a few `rated` vertices so that we can build a model that predicts these missing connections. 
In a normal scenario, the data you would like to predict is most likely missing from the data being loaded so removing these values prior to building our machine learning model simulates that situation. Specifically, let's remove the `rated` edgesfor `user_1`, to provide us with a few candidate vertices to run our link prediction tasks. Let's start by taking a look at what `rated` edges currently exist. ``` %%gremlin g.V('user_1').outE('rated') ``` Now let's remove these edges to simulate them missing from our data. ``` %%gremlin g.V('user_1').outE('rated').drop() ``` Checking our data again we see that the edges have now been removed. ``` %%gremlin g.V('user_1').outE('rated') ``` # Export the data and model configuration **Note:** Before exporting data ensure that Neptune Export has been configured as described here: [Neptune Export Service](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-data-export-service.html#machine-learning-data-export-service-run-export) With our product knowledge graph loaded we are ready to export the data and configuration which will be used to train the ML model. The export process is triggered by calling to the [Neptune Export service endpoint](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-data-export-service.html). This call contains a configuration object which specifies the type of machine learning model to build, in this example node classification, as well as any feature configurations required. <div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Note</b>: The configuration used in this notebook specifies only a minimal set of configuration options meaning that our model's predictions are not as accurate as they could be. 
The parameters included in this configuration are one of a couple of sets of options available to the end user to tune the model and optimize the accuracy of the resulting predictions.</div> The configuration options provided to the export service are broken into two main sections, selecting the target and configuring features. ## Selecting the target In the first section, selecting the target, we specify what type of machine learning task will be run. To run a link prediction mdoel do not specify any `targets` in the `additionalParams` value. Unlike node classification or node regression, link prediction can be used to predict any edge type that exists in the graph between any two vertices. Becasue of this, there is no need to define a target set of values. ## Configuring features The second section of the configuration, configuring features, is where we specify details about the types of data stored in our graph and how the machine learning model should interpret that data. In machine learning, each property is known as a feature and these features are used by the model to make predictions. In our export example below we have specified that the `title` property of our `movie` should be exported and trained as a `text_word2vec` feature, the `name` property of our `genre` should be exported and treated as a `category` feature, and that our `age` field should range from 0-100 and that data should be bucketed into 10 distinct groups. See the [documentation for feature encoding](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-feature-encoding.html) to learn more about supported feature transformations in Neptune ML. <div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Important</b>: The example below is an example of a minimal amount of the features of the model configuration parameters and will not create the most accurate model possible. 
Additional options are available for tuning this configuration to produce an optimal model are described here: <a href="https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-data-export-parameters.html">Neptune Export Process Parameters</a></div> Running the cell below we set the export configuration and run the export process. Neptune export is capable of automatically creating a clone of the cluster by setting `cloneCluster=True` which takes about 20 minutes to complete and will incur additional costs while the cloned cluster is running. Exporting from the existing cluster takes about 5 minutes but requires that the `neptune_query_timeout` parameter in the [parameter group](https://docs.aws.amazon.com/neptune/latest/userguide/parameters.html) is set to a large enough value (>72000) to prevent timeout errors. ``` export_params={ "command": "export-pg", "params": { "endpoint": neptune_ml.get_host(), "profile": "neptune_ml", "cloneCluster": False }, "outputS3Path": f'{s3_bucket_uri}/neptune-export', "additionalParams": { "neptune_ml": { "version": "v2.0", "features": [ { "node": "movie", "property": "title", "type": "word2vec" }, { "node": "genre", "property": "name", "type": "category", }, { "node": "user", "property": "age", "type": "bucket_numerical", "range" : [1, 100], "num_buckets": 10 } ] } }, "jobSize": "medium"} %%neptune_ml export start --export-url {neptune_ml.get_export_service_host()} --export-iam --wait --store-to export_results ${export_params} ``` # ML data processing, model development, model training, and endpoint creation Once the export job is completed we are now ready to train our machine learning model and create the inference endpoint. Training our Neptune ML model requires a few steps. 
<div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Note</b>: The cells below only configure a minimal set of parameters required to run a model training.</div> ## Data processing The first step (data processing) processes the exported graph dataset using standard feature preprocessing techniques to prepare it for use by DGL. This step performs functions such as feature normalization for numeric data and encoding text features using word2vec. At the conclusion of this step the dataset is formatted for model training. This step is implemented using a SageMaker Processing Job and data artifacts are stored in a pre-specified S3 location once the job is complete. Additional options and configuration parameters for the data processing job can be found using the links below: * [Data Processing](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-on-graphs-processing.html) * [dataprocessing command](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-dataprocessing.html) Run the cells below to create the data processing configuration and to begin the processing job. ``` # The training_job_name can be set to a unique value below, otherwise one will be auto generated training_job_name=neptune_ml.get_training_job_name('link-prediction') processing_params = f""" --config-file-name training-data-configuration.json --job-id {training_job_name} --s3-input-uri {export_results['outputS3Uri']} --s3-processed-uri {str(s3_bucket_uri)}/preloading """ %neptune_ml dataprocessing start --wait --store-to processing_results {processing_params} ``` ## Model development and local testing The next step consists of developing the model, writing the model training code, and testing the custom implementation before full-fledged model training. The custom model implementation is provided in the [`link-predict/src/`](link-predict/src/) directory which has the following contents. 
* **[`model-hpo-configuration.json`](link-predict/src/model-hpo-configuration.json)** contains the configuration for the model hyperparameters, their defaults and ranges according to the [Neptune ML hyperparameter configuration specification](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-customizing-hyperparams.html) * **[`train.py`](link-predict/src/train.py)** is the entrypoint script for model training. It contains the custom model code implementation, and when executed, runs model training with the hyperparameter values passed in as program arguments. * **[`transform.py`](link-predict/src/transform.py)** is the entrypoint script for [model transform](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-model-transform.html). When executed, it computes the model artifacts that will be needed when the model is deployed to an inference endpoint. Let's take a look at the training script in more detail ``` !pygmentize link-predict/src/train.py ``` As you can see the `train.py` defines the `RGCNLinkPrediction` model using the implementation provided by the Neptune ML Toolkit modelzoo. The modelzoo contains reusable modular GNN components implemented in [DGL](https://github.com/dmlc/dgl) with the Pytorch backend, which you can use in your custom implementations. The script also implements functions to train this model architecture using pytorch. We can run this script locally, to test it out and really see the training in action but first we need to bring the processed data locally and create a folder to store the training output. ``` s3_processed_data = f"""{str(s3_bucket_uri)}/preloading""" !aws s3 cp --recursive $s3_processed_data link-predict/data/ !mkdir output/ ``` Now let's run the training script ``` !cd link-predict && python src/train.py --local ``` This should have run successfully for a few epochs and generated some files in `output/`. 
The model parameters are in `output/model.pt` and evaluation metrics for the training run are in `output/eval_metrics_info.json`. The contents of output are what will be generated when we train with the Neptune ML modeltraining API later. Let's also take a look at the transform script in detail ``` !pygmentize link-predict/src/transform.py ``` The transform script is what is used to compute the model artifacts such as node embeddings and precomputed predictions needed for inference. The transform script can also be run on the local instance. ``` !cd link-predict && python src/transform.py --local ``` The transform script should have added even more files like `entity.npy` to the `output/` folder. These files are needed to create the model that will be deployed but are not generated by `train.py`. This is because when we train with the Neptune ML modeltraining API, `train.py` is executed by multiple SageMaker training jobs trying different hyperparameter configurations. However, only the artifacts from the best training job are needed for the deployment. Therefore, when using the Neptune ML train API, `transform.py` is called once on the output from the best training instance. Now, that we have verified locally that both of the entry point scripts work, we can move to the next step of training using the Neptune ML modeltraining API. To do this we must first upload the `link-predict/src/` to S3 so that the neptune ml modeltraining API can pick up our custom model implementation. ``` s3_custom_source_location = f"""{str(s3_bucket_uri)}/training/source/{training_job_name}""" !aws s3 cp --recursive link-predict/src/ $s3_custom_source_location ``` ## Model training The next step (model training) trains the ML model that will be used for predictions. The model training is done in stages. The first stage uses a SageMaker Processing job to parse the `model-hpo-configuration.json` and generate a model training strategy. 
A model training strategy is a configuration set that specifies what type of model and model hyperparameter ranges will be used for the model training. Once the first stage is complete, the SageMaker Processing job launches a SageMaker Hyperparameter tuning job. The SageMaker Hyperparameter tuning job runs a pre-specified number of model training job trials (`train.py`) on the processed data. Once all the training jobs are complete, the Hyperparameter tuning job also notes the training job that produced the best performing model. The SageMaker Processing job then computes and stores (`transform.py`) the model artifacts generated by the best SageMaker training job in the output S3 location. Additional options and configuration parameters for the model training job can be found using the links below: * [Model Training](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-on-graphs-model-training.html) * [modeltraining command](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-modeltraining.html) <div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Information</b>: Link prediction is a more computationally complex model than classification or regression so training this model will take 2-3 hours</div> ``` training_params=f""" --job-id {training_job_name} --data-processing-id {training_job_name} --instance-type ml.p3.2xlarge --s3-output-uri {str(s3_bucket_uri)}/training """ # %neptune_ml training start --wait --store-to training_results {training_params} curl \ -X POST https://(your Neptune endpoint)/ml/modeltraining -H 'Content-Type: application/json' \ -d '{ "id" : "<INSERT training job name from above>", "dataProcessingJobId" : "<INSERT training job name from above>", "trainModelS3Location" : "<INSERT S3 BUCKET OR PATH>", "trainingInstanceType" : "ml.g4.xlarge", "customModelParameters" : { "sourceS3DirectoryPath": "<INSERT Source S3 path from 
above>", "trainingEntryPointScript": "train.py", "transformEntryPointScript": "transform.py" } }' ``` Once the training job is completed, you will have a model that was trained using the custom implementation that was provided. After this stage, all the following steps for endpoint creation and inference are identical to using the built-in Neptune ML models. ## Endpoint creation The final step is to create the inference endpoint which is an Amazon SageMaker endpoint instance that is launched with the model artifacts produced by the best training job. This endpoint will be used by our graph queries to return the model predictions for the inputs in the request. The endpoint once created stays active until it is manually deleted. Each model is tied to a single endpoint. Additional options and configuration parameters for the data processing job can be found using the links below: * [Inference Endpoint](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-on-graphs-inference-endpoint.html) * [Endpoint command](https://docs.aws.amazon.com/neptune/latest/userguide/machine-learning-api-endpoints.html) <div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Information</b>: The endpoint creation process takes ~5-10 minutes</div> ``` endpoint_params=f""" --job-id {training_job_name} --model-job-id {training_job_name}""" %neptune_ml endpoint create --wait --store-to endpoint_results {endpoint_params} ``` Once this has completed we get the endpoint name for our newly created inference endpoint. The cell below will set the endpoint name which will be used in the Gremlin queries below. ``` endpoint=endpoint_results['endpoint']['name'] ``` # Querying using Gremlin Now that we have our inference endpoint setup let's query our product knowledge graph to show how to predict how likely it is that a user will rate a movie. 
The need to predict the likelihood of connections in a product knowledge graph is commonly used to provide recommendations for products that a customer might purchase. Unlike node classification and node regression, link prediction can infer any of the edge labels that existed in our graph when the model was created. In our model this means we could infer the probability that a `wrote`, `about`, `rated`, or `included_in` edge exists between any two vertices. However for this example we are going to focus on inferring the `rated` edges between the `user` and `movie` vertices. ## Predicting what movies a user will rate Before we predict what movies `user_1` is most likely to rate let's verify that our graph does not contain any `rated` edges for `user_1`. ``` %%gremlin g.V('user_1').out('rated').hasLabel('movie').valueMap() ``` As expected, there are not any `rated` edges for `user_1`. Maybe `user_1` is a new user in our system and we want to provide them some product recommendations. Let's modify the query to predict what movies `user_1` is most likely to rate. First, we add the `with()` step to specify the inference endpoint we want to use with our Gremlin query like this `g.with("Neptune#ml.endpoint","<INSERT ENDPOINT NAME>")`. <div style="background-color:#eeeeee; padding:10px; text-align:left; border-radius:10px; margin-top:10px; margin-bottom:10px; "><b>Note</b>: The endpoint values are automatically passed into the queries below</div> Second, when we ask for the link within our query we use the `out()` step to predict the target node or the `in()` step to predict the source node. For each of these steps we need to specify the type of model being used with a with() step (`with("Neptune#ml.prediction")`). Putting these items together we get the query below, which returns the movies that `user_1` is likely to rate. ``` %%gremlin g.with("Neptune#ml.endpoint","${endpoint}"). 
V('user_1').out('rated').with("Neptune#ml.prediction").hasLabel('movie').valueMap('title') ``` Great, we can now see that we have predicted edges showing that `Sleepers` is the movie that `user_1` is most likely to rate. In the example above we predicted the target node but we can also use the same mechanism to predict the source node. Let's turn that question around and say that we had a product and we wanted to find the people most likely to rate this product. ## Predicting the top 10 users most likely to rate a movie To accomplish this we would want to start at the movie vertex and predict the rated edge back to the user. Since we want to return the top 10 recommended users we need to use the `.with("Neptune#ml.limit",10)` configuration option. Combining these together we get the query below which finds the top 10 users most likely to rate `Apollo 13`. ``` %%gremlin g.with("Neptune#ml.endpoint","${endpoint}"). with("Neptune#ml.limit",10). V().has('title', 'Apollo 13 (1995)'). in('rated').with("Neptune#ml.prediction").hasLabel('user').id() ``` With that we have successfully been able to show how you can use link prediction to predict edges starting at either end. From the examples we have shown here you can begin to see how the ability to infer unknown connections within a graph starts to enable many interesting and unique use cases within Amazon Neptune. # Cleaning Up Now that you have completed this walkthrough you have created a Sagemaker endpoint which is currently running and will incur the standard charges. If you are done trying out Neptune ML and would like to avoid these recurring costs, run the cell below to delete the inference endpoint. ``` neptune_ml.delete_endpoint(training_job_name) ``` In addition to the inference endpoint the CloudFormation script that you used has set up several additional resources. If you are finished then we suggest you delete the CloudFormation stack to avoid any recurring charges. 
For instructions, see Deleting a Stack on the AWS CloudFormation Console. Be sure to delete the root stack (the stack you created earlier). Deleting the root stack deletes any nested stacks.
github_jupyter
``` import numpy as np import pandas as pd from bs4 import BeautifulSoup import torchvision from torchvision import transforms, datasets, models import torch from torchvision.models.detection.faster_rcnn import FastRCNNPredictor import matplotlib.pyplot as plt from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor import matplotlib.patches as patches import os from PIL import Image def gen_box(stuff): x_low = int(stuff.find('xmin').text) y_low = int(stuff.find('ymin').text) x_high = int(stuff.find('xmax').text) y_high = int(stuff.find('ymax').text) return [x_low, y_low, x_high, y_high] def gen_lab(stuff): if stuff.find('name').text == 'with_mask': return 1 elif stuff.find('name').text == 'mask_weared_incorrect': return 2 else: return 0 def gen_tar(id, file): with open(file) as f: data = f.read() soup = BeautifulSoup(data, features= "lxml-xml") objects = soup.find_all('object') num_obs = len(objects) boxes = [] labels = [] for i in objects: boxes.append(gen_box(i)) labels.append(gen_lab(i)) boxes = torch.as_tensor(boxes, dtype=torch.float32) labels = torch.as_tensor(labels, dtype=torch.int64) img_id = torch.tensor([id]) target = {} target["boxes"] = boxes target["labels"] = labels target["image_id"] = img_id return target imgs = list(sorted(os.listdir("images/"))) labels = list(sorted(os.listdir("annotations/"))) class create_Mask(object): def __init__(self, transforms): self.transforms = transforms self.imgs = list(sorted(os.listdir("images/"))) def __getitem__(self, idx): image = 'maksssksksss'+ str(idx) + '.png' label = 'maksssksksss'+ str(idx) + '.xml' image_path = os.path.join('images/', image) label_path = os.path.join('annotations/', label) img = Image.open(image_path).convert('RGB') target = gen_tar(idx, label_path) if self.transforms is not None: img = self.transforms(img) return img, target def __len__(self): return len(self.imgs) data_tr = transforms.Compose([ transforms.ToTensor(), ]) def fn_collate(batch): return tuple(zip(*batch)) data = 
create_Mask(data_tr) data_loader = torch.utils.data.DataLoader( data, batch_size=4, collate_fn = fn_collate ) ``` # Model ``` def get_model(num_classes): model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True) features = model.roi_heads.box_predictor.cls_score.in_features model.roi_heads.box_predictor = FastRCNNPredictor(features, num_classes) return model model = get_model(3) device = torch.device('cuda') for imgs, annotations in data_loader: imgs = list(img.to(device) for img in imgs) annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations] print(annotations) break ``` # Training ``` epochs = 50 model.to(device) parameters = [p for p in model.parameters() if p.requires_grad] optimizer = torch.optim.Adam(parameters, lr = 0.0001) len_dataloader = len(data_loader) for epoch in range(epochs): model.train() i = 0 epoch_loss = 0 for imgs, annotations in data_loader: i += 1 imgs = list(img.to(device) for img in imgs) annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations] loss_dict = model([imgs[0]], [annotations[0]]) losses = sum(loss for loss in loss_dict.values()) optimizer.zero_grad() losses.backward() optimizer.step() epoch_loss += losses print(epoch_loss) for imgs, annotations in data_loader: imgs = list(img.to(device) for img in imgs) annotations = [{k: v.to(device) for k, v in t.items()} for t in annotations] break model.eval() preds = model(imgs) preds def plot_image(img_tensor, annotation): fig,ax = plt.subplots(1) img = img_tensor.cpu().data # Display the image ax.imshow(img.permute(1, 2, 0)) for box in annotation["boxes"]: xmin, ymin, xmax, ymax = box # Create a Rectangle patch rect = patches.Rectangle((xmin,ymin),(xmax-xmin),(ymax-ymin),linewidth=1,edgecolor='r',facecolor='none') # Add the patch to the Axes ax.add_patch(rect) plt.show() print("Prediction") plot_image(imgs[1], preds[1]) print("Target") plot_image(imgs[1], annotations[1]) ```
github_jupyter
# Octant Weak Lensing Maps for cosmoDC2 using Argonne Simulations This notebook shows, for a given redshift distribution (provided by Joe Zuntz), how we (Patricia Larsen and Nan Li) calculate the effective convergence and shear maps by using Argonne Simulations, which is designed for cosmoDC2. We also attached some codes for the calculation of the power spectrums of weak lensing maps at the bottom of this notebook. Some cross-checks will be implemented later, for example, comparing our results to the theoretical predictions created by pyccl. Should you have any questions or are interested, please feel free to slack us (@linan7788626, @plarsen, @joezuntz). The maps are located on NERSC, and if you want to take a look at the data, please let me know. ``` %matplotlib inline import numpy as np import pylab as pl import healpy as hp from scipy import interpolate from scipy.integrate import quad # path to the simulation data on Cooley, which is an Argonne cluster, please contact Patricia for more information direc_shears = </path/to/the/simulations/data/> def loadinWeakLensingMaps(step): ''' Load in the data in the directory above. The data files contain integrated (0 to zs) weak lensing maps for a given source plane. The input parameter <step> stands for the snapshotID corresponding to the source plane. ''' try: a00 = hp.read_map(direc_shears+'rt_'+step+'_A1_2.fits',0) a01 = hp.read_map(direc_shears+'rt_'+step+'_A1_2.fits',1) a10 = hp.read_map(direc_shears+'rt_'+step+'_A2_2.fits',0) a11 = hp.read_map(direc_shears+'rt_'+step+'_A2_2.fits',1) except: print("Can not find SnapShot %s"%step) return None kappa_m = 1.0 - (a00 + a11)*0.5 # kappa map shear1_m = (a11-a00)*0.5 # shear1 map shear2_m = -(a01+a10)*0.5 # shear2 map return kappa_m, shear1_m, shear2_m def weights_nz_dz(zs_arr,steps): ''' Calculate the weights of the effective weak lensing maps for a given redshift distribution of sources, i.e., n(z). 
There are <nstep> redshift bins for sources, - the zeroth bin is [0, (zs[0]+zs[1])/2); zbin = zs[0]. - the last bin is [(zs[-2]+zs[-1])/2), 1.0); zbin = zs[-1] - the ith bin is [(zs[i-1]+z[i])/2, (zs[i]+z[i+1])/2); zbin = zs[i] By integrating the normalized n(z) in the bins, we can obtain the weights for the source planes. ''' zs_in, ng_in = np.loadtxt("./source_1.txt", usecols=(0, 1), unpack=True) # n(z) from Joe Zuntz dzs = zs_in[1:]-zs_in[:-1] hng = (ng_in[1:]+ng_in[:-1])*0.5 area_tot = np.sum(hng*dzs) ng_rescale = ng_in/area_tot # normalization f = interpolate.interp1d(zs_in, ng_rescale) # the function created by interpolation fnz_norm = f(zs_arr) zs_min = 0.0 zs_max = 3.0 wt_arr = zs_arr*0.0 zinit = 200. nsteps = 500 a = np.linspace(1./(zinit+1.),1.,nsteps) z = 1./a - 1. for i in range(len(zs_arr)): if i==0: zs_1 = zs_min zs_m = zs_arr[0] zs_2 = z[int(steps[i])] wt_arr[i] = quad(f, zs_1,zs_2)[0] elif i==(len(zs_arr)-1): zs_1 = z[int(steps[i-1])] zs_m= zs_arr[-1] zs_2 = zs_max wt_arr[i] = quad(f, zs_1,zs_2)[0] else: zs_1 = z[int(steps[i-1])] zs_m= zs_arr[i] zs_2 = z[int(steps[i])] wt_arr[i] = quad(f, zs_1,zs_2)[0] return wt_arr def cal_eff_wl_maps(): ''' calculate the total effective weak lensing maps by summing the list of maps with the corresponding weights. 
''' # Initial the redshifts of source planes data_list = {} data_list['steps'] = np.array(['487', '475', '464', '453', '442', '432', '421', '411', '401', '392', '382', '373', '365', '355', '347', '338', '331', '323', '315', '307', '300', '293', '286', '279', '272', '266', '259', '253', '247', '241', '235', '230', '224', '219', '213', '208', '203', '198', '194', '189', '184', '180', '176', '171', '167', '163', '159', '155', '151', '148', '144', '141', '137', '134', '131', '127', '124', '121']) data_list['zs'] = np.array([0.019587576, 0.04249394, 0.06348896, 0.08982432, 0.11601174, 0.14167726, 0.17023492, 0.19876349, 0.22816956, 0.25715542, 0.28774858, 0.31976724, 0.3496554, 0.38333392, 0.4181242, 0.45316005, 0.4872712, 0.5210507, 0.5587207, 0.5984776, 0.63728297, 0.67555356, 0.7154497, 0.7575482, 0.8018985, 0.8445264, 0.8898808, 0.9368595, 0.9829167, 1.030879, 1.0813811, 1.1297112, 1.1807017, 1.2338057, 1.2901752, 1.3487234, 1.4050395, 1.4638758, 1.5192988, 1.5775385, 1.6453991, 1.7094843, 1.7690942, 1.83975, 1.9136083, 1.9828148, 2.0554118, 2.131457, 2.2114959, 2.2847886, 2.3618498, 2.4422584, 2.5272007, 2.6155977, 2.6953585, 2.7935054, 2.895901, 2.9888024]) nsteps = len(data_list['zs']) weights = weights_nz_dz(data_list['zs'], data_list['steps']) kappa_map_last, shear1_map_last, shear2_map_last = loadinWeakLensingMaps(data_list['steps'][-1]) kappa_map = kappa_map_last*weights[-1] shear1_map = shear1_map_last*weights[-1] shear2_map = shear2_map_last*weights[-1] for i in range(nsteps-1): res_tmp = loadinWeakLensingMaps(data_list['steps'][i]) if res_tmp==None: continue else: kappa_m_tmp, \ shear1_m_tmp, \ shear2_m_tmp = res_tmp kappa_map = kappa_map + kappa_m_tmp*weights[i] shear1_map = shear1_map + shear1_m_tmp*weights[i] shear2_map = shear2_map + shear2_m_tmp*weights[i] return kappa_map, shear1_map, shear2_map def vis_kappa_eff(kappa_map): ''' A simple function to visualize an octant convergence map with healpix. 
''' nside = 4096 #----------------------------------- # Create masks according to the octant properties # x,y,z = hp.pix2vec(nside, np.arange(hp.nside2npix(nside))) mask_octant = (x>0)&(y>0)&(z<0) #----------------------------------- # setup the colormap # cmap = pl.cm.jet cmap.set_over(cmap(1.0)) cmap.set_under('w') cmap.set_bad('gray') #----------------------------------- # make a plot in healpix map # kappa_map = hp.ma(kappa_map) kappa_map.mask = np.logical_not(mask_octant) hp.mollview(kappa_map, cmap=cmap) return 0 ''' The main function to calculate kappa and shear maps. It takes about 8 minutes. ''' ka_map, s1_map, s2_map = cal_eff_wl_maps() ''' Write the maps to a fits file with healpy ''' hp.write_map('WL_MAPs_3.fits',m=(ka_map,s1_map,s2_map),overwrite=True) ''' Functions for the calculation power spectrum, please slack Patricia (@plarsen) for more details. ''' def upgrade_pixels(pix_list): pix_list_new=np.array([],dtype=np.int64) for i in range(4): pix_list_new = np.concatenate((pix_list_new,pix_list*4+i)) return pix_list_new def compute_masks(): nside=4096 x,y,z = hp.pix2vec(nside, np.arange(hp.nside2npix(nside))) mask_octant = (x>0)&(y>0)&(z<0) pix_list_image = [8786, 8787, 8788, 8789, 8790, 8791, 8792, 8793, 8794, 8913, 8914, 8915, 8916, 8917, 8918, 8919, 8920, 8921, 9042, 9043, 9044, 9045, 9046, 9047, 9048, 9049, 9050, 9169, 9170, 9171, 9172, 9173, 9174, 9175, 9176, 9177, 9178, 9298, 9299, 9300, 9301, 9302, 9303, 9304, 9305, 9306, 9425, 9426, 9427, 9428, 9429, 9430, 9431, 9432, 9433, 9434, 9554, 9555, 9556, 9557, 9558, 9559, 9560, 9561, 9562, 9681, 9682, 9683, 9684, 9685, 9686, 9687, 9688, 9689, 9690, 9810, 9811, 9812, 9813, 9814, 9815, 9816, 9817, 9818, 9937, 9938, 9939, 9940, 9941, 9942, 9943, 9944, 9945, 9946, 10066, 10067, 10068, 10069, 10070, 10071, 10072, 10073, 10074, 10193, 10194, 10195, 10196, 10197, 10198, 10199, 10200, 10201, 10202, 10321, 10322, 10323, 10324, 10325, 10326, 10327, 10328, 10329, 10444, 10445, 10446, 10447, 10448, 10449, 10450, 
10451, 10452] pix_list_nest = hp.ring2nest(32,pix_list_image) for i in range(int(np.log2(nside/32))): pix_list_nest = upgrade_pixels(pix_list_nest) pix_list_image = hp.nest2ring(nside,pix_list_nest) mask_image = np.zeros(hp.nside2npix(nside)) mask_image[pix_list_image]=1.0 return mask_octant,mask_image def compute_power(map_array,mask,lmax=1500): '''compute power spectra for a given array of maps with a given mask (anafast with fsky mask correction)''' fsky = np.sum(mask)/(len(mask)+0.0) alms_wl = hp.map2alm(map_array,lmax=lmax) cls_wl = hp.alm2cl(alms_wl)/fsky/(hp.pixwin(4096)[:lmax+1]**2) # assuming octant map return cls_wl ''' compute and plot the power spectrums of kappa and shear maps. ''' # hp.smoothing(kappa,lmax=lmax,fwhm= (arcmin_smoothing/60.)*(np.pi/180.)) #smoothing if needed # read maps from the fits file kappa0 = hp.read_map('WL_MAPs_3.fits',0) shear1 = hp.read_map('WL_MAPs_3.fits',1) shear2 = hp.read_map('WL_MAPs_3.fits',2) # create masks in healpix space mask_oct, mask_image = compute_masks() # compute the power spectrums of the maps with masks. lmax=1500 cls = compute_power((kappa0*mask_oct,shear1*mask_oct,shear2*mask_oct),mask_oct,lmax=lmax) # plot the power spectrums pl.figure() l = np.arange(lmax+1) pl.loglog(l,cls[0],label='convergence power') pl.loglog(l,cls[1],label='shear power') ```
github_jupyter
# Building your Deep Neural Network: Step by Step **Notation**: - Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters. - Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations). Let's get started! ## 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the main package for scientific computing with Python. - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python. - dnn_utils provides some necessary functions for this notebook. - testCases provides some test cases to assess the correctness of your functions - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. ``` import numpy as np import h5py import matplotlib.pyplot as plt from testCases_v4a import * from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) ``` ## 2 - Outline of the Assignment To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will: - Initialize the parameters for a two-layer network and for an $L$-layer neural network. 
- Implement the forward propagation module (shown in purple in the figure below). - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). - We give you the ACTIVATION function (relu/sigmoid). - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function. - Compute the loss. - Implement the backward propagation module (denoted in red in the figure below). - Complete the LINEAR part of a layer's backward propagation step. - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function - Finally update the parameters. <img src="images/final outline.png" style="width:800px;height:500px;"> <caption><center> **Figure 1**</center></caption><br> **Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. ## 3 - Initialization You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. ### 3.1 - 2-layer Neural Network **Exercise**: Create and initialize the parameters of the 2-layer neural network. **Instructions**: - The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. 
# GRADED FUNCTION: initialize_parameters

def initialize_parameters(n_x, n_h, n_y):
    """Initialize the parameters of a 2-layer net (LINEAR->RELU->LINEAR->SIGMOID).

    Arguments:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    parameters -- python dictionary containing:
                  W1 -- weight matrix of shape (n_h, n_x)
                  b1 -- bias vector of shape (n_h, 1)
                  W2 -- weight matrix of shape (n_y, n_h)
                  b2 -- bias vector of shape (n_y, 1)
    """
    np.random.seed(1)  # fixed seed so graded results are reproducible

    # Small random weights break symmetry between hidden units;
    # biases can safely start at zero.
    parameters = {
        "W1": np.random.randn(n_h, n_x) * 0.01,
        "b1": np.zeros((n_h, 1)),
        "W2": np.random.randn(n_y, n_h) * 0.01,
        "b2": np.zeros((n_y, 1)),
    }

    assert parameters["W1"].shape == (n_h, n_x)
    assert parameters["b1"].shape == (n_h, 1)
    assert parameters["W2"].shape == (n_y, n_h)
    assert parameters["b2"].shape == (n_y, 1)

    return parameters
Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: <table style="width:100%"> <tr> <td> </td> <td> **Shape of W** </td> <td> **Shape of b** </td> <td> **Activation** </td> <td> **Shape of Activation** </td> <tr> <tr> <td> **Layer 1** </td> <td> $(n^{[1]},12288)$ </td> <td> $(n^{[1]},1)$ </td> <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> <td> $(n^{[1]},209)$ </td> <tr> <tr> <td> **Layer 2** </td> <td> $(n^{[2]}, n^{[1]})$ </td> <td> $(n^{[2]},1)$ </td> <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> <td> $(n^{[2]}, 209)$ </td> <tr> <tr> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$</td> <td> $\vdots$ </td> <tr> <tr> <td> **Layer L-1** </td> <td> $(n^{[L-1]}, n^{[L-2]})$ </td> <td> $(n^{[L-1]}, 1)$ </td> <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> <td> $(n^{[L-1]}, 209)$ </td> <tr> <tr> <td> **Layer L** </td> <td> $(n^{[L]}, n^{[L-1]})$ </td> <td> $(n^{[L]}, 1)$ </td> <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td> <td> $(n^{[L]}, 209)$ </td> <tr> </table> Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\\ m & n & o \\ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\\ d & e & f \\ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \\ t \\ u \end{bmatrix}\tag{2}$$ Then $WX + b$ will be: $$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u \end{bmatrix}\tag{3} $$ **Exercise**: Implement initialization for an L-layer Neural Network. **Instructions**: - The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function. - Use random initialization for the weight matrices. 
# GRADED FUNCTION: initialize_parameters_deep

def initialize_parameters_deep(layer_dims):
    """Initialize weights and biases for an L-layer network.

    Arguments:
    layer_dims -- python list of layer sizes; layer_dims[0] is the input size

    Returns:
    parameters -- python dictionary with "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layer_dims[l], layer_dims[l-1]) and
                  bl has shape (layer_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed so graded results are reproducible
    parameters = {}

    # Layer 0 is the input, so trainable layers run from 1 to len(layer_dims)-1.
    for layer in range(1, len(layer_dims)):
        fan_out, fan_in = layer_dims[layer], layer_dims[layer - 1]
        parameters["W" + str(layer)] = np.random.randn(fan_out, fan_in) * 0.01
        parameters["b" + str(layer)] = np.zeros((fan_out, 1))

        assert parameters["W" + str(layer)].shape == (fan_out, fan_in)
        assert parameters["b" + str(layer)].shape == (fan_out, 1)

    return parameters
# GRADED FUNCTION: linear_forward

def linear_forward(A, W, b):
    """Compute the linear part of a layer's forward propagation: Z = W A + b.

    Arguments:
    A -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)

    Returns:
    Z -- the input of the activation function (pre-activation parameter)
    cache -- tuple (A, W, b), stored for computing the backward pass efficiently
    """
    # b broadcasts across the m example-columns of W A.
    Z = np.dot(W, A) + b

    assert Z.shape == (W.shape[0], A.shape[1])
    return Z, (A, W, b)
# GRADED FUNCTION: linear_activation_forward

def linear_activation_forward(A_prev, W, b, activation):
    """Forward propagation for one LINEAR->ACTIVATION layer.

    Arguments:
    A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)
    activation -- activation to use, as a text string: "sigmoid" or "relu"

    Returns:
    A -- the post-activation value g(W A_prev + b)
    cache -- tuple (linear_cache, activation_cache) for the backward pass
    """
    # Both activation helpers return (activation value, cache containing Z).
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)

    assert A.shape == (W.shape[0], A_prev.shape[1])
    return A, (linear_cache, activation_cache)
# GRADED FUNCTION: L_model_forward

def L_model_forward(X, parameters):
    """Forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID model.

    Arguments:
    X -- data, numpy array of shape (input size, number of examples)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- last post-activation value (the predictions)
    caches -- list of the L caches from linear_activation_forward()
    """
    caches = []
    A = X
    L = len(parameters) // 2  # two entries (W, b) per layer

    # Hidden layers 1 .. L-1 use ReLU.
    for l in range(1, L):
        A, cache = linear_activation_forward(
            A, parameters["W" + str(l)], parameters["b" + str(l)], activation="relu")
        caches.append(cache)

    # Output layer L uses sigmoid.
    AL, cache = linear_activation_forward(
        A, parameters["W" + str(L)], parameters["b" + str(L)], activation="sigmoid")
    caches.append(cache)

    assert AL.shape == (1, X.shape[1])
    return AL, caches
# GRADED FUNCTION: compute_cost

def compute_cost(AL, Y):
    """Cross-entropy cost J = -(1/m) * sum(y*log(a) + (1-y)*log(1-a)).

    Arguments:
    AL -- probability vector of label predictions, shape (1, number of examples)
    Y -- true "label" vector (0 or 1 per example), shape (1, number of examples)

    Returns:
    cost -- scalar cross-entropy cost
    """
    m = Y.shape[1]

    # Vectorized over all m examples, then averaged.
    logprobs = Y * np.log(AL) + (1 - Y) * np.log(1 - AL)
    cost = -np.sum(logprobs) / m

    cost = np.squeeze(cost)  # e.g. turns [[17]] into 17
    assert cost.shape == ()
    return cost
**Reminder**: <img src="images/backprop_kiank.png" style="width:650px;height:250px;"> <caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption> <!-- For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows: $$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$ In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted. Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$. This is why we talk about **backpropagation**. !--> Now, similar to forward propagation, you are going to build the backward propagation in three steps: - LINEAR backward - LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model) ### 6.1 - Linear backward For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation). Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$. 
# GRADED FUNCTION: linear_backward

def linear_backward(dZ, cache):
    """Backward propagation through the linear part of a single layer l.

    Arguments:
    dZ -- Gradient of the cost with respect to the linear output of layer l
    cache -- tuple (A_prev, W, b) from the forward pass of this layer

    Returns:
    dA_prev -- Gradient of the cost w.r.t. the previous layer's activations, same shape as A_prev
    dW -- Gradient of the cost w.r.t. W, same shape as W
    db -- Gradient of the cost w.r.t. b, same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]

    # dW = (1/m) dZ A_prev^T ; db = (1/m) row-sum of dZ ; dA_prev = W^T dZ
    dW = np.dot(dZ, A_prev.T) / m
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)

    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape
    return dA_prev, dW, db
# GRADED FUNCTION: linear_activation_backward

def linear_activation_backward(dA, cache, activation):
    """Backward propagation for one LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for current layer l
    cache -- tuple (linear_cache, activation_cache) stored during the forward pass
    activation -- the activation used in this layer, as a text string: "sigmoid" or "relu"

    Returns:
    dA_prev -- Gradient of the cost w.r.t. the previous layer's activations, same shape as A_prev
    dW -- Gradient of the cost w.r.t. W (current layer l), same shape as W
    db -- Gradient of the cost w.r.t. b (current layer l), same shape as b
    """
    linear_cache, activation_cache = cache

    # First undo the activation (dZ = dA * g'(Z)), then the linear step.
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)

    return linear_backward(dZ, linear_cache)
]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.44513824 0.37371418 -0.10478989]] </td> </tr> <tr> <td > db </td> <td > [[-0.20837892]] </td> </tr> </table> ### 6.3 - L-Model Backward Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. <img src="images/mn_backward.png" style="width:450px;height:300px;"> <caption><center> **Figure 5** : Backward pass </center></caption> ** Initializing backpropagation**: To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$. To do so, use this formula (derived using calculus which you don't need in-depth knowledge of): ```python dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL ``` You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$ For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`. **Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model. 
# GRADED FUNCTION: L_model_backward

def L_model_backward(AL, Y, caches):
    """Backward propagation for the whole [LINEAR->RELU]*(L-1) -> LINEAR -> SIGMOID model.

    Arguments:
    AL -- probability vector, output of L_model_forward(), shape (1, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
    caches -- list of the L caches from L_model_forward(): "relu" caches for
              layers 1..L-1 (indices 0..L-2), then the "sigmoid" cache at index L-1

    Returns:
    grads -- dictionary with grads["dA" + str(l)], grads["dW" + str(l)], grads["db" + str(l)]
    """
    grads = {}
    L = len(caches)  # the number of layers
    Y = Y.reshape(AL.shape)  # make Y the same shape as AL

    # Derivative of the cross-entropy cost with respect to AL.
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # Layer L: SIGMOID -> LINEAR gradients.
    grads["dA" + str(L - 1)], grads["dW" + str(L)], grads["db" + str(L)] = \
        linear_activation_backward(dAL, caches[L - 1], "sigmoid")

    # Layers L-1 .. 1: RELU -> LINEAR gradients, walking the caches backwards.
    for l in reversed(range(L - 1)):
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(
            grads["dA" + str(l + 1)], caches[l], "relu")
        grads["dA" + str(l)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp

    return grads


# GRADED FUNCTION: update_parameters

def update_parameters(parameters, grads, learning_rate):
    """One gradient-descent step: W := W - alpha*dW, b := b - alpha*db for every layer.

    Arguments:
    parameters -- python dictionary containing your parameters W1, b1, ..., WL, bL
    grads -- python dictionary containing your gradients, output of L_model_backward
    learning_rate -- step size alpha

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    L = len(parameters) // 2  # two entries (W, b) per layer

    # Update each layer's parameters in place.
    for layer in range(1, L + 1):
        parameters["W" + str(layer)] -= learning_rate * grads["dW" + str(layer)]
        parameters["b" + str(layer)] -= learning_rate * grads["db" + str(layer)]

    return parameters
github_jupyter
# The Matplotlib Jupyter Widget Backend Enabling interaction with matplotlib charts in the Jupyter notebook and JupyterLab https://github.com/matplotlib/jupyter-matplotlib ``` # Enabling the `widget` backend. # This requires jupyter-matplotlib a.k.a. ipympl. # ipympl can be install via pip or conda. %matplotlib widget import matplotlib.pyplot as plt import numpy as np # Testing matplotlib interactions with a simple plot fig = plt.figure() plt.plot(np.sin(np.linspace(0, 20, 100))); fig.canvas.toolbar_visible = False fig.canvas.header_visible = False fig.canvas.footer_visible = False fig.canvas.resizable = False fig.canvas ``` # 3D plotting ``` from mpl_toolkits.mplot3d import axes3d fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Grab some test data. X, Y, Z = axes3d.get_test_data(0.05) # Plot a basic wireframe. ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10) plt.show() ``` # Subplots ``` # A more complex example from the matplotlib gallery np.random.seed(0) n_bins = 10 x = np.random.randn(1000, 3) fig, axes = plt.subplots(nrows=2, ncols=2) ax0, ax1, ax2, ax3 = axes.flatten() colors = ['red', 'tan', 'lime'] ax0.hist(x, n_bins, density=1, histtype='bar', color=colors, label=colors) ax0.legend(prop={'size': 10}) ax0.set_title('bars with legend') ax1.hist(x, n_bins, density=1, histtype='bar', stacked=True) ax1.set_title('stacked bar') ax2.hist(x, n_bins, histtype='step', stacked=True, fill=False) ax2.set_title('stack step (unfilled)') # Make a multiple-histogram of data-sets with different length. x_multi = [np.random.randn(n) for n in [10000, 5000, 2000]] ax3.hist(x_multi, n_bins, histtype='bar') ax3.set_title('different sample sizes') fig.tight_layout() plt.show() fig.canvas.toolbar_position = 'right' fig.canvas.toolbar_visible = False ``` # Interactions with other widgets and layouting ``` # When using the `widget` backend from ipympl, # fig.canvas is a proper Jupyter interactive widget, which can be embedded in # an ipywidgets layout. 
See https://ipywidgets.readthedocs.io/en/stable/examples/Layout%20Templates.html # One can bound figure attributes to other widget values. from ipywidgets import AppLayout, FloatSlider plt.ioff() slider = FloatSlider( orientation='horizontal', description='Factor:', value=1.0, min=0.02, max=2.0 ) slider.layout.margin = '0px 30% 0px 30%' slider.layout.width = '40%' fig = plt.figure() fig.canvas.header_visible = False fig.canvas.layout.min_height = '400px' plt.title('Plotting: y=sin({} * x)'.format(slider.value)) x = np.linspace(0, 20, 500) lines = plt.plot(x, np.sin(slider.value * x)) def update_lines(change): plt.title('Plotting: y=sin({} * x)'.format(change.new)) lines[0].set_data(x, np.sin(change.new * x)) fig.canvas.draw() fig.canvas.flush_events() slider.observe(update_lines, names='value') AppLayout( center=fig.canvas, footer=slider, pane_heights=[0, 6, 1] ) ```
github_jupyter
``` #hide %load_ext autoreload %autoreload 2 from fastai.text.all import * from reformer_fastai.all import * from timeit import timeit #hide #skip torch.cuda.set_device(0) #hide #all_slow ``` # LSH evaluation speed We want to test the speed of during evaluation in seconds per step, as reported in the right part of table 5 of the paper: https://arxiv.org/pdf/2001.04451.pdf ![image.png](images/table-lsh-speed.png) ## get data Helper method to get data. Assume 1 step of training and 10 of validation. ``` def get_dataloaders(bs=32, sl=1024, train_steps=1, valid_steps=10, seed=123): train_sz, valid_sz = bs*train_steps, bs*valid_steps dls = DataLoaders.from_dsets(DeterministicTwinSequence(sl, train_sz, seed=seed), DeterministicTwinSequence(sl, valid_sz, seed=seed), bs=bs, shuffle=False, device='cuda') return dls ``` ## get model Helper method to get `LSHLM` method. If `n_hashes=0` full attention is used. ``` def get_lshlm(n_hashes=1, sl=1024, use_lsh=True): if n_hashes==0: use_lsh=False return LSHLM(vocab_sz=128, d_model=256, n_layers=1, n_heads=4, max_seq_len=sl,bucket_size=64, n_hashes=n_hashes, causal=True, use_lsh=use_lsh) ``` ## train Get a learner that is trained for 1 epoch (just in case). 
``` def get_learner(dls, model, n_epochs=1, lr=1e-3): learn = Learner(dls, model, opt_func=adafactor, loss_func=CrossEntropyLossFlat(ignore_index=-100), metrics=MaskedAccuracy(), cbs=[MaskTargCallback()]).to_fp16() with learn.no_bar(): with learn.no_logging(): learn.fit(n_epochs, lr) return learn ``` ## time evaluation ``` 'function to get average time per step of validation' def time_eval(learn,dls, n_rounds=10): with learn.no_bar(): t = timeit(learn.validate, number=n_rounds) steps = dls.valid.n / dls.valid.bs return t / n_rounds / steps ``` # Loop experiment setup ``` n_lsh=[0, 1,2,4,8] sls =[1024, 2048, 4096, 8192, 16384, 32768] bss =[32, 16, 8, 4, 2, 1] train_steps, valid_steps = 1,10 cols = ['sl', 'bs', 'n-lsh', 'time'] results = [] for sl, bs in zip(sls, bss): for n_hashes in n_lsh: if n_hashes==0 and sl>8192: results.append((sl, bs, n_hashes, np.nan)) # won't fit in memory else: dls = get_dataloaders(bs=bs, sl=sl, train_steps=train_steps, valid_steps=valid_steps) model = get_lshlm(n_hashes=n_hashes, sl=sl) learn = get_learner(dls, model) t = time_eval(learn, dls) del(learn, model, dls) torch.cuda.empty_cache() results.append((sl, bs, n_hashes, t)) df = pd.DataFrame(results, columns=cols) df.head() df.to_csv('lsh-timing.csv') def get_label(nh): return f'lsh-{nh}' if nh>0 else 'full attention' def get_linestyle(nh): return '--' if nh == 0 else '-' fig, ax = plt.subplots(figsize=(8,5)) for nh, c in zip(n_lsh, ['k','r', 'b', 'g', 'y']): dat = df.loc[df['n-lsh']==nh] ax.plot(dat['sl'], dat['time'], color=c, label=get_label(nh), linestyle=get_linestyle(nh)) ax.set_yscale('log') ax.set_xscale('log', basex=2) ax.set_xlabel('sequence length / batch') ax.set_yticks([0.1, 1]) ax.set_xticks(sls) ax.set_xticklabels(f'{sl}/{bs}' for sl, bs in zip(sls, bss)) ax.legend(loc='upper left') ax.set_ylabel('seconds / step'); ``` ![image.png](images/table-lsh-speed.png) We were unable to to do the full sequence length for full attention due to out of memory errors on a single 
gpu. The results for the smaller sequences mostly match, except for lsh-8, which in our experiments turns out slower than in the paper. Also, our full attention seems to be a bit faster. In general the results look offset by a constant. This could be due to the method of measurement or to architecture choices.
github_jupyter
This example creates a fake in-memory particle dataset and then loads it as a yt dataset using the `load_particles` function. Our "fake" dataset will be numpy arrays filled with normally distributed randoml particle positions and uniform particle masses. Since real data is often scaled, I arbitrarily multiply by 1e6 to show how to deal with scaled data. ``` import numpy as np n_particles = 5000000 ppx, ppy, ppz = 1e6 * np.random.normal(size=[3, n_particles]) ppm = np.ones(n_particles) ``` The `load_particles` function accepts a dictionary populated with particle data fields loaded in memory as numpy arrays or python lists: ``` data = { "particle_position_x": ppx, "particle_position_y": ppy, "particle_position_z": ppz, "particle_mass": ppm, } ``` To hook up with yt's internal field system, the dictionary keys must be 'particle_position_x', 'particle_position_y', 'particle_position_z', and 'particle_mass', as well as any other particle field provided by one of the particle frontends. The `load_particles` function transforms the `data` dictionary into an in-memory yt `Dataset` object, providing an interface for further analysis with yt. The example below illustrates how to load the data dictionary we created above. ``` import yt from yt.units import Msun, parsec bbox = 1.1 * np.array( [[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppz), max(ppz)]] ) ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8 * Msun, bbox=bbox) ``` The `length_unit` and `mass_unit` are the conversion from the units used in the `data` dictionary to CGS. I've arbitrarily chosen one parsec and 10^8 Msun for this example. The `n_ref` parameter controls how many particle it takes to accumulate in an oct-tree cell to trigger refinement. Larger `n_ref` will decrease poisson noise at the cost of resolution in the octree. Finally, the `bbox` parameter is a bounding box in the units of the dataset that contains all of the particles. This is used to set the size of the base octree block. 
This new dataset acts like any other yt `Dataset` object, and can be used to create data objects and query for yt fields. This example shows how to access "deposit" fields: ``` ad = ds.all_data() # This is generated with "cloud-in-cell" interpolation. cic_density = ad["deposit", "all_cic"] # These three are based on nearest-neighbor cell deposition nn_density = ad["deposit", "all_density"] nn_deposited_mass = ad["deposit", "all_mass"] particle_count_per_cell = ad["deposit", "all_count"] ds.field_list ds.derived_field_list slc = yt.SlicePlot(ds, 2, ("deposit", "all_cic")) slc.set_width((8, "Mpc")) ``` Finally, one can specify multiple particle types in the `data` directory by setting the field names to be field tuples (the default field type for particles is `"io"`) if one is not specified: ``` n_star_particles = 1000000 n_dm_particles = 2000000 ppxd, ppyd, ppzd = 1e6 * np.random.normal(size=[3, n_dm_particles]) ppmd = np.ones(n_dm_particles) ppxs, ppys, ppzs = 5e5 * np.random.normal(size=[3, n_star_particles]) ppms = 0.1 * np.ones(n_star_particles) data2 = { ("dm", "particle_position_x"): ppxd, ("dm", "particle_position_y"): ppyd, ("dm", "particle_position_z"): ppzd, ("dm", "particle_mass"): ppmd, ("star", "particle_position_x"): ppxs, ("star", "particle_position_y"): ppys, ("star", "particle_position_z"): ppzs, ("star", "particle_mass"): ppms, } ds2 = yt.load_particles( data2, length_unit=parsec, mass_unit=1e8 * Msun, n_ref=256, bbox=bbox ) ``` We now have separate `"dm"` and `"star"` particles, as well as their deposited fields: ``` slc = yt.SlicePlot(ds2, 2, [("deposit", "dm_cic"), ("deposit", "star_cic")]) slc.set_width((8, "Mpc")) ```
github_jupyter
#### Nota: Estos ejemplos están indicados para hacerse en scripts de código Python, no en Jupyter ## Conexión a la base de datos, creación y desconexión ``` # Importamos el módulo import sqlite3 # Nos conectamos a la base de datos ejemplo.db (la crea si no existe) conexion = sqlite3.connect('ejemplo.db') # Cerramos la conexión, si no la cerramos se mantendrá en uso y no podremos gestionar el fichero conexion.close() ``` ## Creación de una tabla utilizando sintaxis SQL Antes de ejecutar una consulta (query) en código SQL, tenemos que crear un cursor. **Una vez creada la tabla, si intentamos volver a crearla dará error indicándonos que esta ya existe.** ``` import sqlite3 conexion = sqlite3.connect('ejemplo.db') # Creamos el cursor cursor = conexion.cursor() # Ahora crearemos una tabla de usuarios para almacenar nombres, edades y emails cursor.execute("CREATE TABLE usuarios (nombre VARCHAR(100), edad INTEGER, email VARCHAR(100))") # Guardamos los cambios haciendo un commit conexion.commit() conexion.close() ``` ## Insertando un registro ``` import sqlite3 conexion = sqlite3.connect('ejemplo.db') cursor = conexion.cursor() # Insertamos un registro en la tabla de usuarios cursor.execute("INSERT INTO usuarios VALUES ('Hector', 27, 'hector@ejemplo.com')") # Guardamos los cambios haciendo un commit conexion.commit() conexion.close() ``` ## Recuperando el primer registro con .fetchone() ``` import sqlite3 conexion = sqlite3.connect('ejemplo.db') cursor = conexion.cursor() # Recuperamos los registros de la tabla de usuarios cursor.execute("SELECT * FROM usuarios") # Mostrar el cursos a ver que hay ? 
print(cursor) # Recorremos el primer registro con el método fetchone, devuelve una tupla usuario = cursor.fetchone() print(usuario) conexion.close() ``` ## Insertando varios registros con .executemany() ``` import sqlite3 conexion = sqlite3.connect('ejemplo.db') cursor = conexion.cursor() # Creamos una lista con varios usuarios usuarios = [('Mario', 51, 'mario@ejemplo.com'), ('Mercedes', 38, 'mercedes@ejemplo.com'), ('Juan', 19, 'juan@ejemplo.com'), ] # Ahora utilizamos el método executemany() para insertar varios cursor.executemany("INSERT INTO usuarios VALUES (?,?,?)", usuarios) # Guardamos los cambios haciendo un commit conexion.commit() conexion.close() ``` ## Recuperando varios registros con .fetchall() ``` import sqlite3 conexion = sqlite3.connect('ejemplo.db') cursor = conexion.cursor() # Recuperamos los registros de la tabla de usuarios cursor.execute("SELECT * FROM usuarios") # Recorremos todos los registros con fetchall, y los volvamos en una lista de usuarios usuarios = cursor.fetchall() # Ahora podemos recorrer todos los usuarios for usuario in usuarios: print(usuario) conexion.close() ``` ## Utilizando DB Browser En esta práctica vamos a analizar el contenido de nuestra base de datos utilizando un programa externo. http://sqlitebrowser.org/
github_jupyter
# Exercise 3 Check if regularization may improve the performances by varying the parameter lambda (as usual in magnitude: 0, 10-5, 10-4, 10-3, etc.) of the L1 (LASSO) or of the L2 (Ridge) regularization; see lambda in eqs.(43) and (52) in the review. There is also a mixed version (l1_l2) that can be tried.3.1 Are performances of the CNN are optimized at some intermediate value of lambda?3.2 Is there any improvement in the visualization and understanding of the weights in thefilters?Note that the regularization we introduced acts on the w’s, not on the biases. One can alsotry the equivalent procedure for biases or for the output of the relu units (see Keras doc.), ifthere is any reason for suspecting that it may help. In our case, the logic was to let theweights of the filters go to zero if not needed, hence that kind of regularization was selected. ``` import numpy as np import matplotlib.pyplot as plt plt.rcParams['font.size'] = 14 import keras import tensorflow as tf from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten #, Reshape from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D import pandas as pd %run plot.py ``` ### Read data ``` str0 = 'ts_L60_Z12_A500_DX50_bias5_N10000.dat' fnamex='DATA/x_'+str0 fnamey='DATA/y_'+str0 x = np.loadtxt(fnamex, delimiter=" ",dtype=float) N,L = len(x), len(x[0]) Show_data(x,L,"original data") categ = np.loadtxt(fnamey, dtype=int) n_class = 3 # y.argmax() - y.argmin() +1 print('data: ',N) y = np.zeros((N,n_class)) for i in range(N): y[i][categ[i]] = 1. print(N, L) ``` ### Rescale data, split train/val. 
``` # # FIRST PASSAGE: DO NOT DO THIS --> FAILURE # #remove average value of each sample from its values xm = x.mean(axis=1) for i in range(N): x[i] = x[i]-xm[i] # # SECOND PASSAGE: DO NOT DO THIS --> ALSO FAILURE # #rescale (crude version, variance should be used) x = x/400 Show_data(x,L,"rescaled data") perc_train=0.8 N_train = int(perc_train*N) x_train = x[:N_train] y_train = y[:N_train] x_val = x[N_train:] y_val = y[N_train:] N_val = len(x_val) print('N_train=',N_train,' N_val=',N_val,' L=',L,' n_class=',n_class) #x_train=x_train.astype("float32") #y_train=y_train.astype("float32") #x_val=x_val.astype("float32") #y_val=y_val.astype("float32") # Keras wants an additional dimension with a 1 at the end x_train = x_train.reshape(x_train.shape[0], L, 1) x_val = x_val.reshape(x_val.shape[0], L, 1) input_shape = (L, 1) ``` ## Convolutional NN ``` from keras import initializers, regularizers from sklearn.model_selection import GridSearchCV from keras.wrappers.scikit_learn import KerasClassifier def create_CNN(n_class, reg, lam, fil, k_size, Poolsize, bias_reg, act_reg): ini = initializers.RandomNormal(mean = 0, stddev = 0.05) model = Sequential() if bias_reg == True: model.add(Conv1D(filters = fil, kernel_size = k_size[0], kernel_regularizer = reg(lam), kernel_initializer=ini, bias_regularizer = reg(lam), bias_initializer = ini, activation = "relu", input_shape = input_shape )) elif act_reg == True: model.add(Conv1D(filters = fil, kernel_size = k_size[0], kernel_regularizer = reg(lam), kernel_initializer=ini, activity_regularizer = reg(lam), activation = "relu", input_shape = input_shape )) else: model.add(Conv1D(filters = fil, kernel_size = k_size[0], kernel_regularizer = reg(lam), kernel_initializer=ini, activation = "relu", input_shape = input_shape )) model.add(MaxPooling1D(Poolsize)) #model.add(AveragePooling1D(Poolsize)) model.add(Conv1D(filters = fil, kernel_size = k_size[1], activation = "relu")) #model.add(MaxPooling1D(Poolsize)) model.add(Flatten()) 
model.add(Dense(12, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(n_class, activation='softmax')) # softmax ! return model def compile_model(n_class = 3, reg = regularizers.l2, lam = 0.001, fil = 5, k_size = [11, 7], Poolsize = 5, bias_reg = False, act_reg = False): # create the mode model=create_CNN(n_class, reg, lam, fil, k_size, Poolsize, bias_reg, act_reg) # compile the model model.compile(loss=keras.losses.categorical_crossentropy, optimizer = "adam", metrics = ["accuracy"]) return model # ...LESSON ... # reproducibility import tensorflow.random as tf_r tf_r.set_seed(12345) NCONV = 1 NF = 5 ''' model_gridsearch = KerasClassifier(build_fn=compile_model) # define parameter dictionary reg = [regularizers.l1, regularizers.l2, regularizers.l1_l2] lam = [0.1,0.01,0.001,0.0001] param_grid = dict(reg = reg , lam = lam) # call scikit grid search module grid = GridSearchCV(estimator=model_gridsearch, param_grid=param_grid, n_jobs=1, cv=4) grid_result = grid.fit(x_train,y_train, epochs=250, batch_size = 250, shuffle = True, verbose = 0)''' ''' results = pd.DataFrame.from_dict(grid_result.cv_results_) results.to_csv('results_DATA/GridSearchResults_New.csv')''' results = pd.read_csv('results_DATA/GridSearchResults_New.csv') best_result = results[results["rank_test_score"] == 1] best_result reg_best = regularizers.l1_l2#best_result.param_reg.values[0] lam_best = float(best_result.param_lam.values[0]) print(reg_best, lam_best) BATCH_SIZE = 250 EPOCHS = 100 model = compile_model(reg=reg_best ,lam=lam_best) fit = model.fit(x_train,y_train,batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(x_val, y_val), verbose=2, shuffle=True) def Show_weights(model,l=0,label="model"): c=['r','y','c','b','m'] m=['o','s','D','<','>'] ms=10 w = model.layers[l].get_weights()[0] wT=w.T M=len(wT) b = model.layers[l].get_weights()[1] fig,AX=plt.subplots(1,2,figsize=(12,4.4)) ax=AX[0] ax.axhline(0, c="k") ax.plot((0,)) for i in range(M): 
ax.plot(wT[i][0],"-",c=c[i],marker=m[i],label=str(i),markersize=ms) ax.set_title(label+': filters of layer '+str(l)) ax.set_xlabel('index') ax=AX[1] ax.axhline(0, c="k") for i in range(M): ax.plot((i),(b[i]),c=c[i],marker=m[i],label="filter "+str(i),markersize=ms) ax.set_title(label+': bias of layer '+str(l)) ax.set_xlabel('filter nr') ax.set_xticks(np.arange(5)) ax.legend() plt.show() Show_weights(model,0) Show_weights(model,2) def Show_history(fit): fig,AX=plt.subplots(1,2,figsize=(12,5.)) ax=AX[0] ax.plot(fit.history['accuracy'],"b",label="train") ax.plot(fit.history['val_accuracy'],"r--",label="valid.") ax.plot((0,EPOCHS),(1/3,1/3),":",c="gray",label="random choice") ax.set_xlabel('epoch') ax.set_ylabel("Accuracy") ax.set_ylim([0, 1]) ax.legend() ax=AX[1] ax.plot(fit.history['loss'],"b",label="train") ax.plot(fit.history['val_loss'],"r--",label="valid.") ax.set_xlabel('epoch') ax.set_ylabel("Loss") ax.set_ylim([0, 1.05*np.max(fit.history['loss'])]) ax.legend() plt.show() Show_history(fit) Show_weights(model,0) Show_weights(model,2) import pandas as pd from scipy import stats import seaborn from sklearn import metrics from sklearn.metrics import classification_report from sklearn import preprocessing LABELS = ["absent","positive","negative"] cmap="GnBu" def show_confusion_matrix(validations, predictions, label="Model"): matrix = metrics.confusion_matrix(validations, predictions) plt.figure(figsize=(6, 5)) seaborn.heatmap(matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d', linecolor='white', linewidths=1, cmap=cmap) plt.title(label+': Confusion Matrix') plt.ylabel('True Label') plt.xlabel('Predicted Label') plt.show() y_pred_val = model.predict(x_val) # Take the class with the highest probability from the val predictions max_y_pred_val = np.argmax(y_pred_val, axis=1) max_y_val = np.argmax(y_val, axis=1) show_confusion_matrix(max_y_val, max_y_pred_val) #print(classification_report(max_y_val, max_y_pred_val)) import pandas as pd from scipy import 
stats import seaborn from sklearn import metrics from sklearn.metrics import classification_report from sklearn import preprocessing LABELS = ["absent","positive","negative"] cmap="GnBu" def show_confusion_matrix(validations, predictions, label="Model"): matrix = metrics.confusion_matrix(validations, predictions, normalize='true') plt.figure(figsize=(6, 5)) seaborn.heatmap(matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, #fmt='d', linecolor='white', linewidths=1, cmap=cmap) plt.title(label+': Confusion Matrix') plt.ylabel('True Label') plt.xlabel('Predicted Label') plt.show() y_pred_val = model.predict(x_val) # Take the class with the highest probability from the val predictions max_y_pred_val = np.argmax(y_pred_val, axis=1) max_y_val = np.argmax(y_val, axis=1) show_confusion_matrix(max_y_val, max_y_pred_val) #print(classification_report(max_y_val, max_y_pred_val)) ``` <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=1cb9264e-65a5-431d-a980-16667908489e' target="_blank"> <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' 
src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV3Qm94PSIwIDAgODAgODAiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayI+CiAgICA8IS0tIEdlbmVyYXRvcjogU2tldGNoIDU0LjEgKDc2NDkwKSAtIGh0dHBzOi8vc2tldGNoYXBwLmNvbSAtLT4KICAgIDx0aXRsZT5Hcm91cCAzPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IkxhbmRpbmciIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxnIGlkPSJBcnRib2FyZCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoLTEyMzUuMDAwMDAwLCAtNzkuMDAwMDAwKSI+CiAgICAgICAgICAgIDxnIGlkPSJHcm91cC0zIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMjM1LjAwMDAwMCwgNzkuMDAwMDAwKSI+CiAgICAgICAgICAgICAgICA8cG9seWdvbiBpZD0iUGF0aC0yMCIgZmlsbD0iIzAyNjVCNCIgcG9pbnRzPSIyLjM3NjIzNzYyIDgwIDM4LjA0NzY2NjcgODAgNTcuODIxNzgyMiA3My44MDU3NTkyIDU3LjgyMTc4MjIgMzIuNzU5MjczOSAzOS4xNDAyMjc4IDMxLjY4MzE2ODMiPjwvcG9seWdvbj4KICAgICAgICAgICAgICAgIDxwYXRoIGQ9Ik0zNS4wMDc3MTgsODAgQzQyLjkwNjIwMDcsNzYuNDU0OTM1OCA0Ny41NjQ5MTY3LDcxLjU0MjI2NzEgNDguOTgzODY2LDY1LjI2MTk5MzkgQzUxLjExMjI4OTksNTUuODQxNTg0MiA0MS42NzcxNzk1LDQ5LjIxMjIyODQgMjUuNjIzOTg0Niw0OS4yMTIyMjg0IEMyNS40ODQ5Mjg5LDQ5LjEyNjg0NDggMjkuODI2MTI5Niw0My4yODM4MjQ4IDM4LjY0NzU4NjksMzEuNjgzMTY4MyBMNzIuODcxMjg3MSwzMi41NTQ0MjUgTDY1LjI4MDk3Myw2Ny42NzYzNDIxIEw1MS4xMTIyODk5LDc3LjM3NjE0NCBMMzUuMDA3NzE4LDgwIFoiIGlkPSJQYXRoLTIyIiBmaWxsPSIjMDAyODY4Ij48L3BhdGg+CiAgICAgICAgICAgICAgICA8cGF0aCBkPSJNMCwzNy43MzA0NDA1IEwyNy4xMTQ1MzcsMC4yNTcxMTE0MzYgQzYyLjM3MTUxMjMsLTEuOTkwNzE3MDEgODAsMTAuNTAwMzkyNyA4MCwzNy43MzA0NDA1IEM4MCw2NC45NjA0ODgyIDY0Ljc3NjUwMzgsNzkuMDUwMzQxNCAzNC4zMjk1MTEzLDgwIEM0Ny4wNTUzNDg5LDc3LjU2NzA4MDggNTMuNDE4MjY3Nyw3MC4zMTM2MTAzIDUzLjQxODI2NzcsNTguMjM5NTg4NSBDNTMuNDE4MjY3Nyw0MC4xMjg1NTU3IDM2LjMwMzk1NDQsMzcuNzMwNDQwNSAyNS4yMjc0MTcsMzcuNzMwNDQwNSBDMTcuODQzMDU4NiwzNy43MzA0NDA1IDkuNDMzOTE5NjYsMzcuNzMwNDQwNSAwLDM3LjczMDQ0MDUgWiIgaWQ9IlBhdGgtMTkiIGZpbGw9IiMzNzkzRUYiPjwvcGF0aD4KICAgICAgI
CAgICAgPC9nPgogICAgICAgIDwvZz4KICAgIDwvZz4KPC9zdmc+' > </img> Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
github_jupyter
# Batch Reactor with Multiple Reactions ### The simulation is adapted from: http://www.learncheme.com/simulations/kinetics-reactor-design/batch-reactor-with-multiple-reactions The concentrations of five species are plotted as a function of dimensionless time, for three irreversible, elementary, liquid-phase reactions in an isothermal batch reactor. Use sliders to change the dimensionless rate constants for each reaction. The initial amounts of A and B in the reactor are 10 mol and 5 mol, respectively. *Use buttons to plot just one concentration or all five together.* <br> The reactions are: <br> A + B → C <br> C → 2E <br> 2A → D Three liquid-phase reactions take place in an isothermal batch reactor: <br> A + B → C, with reaction rate $r1 = k1C_{A}C_{B}$ <br> C → 2E with reaction rate $r2 = k2C_{C}$ <br> 2A → D with reaction rate $r3 = k3C_{A}^{2}$ <br> where Ci is the concentration of component i, ri is the rate of reaction, and ki is the rate constant. ![image.png](attachment:image.png) where t is time, NA are moles of A, and V is reactor volume. Initially 10 moles of A and 5 moles of B are fed into the reactor. 
## Import Libraries: ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt from IPython.html.widgets import interact from scipy.integrate import odeint plt.style.use('bmh') ``` ### Review for batch reactor: https://encyclopedia.che.engin.umich.edu/Pages/Reactors/Batch/Batch.html ## Function of bactch reactor combine with interactive representation ``` def batch_reactor(k1 = 0.5,k2 = 0.5,k3 = 0.5): # Ignore the volume in this model because the batch reactor is function of time # time: independent variable # C: dependent variable def batch_solver (Y, t): # Define the vector Y including the five components Ca = Y[0] Cb = Y[1] Cc = Y[2] Cd = Y[3] Ce = Y[4] # Define the explicit rates of the three reactions r1 = k1*Ca*Cb r2 = k2*Cc r3 = k3*Ca**2 # Define mass balance on each component due to the multiple ractions going on in the reactor dCadt = -r1 - 2*r2 dCbdt = -r1 dCcdt = r1 - r2 dCddt = r3 dCedt = 2*r3 return [dCadt, dCbdt, dCcdt, dCddt, dCedt] t = np.linspace(0,5,1000) Na0 = 10 # moles Nb0 = 5 # moles V = 100 # liters Ca0 = Na0/V # initial component in the batch reactor Cb0 = Nb0/V # initial component in the bacth reactor Y0 = [Ca0, Cb0, 0, 0 , 0] # five component Sol = odeint(batch_solver, Y0, t) Na = Sol[:,0]*V Nb = Sol[:,1]*V Nc = Sol[:,2]*V Nd = Sol[:,3]*V Ne = Sol[:,4]*V # Plotting section plt.figure(figsize=(10, 6)) plt.plot(t,Na, label='A component') plt.plot(t,Nb, label='B component') plt.plot(t,Nc, label='C component') plt.plot(t,Nd, label='D component') plt.plot(t,Ne, label='E component') plt.ylim([0,10]) plt.xlim([0,5]) plt.ylabel(r'moles of species') plt.xlabel(r'time') plt.title(r'Langmuir adsorption model for two components') plt.legend(loc = 'best') # optional plt.style.use('ggplot') batch_reactor (0.5,0.5,0.5) ``` ## Interactive Output: ``` interact (batch_reactor, k1 = (0,1,0.1), k2 = (0,1,0.1), k3 = (0,1,0.1)) ``` Kindly note the volume of the reactor is assumed to be 100 liters but can be changed in the code. 
<br> **This code is open source; any contribution is welcome.** <br> **Please cite the link of this webpage in your browser to show that the code was extracted from this repository, in case you use it in the future.** <br> For example (this link is for another notebook): https://github.com/jeff-ball/Machine-Learning-Chemical-Engineering-Approach/blob/main/Reactordesign1.ipynb <br> <br> $$ Thank YOU! $$
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as seabornInstance from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import metrics %matplotlib inline # Import packages/ modules import os import matplotlib.pyplot as plt import numpy as np import pandas as pd import earthpy as et import matplotlib.dates as mdates from matplotlib.dates import DateFormatter import seaborn as sns import datetime from textwrap import wrap from statsmodels.formula.api import ols # Handle date time conversions between pandas and matplotlib from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() # Use white grid plot background from seaborn sns.set(font_scale=1.5, style="whitegrid") # Conditional statement to check and set working directory. ea_path = os.path.join(et.io.HOME, 'earth-analytics') if os.path.exists(ea_path): os.chdir(ea_path) print("working directory is set to earth-analytics") else: print("This path does not exist") # Set base path to download data base_path = os.path.join(ea_path, "data") base_path ``` ## Techniques to measure the chemical composition of the atmosphere measurement of PM2 with R & P Model 2025 PM-2.5 Sequential Air Sampler w/VSCC - Gravimetric PM10_total with HI-VOL SA/GMW-1200 - GRAVIMETRIC PM25 with Met-one BAM-1020 W/PM2.5 SCC - Beta Attenuation (optical method) Pressure (mb) withINSTRUMENTAL - BAROMETRIC SENSOR VOC_ppb_C = 6L Pressurized Canister - Precon Saturn GC/MS O3_ppb, 8-HR RUN AVG BEGIN HOUR NO2 ppb, INSTRUMENTAL - CHEMILUMINESCENCE lead (ng/m3), Lo-Vol-Xontech 920 or 924, Teflon - ICP/Mass Spectrometer CO ppb, INSTRUMENTAL - Gas Filter Correlation Thermo Electron 48i-TLE AQI, R & P Model 2025 PM-2.5 Sequential Air Sampler w/VSCC - Gravimetric ``` # Files to download for further analysis # Define relative path to files file_path1 = os.path.join("data","output_figures", "sandiego_2014_fires", 
"SD_weather_aq2014" , "SD_temp.csv") file_path2 = os.path.join("data","output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_pres.csv" ) file_path3 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_RH.csv") file_path4 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_wind.csv") file_path5 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_O3.csv") file_path6 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_CO.csv") file_path7 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_NO2.csv") file_path8= os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_PM25.csv") file_path9 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_PM10.csv") file_path10 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_PM2.csv") file_path11 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_lead.csv") file_path12 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_VOC.csv") file_path13 = os.path.join("data", "output_figures", "sandiego_2014_fires", "SD_weather_aq2014", "SD_AQI.csv") # To check if path is created os.path.exists(file_path8) sd_temp = pd.read_csv(file_path1, parse_dates=['Date Local'], index_col=['Date Local']) sd_pres = pd.read_csv(file_path2, parse_dates=['Date Local'], index_col=['Date Local']) sd_RH = pd.read_csv(file_path3, parse_dates=['Date Local'], index_col=['Date Local']) sd_wind = pd.read_csv(file_path4, parse_dates=['Date Local'], index_col=['Date Local']) sd_O3 = pd.read_csv(file_path5, parse_dates=['Date Local'], index_col=['Date Local']) sd_CO = pd.read_csv(file_path6, parse_dates=['Date Local'], index_col=['Date Local']) sd_NO2 = pd.read_csv(file_path7, parse_dates=['Date 
Local'], index_col=['Date Local']) sd_PM25 = pd.read_csv(file_path8, parse_dates=['Date Local'], index_col=['Date Local']) sd_PM10 = pd.read_csv(file_path9, parse_dates=['Date Local'], index_col=['Date Local']) sd_lead = pd.read_csv(file_path11, parse_dates=['Date Local'], index_col=['Date Local']) sd_VOC = pd.read_csv(file_path12, parse_dates=['Date Local'], index_col=['Date Local']) sd_AQI = pd.read_csv(file_path13, parse_dates=['Date Local'], index_col=['Date Local']) sd_O3.head(3) sd_O3['City Name'].unique() sd_O3.columns # frames = [sd_O3, sd_RH, sd_temp] # O3_RH_t = pd.concat(frames, sort= False) # result = df1.append([df2, df3]) # sd_combined_df = sd_O3.append([sd_RH, sd_temp, sd_pres, sd_wind, sd_CO, sd_NO2, sd_PM25], sort = False) # sd_combined_df.set_index('Date Local', inplace=True) # pd.DataFrame.to_csv(sd_combined_df) sd_combined_df = pd.concat([sd_O3, sd_RH, sd_temp, sd_pres, sd_wind, sd_CO, sd_NO2], axis= 0, join = 'outer').sort_index() # sd_combined_df.set_index('Date Local', inplace=True) # finaldf = pd.concat([df1, df2, df3], axis=1, join='inner').sort_index() # result.head(2) sd_combined_df.to_csv('data/output_figures/sandiego_2014_fires/air_quality_csv/sd_cumulative_df.csv') # print(sd_combined_df['City Name'].unique()) print(sd_combined_df.columns) sd_combined_df.head() # To check empty columns sd_combined_df.isnull().any() # # data_frames = [sd_temp, sd_O3, sd_NO2, sd_CO,] # # df_merged = reduce(lambda left,right: pd.merge(left,right,on=['Date Local'], # # how='outer'), data_frames) # # df_merged.head() # dfs= [sd_RH, sd_temp, sd_pres, sd_wind] # def mergefiles(dfs, countfiles, i=0): # if i == (countfiles - 2): # it gets to the second to last and merges it with the last # return # dfm = dfs[i].merge(mergefiles(dfs[i+1], countfiles, i=i+1), on='Date Local') # return dfm # print(mergefiles(dfs, len(dfs))) # print(dfm.columns) #pd.DataFrame.to_csv('data/output_figures/sandiego_2014_fires/air_quality_csv/weather_chem_sd_stations.csv') file_path2 = 
os.path.join(base_path,"output_figures","sandiego_2014_fires", "air_quality_csv", "sd_chemical_composition_2014_mean_values_only.csv") # To check if path is created os.path.exists(file_path2) sd_atm_df = pd.read_csv(file_path2, parse_dates=['Date Local'], index_col=['Date Local']) sd_atm_df.head(2) # sd_weather_2014_df.reset_index(inplace = True) # sd_weather_2014_df.head() # # Create plot space # fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, # sharex = True, figsize = (10, 12)) # # Add subtitle # fig.subplots_adjust(bottom=0.15, left=0.2) # fig.suptitle(""" Figure 1(a-e). Atmospheric conditions during wildfires\n # Mar - June 2014, San Diego, CA.""", fontsize = 20) sd_atm_df.shape # fig, (ax1, ax2) = plt.subplots(2, 1 # sharex = True, figsize = (10, 12)) sd_atm_df.plot(x='NO2_mean', y='CO_mean', style='o', c='b') plt.title('NO2 vs CO') plt.xlabel('NO2 (ppb)') plt.ylabel('CO (ppm)') plt.show() sd_atm_df.plot(x='O3_ppb_mean', y='NO2_mean', style='o', c='r') plt.title('O3 vs NO2') plt.xlabel('O3 (ppb)') plt.ylabel('NO2 (pp)b') plt.show() plt.figure(figsize=(4,4)) plt.tight_layout() sns.distplot(sd_atm_df['NO2_mean']) ``` Our next step is to divide the data into “attributes” and “labels”. Attributes are the independent variables while labels are dependent variables whose values are to be predicted. In our dataset, we only have two columns. We want to predict the MaxTemp depending upon the MinTemp recorded. Therefore our attribute set will consist of the “MinTemp” column which is stored in the X variable, and the label will be the “MaxTemp” column which is stored in y variable. ``` sd_atm_df X = sd_atm_df['NO2_mean'].values.reshape(-1,1) y = sd_atm_df['CO_mean'].values.reshape(-1,1) ``` Next, we split 80% of the data to the training set while 20% of the data to test set using below code. The test_size variable is where we actually specify the proportion of the test set. 
``` X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) ``` After splitting the data into training and testing sets, finally, the time is to train our algorithm. For that, we need to import LinearRegression class, instantiate it, and call the fit() method along with our training data. As we have discussed that the linear regression model basically finds the best value for the intercept and slope, which results in a line that best fits the data. To see the value of the intercept and slope calculated by the linear regression algorithm for our dataset, execute the following code. ``` regressor = LinearRegression() regressor.fit(X_train, y_train) #training the algorithm #To retrieve the intercept: print(regressor.intercept_) #For retrieving the slope: print(regressor.coef_) ``` This means that for every one unit of change in NO2_mean, the change in the CO is about 0.029%. Now that we have trained our algorithm, it’s time to make some predictions. To do so, we will use our test data and see how accurately our algorithm predicts the percentage score. To make predictions on the test data, execute the following script: ``` # print(X_test) X_test.shape y_pred = regressor.predict(X_test) ``` Now compare the actual output values for X_test with the predicted values, execute the following script: ``` df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()}) # df.head() print(df) ``` We can also visualize comparison result as a bar graph using the below script : Note: As the number of records is huge, for representation purpose I’m taking just 15 records. ``` df1 = df.head(19) df1.plot(kind='bar',figsize=(8,5)) plt.grid(which='major', linestyle='-', linewidth='0.5', color='green') plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black') plt.show() ``` Though our model is not very precise, the predicted percentages are close to the actual ones. 
Let's plot our straight line with the test data : ``` plt.scatter(X_test, y_test, color='gray') plt.plot(X_test, y_pred, color='red', linewidth=2) plt.show() ``` The straight line in the above graph shows our algorithm is correct. The final step is to evaluate the performance of the algorithm. This step is particularly important to compare how well different algorithms perform on a particular dataset. For regression algorithms, three evaluation metrics are commonly used: 1. Mean Absolute Error (MAE) is the mean of the absolute value of the errors. It is calculated as: ![image.png](attachment:image.png) 2. Mean Squared Error (MSE) is the mean of the squared errors and is calculated as: ![image.png](attachment:image.png) 3. Root Mean Squared Error (RMSE) is the square root of the mean of the squared errors: ![image.png](attachment:image.png) Luckily, we don’t have to perform these calculations manually. The Scikit-Learn library comes with pre-built functions that can be used to find out these values for us. Let’s find the values for these metrics using our test data. Mean Absolute Error (MAE) is the mean of the absolute value of the errors. It is calculated as: ``` print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) ``` Multiple Linear Regression ![image.png](attachment:image.png) We just performed linear regression in the above section involving two variables. Almost all the real-world problems that you are going to encounter will have more than two variables. Linear regression involving multiple variables is called “multiple linear regression” or multivariate linear regression. The steps to perform multiple linear regression are almost similar to that of simple linear regression. The difference lies in the evaluation. 
You can use it to find out which factor has the highest impact on the predicted output and how different variables relate to each other. ``` # To check empty columns sd_atm_df.isnull().any() ``` Once the above code is executed, all the columns should give False, In case for any column you find True result, then remove all the null values from that column using below code. ``` dataset = sd_atm_df.fillna(method='ffill') dataset.describe() ``` Our next step is to divide the data into “attributes” and “labels”. X variable contains all the attributes/features and y variable contains labels. ``` X = dataset[[ 'NO2_mean','CO_mean', 'PM2.5_mean']].values y = dataset['O3_ppb_mean'].values plt.figure(figsize=(4,4)) plt.tight_layout() sns.distplot(dataset['O3_ppb_mean']) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) regressor = LinearRegression() regressor.fit(X_train, y_train) # regressor = LinearRegression() # regressor.fit(X_train, y_train) #training the algorithm #To retrieve the intercept: print(regressor.intercept_) #For retrieving the slope: print(regressor.coef_) import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as seabornInstance from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import metrics %matplotlib inline # coeff_df = pd.DataFrame(regressor.coef_, X.columns) # coeff_df y_pred = regressor.predict(X_test) df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred}) df.head() df1.plot(kind='bar',figsize=(10,6)) plt.grid(which='major', linestyle='-', linewidth='0.5', color='green') plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black') plt.show() print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) ``` Mean O3= 30.693 RMSE = 6.51 
The RMSE relative to the mean is (6.51/30.69)*100 ≈ 21% of the mean O3 value, so this is not a great model fit. However, O3 formation depends on temperature and pressure, and its destruction on reaction with the OH radical, photolysis, and collisions with air molecules.
github_jupyter
# Tellurium, Antimony and libRoadRunner Introduction <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/tellurium-and-libroadrunner.png" width="60%" style="padding: 20px"></div> <div align='center' style='font-size:100%'> Veronica L. Porubsky, BS <div align='center' style='font-size:100%'>Sauro Lab PhD Student, Department of Bioengineering<br> Head of Outreach, <a href="https://reproduciblebiomodels.org/dissemination-and-training/seminar/">Center for Reproducible Biomedical Modeling</a><br> University of Washington, Seattle, WA USA </div> <hr> ## TOC * [Links to relevant resources](#relevant-resources) * [Packages and constants](#packages-constants) * [Tellurium, Antimony, and libRoadRunner support rapid simulation and analysis of kinetic models](#rapid-simulation) * [What is biochemical network modeling?](#network-modeling) * [Why do we perform network modeling?](#perform-network-modeling) * [Types of networks](#network-types) * [Metabolic networks](#metabolic) * [Protein signaling networks](#protein-signaling) * [Gene regulatory networks](#gene-regulation) * [Repressilator model by Elowitz & Liebler (2000):](#repressilator) * [Model of respiratory oscillations in Saccharomyces cerevisae by Jana Wolf et al. (2001):](#wolf) * [Negative feedback and ultrasensitivity effects in Kholodenko (2000):](#kholodenko) * [Writing a simple model in the Antimony language](#write-antimony) * [Simulating the simple Antimony Model](#simulate-antimony) * [Plotting the simple Antimony model simulation results](#plot-antimony) * [Setting and getting values with Tellurium](#set-get-values) * [Resetting your model with Tellurium](#model-reset) * [Adding events to an Antimony string](#events-antimony) * [Writing an Antimony model with interactions: the repressilator](#interactions-antimony) * [Exploring the species and reactions in your model with Tellurium](#tellurium-explore) * [SBML description format vs. 
human-readable Antimony string](#sbml-antimony) * [Exercises](#exercises) * [Acknowledgements](#acknowledgements) # Links to relevant resources <a class="anchor" id="relevant-resources"></a> <a href="https://github.com/sys-bio/tellurium#installation-instructions">Tellurium installation instructions</a><br> <a href="https://tellurium.readthedocs.io/en/latest/">Tellurium documentation</a><br> <a href="https://libroadrunner.readthedocs.io/en/latest/">libRoadRunner documentation</a><br> <a href="https://tellurium.readthedocs.io/en/latest/antimony.html">Antimony documentation</a><br> # Tellurium, Antimony, and libRoadRunner support rapid simulation and analysis of kinetic models <a class="anchor" id="rapid-simulation"></a> <br> <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/tellurium-utilities.png" width="75%" style="padding: 20px"></div> # What is biochemical network modeling? <a class="anchor" id="network-modeling"></a> <ul> <li>Chemical kinetics studies the factors that influence the rate of chemical reactions</li> <ul class="square"> <li>e.g. <span style="color:blue">concentration</span>, temperature, light, catalysts, etc. </li> </ul> <li>Chemical reaction networks are the framework for building all types of dynamical models</li> <ul class="square"> <li>Genetic circuits</li> <li>Cell signaling pathways</li> <li>Metabolic networks</li> </ul> <li>Types of biochemical network models:</li> <ul class="square"> <li>Agent-based</li> <li>Algebraic</li> <li>Boolean</li> <li>Constraint based</li> <li><span style="color:blue">Mechanistic differential equations models</span></li> <li>Statistical and machine learning methods</li> <li>Stochastic</li> </ul> </ul> # Why do we perform network modeling? 
<a class="anchor" id="perform-network-modeling"></a> <ul> <li>Understand subcellular processes</li> <li>Drive experimentation</li> <li>Make predictions about system behavior and the impacts of interferring with the system</li> <li>Design synthetic networks to control cellular processes</li> <li>Develop novel treatments for disease by predicting targets for pharmacological therapies, genetic modification, etc</li> <li>Provide a basis for larger multi-cellular models</li> </ul> # Types of networks: <a class="anchor" id="network-types"></a> <br> <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/networks_fig1.PNG" width = "75%" style="padding: 0px"></div> <br> # Metabolic networks <a class="anchor" id="metabolic"></a> <br> Glycolytic pathway of <em>Lactococcus lactis</em>, produced on JWS Online for Dr. Sauro's textbook, "Systems Biology: Introduction to Pathway Modeling". <br> <table><tr> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/metabolic_network.PNG" style="width: 800px;"/> </td> </tr></table> # Protein signaling networks <a class="anchor" id="protein-signaling"></a> <table><tr> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/protein_actions.PNG" style="width: 450px;"/> </td> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/protein_signalling_network.PNG" style="width: 300px;"/> </td> </tr></table> # Gene regulatory networks <a class="anchor" id="gene-regulation"></a> <table><tr> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/gene_actions.PNG" style="width: 400px;"/> </td> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/gene_regulatory.PNG" style="width: 400px;"/> </td> </tr></table> # Repressilator model by Elowitz & Liebler (2000) <a 
class="anchor" id="repressilator"></a> <br> Repressilator circuit from <a href="http://www.elowitz.caltech.edu/publications/Repressilator.pdf">Elowitz & Liebler (2000):</a> <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/repressilator.png" width="50%" style="padding: 20px"></div> ## Repressilator model by Elowitz & Liebler (2000) <table><tr> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/repressilator_2.PNG" style="width: 300px;"/> </td> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/repressilator_3.PNG" style="width: 1000px;"/> </td> </tr></table> ## Repressilator model by Elowitz & Liebler (2000) <table><tr> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/repressilator_1.PNG" style="width: 700px;"/> </td> </tr></table> ## Repressilator model by Elowitz & Liebler (2000): <table><tr> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/repressilator_4.PNG" style="width: 700px;"/> </td> </tr></table> # Model of respiratory oscillations in Saccharomyces cerevisae by Jana Wolf et al. 
(2001) <a class="anchor" id="wolf"></a> <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/wolf_publication.PNG" width="65%" style="padding: 20px"></div> <br> <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/wolf_network.PNG" width="65%" style="padding: 20px"></div> # Negative feedback and ultrasensitivity effects in Kholodenko (2000):= <a class="anchor" id="kholodenko"></a> <table><tr> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/kholodenko_1.PNG" style="width: 500px;"/> </td> <td> <img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/kholodenko_2.PNG" style="width: 500px;"/> </td> </tr></table> # Packages and constants <a class="anchor" id="packages-constants"></a> ``` # Install packages !pip install tellurium -q # Import packages import tellurium as te # Python-based modeling environment for kinetic models ``` # Writing a simple model in the Antimony language <a class="anchor" id="write-antimony"></a> <br> <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/simple-antimony.png" width="75%" style="padding: 20px"></div> ``` ant_str = """ model simple_model # name the model species S1, S2; # specify species J1: S1 -> S2; k1*S1; # reaction name: reaction; reaction rate law; S1 = 10.0; # assign species initial conditions S2 = 0.0; k1 = 1.0; # assign constant values to global parameters end """ ``` # Simulating the simple Antimony model <a class="anchor" id="simulate-antimony"></a> ``` r = te.loada(ant_str) # create an executable model by loading the string to a RoadRunner object instance result = r.simulate(0, 10, 25) # simulate(time_start, time_end, number_of_points) print(result) # print the timecourse simulation results ``` # Plotting the simple Antimony model simulation results <a 
class="anchor" id="plot-antimony"></a> ``` # Plot the results of the simulation r.plot(title = 'Uni-uni mass-action model', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) ``` # Setting and getting values with Tellurium <a class="anchor" id="set-get-values"></a> ``` print('Simulation with updated value for parameter, k1:') r = te.loada(ant_str) r.k1 = 5.0 # You can set the parameter values without changing the Antimony string r.simulate(0,10,100) r.plot(title = 'Uni-uni mass-action model with k1 = 5', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) print('Simulation with updated concentration for species, S2, at time = 0:') r = te.loada(ant_str) r.S2 = 10.0 # You can also change the species concentration without changing the Antimony string r.simulate(0,10,100) r.plot(title = 'Uni-uni mass-action model with S2 = 10', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) # You can also check the current values of parameters and species print(f'Current value of species S2 is: {r.S2}') print(f'Current value of parameter k1 is: {r.k1}') ``` # Resetting your model with Tellurium <a class="anchor" id="model-reset"></a> Resets are an important step in modeling with Tellurium that must be considered. These will allow you to run multiple simulations without reloading your model each time. You should consider whether you want to reset the time, the concentrations, or parameter values that you have changed. 
``` # Here, we load our model and run a simulation as before print('Original simulation:') r = te.loada(ant_str) r.simulate(0,10,100) r.plot(title = 'Uni-uni mass-action model', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) # Now, let's change the value of k1 as above and re-run our simulation print('Simulation with updated parameter and no reset:') r.k1 = 5.0 r.simulate(0,10,100) r.plot(title = 'Uni-uni mass-action model', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) # The previous plot may not be what you expect, you must reset the model to start from initial concentrations print('Simulation with updated parameter and no reset:') r.reset() r.k1 = 5.0 r.simulate(0,10,100) r.plot(title = 'Uni-uni mass-action model', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) # To reset the parameter and species values you have changed to the current intial value print('Simulation with reset parameter k1') r.resetAll() r.simulate(0,10,100) r.plot(title = 'Uni-uni mass-action model', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) ``` # Adding events to an Antimony string <a class="anchor" id="events-antimony"></a> ``` ant_str = """ model simple_model_with_event # name the model species S20; J1: S1 -> S2; k1*S1; # reaction; reaction rate law; S1 = 10.0; # assign species initial conditions S2 = 0.0; S20 := cos(time); # Adding a species defined with an assignment rule k1 = 1.0; # assign constant values to global parameters E1: at (time > 5): S1 = 10; # add an event - spike in S1 at time > 5 end """ r = te.loada(ant_str) # create an executable model by loading the string to a RoadRunner object instance r.simulate(0, 10, 100) # simulate(time_start, time_end, number_of_points) r.plot(title = 'Uni-uni mass-action model with event', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) ant_str = """ model simple_model_with_event # name the model J1: S1 -> S2; k1*S1; # reaction; reaction rate law; S1 = 10.0; # assign 
species initial conditions S2 = 0.0; k1 = 1.0; # assign constant values to global parameters E1: at (S1 < 0.005): S1 = 10; # add an event - spike in S1 when S1 falls below 0.005 end """ r = te.loada(ant_str) # create an executable model by loading the string to a RoadRunner object instance r.simulate(0, 10, 100) # simulate(time_start, time_end, number_of_points) r.plot(title = 'Uni-uni mass-action model with event', xtitle = 'Time', ytitle = 'Concentration', figsize = (8, 6)) ``` # Writing an Antimony model with interactions: the repressilator <a class="anchor" id="interactions-antimony"></a> <br> Repressilator circuit from Elowitz & Liebler (2000): <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/repressilator.png" width="40%" style="padding: 20px"></div> ``` repressilator_str = """ # Species: species M1, P3, P1, M2, P2, M3; # Reactions: J0: -> M1; a_m1*(Kr_P3^n1/(Kr_P3^n1 + P3^n1)) + leak1; J1: M1 -> ; d_m1*M1; J2: -> P1; a_p1*M1; J3: P1 -> ; d_p1*P1; J4: -> M2; a_m2*(Kr_P1^n2/(Kr_P1^n2 + P1^n2)) + leak2; J5: M2 -> ; d_m2*M2; J6: -> P2; a_p2*M2; J7: P2 -> ; d_p2*P2; J8: -> M3; a_m3*(Kr_P2^n3/(Kr_P2^n3 + P2^n3)) + leak3; J9: M3 -> ; d_m3*M3; J10: -> P3; a_p3*M3; J11: P3 -> ; d_p3*P3; # Species initializations: M1 = 0.604016261711246; P3 = 1.10433330559171; P1 = 7.94746428021418; M2 = 2.16464969760648; P2 = 3.55413750091507; M3 = 2.20471854765531; # Parameter value initializations: a_m1 = 1.13504504342841; Kr_P3 = 0.537411795656332; n1 = 7.75907326833983; leak1 = 2.59839004225795e-07; d_m1 = 0.360168301619141; a_p1 = 5.91755684808254; d_p1 = 1.11075218613419; a_m2 = 2.57306185467814; Kr_P1 = 0.190085253528206; n2 = 6.89140262856765; leak2 = 1.51282707494481e-06; d_m2 = 1.05773721506759; a_p2 = 8.35628834784826; d_p2 = 0.520562081730298; a_m3 = 0.417889543691157; Kr_P2 = 2.71031378955001; n3 = 0.44365980532785; leak3 = 3.63586125130783e-11; d_m3 = 0.805873530762994; a_p3 = 4.61276807677109; d_p3 = 
1.54954108126666; """ # Load the repressilator model repressilator = te.loada(repressilator_str) # Save the repressilator model in the SBML format te.saveToFile('repressilator_sbml.xml', repressilator.getCurrentSBML()) # Simulate the repressilator model repressilator.simulate(0, 100, 500) repressilator.plot(figsize = (10, 8), linewidth = 3) ``` # Exploring the species and reactions in your model with Tellurium <a class="anchor" id="tellurium-explore"></a> ``` repressilator.reset() # Print the variable, or floating, species IDs print('The variable species in the repressilator model are:') print(repressilator.getFloatingSpeciesIds()) # Print the global parameter IDs print('The parameters in the repressilator model are:') print(repressilator.getGlobalParameterIds()) # Print the global parameter values print('The current values of parameters in the repressilator model are:') print(repressilator.getGlobalParameterValues()) ``` # SBML description format vs. human-readable Antimony string <a class="anchor" id="sbml-antimony"></a> We will explore more standards in systems biology in a later lecture, but it is useful to show the utility of using Antimony to easily write and understand network models. <br> The Systems Biology Markup Lanugage (SBML) is a standard format that has been developed to have a common language and to encourage widespread support for models. However, it is difficult to create an SBML model. Writing a model in Antimony is straightforward, and Tellurium will allow you to export to the standard format. ``` # The SBML version of the model is machine-readable and difficult to interpret with visual inspection repressilator.reset() print(repressilator.getCurrentSBML()) # The Antimony version of this model is human-readable print(repressilator.getAntimony()) ``` # Exercises ## Exercise 1 JWS Online is a web-based tool that allows users to interact with a graphical interface to construct and simulate models. It also has many stored models and simulations. 
Investigate the epidermal growth factor model from <a href="https://jjj.mib.ac.uk/models/kholodenko1/">Kholodenko 1999 on JWS Online</a>. Click on "simulation" to visualize the model and simulation capabilities. Explore the simulations and alter parameter values to see the effect on the model output. More information about this model can be found on the <a href="https://www.ebi.ac.uk/biomodels/BIOMD0000000048">BioModels Database</a>. What biological domain was this model designed to study? Interested in another biological process? Filter the results of the <a href="https://jjj.mib.ac.uk/models/?&process=1&jwsmodel__model_type=&organism=&id=&page=2">JWS Online Model Database</a> to find and explore a relevant model! ## Exercise 2: Write an Antimony model for the network below: <br> <div align='center'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/simple-model.png" width="40%" style="padding: 20px"></div> ## Exercise 2 Solution: ``` # Write your code here # Write a simple antimony string antStr = ''' J1: S1 -> S2; k1*S1; # Reaction names 'J1', 'J2' J2: S2 -> S3; k2*S2; k1 = 0.15; k2 = 0.45; # Specify parameter values S1 = 1; S2 = 0; S3 = 0; # Specify initial conditions ''' ``` ## Exercise 3: Create a RoadRunner object instance with your Antimony string from Exercise 2 and simulate your model from time 0 to time 20 with 50 data points. ## Exercise 3 Solution: ``` # Write your code here # Load the Antimony string to a RoadRunner object instance 'r' r = te.loada(antStr) # Simulate the model (numerically integrate the differential equations) r.simulate(0, 20, 50) ``` ## Exercise 4: Plot the simulation from Exercise 2 with a title and x and y axis labels. 
## Exercise 4 Solution: ``` # Write your code here # Plot the simulation results r.plot(title = 'Simple reaction network', xtitle = 'Time', ytitle = 'Concentration') ``` ## Exercise 5: Reset your model from Exercises 1-3, simulate, and plot only the species S2 concentration timecourse. ## Exercise 5 Solution: ``` # Write your code here # Reset the model species concentrations to the initial conditions r.reset() # Simulate the model with selections 'time' and 'S2' r.simulate(0, 20, 50, ['time', 'S2']) r.plot(title = 'Simple reaction network: S2 concentration', xtitle = 'Time', ytitle = 'Concentration') ``` # Acknowledgements <br> <div align='left'><img src="https://raw.githubusercontent.com/vporubsky/tellurium-libroadrunner-tutorial/master/acknowledgments.png" width="80%"></div> <br> <html> <head> <title>Bibliography</title> </head> <body> <h1>Bibliography</h1> <ol> <li> <p>K. Choi et al., <cite>Tellurium: An extensible python-based modeling environment for systems and synthetic biology</cite>, Biosystems, vol. 171, pp. 74–79, Sep. 2018.</p> </li> <li> <p>E. T. Somogyi et al., <cite>libRoadRunner: a high performance SBML simulation and analysis library.,</cite>, Bioinformatics, vol. 31, no. 20, pp. 3315–21, Oct. 2015.</p> <li> <p>L. P. Smith, F. T. Bergmann, D. Chandran, and H. M. Sauro, <cite>Antimony: a modular model definition language</cite>, Bioinformatics, vol. 25, no. 18, pp. 2452–2454, Sep. 2009.</p> </li> <li> <p>K. Choi, L. P. Smith, J. K. Medley, and H. M. Sauro, <cite>phraSED-ML: a paraphrased, human-readable adaptation of SED-ML</cite>, J. Bioinform. Comput. Biol., vol. 14, no. 06, Dec. 2016.</p> </li> <li> <p> B.N. Kholodenko, O.V. Demin, G. Moehren, J.B. Hoek, <cite>Quantification of short term signaling by the epidermal growth factor receptor.</cite>, J Biol Chem., vol. 274, no. 42, Oct. 1999.</p> </li> </ol> </body> </html>
github_jupyter
<h1><font size=12> Weather Derivatites </h1> <h1> Rainfall Simulator -- LSTM <br></h1> Developed by [Jesus Solano](mailto:ja.solano588@uniandes.edu.co) <br> 16 September 2018 ``` # Import needed libraries. import numpy as np import pandas as pd import random as rand import matplotlib.pyplot as plt from scipy.stats import bernoulli from scipy.stats import gamma import pickle import time import datetime from keras.models import load_model # Download files. ! wget https://github.com/jesugome/WeatherDerivatives/raw/master/datasets/ensoForecastProb/ensoForecastProbabilities.pickle ! wget https://raw.githubusercontent.com/jesugome/WeatherDerivatives/master/results/visibleMarkov/transitionsParametersDry.csv ! wget https://raw.githubusercontent.com/jesugome/WeatherDerivatives/master/results/visibleMarkov/transitionsParametersWet.csv ! wget https://raw.githubusercontent.com/jesugome/WeatherDerivatives/master/results/visibleMarkov/amountGamma.csv ! wget https://github.com/jesugome/WeatherDerivatives/raw/master/results/visibleMarkov/rainfall_lstmDry_LSTM.h5 ! wget https://github.com/jesugome/WeatherDerivatives/raw/master/results/visibleMarkov/rainfall_lstmWet_LSTM.h5 ``` # Generate artificial Data ``` ### ENSO probabilistic forecast. # Open saved data. ensoForecast = pickle.load(open('../datasets/ensoForecastProb/ensoForecastProbabilities.pickle','rb')) #ensoForecast = pickle.load(open('ensoForecastProbabilities.pickle','rb')) # Print an example .. ( Format needed) ensoForecast['2005-01'] ### Create total dataframe. def createTotalDataFrame(daysNumber, startDate , initialState , initialPrep , ensoForecast ): # Set variables names. totalDataframeColumns = ['state','Prep','Month','probNina','probNino', 'nextState'] # Create dataframe. 
allDataDataframe = pd.DataFrame(columns=totalDataframeColumns) # Number of simulation days(i.e 30, 60) daysNumber = daysNumber # Simulation start date ('1995-04-22') startDate = startDate # State of rainfall last day before start date --> Remember 0 means dry and 1 means wet. initialState = initialState initialPrep = initialPrep # Only fill when initialState == 1 dates = pd.date_range(startDate, periods = daysNumber + 2 , freq='D') for date in dates: # Fill precipitation amount. allDataDataframe.loc[date.strftime('%Y-%m-%d'),'Prep'] = np.nan # Fill month of date allDataDataframe.loc[date.strftime('%Y-%m-%d'),'Month'] = date.month # Fill El Nino ENSO forecast probability. allDataDataframe.loc[date.strftime('%Y-%m-%d'),'probNino'] = float(ensoForecast[date.strftime('%Y-%m')].loc[0,'El Niño'].strip('%').strip('~'))/100 # Fill La Nina ENSO forecast probability. allDataDataframe.loc[date.strftime('%Y-%m-%d'),'probNina'] = float(ensoForecast[date.strftime('%Y-%m')].loc[0,'La Niña'].strip('%').strip('~'))/100 # Fill State. allDataDataframe.loc[date.strftime('%Y-%m-%d'),'state'] = np.nan simulationDataFrame = allDataDataframe[:-1] # Fill initial conditions. simulationDataFrame['state'][0] = initialState if initialState == 1: simulationDataFrame['Prep'][0] = initialPrep else: simulationDataFrame['Prep'][0] = 0.0 return simulationDataFrame simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-08-18', initialState = 1 , initialPrep = 0.4, ensoForecast = ensoForecast) simulationDataFrame.head() ### Load transitions and amount parameters. # Transitions probabilites. 
def updateState(yesterdayIndex, simulationDataFrame, transitionsParametersDry, transitionsParametersWet):
    """Sample today's rainfall state (0 = dry, 1 = wet) given yesterday.

    Uses the module-level LSTMs (lstmModelDry / lstmModelWet) to predict
    the probability that today is wet. The transitionsParameters*
    arguments are unused by this LSTM variant but kept so the signature
    matches the visible-Markov version of the simulator.

    Args:
        yesterdayIndex (int): positional index of yesterday's row.
        simulationDataFrame (pandas.DataFrame): frame built by
            createTotalDataFrame.
        transitionsParametersDry, transitionsParametersWet: unused here.

    Returns:
        int: Bernoulli draw for today's state.
    """
    yesterdayState = simulationDataFrame['state'].iloc[yesterdayIndex]
    yesterdayPrep = simulationDataFrame['Prep'].iloc[yesterdayIndex]
    yesterdayProbNino = simulationDataFrame['probNino'].iloc[yesterdayIndex]
    yesterdayProbNina = simulationDataFrame['probNina'].iloc[yesterdayIndex]
    yesterdayMonth = simulationDataFrame['Month'].iloc[yesterdayIndex]

    if yesterdayState == 0:
        # Dry model features: normalized month + ENSO probabilities.
        xPredict = np.array([(yesterdayMonth - 1) / 11, yesterdayProbNino, yesterdayProbNina])
        xPredict = np.reshape(xPredict, (1, 1, xPredict.shape[0]))
        successProbability = lstmModelDry.predict(xPredict)[0][0]
    elif yesterdayState == 1:
        # Wet model additionally sees yesterday's rainfall amount.
        xPredict = np.array([yesterdayPrep, (yesterdayMonth - 1) / 11, yesterdayProbNino, yesterdayProbNina])
        xPredict = np.reshape(xPredict, (1, 1, xPredict.shape[0]))
        successProbability = lstmModelWet.predict(xPredict)[0][0]
    else:
        # Original code printed and then crashed with NameError; fail loudly.
        raise ValueError('State of date: {} not found.'.format(simulationDataFrame.index[yesterdayIndex]))

    return bernoulli.rvs(successProbability)


def _gammaLogParams(amountParametersGamma, todayMonth, todayProbNino, todayProbNina):
    """Return (log mu, log shape) of the rainfall-amount gamma model.

    Row layout of amountParametersGamma (1-indexed): row 1 is the
    intercept (which *is* the January effect), rows 2-12 the remaining
    month effects, row 13 the El Niño coefficient, row 14 the La Niña
    coefficient.

    NOTE(review): the original code added the probNino term twice and
    never used probNina; this assumes row 14 is the La Niña coefficient
    -- confirm against amountGamma.csv.
    """
    logMu = amountParametersGamma['mu'][1]
    logShape = amountParametersGamma['shape'][1]
    if todayMonth != 1:
        # Month effect on top of the intercept (January has none).
        logMu += amountParametersGamma['mu'][todayMonth]
        logShape += amountParametersGamma['shape'][todayMonth]
    # ENSO covariates.
    logMu += (todayProbNino * amountParametersGamma['mu'][13]
              + todayProbNina * amountParametersGamma['mu'][14])
    logShape += (todayProbNino * amountParametersGamma['shape'][13]
                 + todayProbNina * amountParametersGamma['shape'][14])
    return logMu, logShape


def oneRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma):
    """Simulate one realization of the rainfall process in place.

    Walks the frame day by day: draws each day's wet/dry state from the
    LSTMs and, on wet days, a rainfall amount from a gamma distribution
    whose parameters depend on month and ENSO probabilities.

    Args:
        simulationDataFrame (pandas.DataFrame): frame from
            createTotalDataFrame; mutated with simulated values.
        transitionsParametersDry, transitionsParametersWet: passed through
            to updateState (unused by the LSTM variant).
        amountParametersGamma (pandas.DataFrame): gamma GLM coefficients.

    Returns:
        tuple: (total rainfall over the run, number of wet days).
    """
    rainfall = 0.0
    wetDays = 0

    for day in range(1, len(simulationDataFrame)):
        todayState = updateState(day - 1, simulationDataFrame,
                                 transitionsParametersDry, transitionsParametersWet)

        idxToday = simulationDataFrame.index[day]
        idxYesterday = simulationDataFrame.index[day - 1]
        simulationDataFrame.loc[idxToday, 'state'] = todayState
        simulationDataFrame.loc[idxYesterday, 'nextState'] = todayState

        if todayState == 1:
            wetDays += 1
            logMu, logShape = _gammaLogParams(
                amountParametersGamma,
                simulationDataFrame['Month'].iloc[day],
                simulationDataFrame['probNino'].iloc[day],
                simulationDataFrame['probNina'].iloc[day])

            gammaShape = np.exp(logShape)
            # gamma scale = mean / shape.
            gammaScale = np.exp(logMu) / gammaShape

            todayRainfall = gamma.rvs(a=gammaShape, scale=gammaScale)
            simulationDataFrame.loc[idxToday, 'Prep'] = todayRainfall
            rainfall += todayRainfall
        else:
            simulationDataFrame.loc[idxToday, 'Prep'] = 0

    return rainfall, wetDays
def totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet,
             amountParametersGamma, iterations):
    """Run the rainfall simulation `iterations` times.

    The original notebook defined totalRun twice; the first (shadowed)
    version ignored oneRun's (rainfall, wetDays) tuple and stored the
    tuple as the iteration rainfall. Only the corrected version is kept.

    Args:
        simulationDataFrame (pandas.DataFrame): template frame from
            createTotalDataFrame; each iteration works on a fresh copy.
        transitionsParametersDry, transitionsParametersWet: passed through
            to oneRun.
        amountParametersGamma (pandas.DataFrame): gamma GLM coefficients.
        iterations (int): number of independent simulation runs.

    Returns:
        list: total rainfall of each iteration.
    """
    startTime = time.time()

    rainfallPerIteration = [None] * iterations
    wetDaysPerIteration = [None] * iterations

    for i in range(iterations):
        # Copy so every iteration starts from the same initial conditions.
        simulationDataFrameC = simulationDataFrame.copy()
        iterationRainfall, wetDays = oneRun(simulationDataFrameC, transitionsParametersDry,
                                            transitionsParametersWet, amountParametersGamma)
        rainfallPerIteration[i] = iterationRainfall
        wetDaysPerIteration[i] = wetDays

    currentTime = time.time() - startTime

    print('The mean of wet days is: ', np.mean(wetDaysPerIteration))
    print('The elapsed time over simulation is: ', currentTime, ' seconds.')

    return rainfallPerIteration
finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='steelblue',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() ``` ### Enero ``` #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-01-01', initialState = 0 , initialPrep = 0.4, ensoForecast = ensoForecast) # Final Analysis. finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='lightgreen',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-01-01', initialState = 1 , initialPrep = 0.4, ensoForecast = ensoForecast) # Final Analysis. finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='skyblue',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. 
simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-01-01', initialState = 1 , initialPrep = 2.0 , ensoForecast = ensoForecast) # Final Analysis. finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='steelblue',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() ``` ### Abril ``` #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-04-01', initialState = 0 , initialPrep = 0.4, ensoForecast = ensoForecast) # Final Analysis. finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='lightgreen',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-04-01', initialState = 1 , initialPrep = 0.4, ensoForecast = ensoForecast) # Final Analysis. 
finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='skyblue',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-04-01', initialState = 1 , initialPrep = 2.0 , ensoForecast = ensoForecast) # Final Analysis. finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='steelblue',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() ``` ### Octubre ``` #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-10-01', initialState = 0 , initialPrep = 0.4, ensoForecast = ensoForecast) # Final Analysis. finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='lightgreen',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. 
simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-10-01', initialState = 1 , initialPrep = 0.4, ensoForecast = ensoForecast) # Final Analysis. finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='skyblue',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() #### Define parameters simulation. # Simulations iterations. iterations = 1000 # Create dataframe to simulate. simulationDataFrame = createTotalDataFrame(daysNumber= 30, startDate = '2017-10-01', initialState = 1 , initialPrep = 2.0 , ensoForecast = ensoForecast) # Final Analysis. finalSimulation = totalRun(simulationDataFrame, transitionsParametersDry, transitionsParametersWet, amountParametersGamma,iterations) fig = plt.figure(figsize=(20, 10)) plt.hist(finalSimulation,facecolor='steelblue',bins=100, density=True, histtype='stepfilled', edgecolor = 'black' , hatch = '+') plt.title('Rainfall Simulation') plt.xlabel('Rainfall Amount [mm]') plt.ylabel('Probability ') plt.grid() plt.show() ```
github_jupyter
#@title Helper functions
np.set_printoptions(precision=3)


def plot_choices(q, epsilon, choice_fn, n_steps=1000, rng_seed=1):
    """Bar-plot the empirical selection frequency of each action.

    Repeatedly calls ``choice_fn(q, epsilon)`` and shows the fraction of
    steps on which each action index was chosen.

    Args:
        q: array of action values, passed through to ``choice_fn``.
        epsilon: exploration parameter, passed through to ``choice_fn``.
        choice_fn: policy callable ``(q, epsilon) -> action index``.
        n_steps: number of draws used to estimate the frequencies.
        rng_seed: seed for ``np.random``; ``None`` reseeds from system
            entropy, making the plot non-deterministic.
    """
    np.random.seed(rng_seed)
    counts = np.zeros_like(q)
    for t in range(n_steps):
        action = choice_fn(q, epsilon)
        counts[action] += 1
    fig, ax = plt.subplots()
    ax.bar(range(len(q)), counts/n_steps)
    ax.set(ylabel='% chosen', xlabel='action', ylim=(0,1), xticks=range(len(q)))


def plot_multi_armed_bandit_results(results):
    """Three-panel summary of one bandit run.

    Panels: per-step reward, evolution of the learned action values, and
    latent arm means vs. the final learned values.

    Args:
        results: dict as returned by ``multi_armed_bandit``; reads the
            'rewards', 'qs', and 'mu' entries.
    """
    fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(20, 4))
    ax1.plot(results['rewards'])
    ax1.set(title=f"Total Reward: {np.sum(results['rewards']):.2f}",
            xlabel='step', ylabel='reward')
    ax2.plot(results['qs'])
    ax2.set(xlabel='step', ylabel='value')
    ax2.legend(range(len(results['mu'])))
    ax3.plot(results['mu'], label='latent')
    ax3.plot(results['qs'][-1], label='learned')
    ax3.set(xlabel='action', ylabel='value')
    ax3.legend()


def plot_parameter_performance(labels, fixed, trial_rewards, trial_optimal):
    """Compare parameter settings: average reward and % optimal actions.

    Args:
        labels: legend label for each parameter setting (first axis of
            the arrays below).
        fixed: description of the parameter held fixed, used in titles.
        trial_rewards: array of shape (settings, trials, steps) of rewards.
        trial_optimal: array of shape (settings, trials, steps) of 0/1
            optimal-choice flags.
    """
    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 6))
    ax1.plot(np.mean(trial_rewards, axis=1).T)
    ax1.set(title=f'Average Reward ({fixed})', xlabel='step', ylabel='reward')
    ax1.legend(labels)
    ax2.plot(np.mean(trial_optimal, axis=1).T)
    ax2.set(title=f'Performance ({fixed})', xlabel='step', ylabel='% optimal')
    ax2.legend(labels)
<img alt="MultiArmedBandit" width="625" height="269" src="https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/W2D5_ReinforcementLearning/static/W2D5_Tutorial2_MultiarmedBandit.png?raw=true"> While there are many different levels of sophistication and assumptions in how the rewards are determined, for simplicity's sake we will assume that each action results in a reward drawn from a fixed Gaussian distribution with unknown mean and unit variance. This problem setting is referred to as the *environment*, and goal is to find the arm with the highest mean value. We will solve this *optimization problem* with an *agent*, in this case an algorithm that takes in rewards and returns actions. --- # Section 2: Choosing an Action The first thing our agent needs to be able to do is choose which arm to pull. The strategy for choosing actions based on our expectations is called a *policy* (often denoted $\pi$). We could have a random policy -- just pick an arm at random each time -- though this doesn't seem likely to be capable of optimizing our reward. We want some intentionality, and to do that we need a way of describing our beliefs about the arms' reward potential. We do this with an action-value function \begin{align} q(a) = \mathbb{E} [r_{t} | a_{t} = a] \end{align} where the value $q$ for taking action $a \in A$ at time $t$ is equal to the expected value of the reward $r_t$ given that we took action $a$ at that time. In practice, this is often represented as an array of values, where each action's value is a different element in the array. Great, now that we have a way to describe our beliefs about the values each action should return, let's come up with a policy. An obvious choice would be to take the action with the highest expected value. This is referred to as the *greedy* policy \begin{align} a_{t} = \text{argmax}_{a} \; q_{t} (a) \end{align} where our choice action is the one that maximizes the current value function. 
So far so good, but it can't be this easy. And, in fact, the greedy policy does have a fatal flaw: it easily gets trapped in local maxima. It never explores to see what it hasn't seen before if one option is already better than the others. This leads us to a fundamental challenge in coming up with effective policies. ## Section 2.1: The Exploitation-Exploration Dilemma If we never try anything new, if we always stick to the safe bet, we don't know what we are missing. Sometimes we aren't missing much of anything, and regret not sticking with our preferred choice, yet other times we stumble upon something new that was way better than we thought. This is the exploitation-exploration dilemma: do you go with your best choice now, or risk the less certain option with the hope of finding something better. Too much exploration, however, means you may end up with a sub-optimal reward once it's time to stop. In order to avoid getting stuck in local maxima while also maximizing reward, effective policies need some way to balance between these two aims. A simple extension to our greedy policy is to add some randomness. For instance, a coin flip -- heads we take the best choice now, tails we pick one at random. This is referred to as the $\epsilon$-greedy policy: \begin{align} P (a_{t} = a) = \begin{cases} 1 - \epsilon + \epsilon/N & \quad \text{if } a_{t} = \text{argmax}_{a} \; q_{t} (a) \\ \epsilon/N & \quad \text{else} \end{cases} \end{align} which is to say that with probability 1 - $\epsilon$ for $\epsilon \in [0,1]$ we select the greedy choice, and otherwise we select an action at random (including the greedy option). Despite its relative simplicity, the epsilon-greedy policy is quite effective, which leads to its general popularity.
def epsilon_greedy(q, epsilon):
    """Epsilon-greedy action selection.

    With probability ``1 - epsilon`` the highest-valued action is taken;
    otherwise an action is drawn uniformly at random (which may happen to
    be the greedy one as well).

    Args:
        q (ndarray): an array of action values
        epsilon (float): probability of selecting an action randomly

    Returns:
        int: the chosen action
    """
    # A single uniform draw decides between exploring and exploiting;
    # draws at or below epsilon trigger exploration.
    explore = np.random.random() <= epsilon
    if explore:
        return np.random.choice(len(q))
    return np.argmax(q)
If we have a potentially very long running episode, the computational cost of keeping all these values and recomputing the mean over and over again isn't ideal. Instead we can use a streaming mean calculation, which looks like this: \begin{align} q_{t+1}(a) \leftarrow q_{t}(a) + \frac{1}{n_t} (r_{t} - q_{t}(a)) \end{align} where our action-value function $q_t(a)$ is the mean of the rewards seen so far, $n_t$ is the number of actions taken by time $t$, and $r_t$ is the reward just received for taking action $a$. This still requires us to remember how many actions we've taken, so let's generalize this a bit further and replace the action total with a general parameter $\alpha$, which we will call the learning rate \begin{align} q_{t+1}(a) \leftarrow q_{t}(a) + \alpha (r_{t} - q_{t}(a)). \end{align} ## Exercise 2: Updating Action Values In this exercise you will implement the action-value update rule above. The function will take in the action-value function represented as an array `q`, the action taken, the reward received, and the learning rate, `alpha`. The function will return the updated value for the selection action. ``` def update_action_value(q, action, reward, alpha): """ Compute the updated action value given the learning rate and observed reward. Args: q (ndarray): an array of action values action (int): the action taken reward (float): the reward received for taking the action alpha (float): the learning rate Returns: float: the updated value for the selected action """ ##################################################### ## TODO for students: compute the action value update # Fill out function and remove raise NotImplementedError("Student excercise: compute the action value update") ##################################################### # write an expression for the updated action value value = ... 
def update_action_value(q, action, reward, alpha):
    """Return the updated value estimate for the selected action.

    Implements one step of the streaming update
    ``q <- q + alpha * (reward - q)``.

    Args:
        q (ndarray): an array of action values
        action (int): the action taken
        reward (float): the reward received for taking the action
        alpha (float): the learning rate

    Returns:
        float: the updated value for the selected action
    """
    # Move the estimate a fraction alpha along the prediction error.
    prediction_error = reward - q[action]
    return q[action] + alpha * prediction_error


def multi_armed_bandit(n_arms, epsilon, alpha, n_steps):
    """Run a Gaussian multi-armed bandit with an epsilon-greedy agent.

    Each arm's reward is drawn from a normal distribution with a latent
    mean (sampled once per run) and unit variance.

    Args:
        n_arms (int): number of arms or actions
        epsilon (float): probability of selecting an action randomly
        alpha (float): the learning rate
        n_steps (int): number of steps to evaluate

    Returns:
        dict: action values over time ('qs'), chosen 'actions', observed
        'rewards', latent arm means 'mu', and per-step 'optimal' flags.
    """
    # Latent arm means; per-step rewards are N(mu[a], 1).
    mu = np.random.normal(size=n_arms)

    # Per-step bookkeeping.
    q = np.zeros(n_arms)
    qs = np.zeros((n_steps, n_arms))
    rewards = np.zeros(n_steps)
    actions = np.zeros(n_steps)
    optimal = np.zeros(n_steps)

    for step in range(n_steps):
        # Pick an arm under the epsilon-greedy policy.
        chosen = epsilon_greedy(q, epsilon)
        actions[step] = chosen

        # Sample a reward for every arm, but observe only the chosen one.
        sampled = np.random.normal(mu)
        rewards[step] = sampled[chosen]

        # Did we take the best possible choice at this step?
        optimal[step] = chosen == np.argmax(sampled)

        # Learn from the observed reward.
        q[chosen] = update_action_value(q, chosen, sampled[chosen], alpha)
        qs[step] = q

    return {
        'qs': qs,
        'actions': actions,
        'rewards': rewards,
        'mu': mu,
        'optimal': optimal,
    }
``` # set for reproducibility, comment out / change seed value for different results np.random.seed(1) n_arms = 10 epsilon = 0.1 alpha = 0.01 n_steps = 1000 results = multi_armed_bandit(n_arms, epsilon, alpha, n_steps) fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16, 6)) ax1.plot(results['rewards']) ax1.set(title=f'Observed Reward ($\epsilon$={epsilon}, $\\alpha$={alpha})', xlabel='step', ylabel='reward') ax2.plot(results['qs']) ax2.set(title=f'Action Values ($\epsilon$={epsilon}, $\\alpha$={alpha})', xlabel='step', ylabel='value') ax2.legend(range(n_arms)); ``` Alright, we got some rewards that are kind of all over the place, but the agent seemed to settle in on the first arm as the preferred choice of action relatively quickly. Let's see how well we did at recovering the true means of the Gaussian random variables behind the arms. ``` fig, ax = plt.subplots() ax.plot(results['mu'], label='latent') ax.plot(results['qs'][-1], label='learned') ax.set(title=f'$\epsilon$={epsilon}, $\\alpha$={alpha}', xlabel='action', ylabel='value') ax.legend(); ``` Well, we seem to have found a very good estimate for action 0, but most of the others are not great. In fact, we can see the effect of the local maxima trap at work -- the greedy part of our algorithm locked onto action 0, which is actually the 2nd best choice to action 6. Since these are the means of Gaussian random variables, we can see that the overlap between the two would be quite high, so even if we did explore action 6, we may draw a sample that is still lower than our estimate for action 0. However, this was just one choice of parameters. Perhaps there is a better combination? ## Interactive Demo: Changing Epsilon and Alpha Use the widget below to explore how varying the values of $\epsilon$ (exploitation-exploration tradeoff), $\alpha$ (learning rate), and even the number of actions $k$, changes the behavior of our agent. ``` #@title #@markdown Make sure you execute this cell to enable the widget! 
@widgets.interact_manual(k=widgets.IntSlider(10, min=2, max=15), epsilon=widgets.FloatSlider(0.1, min=0.0, max=1.0), alpha=widgets.FloatLogSlider(0.01, min=-3, max=0)) def explore_bandit_parameters(k=10, epsilon=0.1, alpha=0.001): results = multi_armed_bandit(k, epsilon, alpha, 1000) plot_multi_armed_bandit_results(results) ``` While we can see how changing the epsilon and alpha values impact the agent's behavior, this doesn't give as a great sense of which combination is optimal. Due to the stochastic nature of both our rewards and our policy, a single trial run isn't sufficient to give us this information. Let's run mulitple trials and compare the average performance. First we will look at differet values for $\epsilon \in [0.0, 0.1, 0.2]$ to a fixed $\alpha=0.1$. We will run 200 trials as a nice balance between speed and accuracy. ``` # set for reproducibility, comment out / change seed value for different results np.random.seed(1) epsilons = [0.0, 0.1, 0.2] alpha = 0.1 n_trials = 200 trial_rewards = np.zeros((len(epsilons), n_trials, n_steps)) trial_optimal = np.zeros((len(epsilons), n_trials, n_steps)) for i, epsilon in enumerate(epsilons): for n in range(n_trials): results = multi_armed_bandit(n_arms, epsilon, alpha, n_steps) trial_rewards[i, n] = results['rewards'] trial_optimal[i, n] = results['optimal'] labels = [f'$\epsilon$={e}' for e in epsilons] fixed = f'$\\alpha$={alpha}' plot_parameter_performance(labels, fixed, trial_rewards, trial_optimal) ``` On the left we have plotted the average reward over time, and we see that while $\epsilon=0$ (the greedy policy) does well initially, $\epsilon=0.1$ starts to do slightly better in the long run, while $\epsilon=0.2$ does the worst. Looking on the right, we see the percentage of times the optimal action (the best possible choice at time $t$) was taken, and here again we see a similar pattern of $\epsilon=0.1$ starting out a bit slower but eventually having a slight edge in the longer run. 
We can also do the same for the learning rates. We will evaluate $\alpha \in [0.01, 0.1, 1.0]$ to a fixed $\epsilon=0.1$. ``` # set for reproducibility, comment out / change seed value for different results np.random.seed(1) epsilon = 0.1 alphas = [0.01, 0.1, 1.0] n_trials = 200 trial_rewards = np.zeros((len(epsilons), n_trials, n_steps)) trial_optimal = np.zeros((len(epsilons), n_trials, n_steps)) for i, alpha in enumerate(alphas): for n in range(n_trials): results = multi_armed_bandit(n_arms, epsilon, alpha, n_steps) trial_rewards[i, n] = results['rewards'] trial_optimal[i, n] = results['optimal'] labels = [f'$\\alpha$={a}' for a in alphas] fixed = f'$\epsilon$={epsilon}' plot_parameter_performance(labels, fixed, trial_rewards, trial_optimal) ``` Again we see a balance between an effective learning rate. $\alpha=0.01$ is too weak to quickly incorporate good values, while $\alpha=1$ is too strong likely resulting in high variance in values due to the Gaussian nature of the rewards. --- # Summary In this tutorial you implemented both the epsilon-greedy descision algorithm and a learning rule for solving a multi-armed bandit scenario. You saw how balancing exploitation and exploration in action selection is crtical in finding optimal solutions. You also saw how choosing an appropriate learning rate determines how well an agent can generalize the information they receive from rewards.
github_jupyter
# Section 1 ``` !cat /proc/cpuinfo !cat /proc/meminfo !ls a=10 type(a) b=10.3 type(b) c=False type(c) d='Python class' type(d) e= 3j+2 type(e) a e print(e) f=""" This is my first python class at DIAT Points Building""" g="This is my first \ program" #this is a comment a="a" * 5 print(a) ``` **For Loop in Python** ``` result = 0 for i in range(100): result += i result ``` **Lists in python** ``` names=["Rahul","Tanya","Shombho","Murthy"] names type(names) names[1] type(names[1]) len(names[1]) names[1][2:] names.reverse() names names.append("Ram") names names.insert(1,"Pallavi") names names1=["Laksh","Arjun","Tina","Kishore"] newlist=names+names1 newlist names.extend(names1) names names.index('Tina') names2=names.copy() names2 names[0]="Rajesh" names names2 L = list(range(10)) L type(L[0]) L2 = [str(c) for c in L] L2 type(L2[0]) L3=[True, "2",3.0,4] [type(item) for item in L3] ``` But this flexibility comes at a cost: to allow these flexible types, each item in the list must contain its own type info, reference count, and other information–that is, each item is a complete Python object. In the special case that all variables are of the same type, much of this information is redundant: it can be much more efficient to store data in a fixed-type array. 
The difference between a dynamic-type list and a fixed-type (NumPy-style) array is illustrated in the following figure: ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA2cAAAHmCAIAAAB4WmFmAAAgAElEQVR4nOzdfyBV9/848OetiysuyVSjvWmsbgstTT+XomVU62rRCq2kod4ttFWzlUY/fFRvUb4WS9rCKm25TWMKUdIvtW5aalpUtMjPG65cfP845x7X/eHnvdev5+OP9jq/Xud175xznvf169BaWloAIYQQQgihdg3p7QIghBBCCKF+AKNGhBBCCCHUMYwaEUIIIYRQxzBqRAghhBBCHcOoESGEEEIIdQyjRoQQQggh1DGMGhFCCCGEUMfovV0AhBBCCKFBp76+/sGDB71dCuloNBqLxVJTUxNbj1EjQkiRBPxKXj3Q1XWYjN4uCkII9SH79+/39/fv7VLIdPDgwS+++EJsJUaNCCEFuhu93NyLAwCHuTWeZszeLk7XCXglxRWgojVSXwdvlwghOWpoaLC1tT18+HBvF0QKZ2fn169fS67H2yBCg0nlrb1fH3gEw0RWDdMd8+a4Ce/PnmdlrJC4iIwUGxoFCsi8R/hFKZ+77S0GsPb5YftiY2m78I45arlxAACCs0q3zNZTbgERQgPfkCF9cYQJjUaTul5uD4m7p/fu5TwaNgyGjZmxYfNqY4nGKH5Rirf34RfAXPO/HxZLbh7gSiI91598AVo1Ne+uDdvjbNbb5UGDFO/x5a1RsbK2+sTlHnC26Hqu/Ee37pTUvh6ub2pmrNOT4ilZY3lRbEYGAIBD9XYZ+zQIE1WVdZ3PmV/56E5eyWvV4aZTzLCKEiEkR0ZGRgBQWFgouii6RqHkdT+rvBKxNTaDSEeFPtNsjHQUy7qxvCiKwwEAG/+DAIMrauTdOu0VxSHSnIzwz5ZFsvBBgnqFCtW12drD4526ujpeMZeTwSVWhbpMAdWCA45Sa91k4935fMr0DAAwD6u4s7E/hY0q5H+1ZO+hO4pMGY6RvZeEv45/Pt07AwCCr1ZsmdafvhKEUL9gZGRUWFhI/EssUuEjRRFxpLzqRelqonfUKKfYuzzxXVTEVwwaguzT0SKLUWdzK3utLAgBAAD7cFhkZOTx48cT0+/Ul3ID2eT6UKe9+V1tSVZRMSASY9UG3K8hhmNkS31NTU19o6dFF4I/FTXyK2EM3vseQkhRqEhRcqWiKao13c07BiMjEp8bF8QF89YVW49e6nM9vNBg09BIJRl6ZtuP57qSS1Gp3C5eu41A/kasgXb/sOkAAAJ+WVlJSUlZJa+Di0DArywrKelgV4FA9IBO5txVDKY6kyElHhbwKstKSkrKynh88TNSXwlCCMkXER1S/1K1jJJ1jZJrek5hfTAzvMPTS+SfrehDgs8jb9nid2xBZVlZSUmJzIeHaCbUnb+SL++yksqunIsFAC54BIeRD+aoo7kynimtRRPwyiQ+XPtbyQ3E5ykpq5R4kiEkE3OsmfCHzRvD6PyiFAcajUajTfI9K/lndPeYJ41Go9Fo0aknbWi0SVpTyO4XGV4jJk2aNGkSjUbbmVIkdpQWVFyL/2aSivrIkQYGBiNHaKnYeEY+knbZFV077eswSUV9xEgDA3JX2iTfvadL2u6cf8yTpqJCo33ziM+7FPkNrRM5d0N+vCeNpkKjTTotkiO/5NrOlTYqWiNGGhgYjByppa4yycE35REPgH/2GxsajTbFi/xKvKcIvxKbvUV4RSKE5K2wLbE1cj+dvKNGa7+4MA8i6T8vXPy5IeHusZU0Go1GW3lLLIri3yWfS56nqVu1yEOiMv2gL01di7xlq0z65tglYrf8lEgbmsqIkSMNDAxGaKnY+EaK3anJTBzi+SC4Fr/Thia8849Qn+TwzaUi8mydfGoek2yIF
8c/H0PMxmTt+rmHiw/xZOacyJD4bvh3PWk0FRWa7+n8okuRk1S0RhIfbmU8r8OtALyiW8d2ek6iqWgRn8dg5AjiSZZP1Rvx0/cS33ab5x+Jd4v8SCuPYSXJIEV2bgReHTCGqdYQ60JDc8RrHot+dosCAABzzcbnGa3HEQdwuVwuAJzm/it2mNsUo+kuQaI7Z0R5mUzb2/bHpeDawZVG051COdy2R3NDtzoZqHuK3igaG4jhKUEm6lpWXkGie0vLuZsaecRZuCXV9eSqkvQFBtP9hf24yfJxQu3DswVQX3guQywH8ivJSPm3HhBCSF6kBoViK42E5HVSeUeNGU9ZK3x9yIWgPfH5HexPDlDkNYpvaHxB/PfvcupOK/KQGDHPO1RkZ26Qm9XnB8+e3uswwd5L9J6dEepl5Bgp+tQjM+Ec8Vs5ZbqLv+jOXE6QldGClBIBQGefmiO01Dv4gJU3jhAjVtmfvqfDmPaJC7E69GiGRHBGfuRQpwlGVl7UM9P8DU31jrYKik5rGU1x848Sf9JyQu0njDhLBs4MlYZiYnXEL3fEzl2SfZr8SG+M7ugjoYGI/+yRMKmrrQJ6M9eSFeMZv6Y9arNjfgYZoLHXL7D6OCEuLu6wn3CjdVhCQkJcXFxMXIzrJKnnsfY5nHX1alywsD2cuzVapFGiLD1ounescM+wtFxuwf3chDDhHQWipqyPl1WH2H7OcnQpeleG8IzcwtLS0sKshDBrAFBXpYPOwnBOXEKcn3VrqYivJI6zf1I/nK0SIdRfiLVTE8Gi3Csd5d5CzQMd1mYO+RSJctktXokog2SXceoGK6t/PTswJutqWrAr2a4W68122soBAHANTL6aFRcofHhwvE5JqRHMCI3lAoC5R3Dy1atpccHC1rkM+/WxPIBOPjWtDTvo/f8ojayR8HH9kAmgY2lP5so5kF0iXonZ9pli7Rcc5udqDXWv6jvaWl9OPh1dfcLScu8XFhdysxKEnx/Ye34nzmTptJb8SFtPPGpzcv7FOPIzua+cNuAGNCBp1ESvucp4P9coMu0z2ZABwJjvEUwsh8ZeEP3ddOfsT+R+a+yYTGNHZ2dn98/IPzb2Zx6Ojo7Ozs6rnS30pcyTEMgpSD/gOXvaNOctx6+GkQNwTl+gfluWxfiSr0lwPXw1/cBGGwszY5aF48YDFbnCWXBjg1OlNfR2lLMcVf5FTNYD5kFBnmaGenp6hrMdN6Y31tf7WwGA8ezFzo7Onwm/kk/XrCK+EufFFoNr5giEkFKIViVSA6uJRdG0vMi/X2NjPegv3iCMwmK/+v6a3E8BAIHJBYnbV8+eZrPleHKgyEATdnBa/fHtdtNmO28/nib8vZ+c8bfUTPwSuHcit9hNm2bjvOUy9Vji/HSjDDr71OygmJXnIohnsfUn84wBABhmbn5EcblxqQVSjzEHAHC9Wpq+Z8vGPcfT70Q6MzvayhxrmxCTcL+0/viBjTYWLEN9Q7PZjseLOeQX8/dzImpmsOYL/7+EnhMd8UBViIIfuysDRVH/xUlOTklPT0k5Gx+514E2wiWUrKf2S9tsTAcA0JuxmOxrwom41voL59GJrUTMxF4+25BcJxBOZFjTINFo0Moj7r7oTNrTVmwkr4SrecTfIj//3FZhKXZ5ThM9VsfCnUOWhns05X5Xc5YrunCeHm7mRZHfk3QGQ2TETKNwmseGRmyWRggNHPKPGlUAAPQ/jwojFjO2eqSUyfkUHnH3t9tRDwl9h/Vk1YK1H+fEFhvqB73lik/bzYS7x7F1tm2mhXOMsHIx759K6NJTUwZBUZo3sa/rWkthMGbpuJ5IxB44K/WL4QLEcCOmyXgJhfStOizH1Y4svbZ1GfrWwi+Gqq/VW7yV/EzRp1uj+aJLv5ItbsHsDj4SGjA4W+3nzbO3Z7t4beUI13nE5O6x0ScX6CzXQOJ3FzculQzUeHcvkF1DPFyndPH3hYWZQZtlph75E0ZLOF+PMNSS9ndIn+FK/ooDi
dC045zliTnHgbxTbLU3sfE8eClfIe3gCCHUGWIN0KKVi9RsjnKkqDHUOtPchEEY136PlAElPSH2kBjzng2R+HSFtWjcpK71pjnIZGEmVm3LnPIBWeL0a48B5PDU5KaQNXiBbnOpgjEt7Mj2e+7W81LHebIPL23ndb3tb+Xzyooe3b1769atW3fv3sh/LL6dZf8p+ZGC4u6SJ+dlHCU/09ql0vuioQHO3Noj8PDVgprI1W1eDEN1aYg9kEz8wvkz8SSxJtB1TlcDMok3CgqDvxryv8UPyAbl8gYpLz9laJAJppp4f5YOc5Yv1urgw8L+HxlR3lYTDGiTVsZf63DsH0IIKZDUkdRyP4vi+rAxXb9LcIt1AgAIZZ/a0LJMfrPdij0k6MPUpK8fNdZcbICn7EwAQIVJvpx3lL42kbB0Wgv+GQAQeyA5ZLWZXteemkWnhbNvaGvUFz0iexLS1WG0sFhHfrnjvGWa+HHtP+pkbBWU3YrYE+gdypG+maIzc60rZMQCQGzilRAzGz0ou/ETcZB58PxB96bHwcs6OCt1ywwBXwB0OoMu/W+Z6NKwlUv8wtnkbFz0Kzl+zMPJUv5vZK4rf9nOVoEwFORBO83gyqHvebx+psuP27d6kUO9ubEu02NPhV1N3ChxOSOEkAwFBQXZ2dmWlpa9XRApamtrJ02SUpGkwJEPdGOHZD9z+yAuALh8d3rut+10AlTQ2MKuPl3I7lkvyquJRE+emry7KdRcIN7TTbyl7ZOx9cSjTdOMe/w/QVCSYmtg3zoe3NzcWnesllYNhyM+CQgAY75HIMT6A4D/yUw/G8eizN/IVvSti+UfCKC+SouhQQc6Xdrk1SL0Fm/12OoSBQCnLtz/uDGXqJQ29/uUpYDfF0YzPwCIBYCx2hrt7FbT0M5GpWGY2Xkm2rmX3M0K3z4viAMAwPGefnphvSP+9EIIdY6Ojs6YMWNWrlzZ2wWRIioqasSIEZLrFTpelm63Odw6yCoDAGJ3hlsulF3zx31WIbBgihZG4e/hUlMR/+yNwpHWY3W1heu6/9S8IayVbFfoOa7/xp4OQOGf2kqGjOYeYSd3ubH0iCicf8xB3U2i8lHP0sED/KMAICqWu38eN5b4TNYu81k9KwYagFjzP7WGqAwAzsnw0HxyVNn6FTJ+GXfhRc3SCPs1cqLPlW20aPsbRpB2dCuRmmSkDX0FXd/MZk9ijelKLZdYAIBXdY0AbW4Naoq9xyKE+jFdXd133nnH19e3twsixR9//CE1alTYu2EIOrP3H2YDAJhzg7yDZDUWA3ALy9tMjsP7OzdWsSWDoscVbVdUZpwkzzn27ZHUWtZ8siMg52R4aCQ5elrmU5PCv3uSrJVkJxeUVpS2VVGaTPaYhOjj13rc6ZP3jJiHEazD928UhoyyMcyE/TU5kaHhZPM0+7NZWNOIJFFTUGVE+YdmmAMA+NmJ9aylXp/3uLQnU8Qz3xH2LOb67zndZtKcsmsRTuTMQOzl1sbiR/acRF9J2Xj5126VtPmczNFvkClqoE5jA7lHUY++EoQQ6lsU/jvYYtX/eXhxxKefbkXeUqOPX1pvsZgoDS//9AfmboouWBDbaHRW8cbZ5IjRkvQfyPHO4PrBOyIPRT1hR8CMKP8MMAfgSj41JZTdSCafca7L5hrrSdZLzhX2mOSGxnGD7Ho0k5ugrqqcTKqKPPtKLkVJVjQSyP6a5hDlT06P5+dljTMQI2kY893ILg0AwAVghzmKD3Bm6r4LwAEArv/6nRN2Ooy6kpjMXOjrbNHFHyJMC98wdqw3BwBCnSa8DIzZ4DT7Dai+nXrcSTirv3Ww3zQF/KVyvPburDaDBpHG7/p6Y/YG52n64rvy/lo/fXoGmAfG7HSyfl9XpTH/4k/zyNKZGwrnyB89jvxKguzXW1z1H/XiRvIdpu92Z/xphhDq1xTfesJgfcPxiWKHSt34zhwH8nETynZUj/NfYfbsylG2l/Sd5c7byuBGYJyvg1lp7
s/2bmQvRHM/N4s2j6VOPDXF8TNjydY0P5e5UgNCBmtuoDn4cwEg9vTFYAs7iYdT59FHGI4l2v4z/PwiwzYvM1KpyPh5L9s7StYRImcnsB1n4ZQ7SDq9mQ6u4E/V/buyJWcmMGQHWwdtzQAAjr8Txx8AwGeSp7OFxI5tUbVwVHW7xcaIsHSONwcAINbfTXjZUeeOOSk5eqxzOUvX2vM51n+rxFZ1eylRowrxy4zr78YWK511cLidPnlH1Z/GtoagDAAAjtN04tebn6efMu64CCGkOHJroaZ+pEuOQDFcvDlQxhQ4DNbSGOodLkEuU8zNJUNG+c7aIybW32WKuTkVMgL4nPS3EdtHb6aDq8iitKdmW5V3YoWtaexZssJBfYf1ZK5BcRfFJuBp/yNLbGV+8iU5lV1GqJe5wQitkSbthIzE2Z18RT6TzxpzrGkcHFRUyAkHutAmyzDzCBbO/GkdNk/a+5CmbYqN8bEWXTNprLCTo/CEbwwTO1CF/KNrM6ui/sbEirTDftZtdwVzdnDC1frjq9vU1XUtZynUdd8UP5EI17eGSzkLw+KHrBhX8cOsA+Ou/r5ldusKnWmxV2Pa7OU6TgtDRoRQPye3qFGNvE+P1ZbyMNL3O5smDFLM9bVFX3TMXB1THOPDFt2b7RNT3FhDvhHPQOS+L/MhIdzQlR/yYWlXE/zEznu4oOaAlGEunXhqiuK/uE+2DPusaefNsxM+diHDz2LiNdzCL86AKe1N0O1t1Zu9pSDtcNsHmXlgwtWsGA9ZZ2exPaj9g5fPxsfZIMFgrW5srK+vb0z0NOt4b0oDOeGTx/qF0odu0fVXH0ivr6koLi4uLi6taWxZbUbuaLb6eGN9fX1jizOr7cXAMDtOFmV12w06Np570lsaK0qLifnGiktrWu4kbnGcJnZpdj1niVIbLk5vken4ajOpZzGevfp4ekt9TUVxMVG8isaW9O3O4sXTn7Y6vaW+orS4uLi4tKKm5fhqfO0SQkhBNDTam3pCjmgtLS1yykrA5wvojPZm8hAIBECXPjWcgFdZWlHTCDBshIEeMZhaIOALxDOUdQ4Bny+ATm24G7nS3CsWAMJyazZaMPm8yoqKmkZQ0Ro1Ukd22S/ttLHyzwAAj4SCSMdOdMYX8PkCYDA66q5IFI+aL0/aRxbZud2tAACCyrLSmrpGUBk2YiT5LfL5fOnz8ZWl24yclwEA4FPQeKDns/+gAYt3y0FrCgcAwDqrIn02xj4IISQPDg4OAoHg1KlTPcmEihdra2uJdG1tbc/LZmdnx2azv/zyS7H1cgwW2o8YAUBWxAgAQGfq6DPbPo6kBTuyziHz3O0VSgAADMnzSuLd+h85INradV7nxm92/GVI2032fMsdbwUAoOvo6Yt9GFmR691fDwrfIvgJhoyoHUUZx8m6c5EXYyKEEOqJyMhIDoezYMGCnmdFxYtKoOCZdwaEgfjUfHRU+N6azxb3xVnpUZ9RxgkluxqLvhgTIYRQt23fvt3LywsAmpube56b0kJGwBF9nTAAn5pll34RThUSaKuIt3ygAYP/NJ2ckcrDaWYPRvojhBACAIAdO3bs2rWLSPc8apRsj9bQ0JBLI7VUgzdq1BIfUiPDQHxq/vvXFSLhsdVpgHwkpCAMi9P19fWNjSpMJv68QAihHtq0adOBAwcUlLnigkXKoIsazTxj6p0jGlXUmZ3qeDgwn5pmnqfrneu78CWgQYzOYDA7HNeFEEKoXc3NzZ6enkeOHOntgvTIIAwa6Iwuxn8D8anZ5S8BIYQQQt0jEAhWrlx54sQJsfUvX75MSUnplSK1r6KiQur6QRg1IoQQQggpSUNDg5OT02+//UYsivY7vHPnzrJlyzqep0/paDSaurqUyaMxakQIIYQQUoja2lo2m52WlkYsTp48OS8vj9qqoaFhYmLCYDBoNFovFVC6vLy8+vp6yfUYNSKEEEIIyV99fb2tre2VK+QI1NmzZ3t5ebm4uACAqqqqpqamu
bn5sWPHerWM0q1YsULq+g6ixo8//vjly5cKKA8ayD799FMfH5/eLsUg8urVKzabXVdX19sFQf2Mt7f38uXLe7sUCA1YJ0+epELGjz766Ndff/X29iYW586d2x/jq/aixqampqSkJC8vr9GjRyutQKi/u3DhwqVLlzBqVKaXL1+mp6dv2bJl2LBhvV0W1G/88ssv165dw6gRIcWZMGHC0KFDm5qaPvnkk59//llVVTU5OZnYtHDhwosXL1ZWVnY1TyMjIwAoLCwUXRRdo1Adt1B//PHHEydOVEJR0MDw4sWLFy9e9HYpBqMVK1bo6AyQlxchJfjzzz97uwgIDXDTpk27fft2SUmJra0tjUb7888/i4uLiU329vb//PNPN6JGgpGRUWFhIfEvsUiFjxRFxJH4RkGEEEIIIYUwMzP76KOPiMEu586dI1aOGzfunXfe0dTU7N4gGCpSlFypaBg1IoQQQggpHBU1Lly4kEgMGdLlMIyIDql/qVpGybpGyTU9h2OoEUIIIYQUq7y8/Pr160R6wYIFRGLo0KE9zFasipFqs1YQjBoRQgghhBQrJSWlqakJADQ1Na2srHqeodToUDKIbGfnbsCoESGEEEJIsajm6fnz56uqqso9f8nxMaCAzo7YrxEhhBBCSIGampr++OMPIk11apQL0dHTVOBILIqm5QWjRoQQQgghBbp69WpFRQWRtre3793C9AS2UCOEEEIIKRDVPD158mR9fX055iyrIyMoZmQMRo0IIYQQQgr0+++/Ewmx5unm5mY+ny+XU+Tn51NpPp9PLHY78+bmZqnrMWpECCGEEFKU4uLiO3fuEGlqzh0AyM3NvXDhAovF6qVydcDExERyJUaNCCGEEEKKQjVP6+rqTps2jVpvbm5eV1e3d+/eXipXe7y8vMzMzCTXY9SIEEIIIaQoVNRob28v+jIYFRUVLS0tqcFZr2MymXS6lBARx1AjhBBCCClEQ0NDWloakRZtnu6nMGpECCGEEFKIzMzM2tpaABg6dOhHH33U28XpKYwaEUIIIYQUgmqenjFjxogRI3q3MD2HUSNCCCGEkEJQc+4MgOZpwKgRIYQQQkgRHj58WFBQQKTl+yJBMRoaGorLXBSOoUYIIYQQkj+qeXrMmDHm5uaKOIVovEikiW6UCjIA6xqLMyNXrFixN/lR+7sJGhoaBMopEUJICrxUEUIDm3KapxUaJopRTl2j4DYnOolbqqamRiwztPXeYb03ddZk3S6ev6H8UXrKuT8yc/KfVAKAjg5r8txZ8xfZTjbQpvapfJabk5MDtvXt5FOUvHPOumgAh6QHoaZqXf48ctKQGRkQl1s3w2Wz2xwD0Q3FV2ICjuW8McVlh+ccNQAAwe2EiKT8auoLJI+varDy3DLHsM3K8tsJO7/PhmHDrNd+yTbVbXvGtvk0NICatt7Yd96zsDAzHoXVzggvVRnwUkUIdVllZWVWVhaRHhjN06CsqLHhrxNB0TmS623jsw/NNOjso6AoM3LOqqC26/JzchIjgja7h6VsZ5Pv5KHDMABgtpsV79+/hDvLg6CYE/3Lg4Y3XdY7GXQhR8Gzi/GpOcCbsVbsUVT5ICc1NRV4M7aSj6KGv34NkfYFgrbTxraPotr07zcnpgIAJNZNtotyavvlyswHZqxPCt9k2tXQAA00eKlKPwwvVYRQVyUmJr5+/RoA1NTU5s2bp6CzSFY0amhoKK72UUm3HlXiyWC748xGS0FN+cPLcd9GpAKkOrsfy03x1O3gaACA6tsxwucQa9M+v4UzTRgCXsH11KDNIfkA0d52I/VzPS07kxMAwFjbTTt4Oa91J+kN7d4Haquh8lxQSCrMmO/epUcR+bVIPjXpqsOI7fS2exJfIDQ2EisFAtAf1/YXxoub0anCdOqvueVOM9t+JW3yqa8vL8lLPxEQnwOQE7FoSsGZO1GTtQENZnipSoWXKkKoq6hOjXPnzlVOdaASmqqV+oPVds6MyaYsALCcOeddP
Z8lAYmQH5T+aLWTcYd1GOUn/AKIPOKvRcwUNtEYGLJmzZy0ctaqHICgkLPLf3YTv5EKGqqrXwnoDG1tDdGPqmFg6bbRUtqJBLXVtXyB5BGUhurqVwKga2poq7VuJ54N7VeayAH1BcryKOtMfutSTtLlRzPZxu3mY/khe6VTgv+SzfEAqUsOXijY/iFWYqC+f6kKGmqrX/GBTtfW1pbxF4uXKkKo1zQ3N1+8eJFID4w5dwjKve28bu3TPnnBIghIBGBp0mvjPMZ/mwq2O5Ki3EypHapvx0xaEgDgnPJgz9h/LwflAwCsP7JjZttePXSDObv2OczbnAg5ARcfObOFT7VhUHMj+aDTuhByP5bDkf+3+0NjMtivzU+YaLcZbHffi3IRxv+CvOTo3euCqEYhloPf/u3uIi1BtVfiQgK/jabu9c5+R750n5H+1cTNicSK1CUTjQAAwD3lwXaWIvpgvW5/UED5hehEAFj/Y9oHd7Y5h+TEn8j6km0spVanTT70yU6BR/JvrY3Oh+j9Fz3mfoj9plAfvlQF5XnRO3cHJbZeqZu+37/e3lTkZHipIoR6WWZmZnl5OZGWFTW2tLTcuXPniy++UGK5Ouvvv/+Wur7XbjoCAfGjP7+inj7WZAak5qQG/F7sZirsNCS4kXQSAMB28ltq8Ox6BgAA2C6YYSCZlaHVIhYk5gNU1PMByCdAYoBzouhO+Ylr59Ul3ImyJKs4XgMA8Fq3345ZvyQgFQCAZevA4iUm5uQnBi1KLEp5sIelBgCCzL1LV0UQjyEWC/LzAeKD1hpOzdGskyzRU4GAKkgnqIr/X1BR7eyeohqKiAf2jA/eN56iawshOZATcL3I2d6ww6LQ537mDdHrAPKfltXCKGz6Qq361qVafXv9lCVE0+4MBwdITMyB/JB1i/4NS9tD1tXhpYoQ6n1U8/T48eNNTEyk7lNRUfH06dOjR48qsVxdUFlZKblSuVFj65209tz/CwMAANtJb2m9s/QTiMgBiEjN+6+bqQYAQEN+UnQ+ALgvm6kB0Pia2HfOWGkdA+jDx7AA8qVscU7I/tbSQEPYNz/1h1/zLEXqSCiCF5l+AakAYOsXf8hzphpAgFfyp3br8iH+p3TPPfaGDY/OESGKKUgAACAASURBVM+hGeu/P7zFXhsERTfOfe3kraoyzCWiwLH69hdTnFJhRvy1Y1OHQxMMVetK7UXqtz7f/GMB1EBSdbgVnShjz4CdxZOJvBsaqsZY/Ve0b/5fKScAAGw/maQBaiwrZ4B4gDPpf9tL+8hi6KPHOgAkAvBq+AD4KBr0+uilKsiM9EsFaB2dExqQvNNrXXROvHe858LthnTASxUh1Bd0Zs4dXV1dKyur3bt3K6tQXeDt7a2joyO5XqlRY2rS2QtjXjRWPbt04tv4HAAA1qbPTTUAjK02sSAkH07+ftPNdA4AlHPTEwEAbBeJ1ljwpOYK8OoZced+XS/amuOecm87SwMAwHCO+/fOZ9bF56em3ql2M5W80RZciMsHANi003MmcaPXZtnv32G7KCA1/o8/d9gb/p1F/Ghw/78t9toAAHRDS/bPhQsFQAcANU31YQAATC1NNbpaN77T/PhoaU9SKXKiI0TGVWo7tT6KBEVJQTkgfHgD3XjRphnxIWLVQrIJeEQJquv4XSo6GpD66KXakB8XkQ8A638MEg7o1rbf8q1t9KJUiL5etMXQWA0vVYRQr3v+/Pn9+/eJdPtz7jAYjLfeekspheoaNRk/qZVb15gTsTYnglqasf77w2Q/91EL3R1CNifmR5zJ957DUhNcPxMCAOC8zEy0xkJWF3bNMcSPb1X11o9ju9uJ1XosfexEGXUcJGIgZEjQTr4uNAAAqEH57ScAAC9e8YVd6GdssjNscxRdPl+frV/COgugHqN0yPvRKUBqFYbD7qQvpoDwiauuN5baUpufHg0A4LBkNvncmcpeDiE5ABEX8//rwupo9BadSXxBI7U1e/JR0ADRRy9VO
nGhRgTthGu6DQ3EyvInAADwqp4PoIaXKkKo1/3yyy9Egslkzp49W76ZGxkZAUBhYaHoougahVJyv0aHHbst4TWMeGvM+AnvsUTm+zW2WcaCxHxITOUGsCY/PRMPAOC3ZDJRPsHrOgCA1OIqACn31FcvXkiubNsbXWeMPgAAU+oHbnh8j7zxJ0ZHiG9kqtKh+q/UVABg6qp35kN2le2cOZaT2wy3HFHgAIlSnkW2llNMjaUOzBTcPHMSAAASdwcMIx9QL+OJ/x4/e8eFNbODQvB5xHco+jhHg1hfvFQbnj0gr4r8xAiJwFKVTge8VBFCfQDVPP3hhx+qqsrq/twjRkZGhYWFxL/EIhU+UhQRRyp35p3dX7i5SJldAgBAd5K7A2xOhJCUP9mQnwoA4PzhZHJMod7bJgCpABFXHnlLzv1RdD2LaAoazWS0rm3bG73yWQkAAA+kjWxUGzvRASARHPZd272AIWizC0NDWw0EYybPgJwc3qv2XmLRfRLDLcnOYZ3Yk9SQf0bYcJYTHy82N3B+xKl875ntDxTNT4qT8h2iwapvXqpqY4yJqsrdSdccxzL4ba9UbW01wEsVIdTbGhsbL126RKQV9EoYIl4UCxOJlYo4nSjlvof6dTu3cg2rZZsAAKJXzXEKAgDWpiXGwqfJqPesZwAAwOZDZ6vFjqu+fWBdNACA7W4rkRGIqUmpRa337YYHN8j6AKlhMllBkvhYoKGh3ZYaHQDoowyYAJATlFIkelhDdW2bRwOvXtFvy5Xxi+XF1d8TAQBmHEnJzs7OzszMzMzMzM6+lhbvBwAAianc8nbyqX2U7PNtIgCAQ5hNx6M40SDQNy9VARDDoP/5V6AmcaUSB+GlihDqXX/88cerV68AYMiQIfb29nLPn6hBpP6lwkfJkFERQaRyo8Z2jZpi6yCy6L7w3dYF7cle7gAAkLj502/i8l9UCwQgENQW513wmbSEeMj4rVvUpkUsJ2TOor15LxoABPnJYd6JAADOy6dL7TT0jtV8AACI+OrghXLh46S2vCgzIeHGiwYAeHMS8SiM/vrghWoBADQU3UiwGz9p98ViAOphlpNypQigofxFeUNPvgjZnhSX1zbUVotoEABA7dUzEQAADstnswwMDAwMDQ0NDQ0NDEYZz3RYDwAAIXHXG6TlU1yUfyFu58R564jaj+997fBBhDrUa5eqxtiFDgAA0WsDLjwSRleC2qLbFxI4t4m/cLxUEUK9i2qenjNnjr6+vqJPV9iW2Bq5n05JLdSymnHaUGMtWz8jkRx4uMnKWPSpQZ+zJW199ryIfMiP/9Yu/luxQ533JXlKvmArP2LRNNF+is7udtIb3dSMFx9xj14bnZ8TsnZKCMywtYUnqTnEsOoztpaj1LRNP9ltG/BtKuSErJ0U0nrgLOI/GnqmAKkA0evmRAMAOGcW7DHsxFfb4ddCVYi8Jj/QqoltO166H8nePrnoRCIAwPol70s8SEYt2GEbEZAKiT9xt9tb6srMB4C1LymuE9PFoQGub1+qGna+30PiOoDUtfNSgTXDVgdSc4hi+NmyJ6sB4KWKEOpdaWlpREJBzdMUqUGh2Eq5j5VRUl0j0cwyTLOD2/OUBUQ9Adjuthkltk3NeEtS7pEd62e0XT3D1v37pGt7nMTnObP1+z7MT6RCxHb9mdxA2a9DU/tw+y/xu92J7us5qUTIyHJw32H7NvFE1HaJyN7nbityCMt9x48bPyRGQY5alRLWWjDW6E4G4zK/FmIDs7XD14g3pL+dTFuFXlv2OAcAwGHB+1Jm7WDZLmMBAOTkPa+Wkg+LZevgviMs/tqDFCfTzr4aGA1gffxSVTO0v5cZ727LAgDIzyFCRtYMhx1H5ghDV7xUEUK95sGDBw8fPiTStra27e8sX2Lt1ETLtdwrHWktLS2ytjU1NdHp9HPnzk2cOLHHJxI0NICaWge3aOGryVhHspM+NJC1M/mqaABgaGprSMtT0CCgq9GJXav5AqBr6
mq3eQrV5sVMXBTQ9o2C5KHV1dUCAdDpDA1pL6Km3n6rqa0t8VxrqC6X8iLddsn8Wjr1fVE7CxoA1GROLtLBZjkLDg5+8eIFNe8AUoLCwsKxY8fevn1b6qSsXdQ/LlVyf6AzNDVk5IyXagd8fHyMjIwOHDignNMhNBiEhIR8+eWXADB69OiSkhIajdbOztu2bbtx40ZUVFRPzkhNxCOaEPu3G9muWLFi6dKlxGcRpbQx1PROvIOh4eKPAQAAM1ZaynwOAQBdQ1u7/TnN6MJ7OF1DW1csKqytrRW8TDpyEkDqOE26tnZ7v+Ppahq6arJOrqat29VmI5lfS2e+r9ad6e3u3MFmhET1j0tVcn+JnPFSRQgpG9Wpkc1mtx8y9lN9acavF1e/JzrCf/aB4t6TlX9q7aIAcr4L20VTFHOi2gSfiZulv2lMyHbfvSinjubzRahPwksVIYQk1NXVZWdnE2lFd2qkyOrICMI5HeV7uj4UNdZWluYDADg7Wxl2tG/3qY94FyAHgOWwyTvARXoPpJ5ThfZfRSN8GQ1C/RBeqgghJCklJYXP5wOAiorKnDlzlHx2sQBRQa+K6UNRowbLqbDQSdFnMWZvL2RvV/BJNNihKexQBZ8EoV6ClypCCEmimqdtbGy0tLQ63P/p06fZ2dlz585VcLm648WLF9OmTZNc34eiRoQQQgihfur8+fNEopPN00OGDHnjjTeUPNS6kzgcztChQyXXY9SIEEIIIdQjt27devLkCZGeP39+Zw4xMDAwMzPrm/MY3L9//80335Rc34feDYMQQggh1B+dO3eOSIwfP57FUlRX7F6HUSNCCCGEUI9QnRqVNnq6V2DUiBBCCCHUfVVVVTdu3CDSGDUihBBCCCHpfvvtt6amJgAYNmzYzJkze7s4CoRRI0IIIYRQ91HN03Z2dgwGQ/kF0NBQ0rsIcAw1QgghhFA3NTc3p6WlEekuNU/fvn17yJCeVt6JxotEura2tod5tgPrGhFCCCGEuik7O7usrIxId3LOnUePHtnY2Pz+++8tLS09L4BCw0QxGDUihBBCCHUT1TxtYWHx1ltvtb9zU1PT3r17WSxWRkaGvAqgtOZp6EwL9dWrV4uLi5VQFDQwPH78WEVFpbdLMRhdvHhRmfcO1N89f/58zJgxvV0KhPo9aqbGDpun79y5s3bt2ps3b1Jrel7XKFnRqKGhobjax/aiRhqNRqPRdu7cqaBzo4HqnXfe6e0iDC6vXr0CAF9f394uCOpnVFVVe7sICPVv//77b15eHpFesGCBrN0aGhp27twZHBwsEAgUVxglNFW3FzW2tLS0tLRkZmaampoquhxowAgMDHz69Glvl2Jw0dTUBICCggIdHZ3eLgvqNzw9PbGuEaEe4nA4RH3h8OHDLS0tpe5z+fLlzz//PD8/X7lFU4iOW6hVVFR6ZRg56qfodByY3ztUVVXxUkWdN3To0N4uAkL9HtU8/fHHH0teUzwe7+uvv/7++++plughQ4Y0NzdTO9TU1Ny7d085Re2Suro6qevxAY8QQggh1GWNjY0XL14k0pLN0+fPn3d3dxdtfJs+ffrVq1dF97l8+fLUqVMVXc7u+eyzzyRXYtSIEEIIIdRl6enpPB4PAIYMGfLhhx+KbioqKlqwYAHVi/Gtt976f//v//n4+BCL//nPf548eQIAampq+vr6yi11pzx//lxqF0yMGhFCCCGEuoxqnp41a9Ybb7whuonP5xNRF41GW79+fVBQ0M8///zPP/8QW62srGJjY4kDjx49qtxSd8qKFSukju/GqBEhhBBCqMtSU1OJhGTz9Pjx43/88cfMzEx3d/eZM2c2Njbu2bOH2LRkyZL+O9MIRo1oUPjhhx9+/fVXZZ5x/PjxoaGhyjwjQgghpcnPz3/w4AGRljpT42effUZ1DTxy5EhRUREA0Gi0HTt2ZGZmEuu7MV+jkZERABQWFoouiq5RKIwa0aCQmppaV1dna2urnNM9ePDg5MmTG
DUihNBARb0S5s033zQzM2tnz4aGBqqicenSpZMmTfrzzz97eHYjI6PCwkLiX2KRCh8piogjMWpEg8XMmTO//fZb5Zzr3Llz58+fV865EEIIKR/VqZHNZre/Z1RU1LNnz0BY0QgA2traxKbuvRuGiBfFwkRiZTdy6xJ8DzVCCCGEUBfU1dVlZ2cT6XZeCQMAfD7///7v/4j0p59+Srw2xdDQkFhDo9G6emqiBpH6lwofJUNGRQSRWNeIEEIIIdQFKSkpDQ0NAKCiojJ37tx29vz+++9LSkoAYMiQIf7+/sTKyZMnh4SEREZG9vy9GGLN0FSbtYJg1IgQQggh1AVU8/S8efOYTKas3err6/fu3UuknZ2dJ0yYQG3y9fUtLy+/ceNGt8sgNTqUDCLb2bkbMGpECCGEEOoCqud6+83T4eHh//77LwAMHTp0+/btCi2S5PgYUMCAGOzXiBBCCCHUWbdu3aLeE9jO1By1tbX79u0j0itXrhw3bpwiCiM6LIYKHIlFRYyPwagRIYQQQqizqOZpFos1fvx4WbsdOnSorKwMAOh0+rZt25RUOAXDFmqEEEIIoc6iosZ2mqd5PN7+/fuJ9KpVq4yNjRVUGFkdGUExI2MwakQIIYQQ6pTKysqbN28SaamvhCGEhYWVl5cDgIqKinIqGsUCRAWNpMaoESGEEEKoU5KSkpqamgBg2LBhs2bNkrpPTU1NSEgIkV6zZo2szoV3797NzMwUHVjddzQ1NUltfMeoEfVvV69effLkyeLFixkMRm+XBSGE0ABHNU/b2dmpqalJ3SckJKSyshIAVFVVv/nmG1lZvf3222ZmZlu3blVEOXvI399faqs6Ro1S8P85v9nvyLDpawN852Mk0pddu3Zt1qxZzc3NY8eODQkJcXBw6O0SoZ56ev6AR0jqVM/QAAeZfcwHW0kQQn1Ec3NzWloakZbVPF1VVRUaGkqkP//88//85z+yctPQ0Bg9enT7c/f0loMHD6qrq0uuxzHUUghqnhxNSgpP/Zvf2yVB7SsqKmpubgaAx48fL1myxNbW9v79+3LMn190aafnShsbh9P5PDlmi9pR/uRqVlbW9ZL63i5IHyoJQqiPyM7OfvnyJZGWNefO/v37q6urAUBNTc3Pz095hVMKjBqlUSH+o9bZmljB01MHgnYEHX8qUFiRkDQODg5r1qyh3uN5/vx5c3NzX19f4ortGV56pK+6kZV/VGxGBqekDv/XKokKaAKAlnJP+vzKqR07dhy/8rTXS4IQ6suo5mkLC4sxY8ZI7lBRUXHw4EEi7eXlZWBgoLzCKQVGjfLAL/9l2679u+L/xcpJ5VJVVY2Ojs7Ozp4yZQqxRiAQhIaGjhs3Ljo6mqiG7I7KW740rXleoXIrKOrbXvz5y/79++P/LO/tgiCE+jQqapTVPL1v3z4ejwcA6urqfbPDYg9h1CjEf1VVVlZV9QoAGDLqGAX8V1VVZWVVVXzxiqdGAJBVKyH7KCQfM2bMuH79+g8//KCnp0esKS0tXbt27dSpU3NycrqRIe/hhVAAAI9kLvcwW44lRd0n4L8qKysrq6qSvIwEAuE6cp9XMvLgV1VVVVW9knUlapEXshiVTuSMEBr4nj9/npeXR6SlRo0vX74MDw8n0uvWrXvzzTeVVzhlwdEwAFCVGPSly64T5NKibXFLxeoMBQ/O/7g3JOpEVh61asPB8wHuMxnw6rj7KC/y0KS5ozSIjdfLgycy2jlqgEtPTzcxMSHSVPMxlZC6sp1E5/cfNWpUS0sL1ekkNzd35syZK1euDA4O7lL51cctTk6eM8tuGhP4x7p0JFIAQdmfh77223YiS7jCdFtc5GaH94ib173j7lO9Tqw5eG6pavJCL/J+DVZfXfxpu6UedX/j3zgetMFrf55ItqamkJe35uKlqXNnexFrkjbP1dgMALAm5tahZdQImJIrp07Nd9svI2eE0GDB4XCIhI6Ozvvvvy+5Q3Bw8KtXrwBg2LBhSq5o1
NDQqK2tVcKJ8N4nOLf1I5dwkadJ0i6XJABorTrk3/vRwmEjkTa1ssrLygKA8I3zGf8pCJjPfC2l9qGwUdD+UQPw94eo2traqqqq3i4F6fjx42fOnDExMaEC2Q7RdVh2dkRSauUTUqKqGy5Gc4kr0mr5cjhxIgvydrnMKqECu9cAAEc3LjwqelTW/rmLhhde8yUqn+8d/+9c4redMAsAEFYZvJY8Z+nL1hEwSZsdkmTnjBAaPKjm6UWLFg0dOlRsa2lpaUREBJHesGHDyJEjlVMqDQ0NsbRCw8fB3kItePrHsvA8ADDdEFdYXVtbXhD3lRW5rYb8L33EfxaZLjqYmF1cXXstObm2/O42KwCA/UcuvAJN97jq8sLziwAArM4VlFeXl5eXx72n2f5RSHloNJqDg8PA65I8OAjOH9iQBACw6Fx+eXJ0dHJtcdwGKwA46nb0H7GGZtMNFwte1NaWn9+3HAAgb1vKA6LR4OkprxMAYLUtsTw5Ojq5+uLhNQAAsK2g9pClxarq2vKLuxYBgNWu8+W11eXl5XHr3+tczgihweL169eZmZlEWmrzdFBQUF1dHQBoampu3rxZmWVTTi0jYbDXNT5I/RUAAJYfDXDQowPQ33QI+C2R4eKwq7V+gf7m/JPX5rcew3jb1W3NrqyjUPNaAAB0OoM5TBMAQGs4k0FnkN9pB0cNaFOmTPH19SXSLS0tnU/0ZP+mpqbMzMwzZ86IXj8mJiY//fTTjBkznJyc5PLRkFLx847szwOArxLD575F9OwY7hAQtCh8VhKEZz8KeHu8sLuH6VfXMwMmMgAAZq7ZtHzziRMAvHo+AANelecDAJhuWktMv0qfPHUqwFGAP5+9gjc16XSgD9PSBAAttWEMoIv3a24nZ4TQoJGenk4McxkyZMi8efPEtj5//jwyMpJIb9y48Y033lBm2USrGxVtsEeNJKuZhq2PAPrsTxbBriSxXaqe3rvBvfek6GUDQENRKblrRxl376j+Tl9ff9myZco8Y2Zm5saNG7lcLrVm1KhRe/bscXNzE+0fifobFU0AANi/7Wu4/AafrOB7SbxdVTR0W+SxbCJ1CTMM5y6CE9QVTF5veXl/V83XGw4AzwsKAABgpFYnLsX2ckYIDRpU8/SsWbMkg8I9e/bU19cDAJPJ/PLLL5VZMMmKRoX2cRzwAUynWNlO0BRZFAjEujqVnfpikdvRPOia7h2FuubJkydfffVVQkICtUZFRcXb23v79u1aWjjXXv/GL7pHjjTLO7Ff4kpSUxG5fTXI7oHKMP1sAySFw7b5H1Ud/HJiQ67b5nAAgA12xp2pLmwnZ4TQoJGamkokJJuni4uLf/jhByLt6+s7YsQIpZZMhBKaqjFqBADIunr/FcykAkc6XVV064NTXxPB35pdcV7sKSOYzJo/v7dw2NV+nt07CnXJgwcPpk6dWlNTQ61ZsGDBgQMHxo0b14ulQvLCMBy3HOAEwMHsAhcT9bYz5jCGD+9kG/Fz7kUikbd/oxu5zmrbre0L8faHEOqM/Pz8hw8fEmnJqHH37t0NDQ0AoK2tTfXOGqjwtgkAAEkPSwWgSX4Z/LT4eNGNBbnEcJlTh3zJvxWmnra0XGpEXyDS6aNQ9125coUKGceNG3fgwAGFvdATr5TeIABi6NjDYgHjvc4GiWJe3UvdlQdg+tXF02u1yv8trwPdt94Z/9ZwyT1rsFoRISQN1Tytr69vamoquunJkyfR0dFEetOmTcOHS7m3yJKfn3/p0qWJEyfKq5xyVF5ePmHCBMn1g/1ZaPzBXIATAOF+hz6M9p2vyX9+ascat/As0X00GCMAIK+w6DnAmwBVD855zmo7PIp8tmVxLv4z00G/7DmP+aZex0ehHmOz2dOnT3/y5Imvr6+3t7eKioq8chbw+QIAOp1HBKX8Wh4IVPgCoDNkzQGPFEDTZOlySDoB4cu+sroVvnC8HgCA4NU/tzOzH490WmbZmTiSTmcCAOTt3/AV32H6a
G01NbX73Du6/5k4ZcbEt8gGhsaGVwCQte30g3WWxvyySrqOnib+f0YIkX7//XcisXjxYrFNu3btev36NQDo6Oj4+Ph0KdshQ4aMHDly5cqVcimkfP3www9DhkiZZmew3xkZ4z8+uMhrYxIkbXMYta3tNmGnuPEfTIX9WZC02UTjt+WL4ERSltgOoDnqPYAkgHAXs3AAgDV3qw91fBTqsREjRnTv7S/t4+cfU5/gJrpmq5UBMWFrDLdmtRlT7mdEMmgu/jYOTrgAJC2zSAJTq0UjICmLuJR2fdy5qJExfsE+gM0AeUnheW0Hsnx16m7AwrcBwOBd8gq20A0HAPjqfG3ATHl/FoRQv1RbW5udnU2kxZqnCwsLjx0j3wXx1VdfdbUz/bhx46qrqz/77DO5lFO+kpOTpU5yPNjnawQY7v7j3X3Lrahl0zUHz587KFoB/eZ8v3P7iAnesk4kZQEsPxx30KpNJm+uux4jkoW+SqeOQn2ViqrsTUosxuDU9rtnvO3w4u65DYtMAQDysoiQ0dRq+b5TtprS9m+LDgCv7p0hKvk3HD5/9+6t69evXzxHzPkI+5dFEpM+6s1dF7Oh9er8aurozuSMEBoMUlJSiG6Lqqqqc+fOFd0UGBjY2NgIALq6ul988YVyymNkZGRkZCS2KLpGofDeB8B4e3108pqwKl59o4oKk+hin13tIvI+asbc9Ydq1wSV8eoBVHT0htMBVlSvAjqd+vqGT1yWXLu4qozXqKKuM5xo3Or4KNQ3MYydW1qce7sUg9TEldHVTpGikyZqvj03+OS13a+qKusbAVTUmZqaIlsl9wfQXHmyeoWAvNQKMpMAADacCl5JVR9OnABrwrOOAhSW8+FtTQAYviw4efH2Kl59owpTZziD3pmcEUKDAdU8bWNjo6nZOuHKo0ePjh8/TqS3bNnCZCq1GcrIyKiwsJD4FyRCSQKxSb7w3kdiaA5niMy+Q6dLtH0xNPVE9pD22GAM1+vGUQihNujS+o7SNYfraUqulrW/xKV2MevG01mT3xpOB3j1/M9DwUcBAEznvCOSp9hNoLM5I4QGNFlz7gQEBAgEAgDQ09P773//q8wiEfGiWJhIrFT0qfH+hxAayIwsbQGSIC98LitcbNO+cKcuDHdECA0+ubm5z549I9K2trbU+ocPH8YLp1v5+uuvlfl2FqIGkapopCJFyZCRqomUI4waEUID2XBL9+JbxjFHj6dezMvKywMAU1OrqXaOn6391PItGbWXCCEEACJz7rBYLNGZgAMCApqamgBg1KhR69at653CAYBEM7QiIkVRGDUihAa44ePn+gbPHeBz7yKEFIDq1CjaPH3//v0TJ8gXV/n5+amrq3cj58bGxoyMjJ5UUkqNDiWDyHZ27gYcQ40QQgghJK6iouLmzZtEWjRq/O6775qbmwFAX1/f09OzGzkXFBTMmjXrypUrcilnOwqF5JUhRo0IIYQQQuKSkpKIZmgNDY2ZM8lJGPLy8hISEoj0N998w2B0+a1VP/7448SJE2/cuAEALS0t8iotUa0o2s1REYNjMGpECCGEEBJHNU/b2dmpqakR6R07dhCh3pgxY9auXdulDKurq1esWLF69WridTIgj6hRNDoUGx+jiFHV2K8RIYQQQqiN5ubmCxcuEGmqefrOnTtnzpwh0tu2baNCyc7Izs52cXEpKioSXSnHukblwKgRDQr37t07e/ZsSEiI0s4ox5diI4QQUrLLly+Xl5cT6fnz5xMJf39/Is4zNDR0c3OTeXBbTU1Nu3bt2rlzJ9HeLV+yhr8AzryDULe9/fbb5ubm7u7uyjndtWvXDh48qJxzIYQQkjuqeXrKlCljxowBgFu3bp09e5ZYuX37dlXV9t46SikqKnJxcaHeZC3m77//3rVrlzzKC6LN5bt27SIWu515YWFhbW2t5HqMGtGgoK6uPnbsWOr3oqK9fv166NChyjkXQgghuaNmaqSap/39/YnE2LFjV61a1ZlMT
pw44eXlVV1dTSy+8cYbdXV1dXV11A4vXrxISkqSW6Hlp7y8XKwxnYBRI0IIIYRQq5KSkry8+OK1zAAAIABJREFUPCK9YMECALh+/ToVR/r7+3fm1aIhISFffvkltWhnZ2dpablz504AoNPpxNsI586dS3WU7FPs7OzeffddyfU4hhohhBBCqBXVEj1ixIj3338fRCoaTUxMVq5c2ZlMqHBQTU3twIEDSUlJsbGxxBpnZ2c5l1hZMGpECCGEEGpFVSsuWrRo6NChOTk5f/zxB7Fmx44dneyA5OPjo62tbWFhce3aNR8fn1OnTj1+/JjY9NVXX3Vjose+AKNGhBBCCCHS69evL168SKSJ5unt27cTi+PHj1+xYkUn81m6dGlVVVVubu6kSZMA4NChQ1SeZmZmBgYGci63UnTcML9q1aruvWMRDU7Pnz83MTHp7VIMLsSEsXZ2dp3paoMQ4cmTJ7a2tr1dCoT6nPT09FevXgHAkCFDPvzww0uXLqWlpRGbvvvuu+6NdLx06VJOTg6R3rRpEwCMGTPm0aNHciqy8nT8jDExMdHR0VFCUdDA0NjY2O+mLe3viC98woQJXZpyFg1y1dXVRH98hJAoqnn6gw8+0NXV3bFjB7H47rvvLlu2rHt5/u9//6MymTdvHgAQs/n0Ox1HjXv27HnvvfeUUBQ0MPj5+f3999+9XYrBhQgWIyMjdXV1e7ssqN9wdXXV09Pr7VIg1OdQXRgXLFiQkZGRkZFBLAYEBAwZ0p1+fYWFhdT0Olu3biUSFhYWcXFxPS4sSUNDQ+r0inKH7VkIIYQQQgAA9+/fpyo+Fi5cuH79eiJtZma2dOnS7uW5f/9+4q0wenp6VG3lxo0bk5OTe96tSENDQyyt0PARR8MghBBCCAGIvBJGX1//+fPnly5dIhYDAgJoNFo3Mqypqfnpp5+ItI+PDzV0mk6nT5s2rXuVl2KUU8tIwKgRIYQQQghApFMjm82mejS+9957Dg4O3cswIiKCx+MBAIPB+Pzzz+VSSDGi1Y2Khi3UCCGEEEJQW1tLvTB65MiR1KjnwMDA7lU0NjU1ff/990Tazc1NET2JJSsaFdrHEaNGhKQouZt++ueT6Vf/flxeDrpjFzq4bvBw1O+Xc7Ii1HeVl5fv3buX6PKF+hoTExMvL6/eLoVSpaSkEBOZqaqqUuNX3n///Y8//rh7GcbHxz958oRIb9iwQS6FbIcSmqoxakRI3N1jK83dYkVWcLkZnCBvv4LGPcZ4xSAkP3fu3Dlw4ICjo2NvFwSJKy4u/vnnnwdb1Eg1T5uamubm5hLpwMDAbmdIzey9ePFiqa917nfwGYiQGN6Vn2IBgO0TtnGlrV5jXriHUxQXAIK+i/3s+GpWbxcPoQFl2LBh8fHxvV0KJO63334bbCEjAKSmphKJ0tJSIjFt2jR7e/vu5ZaRkXHjxg0iTczsLWbIkCHnz58fN25c9/JXqNLS0iVLlkiux6gRITHMT8Ky3lUxnc0iJrdnRSYnXzWw5wLwaup6uWgIIYQUIzc3t7i4mEg/e/aMSOzcubPbGYaEhBAJMzOzOXPmSO5Ao9EaGhqok/Y1Usd3Y9SIkDg9s9lteizrT3G3Bu+M3ioOQgghhaOap9XU1BoaGgBg1qxZ8+fP715ujx49oibxoWb2FtPU1DR79uyetIArzqZNm6S+OwqjRoQ6wvsnHUNGhBAa0KiokQgZoWc9Gvft29fc3AwAo0ePdnJykrWburr62LFju30WxaHmlRSDUSNCHcj/7SgHAABsPuiL1zZCCKEeqqiouHnzpugaKysrGxub7uVWVVUVG0sOqfT19VVVVe1p+UQYGRkBQGFhoeii6BqFwqgRoXaVXVrvEgUAYB220kKnt0uDEEJI/pKSkoiqQUpPKhrDw8OJSXDU1dXd3d17WjhpjIyMCgsLiX+JRSp8pCgijsR3wyDUjpK9n1plAACYc2LWY8yIEEIDEtU8TZg3b57U8Sud0djYePjwY
SK9du1aXV3dnhZOAhUpSq5UNIwaEZKlMt7TYGsGAEBgWvJiQ6yYRwihAai5ufnChQuiawICArqdW1xcHDEsmkajKWJmbyI6pP6lahkl6xol1/QcPggRkop32ncu0TTtEcfdbqPf2+VBCCGkEJcvX66oqKAWP/roo1mzZnU7N2pm7yVLlihhLkaxKkaqzVpBMGpESJIgZSfbKZQLAK4xuZHOZr1dHoQQQooi1jz93XffdTur8+fP37p1i0hLndlbjqRGh5JBZDs7dwNGjQiJuxXpZu+fAQDgGhey/J3KsjLhpFV0pp4Ovowaoc4oKyvT09PreD+Eehv1ymkAWLhw4fTp07udFTWz9+TJk3tSYdkNkuNjQAGdHbFfI0Jt8W8FeglfQh3rMlJda8RIyogT+bxeLRxC/YOjo+PIkSMtLS0vX77c22VBqD3FxcV//fUXtdiTisaHDx/+8ccfRFrWzN5yJzp6mgociUXRtLz007pGfspev8NXwCssyM6QAQAg4PMFdAajb3+cflFIxBg90xo4Mqb1luekW6hDeMn0T7W1tb/88gsA3Lx5c/bs2cuWLdu7d6+hoaFyzl756NIP4VEp6dxyANAdu/DTNRtWLdbvZBsBryTll9gzaVf+5j4uBxhrbrPMw8d5tmTJ+dfi9wWdyoB31x/f48iUKMKl+ONRpxK5j8sBYOz0hWs8Nyy2IPtGF6VH7jmZr6urLn5QfX29+qTNgav1Rf7e+UWX9u2Jyvibtz7iuCOr7Xl4j07HRHMSr3LLy4EoqpvnMhsWXi1ddebMGSr98ccfv//++93Oat++fS0tLQBgYGDwySefyKFwfU8//QNrLEoJ5WSAjb+/nSFDUJIyxcCeC3CYW+NpJnH99g39opAIAAD0t6S3bOntQiC8ZPovDQ2NVatW/fjjj8TiqVOnzp49++WXX3799deampoKPXXZtYMjp3uLrOByMzhBEYEFuduNO3zc8e96aplHtTmYy4kNvZFQcMDRmFrJe5Tu/8m8UC4AANR8KvHOtbKDDiO9OW0ziQry4xTuWWwIAOU3I6KiuDJK4OEZuFo48o6XHuk/zyuUWHCoa3ueyksOI6xETkIW9VTY1cSN0zr6nKiNyMhIKt2TodPl5eVxcXFE2tfXV0VFpacl6xxZHRlBMSNj+msLtZpWa7r+34fkJdjYO4XpjH5RSIT6DimXjKAofu/Ob3YeK5LyclTUtxw7diwxMdHExIRY5PP5u3fvHjdu3I8//khUxigE/+4mImQ090jmFpYWFyQEsgEAuP5fRtzq8GhB8d0oALD2iEvjFpcW53KCifWhTuGPhH9yt475apkIQ0YA0BKverl7bBMRMnqEJReWlhZcTWADAEAQ2/saDwBg7OLwhASOqLS0OHPyaF11IrvKW740LSpklCxp+r4NHAAAdkzW/Zr6mtKCLD9rAACOd9At7ETTFa9evbp37x6Rnjp16uTJk7ud1aFDh+rr6wFAQ0NjzZo18ilfVxSKoBblfpZ+WtfYBtPcMSGMf5+vbfmWRJ1/n9EvColQ3yHlkqkvP7XVnwPWbJ/Vhlj52Oex2Wx7e/tDhw7t3LmzuroaAJ4/f7569erw8PDQ0FBFDBQoufJzLACAOSc50k4fAMBx+/GYh1puscCJvlCy0aL9CbToxvbc3PvvWLCI1mz9xVtyw65M8eYAPH5ZD8ZMAKi87BYKAK5hyVsmPzS38oYa8SIkusUCgLlfcuRGOwAAPccT3Bh1czcATuqNkmk2+jqs2Y6stgfxb5kDcAFcYz4j2sJ5Dy+EAgB4JHM3FG039+K03R/q//6LCwDsMP/Vs1kAwDSe7b8/OGiKkjrSDSRbtmyhfsbs37+/2/m8fv2aqrP08PDQ0enUSyGePXt2+fLlbr+3UKFKSkqkjgrqT1Ejr7KSLwAGU4fJaFvxS9d33CjeoigQCOh0OgAI+LxKnoDBZDJbe0cJeJWVfAFdR09H6ucX8HmVPD7Q6To64jtQ2QKfV
8bjA52hpyP5+BJUVlYKBHQGU4dJdaaRVkiRzyWg0xlMHaZ4eQQCAZAnJHZj6OhJ7CT8PFJzQKifknLJELWOWlJ2BgAQ8Cp5fIGAztARvw5aryMBr7KSuIu03g74vEoeX8pRANKv5YGiubn5yZMnqampzW01NTU1S9Pt9c7OzpcvX87LyyMezzdv3vzggw/YbPbBgwfl+oH4N0+eAwBw3WrbGh4yF7oFQqw/cK88Lbu1fuQUDoBHDDdyNTGdliB9p+08/wywPlyT7skEHbO2bw3V1h9FZCJ85DBt05KvGs6aZszk3y2SLIHg0RV/AADw/WwutZJhZhtsDlu5kJv3L0ib+fXWj4GxAADsDUvJcFJ93OLk5Dmz7KYxgX9M9gfm3MvngwXxh1lfS66UY2tWU1NTcnKynZ0djUaTX659SENDw08//USkdXV1Z8+e3e2sfv7553///RcAhgwZsm7dus4fqKura21t3e3zKs5vv/0m/f97i2wCgQAAbt++3c4+ylHBTXA1by1zYFxCoDUAQFhuRUtLS0s91xUAgJ1bQ+7PjXEFAI/DyWmHfaijrH3iKsSzYifcrxA9UWNpbrCr6P8/88CE3MYWsWzT0mJaswVrv6ul1C4tBclhosez/eJKiQ0ShWxpaeFywtr+sVgHJ3Bb8yIP8UjLTfYT2c8nprVILTUFYR5tThiXWyq/L747vv7666VLl/ZuGSQ5Ojp+/fXXSjtdUlLS6NGjlXa6x48fA8DLly+VdkZlaHPJ1MS4St69fLj1xK6NuQnBba5b1+Bc6qoUXkfJV5N9Wq9965jcipaWioTA1nzZgRzR24HMa3mgWLZsmeR3qkwMBsPFxUVLS0tOH6jmMBsAgH2YK7q2voDsanaYW5rgQSTNk4sbW1paargxxLIfp1BKfhW55O7smBqJjTXcwwAA1odFN9XkEu+Rc81tc0A9eV5XKfm01HOJjeaBaVI/VAwbgHrYCRXEkUWzDuRUtLTU3E8g/1Z9OI3ScumGs2fPEp1QTUxM/ve//1VWVsop4//P3tkH1Hj+f/x9OOkcnKiWtmJFsmw6Wc2PbZ7KwwrfTqNmPTAN5aH1gGn5EsumJVQkytdq0wMKHUYN1bCZZjLHLKMmo1AUnTinnWPn98d1ujudTg96luv1B9e57uu+rs9d3ff9OZ/P9fl8uhBRUVHMX6Ofn19rpho5ciSZx8nJqfln/fe//50xY8bjLsm4ceM2bdpUX+bnQGuUFalb5xlqtMYL5FlRqzXu1PCGaQCPmhePQlF+TlDTa+tRqzx6JeU3MS0/jLxOlM8RALAVCMgE/NNEqnpCnlOZjW9b+zbz2HlBOUJyoaHLCDlNFiyPr5HYViBQzhFyrn1/H01BtUYF1RrbhDq3jFIhqIvybjoXVXOML1C9cZW3dsP3UX084pUKR2P3cndh9uzZzf7BtAt9+/b18vJqQ62RPA8FURfqdCu/NiDqQrmi/LTyb8UjSaYoDSPPTFWlUFYsjN8ZHx8fFeJV81AWZBRo+MVr1hqVfzaCc3XPyCdfemyjyhXqFAmVNojUIo36nmatUaEorf864ntEaZ6jRRw4cKBHj9rIh969ey9cuPC3335rswU6mydPnqjmE22NqnPixAlmnjNnzjT/xOdRa+z60TDyY5FrAAC2SaJShUJRfCG1vjG3wT1O/KALpRKFrFj1BgvLyJcpFAUZIQCAxJw/yOZheWa4F9lfnFUkyd6zJ1tRnupvCyDOPbZQbfc93/9ccaVCITkd5QEAosCjV6UArudkAIBXUrkiOz09W1aeL0wKM6vZlKUqpLwk04skBfSKL5IoLmVfkpXnEwNq4qIVZyrUr8M//lylQiEpPk2uIzjuhBSA+Fq6EADiReXZ6enZlyQFp4UZ04c09MOgUJ4vVG4ZnneaTFJKXvm2WcUSmUQikaRZ8yAvyfTyEwIQhGVJLqXv2ZNdLkrlA0Bc9HeFahP6J12QKRSl53bWdgnC8stlCkmB8u779
ix5HDR+L3cPWCyWtra2vr6+gYGBoaHhK6+8YmxsPGjQIBMTk8GDBw8dOnTYsGEWFhavv/76iBEj+Hz+yJEjra2tR40aNXr06Lfffvvdd98dN27chAkTbG1tJ02aNHXqVHt7+2nTps2YMcPR0dHJyWnmzJnOzs6zZ892cXEZMWKEqgoCwMnJ6c8//5w9e3abez8NX+pd57OkTAQAfCN9LnTHbU71B4BEdy3WgEARAH5GjEftX5rk1hrPRZ6enn7BTJyzjanes/7iB/eru4uq7JYIAN/KtN6rquTbNZEA4BH/3rNVujfw3lOg9k3K2cv52eZolIcPHyp3RwEAnjx5smvXrpEjR44fPz4tLY0YlZ5rduzYUVZWRtoDBgxgjIUtgMnsbWNjM3bs2DYQrivTiBbcJWyNNXYCxgagUChkRRlEcWRsjeoeaqIk8oPya+yIlReUhuiwrOKaaZRfNP2Jb0Jp2EBQhoobqqYzPl+iOm2teVL1W6xCcYHYPGxD8ut/Na0rZI3ZUsXSqVAoioXk220YMSXWXLt/jbFTUfuddWe5QqGoVBpHQ4T5ii4DtTUqqK2xTah3X2voUShESiNkSLHKqco70SNJolC5j1KZ26Rc6Xu2Dat9HJwmXyODCmQqM2i8l7sL7u7u/v7+7b3KwYMH1fIMW1panjp1ihzNysrq169fGy3VkK1R+RivMdeVqtqtVd8sCoVCoSg/lyHMyMhITdoZVLuZSXC63u6Exm2NdT3UNQ988tyus1jNi+lcfStknYtStzWWX2C2SQk8aq9HEHW6DT3URkZGv/3224IFC3r3rquIA0ZGRiEhISUlJW20Wkfz+PHjAQMGMJezcOHCFk+Vn5/PfPNJSUl5pnOfR1vjcxA7Qb76j+LXPnfYJhPnCpDToONaiWCJq0XNBnbe4DdtgRx4TH6H2YzMGTAYEOFS0QPABNAi3wJDA5fh9EsSCRlz/wYAoPKJBOAw01oy++I5ppMESKyRZPBoO0CInODhOsH+UanezjMsGsgt+6RaDIAf5D5c9bjRuPm28MtB5sVbK8cxlnOPOTNrI+5MbSYBiSBxTLwhDgIIhQgWDA8W+Cct9545zqLbbdlvMyoqKgoKCjpmrTt37nTMQpQai2RwYIDkJUgAgIv7524AQLG49r6Fx5wZzH2kO9pegByhYLZD7eOgP3mF/EGiZZt/L1Ma4tq1a76+vkypDAD9+vULCQlZunRpz54923Fh7bqGvvJSEuiso0Xedwb/+dhrkZCkZeS7T1erMq872t6RtJzdvOe6rBouCAWEm1PyxvlaN1sCHuqIIC8rLQYAY7V4K7EwlKSWDPEY3ayQ2xpK1k+0iQQAQUbBHnsznnzr5VBPfrAQQr/xW94sVXl9tBYrK6tdu3aFh4d//fXXO3bsYB6hJSUlwcHB69evd3Z29vHxeeedd9pqxY4hOjq6tLSU+Tht2rQWT7VlyxaFQgFg4MCBzs7ObSBc1+Y50Bp5AGD75iBV076sujlnVqsEk3H7GAOAGLLa1wiBBGRKiy4rq8iJEkPrZWDV1lL5QVU3GKOmO9q3IEM61CEQQKSfS6QfPKKy4n3t6v2UK3LThQAGmxirhXo+ygEAK6N+Kp11ZJZB9dINvPfkP1oyOzBRBGGkuzAS8Mgq3mVHX2/1uHnz5tGjR5lwuQ6Aw6G/hQ5A+mee8sZNjAxVP6ijrXJ/1bmPevfjAZrvZfK6b/a9TNHMjh07/P39//nnH/KRxWJ99NFHYWFhqgaeNoc8HG9cLZbDkvlNVdwS5QCAgE/8w/LC6DVMJm9R4IbDkyMcG/q1WjgG7bQNXZSDykcPn0UQ0e17cutab7H495wcAIJRdaq7SAuPeJLMjqlujacEUkN8+UiwCADiRXvszXgA2LqWa9Lyy7WGRwJnL95C22mNhP79+y9btiwgICAzMzMqKur48eNET5LJZCkpKSkpKVZWVp988omrq2t9q2QXpKqqKjw8nPnYq1evyZMnt2yq+
/fvJyYqH0F+fn6qPv0Opk+fPo8fP256XKt5Xp6BOX/cEo8zYO44Le22XoBjauEBJAI7LxR/ZM6V1NmzwdXVba4GYGa/UiHxzD1+MFSwSAgk+k0aZKosCaACb4StLXJyhD9elnpbqkzNHmAL5ODGg0fNlZtnsXLPJc91uQd3hy4KFQKJk4yHFSnWdFDprueHV1991dDQsMOCRn/77TemSAClPeG8Zu0BJMIjvjhmFrfuXisur9n3rSaady9TNBMUFMSojNbW1tu3b9eY+61N4Y2yE0AoFEUeKgi3r6msJ8/dmwQA/HeI5SE3ZnmoCICtv79OZKRQFCmI+bDctyFTn7w4jxQX1e6jeUDdNFA805ECQAjR0Z8KHE1qbNslZ3bnAIDNm4NUp/4hmmQR9/CeYYamUXlZK7/sCPjmKloo25DsvuRpt1dJEhaL5eDg4ODgUFhYGB0dnZCQ8PChUpm+dOnSggULVqxYMX/+/EWLFjHZ3bsm27Ztu3//PvNxwoQJLS5ZtH37dpLZu2/fvgsWLGgb+Z6RPn36qLXbVX18XrRG5P1VCmvlHSIt/O7bptzTz4xM6Qq/elvGsW6dsY5jMNrRO11hv97KNFiEB1WSeiPYBgN0ACDxfFG8W23d0JtnFuUAgN0o82da0MBstPeGdPvx600dgoGcvyvWmDyTu+MFgMVijRgxYs6cOR2znJ6eXnJycses9eJR+VhFOZRViwEg8ZpsD++ZDDbNoul7maKZCRMmHD58WE9Pb8OGDQsXLlQLhWknzG0dACEQtyR4+oEQR122vDBzi0OkCIBgiYMBIC9MG+MnBOCRtCvCjcuNFIYCfmPCp8s2mLFxOXYOP6Z3anTApFEWuhzIxSX7Q5cQs6T5QMb/I5dK5WBzpE+qAaCyWiyXa8nlYLM5bDZ4rzsJIBQizn3JFOsDzha6cvHVLR4CEQB4TB+lYgIsO70xUgTANszHWtP7Ri6VygE2W1xZCQDSx2LItaRysDkcLWIph3BF+OF9QY4GbAAozN5DjO0dUBrGzMwsIiLiyy+/TExMjImJuXTpEul/+PDh5s2bt2zZ4uDg4OPj895773XM7/2ZEIvFatm8W+yerq6u3rFjB2kvWLCgf//+rRWupTx+/FhVd2xXutxvVB2OuYMHAMS5LM8sFAMoyUueNtQlp80X4pl/4AEAkQK/w1eVcVWQiwtzDyck50qbNYU0c33AxrTcCvJKE1eSN8y9+0/qDzV9xwEAEDn70+QSKQBIy/JWOZJNzV5T+c0qfCEtPOztvTH3pjLiWlJVDgDQ6fXcfBegUJ4F5Ve7nINZhYC0rKRMCrw+1QkAEOq5/nBZjTYpLivMTEg4U9K8G1cDz3AvUzRy4MCBn3/+uaioyNvbu8NUB56lW5IHAOSECvS0WFYsLbLHAAjaPN8SKNmy0AUA+CFhbmaAUZAyvWLo8phcALLqYojiXMYP1+OyrKystHSM3UNzAIAfttaNGA6lyXO0uFwuV4ulrHad42espcXlcrnOiWIA4M36ijgZclyG67GsrLR0hgfmAECQcJ2qdpibspVM7e9hU/9CpFcTtLhcLperpWXslwMAgeONWVpcLpebeFnMMfsPCZHJCRYM0GI5zZljZ8UaOolskfRa/5Fl/Qnbg969e3t5ef3222+nTp364IMPmLLLCoXi2LFj06ZNMzc337JlS3l5ecfI00wiIyPVRJo+fXrLpkpMTLx37x6AHj16fPLJJ20gXEvpMJURz4HWCJ5DAIkyEzoM1WGxWMY27m2lMtbdHMmbuS6VLCQYPoBlZedkZ8fS0hk6RuDpfqp5FgbJtbTIQJcxelosOyc7lg4/VPkF8/X6Q3mWbiTvqyjS3ZjLsrKz4g6wIeNDstZaNKn26QCA7FFRXFzgGFM9lpWdnRVruAupQfWxFS22RumW8F4mL9hIl6EsFneA8epiOTgWHwr9+ah5g9o5OdlZsXQGDHXw9PzhVktMgzLgme5likbYbPaYMWN4v
A5+GPHc4ouTgpSJ1sgGdYF/VH7lBjM2pJePEAUuPtGfmKV51vPJH48wPbcC4M/ZFV+TplEkImfzvcJSiy+sZMzYvXqrFJyos3Iv5f8WbqXnkjxqZiEiRGXkb3BUdUOLf0sXAuD7r59qpOlxr9WrwUvUAsCbl1acGqZM9C1MTMwRKa9UVL7dssM3VI8fP37fvn1///332rVrjYxqLf5//fXX8uXLBw4cuHDhwosXL3a0WJp49OgRkyWHYG5ubm7+bM49hshIZaHwmTNnDhnSaTnvmKhnpqddlciurzVC19q3ICNK5U7lR2WcjvfiA9BWetjrbePQtO2xvt1emzzQanaBcMycKwuy/AV8ABDlCHNyAPBtPaKEDryGp62BDehOj9hJEnrkCIlmK4g/V+xsxtEkJM85tjwjSpk8QURueluvpHPFa2pLTjWxPYX3+vSdXgJyvvKpERRfvN2RRmFQugX1//6NfERJtela+QPZAMBxjPgxa6ey7EuOUJgjAsD38I9yGsZrYJ4aVO5oLeUHnpYWmrqXKV0YtpHbhj0KSWVpaWlpaWmlRJYe4UuCUDiW3jKZRCZTzLNkdFm2Y8QlmUQiO+6rC7B1zeatib2kkFVWlpeWlpaWl0sUl2JXOqvodRzn2Euac6jscWMmNRjttueSgsxRWl4pU6T72qtVnuZ5Z8skEtmlCM2Pa46ZW0O5WuaRi2EbOa+MVSgk5WSV0nKJTJEe4WupuUpuR/Dyyy+vW7fu5s2be/fuVc1ZKJFI/ve//1lbW48dO3bv3r3MbtdOYcuWLWQvJpMrp8WGxu+///73338n7WXLlrWJeK2nvgbZ5rAUNXW76/P06VM2m33x4sXWZL9sO6QVZWI52DxdXQ6pLy0FhyktLZdLAY5K+JJcKq/zmZSihVqEU91JmF5S8RrsutWrNU9bp1Y0gJqC0ahf17aekGTGigqxHGCzeRpibjSdon4hUnGFWCoHODyDrlAqNygo6Pr162lpaZ0tSB1cXFyF0fV0AAAgAElEQVSGDh0aGlovzLZ9OHr06IIFCzos/05RUdHgwYPv37+vr6/fMSt2EBpvGfIo0FRtuqKigmwwUy/IrmEeuVwKNqeJUQ3ey90CDw8PAwODiIiITpQhOzt75syZTFAFpetw5MiRRYsWFRcXt+z0y5cvb9u2LTk5WU2DMTQ09Pb29vLyMjY2bgsxn4GKigpTU9PKykoALJZS+Tl+/PiUKVNaMNt77713/PhxAKNHjz537lzLRFq9evWlS5f27dvXstPbFXt7e4FAsHz5crX+5+hJyNE1UNWJ2HUSm7DZavoSu54uqCkknq0xOwqbp2vQgF+l/rT1J+bp6mo+u56QZMa619X0KerrcXi6HOqTpnRHNN4y6o+C2tG6ug0kHNEwT7O6GryXKRRKo1haWsbFxTGJHq9fv0767927FxISsmHDhvfff9/Hx2f8+PEdJtKmTZuIyqilpSWTyQD06dOnZQJcuXKFqIwAVqxY0RqpHj9+/Oeff7ZmhnZCItG8w+c50hopFAqFQqE8N/Tr1y8gIMDf3//777+Pjo7OyMj4999/Acjl8tTU1NTU1DfeeOOTTz5xd3dvce6bZvLgwYNt27aR9pAhQ4iiNmXKFG3tluTx27x5M2mYmJi8//77rZHq9OnT//d//9fiGdoPhUIxderU+v1Ua6RQKBQKhdJesFgse3t7e3v7oqKi7du3x8fHP3jwgBy6cuXKokWLVq5c6enpuXjx4tdee62dZAgPDxeLxQA4HA5TFaZlOXdKS0uZxGr+/v6tKXSkr68/aNAgjcpZp3PkyBE9Pb36/VRrpFAoFAqF0u6YmpqGh4evX78+OTk5JibmwoULpL+ysjIqKmrr1q1Tpkzx8fGZNm1a29acLCsri46OJm0nJ6e9e/eStoODQwtmi46Orq6uBqCjo/Pxxx+3UrZhw4atWrWqlZO0B5cvX9bY/xzEUFMoFAqFQukecDicjz/++Ndffz179
qybm1uvXso0QwqF4vjx446OjmZmZhs3blQt39JKwsLCSFBOnz59DA0NSefIkSMHDhz4rFNJpdKdO0maTyxcuFBHR6fx8e2Bqampqamp2kfVnnaFao0UCoVCoVA6mrfffjspKenWrVvr168fNKi23OLNmzcDAwMHDhzo6en566+/tnKVu3fvxsTEkLaPj88PP/xA2i1zT3/zzTdlZWUAevbs2bmZvYmaaGpqWlRUVFRUBBX1sV31SKo1UigUCoVC6RwGDBiwevXqGzdupKWlTZw4kemvrq5OSEgYNWrU6NGjExMTiVO4BXz11VckHJjH43l4eDD1D1uQqVGhUERFkbIjcHFxMTHptKr0jJpYv7O9ofsaKRQNlORlJqYdOnvu+o0HD6A/2M7ew9/X2aQLpMOkULoTd+7ckUqlAwYM6GxBKOrI5fKnT5922HI9e/acNWvWrFmz8vPzt23blpiYSIJXAPzyyy9z5swJCAjw8vLy9vZ+9dVXmz9tSUlJbGwsafv6+v7888+kraenN3r06GcV8tixY/n5+aQdEBDwrKe3FUQ7LCoqIoZGRnesb1wkA9p2dao1UijqXE32Hu4ep9IhEuUIIwP9C2QRZvSOoVDajn79+ikUiq4ZDfCCc+XKlfT09I5fd/jw4TExMWFhYQkJCTt27GC0tPv372/YsCEsLMzR0XHp0qWTJk1qzmwbNmyQSqUAdHR0li9f7unpSfrt7e1bEHDDVCMcO3Zs10mXo6YXtoemqAp9B1Ioakh/z4gD4BWW5DN7oj7uJq72DEwUAZHRx3wi6hSTpVAorYLL5XK5XH9//84WhKLOkSNHjh071lmr83i8Tz755JNPPjl58mR0dPR3331HDJ9Pnz49dOjQoUOHLCwsli5dOnfu3EbiUW7fvv2///2PtAMCAvr06ZOVlUU+tsA9LRKJsrOzSbuLlBDUqB3WVyIbGdwC6L5GCkUNzntfiPKLJbEr3SxNjIxMrFfGxAkAAJeKHnWyaBQKhfIiMXny5PT09Bs3bnz22WcGBrXFn65evfrJJ58YGxv7+PhcuXJF47lffvkl2Q3Zv3//gICAU6dOVVVVAejRo8d77733rJJs2rSJNIYMGSIQCFpyMZ1BUQ1tNSHVGikUdXgmlhZGKnsYeS+RVA06LSkiQKFQKJRWMWjQoNDQ0Fu3bn3zzTeqruGqqqrt27ePGDFi0qRJBw8elMvlzKGbN29+/fXXpL1s2bJ+/fodPXqUfBwzZoy+vv4zCXDnzh2mWnRAQECPHl1Rd2KiqpmP7RFG3RWvnELpUuQlbCSbHB3eMe9kUSgUCuVFRVtbe+7cubm5uefPn587dy6HU/vdPjs7e9asWYMHD/7yyy/v3bsH4Isvvvjnn38A6OnpkS0QjMO9Be7pbdu2kdn69+8/b968NrmctkJVO1SLj1FttxVUa6RQNFCWdzg2NiEhdqu3k5WNZxwAQVjGR5a8zpaLQqFQXnTeeuutb7755vbt26Ghoapa0e3bt1evXv3qq686OTnFx8eTzhUrVvB4vOvXr1+/fp30PKvW+OTJEyYQ29vbu71LZndxaDQMhaKBWyfXLAoUqfbY8E1p4h0KhULpIujr63/22WcrV648cuTI9u3bT548qVAoAPzzzz9CoZCM6du374IFCwAw7mljY2MrK6tnWighIaG8vBwAm8328fFpy2toCxoKf0GnZN5hs9lvvfVW2y5J6fYMHTq0s0VoLYNnxQktKnqh6ubVvJjAUBEQ7DA8J+x09spxnS2aBrS0tAAMGjSIxWJ1tiyU54bq6uouEgpKobSYHj16CAQCgUBw7dq17du3f/PNN48e1YYtVlVVWVhYzJ8//+zZs6TnWUvCqGb2nj17dguKEDbCP//8U1xcvH///jaZbePGjUx7//795GOLJ793755MJqvf34TWKJfLd+/ebW5Ot3NRmktsbOzDhw87W4rWoms2Wpljx9HZ23fuqtHDQ0XICdyct3icdddzUxsbG
58/f57UP6BQms+IESM6WwQKpW0YNmxYVFTUhg0b7OzsfvnlF6a/vLw8PDyc+fisWuPhw4evXbtG2m2e2fv333+/cuVKYGBg207bJigUCo3B6U17qK2trUeOHNkOIlG6J8eOHSNZVbsPHIug+KhQGz+g8qFUDl5X3NdBfQIUCoVy+/btCxcukDafz//jjz9UA6sBLF++vKioaN68ef3792/OhExm74kTJ9rY2LSttNbW1j179mSis7sU9vb2GnU/Gg1DoTRN8VXylUtHi90VVUYKhUKhAPj8889JPvBXXnnl3LlzN2/eXLNmDZfLZQb89ddfAQEBxsbG3t7eIpGo4ZkAIC8v7/Tp06RNt3MQqNZIodRFmjeHxfLemHa1pAIAIC/JS16iLDBoaND13NMUCoVCAfDHH38wdrvPPvuMy+UaGRmFhITo6uqqjXzy5ElcXJyVldX48eP379+vcQMfgM2bN5OGubl5C/L1dEuo1kih1EWGYiAu0GW4sR6LZWXF0jK2cc8BAIRkrbWgpkYKhULpkqxbt+7ff/8FQEyJpPO3334rKSkh7fT09Pnz56uaHs+cOTN79mwTE5PPP//8zp07qrPdvn07NTWVtLtsZu+Oh74DKZS68Pi7suI3bo2IE4oApQODb+u1ZtNaZ2ujTpaNQqE0RUXhmV3RcZnZogcA9AdPn/2xz0eORs3MmyUuyTyQeCjr7HXRjQfAYL7dB17+buNM6o2T5iaHh+7PwetL9mxwrueBqDiTvCduf7roxgMAg8dM/9jbx7Hm6XEzO3bDvqv6+lz1kyQSCdfq05B5RnVfy9LCzKB1SfeBlyzdQ1fa116HuDAtfrcw/ZzowQMQUT29P7B7cb/YXr58OS0tjbRXrVqlra2s5cXk3DE3NyfR1uHh4V9//fWOHTsKCwvJoTt37qxbt+7LL7+cOXOmj4/P2LFjAWzdupXYIPX09D766KOOvp6uygv7B0ahNATbzG5erN287VKxWCyVA2wOT5dHczVSKM8BZblbB4zxU+kQiXKEoTEhBRfWmDX5upNe9tbhx9U5WSRMjDyfWhDhbMZ0iguzg2dOiiRfKCtny9UmQdlWpwF+wrqTxIUGCYs2OJoAePBrTFxcQ9vpvLxD5tX9blq2daaDci3bUcGM1lhxxklvvMoiSlH3R51L9x3d1HV2T9auXUvyNb766qskRyOhfkkYXV3d5cuXL1u2LCMjIzo6OjMzk5wok8n27du3b98+Pp+/YMGCuDjl38LixYt79+7doRfThaEWVwpFM2wOT9fAwMDAgKqMFMrzgfTyMqIy8r0yREWlxQWpIQIAEAUvj8lr8mx58eU4ALZeSVmi4tLiC8Iw0h/pEl1YoxvmJQToDK1RGQHoqJteLicsIyqjV1RGUWlpwblUAQAgVOCXKwaAwY7RqalCVbKykvjKs/W5dacrTNtQW21AR7vmoDw73EcIAIL40/mVksrSgtNBtgAg9AvNEzd5od2Qixcvpqenk/Z///vfXr16kfaDBw9yc3NJW21jIovFmjZt2rFjx65fv75s2TLVvY8ikcjX15fkfdTS0lq6dGlHXEPr6NOnT8csRLVGCoVCoXQHSs6mJAIAX5gRa29pYmBk5rxmT7wHAAh3nyxp6nS2mYPoQr4kO9bNztLIwMjaceWFKKLy3bivzIVa8aNnJACPqAzR6SgAqFQXId0zEQA/KCPW197EwMBstPNeESltJzx+vgSArsU4Z2dHVezesSBao0f83Dq+cHHecpdIAAIvQd21JNf/EAEQRAXPG2fB4/AMzMYFbwp7lh9Vd4MxNJqamnp6ejL9mZmZJKS6b9++48eP13iumZnZ5s2bi4uLd+3aVT/XjEwm+/jjj48ePUp2THZB+vTpw6iMqu12gmqNFAqFQukGSH/ddxQAPAKn1np5edM9QwBAdPZWWZ4Ti8VisbwTLtcclWevt2OxWCy7WDEA6FpaW6h6FvoZGZJJtGpmm5qVca6gco+vvXl/7
foSyAvPBgMAAuZOZDo5llPD+ABw4fe7GuXO+yYkEQAEPrMsVPuzI1cIAQjiowI+0Hii8MpVJjWu5LGyoTkYuEVUV1d/+umneXlNm2k7l19//fXIkSOkvWbNGlIri3DgwAHSmDFjBmOA1AiXy12wYMHFixd//PHHMWPGqB7KzMycMWPG0KFDN23aREoLdjUeP37c9KA2gmqNFAqFQukGyO7cEwEQjLVU1fx4JmRLovC3u4M8vAAgztMjs0QOQHw5cVJwDoAgf3sNObUq8ja6xAGAYJK58jDbws5+tBkPDShnkkcPAAAefGNVEfSGjAEA4XmRBu+x9HLsIiEAfojvaBUhpIXJRLakqHnGUDuPN/kDciXu09YfrgDEV9NmjQ8EAP+PbdouO9iTJ082bdpkY2Pz2muvrVu37s8//2yzqduU4GCiq8PMzGzu3LlMv1wuz8khCTDU3dON8O677zJtJqQGwI0bNz799NOBAwfOnz+/q2nSHeaeBtUaKRQKhdI9UL7hq+todBxjSw9lN9v5q9Nkn6ND4H45ynZ4eAKAID7IscYzLC85nBCbkJCwdb23lZ4N0RkzNs9qriamNHKJ6+qUnBFvewBAcWW90BncPP41iblYM1fVf1oRt9AdAD8ky80EknoqqpnbFzs9ACAnWKDHYukMd8kB+B5RReGObRXiKpPJmCpf165d+/zzzy0sLKytrTdv3nzr1q02WqQNyM3NzcjIIO3g4GC2SiGGU6dOkfK2LBZrypQpzZzw1KlT586dI+3vvvtu//79qq5tiUTy9ddf29jYvPPOO8nJyf/880/js8XFxY0ePXrHjh3Nv6Jn5XENTE+7KpFUa6RQKBRK98HwpbrhrpIyEQDwjfS50B23OdUfABLdtVgDAkUA+BkxHrVKoeTWGs9Fnp6efsFMnLONqV69LDlNMLifVp3PZbdEAPhWpvW0z5Jv10QCgEf8eya16s7NzHC/HAAe8f52DSxh4L2nQFC3y9nL2aTt0qLIZLK+fftaWNRxml+8eHHFihUmJibjx4/fuXNnWVlZm63XUhhD47Bhw9zd3VUPMTl3xowZY2ho2MwJmRKCb7zxxuTJk11cXE6dOnX58mVvb29Vbeznn392d3cfNGjQmjVrbt++rXGqJ0+eLF269JdfflmyZMnhw4ef6bpaRn0Nss2hWiOFQqFQug/37j+p85nbfzAAiEoeSACYOa/aqaJtecQn2qsmSOQNi8sQZmRkpCbtDPIgMSrBw/WczzybdnTjSV3rYP8BfACiS3fUPM0VuWnBIgAI8xHUKpTSvNUOoQCCMsKseQCgpaUNADratbpoRV4AayjJvCPwUF5P8Hhjp61n6pszW0bv3r15PF5+fn5eXt6nn3766quvMocUCsWZM2cWL15sZGQ0bdq0PXv2VFaqhwV1DD/99NPx48dJe+3atT179lQ9ymiN06ZNa+aEN27cYM4KDAxk+keMGLFz587i4uLIyMhhw4Yx/aWlpV988YWpqamzszPjDWfgcDgmJkoz9rx5827evNncC+vC0HyNlBcChUJRVVV1967m3ehtTnl5OQnoo1AoHY12XUNfeSnRaHS0yPvO4D8fey0SErcw3326Zd2TdUfbO5KWs5v3XJdVwwWhgHBzSt44X+tmS8BDHRHkZaXFAGDMq/vGFQtDSWrJEI/RtWlfru6NSAQAjNB9kJd3C+j9UPQzANy4dDYvr3/vl60tsH6iTSQACDIK9tib8eRbL4d68oOFEPqN3/Jm6cpxBs0WtWnefPPNN998Myws7KeffkpJSUlNTWVMjHK5PCMjIyMjg8PhzJgxw9XVddq0aRxOx6UqYwyNw4cP//DDD1UP3bx589q1a6Td/E2N4eHhJOZ6wIABH3ygHoTUr18/Pz8/X1/fEydOREdHM4HVT58+PXDgwIEDB15//XUfH585c+b07dsXQI8ePZKSksaNGyeTySoqKmbPnn3mzBnVYB0Wi3X27NnJkye35OLbmStXr
ggEAg0HFA0jl8sBXLx4sZExFIoan3322axZszpbCnXMzc07+JbjcrmdfdEUSlfn5MmT/fr1a6PJKokRke+fIVPpLT9HUtIILlQqFAqFQlYQxK+9T/n+Qpnm2WrmtAUA25As9QOinQBgu7Oyfif4wqI6IkTZAoAg6oLqDJKCJCKDV2qBan9Nup8G4EeVKFdBvEhlcVm+PzSs0mIOHz5sZGRUv18mk2VkZMydO1dHR6e+dDwe76OPPsrMzJTJGv25tgWnTp1i1t27d6/a0aioKHJowIAB//77b3MmfPjwIdH2AISGhjY5/saNGytXrtTX11f7Iejo6HzyySf5+flkGOPyBuDv7686Q0hISO+uSp8+faKjo+tfNbU1Ul4I3njjjYkTJy5fvrxjlvvhhx+YL8EUCqVD4I2yE0AoFEUeKgi3r6msJ8/dmwQA/HcG8QAgN2Z5qAiArb+/TmSkUBQpiPmw3FfF1FcHeXEe8TpqNxBeUFdx4pmOFABCiI7+VOBoUrMjsOTM7hwAsHlzkOrUP0QTddbDe4aZ6iT9htl5eQ1WqUXCfXLjaJxQBPC9/Mf0fmWYlow4OgV8c5V9kmxDsvuSp2ZqbWvYbLa9vb29vb1UKj169GhKSsrRo0eZuBmxWPzNN9988803L7300gcffODq6vruu++yWKz2kIR5xo4YMcLFxUXtKONo/s9//tNMAbZv315VVQWAw+GoVpdpCFNT07CwsM8//3zv3r3bt2//9ddfSX9lZeW2bdu2bds2efLkpUuX+vr6njp1SigUAoiMjJwwYYKTkxMZWV1d/frrrwcEBDRHvA7myy+/ZH6tqlCtkfJCwGaz9fX1X3vttY5ZrqCggJa6p1A6GHNbB0AIxC0Jnn4gxFGXLS/M3ELq8QmWOBgA8sK0MX5CAB5JuyLcuNxIYSjgNyZ8umyDGRuXY+fwY3qnRgdMGmWhy4FcXLI/dAnxZJsP7FeziFwqlYPNkT6pBoDKarFcriWXg83msNngve4kgFCIOPclU6wPOFvoysVXt3gIRADgMX2UiuO47PTGSBEA2zAf67oeXTN731j7Oj3yq8PihJ4QBERFzOMA0sJkAIBwRfjhfUGOBmwAKMzeEwoA6kl62g8OhzNr1qxZs2aJxeJDhw6lpKScPHmSuCgB3L9/PyYmJiYmZtCgQR9++KGrq+ubb77ZhqtnZWUxtsZ169apPW+lUumZM2dIu5nuablcvnOn0og7f/78l156qZmScDicefPmzZs3Lzc3Nzo6OjU1tbq6mhw6efLkyZMnX3311Xnz5uXl5ZHYc09Pz5EjR5qampIxenp6qrl+ug79+vXT2E9fbBQKhULpDvAs3ZJIPppQgZ4Wy4qlNdSBBDQEbZ5vCZRsWegCAPyQMDczwCjoAtESQpfH5AKQVRdDFOcyfrgel2VlZaWlY+wemgMA/LC1bsRwKE2eo8XlcrlaLGW16xw/Yy0tLpfLdU4UAwBv1lfE75zjMlyPZWWlpTM8MAcAgoTrVLXD3JStZGp/D5smr0siI1pINalQwzH7T7wAAHKCBQO0WE5z5thZsYZOIlskvdZ/ZNnANO0Fj8ebO3duRkbGnTt3YmJixo0bp2rbu3XrVnh4uLW19Wuvvfb555+3VdLHtWvXkoaVldXMmTPVjh4/flwikQBgs9l2dg3FodchOTmZaHUsFsvHx6cFIo0ePXrPnj23bt368ssvBw2qtSv//fffISEhd+/eJT+Whw8ffvDBB02m7GkEU1NTRulkPqr2tCtUa3wW5FKptLEANWlhpreTU8DGTA1WXQqFQqG0Lzy3+OKkIJKfESR1jsA/Kr9ygxkb0stHiAIXn+hPasfwrOcL/fkAhOm5FQB/zq74EC+y6VEkImfzvcJSiy+sZGrN9OqtsimyzsrKuiM8C7fSc0keNbMQEaIy8jc4qrqhxb+lCwHw/ddPNWq2x6+ScQ7y5qUVp4Z5kQ/CxMQckfJKReXbLTsuFkWdl156afHix
adPn/7777+Jpqh69Nq1a+vWrbOwsLCxsdm0aVNrkj5+//33P/30E2l//vnn9R3QjHt6woQJDdnM1Ni2bRtpCAQCtXxDz4SBgcGqVatu3Lhx8ODBSZMmMf1koydpnz9/vvVeaaImmpqaFhUVFRUVQUV9bFc9kqVoONLz6dOnbDb74sWL9SszvoDISzJtjB1EwE5Rpbel5pyv4rxYHZtFsI0qz/ZtYJtM9ycoKOj69etpaWmdLUgdXFxchg4dGhoa2jHLHT16dMGCBXfu3OmY5SiU55SsrKxZs2aRVMxtiVRcJpYC4PB0eZxatUwulwIcdl09TS6Vgq3aKReLxVKpHGw2T1e3xTqYuKJMKgfYHF1dnibFUC6VgsNp5SYxaUWFWC4HwObp6rZ2srocOXJk0aJFxcXFrZnk+vXrycnJe/fuvXr1qtohFos1duxYV1dXZ2dnA4NnC/p+++23SSJua2vrCxcu1B8wePBgokht2bKlOfpZVlYWE8h8+vTpcePGPZM8jZCfnx8eHr5v374nT56oHTpw4EBeXt758+fj4uKeaU6iKTJKIaMyqnaqHmoZrq6us2bNqh8M8KLbGkvOJK9atSrhTNNZlCR3rymTvjZS5lO5C1mbbhelUCiUToPDMzAwMDAw4NXVpNhsdZURAJuj1snm8XQNDAwMWqEyAiBzGGhWGZXLtmJ6AkeXrGLQxipjW2Fubr527dpGkj4uWbLEyMjIwcHh22+/bWbSx2PHjjG1W0JCQuoPEIlEjLbUzE2NTJizlZVV61XGI0eO+Pv7CwQCS0vLUaNGxcfH11cZAXz88ccVFRUtmJ9cHfMvY1asb1xsD3Njl/xD60DuXtwfGiq0fdl53jiTxkfy+M6pUdJ8ab9Rg561TgCFQqFQKC8uakkf09LSSktLySG5XJ6ZmZmZmcnhcKZPn+7q6jp9+vRGkj4yOxpHjRqlUSlk3NMmJiaqGbkboqCgIDMzk7RVM3u3jJMnTzo6OjZn5KNHj44cOTJ8+PBWrqhmUCRGx1bO2QgvjtYor6iokMvZHJ4ur95fo049+6FcLiflLMUVZVJwdHV5bLaRs+9KDRNLxRViKXFDcBvMeCAXV4ilcnkDzorGZKNQKBQKpRtAvNJjx47dunXryZMnU1JS0tPTHz16RI5KpVKSK5vH473//vuurq6TJ09m17UDHz58mElwo9HQCODYsWOkoTlJdT3Cw8NJsu5XXnll1qxZLbs0Bo3mw549ew4cOHDw4MFDhgwZPHjwq6++am5uPmTIkG3btp0/f77Fa2nUDusrkY0MbgEvhIe6MHOrHUtLT2/AgAF6OlyW06rkMkB8OYHFYtn4CQEI/cawWCwWi+WdfBXA5YQ5Wlpa3lsPJ6+y09EbMEBPxzn2MqSX57BYLJZTXm1ig4q09XNYXB29AQP09HRYTuvT8+p7uuV5aRvtWFo6yuW1rOZszCuTNy4bhUKhUCjdlZ49e7733nsJCQn37t07cOCAs7Mzl1vrxBOLxd9++62Dg8Mrr7yyZMmSM2fOkAAMhUKxbt06Mubtt9+2t7evP3NlZSXjv26Oe7qioiIpSZluPSAgoFevXq28tFmzZn3++eeurq6BgYGxsbHHjx8vKCiQSCRFRUU5OTm7d+9evXr13Llz33777eaXxm4NRTW01YTd39Yovhw71IGkJLAVCCAU5ghDw5YHuY1Edf3BygKm1QAQ5ydgdqhWQgb1PFjywwETXSJFtR3CYBdSFlQl72vuVmeSHgx8gQe/MjExR5QYaJNYKJLEWnIalM1Ac7ANhUKhUCjdB21t7ZkzZ86cOVMsFqenp6ekpJw4cUI16eOOHTt27NgxcODADz/80NDQ8OLFi+RQQ4bGo0ePktM5HE5zdihu27bt8ePHAHr37j1//vzWX1GPHj06q8QDExDDxMeg7UyMDN3f1ng9JwMAvJLKFdnp6U2ibXEAACAASURBVNmy8nxhUpgZFzzL+TKF5FyYAIBt2GmJQiaRS
NLUK416ZOSXlhblR9kPBqCqy8lvHhNEigDw/VNLZQqFpDg1yFZ5rGZHr7wk08tPCEAQliW5lL5nT3a5KJUPAHHR3xU2IhuFQqFQKC8OPB5vzpw5x44du3v37o4dO8aPH6+aT+f27dubNm369NNPyUdra+uGajczmxqnTp2qar/UiEwmi42NJe2FCxfq6em19jI6A9UkO/XDq9s8IKb7a41KrhfeEwMAW9fC0c3eiA2AzQandz8eAB1OHw7YHPWQNn5qwS57CwMDEwtLE/VEOvmZqQAAj8RQZwM2wDFy3nA8I6TOLor8IztFABASs9KO7FfUtXSOjxIAiBPm1uZ01CAbhUKhUCgvHPr6+osWLTp16tStW7c2bdpkY6MhC3peXp61tXV4ePjff/+t2q9QKE6ePEnazXFP79mzp6SkBACLxVq6dGlbiN/96f5a4+DRdgCQEzxchxWwNe1qSXMzcNuGxTmbNRWcYjvWtHYIe6KLU93DxDoZHBiwKoCwKiAi/QYAFIslrZCNQqFQKJRujLGx8fLly3/99ddr166tXbtWbcfhxYsXV65caWpqOm7cuJiYGBKRfe7cuXv37pEBU6dObXIJJrP3zJkzzc3N2/oKOgi1bYtqZWPa3EPd/e1auqN9CzKkpK5UpJ9LpB88orLife2avHIdTtM14G3tX1d1W8tkqnslpX/mJZJWYmS95NI62myA11LZKB2GtDAzaF3SfeAlS/fQlfY0xp1CaUMePXpUVVXVSJoVSmehUCh69+7d2VIAgLm5+WuvvcaU4DM0NGRUQ4VC8eOPP/7444++vr6TJ09mNEs+n9+kZ/b48eO//fYbaS9btqx9ZG+CsrKyX3/91d3dvU1mU61n7e7uTj62ePIrV65MmDChfv8LoZ+Y2a9USDxzjx8MFSwSAol+kwaZFm1wbCJBY3PIOfuHGOMYxVFLS1vlIOc1aw8gER7xxTGzuPI6pQi5PGX+2PaTjdIWlG2d6aAMebIdFUy1RgqlTdHR0enZs6e/v39nC0JRp6Cg4Mcff+xsKQDg6dOnn3/+OWlPnjz5+PHjZ8+eTUlJSU1NZZI+Pn369Pvvv2dOMTMzk0gkje9r3Lx5M2lYW1u/88477SN7E1RWVrJYLC2tpk1UnYLGvOsvhNYIAByD0Y7e6Qr79VamwSI8qJKoHqyUNlLvpVGEV0vlqMnAKD3+7beqB2XVYgBIvCbbwzOqf27zZKN0IoVpGwKZKHkdWvKHQmljevToweVyv/rqq84WhKLOkSNHfv75586WAgCSk5P//PNP0g4JCWGxWO++++67774bFRWVlZWVkpJy6NAhJukj4dChQ4aGhk5OTq6urlOmTGHXqwj0559/njhxgrQ/++yzDrgKjZiZmVVVVe3bt6+zBGgEe3v7IUOG1O/v9vsapZnrAzam5VYQS5+4kmhkygw7NYpdTuDeq1LIK8rKxPIG5lHHfAKpSh65fEumGIC0JDlgmiA0R3XM61PJNsdQz/WHmRSN4rLCzISEMyXSJmWjdDLivOUukQAEXgKgNjSeQqFQKB3D06dP169fT9r29vZvv/02c6hnz55Tp06Nj4+/d+/ewYMH1eJmxGLxnj17pk2b9vLLLy9evPj06dMk6SNh48aN5OPAgQPff//9DrmUbkK31xol19IiA13G6Gmx7JzsWDr8UBEAeEx/nRweNIL8nUUO57K09AYMCG3uVyuOhWCnAACEgQ46LBaLa+weWaMy6jBjPhT68wHkBAsGaLHsnJzsrFg6A4Y6eHr+cEvSpGyUziU7coUQgCA+KuCDzpaFQqFQXkS+/fbb69evkzbjp1ZDW1v7/fffZ4pcv/LKK6o+3wcPHuzcuXPChAmDBg1asWLFhQsXHjx4kJKSQo4uW7asviWS0gjdXmvUnR6xk+RRzBESrU4Qf66YCY42sPNJ8rdlRgeNMQIAbWhCbeeBrvfegiiP2nP5XjtPZ+3k1xnDcYz4MWunP18pgDBHBIDv4R/lN
IzXpGyUTkRamDwpOAdAUtQ8Y7X87hQKhUJpf+Ry+RdffEHaM2bM+L//+79GRmZnZ5N2eHj4nTt3iKbYo0etklNcXLx58+a33npr2LBhEokEQN++fT09PdvzCroh3V/FNrPzzlZ4iysqpHI5NNSB1nWLyJ4ZUiGWytk8XV0OG4DlvD2yD+Ohlr2RY7lHJtsFlW6Ome+ebK+YCrFUzmbzdHU5AC7IPkKdM3l23hGXvMMrKirkcrDZHJ6KAE3JRuksKuIWugPgh2S5mUB8ubPFoVAolBeP+Pj4v/76i7SZWoIa+eGHH8jWRhaLNWXKFH19fW9vb29v7+Li4n379qWkpDDVqwGUl5eTBpfLjYuLmz17tokJjUBtLt3e1qiEp6trYGBg0IBaxuHpGhgY6Kooe2yOJps1W0O38lxlSLTGIQDYyvU1CdC4bJSO52ZmuF8OAI94f7vOloVCoVBeRGQy2ZdffknaTk5OGtN9Mxw7dow0xowZM2DAAKbf2Nh42bJl58+fv379ekhIyPDhw1XPKisrCwwMHDx48NixY7dv385EZD+P9OnTp2MWelG0RgqluUjzVjuEAgjKCLPmAUxCJR3tLpodgUKhULodu3fvvnnzJgAWi9W4oREqhQQbKgkzdOjQNWvW/PHHH0OHDlU7pFAofvrpJx8fHyMjo/feey8hIUEtIruL06dPH0ZlVG23E1RrpFDqcHVvBEnOPkL3QV5ebl7e5bPnfgaAG5fO5uXlXS3pVOkoFAql+1NdXb1hwwbSnjlzppWVVSODi4qKrl27RtrTpk1rZOSxY8cKCgpIe8eOHT4+PoaGhszRp0+fHj9+3NPT09DQcObMmampqWT7Y9fn8ePHHbYWdYpSKHV4UqmMfXEfUze0SRQ5ySYS/KjKS748DedRKBQKpW3YtWvXrVu30DxD4+HDh0nD0NBw5MiRjYzcsmULabz77ruLFi0CEBkZmZ2dnZKScvDgQcbEWF1dfejQoUOHDvXt25dJ+thlc3GjA93ToFojhaJGv2F2Xl6DVSppcZ/cOBonFAF8L/8xvV8Z1nWfHBQKhfL8I5VKQ0OVZXg/+OCDESNGND6ecU//5z//YbFYDQ37448/srKySJspIdizZ88pU6ZMmTJlx44dGRkZKSkp33333ZMnyqzJVVVViYmJiYmJ+vr6zs7Orq6u48aNU43LbhKFQtGISG1CfUNjnz592s/6SLVGCqUOZva+sfZ1euRXh8UJPSEIiIqYR7MiUSgUSruyc+fOkpISAD169Fi7dm3jgyUSyZkzZ0i7cfd0WFgYaZiYmAgEArWj2traTk5OTk5OVVVVQqEwJSXl+PHjMpmybtyDBw9iY2NjY2ONjY1nz57t6ur61ltvNXkhJ06ccHFxMTAwOHr06LBhw5oc33o6wFVN9zVSKE0gkVUDAKqfjx0uFAqF8twikUgY9c7V1VUt6rk+x48fJ7sP2Wy2nV2DWS9KS0uZwn3Lly/v2bNnQyP79u3r7u7+3Xff3b17NzY2duLEiWpJH7ds2TJq1Chzc/Pg4OD8/PxGZIuLi3v06FFBQcG0adMePHjQ+IU8L1BbI4XSPCrp3UKhPAdUFJ7ZFR2XmS16AEB/8PTZH/t85GjUTDeBuCTzQOKhrLPXRTceAIP5dh94+buNq5/MT5qbHB66PwevL9mzwbneRueKM8l74vani248ADB4zPSPvX0crY3IsZvZsRv2XdXX56qfJJFIuFafhswzYgNASV5mYtqhs+eu33jwAPqD7ew9/H2dTTjNXeX5Zfv27Xfv3gXQs2fP4ODgJsczOXcmTpzYr1+/hoZFRUVVV1cD0NHR+eijj5ojiZ6enpeXl5eXV0lJCUn6eP78eeZoQUHB+vXr169fb2Vl5erq+uGHH9ZP+ujo6JiWlgagsLDQycnp5MmT2trqRUSePn3KOMS7FE+fPtV8QNEwcrkcwMWLFxsZQ6Go8dlnn82aNauzpVDH2dn5s88+67Dlvvvuu5dff
rnDlqNQnlOysrL69evXhhOWnovS8J7jhxTImnGyROSl6S3pn1qgOqqyIMufiZSz3VleT4QodecnAAQJi8jhC2F8DYeVeOXLFAqFIj9JsyAqV9HEKq3n8OHDRkZGbTVbM6mqqjIwMCDX8tFHHzXnFEZXi4iIaGiMRCJhpl25cmWLxSOa4uuva6j6y2Kx3nnnnW3btt29e1f1FH9/f2aMm5ub2oQffvihtrY2p0uira29dOnS+j8Eaj2hUCgUSrdAennZGD8A4HtlJK6y0Zef2r3cJVgIUfDymOnpvtaNny0vvhwHwNYrabXPREv9uz8n2ggCAUS6RPvIIszYAJCXEGDjGVl7jo66C+JywjI/IQB4RWWscrWR/3Vq+RgXIRAq8BNUpo/mYbBjdOqQil69ak/p27cqYJK7CAD0uWwA0t8z4gB4hSX5zJ6oj7uJqz0DE0VAZPQxnwhHs+as8pwSHR1dVlYGgM1mr1mzpsnxly5dIjkd0eimxvj4eDJtz549lyxZ0mLxzMzMVq9evXr1apFIlJKSsnfv3qKiInJIoVCcPXv27Nmz/v7+dnZ2rq6uM2fO7Nev3+bNm//66y8S5Z2cnDx06FDVatomJiZDhgxxdXVtsUjtx9dff82U9laFao0UCoVC6Q6UnE1JBAC+MCPW3ggAnNfsib+m45kI4e6TJb5NuG/ZZg6iC/nm1hbED2zkuPJC1FkbPyFw474EZjwAFT96RgLwiMpY+eY1/ng/VKqLkO6ZCIAflBHraw8ABs57RfFcvicgPH6+ZLSdka7FOGeLuidJ8/iACPCIn2sCAJz3vhDlh5lbKN3qRitj4s4mjhECl4oeNXOVFv4EO5Wqqqrw8HDSnjt3rpmZWZOnMO5pU1PThsJNFArFtm3bSFujH7kF8Pl8Pp8fGhr6888/p6Sk7N+//969e+TQ06dPT5w4ceLEicWLFzs4OLi6uv7vf/+zt7fPy8sDEBISMnTo0Dlz5pDBbDZ70KBBc+fObb1IbU5GRobG3Z80GoZCoVAo3QDpr/uOAoBH4NRarYk33TMEAERnb5XlObFYLBbLO4EpLS/PXm/HYrFYdrFiANC1rFEZCf2MSApoXk2+Ld7UrIxzBZV7fO3N+6tvUAMgLzxLNuIFzJ3IdHIspxKn9IXf72qUO++bkEQAEPjMUqqTPBNLC9WdmLyXiBw62i1fpesTFRVFQka0tLSaY2iESs6d+jHRDEeOHGFiVj755JNWi1mHt99+e+vWrcXFxSdOnPj444/79+/PHKqurk5PT589e/aQIUNMTEz09fVJ/4IFC06fPt2aRU1NTU1NTdU+qva0K1RrpFAoFEo3QHbnngiAYKylqubHMyEmK+Fvdwd5eAFAnKdHZokcgPhy4qTgHABB/vYanLoVeRtd4gBAMMlceZhtYWc/2owHQKZJAskjEifrwTdWFUFvyBgAEJ4XieufI70cu0gIgB/i25BnOS9hYxwAwOEd8xau0iKkUunEiRPF4raarzEqKys3b95M2p6ens3RgR49epSbm0vajbinmczeEyZMGD16dKsl1UDPnj0nT568e/fue/fuEU2xt0rK36qqqkOHDjEx1P/884+Tk9P169dbuSj5EZmamhYVFRFHuWk9WrmERqjWSKFQKJTugNL6V11Ho+MYW3oou9nOX50WAIDIIXC/HGU7PDwBQBAf5FjjtZSXHE6ITUhI2Lre20rPhuiMGZtnNXejoNImKa6rU3JGvO0BAMWV8npn3Dz+NdEI18wdr9pflnc4NjYhIXart5OVjWccAEFYxkeWvJat8qz8+++/ISEh5eXlp06dUt2H135ERERUVFQA6NWr13//+9/mnHLs2DESs8vlcsePH69xjEgkOnXqFGkzmb3bj169egkEgr1795aWliYlJc2YMUNjRZmKiopx48a1JhcPoybW72xv6L5GCoVCoXQfDF/qXeezpEwEAHwjfS50x21O9Re6RCLRXYvUmwc/I8ajVimU3Frju
UhU53wbU716WXKaYHC/uqpC2S0RAL6VaT3ts+TbNZEA4BH/nkmd1/Gtk2sWBdYRxIZvWjd9UPNXeWZ69OjxyiuvkHZ0dLSPj0+7OkAfPnwYERFB2gsWLNAYhFEfxj09depUDkdzaqWNGzeSxpAhQ2bMmNFqSZtLnz593NzcXF1d8/Ly4uLiMjMz//77b9UB9+7de//99999990WTE60w6KiImJoZH419X9HZECLrqBBmtYaMzIy/vjjj7ZdldKNuXLlSi/V+EAKhULpQO7dr5v9jtt/MCCCqOSBBCYcM+dVOwWRi4TKgx7xifZGKu9B3rC4DGEFelWV38zLiAlNFAHBw/UunC5NH2fQfBFuPJEBKmpM/wF8QCS6dEcM6KqMq8hNCxYBQJiPQE3VGzwrTmhR0QtVN6/mxQSGioBgh+E5YaezV4571lVaxuzZs7/77juFQlFdXb1s2bKDBw+2esoG2bx5M6kBra2tvWrVquacolAoTpw4QdoNuafv3r2bmppK2itWrHimSoCt4dSpU0eOHLlw4cLFixeZ2tb1OXPmTHl5ubGxcSuXU9ML20NTVKUxrbFnz57vvvvuN998037LU7olTIBY10EikeTm5jL7ZtqbP/74g7hOKBRKR6Nd1wRXXkoCnXW0yPvO4D8fey0SErcw3326Zd2TdUfbO5KWs5v3XJdVwwWhgHBzSt64phL3qMBDHRHkZaXFAGDMq/vGFQtD/QAAIR6j1dU8XbPRjmRDpqOzt+/cVaOHh4qQE7g5b/E482dbpYXo6OjweLzKykoAhw4d+vHHH8eOHdsWE6tTXl6+detW0vb29m6mFnXu3LnS0lLSfu+99zSOiYiI+OeffwD079+/w95Kv/zyi62trUKhaGhA3759TU1Np0yZYm1tfebMmdZoeBrPra9ENjK4BTTx1/Xjjz+2yTIUSufy119/Xb9+ndk63d7I5XI2m27/oFA6FFL688bVYjksmduv4pYoBwAEfOK5lRdGr4mrOSgK3HB4coRjQ/eqhWPQTtvQRTmofPTwWQQR3b4nt671OIt/z8kBIBhloWpQlBYe8SQ5F1PdmsiUw7EIio8KtfEDKh9Kma+jzVqlNfTp08fQ0JDEbQQEBPzyyy8sFquN5q5l06ZNRDflcrmfffZZM89i3NNWVlYak+k8efJk9+7dpL1kyZK+ffu2hbBNc+/ePTWVUV9f/80337S2tra2traxsTEzM2N+jFevXm3vzYhtPj+NhqG8ELzxxhsrVqx43FEcPHhQR0ensy+aQnmh4I2yEwAQRR4qqDX0y3P3JgEA/51BPADIjVkeKgJg6+9PBgticisanFJenJcDANDuo3lA3bucZzqSRNsc/amgtrfkzO4cALB5c5Dq1D9Ek2rLHt4zmk5MWHz1CllPi81+llVaBYvFYrYb/vrrrwkJCW01M8P9+/eZZIqLFi1iNlM2CaM1NuSe3r17Nwk3YbPZixcvbrWkzWXatGmrV692cnJavXr1wYMHi4qK7t+/f+LEibCwsNmzZw8dOrQ9NG8CE1XNfGyP3ahUa6RQKBRKd8Dc1gEAELck+HCFHIC8MHOLQ6QIgGCJgwEgL0wb4ycE4JG0KyIiJggA4DcmvFAOAJdj57CsvNPOXK2QAoBcXJIcvISYJc0HMgWO5VKpVCqH9Ek1AFRWi+WkRw4AvNedBAAQ574k7WoFALn46kYPgQgAPKaPUtkaWXZ6Y6QIgG2Yj7VaIIc0bw6L5b0x7WoJUWflJXnJS9yJIIYGvGdZpdVMnz7d1taWtNesWdPmFZM3btxYVVUFoHfv3oGBgc086969e5cuXWIkrD9AoVBER0eTtpub28CBA9tC2GbRs2fP9evXHzp0aP369e+//36bJBVvHFXtUC0+RrXdVlCtkUKhUCjdAZ6lW5IHAOSECvS0WFYsraEORBEJ2jzfEijZstAFAPghYW5mgFHQhZ0AgNDlMbkAZNXFEMW5jB+ux2VZWVlp6Ri7h+YAA
D9srRvJvy1NnqPF5XK5WqwBpHRhjp+xlhaXy+U6J4oBgDfrqyQAQI7LcD2WlZWWzvDAHAAIEq5T1Q5zU7aSqf09bNQvQ4ZiIC7QZbixHotlZcXSMrZxJxbPkKy1FuxnWKVNiIiIIHEkxcXFGzZsaMOZS0tLt2/fTtpLly41NDRs5olHjhwhXuB+/fppTMF46NCha9eukbavr29bCEtRQrVGCoVCoXQPeG7xxUlBJD8jSN4agX9UfuUGMzakl48Q1So+0Z/sI+RZzxf68wEI03MrAP6cXfEhXnxyroiczfcKSy2+sJLZd9irN7+BlZWJI3gWbqXnkjxqZiEiRGXkb3BUdUOLf0sXAuD7r59qVG9TJY+/KyveS1BnCr6tV+qF4jU1pQKbt0rbYGVltXDhQtKOiIi4fft2W8381VdfEeNl3759V65c2fwTVd3TGneQM5m9J02aZGNTTy/vXjBZvglqZWPafF8jq5FIHwql2+Di4jJ06NDQ0NCOWe7o0aMLFiy4c+dOxyxHoTynZGdnz5w58+HDZ4o1aQZScZlYCoDD0+VxarUKuVwKcNTUDLlUCrZqp1wsFkulcrDZPF3dFlvuxBVlUjnA5ujqagxqlkul4HAai5mTS8ViMZmDp8vTLEhTq7ScI0eOLFq0qLi4GMC9e/fMzc1JkRhXV9fk5OTWz3/nzh0zMzOJRAIgKCio+VZMmUz20ksvkQCaxMREd3d3tQF5eXmMpnj06NFGysZ0Ov7+/ocOHZo8eXJnC6KBY8eOLV68ODg4WK2fhnlSKBQKpXvB4RlwNEQSs9kadC+2eoJoNo+ny2t1HDJP16DROeotq2EET1fTVTzLKm2DoaHhmjVriDlw7969AQEBo0aNauWcoaGhRGXk8XgrVqxo/omnTp0iKiOLxZoyZUr9AUxmb3Nzc3t7+1bK2a4UFRXd+3/27j2g6ap9APgzGDjEoYBo4oWLaHhhpGRohQmWgikjhbwNFS9gpHILEC+oUCJ44RKiogYq5AU1BypmCSpWmsErw1sKARKYoKAMZLDB3j8OfB1j3AdDfT5/+Ds7++6cg73+9nAuz3nyJDk5WdEDkeHZs2f5+flN6zFqRAghhFBLVq9evXv37vz8fLFY7OHh0cmsfIWFhdHR9fmP3N3dtbS02v5Zanl60qRJAwYMaNoylZC8OzN7d8zYsWNra2uPHz+u6IHIYG1tPXr06Kb1PfovFCGEEEIKx2AwqFsSfvvtt04uUm/durW6uhoA+vbt297roVvOuRMaGioUCgFAS0uLw+F0ZpBIJowaEUIIIdSKOXPmfPjhh6S8fv16gUDQsXYKCgoOHDhAyh4eHv369Wv7Z3Nzc0nWcZCVc6eyspLK7L1q1arevXsDkjeMGhFCCCHUutDQUJKkOi8vb8eOHR1r5NtvvyUX/Wlqarq7u7frs4mJiaQwcOBAU1NTqXejo6PJySoVFZWVK1d2bHioZRg1IoQQQqh1H3zwweLFi0k5JCTkyZMn7W0hLy8vJiaGlL28vPr27dvy81Ko5elZs2ZJXbJSV1dHZX90dHRs+zUzqF0wakQIIYRQmwQGBpKVXz6f3/ZroyU/TvYdamtrtzf/dlVVVVpaGik3XZ4+depUTk4OKWNm766DUSNCCCGE2mTIkCFUsHj48GHqZr+2yMnJOXz4MCl7e3sz25nf6OLFi2QzpYqKipWVldS7ERERpDBt2rSmi9dvPHX1Zq5KlzfMvIOQNFFRiv+WpCptNTWJyqpnMN3H33q4vK/rQugtVlBQUFVV1W1feKhdlJWVZdZ7eXnt27evsLCwrq7O09Pz0qVLbWwwICBAJBIBgI6OzqpVq9o7Hmp5+pNPPtHQ0JB86/fff6eSAXl5ebW35dea5D8fUq6srOy67jBqREha1X+3g6LDmtZrLfXGqBEhORo0aJCSklJcXJyiB4Kk3bx5kzqPLKV3797BwcEkr01KSsrp06dnz57daoMPHjyIjyfXZ4Ovr28HflX4+
eefSaHp8jR1haCxsbHM1N9vtsrKSpxrREhxVHoBAMs5PHjuSKipr6upgZEju+EWBoTeInQ6vVevXl988YWiB4Kk0en0Q4cONffuggULwsLC/vrrLwBYu3btrFmzVFRUWm4wICCgtrYWAAYOHPjVV1+1dzyZmZmPHj0iZamo8dGjR1wul5S9vb2lTsm8Dbpzth6jRoRkMxhvaW1louhRIIRQj0Oj0UJDQy0sLADg4cOH4eHhLd8KeO/evaNHj5Ly2rVrO5BJkVqeNjAwGDFihORbu3btIgvf/fv3nz9/fntbft01XY9WV1fvukVqPA2DUHOEih4AQgj1UB9//PG8efNI+bvvvnv27FkLD2/ZsqWurg4AdHV1O5ZJ8fz586Rga2srWc/n86lUPqtXr1ZrtB39rVPZoOu6wKgRIdly84vL+GUlJSVl/A5egYAQQm+wrVu39urVCwCeP3++YcOG5h67fft2QkICKfv5+TEY7d4d/vz58xs3bpCy1PL03r17y8vLAaBXr14uLi7tbRm1F65QIyQbL8hGK6jhBYsdHhjoamuC/2AQQogwMDDw8vLaunUrABw4cGDNmjWjRo1q+tjmzZvJROOQIUNWrFjRgY7Onz9P1qB79+5NlsWJ2traqKgoUl68ePHAgQM70LgCKSkp/fLLL0ZGRooeiAxPnz6VueEYvwQRaqoXALBYLG1t7WepqTwA4HHd2Nybe9OPuIxX9NgQQqin8PX1PXDgQHFxsUgk8vLyotaRKZmZmadPnybl9evXk7nJ9qI2NX722WeSU5UnTpzIy8sj5dWrV3egZcWi0WjV1dWPHz9W9EBkk3muCKNGhKQxTZaIxUsaXolK7v/qOcomDiBuZajP4iMmmHsHIYQAAEBDR6nJFQAAIABJREFUQyMoKGjZsmUAkJycfP78+RkzZkg+sGnTJrFYDAB6enpLly7tQBd1dXW//PILKUstT1OZvWfMmDF27NgONK5YtbW1FhYW3377raIHIoO7uzs58y4Fo0aEWkbXMbaOuB4cN9EXIC6rcL8JpmxECKEGS5YsCQsLy8rKAgBvb+/p06dT6cEzMjKonDgbNmxQVVXtQPvXr18vKSkh5enTp1P1aWlp169fJ2VPT88Oj1+x1NTU9PT0FD0KGZrbfoqnYRBqnVrfAYoeAkII9URKSkqhoaGkfPfuXWqjIQBs2rSJFAwMDBYvXtyx9qnlaVNT02HDhlH1VGbvMWPGTJ06tWONv4709fX19fWlXkrWdCmMGhFq3ZMHDwAAwHKoFk40IoRQI1OnTrWzsyPlgICAFy9eAMDNmzfPnj1LKjdu3NhqGvDmUHslJZenc3Nzk5KSSNnHx6djLb/WSJior6+fl5dHNnfqN9EV/WLUiFBjgvuBdi6xKVllAhF5nZO2z5YdBADAshyuqdCxIYRQjxQcHEziwqdPn27evBkA/P39yVtGRkaOjo4da/a///7LzMwkZcmocefOndRNM3Pnzu3MyF9HVJjYtLKrYdSIUGPCF6ncaKepLC01FVNTUxpNzWjySh4AACQc99ZV8OAQQqgnGjly5Jo1a0g5KioqISHhwoUL5KW/vz+d3sFDFElJSeQwTb9+/T744ANS+eLFC+q2wzVr1nTsXPbri0SH1J/UtGLTycWumG7E0zAINcYc6bPXD6KCUnnA45FwESydg4M2rTHXxeVphHq6spy0/ZHRF1J4zwBA2+DzuUtXLbZt679dftGFU3E/Xfr9IS/3GYABy+pLZ/cFFk0PKwhu/Lg96EQqjHY9stW+yf30ZWk/Hok+cYaX+wwADCZ+vtRlle34+l8581P2bT1+X1u7yRUmVVVVaqbeAUt06RLtnDwRx02+zssF0DYwGPwhx2ON/XhGG3pRiPXr18fExJSWltbU1Li6upLKd999d8GCBR1uk1qenjFjBhV67tmzp6KiAgAYDIazs3PnRq0AdXV1z58/LysrKyoq6nxrUlOMZM268802B6NGhKRoWrtst
XbZKuCX8QUiADpDU5OJ/1AQeh2U3IgYMNFNooLHS+UGRQVkp28c3uq/YkGWiwYrutGHedy4sJsJ2aH2w6lKfk6K/+ypYeQ3yvK5oiZDiLAb4MZt3Eh0kB83b6utHgA8+ysqOprXzAicXQKW1Md9ZRkeWmZhjQYDXC5vUnmmBbP1XhRCU1MzMDDw66+/BoCnT5+Syk2bNlFHqttLKBSmpKSQMrU8LRKJqAM3Tk5O/fv379Sgu8DTp0+vXLny7NmzsrKy0tLSMgmlpaXPnj3j8/nkyWHDhhkbG3e4I5nRYdMgsoWHOwC/DBGSjcHUZDSZQ0AI9VyCLE8SMrKck+PWmWmLrhz0cvDnAs/fK+rzM2taSdEvKsyKBgBL5/gNq6aYaP/3R5wZ2xcAwhwiVwlDSdCZEeth5iQRy2lIf4lmxXqSYM45PHndfDPRP1e8JjpwAYLYbuzyM+ZMMLCNTDAsk0xB06dPhcfUhTwAAG21+ubK9jnVh4wB8VcXfTZWA8pzs25euw3Gam3qRVGcnZ2///77+/fvk5ejRo3qzKbDy5cvk9sClZSUPvvsM1J59OjRgoICAKDRaNSaeM9RWVlpamraxknEjz76qOX7u9uFzDJSc40kXpT7vCNGjQghhN4ERb8fjQMAYHGT91nrAgDYbzwS80DDKQ64B38tWtPK8i19uA0v/d6I8cZkCVjX1ic9/HczNy5A7tMqGM4EgLJrTmEAwAlP9hn3gDXZDcqlh3DGKQ4AWH7J+9ZYAwDo2B/jxaixnAC4F28WmVvpahpb2EvNLgkyWAA8AE7MIjJPKMjiruQCAARfLfax0AEAAE1NK73xVm3tpQN/e3JBp9MXLVq0bt068tLCwkJJqePHJ6jl6YkTJ+rokL+HV5m9Z82a1ZmJui7y+PHjNoaMJiYmhoaGnY8aJaNDqcBRsiwveBoGIYTQG0Dw1/FzAAAc32mvoibm504BAAC83wtKMuxoNBqN5hKb1fCuKCXQikaj0az28QEANE0aQkairy6515jZkDOGOe1S8vXs8iNrrEf0k3ECQ5TzOzk27LFoClXJMJkWzAIASL/9n8xxZxwKiAMAYK+aUx8D3Uw+DADAiV9THzLKoZcOEAqFrq6uDx8+bNenkpOTqTKXyyUbEDuGytRILU+npqb+9ddfpNwzM3sbGRmtXbt25MiR48aNs7Kysre3X7FixcyZM9XUpHeybtmyRSEj7CSMGhFCCL0BhI+f8ACA/XGjWz+ZemRLIvfWf0M5zgAA0U6cC0UiAOBnxU31TwUAP3drGYu6ZRkhDtEAAOypI+rfphtbWZsPZwKAUNYIql6QeSMOa7DkELQMJwIAcG/y+E0/I8jat5ILAKyANQ0ry2V3L6QCgDvbnAGCkqKi/Pz8oqIyagNlR3rpkIqKij179hgbG8+ZM4e6haVlv/76a1paGvXyyZMn3333Xcd6/+eff6iAlbqlkMrsbWpq+sknn3Ss5a4WFBT0999/Z2RkXLp0KSoqqrS09OzZs1VVVZLPjB07lspw2UlUvkZCKgG43FeoMWpECCH0Jqif/atuFNExBptw6qvp9tuusgEAeDa+J0RQsofjBADAjvGjTpCIihJj98XGxkYEuphqmZGYMXnnnLZuFKyfk+Q3jikZYydxAAAKy5scnYH8iz+Q8zcbF02urxLkXksFAAhzMLKiqQ0YPFhfX3/wYC0VmlXsjaKO9dIBxcXFJNCpq6s7ffr0pEmTPv7448TERJIHpzlUjkbqkEpYWNijR486MAAqifc777xjamoKANnZ2dSatbe3dwfa7GanT58eO3bsqVOnyEvJ9EPr1q2j0Wjy7S5PAjSJJuUF9zUihBB6cwzs37vR66oSHgAAS1dbDTQtdia4cx3CIG6hShx5m5UcxXkVFFYVbHRa2fiEs5m+VpMsOa0w6Nv4GpSSAh4AsEz1m0SfRYc3hgEAcGKm68n4Ok4FYFmyDSCXm8oDSHWaaKNVmG7Z7l46ok+fPkwmk
8lkUrv0fvvtNzabPXLkSG9vbw6H0/Se4gsXLvzxxx+kvGvXrtWrV7948UIgEHzzzTcnTpxo7wCo5elZs2aRAGvHjh11dXUAMGjQIAcHhw7/aN2grKxs9erV8fHxVI2pqam2tjY5Em5kZPTll18CQEFBwbVr1ywtLZttSHEeP348ceLEpvUYNSKEEHpzPHn6stFrtX4GADzgFT2rAj3GcPt1e9lhKxty1nBi4qwlEiQCc2R0MrcMVCtK8zOSo4LieAD+o7TSrxafkbXDsDm5L4UAEjFVvwEsAB4v8zEfQPJ6qbIbJ/15AADBq9hNQz12QEK4u50ekw4AJRmxn5o58YD3QxLP8sP29dIxvXv3ZjKZubm5cXFxu3btunPnDql/8ODBihUr1q9fv2bNmq+++kpLS4v6CHXr9Pvvv+/o6FhSUuLl5QUACQkJf/zxx6RJk9re+8uXL6mVbrI8XVZWFhdXH+m7u7urSp5C72HOnz+/YsUKKtqm0+lr1661t7cfN24cqVm7di3JRkSj0fr3798zL9FOTEyUORuKUSN6Kzx9+jQ9Pf1///tf93RXUlIitYsFIdRNejWegistJgedNVTI953OrKXOK7lkWZi18HOTxh/WNLe2JSX7BS6LHNaNYgcBcHcezbBoLXGPBCY0GoKopLgQAGCwVOJXPjeIpJYM4JhLhHlCIBsTbRxm6jV8Qmf8PA+2kxMXoLq9vXSKqqrq0qVLnZyczp07t3379qtXr5L64uLiDRs2BAUFLVu2zN3d3cDA4OzZs3/++Sd5l5zzWLVq1e7du//55x8A8PDw+OOPP9q+Jnvx4kWBQAAAKioqJKjavXt3ZWUlAPTu3Xv58uXy+xHlqby83NPT8+DBg1TN6NGjDx06RMJosrg/ZMgQ6n7FIUOGsFis8PBwxQy3RX///beurozD+LivEb0V+Hy+UCis6i7V1dVkJQUh1G1IQJV7v1ByY19ZAS8VAIDNIiu3opzIjVQmb57v1sQWdgEa2/rttQQAKH/xvD0D4f37RLJV/u3UVABgTzCWnFAU5CQ5kZyLCQsafTkztc1YAAD5eU8kahs2Mb46ut2mXuSCRqPNnDnzypUrN2/edHBwoLJ2V1ZWRkREjBgxYv78+dRGQ3NzczI7qKqqSh1euXHjBjVT2BbU/sUpU6YwmUyhULhnzx5Ss2zZMskJzp7j0qVLJiYmVMiopKT0zTffZGRkvP/++//888/Ro0dJvbe3d0+eKG0VzjWit4KBgcFnn30WFBTUPd2dO3eux/42jNAbijnBig1cLi/sp+zt1sb1X26iG8fiAQBYHw5lAgDciPIK4gGApbu7RlgYlxfGjppXusa8mRVdUWFGKgAA9FKX/YBG4xHov8cG4ALv3G/ZtnoNqQSL0g6mAgCYjRsq2fTlyGAAAOC4zBzeqBUYOJwFwIOg+JsB1tRux9IHuQAA5dXC9vQiZ++///6JEydyc3N37twZExPz8uVLAKitrT127Bj1jGRCGTabPXnyZDJDuX79ent7+6YJaGSi7rAmOXfi4+PJgi+NRlu9erX8fiD5qKys9PX1jYqKoo4KGRkZxcbGfvTRR+RlSEhIbW0tAAwYMGDFihUKG6g84FwjQgihN8EISxsAAIh29U8sEwGAKOfCLpswHgCwXW10AEQ5Jye6cQGAE78/NDTKDwAA3CZuzxEBAGTtc6SZupxMu18mAAAQ8Yt+9Hcl05IjhvRt6EQkEAgEIhC8rAYAKK/mi0iNCACAOdqODQAQvdD15P0yABDx74dw2DwAAM7nEyS2RpZcDQnjAYBl8Krx0qdKGOY2zgAAcQ67LuQAAEDZhUC3IB4AwNyPR7ajl65hYGAQGRlZUFCwZcuWAQMGSL3r5eV16NChmpoa8jI0NJQsTBcUFAQHB7el/Vu3bpHbXwBg+vTpAECt4drZ2Y0YMUIuP4W8XLt2zdTUdPfu3SRkpNFoq1atyszMpEJGkMhh6eHh0ca4uecSI/QWsLe3X
7t2bbd1d/bs2XfeeafbukPoNXXp0qW+ffvKr73yeM6rbzfWq6JftlAsFhcGk7OqrIBC8nT6XvI2O/y6WCxOD391lJXFkvg0K7iwvv0qyfYbYceUkzbvxUt88FUjftxsyYFeD2eTJ7iFQhk/RxWPI9HIq1bcuVXt6aUzEhMTdXV1W32sqqpK5uSZrq5uSEjI8+fPxWLxsmXLSKW6unphYWGrbVIpHkeMGCEWiy9evEg1e+3atc7/aPJSVVXl5eUlefmNnp5eSkpK0yd9fHwAYMyYMXw+X7J+/fr1M2fOrOyRLCwsduzY0fRnwblGhBBCbwbmgpjCeL/6iIsk0GG7h98r3zqcDoKsJN9UAICYOHeyj5A5fhnXnQUA3DM3ygBYjvtjApxJCMbjkU+znIMTCtN9qH2Hqr0loslGPdfvVGMaLyi+Hs9paIUMITz53lZbyWVo/q0zXABguQdO05W1T4xhElN41Y/NkhgKy2/v1fJQW0Y7eukOvXr1orLtMJmvdlQWFRX5+PgMHTrU29v7q6++UldXB4DKysq1a9e22qbUlTDU5kgzMzPJCTzF+vPPP8eNG7dz505qC/vy5cuzsrJkptEJDg7+77//bt261adPny4aD/kb7gY0cYsZOxF6Mzg4OBgZGXXzvsbHjx93T3cIvaZSUlJmz579/Hm7zpq0gYBfwhcAAIOpyWS8CstEIgEAg944ThMJBECXrBTx+XyBQAR0OlNTU3r1uM34ZSUCEQCdoakp81CzSCAABqOVowUCfhlfIAKgM3Vkj6S1XjouKSlp5cqVhYWFLT92/PjxefPmkfLly5dVVFRCQkKSkpIkjwOqqKiMHTuWpLCg0Wh//fXX+PHNHkgvKyvT0dEhuwAvXrw4bNiwUaNGkUDl2LFjc+fO7fyP1kk1NTUBAQHbtm0jgwSAwYMH79+/38bGpr1NbdiwITMz8/jx450ZDxUvVlZWUtF5ZxokrK2t2Ww2yZ0kCU/DIIQQerMwmDoMGSeJ6XQZoRddOls1ncnUZHb6HDJTU6fFNpp0KwuDqSnr52h7L12rrq6OOvtiZWVFrvg7c+bMgwcPduzYceTIEZI9RygUUlnPxGKxh4fHlStXmmszOTmZRGO9e/e2sLBYvXq1uCFhzZw5c7r6J2pVZmbmokWLGqaiAQA4HM7333/fr18/BY6Kihe7Aa5QI4QQQqjdjh49eu/ePVIOCAig6keOHBkdHZ2fn79hwwZtbW2pT129etXNzU0kkp3yiFqenjZtWkVFBXW9iru7O52uyHkukUj07bffTpgwgQoZBwwY8NNPPx05ckSxISN04/I0YNSIEEIIofaqra2lIsVp06Y13XE4YMCAwMDAR48eRUREGBgYSL4VERFhaGgYHh5eUVEhWV9XV0edfZkxY0ZkZCS5LqFPnz7UkRqFuHv37qRJkzZu3CgU1ifOtLe3v3Pnjp2dnQJHRVDnV6iaLg0iMWpECCGEUPvEx8c/ePCAlCVzNErp3bv36tWrHz58eOzYMWNjY6q+oKDA3d196NCh69ato7aAX79+/enTp6RsaWm5d2/9IfcVK1Yoaj6vrq5ux44d48eP/+uvv0iNlpbW0aNHExIS+vfvr5AhtaBpBCl3GDUihBBCqB1EIhE10ThjxoyJEye2/LyysvLcuXPv3bsndefy8+fPg4KC9PX1ly9ffu/ePWp5+r333rt8+fKTJ08AQElJadWqVV3wQ7QuOzt78uTJ3t7e1dX1NznOmjXrzp071AGgtxCehkEIIYRQOxw+fDgnh2Qgb2misakffvjh3XffJadklJSUyFHrmpqagwcP/vDDD1TunhkzZkRERJDynDlzDA0N5Tn6NhCLxbt37/b19SX33wBA3759w8LClixZIt+OKioqqL2hPQr1g0vBqBEhhBBCbSUUCgMDA0l51qxZ77//fts/O2zYMB8fH2qe0snJ6fTp0y9evAAAsVhcXl5O6gsLC7OyskjZzc1NbkNvm/z8fCcnp
9TUVKrms88+O3jw4NChcr6t8enTp9euXWt1plYhxGKxtbV103qMGhFqTlnayRNx3OTrvFwAbQODwR9yPNbYN7n9CyHUCbW1tbdv31b0KJC0/Px8yZyLkmJiYvLy8gCARqO1a6KR8Pb23r9//+PHj+vq6v7999+CgoLo6Ojw8HDqFkEAOHToECl88MEH3ZzZ+8CBA56ennw+n7zs06fP9u3bV65c2RV99e/ff8iQITKDM4XjcrmamjLuZ8eoESFZyjI8tMzCJCp4POByeZPKMy0UmB4NoTdLbm5uRUWFiYmJogeCZJB5Y3JNTQ1145+dnd24cePa22yfPn2CgoLIUu8vv/ySmprq5eXl5uZmbm6ekZEh9fDdu3c3b9789ddf6+h0+f3ahYWFy5cvv3DhAlUzefLk2NhYqQPg8jVy5Mi23JfT/TIzM2XWY9SIUFNl+5zqQ8aA+KuLPhurAeW5WTev3Qbj1/zeeYR6FAMDAyaT2TP3db3lfvnlFz8/v6b1Bw4cePToEQDQaLTNmzd3rHFHR8fw8HCS+tvHx2fGjBlisfjhw4fk3X79+lHXBVVUVGzZsiUkJGTJkiUeHh4jRozoWI+tiouLW716NdWvmpra1q1b3dzcaDRaF/XYYfr6+gBApnupl5I1XQqjRoSkCbK4K7kAAMFXi30syC+4mppWeuOtFDoshN5ESkpKgwcPVvQokDRtbW0lJeksK9XV1Vu3biVle3t7FquZW7lbo6SkFBoaOmXKFAD4+++/IyMjx4wZQxaFaTQadQ6DOi5TVVW1Z8+effv2ffHFF9988418NwIWFxe7uLicOXOGqpk4ceKhQ4dGjhwpx17kTl9fPy8vj/xJXlLhI6Ur4kjMvIOQtJvJhwEAOPFrLLp8TQQhhF4X0dHR5GZqJSWlTZs2daapTz75xN7enpQDAwNPnTpFyoMHD66pqQEADQ0NHo/n7u5OHayuq6s7derUpEmTLCwsEhMTyU2DnZSQkDBmzBgqZFRVVQ0KCrp27VoPDxmpSLFpZVfDqBEhKWV3L6QCgDvbnAGCkqKi/Pz8oqIy2bdfIYTQ26GqqiooKIiU586dO2bMmE42uG3bNlVVVQAoLS09fvw4qSwtLSUFZ2fnMWPGhIaGPnr0KCgoaNCgQdQHr127xmazR40adeDAASqTYns9e/Zs/vz5X375JZVXfPz48enp6WvXrlVWVu74T9X1SHRI/UnNMjada2xa03kYNSLUmCD3WioAQJiDkRVNbcDgwfr6+oMHa6nQrGJvFCl6cAghpBh79+4ll7goKSn5+/t3vsHhw4e7u7uTMrWhkCxPKysrU5m9+/Xrt3bt2ry8vIMHD44ePZr6+N9//71ixQo9Pb2tW7dSsWYbJSUljR079tixY+QlnU7ftGnTjRs3xo4d28kfqvvlNSZVI/fuMGpEqFmpACxLNtuS7N1JdZpok1iEc44IobfOy5cvg4ODSXnBggWSdwN2xrp16yTv5aPT689afPnll3p6epJPqqqqLl269Pbt20lJSZMnT6bqnzx5sn79+mHDhrm5ubUlSHrx4oWTk5Otre1///1HasaOHXvjxo3NmzdTvb8uZP68UpX6DeTVKUaNCMnGDkjIKxdmppw5k5JZnB7DAgDg/ZDEU/S4EEKou+3evZvc76esrCyXiUaib9++VB4fABCJ6n8tby6zN41Gmzlz5pUrV/788097e3tqKbmysjIiIsLIyGj+/Pnp6enNdXfx4sWxY8fGxsaSl8rKyr6+vn/99df48ePl8/P0PHKfdMSoEaHGhECyu9o4zNRj1v/qqTN+ngcbAAA6uIUGIYReVxUVFSEhIaTs6Ogo3/Q38+bNk8puY2FhYW5u3vKnJkyYkJCQ8ODBg6+//rp3796ksra29tixY++///7UqVMl0y4CQEVFxVdffTV9+vR///2X1IwcOfLatWvbtm3r1auX/H4aRZLa3SjfKUYKRo0INcbUNmMBAOTnPZGoFdb/3zfk/
70ghFBbff/99+TICJ1O37hxo3wbT0lJkToN7enp2cbPGhoaRkZGPnr0aMuWLZJpwFNSUmxsbExMTA4fPiwUCq9cucJisfbu3UvepdFobm5ut27d6plX+bWXZHRIpeMhLyXL8oJRI0JSBg5nAQAExd+U2MNY+iAXAKC8Wij7Qwgh9Cbi8/k7duwg5SVLlhgaGsq3/XPnzkm+VFJSInkc205bW9vf3//Ro0d79uyRnAe9ffv24sWLNTU1p0yZkpubSyoNDAxSU1PDwsJk3nyDWoVRI0JSGOY2zgAAcQ67LuQAAEDZhUC3IB4AwNyPe3QSL4QQkq/w8HByQllFRWXDhg1yb19qKbmurk5yp2PbMRiMlStX3r9/n+R0pOorKyup8sKFC3k83ieffNLh0fZAUtsWJScXqRzgcvSanRhCqBsMn72KA9FxAL42Rr4sFovHqz8C485dPF7Gbe4IIfRGevHixc6dO0l52bJlUueaO+/WrVvURkPK999/7+rq2rHbn5WUlGbPnv35558vX748Li5O6t0TJ04oKyt7eXl1+FYb+aqpqSksLKRyVXYSdcgdAI4fP05edrjxJ0+eCIUy1tYwakSoCYZJTOHVoa6rgrg8qA8ZWX57I/1cLBiKHhpCCHWb0NBQkkmxV69e69atk3v7UsvTDAZDIBBUV1d/88031G0x7ZWRkbF48eLbt29TNdTNhEKh8PDhw4cPH7a2tv7mm2+mTp3amcF33u3bt+/cuSPzvm+Fq6uru3PnTtN6jBoRkoGua7H1TKY/v4wvEAHQmTqaGC8i9Fooy0nbHxl9IYX3DAC0DT6fu3TVYlvdNv4D5hddOBX306XfH/JynwEYsKy+dHZfYNF0gk1w48ftQSdSYbTrka32zCZDSPvxSPSJM7zcZwBgMPHzpS6rbMfrkvfyU/ZtPX5fW7vJprqqqio1U++AJbr01htp2wOdVVdXFxYWRsrLly8fOnSoHBsnzp8/T5XpdLqfnx+5qPD06dNpaWkWFhbtak0kEn333XfffvstlcHnnXfeiY6ONjc3j4iI2LNnD5UM/MKFCxcuXBg3bpy3t7eDg4OiMjWOHz9eWVlZXnON8mVtbf3ee+/JeEOM0FvA3t5+7dq13dbd2bNn33nnnW7rDqHX1KVLl/r27SvHBouvh8v4nmMFZAvb8OEqnrOsr0/3hGzJp8qzL7lTy5uWe0ubDCGcLaMRP24eeTs9uIW1Ued7wjY10oYHOisxMbFPnz6kWQaDUVhYKK+WKaWlpUpKrw5XcDicmpoa6jiLmZlZXV1d21vLysqSSrs4d+7cp0+fUg9UVFREREQ0XfjW09MLCwvj8/ly/wFbtX79+pkzZ1b2SBYWFjt27Gg6ZjwNgxBC6I0gyPKc6AYAwHJO5uUVF2YnBLABAHj+XlEZrX5aVJgVDQCWzvGXeIXFhenc+l1iYQ6ROQ35FDJiPTSMpoZRyf41pBfssmI93bgAAM7hyXnFxdnXE0h0F8R2u8EHADCwjUxI4Eq6dCm+IZDUVqO3qZFWH+i88vLyyoZzJC4uLrq68pzFJM6fP08Wjgl3d3cVFZVdu3aRl+np6VQ67pbV1tYGBwebmZllZNT/V+7fv/+JEyeOHTumra1NPaaurr569eqHDx+SnI5UfX5+vru7+7Bhw9avX09dGIOa1f3BNULdD+caEeqB5DvXWHiJ7A9jcV/Ni5XHcEhdcBvmykp56feqJF6n10/osa+X1z9AZjI54cm8q+EAAJZ7yxsPIYD05pdMVVXxYsi3bcClZoZQlU7GyIm517ZGOtRLO82ZM4c0qKam9vjxY7m0KWX+/PnKFuGmAAAgAElEQVRUKGJpaUnVW1lZkUpdXd2KioqWG/n777+l0i7a2dk9efKk1d5JTkepBOO9evVatmzZ3bt3O/uztQ3ONSKEEEIKIfjr+DkAAI7vtFfzYszPnQIAAHi/F5Rk2NFoNBrNJTar4V1RSqAVjUajWe3jAwBomow3ltwA2
Vd3IGlEpaG1aZeSr2eXH1ljPaKfjIz/opzfyV17HoteZRxkmEwji9Lpt2XPY2UcCogDAGCvmmPclkY61ku7CIXCy5cvk7Krq+s777zT+Tal1NXVJScnUy8lM3uHhoaSleuioqJt27Y114JYLA4LC3vvvfeuX79Oavr163f48OGffvppwIABrQ7A0tLy/PnzWVlZixcvVlVVJZXV1dUHDx4cM2aMra1tWlpax360NxtGjQghhN4AwsdPeADA/thEMvJj6g0HAADurf+GcpwBAKKdOBeKRADAz4qb6p8KAH7u1k1OtACUZYQ4RAMAsKeOqH+bbmxlbT6cCa9ui2qk6sUzAADgsAZLDkHLcCIAAPcmT8bqsSBr30ouALAC1pgz29RISQd6aScVFZXIyMg+ffro6ur6+Ph0uj0Z/vjjD3I6GwD09fVnzJhBvcVisVasWEHKO3fubJqaBwByc3OnTJni4eFRVVVFaqytrW/fvu3o6NiuYYwZMyY2NjY3N9fHx6dv376kUiwWJyUlTZ48eeLEiadOnZJcRkcYNSKEEHoT1M/+Nb7AiTHYhFNfTbffdpXsc7TxPSGCkj0cJwAAdoyfbcMpaVFRYuy+2NjYiEAXUy0zEjMm75wjI6aUqX5Okt84pmSMncQBACgsFzX5RP7FH6IBAGDjosltbaT9vXSAurq6hoZGfn5+W+btOuDYsWNU2dfXV/JYDABs2bKFyWQCQFVVVdOwde/evSwW6+rVq+Qlk8mMjo5OTk4ePHhwxwajq6sbHBxcUFCwY8cOyaPiN27csLe3Hzly5J49e6jwtGdSV1fvno4wakQIIfTmGNi/d6PXVSUk5aquthpoWuxMcAcAiFuoQhvgywMAVnIU51VQWFWw0Wmlk5OTm390w4kXM32t9l49Z9BXpdHrkgIeALBM9ZtEn0WHN4YBAHBiputJnatptZG299JxXZeShko3w2AwFi1aJPXuwIEDqXtojh079ueff5JyQUHBtGnTvvrqq4qKClJjaWmZlZVFzU12BpPJ9PLyysnJOXz4sGQa8JycHFdX12HDhm3evJncx92jqKurUyGjZLmLYNSIEELozfHk6ctGr9X6GQAA8IqeVQHAcPt1eyVy1nBi4qx1JQIj5sjoZG5ycnJC/F4/Dokb/Edp2aeVtGsIuS8bTwP2G8ACAF7mY6m147IbJ/15AADBq9hNQr1WG2lrLz1QdnZ2SUn93+m8efN69+7d9Bk3NzdyOZ5YLPbw8ACA2NhYExOTX375hTzQu3fviIiIS5cuyffGGhUVFUdHx8zMzAsXLnz66adU/dOnT7ds2TJs2DBXV9fs7Gw59th5lRK3JnY1zPKN3govXryIiYn5+eefu6e78vLy6urq7ukLIdRIr8ZTcKXF5QAAoKFCvu90Zi11Xskly8KshZ+bNP6wprm1LSnZL3BZ5LBuFDsIgLvzaIbFmvHQVkxoNARRSXEhAMBgZuNvXD43yA0AAAI45k2vKm2hEX57eumJ1q9fT5UDAwNlPtOrV68dO3bY29sDwO+//y6ZWAcAPvroo9jYWCMjo64b5PTp06dPn37r1q3t27efOHGCZA6vqqras2fPvn37vvjii2+++Ubq+LZMZWVl6urq1IGbrtBty9OAc43oLVFTUyMUCl92F4FAoOifGKG3DvlFLfd+oeTGvrICXioAAJtFVm5FOZEboxve5PluTWxhF6Cxrd9eSwCA8hfP2zMQ3r9PJFvl305NBQD2BGPJCUVBTpITybmYsEBWLsRmG+nTnl56oLq6urNnz5KyoaHhkCFDmntyzpw5H330ESlTIWOvXr1CQkKuXr3apSEj5b333ouPj8/JyXF3d6fSntfV1Z06dWrSpEmTJ09OTEwUi8XNffzgwYM6OjoGBgY5OTldNEIqVw5V06VBZM//nQQhOdDR0XF2dg4KCuqe7s6dO7d8+fLu6QshBAAAzAlWbOByeWE/ZW+3Nq7/chPdOBYPAMD6cCgTAOBGl
FcQDwAs3d01wsK4vDB21LzSNTKm+sinCzNSAQCgVzNfwxqNR6D/HhuAC7xzv2Xb6hnX1xalHUwFADAbJ3kjn+hyJMkiznGZObxdjTD1hW3upSc6ceLEy5f1uwha3o/49OnTXr0aZTh6//33Dx06NHr06C4cnyzDhg0LDQ3dtGnTnj17vv/++8ePH5P6tLS0tLQ0Y2NjLy8vR0dHqdECwI8//lhbW1tUVLRw4cJr16519dWF3bBUjXONCCGE3gQjLG0AACDa1T+xTAQAopwLu2zCeADAdrXRARDlnJzoxgUATvz+0NAokhPcbeJ2cvVL1j5HmqnLybT7ZQIAABG/6Ed/VzItOWJI34ZORAKBQCACwctqAIDyar6I1IgAAJij7dgAANELXU/eLwMAEf9+CIfNAwDgfD5B59VYS66GhPEAwDJ41XipO7JbbaTtvfRIAQEBVLmFqPHMmTNjxoxJSUmhalRVVX/66afuDxkp/fr18/Pzy8vLO3jw4KhRo6j6+/fvr1ixQk9Pb+vWrWVlZZIfoTKZ37hxo7m1+NcLrYWZVYTeGA4ODkZGRt0810j9PooQkiklJWX27NlU3r5O4//oqLEwrv4FC6DhHLRftnDrcHpRiNVg31QAVkBh5kZdAH7GPg2zlQDADr9+Zo15RoSVmVtq/WdZLB6v4dOs4MJMH10AAMGPjmpU+42wY8rPLGEC8O//qDFqYcMHWdDQiB83e6vtqznFGxF2E924ACxuYbqtrvT8U6uNtLGXzkhKSlq5cmVhYaFcWqP8+eef5ubmpDx69Og7d+40feb58+dr1qw5cuQIVUOj1ccqTk5OP/zwg3yH1DFisfjcuXPbt2+nEgAR6urqy5cvd3d3J0d5AGDWrFlkRV5ZWTktLW3SpEnUwxs3bty9e/e7777bnSNvozt37mzevFky+3q97rk2ByHFwhsFEeqB5HujoFgsFgsL4/04kt9xbPfwe+VisVhcxdtLamJ41C2AQq47CwDAMrxULBaWZscEOLMafUOynIMTCoVU61UJUu9TOPFUo8XX4zmNnmKHJ99rPMpysl2S5c6tEsvWaiNt6KVTEhMTdXV15dgg4eDgQI34u+++a/rA+fPnJdMuKisrr1+/fuPGjeSlkpLS//73P7mPqjNITkepfJPKysrz5s1LT08Xi8VPnjyhcl4aGhqWl7+6hDIgIECtB4uMjGz68+JcI3or4FwjQj2QvOcaGwj4JXwBADCYmkzGq5k8kUgAwJDaWiYSCIAuWSni8/kCgQjodKamptTqcdvxy0oEIgA6Q1NT5qFmkUAADEYru9xaa6T1BzqsK+YaHz16ZGhoWFtbS17eunXL1NSUepfP53t6eh44cICqMTY2Pnz48IQJE16+fDly5EgyGEtLS8ll6x7in3/+2blzZ2xsLLVlk7CysvL29haJRLNmzSI1S5YsiYmpvzR8w4YNFy9eJHmFeppvv/126dKlXl5eUvV4GgYhhNCbhcHUYcg4SUynywgC6QypSjqTqcns9DlkpqZOi2006bYjjbT+QI8SFhZGhYyDBg2SDBlTU1OdnJzy8/PJSyUlJQ8Pj2+//ZbBYABA7969g4ODORwOefL06dOzZ8/u9uG3xNDQcPfu3QEBAbt3746MjKSyUaakpKSkpJiYmFhZWZFgNzY2dubMmXPmzCEPaGpqfvjhhwobd/Oo+xWl4GkYhBBCCHUtPp9/8OBB6iU19/by5cvVq1dPnTqVChmHDx9+5cqVHTt2MCQi6wULFkyYMIGUfX19a2pqumvg7aCtre3v75+fn79nz54RI0ZQ9VlZWSkpKdQBamdn56Kiog73oq+vT22apF5K1nQpjBoRQggh1LWio6PLy8uplzNmzACA3377zdTUlOyfAwAajfb1119nZmZ+/PHHUh+n0WihoaGknJ2dHRER0V0Dbzc1NbWVK1fev3//1KlTkmnASZ5wACgtLZ07d24n9weSMFFfXz8vLy8vLw8kwscujSMxa
kQIIYRQF6qtrY2MjKReqqqqfvzxx97e3pMnT6Zu5xs2bNgvv/wSGRnZXJLqjz76aO7cuaT83Xff9cAroSl1dXUlJSWGhoYbN2709fU1NjaWeuDatWthYWEdbp8KE5tWdjXc14gQQgihLnTy5EnJmGbcuHGTJ0++e/cuVbNs2bJdu3ZpaGjI+vQr27ZtO3PmTHV19fPnz/39/aOiorpqxB31/Pnz2bNnp6WlUTOLzfHz81u0aFEHuiB/k3l5eWSikYodm04ukgc60EULcK4RIYQQQl1Ial7t5s2bVMioq6t77ty5AwcOtBoyAoC+vj51qnf//v2ScWcPcfr06dTU1FZDRgCorq7++eefO99jXmNSNZ1vXwrONSKEEEKoq/z222/Xr1+XrKmrqyOFhQsXfv/995qazdzoKMvatWsPHjz45MkTkUjk5eWVnJwsz7F22qRJk3r37k3l39HW1h40aJCuri75kxSomsDAwJs3b3a4L5lBoVQlNQEprwgSo0aEEEKKUVtbKxAI1q9fr+iBIGnZ2dnyOqe8c+fOppUDBgzYu3fvF1980d7WmEzmd999t3z5cgC4cOHC+fPnycGaHmLUqFH//PNPXl7ewIEDBw0a1PRm6m4m9+lGjBoRakyUs8878r6amlqTd6qeVZlyvJdY6CpgVAi9iQoLC6urq/ft26fogSBpNTU11IxgZ+Tm5nK5XKnKOXPm7NmzR0eng1dmOzk5RUREkCsfvb29p02bRqf3oGBm4MCBAwcO7P5+qW2OkmdlMGpEqItVPY0KC+M186bzJy5LAKNGhORj2LBhffv27cmHYd9a5G6YTjZSV1fn6OgoFX36+flt3bq1M80qKSmFhoZOnToVAO7evbt3795Vq1Z1aqCvM8noUCpwlCzLC0aNCDXGHBnJ5ZZJ1qj2qbnp4eDPAwDtPk2nIBFCCEnLycnhcDhSOxrV1NQ2bdrU+catrKzs7OzOnDkDAFu2bOFwOP369et8s6hVGDUiJEXTwtZWqiojnwXAA3CeP01PIWNCCKHXhVgsjoqK8vX1rayslHrL2tpaXlv9QkJCzp07JxQKnz59umXLFioH+NumueMv0DWZdzBqRKg1/IyAlXEAwA53MWnD1bEIIfTWevTo0dKlSy9duiRZqaqqSs7WyPHkyogRI1avXr1r1y4AiIqK+vrrr42MjOTVePcoLi6+efPm/Pnz5dLapEmTqPL8+fPJyw43fufOnU8++aRpPUaNCLXiftI+LgCA5Zr54xU9FoQQ6rkOHjzo6ekpeXMgQR3HtrGxkWN3GzZsOHTo0LNnz2pqary8vJoeu+nhKioqlJSUZJ297BH4fH7TSowaEWpZzr6F0QAAzu6TO3jgDyGE3nBFRUUrVqw4f/48VdOnT5+KigoAMDIyItcGjhs3bvDgwXLsVFNTc8uWLeQoTGJiYmpqqqWlpRzb72qGhoaTJk06fvy4ogcig7W1tYGBQdN6vBsGoZaUpPxI7jSIWWWJv2MhhFBT8fHxY8eOpUJGBoPh6upKQkaQyOndFYkVXVxcqFuePT095ZItCLUAo0aEWlB29Ft/AADLcLYJU9GDQQihnqW4uHj27NkcDqesrD7zhLm5+a1bt/7991/ycvTo0f/88w8pf/7553IfAJ1Op87B3Lp16+DBg3LvAknCqBGhZvGzTrilAgD4+di348YrhBB6C5w6dWrs2LE//fQTeamqqrp169bffvtNWVn57NmzpNLc3JwUtLS0Pvjgg64YhrW19bRp00jZ39+fmuNEXQGjRoSaI/g5kiS5dV/2KWb2RgiheqWlpQsWLLC3ty8pKSE148aN++uvv/z8/JSVlXft2kVWigcNGkQ9YGNjo6ys3EXj2bVrF2n8v//+++6777qoFwQYNSLUHFHRxcBoAAD23qXDcUsjQggBAMDZs2fHjh179OhR8pJOp/v7+9+4ccPExAQAysrKDh06RN5ydXW9fPkyKXfF8jRlzJgxLi4upBwWFpafn
991ffVM6urq3dMRRo0IyfbHQXKvoKXXlyaKHgtCCCleeXn50qVLZ82a9fjxY1IzZsyY69evb9myRUVFhdTs2bPn5cuXANC7d29jY2OyXqykpEQtIneRLVu29O3bFwAEAoGPj0+X9tWjqKurUyGjZLmLYNSIkCxlaTv9UwGA5edjgVsaEULdRpQT4mhnZRd4X6DokTT266+/jh07NiYmhrxUUlLy8fFJT083MzOjnhEKhbt37yZlJyentLQ0Uv7www+1tbW7dHj9+/f39/cn5RMnTvz+++9d2l2P0vQOnq6DUSNCMvBz73IBAFgbl01R9FgQQm1XlvZjhKOdlampqampqZ3LusSMorZ/uCjjQsg6FzsrK1NTU1MrO4+Qk/myQzfBjR8D7eys7NadlJEHuU1jKEs7uc/F0c6U9GTnGHIyo76rqhe/x3FTuekvhK2MNift5DoXx/purOw8Qn68XyZq+w/bdpWVla6urtOmTSsoKCA1I0aMuHbtWnBwsNT1gEePHi0qKgIAJSUlNze35ORkUt8VOXeaWrVq1fDhw0nZw8NDLBZ3Q6c9QbctTwNm+UZIJuZ4F6FwMQCDjv9EEHptlETYDXCTuB+Ex+Nxo4P8uHlbbVu/Qf7+jy6jSEp/6tOp3DBf92xhqOTOZn5Oiv/sqWT/CpTPbRKmtWEMZRkeWmZhjXoCLpc3qTzTggmgAiTLl0qLo70RYTdRspv60Z64XnrGXK7LI2lpaUuWLKGy59BotNWrV2/btk3mjSZUEhxbW1uxWPzw4UPysks3NVJUVVV37NjxxRdfAMCff/4ZFxfn6OjYDf0qVtOJRnV19a6bfcS5RoRko9MxZETodZIV60niKOfw5Lzi4uzrCWwAAAhiu92QNSXYmOB2cjQAOAfH8/IKC/PSgzksAAAIizyfQz2UEeuhYdQQMgKAhvTUSxvGULbPqT5kDIi/mldcWlqcl34pITx8o3HbL5YruuDsxgUAS7/4e8Xl5eXFV2P8AACA63cko82ttEIsFnt6ek6ZMoUKGfX19VNSUsLDw2WGjJcuXbp16xYpe3p6njt3jpQHDx7MYrHkNaqW2dnZUbcnr1u3rqqqqnv67SEqG3RdFxg1IoQQegMUnXGKAwCWX/K+NdZ6OjrDze2P8cgmPO7Fm62uUzOmf8u7V1i1z2eBiZ6urt54n6hoEvBl5r1oeKbsmlMYAHDCk3lXwwEApO9bbn0MgizuSi4AQPDV4o0LLPR0NDV19MZb2a9ZY68jFYGqAAAI+GUlJSX8xgvl/P/yeQAA7B3+C4x1mEymjsUSv3AWAIBGG/6m2iInJ6e4uDg0NJS6bcXZ2TkrK2vKlGY37ezatYsUzMzMLCwsqKixeyYaKaGhoUpKSgDw77//hoSEdGfXbwOcS0EIIfTaE+X8To5CeCx6FdYwTKYFs8CXB+m3/4MP/7NTM+MCOMfw9i0hiRFEKYHTpvqnguXe8hQXpp6JsWSLzP4DAQBA49XOPea0S8nX9T4yH84UZMnI7dL6GKx0byYfBgDgxK+xaPlie2bx3bTAgFX+3PqJTU5wcpSPdf0VVfWr19z7eYLxxgzSOQkspePYjqLRaLW1taQ8ZMiQAwcOTJ8+vYXn79+/T+1i9PLyqqiooI7CdHPUOG7cOCcnJ3JJzPbt21esWKGr20MT7paXl+fn5/fM+2weP37M58uYoseoESGE0Guv6sUzAADgsAYzJKq1DCcC8IB7k8dfM4/jDNxoiHbifDEt3VqXzs+Km+qfCgB+7tZNLwzNiA0hmxxtPhzRUEc3trImJZknVVofAxjcvZAKAO5scwYISopKXwqFKioaA3Q1m3wZx9mYxTV67WtTCNdTfMwBgDnqE2eAaICFo+ap8mLsTeiJ6+b48gAAXNmjW/mbahtDQ0Mmk8nn8xctWhQeHt6vX7+Wnw8NDSWnT4YMGeLg4JCUlFRTUwMAqqqqVlZWchlS2wUGBh4/fryioqKysnLt2rWHDx/u5gG0U
VZW1sOHD7dv367ogcjw5MkTHo/XtB6jRoQQQq+/+uk3fuN4jjF2Egei46CwXAQM+21X2dGTucCz8T0hPPLZHo4TAAA7xq/hnEpJRuLpm6W9oPyP5IPRXB4AsIOTF7f9DvpWxyDIvZYKABDmYJQJkPrqGcuY63FLzKWnxJz3Xt2xzIJZlRPCNvJNhVTfoIyvzoxnAtCNv72+N3riSgCuA0ur4XFW8KVEez0GyEmfPn1Onz796aeftvrk06dPjxw5Qspubm50Op1anp4yZUqfPn3kNaQ2GjRokJ+f3/r16wEgLi7Ozc1NMj1QzzFx4kQVFZX9+/creiAyzJ07d+LEiU3rcV8jQgihN4ZB38Znj0sKeADAMtVnAoCmxc4EdwCAuIUqtAG+PABgJUdxqKiw4NeNK1c6Oa10i25YFzZj6bc/CmtxDA1SAViWbLYlOSaS6jTRJrGo0YFs53jePhcLJh2AOdxzfwIAAHCvPSwj7+qYu2THsxv3u5Bj1fpR8baj0WhtCRkBICoqipw76dOnz/LlywHgwoUL5K1uXp6meHp6Dhs2DADEYrGHh4dCxtAqZWVlGo2m6FHIpqSkJPMGSIwaEUIIvTFyXzae6Os3gAUAvMzHZIvWcPt1eyViLU5MnLXuqzU3gznRXG5yMjdhb7Afieb8bUZZhaTJdwwAwA5IyCsXZqacOZOSWZwewwIA4P2QJLkgyHaZ/epWKvrgdzmN+8iI9TBaSLLKstmWpM53MM0xraRLUja2oLq6OioqipSXLVvWr1+///3vf4WFhaTGxsamm8dDMBiM4OBgUk5LS0tISFDIMLqCvr6+vr6+1EvJmi6FK9TorVBYWHj58uWkpKTu6Y7P53dnsn6EUANm4zyHopLiQgCAwcyGbzudWUudV3LJlkXWws8b3ReqOdzclmSJtrV3WbNonfmoIB6k+u7M+MpifJuXqVsagxBI4GjjMFOvYUQ64+d5sJ2cuADVjVoRCgGoeU6G9lAAACAnc0pSAs2cwgCAHZx8xMeaCaKsxCAW2x8gbvLcCcUpa1o+aCNf8fHxT548AQAlJaU1a9YAALU8PXLkyBEjRrT04a40b9688PDw69evA4Cvr6+tra1UTvLXmr6+fl5eHvkTmoSSBHlLvjBqRG8FGo02aNCgCRMmdE93BQUFN2/e7J6+EEISeP8+EY3Xo77a+LdTUwGAPcG4PuoT5URupFJ583y3Jn4aaiv7i5Bh7BcTHmTmBlD+XCACZtu/LpsfA1PbjAVcHuTnPQFjajW5YWayUUjD7C0Zegr+I7eyVAMA8M9F+AMAcOKP1J+qppvYbsyOLzVaGAapKf/w1+i0I8btLCqz9xdffGFoaAgA58+fJzWKWp6mhIWFTZo0SSwW5+bmhoaGrl27VrHjkRcSL0qFiaSyq7vGqBG9FXR1dY2MjIKCgrqnu3PnzpHNPQih7sHUf48NwAXeud+ybfUaUugUpR1MBQAwG0em6uBGlFcQDwAs3d01wsK4vDB21LzSNc3cplJ4/w4AAGioNJfxv3F2xDaMgTmcBcCDoPibAdZUXFn6IBcAoLxacmE77ujFzSb1M58gyONJHqgmk5LsCY3O6QwYSjJvM1u+VEa+fv7559u3b5Oyp6cnADx79uzGjRukRuFRo7m5+YIFC+Lj4wEgKCho6dKlAwYMUOyQOo/MIFITjVSk2DRkpGYi5Qj3NSKEEHr9MUfbsQEAohe6nrxfBgAi/v0QDpsHAMD5fIIOAIhyTpJb+Djx+0NDo8htKm4Tt+eIAAQZjjSaS8jJ+0XkuImoKONH1/oLBgdKTN2JBAKBQASCl9UAAOXVfBGpEbVtDAxzG2cAgDiHXRfIlTNlFwLdgngAAHM/Hin5AwWxjdb9mCEAgLKsgLlOAADgPI2lCaDCZAIAcN02nswqqX+anxMTTdaF+a3cRShXVGZvc3PzDz/8EACSk5NJYvA+ffpYWFh031CaERQURG6yKS8v37Bhg6KHI395jUnVy
L07jBoRQgi9AZhztsUDAECqwygtmqmpisYo31QAAD/u5vEMACjatcIBAIAVELxgOICuX/peAAAI8oq6AUIoBIj2dRg1WItGMzWlqQw2W0gy4wRc2mRcPyso+NFRRU1NTU2FNmCiGwBAqttgFRU1NTU1+zh+m8YAw2evIudafG2MaKampjQtG38uAIA7d/F46SnPoIVmajQaTYtFwkrneA9jOgAwZvnU3zfjwBpAs7JztLOiaRi5xfEAgBMTOF5uuXdacefOnYsXL5Kyl5cXKVCbGj/77DNVVdVuGkrzhg4d+s0335DyDz/8kJWVpdjxyJHMoFCqUu5nZTBqRAgh9CZgGi8ovh5ff310fYJidnjyva22wwFAkJVEAriYOHeSF5E5fhnXnQUA3DM3ypis/ZdinNmNPsyydE5IL9xo9SqNomrvZu5TZqq2ZQwAAAyTmMKrfqSj+jTKLL+9V8tDbRsHe5z45HjJc9N+8em7F9SvejNNlhSnJziTrD2p3DhuKgAAix3O5cUsaXS+p0tRE416enqzZ88GgNraWiqOVPjyNMXX13fQoEEAUFtbS5bR30gkNJRcsCYr1PKddMR9jQghhN4QOuYLjmQuiCorEYgA6AxNzVdnWBgmLkLhYgCGxB5Fum1opjBIAHQGHUDTask+qyW7BXw+n3yaqcmUmrVj2O/LFO/r+Bjqe9W12Hom059fRvph6mg26oYx/ohQGAN0Oh0WiL+MKCkTAZ2pqclo3IrOePt9Kfbh9Y3IHM5qtNEAACAASURBVG3XKi4uJvsFAcDNzY3k9vvjjz9KS0tJpaJy7jSlrq4eFBS0ZMkSAPj1118TExNtbW0VPSi5IWEidZJa8mC1ZFleMGpECCH0RmFqyj5DTKfLiKvoDEbjl0xNhhxOIDc3BkpL/dCpyJauqdNSFh05DbYjdu/eXV1dDQAaGhrLli0jldTy9Lhx43rU7c+Ojo4REREZGRkA4OPjY2Njo6LSnaeG3hy4Qo0QQgihdhAIBHv27CHlFStWaGjUHybvOTl3pCgpKVEZgv7+++/du3crdjxyJLUALZUAHM9QI4QQQkiRDh8+XFJSAgDKysqrV68mlf/++2/9Rs2eFzUCwOTJk+fMmUPKAQEB1Er6G0PmSWq594Ir1AghhBBqK7FYHBYWRsr29vZ6evXpyqnlaW1t7Q8++EAxg2tRSEhIUlJSTU1NWVnZ5s2bIyIiFDuezMzM1NTUkSNHtv5otxOLxTLv9cGoESGEEEJtlZycfO/ePVL28PCg6qnlaRsbGyWlnriSaWho6O7uHhISAgB79uz5+uuv3333XQWOZ+TIkcXFxevXr1fgGJrj5+cnM5zFqBEhhBBCbUUl3Pnoo4/Mzc1Jubq6+tKlS6TcA5enKevWrYuJiSkpKRGJRJ6entT8qEKoqakNGDDg008/VeAYmrNjxw4GQ8bpsZ742wBCCCGEeiAej0dFh1RmbwC4fPlyZWUlACgrK0+bNk0xg2uDvn37BgYGkvL58+d/+eUXxY7ntYNzjQjJws85GXOQe+Y679kzADBgWX3p5PKllTH+g0FIjsRi8cuXL52cnBQ9ECStoKBARK5JbGznzp2kYGhoyGazqXpqeXrSpElaWlrdMMIOW758eWRkJLk+29PT89atWyTZJGoL/BJEqImyNDutyVyJCh6Px40LOxF+/cwac4WNCqE3zosXL4RC4aFDhxQ9ECRNLBZrakrfcPj48eNjx46Rsru7u+TmxR6bc6cpZWXlXbt2kQnR27dv79+/f+XKlYoe1GsDV6gRkiJK2b6KCwDAjrl6r7yqvDj7qp8lAADXLSiDr9ixIfRG6devX9++fetQz5OYmKimpib13ysyMrKmpob8h5OcIf7777+zs7NJecaMGd32v58O++yzz2bOnEnK/v7+5eXlih3PawSjRoSkVD28ywMAdrj/EgtjJoOpM9zCf0ewokeFEEKK9PLly71795Kys7Nznz59qLeoicYhQ4awW
M1c1d3DbN++nVzBU1JSQu10fH2pq6t3T0cYNSIkG/fOfUFDuaqyviBU0GAQQkixYmNjSWZsOp1OZfYmqJPIPX95mmJsbOzq6krK33///T///KPY8XSYuro6FTJKlrsIRo0ISWF++qUzAED0whmBiWUA/Psn50z2BQBwX2qmoCtfEUJIgcRicXh4OCnPnTt3yJAh1FsVFRVpaWmk/FosT1M2bdpE9m5WV1d7e3srejgdR06vdw+MGhGSNnzBt3s5AACp/mwtGk1jlEMqAIsTnrfdFo+PIYTeQklJSQ8ePCBlyczeAHDx4kWy2bFXr15Tp05VwOA6SktLa9OmTaR8+vTpq1evKnY8HdZty9OAUSNCsui4HMlmN66yd7bXw5gRIfRWojJ7f/LJJ2ZmZpJvUZsap0yZ0p3hi1y4urpSN6B4eHiIxWLFjqcDKhtQNV36XwGjRoSaKMvwoBmRzDtsTn306D95sF1Emoz0ZQgh9EbLyMi4cuUKKXt6ekq9m5ycTAqv0aZGioqKCpWBMiMjIzY2VrHj6aSmEaTcYdSIkJSiwClmYQAA7OTs8jNHzghLeQFsAACu2+RdaSWKHRxCCHUzaqJxxIgRVMIaIiMjo6ioiJRtbGy6e2TyMHPmTGphfcOGDd25R/B1hEtuCDXCz0ry5wEAxPCOWA9nAgBd02TjyXulKqPCAH7/XwFY6Ch4iAgh1F0KCwtPnDhByh4eHpKZvUFiefrdd981MjLq7sHJya5du8aNG1dXV1dUVLRt27buTMSTnZ0dEhLSbd213aNHj2TWY9SIUGP1yXXYrBES56XpA0m6W2YvFQUMCSGEFCQiIkIoFAKAlpbW4sWLpd59HXPuNMVisZYvXx4dHQ0AO3fudHZ2Hjp0aDf0y2Qyy8vL9+/f3w19dYBkSk4KRo0INaLSlwSL3G+2Jx73s9WhAwDkpBwJAgAAvBoGIfT2qKioILEUwP/bu/+wKKu8f+BvatBBG3RIaMWK8SE37FqGFStdN/YJzBUedx3W8HHT0Sf6AXzNFNQiMKlwk/VHAq7LA3Rt2ApmorsO7i7YldBqmuYD5q27UsHFWA0pKKMzCWOMzvePexhmmEEgBwbh/fqjzpy573M+Q13X/bnOuc85SEpKGjVqlP23Fy9e/PTTT8Xy7bXnjrN169bt2rXLYDC0tbW98sorJSUlA9Cp0Wj09vYWzzYcbP72t7999913zvXMGokcSIN/XaRCvAZVGaqADKjUaoNQXCWIXyas+59Qz4ZHRDRgioqKLl++DGDEiBHLli3r8m1FRcWNGzcAyGSyiIgID8TnPgEBAWvWrElNTQXw3nvvLV++fNq0aQPQ74MPPrhmzZoB6Kivzpw547Keq2GIupA9vUdXuiFB/KAptqaMquRcoeWPoVJPRkZEw5OpviIxNjZlY4Wp52vdxmKx5OTkiOXf/va348eP73KBbXp61qxZI0aMGMDQ+sWKFSsmTpwIwGKxdNmTclBRKBQKhaLLR/uafsWskciJJDDu5QKLpa2lpampqampqaWt3bIve3monGPzRIOc/vDOrYtjo8LCwsLCwmIT08tqGnt/c2NNxcb0xNioqLCwsLCo2JSNe865TtNMx3eui42Nik3f4+qtld7EoD+8pyBxcWyY2FPs4o17am6SEbZfOVeo0eRUfNFm30T94Y0pi63dRMWmF5Q1ujWpNJlMtnP2nDfcuX79+oEDB8Ty7T49LRo5cqRtYconn3zy3nvveTaemxPTRIVCodVqtVot7NLHfs0j+RQk6o5ULufQItFtpHlrbMAKTednQRA0hVlpGu36uUE93ly7M3HyokK7CkGo0uSkJte1ZwfbPSqN9ZUZ82bmiG+tGBY47eHaixj0NSl+U3MceoJGI/zMcCqiuzNLrcvwRtoCaT6+NWD6ii7RZuVl1lWvDXbTg922B83MmTPDwsK6fPvJJ5/o9XoAXl5et+meO87i4uIee+yxjz/+GMArr7zym9/8RiodjE8Br
VbrnBeKlf3dNccaiYhoKDi9faWYriXklmubmuqOlYp79GepVhzveSGb6Ux5IYCEDSWCVqfTVm9QKwEAOdv+UW+7qGZ7iu8DHSkjAN+uQy+9iEFfEG9NGTNLDmmbWlqatNUHS3Nz14b49Pqnmk6vFFNGZUK5oG3S1ZWKm8oKGavyanrdys188cUX4jmBcDXQCLvp6SlTpgQGBrql08EgOzvby8sLwFdffWXbAHxQEUcWbf+0pY/OKWN/JJHMGomIaAho3BdfDECZVl6wPDrI3z94WtwuoQgAoPngRI/z1NLZvxPO6toKXl4YGhQYGBT+cl6hmPCd0l7puEb/cXwOAHVuuXAoFwAMfY7BdFqTpAGADYea1i6MCPKXy/2DwqPili+P8++SgZqM+uZmvd4IwMdxy6/Go+8VA4BSU14QHRrkHxgct3ZHkRoANH/6sA9T8t3bt2+fWAgJCXE5lGjLGofG9LTNww8/vGTJErH8+9///vz5856Np0daR11q3N4ds0YiIrrtmeuPZgAAUpY8bquUhv5ygxIAqs+ch6km1svLy8srcftp202V66K8vLy8ogqMgCwoNCTQbjpSNu4eAIDvyM6qXx4sP1Zn2LE8etLYzto+xACcKP8zAKhLlt/svAD9nnWLvXx8/QIC/Px8vWLX7as5Z/et6f/e/zsAqFN/2TnGJ5sTnwkAwtGvb3mHsLa2ttraWrGckpIijr3Z++abb06ftv4Zb+udGl168803xT2Gvvvuu/T0dE+H0y2XSWGXSre/48iskYiIbnttVy4BANTKCfYvovn9x3QA0JwQjNKH1AkAUBivrmg0AzCeLp6ZUQUgLTna+X3Cmu0bxZccY2ZM6qiThERFTwuWwXYaQF9jgP7fFVUAklXTpDA1NzaeO3eusVHv+HKkuSzl8fkZxZ0Vmoz5i7IAwFf83P7tBQGA6jGHTR1kQcHi1Z9pbzVt9PHxKSgoGDt2bFRUlG3gzZ5toHHcuHGPPvroLXY32EyYMEHcggfAu+++e/LkSc/GcyvcPujIrJGIiG5/1jlco2M+J/3Jz9QAoDOYIY37/SHx7b+Y1N1mNP+vOh4AVEVpHetUmmvKCgq2by/YmhgbNjW+EIBqQ/n/hHa3RKXvMZgaPq4CgJz5D0R5+QRMmKBQKCZM8PP2itp+3DqxbD73D1WOAECZXNrUbrG06UrTIq0tdUyIW8c5rzn0I50QqrZWOy3R6Ttvb+9Ro0YdPHjQ5XIQW9YYHR3d5YzBoWH16tX33nsvgBs3brh8rXMQ6vJ2Yz8tox6C/7GJiGi4mjjG8RXA5q8FAMowhQyAPOKt0mQAKF7k7RWQKgBQluepbVnh1x+uTUqKj09aUaixLniZqlT0fQ3tTWPoUAUoI1WqSHHNTVX89JiyRjOAsxWlAAB1cVacvwSQBsat/6BcXOzi6J5xDie1oK1ZAABl4N29X1bzQ1y7dq2yslIsD73padGoUaOyssQTwfDRRx/99a9/9Ww8N2GfHYob8dg+9seqamaNREQ0ZDS0Og70jQ1QAhBOfSvO2gbHpefbJWDqouLowM5FKBOfLNRoyss1pfkb0sRsLiNmctTGw+6NAYAqs1RraD9VuW9f5amm6iIlAAjv7Bc674l8zC5dlTw+P9a5mwsXWx0++4ydCABC46U254vd6KOPPhI35bnzzjtnz57dr3150KJFi2yT7y+99JJtRfkwx/0aaVjw8vIqKir64IMPBqa7K1euOL8/TkT9TwaHcT5zc5MOACbIOp52/r9+JiFJI76yqFw0x+GMUHnwtLniy4Fz4xKXL0mfNjlLQFXqWzX/LyK819PUN4uh3XqWfcz8XwV1ROQf/tsUVXy8BrjWeU9k9EP2Hba3231nM9JxSLOlSZzB9vXu3ye7bXp6xowZcrm8X/vyIC8vr+zs7J///OcA6uvrt27dunr1ak8H5YLz8hf7stuXUTNrpGFhzZo1s2bNGsge77///oHsjogAAMI3F8zhQbZHm
/FMVRUA1SMh1iTMXL9trW0rbyF1fdkT2XNdPwilIWlFuVlTVwCGyyYzZL1/XHYfg+zuqUpoBJzTXkCIbePxjpFJu2XZVUf/bUTnnt/e3g5LtsUUsqFWZ0aorRv910IVAKiUij5kuD/AP/7xD7EwxPbccTZjxowFCxa8//77AH73u989/fTT48aNc2P733//fWNj4969e93Smv3uknv37hU//uDGm5ub29tdLPpi1kjDgnjmlqejIKL+IlP8VAVoIPz9SN3coBBrbePhP1UBwNQp94kVx/NWZQkAIpOTfXNyNEKOKu+3LcunuR4w09X+CwDg6y3p5lnp29cYZMFKQEBWyYnMaFte2fJFAwAY7Fe3aGqbOjNV0wd//rN9P49EqaDRCDl/rdsUHWK9xnx8VwkAKGfc159J4+eff15fb932fKi+1Ghvw4YNGo3GZDJduXIlIyMjLy/PjY2fOXPmzJkzq1atcmObbnTmzBnnSr7XSEREtz/ZQ7EqAChctHRPrR6A2Vi7Ua0SAEA95xF/AOb6PdNXaACoS97Ozs5LAwCsmL6p3gyYahZ7eSVu3FPbqAcAmBtrdi61HjB4j39nHmY2mUwmM0yt1wDAcM1oFmvMvYtBOi0mAQCK52+pEHMvfcW6FVkCACx47McAJv3nTABAzqotFUYApsadKf+lyqqy/62TIsVttwuXZpTpzQDM9RVbYnIEAKqlMTfZB/LW2aan77vvvtDQ0JtfPAQEBQWlpKSI5cLCwn/9619ubDw8PPxXv/rV1UEpIiJiypQpLoK2EBERecLBgwfHjBnjrtYMZ0s6n21Kpa2YpqmzWCwWi26DuIONMlMnXl+dL16gyj1mMVRHurwZyDyo6+ihrUTdzfNfVWToVQwWS5ugtrug84pkTZv1ipZ8FwumxV7yDR2/1T4Su2jT6trd88csKysLDAx0ro+KihJ7SkxMdE9Pg57BYLjnHnHHd8yePduNLa9Zs2YwZ42bN292jpljjURENBTIQhY2HSuxHh8tiOuRVbnlZ9fPDQZgOr0/tQoAioqTxRNVZOHPapKVADT7jutlyrcPFiWoHG5WRiaUVuvWRnUewDJilH0+ad/3iN7EAADS0CLdoTSxI0G8QpmWf8iQPbdjzbQ8cVddrroziVUm5B86mO/YsWxhka4kzZo5WrtJzj1rWB/cn++dGY3Gw4etK8qHw/S0SCaTvfnmm2L5wIEDttc6hycvi8Xi6RiIiGg4qqysnDdv3uXLl93brFHfbDIDEqlc7rCGxWw2AdIu7yiaTSZIOivNJqPRKN4tk8v6vldjTzHYmIx6sR+Zv9xlN+IFEolMLpd2FzxMxmajCYBUJpdJ3Zkw7t+/PykpSafT2Vf+5S9/efLJJwGMHDmypaVFPHZvOLhx40Z4ePipU6cATJ48WRAESXevuvbFq6++eurUKXG1zWATHR2tUqmc37nkahgiIhpSZHJ/lwtCJBIX6ZnE8ewTiVQml7phOUl3Mdj02E+XC1wGD6nM3x3R9pLtpcbHH398+KSMAO64447s7Gxxdv7s2bP5+fnLli3zdFCewRlqIiIi6oHFYikvLxfLw2d62iYyMlKlsr5w+vrrr7t9gPx2wayRiIiIenDy5Mlvv/1WLA/5nRpd2rRpk7e3N4BLly5lZmZ6OhwHo0ePHpiOmDUSERFRD2zT0w8++GBwcPDNLx6SJk2a9OKLL4rlbdu2ffnll56NRzR69Ghbymhf7ifMGomIiKgHtrXDw3B62mbt2rV33303gPb29sFzwODVq1cHrC9mjURERHQzFy9e/PTTT8XycM4ax44d+8Ybb4jlsrKyyspKz8YjGrDpaTBrJCIiopsrLy+/ceMGAJlMFhER4elwPCkxMXHy5MlieeXKleKfxYNs+3Lbavo1ieTOO0RE5BltbW2tra3z58/3dCDU1bfffmsymWwfbS81zpo1S1wRMmxJJJK33npLXA906tSpd95557nnnvN0UFYDMFXNrJGIi
DxDr9ebzeb6+npPB0JdXb58ub29XSxfv379gw8+EMvDeXraJiYmZvbs2QcOHADw6quvLliwQCYbuF0zPYtZIxEReUZgYKCvr29NTY2nA6GuxLNhxPLRo0f1ej0ALy+vmJgYj8Y1WGzZskWpVF6/fv3ChQvr16/Pysr6AY1cu3btn//856OPPur28G5dQ0NDdHS0cz2zRiIiIuqWbXp6ypQp48eP92wwg8RDDz2UmJiYl5cHICcnJzExUaFQ9LURk8l09erVhoaGfgjwVplMpmvXrjnXM2skIiKibnHPHZfeeOONnTt3Xr582WQypaam/oDjpMeMGfPEE08UFhb2R3i36KmnnvL19XWu5xpqIiIicu2rr746ffq0WGbWaG/cuHEZGRlieffu3UeOHPFsPAODWSMRERG5Zhto9Pf3f+SRRzwbzGDzwgsvPPDAA2I5JSXFYrEMQKcKhcJ+NlzRYQC6BrNGIiIi6o7tpcaYmJg77mDO4GDEiBGbN28WyydOnNixY8eAdS2miQqFQqvVarVa2KWP/ZpH8v8AIiIicsFkMtmOPxF3KKQuVCpVZGSkWE5PT29tbR2ATm1ponNlf2PWSERERC589NFHYhokkUhmz57t6XAGqS1btoijsDqdbuPGjf3dnZgd2v5pG1Z0Hlzsj+FGZo1ERETkgm16esaMGWPHjvVsMIPWT3/602eeeUYsb9q0SafTDWTvWkddatzeHbNGIiIicsGWNXJ6+ubWrVt31113AWhtbU1LSxuYTl0mhV0q3f6OI7NGIiIi6qq9vd22ATX33Lm5H/3oR+np6WK5uLj4xIkTAx9Dl3lqMVl0+6Ajs0YiIiLqynY0yP333/+Tn/zEs8EMfikpKUFBQQAsFktKSsqA9Ws/lCimibaP9mV3YdZIREREXZlMJrHA6enekEqlGzZsEMtHjhzZvXu3Z+PpJ8waiYho6NDXH96YsjgqLCwsLCwsKja9oKzR1OubjY0V2zcmLo4Vb49dnLLz8DmXF5rOHV6XuDgqKnZPrdE5hMM7t3a0ERabmF5W0+gyzq3pKeJVUVGxi1M2Hu9DoICxfs/W9MVRHb0sTtlZWWvuw/09aG1t/f7778Uyp6d7acGCBTNmzBDLqampLs9xdrsuE9BdNgB3+4IYnkNNRERDRPPxrQHTV9hVCEKVJisvs656bXCPjzvT6URfpf2RwIIgaIpzTpTWZccF21UbKwsyZibliB9iW7ukas1bYwNWaBwbKcxK02jXzw2yVdZsT5kan2PfFao0xX7/aVk7rcffCAD6w7F+v7DrxBrq7txj+5b3roWefPbZZ2JBKpVGRUW5pc3hIDs7e/r06RaLRavVZmdnv/LKKwPWdZcEsb+2b7QQERF5wsGDB8eMGeO25toEtfhgUyaUC9omXV1ppkqsUOVW93h3e10JAEQmlBwUdE26as2Gjudkcl17x0Ut1cmOz9Dc6hb7RoQiawgJueXapqa6Y6XWCKA6ZrBeY6jO76jLPHRW29LSoq2rLs3PLa1u6t3vbD+YphTvLzp01tBmaKo7lGbdZ1pVbej5/t6YOXOm2GJMTIx7Whw21Grr/wMymez8+fM3uXLevHmjRo0aOyj5+Pg8//zzzjFzrJGIiIaCxqPvFQOAUlNeEB0IAHFrdxR94RtfDM2fPmxcHh5409slwTFC9dlJ4SFSAEDg3Jerc49OXaEBGi62IVgGAMYvPswBgIRyYdm5tcokTZc2GvfFFwNQppUXLI8GAP+4XUKRjzIe0HxwonFaVCBg3JudBACRuU37lvsDAORyeVBwuHNIZpNRbzRBIpHL5XZP67Yv/y0AUOVmPB0RAkAWHJGxeUPW1NQ+/bluwmKxVFdXi2W+1NhXWVlZe/fubWtrMxqNr7766ttvv93dlYGBgcHBwYmJiQMZXi9lZ2ePHz/euZ5ZIxERDQGm/3v/7wCgTv1lZ3oomxOfieIMCEe/NuLSzlhlkgZIqDYUhMsAAI2VURNmVgG51
S3Lw+Wh4XL7FscE3iM24t1R4/PjueXl//nz6GkymLY7RWCuP5oBAEhZ8ritUhr6yw1KpAqoPnMeUYHQf/bnYgAoykvw7/7HmJtrtqxcnVpc1VGhzCwtSosLt39ma/5Va0K4mOO2XbVWtnffZu+dP3/+rrvuunz5MvhSY9/de++9L730UmZmJoB33nnnxRdfVCqVLq8cM2ZMUFBQfHz8wAbYK++//76vr69zPVfDEBHREND+7QUBgOqxUKldrSxIfCVR85nWOOkJceqwMD6rAgBg3J46swoA0lRKh3wRAPQ1G+cXAoBq5iSZtU4iD4mOniYDXKZnbVcuAQDUygn2Ifj9x3QA0JwQjICx4WQVACRHPCA165sbz507d65Rb3R8OVJ/PC5gqpgyRqrVkQAgZMyf+sLOWvE3PfHfCQBQuOi/1pXpAWPtnid/kQoAyc9MleHWjR8/Pi8vLyAg4N133504caIbWhxmXn755cDAQAA3btwYyF14BgCzRiIiGgpGiv+65pDPSSeEqq3VZmlw3LFcFQAhK2Z7vVl/OCe+GADyhbQgcRDP3Fi2vWD79u1b1yWG+U0Vc8byt57sbSZmHZM0OmaU0p/8TA0AOoMZ0Fr3f855IC7K2y9ggkKhUEzw8/WOStnZaE0dzRWbEjQAoDqobavcsaPS0lKaHAmgcFFBvRkAghf+Ll8NAFUZKj8vL9/J86sApTpXu2muGycQJRLJkiVL3NfeMDJ69OisrCyxXFlZWVZW5tl43IhZIxERDR33jBvl8LmtWQAAZeDdPgCmLX1LXM4S/4C33y8yACjTyhNDO9LCtq/XxifFx8evyCgUrFVTFX4+fQxh4hhvh8/NXwsAlGEKh+xTUwUoVWqVOHlZlbNowksVZgAmIT9LAJBW/nZUkDhmKY/L2qwCgJzDdeLuPP6JO+pUcBCXEBfEl84GjcWLF0+dOlUsr169ur3dLe8OeB6zRiIiGjouXGx1+OwzdiIACI2X2gBAEpxxLNfu64SitOjOT7IfF5ZrysvLS0vy09RiOpcx2S/ucHOfQmhodcwQxgYoAQinvrXb2lFdWq1tt5zat2PfKUtTSYISAHLyBSMAbzG5zEpdmZ6eYpWWLR7tZ2htAwB9TYrXA+JSHJXamj1m/GJC7NbDbtyykW6Fl5dXdna2WP7yyy+3bdvm2XjchVkjERENISMdB/pamgwAAF9v60CcfJoqwfZt2oJwhwFA+bToudHR0XELE9fvOHVWkwYA0Lz1Xk1fIpDBIQRzc5MOACbIJED7NSMAqGb+Ktw2Mug/b9lS29Um7elisSQUZ2XldCgWrD9OAjSue3xqDgCoyusM+3bsa28RxC2GNCt+saWPGS71n4iIiLi4OLG8bt26S5cu2X/7+eefu3EAcvTo0e5q6uaYNRIR0VAgHsTRUKuzH2/Tfy1UAYBK2TE/fK6soHMr76yUsnPdDs+FzE3LjwQAw5XLfQlE+OaCfZvGM1VVAFSPhMiAH/14KgA0fH3BxY0yAFJFiPgiZn61rs3Q4qgtMVRmPL0/QwCAImFHdLAMgEQeunbPWXHm/ejJr/sSKvWvjRs3jhw5EoBer3/99ddt9cuWLQsJCcnPz+/+1t4aPXq0LWW0L/cTZo1ERDQEyB6JUgEQcv5a15mzmY/vKgEA5Yz7xKRRf3yFKgtAZEKyCgAEVfyf9N01adbViFvfjLzJC/bWNAAABy5JREFUk7jzXUKZ4qdim38/Utf5fePhP1UBwNQp9wHwU9wHAELGkfrOKFt04rmFxnYA7RAnsmu/aZfK5I6kgG31tko5yW6YVHKP+PalrMtQK3nUxIkTk5OtG8Pn5+efPXtWLJeWlgIwGAwWi+XWe7l69WrPF7kJs0YiIhoKJkXGAAAKl2aU6c0AzPUVW2JyBACqpTH+AGDa88p0DQAk5BVkv12eBgBVSZll5wCcLljsFZa453Ct3gQAZmPjzoyl4qjkpHvH2Hoxm0wmk8lsNhoMAGC6ahSrz
ABkD8WqAKBw0dI9tXoAZmPtRrVKAAD1nEf8AUgfmC7Ojy9ataXeCAD62rKlMVkAoIr9sQyQTfpvNQDkqFaU1XZMN5uN9cfLtu88bgK8x4jJomb1prLmjsyzvnKHuGTX+VRs8qz09PSAgAAAZrN59erVYqXtjG+3GLDpaYAnChIRkYe4+URBi6FE3fl0s9tYOU08ErDpkPWQwMyDOovFYrG05HecOHisxVKdG9l5r/22zMoNuo4O2s4WdfcwLRIMFovFcLbE7sbORtI0dbYobacOOl6iLK1rs/ZSV2rXSKQq0hbYhhaLxWIxFNktn1ap1ZGdjSQIbe75U5aVlQUGBrqnrWGvoKDA9l/owIEDFotFJrOOE8+ZM+eq+9h6ufWmIiIiNm/e7PxbONZIRERDg2xhka4kzZqTictHVMm5Zw3rgyUAjH9/KxUA1EXJUeLpMfJnczVKANAcb9ArF79dlCkuZoYgiHcrEzaU6qpf7jxrxntEt517A4AsZGHTsRJ1RytiCLnlZ9fPDbZdGPr028eK0mB3iVKVdlD7cVywdW9waXCcoe5gsrgnj1ClqaoCoIxU52piZAAge3qPrnSDdUmPpri4SrD+UqHljw5bnNPg8Oyzz4aGhorllStXXr9+/fr16/3RkXP66HZeFnfMqRMREfVVZWXlvHnzxJPr3MlkbDaaAEhlcpnUfg9Ds8kEqdRxV0Oz2QRIJbZKs9FoNJnMkEhk1hcJfwijvtlkBiRSuVzmehdFs0mvN5oBiVQml7nux2zU68VWZDKZ1LkZk15vNJsBSGRyuYvvb8H+/fuTkpJ0Op07Gx1O9Hr9a6+95uPjs2rVqoCAgA8//HDWrFniV3l5ecnJyeIk9Zw5c3bv3u3RSF2Ljo5WqVSrVq3qUs8tQYmIaGiRyvylLs9zkUid0zOJxLFOIpPJZbd8Lp9M7t9DGxKp3L+HpFTSQyvSW0hrqX9lZmb+4Q9/AFBQUPDaa68tW7bs17/+9f79+wFkZGSYzdY3UnU63a5duzwZaDeamppc1jNrJCIiInKnsWPHioUrV66sXLmysLBw1apVFRUV7e3tFy9etF3W0NDw0ksveSjGHowaNcq5klkjERERkTutWbPm+vXrmzZtMplMAGpra59//vmJEyc2NDTYXzZy5MiHH37YQzHezLFjx1pbW53rmTUSERERuZNEIsnMzHzmmWdWr169d+9esbJLygggLCwsLy9vwKPr2VNPPeWynmuoiYiIiNxPoVDs2bOnsrLStob6dseskYiIiKi/REZGnjx5ctu2bX5+frfemkKhUCgUXT7a1/QrZo1ERERE/ejOO+984YUXvvzyy6VLl95xhxtSLzFNVCgUWq1Wq9XCLn3s1zyS7zUSEZHHtLa2LlmyxNNRUFfffPONp0MYgvz8/P74xz8mJSUlJSUdPXr0B7ej1Wqd80Kx8pZj7AGzRiIi8oywsLDly5fb9q6jwcPPz2/evHmejmJoCg0NPXLkyN69e5999lkvL6++3i6OLIo5on2m6JwyihfcesD2mDUSEZFn3H333Zs3b/Z0FEQe8OSTT544ceLkyZO32E6XvLA/MkV7zBqJiIiIBppEcks5mMvs0DmJvMnFPwCzRiIiIqLbm23C2rY4Bu5LFm24hpqIiIjotmS/LKbLm479sT6GWSMRERER9Ywz1ERERES3pe5eZATXUBMRERENDRaL5erVq3V1dW5p7cMPP7SV6+rqxI8/uPHW1laLxeJcz6yRiIiIaKBVV1cfOXLkiSee8HQgrj300EPOlcwaiYiIiAZaeHi42WzOy8vzdCAuLF68eMqUKc71zBqJiIiIBtodd9zh4+Nz7733ejoQF0aOHOnyvGyuoSYiIiKinjFrJCIiIqKeMWskIiIiop4xayQiIiKinjFrJCIiIrqNjR49emA64hpqIiIiotuSfb4olq9evdp/3XGskYiIiOh21a9pYhfMGomIiIhuVwM2PQ3OUBMRERHdppwHGkePH
t1/o4/MGomIiIhuewMwVc0ZaiIiIiLqGccaiYiIiDzg5MmTzz33nKejcOHzzz93Wc+skYiIiGigqVSq7777ztNRuLZw4cLZs2c713tZLJaBj4aIiIiIbi98r5GIiIiIesaskYiIiIh6xqyRiIiIiHrGrJGIiIiIevb/ATP/dgtMEZZRAAAAAElFTkSuQmCC) At the implementation level, the array essentially contains a single pointer to one contiguous block of data. The Python list, on the other hand, contains a pointer to a block of pointers, each of which in turn points to a full Python object like the Python integer we saw earlier. Again, the advantage of the list is flexibility: because each list element is a full structure containing both data and type information, the list can be filled with data of any desired type. Fixed-type NumPy-style arrays lack this flexibility, but are much more efficient for storing and manipulating data. ``` ``` **Tuples** ``` rollno=(101,102,103,104) type(rollno) rollno[-2] rollno[2]=105 ```
github_jupyter
``` import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from DataGenerator import DataGenerator from Model import build_hf_model_only img_size = 64 dg = DataGenerator() dg.gen_one() t_img = tf.convert_to_tensor(dg.output_img, dtype = tf.float32) ds = tf.data.Dataset.from_tensor_slices(([t_img], [t_img])) for i in range(30000): dg.gen_one() t_img = tf.convert_to_tensor(dg.output_img, dtype = tf.float32) ds = ds.concatenate(tf.data.Dataset.from_tensor_slices(([t_img], [t_img]))) print(ds) BUFFER_SIZE = 100 BATCH_SIZE = 40 EPOCHS = 40 ds = ds.shuffle(buffer_size=BUFFER_SIZE, reshuffle_each_iteration=False) ds_valid = ds.take(BATCH_SIZE).batch(BATCH_SIZE) ds_train = ds.skip(BATCH_SIZE).batch(BATCH_SIZE) from Model import build_hf_model_only from Model import intensity_loss from tensorflow.keras.callbacks import LearningRateScheduler from tensorflow.keras import backend as K def lr_scheduler(epoch, lr): if (epoch % 10 == 0) & (epoch != 0): return lr/2 return lr model = build_hf_model_only(input_shape=(img_size, img_size, 1)) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss=intensity_loss, metrics='accuracy') callbacks = [LearningRateScheduler(lr_scheduler, verbose=1)] history = model.fit(ds_train, epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks=callbacks, validation_data=ds_valid, shuffle=True) hist = history.history x_arr = np.arange(len(hist['loss'])) + 1 fig = plt.figure(figsize=(12, 4)) ax = fig.add_subplot(1, 2, 1) ax.plot(x_arr, hist['loss'], '-o', label='Train loss') ax.plot(x_arr, hist['val_loss'], '--<', label='Validation loss') ax.legend(fontsize=15) ax = fig.add_subplot(1, 2, 2) ax.plot(x_arr, hist['accuracy'], '-o', label='Train acc.') ax.plot(x_arr, hist['val_accuracy'], '--<', label='Validation acc.') ax.legend(fontsize=15) plt.show() from DataGenerator import DataGenerator import matplotlib.pyplot as plt dg = DataGenerator() dg.gen_one() img = dg.output_img img_ds = 
tf.data.Dataset.from_tensor_slices([[img]]) output_net = model.predict(img_ds) output_net = output_net.reshape(img_size, img_size, 1) fig = plt.figure(figsize=(10,10)) ax1 = fig.add_subplot(2, 2, 4, projection='3d') X, Y = np.meshgrid(range(0, dg.last_g.shape[0]), range(0, dg.last_g.shape[1])) ax1.plot_surface(X, Y, output_net[:, :, 0], cmap='jet') ax1.set_title('Predicted Surface') ax2 = fig.add_subplot(2, 2, 2) pred_img = dg.reprojector.intensity_reproject(img_array=img, h_map_array=output_net[:, :, 0]) ax2.imshow(pred_img, cmap='gray') ax2.set_title('Predicted Image') ax3 = fig.add_subplot(2, 2, 1) ax3.imshow(img, cmap='gray') ax3.set_title('True Image') ax4 = fig.add_subplot(2, 2, 3, projection='3d') ax4.plot_surface(X, Y, dg.last_g, cmap='jet') ax4.set_title('True Surface') ```
github_jupyter
## Making Recommendations Based on Correlation ``` # Import Dependencies import pandas as pd import numpy as np ``` These datasets are hosted on: https://archive.ics.uci.edu/ml/datasets/Restaurant+%26+consumer+data They were originally published by: Blanca Vargas-Govea, Juan Gabriel González-Serna, Rafael Ponce-Medellín. Effects of relevant contextual features in the performance of a restaurant recommender system. In RecSys’11: Workshop on Context Aware Recommender Systems (CARS-2011), Chicago, IL, USA, October 23, 2011. ``` # Importing the data set rating_df = pd.read_csv('./data/rating_final.csv') cuisine_df = pd.read_csv('./data/chefmozcuisine.csv') geodata = pd.read_csv('./data/geoplaces.csv', endoding='mbcs') rating_df.head() cuisine_df.head() geodata.head() # Extract just the placeID and place names from geodata dataframe places = geodata[['placeID', 'name']] places.head() ``` ### Grouping and Ranking Data ``` # Calculate the mean rating for each placeID rating = pd.DataFrame(rating_df.groupby('placeID')['rating'].mean()) rating.head() # Calculate the count of ratings and assign to the dataframe rating['rating_count'] = pd.DataFrame(rating.groupby('placeID')['rating'].count()) rating.head() # Print description of the data frame rating.describe() # Sort the data frame based on the count of rating rating.sort_values('rating_count', ascending=False).head() # Check the name of the business places[places['placeID']==135085] # Check the cuisine of the business cuisine[cuisine['placeID']==135085] ``` ### Preparing Data for Analysis ``` # Creating crosstab table shows rating for each user and place places_crosstab = pd.pivot_table(data=rating_df, values='rating', index='userID', columns='placeID') places_crosstab.head() # Check user rating at a specific place tortas_ratings = places_crosstab[135085] # Print all user ratings equal or above zero tortas_ratings[tortas_ratings >= 0] ``` ### Evaluating Similarity Based on Correlation ``` # Calculate the Pearson standard 
correlation to find similar place like Tortas similar_to_tortas = places_crosstab.corrwith(tortas_ratings) # Create a data frame to store the result corr_tortas = pd.DataFrame(similar_to_tortas, columns=['PearsonR']) corr_tortas.dropna(inplace=True) corr_tortas.head() # Create a summary dataframe tortas_corr_summary = corr_tortas.join(rating['rating_count']) tortas_corr_summary[tortas_corr_summary['rating_count'] >= 10].sort_values( 'PearsonR', ascending = False).head(10) # Check the top seven placeIDs that are similar to Tortas places_corr_tortas = pd.DataFrame([135085, 132754, 135045, 135062, 135028, 135042, 135046], index=np.arange(7), columns=['placeID']) summary = pd.merge(places_corr_tortas, cuisine, on='placeID') # Check the name of the similar restaurant place[place['placeID']==135046] cuisine['Rcuisine'].described() ```
github_jupyter
``` import numpy as np import os from time import time import matplotlib.pyplot as plt import pandas as pd start = time() # model parameters Sigma = [0.5, 0.6, 0.7] theta = [0.01, 0.02, 0.03] kappa = 6.21 rho = -0.5 T1 = 10 S0 = 2 # initial conditions # S0 V0 = [0.01, 0.05, 0.07] # option parameters int_rates = [0, 0.05] K = np.arange(1.6,2.4,0.2) time_maturity = np.arange(1.5,2.5,0.5) num_simulations = 500 n = 500 ``` Stock prices simulation: ``` def stock_price_generator (t_max, n ,m, S0, k, V0, sigma, theta, kappa, rho): dt = t_max / n # Brownian motions: dw_v = np.random.normal(size=(m, n)) * np.sqrt(dt) dw_i = np.random.normal(size=(m, n)) * np.sqrt(dt) dw_s = rho * dw_v + np.sqrt(1.0 - rho ** 2) * dw_i # Perform time evolution s = np.empty((m, n + 1)) # initialisation stock prices vector s[:, 0] = S0 v = np.ones(m) * V0 for t in range(n): dv = kappa * (theta - v) * dt + sigma * np.sqrt(v) * dw_v[:, t] ds = r * s[:, t] * dt + np.sqrt(v) * s[:, t] * dw_s[:, t] v = np.clip(v + dv, a_min=0.0, a_max=None) s[:, t + 1] = s[:, t] + ds return s def find_expected_payoff(stock_path, k, r, t_max): payoff = max(stock_path[-1] - k, 0) # one payoff for each simulation c = payoff * np.exp(-r * t_max) # in case r=0, this step is useless return c df = pd.DataFrame(columns=['price', 'stock', 'sigma', 'strike', 'maturity', 'initial_vol', 'theta','interest_rates']) # fill the dataset for v0 in V0: print(f"vo {v0}") for sigma in Sigma: print(f"sigma {sigma}") for theta in Theta: print(f"theta {theta}") for k in K: print(f"k {k}") for t in time_maturity: for r in int_rates: s = stock_price_generator (t, n, num_simulations, S0, k, v0, sigma, theta, kappa, rho) for stock_path in s: p = find_expected_payoff(stock_path, k, r, t) new_row = { 'price':p, 'stock':stock_path[T1], 'strike':k, 'maturity':t, 'sigma':sigma, 'theta':theta, 'initial_vol': v0, 'interest_rates':r } #append row to the dataframe df = df.append(new_row, ignore_index=True) df.head() zero_values = sum(x == 0 for x in 
df.price) non_zeros_values = sum(x != 0 for x in df.price) print(f"This database contains {len(df)} options. \n{zero_values} have zero value.") plt.hist(df.price, bins =100); len(df) df.to_csv('dataset3.csv') ```
github_jupyter
# Image Captioning with LSTM This is a partial implementation of "Show and Tell: A Neural Image Caption Generator" (http://arxiv.org/abs/1411.4555), borrowing heavily from Andrej Karpathy's NeuralTalk (https://github.com/karpathy/neuraltalk) This example consists of three parts: 1. COCO Preprocessing - prepare the dataset by precomputing image representations using GoogLeNet 2. COCO RNN Training - train a network to predict image captions 3. COCO Caption Generation - use the trained network to caption new images ### Output This notebook prepares the dataset by extracting a vector representation of each image using the GoogLeNet CNN pretrained on ImageNet. A link to download the final result is given in the next notebook. ### Prerequisites To run this notebook, you'll need to download the MSCOCO [training](http://msvocds.blob.core.windows.net/coco2014/train2014.zip) and [validation](http://msvocds.blob.core.windows.net/coco2014/val2014.zip) datasets, and unzip them into './coco/'. The [captions](http://cs.stanford.edu/people/karpathy/deepimagesent/caption_datasets.zip) should be downloaded as well and unzipped into './captions/' ``` import sklearn import numpy as np import lasagne import skimage.transform from lasagne.utils import floatX import theano import theano.tensor as T import matplotlib.pyplot as plt %matplotlib inline import json import pickle ``` Functions for building the GoogLeNet model with Lasagne are defined in googlenet.py: ``` import googlenet ``` We need to download parameter values for the pretrained network ``` !wget -N https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/blvc_googlenet.pkl ``` Build the model and select layers we need - the features are taken from the final network layer, before the softmax nonlinearity. 
``` cnn_layers = googlenet.build_model() cnn_input_var = cnn_layers['input'].input_var cnn_feature_layer = cnn_layers['loss3/classifier'] cnn_output_layer = cnn_layers['prob'] get_cnn_features = theano.function([cnn_input_var], lasagne.layers.get_output(cnn_feature_layer)) ``` Load the pretrained weights into the network ``` model_param_values = pickle.load(open('blvc_googlenet.pkl'))['param values'] lasagne.layers.set_all_param_values(cnn_output_layer, model_param_values) ``` The images need some preprocessing before they can be fed to the CNN ``` MEAN_VALUES = np.array([104, 117, 123]).reshape((3,1,1)) def prep_image(im): if len(im.shape) == 2: im = im[:, :, np.newaxis] im = np.repeat(im, 3, axis=2) # Resize so smallest dim = 224, preserving aspect ratio h, w, _ = im.shape if h < w: im = skimage.transform.resize(im, (224, w*224/h), preserve_range=True) else: im = skimage.transform.resize(im, (h*224/w, 224), preserve_range=True) # Central crop to 224x224 h, w, _ = im.shape im = im[h//2-112:h//2+112, w//2-112:w//2+112] rawim = np.copy(im).astype('uint8') # Shuffle axes to c01 im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1) # Convert to BGR im = im[::-1, :, :] im = im - MEAN_VALUES return rawim, floatX(im[np.newaxis]) ``` Let's verify that GoogLeNet and our preprocessing are functioning properly ``` im = plt.imread('./coco/val2014/COCO_val2014_000000391895.jpg') plt.imshow(im) rawim, cnn_im = prep_image(im) plt.imshow(rawim) p = get_cnn_features(cnn_im) CLASSES = pickle.load(open('blvc_googlenet.pkl'))['synset words'] print(CLASSES[p.argmax()]) ``` Load the caption data ``` dataset = json.load(open('./captions/dataset_coco.json'))['images'] ``` Iterate over the dataset and add a field 'cnn features' to each item. This will take quite a while. 
``` def chunks(l, n): for i in xrange(0, len(l), n): yield l[i:i + n] for chunk in chunks(dataset, 256): cnn_input = floatX(np.zeros((len(chunk), 3, 224, 224))) for i, image in enumerate(chunk): fn = './coco/{}/{}'.format(image['filepath'], image['filename']) try: im = plt.imread(fn) _, cnn_input[i] = prep_image(im) except IOError: continue features = get_cnn_features(cnn_input) for i, image in enumerate(chunk): image['cnn features'] = features[i] ``` Save the final product ``` pickle.dump(dataset, open('coco_with_cnn_features.pkl','w'), protocol=pickle.HIGHEST_PROTOCOL) ```
github_jupyter
# Project convex optimization: oligomerization of proteins *Selected Topics in Mathematical Optimization: 2017-2018* **Michiel Stock** ([email](michiel.stock@ugent.be)) ![](Figures/logo.png) YOUR NAME(S) HERE ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` In this project we will study the association of protein complexes through the principle of **entropy maximization**. Consider the following three proteins: A, B and C: ![Three monomeric proteins.](Figures/monomers.jpg) Each of these proteins has sites which it can use to reversible bind to other proteins. Two types of interactions can be distinguished: - **weak type I interactions**: protein A has one type I donor site, protein C has one type I acceptor site and protein B has both an acceptor and a donor site. In the figures, these sites are indicated as a triangle sticking in (acceptor) or out (donor) the protein. - **strong type II interaction**: protein A has one type II acceptor site and protein C has one type II donor site. These sites are depicted as two bumps sticking out or pressing in the protein. These sites allow for dimerisation of the proteins. All possible homo- and hetero-dimers are shown below. ![The proteins can form dimers according to their sites.](Figures/dimers.jpg) But tri-and tetramers can also be formed: ![One trimer and three tetramers.](Figures/oligomers.jpg) Let us use one (monomer), two (dimer), three (trimer) and four (tetramer) character long strings to denote all possible conformations. Here we use the convention that the string $P_1P_2P_3P_4$ represents the tetramer $$ \left (\begin{array}{cc} P_1 & P_2 \\ P_4 & P_3\end{array} \right ) \,. $$ Note that in our notation, we have $$ P_1P_2P_3P_4 = P_4P_1P_2P_3 = P_3P_4P_1P_2 = P_2P_3P_4P_1\,, $$ but $$ P_1P_2P_3 \neq P_3P_1P_2 \neq P_2P_3P_1\,. $$ The following code identifies all unique mono-and oligomers. 
``` monomers = set(['A', 'B', 'C']) dimers = set(['AC', 'CB', 'CA', 'BA', 'BB']) trimers = set([dm1 + dm2[1] for dm1 in dimers for dm2 in dimers if dm1[1:] == dm2[:-1]]) tetramers = set([dm1 + dm2[2] for dm1 in trimers for dm2 in trimers if dm1[1:] == dm2[:-1] if dm2[-1] + dm1[0] in dimers]) # some tetramers are counted multiple times, because our encoding is not unique # let us remove duplicates lowest_lexo = lambda polymer : sorted([polymer[i:] + polymer[:i] for i in range(len(polymer))])[0] tetramers = set(map(lowest_lexo, tetramers)) compounds = monomers | dimers | trimers | tetramers compounds = list(compounds) compounds.sort() monomers =list(monomers) monomers.sort() print('There are {} unique compounds'.format(len(compounds))) monomers compounds ``` Oligiomerzation is a process that releases energy. The change of heat in a system is quantified by the enthalpy. Formation of a type I bonds results in a change of enthalphy of $\Delta H$ of -1 Joule / mol (heat is released when two monomers bind), while type II bonds have a change of enthalpy of $\Delta H$ of -2 Joule / mol. The following piece of code returns the energy the formation enthalpy of a compound. 
``` binding_enthalpy = {'AC' : -2, 'BA' : -1, 'BB' : -1, 'CA' : -1, 'CB' : -1} def get_enthalpy(compound): if len(compound) == 1: return 0 # no bonds else: enthalpy = 0 for i in range(len(compound) - 1): dimer = compound[i:i+2] enthalpy += binding_enthalpy[dimer] if len(compound)==4: enthalpy += binding_enthalpy[compound[-1] + compound[0]] return enthalpy enthalpies_dict = {compound : get_enthalpy(compound) for compound in compounds} print('Formation enthalpies:') for compound, enthalpy in enthalpies_dict.items(): print('{} => {}'.format(compound, enthalpy)) enthalpies = [enthalpies_dict[comp] for comp in compounds] # as list, same order as compounds enthalpies = np.array(enthalpies).reshape((-1, 1)) # as Numpy array ``` Since all the association and dissociation reactions are assumed to be reversible, a mixture of the three monomers should give rise to all possible mono- and oligomers (though not necessarily in equal quantities). We will assume that the system will go to an equilibrium where the Gibbs free energy is lowest. The Gibbs free energy depends both on the entropy as well as the enthalpy of the system. Let us denote $x$ as the vector containing the concentrations of the 19 possible species. The entropy of the system is given by $$ S(\mathbf{x}) = - \sum_{i=1}^{19}x_i \log x_i\,. $$ The enthalpy of the system is given by $$ H(\mathbf{x}) = \sum_{i=1}^{19}x_i h_i\,, $$ with $h_i$ the formation enthalpy of compound $i$. The Gibbs free energy is then given by $$ G(\mathbf{x}) = H(\mathbf{x}) - T S(\mathbf{x}) $$ with $T\geq0$ the temperature of the system (in Kelvin). Systems with a constant temperture go to a state with a minimal Gibbs free energy. Note that: - By low temperatures, enthalpy dominates. The enthalpy can be raised by the formation of oligomers with multiple low-energy bonds. - By high temperature, entropy dominates. High entropy can be obtained by having many different species at a low concentration. 
The concentration of each species can not be freely chosen, vector $x$ has two types of constraints: - **equality constraints**: there is a conservation of mass: the total quantity of A, B, C in all the species should remain constant. These form four linear equality contraints. - **inequality constraints**: since $x$ is a vector with concentrations all elements should be larger than zero: i.e. $x_i \geq 0$. The equality constraints are given by the stoichiometric matrix $S$, a $3\times 19$ matrix which quantifies how many of individual molecules $A$, $B$ and $C$ are in a complex and $c$ is the vector of length 3 containing the total of $A$, $B$ and $C$ in the system. The optimization problem can thus be formulated as follows: $$ \min_\mathbf{x} G(\mathbf{x}) = H(\mathbf{x}) - T S(\mathbf{x}) $$ $$ \text{subject to } \mathbf{x} \succeq 0 $$ $$ S\mathbf{x}=\mathbf{c}\,. $$ In this project you will have to find the equilibrium concentrations by minimizing the Gibbs free energy by a given temperature. > NOTE: We have chosen **not** to explicitly add the inequality constraints in the optimization problem (i.e. using the logaritmic barrier function). The entropy term will ensure that a minimizer of the Gibbs free energy will always have strictly positive values of $\mathbf{x}$, i.e. concentrations greater than 0. This can easily be seen by noting that $\partial S(\mathbf{x})/\partial x_i|_{0}=-\infty$. To circumvent numerical problems, at the end of every Newton step, we will enforce feasibility by making sure that every component of $\mathbf{x}$ is greater than some small positive tolerance ($10^{-10}$). ** Project assignments** 1. Suppose that an initial mixture contains only the monomers A, B and C with relative fractions of 0.3, 0.4 and 0.3. Give the matrix $S$ and vector $\mathbf{c}$ of the linear constraints of the system. 2. First assume $T=0$ (no entropy term). In this case the problem is a linear programming problem. 
Find the equilibrium mixture with the lowest energy. You can use the linear programming solver in `scipy`: `from scipy.optimize import linprog`. What compounds are formed? 3. Compute the first and second-order partial derivatives of the Gibbs free energy. Complete the functions `gibbs_fun`, `gibbs_grad` and `gibbs_hessian` which give the function value, gradient and hessian of the Gibbs free energy. These functions take as inputs the vector of concentrations $\mathbf{x}$ and the temperature $T$. The enthalpy per mol is available in the vector `enthalpies`. 4. Complete the implementation for performing linearly constrained Newton step. This function should do a step which satifies the mass balance equations. 5. Complete the implementation of `newton_oligiomerization`. Make sure that after every step the concentratons are strictly positive. Use this to calculate the equilibrium concentration of the different species at a temperature of 100 Kelvin. **Check if it satifies the mass balance!**. 6. Make a plot of the concentrations, enthalpy and entropy of the equilibrium system at different temperatures. Vary $T$ from $ 10^{-1},\ldots, 10^3$. Describe what you see. 7. Use grid search (step size of 0.05) to find the initial quantities of A, B and C (total quantity is equal to 1, at least 0.05 mol/L of each momomer) that have to be mixed at a temperature of 100K to obain: - the equilibrium mixture with the highest entropy - the equilibrium mixture with the lowest enthalphy - the equilibrium mixture with the highest concentration of CBA **ASSIGNMENT 1** Complete the matrix $S$ and vector $\mathbf{c}$ to describe the mass balance of the system. ``` # your code here! S = ... c = ... print(S) print(c) ``` **ASSIGNMENT 2** Use linear programming to find the equilibrium solution at $T=0K$. ``` from scipy.optimize import linprog linprog? xstar_0K = ... ``` DESCRIBE THE RESULT **ASSIGNMENT 3** Complete the function and partial derivatives and implement these functions. 
$$ \min_{\mathbf{x}} \sum_{i=1}^{19}[h_ix_i + Tx_i \log x_i] $$ $$ \text{subject to } S\mathbf{x}=\mathbf{c}\,. $$ **Function** $$ f(\mathbf{x}, T)=\ldots $$ **Gradient** $$ \frac{\partial f(\mathbf{x}, T)}{\partial x_i}=\ldots $$ $$ \nabla f(\mathbf{x}, T)=\ldots $$ **Hessian** $$ \frac{\partial^2 f(\mathbf{x}, T)}{\partial x_i^2}=\ldots $$ Note that $$ \frac{\partial^2 f(\mathbf{x}, T)}{\partial x_i\partial x_j}=\ldots \qquad \text{if $i\neq j$} $$ ``` def gibbs_fun(x, T): """ Negative entropy Inputs: - x: vector of the concentrations (19 x 1) - T: temperature Output: the function value of the Gibbs free energy """ # complete... def gibbs_gradient(x, T): """ Gradient of the negative entropy Inputs: - x: vector of the concentrations (19 x 1) - T: temperature Output: the gradient of the Gibbs free energy (19 x 1 vector) """ # complete... def gibbs_hessian(x, T): """ Hessian of negative entropy Inputs: - x: vector of the concentrations (19 x 1) - T: temperature Output: the gradient of the Gibbs free energy (19 x 19 matrix) """ # complete... ``` **ASSIGNMENT 4** Complete the code for a single constrained Newton step for this problem. ``` def constrained_newton_step(x, S, c, T): """ Computes a constrained Newton step for the Gibbs free energy minimization problem. Note: Inputs: - x: point in which to compute the Newton step, does not have to be feasible - S, c: matrix and vector of the system describing the mass balance - T: temperature of the system Output: - Dx: the Newton step """ # complete ... Dx = ... return Dx # test step (does it make any sense?) constained_newton_step(np.ones((19,1)) / 10, S, c, T=100) ``` **ASSIGNMENT 5** Complete the code for Newton's method for finding the optimal concentration vector. 
``` def newton_oligomerization(x0, S, c, T, stepsize, tolerance=1e-10, epsilon=1e-7): """ Newton's method for finding concentrations that minimize the Gibbs free energy at a specific temperature Inputs: - x0: starting point, vector with strictly positive elements, but not necessarily feasible - S, c: matrix and vector of the system describing the mass balance - T: temperature of the system - stepsize: fixed step size - tolerance: tolerance parameter to make the vectors strictly feasible after every step, minimum value of every element in x - epsilon: tolerance for stopping Output: - x: minimizer """ x = x0 while True: Dx = ... # compute search direction if ...: # determine convergence break # update x # make x feasible return x # starting points should contain positive concentrations # but not necessarily satisfy mass balance x0 = np.ones(...) # Compute the equilibrium concentrations xstar_100K = ... # check if solution satisfies mass balance ... ``` **Assignment 6** ``` # functions for calculating entropy and enthalpy entropy = lambda x : - np.sum(x * np.log(x)) enthalpy = lambda x : np.sum(enthalpies * x.reshape((-1, 1))) # compute the obtained concentrations, entropy en enthalpy at different tempartures # plot the results ``` **Describe the obtained plots and give a physicochemical interpretation** YOUR TEXT HERE **Assignment 7** ``` # Use grid search to find initial mixtures of A, B and C that give rise to mixtures # with the highest entropy, lowest entropy and highest concentration of CBA at 100K. ``` **Find inital mixture which will lead to the highest entropy.** ``` # your code here ``` **Find inital mixture which will lead to the lowest enthalpy.** ``` # your code here ``` **Find inital mixture which will lead to the highest concentration CBA.** ``` # your code here ``` ## References Boyd, S. and Vandenberghe, L., '*[Convex Optimization](https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf)*'. Cambridge University Press (2004)
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W0D4_Calculus/student/W0D4_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Tutorial 1: Differentiation and Integration **Week 0, Day 4: Calculus** **By Neuromatch Academy** __Content creators:__ John S Butler, Arvind Kumar with help from Ella Batty __Content reviewers:__ Aderogba Bayo, Tessy Tom, Matt McCann __Production editors:__ Matthew McCann, Ella Batty **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> --- # Tutorial Objectives *Estimated timing of tutorial: 80 minutes* In this tutorial, we will cover aspects of calculus that will be frequently used in the main NMA course. We assume that you have some familiarty with calculus, but may be a bit rusty or may not have done much practice. Specifically the objectives of this tutorial are * Get an intuitive understanding of derivative and integration operations * Learn to calculate the derivatives of 1- and 2-dimensional functions/signals numerically * Familiarize with the concept of neuron transfer function in 1- and 2-dimensions. * Familiarize with the idea of numerical integration using Riemann sum ``` # @title Video 1: Why do we care about calculus? 
from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1F44y1z7Uk", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="NZwfH_dG2wI", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` --- # Setup ``` # Imports !pip install sympy --quiet import numpy as np import scipy.optimize as opt # import root-finding algorithm import sympy as sp # Python toolbox for symbolic maths import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # Toolbox for rendring 3D figures from mpl_toolkits import mplot3d # Toolbox for rendring 3D figures # @title Figure Settings import ipywidgets as widgets # interactive display from ipywidgets import interact %config InlineBackend.figure_format = 'retina' # use NMA plot style plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") my_layout = widgets.Layout() fig_w, fig_h = 12, 4.5 my_fontsize = 16 my_params = {'axes.labelsize': my_fontsize, 'axes.titlesize': my_fontsize, 'figure.figsize': [fig_w, fig_h], 'font.size': my_fontsize, 'legend.fontsize': my_fontsize-4, 'lines.markersize': 8., 'lines.linewidth': 2., 'xtick.labelsize': my_fontsize-2, 'ytick.labelsize': my_fontsize-2} plt.rcParams.update(my_params) # @title Plotting Functions def move_sympyplot_to_axes(p, ax): backend = p.backend(p) backend.ax = ax backend.process_series() 
backend.ax.spines['right'].set_color('none') backend.ax.spines['bottom'].set_position('zero') backend.ax.spines['top'].set_color('none') plt.close(backend.fig) def plot_functions(function, show_derivative, show_integral): # For sympy we first define our symbolic variable x, y, z, t, f = sp.symbols('x y z t f') # We define our function if function == 'Linear': f = -2*t name = r'$-2t$' elif function == 'Parabolic': f = t**2 name = r'$t^2$' elif function == 'Exponential': f = sp.exp(t) name = r'$e^t$' elif function == 'Sine': f = sp.sin(t) name = r'$sin(t)$' elif function == 'Sigmoid': f = 1/(1 + sp.exp(-(t-5))) name = r'$\frac{1}{1+e^{-(t-5)}}$' if show_derivative and not show_integral: # Calculate the derivative of sin(t) as a function of t diff_f = sp.diff(f) print('Derivative of', f, 'is ', diff_f) p1 = sp.plot(f, diff_f, show=False) p1[0].line_color='r' p1[1].line_color='b' p1[0].label='Function' p1[1].label='Derivative' p1.legend=True p1.title = 'Function = ' + name + '\n' p1.show() elif show_integral and not show_derivative: int_f = sp.integrate(f) int_f = int_f - int_f.subs(t, -10) print('Integral of', f, 'is ', int_f) p1 = sp.plot(f, int_f, show=False) p1[0].line_color='r' p1[1].line_color='g' p1[0].label='Function' p1[1].label='Integral' p1.legend=True p1.title = 'Function = ' + name + '\n' p1.show() elif show_integral and show_derivative: diff_f = sp.diff(f) print('Derivative of', f, 'is ', diff_f) int_f = sp.integrate(f) int_f = int_f - int_f.subs(t, -10) print('Integral of', f, 'is ', int_f) p1 = sp.plot(f, diff_f, int_f, show=False) p1[0].line_color='r' p1[1].line_color='b' p1[2].line_color='g' p1[0].label='Function' p1[1].label='Derivative' p1[2].label='Integral' p1.legend=True p1.title = 'Function = ' + name + '\n' p1.show() else: p1 = sp.plot(f, show=False) p1[0].line_color='r' p1[0].label='Function' p1.legend=True p1.title = 'Function = ' + name + '\n' p1.show() def plot_alpha_func(t, f, df_dt): plt.figure() plt.subplot(2,1,1) plt.plot(t, f, 'r', 
label='Alpha function') plt.xlabel('Time (au)') plt.ylabel('Voltage') plt.title('Alpha function (f(t))') #plt.legend() plt.subplot(2,1,2) plt.plot(t, df_dt, 'b', label='Derivative') plt.title('Derivative of alpha function') plt.xlabel('Time (au)') plt.ylabel('df/dt') #plt.legend() def plot_charge_transfer(t, PSP, numerical_integral): fig, axes = plt.subplots(1, 2) axes[0].plot(t, PSP) axes[0].set(xlabel = 't', ylabel = 'PSP') axes[1].plot(t, numerical_integral) axes[1].set(xlabel = 't', ylabel = 'Charge Transferred') ``` --- # Section 1: What is differentiation and integration? ``` # @title Video 2: A geometrical interpretation of differentiation and integration from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1sU4y1G7Ru", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="uQjwr9RQaEs", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` This video covers the definition of differentiation and integration, highlights the geometrical interpretation of each, and introduces the idea of eigenfunctions. <details> <summary> <font color='blue'>Click here for text recap of video </font></summary> Calculus is a part of mathematics concerned with **continous change**. There are two branches of calculus: differential calculus and integral calculus. 
Differentiation of a function $f(t)$ gives you the derivative of that function $\frac{d(f(t))}{dt}$. A derivative captures how sensitive a function is to slight changes in the input for different ranges of inputs. Geometrically, the derivative of a function at a certain input is the slope of the function at that input.

For example, as you drive, the distance traveled changes continuously with time. The derivative of the distance traveled with respect to time is the velocity of the vehicle at each point in time. The velocity tells you the rate of change of the distance traveled at different points in time. If you have slow velocity (a small derivative), the distance traveled doesn't change much for small changes in time. A high velocity (big derivative) means that the distance traveled changes a lot for small changes in time.

The sign of the derivative of a function (or signal) tells whether the signal is increasing or decreasing. For a signal going through changes as a function of time, the derivative will become zero when the signal changes its direction of change (e.g. from increasing to decreasing). That is, at local minimum or maximum values, the slope of the signal will be zero. This property is used in optimization problems. But we can also use it to find peaks in a signal.

Integration can be thought of as the reverse of differentiation. If we integrate the velocity with respect to time, we can calculate the distance traveled. By integrating a function, we are basically trying to find functions that would have the original one as their derivative.

When we integrate a function, our integral will have an added unknown scalar constant, $C$. For example, if

$$ g(t) = 1.5t^2 + 4t - 1$$

our integral function $f(t)$ will be:

$$ f(t) = \int g(t) dt = 0.5t^3 + 2t^2 - t + C$$

This constant exists because the derivative of a constant is 0 so we cannot know what the constant should be. This is an indefinite integral.
If we compute a definite integral, that is the integral between two limits of the input, we will not have this unknown constant and the integral of a function will capture the area under the curve of that function between those two limits. </details> ### Interactive Demo 1: Geometrical understanding In the interactive demo below, you can pick different functions to examine in the drop down menu. You can then choose to show the derivative function and/or the integral function. For the integral, we have chosen the unknown constant $C$ such that the integral function at the left x-axis limit is 0 (f(t = -10) = 0). So the integral will reflect the area under the curve starting from that position. For each function: * Examine just the function first. Discuss and predict what the derivative and integral will look like. Remember that derivative = slope of function, integral = area under curve from t = -10 to that t. * Check the derivative - does it match your expectations? * Check the integral - does it match your expectations? ``` # @markdown Execute this cell to enable the widget function_options = widgets.Dropdown( options=['Linear', 'Exponential', 'Sine', 'Sigmoid'], description='Function', disabled=False, ) derivative = widgets.Checkbox( value=False, description='Show derivative', disabled=False, indent=False ) integral = widgets.Checkbox( value=False, description='Show integral', disabled=False, indent=False ) def on_value_change(change): derivative.value = False integral.value = False function_options.observe(on_value_change, names='value') interact(plot_functions, function = function_options, show_derivative = derivative, show_integral = integral); ``` In the demo above you may have noticed that the derivative and integral of the exponential function is same as the exponential function itself. Some functions like the exponential function, when differentiated or integrated, equal a scalar times the same function. 
This is a similar idea to eigenvectors of a matrix being those that, when multipled by the matrix, equal a scalar times themselves, as you saw yesterday! When \begin{align*} \frac{d(f(t)}{dt} = a\cdot f(t), \end{align*} we say that $f(t)$ is an **eigenfunction** for derivative operator, where $a$ is a scaling factor. Similarly, when \begin{align*} \int f(t)dt = a\cdot f(t), \end{align*} we say that $f(t)$ is an **eigenfunction** for integral operator. As you can imagine, working with eigenfunctions can make mathematical analysis easy. --- # Section 2: Analytical & Numerical Differentiation ``` # @title Video 3: Differentiation from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV14g41137d5", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="sHogZISXGuQ", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` In this section, we will delve into how we actually find the derivative of a function, both analytically and numerically. ## Section 2.1: Analytical Differentiation *Estimated timing to here from start of tutorial: 20 min* When we find the derivative analytically, we are finding the exact formula for the derivative function. 
To do this, instead of having to do some fancy math every time, we can often consult [an online resource](https://en.wikipedia.org/wiki/Differentiation_rules) for a list of common derivatives, in this case our trusty friend Wikipedia.

If I told you to find the derivative of $f(t) = t^3$, you could consult that site and find in Section 2.1 that if $f(t) = t^n$, then $\frac{d(f(t))}{dt} = nt^{n-1}$. So you would be able to tell me that the derivative of $f(t) = t^3$ is $\frac{d(f(t))}{dt} = 3t^{2}$.

This list of common derivatives often contains only very simple functions. Luckily, as we'll see in the next two sections, we can often break the derivative of a complex function down into the derivatives of more simple components.

### Section 2.1.1: Product Rule

Sometimes we encounter functions which are the product of two functions that both depend on the variable. How do we take the derivative of such functions? For this we use the [Product Rule](https://en.wikipedia.org/wiki/Product_rule).

\begin{align}
f(t) = u(t)\cdot v(t)\\
\frac{d(f(t))}{dt} = v\cdot \frac{du}{dt} + u\cdot \frac{dv}{dt}\\
\end{align}

#### Coding Exercise 2.1.1: Derivative of the postsynaptic potential alpha function

Let's use the product rule to get the derivative of the post-synaptic potential alpha function. As we saw in Video 3, the shape of the postsynaptic potential is given by the so-called alpha function:

\begin{align*}
f(t) = t \cdot exp(-\frac{t}{\tau})
\end{align*}

Here $f(t)$ is a product of $t$ and $exp(-\frac{t}{\tau})$. So we can have $u(t) = t$ and $v(t) = exp(-\frac{t}{\tau})$ and use the product rule!

We have defined $u(t)$ and $v(t)$ in the code below, in terms of the variable $t$, which is an array of time steps from 0 to 10. Define $\frac{du}{dt}$ and $\frac{dv}{dt}$, then compute the full derivative of the alpha function using the product rule. You can always consult Wikipedia to figure out $\frac{du}{dt}$ and $\frac{dv}{dt}$!
``` ######################################################################## ## TODO for students ## Complete all ... in code below and remove raise NotImplementedError("Calculate the derivatives") ######################################################################## # Define time, time constant t = np.arange(0, 10, .1) tau = 0.5 # Compute alpha function f = t * np.exp(-t/tau) # Define u(t), v(t) u_t = t v_t = np.exp(-t/tau) # Define du/dt, dv/dt du_dt = ... dv_dt = ... # Define full derivative df_dt = ... # Visualize plot_alpha_func(t, f, df_dt) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_636667ff.py) *Example output:* <img alt='Solution hint' align='left' width=1687.0 height=607.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D4_Calculus/static/W0D4_Tutorial1_Solution_636667ff_0.png> ### Section 2.1.2: Chain Rule Many times we encounter situations in which the variable $a$ is changing with time ($t$) and affecting another variable $r$. How can we estimate the derivative of $r$ with respect to $a$ i.e. $\frac{dr}{da} = ?$ To calculate $\frac{dr}{da}$ we use the [Chain Rule](https://en.wikipedia.org/wiki/Chain_rule). \begin{align} \frac{dr}{da} = \frac{dr}{dt}\cdot\frac{dt}{da} \end{align} That is, we calculate the derivative of both variables with respect to t and divide that derivative of $r$ by that derivative of $a$. We can also use this formula to simplify taking derivatives of complex functions! We can make an arbitrary function t so that we can compute more simple derivatives and multiply, as we will see in this exercise. #### Math Exercise 2.1.2: Chain Rule Let's say that: $$ r(a) = e^{a^4 + 1} $$ What is $\frac{dr}{da}$? This is a more complex function so we can't simply consult a table of common derivatives. Can you use the chain rule to help? 
Hint: we didn't define t but you could set t equal to the function in the exponent. [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_a0e42694.py) ### Section 2.2.3: Derivatives in Python using Sympy There is a useful Python library for getting the analytical derivatives of functions: Sympy. We actually used this in Interactive Demo 1, under the hood. See the following cell for an example of setting up a sympy function and finding the derivative. ``` # For sympy we first define our symbolic variables f, t = sp.symbols('f, t') # Function definition (sigmoid) f = 1/(1 + sp.exp(-(t-5))) # Get the derivative diff_f = sp.diff(f) # Print the resulting function print('Derivative of', f, 'is ', diff_f) ``` ## Section 2.2: Numerical Differentiation *Estimated timing to here from start of tutorial: 30 min* Formally, the derivative of a function $\mathcal{f}(x)$ at any value $a$ is given by the finite difference formula (FD): \begin{align*} FD = \frac{f(a+h) - f(a)}{h} \end{align*} As $h\rightarrow 0$, the FD approaches the actual value of the derivative. Let's check this. *Note that the numerical estimate of the derivative will result in a time series whose length is one short of the original time series.* ### Interactive Demo 2.2: Numerical Differentiation of the Sine Function Below, we find the numerical derivative of the sine function for different values of $h$, and and compare the result the analytical solution. - What values of h result in more accurate numerical derivatives? 
``` # @markdown *Execute this cell to enable the widget.* def numerical_derivative_demo(h = 0.2): # Now lets create a sequence of numbers which change according to the sine function dt = 0.01 tx = np.arange(-10, 10, dt) sine_fun = np.sin(tx) # symbolic diffrentiation tells us that the derivative of sin(t) is cos(t) cos_fun = np.cos(tx) # Numerical derivative using difference formula n_tx = np.arange(-10,10,h) # create new time axis n_sine_fun = np.sin(n_tx) # calculate the sine function on the new time axis sine_diff = (n_sine_fun[1:] - n_sine_fun[0:-1]) / h fig = plt.figure() ax = plt.subplot(111) plt.plot(tx, sine_fun, label='sine function') plt.plot(tx, cos_fun, label='analytical derivative of sine') with plt.xkcd(): # notice that numerical derivative will have one element less plt.plot(n_tx[0:-1], sine_diff, label='numerical derivative of sine') plt.xlim([-10, 10]) plt.xlabel('Time (au)') plt.ylabel('f(x) or df(x)/dt') ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=3, fancybox=True) plt.show() _ = widgets.interact(numerical_derivative_demo, h = (0.01, 0.5, .02)) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_36cd3b93.py) ## Section 2.3: Transfer Function and Gain of a Neuron *Estimated timing to here from start of tutorial: 34 min* When we inject a constant current (DC) in a neuron, its firing rate changes as a function of strength of the injected current. This is called the **input-output transfer function** or just the *transfer function* or *I/O Curve* of the neuron. For most neurons this can be approximated by a sigmoid function e.g. \begin{align} rate(I) = \frac{1}{1+\text{e}^{-a*(I-\theta)}} - \frac{1}{exp(a*\theta)} + \eta \end{align} where $I$ is injected current, $rate$ is the neuron firing rate and $\eta$ is noise (Gaussian noise with zero mean and $\sigma$ standard deviation). 
*You will visit this equation in a different context in Week 3*

The slope of a neuron's input-output transfer function ($\frac{d(r(I))}{dI}$) is called the **gain** of the neuron, as it tells how the neuron output will change if the input is changed. In other words, the slope of the transfer function tells us in which range of inputs the neuron output is most sensitive to changes in its input.

### Interactive Demo 2.3: Calculating the Transfer Function and Gain of a Neuron

In the following demo, you can estimate the gain of the following neuron transfer function using numerical differentiation. We will use our timestep as h. See the cell below for a function that computes the rate via the formula above and then the gain using numerical differentiation.

In the following cell, you can play with the parameters $a$ and $\theta$ to change the shape of the transfer function (and see the resulting gain function). You can also set $I_{mean}$ to see how the slope is computed for that value of I. In the left plot, the red vertical lines are the two values of the current being used to compute the slope, while the blue lines point to the corresponding output firing rates.

Change the parameters of the neuron transfer function (i.e. $a$ and $\theta$) and see if you can predict the value of $I$ for which the neuron has maximal slope and which parameter determines the peak value of the gain.

1. Ensure you understand how the right plot relates to the left!
2. How does $\theta$ affect the transfer function and gain?
3. How does $a$ affect the transfer function and gain?
``` def compute_rate_and_gain(I, a, theta, current_timestep): """ Compute rate and gain of neuron based on parameters Args: I (ndarray): different possible values of the current a (scalar): parameter of the transfer function theta (scalar): parameter of the transfer function current_timestep (scalar): the time we're using to take steps Returns: (ndarray, ndarray): rate and gain for each possible value of I """ # Compute rate rate = (1+np.exp(-a*(I-theta)))**-1 - (1+np.exp(a*theta))**-1 # Compute gain using a numerical derivative gain = (rate[1:] - rate[0:-1])/current_timestep return rate, gain # @markdown Execute this cell to enable the widget def plot_rate_and_gain(a, theta, I_mean): current_timestep = 0.1 # Compute I I = np.arange(0, 8, current_timestep) rate, gain = compute_rate_and_gain(I, a, theta, current_timestep) I_1 = I_mean - current_timestep/2 rate_1 = (1+np.exp(-a*(I_1-theta)))**-1 - (1+np.exp(a*theta))**-1 I_2 = I_mean + current_timestep/2 rate_2 = (1+np.exp(-a*(I_2-theta)))**-1 - (1+np.exp(a*theta))**-1 input_range = I_2-I_1 output_range = rate_2 - rate_1 # Visualize rate and gain plt.subplot(1,2,1) plt.plot(I,rate) plt.plot([I_1,I_1],[0, rate_1],color='r') plt.plot([0,I_1],[rate_1, rate_1],color='b') plt.plot([I_2,I_2],[0, rate_2],color='r') plt.plot([0,I_2],[rate_2, rate_2],color='b') plt.xlim([0, 8]) low, high = plt.ylim() plt.ylim([0, high]) plt.xlabel('Injected current (au)') plt.ylabel('Output firing rate (normalized)') plt.title('Transfer function') plt.text(2, 1.3, 'Output-Input Ratio =' + str(np.round(1000*output_range/input_range)/1000), style='italic', bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10}) plt.subplot(1,2,2) plt.plot(I[0:-1], gain) plt.plot([I_mean, I_mean],[0,0.6],color='r') plt.xlabel('Injected current (au)') plt.ylabel('Gain') plt.title('Gain') plt.xlim([0, 8]) low, high = plt.ylim() plt.ylim([0, high]) _ = widgets.interact(plot_rate_and_gain, a = (0.5, 2.0, .02), theta=(1.2,4.0,0.1), I_mean= (0.5,8.0,0.1)) ``` [*Click for 
solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_6ae0b0ff.py) # Section 3: Functions of Multiple Variables *Estimated timing to here from start of tutorial: 44 min* ``` # @title Video 4: Functions of multiple variables from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1Ly4y1M77D", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="Mp_uNNNiQAI", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` This video covers what partial derivatives are. <details> <summary> <font color='blue'>Click here for text recap of video </font></summary> In the previous section, you looked at function of single variable $t$ or $x$. In most cases, we encounter functions of multiple variables. For example, in the brain, the firing rate of a neuron is a function of both excitatory and inhibitory input rates. In the following, we will look into how to calculate derivatives of such functions. When we take the derrivative of a multivariable function with respect to one of the variables it is called the **partial derivative**. 
For example, if we have a function:

\begin{align}
f(x,y) = x^2 + 2xy + y^2
\end{align}

Then we can define the partial derivatives as

\begin{align}
\frac{\partial(f(x,y))}{\partial x} = 2x + 2y + 0 \\\\
\frac{\partial(f(x,y))}{\partial y} = 0 + 2x + 2y
\end{align}

In the above, the derivative of the last term ($y^2$) with respect to $x$ is zero because it does not change with respect to $x$. Similarly, the derivative of $x^2$ with respect to $y$ is also zero.

</details>

Just as with the derivatives we saw earlier, you can get partial derivatives through either an analytical method (finding an exact equation) or a numerical method (approximating).

### Interactive Demo 3: Visualize partial derivatives

In the demo below, you can input any function of x and y and then visualize both the function and partial derivatives.

We visualized the 2-dimensional function as a surface plot in which the values of the function are rendered as color. Yellow represents a high value and blue represents a low value. The height of the surface also shows the numerical value of the function. A more complete description of 2D surface plots and why we need them is located in Bonus Section 1.1. The first plot is that of our function. And the two bottom plots are the derivative surfaces with respect to $x$ and $y$ variables.

1. Ensure you understand how the plots relate to each other - if not, review the above material
2. Can you come up with a function where the partial derivative with respect to x will be a linear plane and the derivative with respect to y will be more curvy?
3. What happens to the partial derivatives if there are no terms involving multiplying x and y together?
``` # @markdown Execute this widget to enable the demo # Let's use sympy to calculate Partial derivatives of a function of 2-variables @interact(f2d_string = 'x**2 + 2*x*y + y**2') def plot_partial_derivs(f2d_string): f, x, y = sp.symbols('f, x, y') f2d = eval(f2d_string) f2d_dx = sp.diff(f2d,x) f2d_dy = sp.diff(f2d,y) print('Partial derivative of ', f2d, 'with respect to x is', f2d_dx) print('Partial derivative of ', f2d, 'with respect to y is', f2d_dy) p1 = sp.plotting.plot3d(f2d, (x, -5, 5), (y, -5, 5),show=True,xlabel='x', ylabel='y', zlabel='f(x,y)',title='Our function') p2 = sp.plotting.plot3d(f2d_dx, (x, -5, 5), (y, -5, 5),show=True,xlabel='x', ylabel='y', zlabel='df(x,y)/dx',title='Derivative w.r.t. x') p3 = sp.plotting.plot3d(f2d_dy, (x, -5, 5), (y, -5, 5),show=True,xlabel='x', ylabel='y', zlabel='df(x,y)/dy',title='Derivative w.r.t. y') ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_8a586322.py) To see an application of the numerical calculation of partial derivatives to understand a neuron driven by excitatory and inhibitory inputs, see Bonus Section 1! We will use the partial derivative several times in the course. For example partial derivative are used the calculate the Jacobian of a system of differential equations. The Jacobian is used to determine the dynamics and stability of a system. This will be introduced in the second week while studying the dynamics of excitatory and inhibitory population interactions. 
--- # Section 4: Numerical Integration ``` # @title Video 5: Numerical Integration from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1p54y1H7zt", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="cT0_CbD_h9Q", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` This video covers numerical integration and specifically Riemann sums. <details> <summary> <font color='blue'>Click here for text recap of video </font></summary> Geometrically, integration is the area under the curve. This interpretation gives two formal ways to calculate the integral of a function numerically. **[Riemann sum](https://en.wikipedia.org/wiki/Riemann_sum)**: If we wish to integrate a function $f(t)$ with respect to $t$, then first we divide the function into $n$ intervals of size $dt = a-b$, where $a$ is the starting of the interval. Thus, each interval gives a rectangle with height $f(a)$ and width $dt$. By summing the area of all the rectangles, we can approximate the area under the curve. As the size $dt$ approaches to zero, our estimate of the integral approcahes the analytical calculation. Essentially, the Riemann sum is cutting the region under the curve in vertical stripes, calculating area of the each stripe and summing them up. 
</details> ## Section 4.1: Demonstration of the Riemann Sum *Estimated timing to here from start of tutorial: 60 min* ### Interactive Demo 4.1: Riemann Sum vs. Analytical Integral with changing step size Below, we will compare numerical integration using the Riemann Sum with the analytical solution. You can change the interval size $dt$ using the slider. 1. What values of dt result in the best numerical integration? 2. What is the downside of choosing that value of dt? 3. With large dt, why are we underestimating the integral (as opposed to overestimating? ``` # @markdown Run this cell to enable the widget! def riemann_sum_demo(dt = 0.5): step_size = 0.1 min_val = 0. max_val = 10. tx = np.arange(min_val, max_val, step_size) # Our function ftn = tx**2 - tx + 1 # And the integral analytical formula calculates using sympy int_ftn = tx**3/3 - tx**2/2 + tx # Numerical integration of f(t) using Riemann Sum n = int((max_val-min_val)/dt) r_tx = np.zeros(n) fun_value = np.zeros(n) for ii in range(n): a = min_val+ii*dt fun_value[ii] = a**2 - a + 1 r_tx[ii] = a; # Riemann sum is just cumulative sum of the fun_value multiplied by the r_sum = np.cumsum(fun_value)*dt with plt.xkcd(): plt.figure(figsize=(20,5)) ax = plt.subplot(1,2,1) plt.plot(tx,ftn,label='Function') for ii in range(n): plt.plot([r_tx[ii], r_tx[ii], r_tx[ii]+dt, r_tx[ii]+dt], [0, fun_value[ii], fun_value[ii], 0] ,color='r') plt.xlabel('Time (au)') plt.ylabel('f(t)') plt.title('f(t)') plt.grid() plt.subplot(1,2,2) plt.plot(tx,int_ftn,label='Analytical') plt.plot(r_tx+dt,r_sum,color = 'r',label='Riemann Sum') plt.xlabel('Time (au)') plt.ylabel('int(f(t))') plt.title('Integral of f(t)') plt.grid() plt.legend() plt.show() _ = widgets.interact(riemann_sum_demo, dt = (0.1, 1., .02)) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_fd942e45.py) There are other methods of numerical integration, such as **[Lebesgue 
integral](https://en.wikipedia.org/wiki/Lebesgue_integral)** and **Runge Kutta**. In the Lebesgue integral, we divide the area under the curve into horizontal stripes. That is, instead of the independent variable, the range of the function $f(t)$ is divided into small intervals. In any case, the Riemann sum is the basis of Euler's method of integration for solving ordinary differential equations - something you will do in a later tutorial today. ## Section 4.2: Neural Applications of Numerical Integration *Estimated timing to here from start of tutorial: 68 min* ### Coding Exercise 4.2: Calculating Charge Transfer with Excitatory Input An incoming spike elicits a change in the post-synaptic membrane potential ($PSP(t)$) which can be captured by the following function \begin{align} PSP(t) = J\times t\times exp\big(-\frac{t-t_{sp}}{\tau_{s}}\big) \end{align} where $J$ is the synaptic amplitude, $t_{sp}$ is the spike time and $\tau_s$ is the synaptic time constant. Estimate the total charge transfered to the postsynaptic neuron during an PSP with amplitude $J=1.0$, $\tau_s = 1.0$ and $t_{sp} = 1.$ (that is the spike occured at 1ms). The total charge will be the integral of the PSP function. ``` ######################################################################## ## TODO for students ## Complete all ... in code below and remove raise NotImplementedError("Calculate the charge transfer") ######################################################################## # Set up parameters J = 1 tau_s = 1 t_sp = 1 dt = .1 t = np.arange(0, 10, dt) # Code PSP formula PSP = ... # Compute numerical integral # We already have PSP at every time step (height of rectangles). We need to #. multiply by width of rectangles (dt) to get areas rectangle_areas = ... # Cumulatively sum rectangles (hint: use np.cumsum) numerical_integral = ... 
# Visualize plot_charge_transfer(t, PSP, numerical_integral) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_200c1e98.py) *Example output:* <img alt='Solution hint' align='left' width=1687.0 height=607.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W0D4_Calculus/static/W0D4_Tutorial1_Solution_200c1e98_0.png> You can see from the figure that the total charge transferred is a little over 2.5. --- # Section 5: Differentiation and Integration as Filtering Operations *Estimated timing to here from start of tutorial: 75 min* ``` # @title Video 6: Filtering Operations from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1Vy4y1M7oT", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="7_ZjlT2d174", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` This video covers a different interpretation of differentiation and integration: viewing them as filtering operations. <details> <summary> <font color='blue'>Click here for text recap of video </font></summary> In the above, we used the notions that geometrically integration is the area under the curve and differentiation is the slope of the curve. There is another interpretation of these two operations. 
As we calculate the derivative of a function, we take the difference of adjacent values of the function. This results in the removal of common part between the two values. As a consequence, we end up removing the unchanging part of the signal. If we now think in terms of frequencies, differentiation removes low frequencies, or slow changes. That is, differentiation acts as a high pass filter. Integration does the opposite because in the estimation of an integral we keep adding adjacent values of the signal. So, again thinking in terms of frequencies, integration is akin to the removal of high frequencies or fast changes (low-pass filter). The shock absorbers in your bike are an example of integrators. We can see this behavior the demo below. Here we will not work with functions, but with signals. As such, functions and signals are the same. Just that in most cases our signals are measurements with respect to time. ``` # @markdown Execute this cell to see visualization h = 0.01 tx = np.arange(0,2,h) noise_signal = np.random.uniform(0, 1, (len(tx)))*0.5 x1 = np.sin(0.5*np.pi*tx) + noise_signal # This will generate a 1 Hz sin wave # In the signal x1 we have added random noise which contributs the high frequencies # Take the derivative equivalent of the signal i.e. subtract the adjacent values x1_diff = (x1[1:] - x1[:-1]) # Take the integration equivalent of the signal i.e. sum the adjacent values. 
And divide by 2 (take average essentially) x1_integrate = (x1[1:] + x1[:-1])/2 # Plotting code plt.figure(figsize=(15,10)) plt.subplot(3,1,1) plt.plot(tx,x1,label='Original Signal') #plt.xlabel('Time (sec)') plt.ylabel('Signal Value(au)') plt.legend() plt.subplot(3,1,2) plt.plot(tx[0:-1],x1_diff,label='Differentiated Signal') # plt.xlabel('Time (sec)') plt.ylabel('Differentiated Value(au)') plt.legend() plt.subplot(3,1,3) plt.plot(tx,x1,label='Original Signal') plt.plot(tx[0:-1],x1_integrate,label='Integrate Signal') plt.xlabel('Time (sec)') plt.ylabel('Integrate Value(au)') plt.legend() ``` Notice how the differentiation operation amplifies the fast changes which were contributed by noise. By contrast, the integration operation supresses the fast changing noise. If we perform the same operation of averaging the adjancent samples on the orange trace, we will further smooth the signal. Such sums and subtractions form the basis of digital filters. --- # Summary *Estimated timing of tutorial: 80 minutes* * Geometrically, integration is the area under the curve and differentiation is the slope of the function * The concepts of slope and area can be easily extended to higher dimensions. We saw this when we took the derivative of a 2-dimensional transfer function of a neuron * Numerical estimates of both derivatives and integrals require us to choose a time step $h$. The smaller the $h$, the better the estimate, but for small values of $h$, more computations are needed. So there is always some tradeoff. * Partial derivatives are just the estimate of the slope along one of the many dimensions of the function. We can combine the slopes in different directions using vector sum to find the direction of the slope. * Because the derivative of a function is zero at the local peak or trough, derivatives are used to solve optimization problems. * When thinking of signal, integration operation is equivalent to smoothening the signals (i.e. 
remove fast changes) * Differentiation operations remove slow changes and enhance high frequency content of a signal --- # Bonus Section 1: Numerical calculation of partial derivatives ## Bonus Section 1.1: Understanding 2D plots Let's take the example of a neuron driven by excitatory and inhibitory inputs. Because this is for illustrative purposes, we will not go in the details of the numerical range of the input and output variables. In the function below, we assume that the firing rate of a neuron increases motonotically with an increase in excitation and decreases monotonically with an increase in inhibition. The inhibition is modelled as a subtraction. Like for the 1-dimensional transfer function, here we assume that we can approximate the transfer function as a sigmoid function. To evaluate the partial derivatives we can use the same numerical differentiation as before but now we apply it to each row and column separately. ``` # @markdown Execute this cell to visualize the neuron firing rate surface def sigmoid_function(x,a,theta): ''' Population activation function. Expects: x : the population input a : the gain of the function theta : the threshold of the function Returns: the population activation response F(x) for input x ''' # add the expression of f = F(x) f = (1+np.exp(-a*(x-theta)))**-1 - (1+np.exp(a*theta))**-1 return f # Neuron Transfer function step_size = 0.1 exc_input = np.arange(2,9,step_size) inh_input = np.arange(0,7,step_size) exc_a = 1.2 exc_theta = 2.4 inh_a = 1. inh_theta = 4. 
rate = np.zeros((len(exc_input),len(inh_input))) for ii in range(len(exc_input)): for jj in range(len(inh_input)): rate[ii,jj] = sigmoid_function(exc_input[ii],exc_a,exc_theta) - sigmoid_function(inh_input[jj],inh_a,inh_theta)*0.5 with plt.xkcd(): X, Y = np.meshgrid(exc_input, inh_input) fig = plt.figure(figsize=(12,12)) ax1 = fig.add_subplot(2,2,1) lg_txt = 'Inhibition = ' + str(inh_input[0]) ax1.plot(exc_input,rate[:,0],label=lg_txt) lg_txt = 'Inhibition = ' + str(inh_input[20]) ax1.plot(exc_input,rate[:,20],label=lg_txt) lg_txt = 'Inhibition = ' + str(inh_input[40]) ax1.plot(exc_input,rate[:,40],label=lg_txt) ax1.legend() ax1.set_xlabel('Excitatory input (au)') ax1.set_ylabel('Neuron output rate (au)'); ax2 = fig.add_subplot(2,2,2) lg_txt = 'Excitation = ' + str(exc_input[0]) ax2.plot(inh_input,rate[0,:],label=lg_txt) lg_txt = 'Excitation = ' + str(exc_input[20]) ax2.plot(inh_input,rate[20,:],label=lg_txt) lg_txt = 'Excitation = ' + str(exc_input[40]) ax2.plot(inh_input,rate[40,:],label=lg_txt) ax2.legend() ax2.set_xlabel('Inhibitory input (au)') ax2.set_ylabel('Neuron output rate (au)'); ax3 = fig.add_subplot(2, 1, 2, projection='3d') surf= ax3.plot_surface(Y.T, X.T, rate, rstride=1, cstride=1, cmap='viridis', edgecolor='none') ax3.set_xlabel('Inhibitory input (au)') ax3.set_ylabel('Excitatory input (au)') ax3.set_zlabel('Neuron output rate (au)'); fig.colorbar(surf) ``` In the **Top-Left** plot, we see how the neuron output rate increases as a function of excitatory input (e.g. the blue trace). However, as we increase inhibition, expectedly the neuron output decreases and the curve is shifted downwards. This constant shift in the curve suggests that the effect of inhibition is subtractive, and the amount of subtraction does not depend on the neuron output. We can alternatively see how the neuron output changes with respect to inhibition and study how excitation affects that. This is visualized in the **Top-Right** plot. 
This type of plotting is very intuitive, but it becomes very tedious to visualize when there are larger numbers of lines to be plotted. A nice solution to this visualization problem is to render the data as color, as surfaces, or both. This is what we have done in the plot on the bottom. The colormap on the right shows the output of the neuron as a function of inhibitory input and excitatory input. The output rate is shown both as height along the z-axis and as the color. Blue means low firing rate and yellow means high firing rate (see the color bar). In the above plot, the output rate of the neuron goes below zero. This is of course not physiological as neurons cannot have negative firing rates. In models, we either choose the operating point such that the output does not go below zero, or else we clamp the neuron output to zero if it goes below zero. You will learn about it more in Week 2. ## Bonus Section 1.2: Numerical partial derivatives We can now compute the partial derivatives of our transfer function in response to excitatory and inhibitory input. We do so below! 
# @markdown Execute this cell to implement our neural transfer function, `plot_2d_neuron_transfer_function`, in response to excitatory and inhibitory input
and Inh'); ax1.view_init(45, 10) fig.colorbar(surf1) Xde, Yde = np.meshgrid(exc_input[0:-1], inh_input) ax2 = fig.add_subplot(1, 3, 2, projection='3d') surf2 = ax2.plot_surface(Yde.T, Xde.T, rate_de, rstride=1, cstride=1, cmap='viridis', edgecolor='none') ax2.set_xlabel('Inhibitory input (au)') ax2.set_ylabel('Excitatory input (au)') ax2.set_zlabel('Neuron output rate (au)'); ax2.set_title('Derivative wrt Excitation'); ax2.view_init(45, 10) fig.colorbar(surf2) Xdi, Ydi = np.meshgrid(exc_input, inh_input[:-1]) ax3 = fig.add_subplot(1, 3, 3, projection='3d') surf3 = ax3.plot_surface(Ydi.T, Xdi.T, rate_di, rstride=1, cstride=1, cmap='viridis', edgecolor='none') ax3.set_xlabel('Inhibitory input (au)') ax3.set_ylabel('Excitatory input (au)') ax3.set_zlabel('Neuron output rate (au)'); ax3.set_title('Derivative wrt Inhibition'); ax3.view_init(15, -115) fig.colorbar(surf3) plot_2d_neuron_transfer_function(exc_a = 1.2, exc_theta = 2.4, inh_a = 1, inh_theta = 4) ``` Is this what you expected? Change the parameters in the function to generate the 2-d transfer function of the neuron for different excitatory and inhibitory $a$ and $\theta$ and test your intuitions Can you relate this shape of the partial derivative surface to the gain of the 1-d transfer-function of a neuron (Section 2)? [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W0D4_Calculus/solutions/W0D4_Tutorial1_Solution_cdb38705.py)
github_jupyter
# Weight Decay :label:`sec_weight_decay` Now that we have characterized the problem of overfitting, we can introduce some standard techniques for regularizing models. Recall that we can always mitigate overfitting by going out and collecting more training data. That can be costly, time consuming, or entirely out of our control, making it impossible in the short run. For now, we can assume that we already have as much high-quality data as our resources permit and focus on regularization techniques. Recall that in our polynomial regression example (:numref:`sec_model_selection`) we could limit our model's capacity simply by tweaking the degree of the fitted polynomial. Indeed, limiting the number of features is a popular technique to mitigate overfitting. However, simply tossing aside features can be too blunt an instrument for the job. Sticking with the polynomial regression example, consider what might happen with high-dimensional inputs. The natural extensions of polynomials to multivariate data are called *monomials*, which are simply products of powers of variables. The degree of a monomial is the sum of the powers. For example, $x_1^2 x_2$, and $x_3 x_5^2$ are both monomials of degree 3. Note that the number of terms with degree $d$ blows up rapidly as $d$ grows larger. Given $k$ variables, the number of monomials of degree $d$ (i.e., $k$ multichoose $d$) is ${k - 1 + d} \choose {k - 1}$. Even small changes in degree, say from $2$ to $3$, dramatically increase the complexity of our model. Thus we often need a more fine-grained tool for adjusting function complexity. ## Norms and Weight Decay We have described both the $L_2$ norm and the $L_1$ norm, which are special cases of the more general $L_p$ norm in :numref:`subsec_lin-algebra-norms`. 
(***Weight decay* (commonly called $L_2$ regularization), might be the most widely-used technique for regularizing parametric machine learning models.**) The technique is motivated by the basic intuition that among all functions $f$, the function $f = 0$ (assigning the value $0$ to all inputs) is in some sense the *simplest*, and that we can measure the complexity of a function by its distance from zero. But how precisely should we measure the distance between a function and zero? There is no single right answer. In fact, entire branches of mathematics, including parts of functional analysis and the theory of Banach spaces, are devoted to answering this issue. One simple interpretation might be to measure the complexity of a linear function $f(\mathbf{x}) = \mathbf{w}^\top \mathbf{x}$ by some norm of its weight vector, e.g., $\| \mathbf{w} \|^2$. The most common method for ensuring a small weight vector is to add its norm as a penalty term to the problem of minimizing the loss. Thus we replace our original objective, *minimizing the prediction loss on the training labels*, with new objective, *minimizing the sum of the prediction loss and the penalty term*. Now, if our weight vector grows too large, our learning algorithm might focus on minimizing the weight norm $\| \mathbf{w} \|^2$ vs. minimizing the training error. That is exactly what we want. To illustrate things in code, let us revive our previous example from :numref:`sec_linear_regression` for linear regression. There, our loss was given by $$L(\mathbf{w}, b) = \frac{1}{n}\sum_{i=1}^n \frac{1}{2}\left(\mathbf{w}^\top \mathbf{x}^{(i)} + b - y^{(i)}\right)^2.$$ Recall that $\mathbf{x}^{(i)}$ are the features, $y^{(i)}$ are labels for all data examples $i$, and $(\mathbf{w}, b)$ are the weight and bias parameters, respectively. 
To penalize the size of the weight vector, we must somehow add $\| \mathbf{w} \|^2$ to the loss function, but how should the model trade off the standard loss for this new additive penalty? In practice, we characterize this tradeoff via the *regularization constant* $\lambda$, a non-negative hyperparameter that we fit using validation data: $$L(\mathbf{w}, b) + \frac{\lambda}{2} \|\mathbf{w}\|^2,$$ For $\lambda = 0$, we recover our original loss function. For $\lambda > 0$, we restrict the size of $\| \mathbf{w} \|$. We divide by $2$ by convention: when we take the derivative of a quadratic function, the $2$ and $1/2$ cancel out, ensuring that the expression for the update looks nice and simple. The astute reader might wonder why we work with the squared norm and not the standard norm (i.e., the Euclidean distance). We do this for computational convenience. By squaring the $L_2$ norm, we remove the square root, leaving the sum of squares of each component of the weight vector. This makes the derivative of the penalty easy to compute: the sum of derivatives equals the derivative of the sum. Moreover, you might ask why we work with the $L_2$ norm in the first place and not, say, the $L_1$ norm. In fact, other choices are valid and popular throughout statistics. While $L_2$-regularized linear models constitute the classic *ridge regression* algorithm, $L_1$-regularized linear regression is a similarly fundamental model in statistics, which is popularly known as *lasso regression*. One reason to work with the $L_2$ norm is that it places an outsize penalty on large components of the weight vector. This biases our learning algorithm towards models that distribute weight evenly across a larger number of features. In practice, this might make them more robust to measurement error in a single variable. By contrast, $L_1$ penalties lead to models that concentrate weights on a small set of features by clearing the other weights to zero. 
This is called *feature selection*, which may be desirable for other reasons. Using the same notation in :eqref:`eq_linreg_batch_update`, the minibatch stochastic gradient descent updates for $L_2$-regularized regression follow: $$ \begin{aligned} \mathbf{w} & \leftarrow \left(1- \eta\lambda \right) \mathbf{w} - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \mathbf{x}^{(i)} \left(\mathbf{w}^\top \mathbf{x}^{(i)} + b - y^{(i)}\right). \end{aligned} $$ As before, we update $\mathbf{w}$ based on the amount by which our estimate differs from the observation. However, we also shrink the size of $\mathbf{w}$ towards zero. That is why the method is sometimes called "weight decay": given the penalty term alone, our optimization algorithm *decays* the weight at each step of training. In contrast to feature selection, weight decay offers us a continuous mechanism for adjusting the complexity of a function. Smaller values of $\lambda$ correspond to less constrained $\mathbf{w}$, whereas larger values of $\lambda$ constrain $\mathbf{w}$ more considerably. Whether we include a corresponding bias penalty $b^2$ can vary across implementations, and may vary across layers of a neural network. Often, we do not regularize the bias term of a network's output layer. ## High-Dimensional Linear Regression We can illustrate the benefits of weight decay through a simple synthetic example. ``` %matplotlib inline from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() ``` First, we [**generate some data as before**] (**$$y = 0.05 + \sum_{i = 1}^d 0.01 x_i + \epsilon \text{ where } \epsilon \sim \mathcal{N}(0, 0.01^2).$$**) We choose our label to be a linear function of our inputs, corrupted by Gaussian noise with zero mean and standard deviation 0.01. To make the effects of overfitting pronounced, we can increase the dimensionality of our problem to $d = 200$ and work with a small training set containing only 20 examples. 
``` n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) ``` ## Implementation from Scratch In the following, we will implement weight decay from scratch, simply by adding the squared $L_2$ penalty to the original target function. ### [**Initializing Model Parameters**] First, we will define a function to randomly initialize our model parameters. ``` def init_params(): w = np.random.normal(scale=1, size=(num_inputs, 1)) b = np.zeros(1) w.attach_grad() b.attach_grad() return [w, b] ``` ### (**Defining $L_2$ Norm Penalty**) Perhaps the most convenient way to implement this penalty is to square all terms in place and sum them up. ``` def l2_penalty(w): return (w**2).sum() / 2 ``` ### [**Defining the Training Loop**] The following code fits a model on the training set and evaluates it on the test set. The linear network and the squared loss have not changed since :numref:`chap_linear`, so we will just import them via `d2l.linreg` and `d2l.squared_loss`. The only change here is that our loss now includes the penalty term. 
``` def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with autograd.record(): # The L2 norm penalty term has been added, and broadcasting # makes `l2_penalty(w)` a vector whose length is `batch_size` l = loss(net(X), y) + lambd * l2_penalty(w) l.backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) print('L2 norm of w:', np.linalg.norm(w)) ``` ### [**Training without Regularization**] We now run this code with `lambd = 0`, disabling weight decay. Note that we overfit badly, decreasing the training error but not the test error---a textbook case of overfitting. ``` train(lambd=0) ``` ### [**Using Weight Decay**] Below, we run with substantial weight decay. Note that the training error increases but the test error decreases. This is precisely the effect we expect from regularization. ``` train(lambd=3) ``` ## [**Concise Implementation**] Because weight decay is ubiquitous in neural network optimization, the deep learning framework makes it especially convenient, integrating weight decay into the optimization algorithm itself for easy use in combination with any loss function. Moreover, this integration serves a computational benefit, allowing implementation tricks to add weight decay to the algorithm, without any additional computational overhead. Since the weight decay portion of the update depends only on the current value of each parameter, the optimizer must touch each parameter once anyway. In the following code, we specify the weight decay hyperparameter directly through `wd` when instantiating our `Trainer`. By default, Gluon decays both weights and biases simultaneously. 
Note that the hyperparameter `wd` will be multiplied by `wd_mult` when updating model parameters. Thus, if we set `wd_mult` to zero, the bias parameter $b$ will not decay. ``` def train_concise(wd): net = nn.Sequential() net.add(nn.Dense(1)) net.initialize(init.Normal(sigma=1)) loss = gluon.loss.L2Loss() num_epochs, lr = 100, 0.003 trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd}) # The bias parameter has not decayed. Bias names generally end with "bias" net.collect_params('.*bias').setattr('wd_mult', 0) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) print('L2 norm of w:', np.linalg.norm(net[0].weight.data())) ``` [**The plots look identical to those when we implemented weight decay from scratch**]. However, they run appreciably faster and are easier to implement, a benefit that will become more pronounced for larger problems. ``` train_concise(0) train_concise(3) ``` So far, we only touched upon one notion of what constitutes a simple linear function. Moreover, what constitutes a simple nonlinear function can be an even more complex question. For instance, [reproducing kernel Hilbert space (RKHS)](https://en.wikipedia.org/wiki/Reproducing_kernel_Hilbert_space) allows one to apply tools introduced for linear functions in a nonlinear context. Unfortunately, RKHS-based algorithms tend to scale poorly to large, high-dimensional data. In this book we will default to the simple heuristic of applying weight decay on all layers of a deep network. ## Summary * Regularization is a common method for dealing with overfitting. 
It adds a penalty term to the loss function on the training set to reduce the complexity of the learned model. * One particular choice for keeping the model simple is weight decay using an $L_2$ penalty. This leads to weight decay in the update steps of the learning algorithm. * The weight decay functionality is provided in optimizers from deep learning frameworks. * Different sets of parameters can have different update behaviors within the same training loop. ## Exercises 1. Experiment with the value of $\lambda$ in the estimation problem in this section. Plot training and test accuracy as a function of $\lambda$. What do you observe? 1. Use a validation set to find the optimal value of $\lambda$. Is it really the optimal value? Does this matter? 1. What would the update equations look like if instead of $\|\mathbf{w}\|^2$ we used $\sum_i |w_i|$ as our penalty of choice ($L_1$ regularization)? 1. We know that $\|\mathbf{w}\|^2 = \mathbf{w}^\top \mathbf{w}$. Can you find a similar equation for matrices (see the Frobenius norm in :numref:`subsec_lin-algebra-norms`)? 1. Review the relationship between training error and generalization error. In addition to weight decay, increased training, and the use of a model of suitable complexity, what other ways can you think of to deal with overfitting? 1. In Bayesian statistics we use the product of prior and likelihood to arrive at a posterior via $P(w \mid x) \propto P(x \mid w) P(w)$. How can you identify $P(w)$ with regularization? [Discussions](https://discuss.d2l.ai/t/98)
github_jupyter
# Parrot Sequoia Image Processing Tutorial ## Overview Micasense has made an open python library with image processing tools and tutorials for their RedEdge sensor. These tools are also higly relevant for processing Parrot Sequoia images. I have added some functionality to the micasense library, which enables processing of Sequoia imagery. This tutorial is similar to the `MicaSense RedEdge Image Processing Tutorial 1`. It walks through how to convert Sequoia data from raw images to irradiance normalised with data from the sunshine sensor and then to reflectance. Vignette correction and removal of fish-eye distortion is also part of the workflow. The code is found in the file `sequoiautils.py`. Most of the code has originally been shared by other users in the [Parrot developer forum](https://forum.developer.parrot.com/c/sequoia) (see details in the relevant sections below and in the script). This tutorial assumes you have gone through the basic setup [here](./MicaSense Image Processing Setup.html) and your system is set up and ready to go. ### Opening an image with pyplot Sequoia 16-bit images can be read with pyplot directly into numpy arrays using the pyplot `imread` function or the matplotlib `imread` function, and then we can display the image inline using the `imshow` function of `matplotlib`. 
``` import cv2 import matplotlib.pyplot as plt import numpy as np import os,glob import math %matplotlib inline imagePath = os.path.join('.','data','Sequoia','0077') imageName = os.path.join(imagePath,'IMG_180413_080658_0000_NIR.TIF') # Read raw image DN values # reads 16 bit tif - this will likely not work for 12 bit images imageRaw=plt.imread(imageName) # Display the image fig, ax = plt.subplots(figsize=(8,6)) ax.imshow(imageRaw, cmap='gray') plt.show() ``` ### Adding a colorbar We will use start by using a plotting function in `micasense.plotutils` that adds a colorbar to the display, so that we can more easily see changes in the values in the images and also see the range of the image values after various conversions. This function also colorizes the grayscale images, so that changes can more easily be seen. Depending on your viewing style, you may prefer a different color map and you can also select that colormap here or browsing the colormaps on the [matplotlib site](https://matplotlib.org/users/colormaps.html). ``` import micasense.plotutils as plotutils # Optional: pick a color map that fits your viewing style # one of 'gray, viridis, plasma, inferno, magma, nipy_spectral' plotutils.colormap('viridis') plotutils.plotwithcolorbar(imageRaw, title='Raw image values with colorbar'); ``` ### Reading Sequoia Metadata In order to perform various processing on the images, we need to read the metadata of each image. For this we use ExifTool. We can read standard image capture metadata such as location, UTC time, imager exposure and gain, but also Sequoia specific metadata which can make processing workflows easier. 
``` import micasense.metadata as metadata exiftoolPath = None if os.name == 'nt': exiftoolPath = 'C:/exiftool/exiftool.exe' # get image metadata meta = metadata.Metadata(imageName, exiftoolPath=exiftoolPath) cameraMake = meta.get_item('EXIF:Make') cameraModel = meta.get_item('EXIF:Model') firmwareVersion = meta.get_item('EXIF:Software') bandName = meta.get_item('XMP:BandName') print('{0} {1} firmware version: {2}'.format(cameraMake, cameraModel, firmwareVersion)) print('Exposure Time: {0} seconds'.format(meta.get_item('EXIF:ExposureTime'))) print('Imager Gain: {0}'.format(meta.get_item('EXIF:ISO')/100.0)) print('Size: {0}x{1} pixels'.format(meta.get_item('EXIF:ImageWidth'),meta.get_item('EXIF:ImageHeight'))) print('Band Name: {0}'.format(bandName)) print('Center Wavelength: {0} nm'.format(meta.get_item('XMP:CentralWavelength'))) print('Bandwidth: {0} nm'.format(meta.get_item('XMP:WavelengthFWHM'))) print('Focal Length: {0}'.format(meta.get_item('EXIF:FocalLength'))) ``` ### Converting raw Sequoia images to irradiance This step includes vignette correction and conversion of raw image to irradiance using the sensor calibration model. First a vignette map is created following the procedure described in the [Application note: How to correct vignetting in images](https://forum.developer.parrot.com/uploads/default/original/2X/b/b9b5e49bc21baf8778659d8ed75feb4b2db5c45a.pdf). The implemented code is written by [seanmcleod](https://forum.developer.parrot.com/t/vignetting-correction-sample-code/5614). The vignette map will be multiplied by the raw image values to reverse the darkening seen at the image corners. See the `vignette_correction` function for the details of the vignette parameters and their use. 
```python V = vignette_correction(meta, xDim, yDim) ``` The conversion of raw image to irradiance is done following the procedure described in the [Application note: Pixel value to irradiance using the sensor calibration model](https://forum.developer.parrot.com/uploads/default/original/2X/3/383261d35e33f1f375ee49e9c7a9b10071d2bf9d.pdf). The procedure is described in more details in this [document](https://onedrive.live.com/?authkey=%21ACzNLk1ORe37aRQ&cid=C34147D823D8DFEF&id=C34147D823D8DFEF%2115414&parId=C34147D823D8DFEF%21106&o=OneUp). ``` import micasense.sequoiautils as msutils SequoiaIrradiance, V = msutils.sequoia_irradiance(meta, imageRaw) plotutils.plotwithcolorbar(V,'Vignette Factor'); plotutils.plotwithcolorbar(SequoiaIrradiance,'Sequoia irradiance image with vignette factor applied'); ``` ### Sunshine calibration of Sequoia irradiance image Next step is to calculate the sunshine irradiance. This is used to normalise the images in an image dataset according to variations in the incomming solar radiation. The implemented code for retrieving the sunshine sensor data is written by [Yu-Hsuan Tu](https://github.com/dobedobedo/Parrot_Sequoia_Image_Handler/tree/master/Modules/Dependency). ``` # Sunshine sensor Irradiance SunIrradiance = msutils.GetSunIrradiance(meta) print ('Sunshine sensor irradiance: ', SunIrradiance) # Light calibrated sequoia irradiance SequoiaIrradianceCalibrated = SequoiaIrradiance/SunIrradiance plotutils.plotwithcolorbar(SequoiaIrradianceCalibrated,'Light calibrated Sequoia irradiance image'); ``` ### Convert irradiance to reflectance The calibrated iradiance image can now be converted into reflectance. To do this, an image of a reflectance panel with known reflectance is required. The irradiance values of the panel image is then used to determine a scale factor between irradiance and reflectance. For now I do not have a Sequoia example. 
But the procedure is similar to the RedEdge procedure described in the `MicaSense RedEdge Image Processing Tutorial 1`. ### Undistorting images Finally, lens distortion effects can be removed from the images using the information in the [Application note: How to correct distortion in images](https://forum.developer.parrot.com/uploads/default/original/2X/e/ec302e9e4498cba5165711c2a52fa2c37be10431.pdf). The implemented code is originally written in Matlab by [muzammil360](https://github.com/muzammil360/SeqUDR) and has been modified and rewritten in Python. Generally for photogrammetry processes on raw (or irradiance/reflectance) images, this step is not required, as the photogrammetry process will optimize a lens distortion model as part of its bulk bundle adjustment. ``` # correct for lens distortions to make straight lines straight undistortedImage = msutils.correct_lens_distortion_sequoia(meta, SequoiaIrradianceCalibrated) plotutils.plotwithcolorbar(undistortedImage, 'Undistorted image'); ``` --- Copyright (c) 2017-2018 MicaSense, Inc. For licensing information see the [project git repository](https://github.com/micasense/imageprocessing)
github_jupyter
# 凸性 凸性 (convexity)在优化算法的设计中起到至关重要的作用, 这主要是由于在这种情况下对算法进行分析和测试要容易得多。 换言之,如果该算法甚至在凸性条件设定下的效果很差, 通常我们很难在其他条件下看到好的结果。 此外,即使深度学习中的优化问题通常是非凸的, 它们也经常在局部极小值附近表现出一些凸性。 这可能会产生一些像[Izmailov.Podoprikhin.Garipov.ea.2018](https://zh-v2.d2l.ai/chapter_references/zreferences.html#izmailov-podoprikhin-garipov-ea-2018) 这样比较有意思的新的优化变体。 ``` %matplotlib inline import numpy as np import paddle from mpl_toolkits import mplot3d ``` ## 定义 在进行凸分析之前,我们需要定义凸集(convex sets)和凸函数(convex functions)。 ### 1. 凸集 凸集(convex set)是凸性的基础。 简单地说,如果对于任何 $a,b \in \mathcal{X}$ ,连接 $a$ 和 $b$ 的线段也位于 $\mathcal{X}$ 中,则向量空间中的一个集合 $\mathcal{X}$ 是凸(convex)的。 在数学术语上,这意味着对于所有 $\lambda \in [0, 1]$ ,我们得到 $$\lambda a + (1-\lambda) b \in \mathcal{X} \text{ 当 } a, b \in \mathcal{X}.$$ 这听起来有点抽象,那我们来看一下 图11.2.1 里的例子。 第一组存在不包含在集合内部的线段,所以该集合是非凸的,而另外两组则没有这样的问题。 <img src="../img/pacman.svg"> <center>图11.2.1 第一组是非凸的,另外两组是凸的。</center> 有了定义做什么呢? 我们来看一下交集 图11.2.2 。 假设 $\mathcal{X}$ 和 $\mathcal{Y}$ 是凸集,那么 $\mathcal {X} \cap \mathcal{Y}$ 也是凸集的。 现在考虑任意 $a, b \in \mathcal{X} \cap \mathcal{Y}$ , 因为 $\mathcal{X}$ 和 $\mathcal{Y}$ 是凸集, 所以连接 $a$ 和 $b$ 的线段包含在 $\mathcal{X}$ 和 $\mathcal{Y}$ 中。 鉴于此,它们也需要包含在 $\mathcal{X} \cap \mathcal{Y}$ 中,从而证明我们的定理。 <img src="../img/convex-intersect.svg"> <center>图11.2.2 两个凸集的交集是凸的。</center> 我们可以毫不费力地进一步得到这样的结果: 给定凸集 $\mathcal{X}i$ ,它们的交集 $\cap{i} \mathcal{X}_i$ 是凸的。 但是反向是不正确的,考虑两个不相交的集合 $\mathcal{X} \cap \mathcal{Y} = \emptyset$, 取 $a \in \mathcal{X}$ 和 $b \in \mathcal{Y}$ 。 因为我们假设 $\mathcal{X} \cap \mathcal{Y} = \emptyset$ , 在 图11.2.3 中连接 $a$ 和 $b$ 的线段需要包含一部分既不在 $\mathcal{X}$ 也不在 $\mathcal{Y}$ 中。 因此线段也不在 $\mathcal{X} \cup \mathcal{Y}$ 中,因此证明了凸集的并集不一定是凸的,即非凸(nonconvex)的。 <img src="../img/nonconvex.svg"> <center>图11.2.3 两个凸集的并集不一定是凸的。</center> 通常,深度学习中的问题是在凸集上定义的。 例如, $\mathbb{R}^d$ ,即实数的 $d$-维向量的集合是凸集(毕竟 $\mathbb{R}^d$ 中任意两点之间的线存在 $\mathbb{R}^d$ )中。 在某些情况下,我们使用有界长度的变量,例如球的半径定义为 ${\mathbf{x} | \mathbf{x} \in \mathbb{R}^d \text{ and } | \mathbf{x} | \leq r}$ 。 ## 2. 
凸函数 现在我们有了凸集,我们可以引入凸函数(convex function)$f$。 给定一个凸集 $\mathcal{X}$ ,如果对于所有 $x, x' \in \mathcal{X}$ 和所有 $\lambda \in [0, 1]$ ,一个函数 $f: \mathcal{X} \to \mathbb{R}$ 是凸的,我们可以得到 $$\lambda f(x) + (1-\lambda) f(x') \geq f(\lambda x + (1-\lambda) x').$$ 为了说明这一点,让我们绘制一些函数并检查哪些函数满足要求。 下面我们定义一些函数,包括凸函数和非凸函数。 ``` f = lambda x: 0.5 * x**2 # 凸函数 g = lambda x: np.cos(np.pi * x) # 非凸函数 h = lambda x: np.exp(0.5 * x) # 凸函数 x, segment = np.arange(-2, 2, 0.01), np.arange(-1.5, 1, 2.49) use_svg_display() _, axes = plt.subplots(1, 3, figsize=(9, 3)) for ax, func in zip(axes, [f, g, h]): plot([x, segment], [func(x), func(segment)], axes=ax) ``` 不出所料,余弦函数为非凸的,而抛物线函数和指数函数为凸的。 请注意, 为使该条件有意义, $\mathcal{X}$ 是凸集的要求是必要的。 否则可能无法很好地界定 $f(\lambda x + (1-\lambda) x')$ 的结果。 ## 3. 詹森不等式 给定一个凸函数 $f$ ,最有用的数学工具之一就是詹森不等式(Jensen's inequality)。 它是凸性定义的一种推广: $$\sum_i \alpha_i f(x_i) \geq f\left(\sum_i \alpha_i x_i\right) \text{ and } E_X[f(X)] \geq f\left(E_X[X]\right),$$ 其中 $\alpha_i$ 是非负实数,因此 $\sum_i \alpha_i = 1$ 且 $X$ 是随机变量。 换句话说,凸函数的期望不小于期望的凸函数,其中后者通常是一个更简单的表达式。 为了证明第一个不等式,我们多次将凸性的定义应用于一次求和中的一项。 詹森不等式的一个常见应用:用一个较简单的表达式约束一个较复杂的表达式。 例如,它可以应用于部分观察到的随机变量的对数似然。 具体地说,由于$\int P(Y) P(X \mid Y) dY = P(X)$,所以 $$E_{Y \sim P(Y)}[-\log P(X \mid Y)] \geq -\log P(X),$$ 这里,$Y$ 是典型的未观察到的随机变量, $P(Y)$ 是它可能如何分布的最佳猜测, $P(X)$ 是将 $Y$ 积分后的分布。 例如,在聚类中 $Y$ 可能是簇标签,而在应用簇标签时, $P(X \mid Y)$ 是生成模型。 # 性质 下面我们来看一下凸函数一些有趣的性质。 ## 1. 
局部极小值是全局极小值 首先凸函数的局部极小值也是全局极小值。 我们用反证法证明它是错误的:假设 $x^{\ast} \in \mathcal{X}$ 是一个局部最小值,使得有一个很小的正值 $p$ ,使得 $x \in \mathcal{X}$ 满足 $0 < |x - x^{\ast}| \leq p$ 有 $f(x^{\ast}) < f(x)$ 。 假设存在 $x' \in \mathcal{X}$ ,其中 $f(x') < f(x^{\ast})$ 。 根据凸性的性质, $$\begin{aligned} f(\lambda x^{\ast} + (1-\lambda) x') &\leq \lambda f(x^{\ast}) + (1-\lambda) f(x') \ &< \lambda f(x^{\ast}) + (1-\lambda) f(x^{\ast}) \ &= f(x^{\ast}), \ \end{aligned}$$ 这与 $x^{\ast}$ 是局部最小值相矛盾。 因此,对于 $f(x') < f(x^{\ast})$ 不存在 $x' \in \mathcal{X}$ 。 综上所述,局部最小值 $x^{\ast}$ 也是全局最小值。 例如,对于凸函数 $f(x) = (x-1)^2$ ,有一个局部最小值 $x=1$ , 它也是全局最小值。 ``` f = lambda x: (x - 1) ** 2 set_figsize() plot([x, segment], [f(x), f(segment)], 'x', 'f(x)') ``` 凸函数的局部极小值同时也是全局极小值这一性质是很方便的。 这意味着如果我们最小化函数,我们就不会“卡住”。 但是,请注意,这并不意味着不能有多个全局最小值,或者可能不存在一个全局最小值。 例如,函数 $f(x) = \mathrm{max}(|x|-1, 0)$ 在 $[-1,1]$ 区间上都是最小值。 相反,函数 $f(x) = \exp(x)$ 在 $\mathbb{R}$ 上没有取得最小值。对于 $x \to -\infty$ ,它趋近于 $0$ ,但是没有 $f(x) = 0$ 的 $x$ 。 ## 2. 水平集的凸函数 凸函数将凸集定义为水平集(below sets)。它们定义为: $$\mathcal{S}_b := {x | x \in \mathcal{X} \text{ and } f(x) \leq b}$$ 这样的集合是凸的。 让我们快速证明一下。 对于任何 $x, x' \in \mathcal{S}_b$ ,我们需要证明:当$\lambda \in [0, 1]$,$\lambda x + (1-\lambda) x' \in \mathcal{S}_b$。 因为$f(x) \leq b$且$f(x') \leq b$,所以 $$f(\lambda x + (1-\lambda) x') \leq \lambda f(x) + (1-\lambda) f(x') \leq b.$$ ## 3. 凸性和二阶导数 当一个函数的二阶导数 $f: \mathbb{R}^n \rightarrow \mathbb{R}$ 存在时,我们很容易检查这个函数的凸性。 我们需要做的就是检查 $\nabla^2f \succeq 0$ , 即对于所有 $\mathbf{x} \in \mathbb{R}^n$, $\mathbf{x}^\top \mathbf{H} \mathbf{x} \geq 0$. 
例如,函数 $f(\mathbf{x}) = \frac{1}{2} |\mathbf{x}|^2$ 是凸的,因为 $\nabla^2 f = \mathbf{1}$ , 即其导数是单位矩阵。 更正式的讲,$f$ 为凸函数,当且仅当任意二次可微一维函数 $f: \mathbb{R}^n \rightarrow \mathbb{R}$ 是凸的。 对于任意二次可微多维函数$f: \mathbb{R}^{n} \rightarrow \mathbb{R}$, 它是凸的当且仅当它的 Hessian $\nabla^2f\succeq 0$。 首先,我们来证明一下一维情况。 为了证明凸函数的 $f''(x) \geq 0$,我们使用: $$\frac{1}{2} f(x + \epsilon) + \frac{1}{2} f(x - \epsilon) \geq f\left(\frac{x + \epsilon}{2} + \frac{x - \epsilon}{2}\right) = f(x).$$ 因为二阶导数是由有限差分的极限给出的,所以遵循 $$f''(x) = \lim_{\epsilon \to 0} \frac{f(x+\epsilon) + f(x - \epsilon) - 2f(x)}{\epsilon^2} \geq 0.$$ 为了证明 $f'' \geq 0$ 可以推导 $f$ 是凸的, 我们使用这样一个事实:$f'' \geq 0$ 意味着 $f'$ 是一个单调的非递减函数。 假设 $a < x < b$ 是$\mathbb{R}$中的三个点, 其中,$x = (1-\lambda)a + \lambda b$ 且 $\lambda \in (0, 1)$. 根据中值定理,存在 $\alpha \in [a, x]$,$\beta \in [x, b]$,使得 $$f'(\alpha) = \frac{f(x) - f(a)}{x-a} \text{ 且 } f'(\beta) = \frac{f(b) - f(x)}{b-x}.$$ 通过单调性 $f'(\beta) \geq f'(\alpha)$ ,因此 $$\frac{x-a}{b-a}f(b) + \frac{b-x}{b-a}f(a) \geq f(x).$$ 由于 $x = (1-\lambda)a + \lambda b$, 所以 $$\lambda f(b) + (1-\lambda)f(a) \geq f((1-\lambda)a + \lambda b),$$ 从而证明了凸性。 第二,我们需要一个引理证明多维情况: $f: \mathbb{R}^n \rightarrow \mathbb{R}$ 是凸的当且仅当对于所有 $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$ $$g(z) \stackrel{\mathrm{def}}{=} f(z \mathbf{x} + (1-z) \mathbf{y}) \text{ where } z \in [0,1]$$ 是凸的。 为了证明 $f$ 的凸性意味着 $g$ 是凸的, 我们可以证明,对于所有的 $a,b,\lambda \in[0,1]$, $0 \leq \lambda a + (1-\lambda) b \leq 1$。 $$\begin{aligned} &g(\lambda a + (1-\lambda) b)\ =&f\left(\left(\lambda a + (1-\lambda) b\right)\mathbf{x} + \left(1-\lambda a - (1-\lambda) b\right)\mathbf{y} \right)\ =&f\left(\lambda \left(a \mathbf{x} + (1-a) \mathbf{y}\right) + (1-\lambda) \left(b \mathbf{x} + (1-b) \mathbf{y}\right) \right)\ \leq& \lambda f\left(a \mathbf{x} + (1-a) \mathbf{y}\right) + (1-\lambda) f\left(b \mathbf{x} + (1-b) \mathbf{y}\right) \ =& \lambda g(a) + (1-\lambda) g(b). 
\end{aligned}$$ 为了证明这一点,我们可以展示给你看 $[0,1]$ 中所有的 $\lambda$ : $$\begin{aligned} &f(\lambda \mathbf{x} + (1-\lambda) \mathbf{y})\ =&g(\lambda \cdot 1 + (1-\lambda) \cdot 0)\ \leq& \lambda g(1) + (1-\lambda) g(0) \ =& \lambda f(\mathbf{x}) + (1-\lambda) g(\mathbf{y}). \end{aligned}$$ 最后,利用上面的引理和一维情况的结果,我们可以证明多维情况: 多维函数 $f:\mathbb{R}^n\rightarrow\mathbb{R}$ 是凸函数,当且仅当 $g(z) \stackrel{\mathrm{def}}{=} f(z \mathbf{x} + (1-z) \mathbf{y})$ 是凸的,这里 $z \in [0,1]$,$\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$ 。 根据一维情况, 当且仅当对于所有 $\mathbf{x}, \mathbf{y} \in \mathbb{R}^n$, $g'' = (\mathbf{x} - \mathbf{y})^\top \mathbf{H}(\mathbf{x} - \mathbf{y}) \geq 0$ ($\mathbf{H} \stackrel{\mathrm{def}}{=} \nabla^2f$)。 这相当于根据半正定矩阵的定义, $\mathbf{H} \succeq 0$。 # 约束 凸优化的一个很好的特性是能够让我们有效地处理约束(constraints)。 即它使我们能够解决以下形式的 约束优化(constrained optimization)问题: $$\begin{aligned} \mathop{\mathrm{minimize~}}_{\mathbf{x}} & f(\mathbf{x}) \ \text{ subject to } & c_i(\mathbf{x}) \leq 0 \text{ for all } i \in {1, \ldots, N}. \end{aligned}$$ 这里 $f$ 是目标函数, $c_i$ 是约束函数。 例如第一个约束 $c_1(\mathbf{x}) = |\mathbf{x}|_2 - 1$ ,则参数 $\mathbf{x}$ 被限制为单位球。 如果第二个约束 $c_2(\mathbf{x}) = \mathbf{v}^\top \mathbf{x} + b$ ,那么这对应于半空间上所有的 $\mathbf{x}$ 。 同时满足这两个约束等于选择一个球的切片作为约束集。 ## 1. 拉格朗日函数 通常,求解一个有约束的优化问题是困难的,解决这个问题的一种方法来自物理中相当简单的直觉。 想象一个球在一个盒子里,球会滚到最低的地方,重力将与盒子两侧对球施加的力平衡。 简而言之,目标函数(即重力)的梯度将被约束函数的梯度所抵消(由于墙壁的“推回”作用,需要保持在盒子内)。 请注意,任何不起作用的约束(即球不接触壁)都将无法对球施加任何力。 这里我们简略拉格朗日函数 $L$ 的推导,上述推理可以通过以下鞍点优化问题来表示: $$L(\mathbf{x}, \alpha_1, \ldots, \alpha_n) = f(\mathbf{x}) + \sum_{i=1}^n \alpha_i c_i(\mathbf{x}) \text{ where } \alpha_i \geq 0.$$ 这里的变量 $\alpha_i$ ($i=1,\ldots,n$) 是所谓的拉格朗日乘数(Lagrange multipliers),它确保约束被正确地执行。 选择它们的大小足以确保所有 $i$ 的 $c_i(\mathbf{x}) \leq 0$ 。 例如,对于 $c_i(\mathbf{x}) < 0$ 中任意 $\mathbf{x}$ ,我们最终会选择 $\alpha_i = 0$ 。 此外,这是一个鞍点(saddlepoint)优化问题。 在这个问题中,我们想要使 $L$ 相对于 $\alpha_i$ 最大化(maximize),同时使它相对于 $\mathbf{x}$ 最小化(minimize)。 有大量的文献解释如何得出函数 $L(\mathbf{x}, \alpha_1, \ldots, \alpha_n)$ 。 我们这里只需要知道 $L$ 的鞍点是原始约束优化问题的最优解就足够了。 ## 2. 
惩罚 一种至少近似地满足约束优化问题的方法是采用拉格朗日函数 $L$ 。 除了满足 $c_i(\mathbf{x}) \leq 0$ 之外,我们只需将 $\alpha_i c_i(\mathbf{x})$ 添加到目标函数 $f(x)$ 。 这样可以确保不会严重违反约束。 事实上,我们一直在使用这个技巧。 比如权重衰减 :numref:sec_weight_decay,在目标函数中加入 $\frac{\lambda}{2} |\mathbf{w}|^2$ ,以确保 $\mathbf{w}$ 不会增长太大。 使用约束优化的观点,我们可以看到,对于若干半径 $r$ ,这将确保 $|\mathbf{w}|^2 - r^2 \leq 0$ 。 通过调整 $\lambda$ 的值,我们可以改变 $\mathbf{w}$ 的大小。 通常,添加惩罚是确保近似满足约束的一种好方法。 在实践中,这被证明比精确的满意度更可靠。 此外,对于非凸问题,许多使精确方法在凸情况下如此吸引人的性质(例如,最优性)不再成立。 ## 3. 投影 满足约束条件的另一种策略是投影(projections)。 同样,我们之前也遇到过,例如在处理梯度裁剪 :8.5节 时,我们确保梯度的长度以 $\theta$ 为界限,通过 $$\mathbf{g} \leftarrow \mathbf{g} \cdot \mathrm{min}(1, \theta/|\mathbf{g}|).$$ 这就是 $\mathbf{g}$ 在半径为 $\theta$ 的球上的投影(projection)。 更泛化的说,在凸集 $\mathcal{X}$ 上的投影被定义为 $$\mathrm{Proj}\mathcal{X}(\mathbf{x}) = \mathop{\mathrm{argmin}}{\mathbf{x}' \in \mathcal{X}} |\mathbf{x} - \mathbf{x}'|.$$ 它是 $\mathcal{X}$ 中离 $\mathbf{X}$ 最近的点。 <img src="../img/projections.svg"> <center>图11.2.4 Convex Projections。</center> 投影的数学定义听起来可能有点抽象,为了解释得更清楚一些,请看 图11.2.4 。 图中有两个凸集,一个圆和一个菱形。 两个集合内的点(黄色)在投影期间保持不变。 两个集合(黑色)之外的点投影到集合中接近原始点(黑色)的点(红色)。 虽然对于 $L_2$ 的球面来说,方向保持不变,但一般情况下不需要这样。 凸投影的一个用途是计算稀疏权重向量。 在本例中,我们将权重向量投影到一个$L_1$的球上, 这是钻石例子的一个广义版本,在 图11.2.4。 # 小结 在深度学习的背景下,凸函数的主要目的是帮助我们详细了解优化算法。 我们由此得出梯度下降法和随机梯度下降法是如何相应推导出来的。 - 凸集的交点是凸的,并集不是。 - 根据詹森不等式,“一个多变量凸函数的总期望值”大于或等于“用每个变量的期望值计算这个函数的总值“。 - 一个二次可微函数是凸函数,当且仅当其Hessian(二阶导数矩阵)是半正定的。 - 凸约束可以通过拉格朗日函数来添加。在实践中,只需在目标函数中加上一个惩罚就可以了。 - 投影映射到凸集中最接近原始点的点。 # 练习 1. 假设我们想要通过绘制集合内点之间的所有直线并检查这些直线是否包含来验证集合的凸性。 i. 证明只检查边界上的点是充分的。 ii. 证明只检查集合的顶点是充分的。 2. 用 $p$ -范数表示半径为 $r$ 的球,证明 $\mathcal{B}_p[r] := {\mathbf{x} | \mathbf{x} \in \mathbb{R}^d \text{ and } |\mathbf{x}|_p \leq r}$ , $\mathcal{B}_p[r]$ 对于所有 $p \geq 1$ 是凸的。 3. 已知凸函数 $f$ 和 $g$ 表明 $\mathrm{max}(f, g)$ 也是凸函数。证明 $\mathrm{min}(f, g)$ 是非凸的。 4. 证明Softmax函数的归一化是凸的,即 $f(x) = \log \sum_i \exp(x_i)$ 的凸性。 5. 证明线性子空间 $\mathcal{X} = {\mathbf{X} | \mathbf{W} \mathbf{X} = \mathbf{b}}$ 是凸集。 6. 
证明在线性子空间 $\mathbf{b} = \mathbf{0}$ 的情况下,对于矩阵 $\mathbf{M}$ 的投影 $\mathrm{Proj}_{\mathcal{X}}$ 可以写成 $\mathbf{M} \mathbf{X}$。 7. 证明对于凸二次可微函数 $f$ ,对于 $\xi \in [0, \epsilon]$ ,我们可以写成 $f(x + \epsilon) = f(x) + \epsilon f'(x) + \frac{1}{2} \epsilon^2 f''(x + \xi)$ 。 8. 给定一个向量 $\mathbf{w} \in \mathbb{R}^d$ 与 $\|\mathbf{w}\|_1 > 1$ ,计算在 $L_1$ 单位球上的投影。 i. 作为中间步骤,写出惩罚目标 $\|\mathbf{w} - \mathbf{w}'\|_2^2 + \lambda \|\mathbf{w}'\|_1$ ,计算给定 $\lambda > 0$ 的解。 ii. 你能无须反复试错就找到 $\lambda$ 的“正确”值吗? 9. 给定一个凸集 $\mathcal{X}$ 和两个向量 $\mathbf{x}$ 和 $\mathbf{y}$ ,证明投影不会增加距离,即 $\|\mathbf{x} - \mathbf{y}\| \geq \|\mathrm{Proj}_{\mathcal{X}}(\mathbf{x}) - \mathrm{Proj}_{\mathcal{X}}(\mathbf{y})\|$。
github_jupyter
- author: Lee Meng - date: 2018-08-12 12:00 - title: New Post - slug: just-a-test-url - tags: 機器學習, Pytorch - description: This is a description - summary: This is a summary - image: andy-kelly-402111-unsplash.jpg - image_credit_url: https://www.google.com - enable_notebook_download: true - status: draft ``` import torch import torchvision.transforms as transforms from torch.utils.data import DataLoader from TinyImageNet import TinyImageNet from tensorboardX import SummaryWriter from torch.autograd import Variable from utils import show_images_horizontally from NaiveResNet import NaiveResNet %reload_ext autoreload %autoreload 2 ``` ## 步驟 - 讀取資料集 - 簡單 EDA - facets - 定義目標 / loss function - 定義模型 - 訓練模型 - 測試模型 - 視覺化 kernels / parameters ## 前處理資料 - 讀取資料 - 轉換(灰階處理、Augmentation、Crop) 注意在 validation 時我們不需要做 augmentation ``` root = 'dataset' # The output of torchvision datasets are PILImage images of range [0, 1]. # We transform them to Tensors of normalized range [-1, 1]. # normalize 在現在有 batch-normalization 的情況下其實非必要 normalize = transforms.Normalize((.5, .5, .5), (.5, .5, .5)) augmentation = transforms.RandomApply([ transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.RandomResizedCrop(64)], p=.8) training_transform = transforms.Compose([ transforms.Lambda(lambda x: x.convert("RGB")), augmentation, transforms.ToTensor(), normalize]) valid_transform = transforms.Compose([ transforms.Lambda(lambda x: x.convert("RGB")), transforms.ToTensor(), normalize]) ``` 將圖片全讀到記憶體,最小化硬碟 overhead ``` in_memory = False %%time training_set = TinyImageNet(root, 'train', transform=training_transform, in_memory=in_memory) valid_set = TinyImageNet(root, 'val', transform=valid_transform, in_memory=in_memory) training_set ``` ## 顯示處理後圖片 主要是顯示經過 data augmentation 的圖片。為了讓模型更 robust,我們隨機進行水平翻轉、剪裁以及旋轉的處理。在這邊顯示的圖有進行反正規化(un-normalization)。 ``` tmpiter = iter(DataLoader(training_set, batch_size=10, shuffle=True)) for _ in range(5): images, labels = tmpiter.next() 
show_images_horizontally(images, un_normalize=True) ``` ## 定義 loss function ``` ce_loss = torch.nn.CrossEntropyLoss() ``` ## 建立模型 ``` resnet = NaiveResNet(num_classes=200) device = torch.device("cuda") resnet = resnet.to(device) ``` ## 將模型圖寫到 Tensorboard 以供確認 ``` from tensorboardX import SummaryWriter sw = SummaryWriter(log_dir='./runs', comment='NaiveResNet') dummy_input = Variable(torch.rand(16, 3, 64, 64)).to(device) sw.add_graph(resnet, (dummy_input, )) # out = resnet.forward(dummy_input) # out.size() ``` ## 定義 Optimizer, Scheduler ``` # optimizer = torch.optim.Adam(resnet.parameters(), lr=0.01, weight_decay=0.0001) optimizer = torch.optim.SGD(resnet.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0001) lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 50) ``` ## 訓練模型 ``` max_epochs = 120 trainloader = DataLoader(training_set, batch_size=32, shuffle=True, num_workers=4) validloader = DataLoader(valid_set, batch_size=64, num_workers=6) %%time import time assert torch.cuda.is_available() try: for epoch in range(max_epochs): start = time.time() lr_scheduler.step() epoch_loss = 0.0 resnet.train() for idx, (data, target) in enumerate(trainloader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = resnet(data) batch_loss = ce_loss(output, target) batch_loss.backward() optimizer.step() epoch_loss += batch_loss.item() if idx % 10 == 0: print('{:.1f}% of epoch'.format(idx / float(len(trainloader)) * 100), end='\r') # evaluate on validation set num_hits = 0 num_instances = len(valid_set) with torch.no_grad(): resnet.eval() for idx, (data, target) in enumerate(validloader): data, target = data.to(device), target.to(device) output = resnet(data) _, pred = torch.max(output, 1) # output.topk(1) *1 = top1 num_hits += (pred == target).sum().item() # print('{:.1f}% of validation'.format(idx / float(len(validloader)) * 100), end='\r') valid_acc = num_hits / num_instances * 100 print(f' Validation acc: {valid_acc}%') 
sw.add_scalar('Validation Accuracy(%)', valid_acc, epoch + 1) epoch_loss /= float(len(trainloader)) # print("Time used in one epoch: {:.1f}".format(time.time() - start)) # save model torch.save(resnet.state_dict(), 'models/weight.pth') # record loss sw.add_scalar('Running Loss', epoch_loss, epoch + 1) except KeyboardInterrupt: print("Interrupted. Releasing resources...") finally: # this is only required for old GPU torch.cuda.empty_cache() !cd models/;ls -alth ``` ## Load model ``` sd = torch.load('models/weight.pth', map_location=lambda storage, location: storage) resnet.load_state_dict(sd) resnet = resnet.to(device) ``` ## Todo - Tensorboard - save model by best metrics - 多點augmentation - 要不要加learning rate schduler - https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
github_jupyter
#### Mini-Project # 4 This project will focus on suitability analysis with raster data. Your tasks will be both conceptual-level and technical. At the conceptual level, you will define a suitability model of your choice, for an area of your choice. See https://en.wikipedia.org/wiki/Suitability_model for a brief description of what a suitability model is. For example, you may be looking for best areas for community gardens: these are often in underutilized land in residential land uses, with good soils, accessible (not steep slope), etc. So you would be looking for areas with a specific type of land use/land cover, with an appropriate range of values of slope, etc. You may build additional criteria based on a range of precipitaiton values, whether the area is affected by wildfires, has mild temperatures, low levels of soil erosion, has vegeration (eg derived from satellite imagery using NDVI), etc. Feel free to use the imagery layers we explored or mentioned during the raster-focused lectures. One of the cells in the last raster notebook contained a list of about a dozen such layers available through AGOL - but feel free to find more. You can use any two of the map combination techniques discussed in the lecture on Thu 5/9. You should clearly identify the map combination technique you use, and discuss any uncertainty issues associated with these specific techniques. As the outcome of this part, you will need to a) describe the suitability model you want to develop; b) identify the raster data layers you will use; and c) describe two of the map combination techniques you will use to derive the two suitablity maps, and their pros and cons. In your model you will need to combine at least 3 raster data layers. The second part will involve implementing your suitability model using arcgis raster functions. For this exercise, we'll only use local functions, and possibly focal_statistics. 
This is the current state of implementation of raster functions accessible through python API and in ArcGIS Online. Examples of what works are in the lecture notebooks. Look through the last notebook carefully, as there are a few extra cells not shown in the lecture, which demonstrate *working* map combination examples for different types of layers. Be creative - but I'd rather not have you wander into blind alleys too much as you look for a declared function and it is simply not there. Note that since some functions are associated with raster services, they depend on the version of the server a particular imagery layer is served from. See in the lecture notebooks how you can retrieve the version of API supported by each layer, and which functions are supported in each API. The third part will be a brief write-up comparing the two output rasters generated for your suitability model using the two map combination techniques. The notebook should include documentation of the steps, as usual. ### Names: Bolin Yang, Shuibenyang Yuan ### IDs: A92111272, A14031016 1. Formulate a suitability model (markdown, about 100 words) Our goal is to find suitable housing in San Diego areas, based on three factors (mean rainfall, mean temperature and air pollution). We picked these three features based on a survey we conducted among our friends and classmates. Most of them believe that these three factors are the most important ones in finding suitable housing. We will find the imagery layers for these three factors, and combine them into a single raster using two different map combination techniques. During the process, we will normalize the values in each raster based on the data and definitions of each feature. (Thus, the values will become binary values 0 or 1) The two map combination techniques will be Exclusionary screening in Dominance rules and Voting Tabulation in Contributory rules. In the final part, we will compare and analyze the results generated by the map combination.
``` # 2. Imports, etc. %matplotlib inline from arcgis import GIS gis = GIS(username='boy015_UCSDOnline8') import arcgis.raster from arcgis.raster import * from arcgis.geometry import * from arcgis.geocoding import geocode # 3. List imagery layers to be used in your model. This cell should contian layer definitions. # Include at least three initial raster sources. # YOUR CODE HERE # rainfall imagery layers # This layer displays average annual rainfall in inches for the period of January 1971 through December 2009. # Examining climate over time is useful when quantifying the effects of climate changes on species’ distributions # for past, current, and forecasted scenarios. The relationships established between species demographics and # distributions with bioclimatic predictors can inform land managers of climatic effects on species during # decision making processes. usa_mean_rainfall = \ ImageryLayer("https://landscape3.arcgis.com/arcgis/rest/services/USA_Mean_Rainfall/ImageServer", gis) # temperature imagery layers # Annual mean temperature is mean of the average temperatures for each month in degrees Celsius # for the period of January 1971 through December 2009. The relationships established between species # demographics and distributions with bioclimatic predictors can inform land managers of climatic effects # on species during decision making processes. usa_mean_temperature = \ ImageryLayer("https://landscape3.arcgis.com/arcgis/rest/services/USA_Mean_Temperature/ImageServer", gis) # air pollution imagery layers # The data set represents a series of three-year running mean grids (1998-2012) of fine particulate matter # (solid particles and liquid droplets) that were derived # from a combination of MODIS (Moderate Resolution Imaging Spectroradiometer), # MISR (Multi-angle Imaging SpectroRadiometer) and SeaWIFS (Sea-Viewing Wide Field-of-View Sensor) # AOD satellite retrievals. 
Together the grids provide a continuous surface of concentrations # in micrograms per cubic meter of particulate matter 2.5 micrometers or smaller (PM2.5) # for health and environmental research. global_air_pollution = \ ImageryLayer('https://sedac.ciesin.columbia.edu/arcgis/rest/services/sedac/sdei_global_annual_avg_pm2_5_2001_2010_image_service/ImageServer',gis) # 4. Derive the area of interest (AOI) and its geometry and extent. # The smaller the area the better (so that you don't run into raster size limitations) # YOUR CODE HERE # we choose San Diego as our area of interest # find geocode of san diego and extent and find geometry and spatial reference of the area study_area_gcd = geocode(address='San Diego County, CA', out_sr=usa_mean_rainfall.extent['spatialReference']) study_area_extent = study_area_gcd[0]['extent'] counties_item = gis.content.search('USA Counties generalized', 'Feature Layer', outside_org=True)[0] counties_lyr = counties_item.layers[0] study_area_query = counties_lyr.query("FIPS='06073'", return_geometry=True) study_area_geom= study_area_query.features[0].geometry study_area_geom['spatialReference'] = study_area_query.spatial_reference # define more exact location and update the spatial reference study_area_ex = Geometry(study_area_geom).geoextent tup = (('xmin',study_area_ex[0]), ('ymin', study_area_ex[1]), ('xmax',study_area_ex[2]), ('ymax', study_area_ex[3])) study_area_extent = dict(tup) crs = "{'latestwkid':3857, 'wkid':102100}" study_area_extent.update({'spatialReference':{'latestwkid':3857, 'wkid':102100}}) ``` 5. Name the two map combination techniques you will use to combine the data and describe their pros and cons (markdown) We used two kind of map combination techniques: #### - Dominance Exclusionary Screening: - we used a * b * c to perform dominance exclusionary screening. 
a, b and c are all binary layers (we have preprocessed them to binary form), so the result is a raster that labels 1 only in the areas where all three rasters intersect. It selects the areas that satisfy every condition of being good for living. - Pros: it accurately selects the areas that satisfy all the conditions, so they are guaranteed to meet the criteria of 'good for living' - Cons: Exclusionary screening is too strict; it outputs very few areas that have a 1 label in all three maps. It does not rank how good for living the areas are. For instance, it will not single out areas that are good in air quality and rainfall but not in temperature. #### - Contributory Voting Tabulation: - we used contributory voting tabulation as our second choice for combining raster layers. We perform this by using a + b + c in the raster calculator. The voting tabulation shows how many positive (or negative) factors occur at a location. We can find the best areas for living by ranking them. - Pros: it preserves the detail of how many 'good for living' conditions each area satisfies. - Cons: This technique can lead to an inaccurate result. Because each attribute of 'good for living' carries a different weight, simply adding them up with contributory voting tabulation will make the ranking inaccurate. For example, some people do not like rain, so they may weight rainfall as the most important attribute for good living. ``` # 6. Prepare your input layers for map combination: # clip to AOI, remap/normalize, add color map, visualize the layers and legends. # YOUR CODE HERE # Rainfall layers # clip usa mean rainfall layer to AOI, remap/normalize, add color map, visualize the layers and legends. 
rain_study_area = clip(raster=usa_mean_rainfall, geometry=study_area_geom) # create map for the raster # we define the 0-20 mm as suitable place to live based on relative data in san diego, and label as 1, # we define the 20 and above mm as not suitable place to live based on relative data in san diego, and label as 0 map_rain = gis.map() map_rain.extent = study_area_extent map_rain.legend=True clrmap5 = [[1,173, 233, 255],[0,28, 105, 206]] rain_n = remap(raster=rain_study_area, input_ranges=[0, 20, 20, 50], output_values=[1,0], astype='U8') rain_n_c = colormap(rain_n, colormap=clrmap5) map_rain.add_layer(rain_n_c) map_rain # Temperature layers # clip usa mean temperature layer to AOI, remap/normalize, add color map, visualize the layers and legends. temperature_study_area = clip(raster=usa_mean_temperature, geometry=study_area_geom) # create map for the raster # we define the 16 and above degree as suitable place to live in san diego, and label as 1, # we define the 16 and below degree as not suitable place to live in san diego, and label as 0 # based on the article found in https://www.scientificamerican.com/article/why-people-feel-hot/?redirect=1 map_temperature = gis.map() map_temperature.extent = study_area_extent map_temperature.legend=True clrmap5 = [[0,255, 219, 181],[1,255, 101, 12]] temperature_n = remap(raster=temperature_study_area, input_ranges=[10, 16, 16, 23], output_values=[0,1], astype='U8') temperature_n_c = colormap(temperature_n, colormap=clrmap5) map_temperature.add_layer(temperature_n_c) map_temperature # Air pollution layers # clip global air pollution layer to AOI, remap/normalize, add color map, visualize the layers and legends. 
air_pollution_study_area = clip(raster=global_air_pollution, geometry=study_area_geom) # create map for the raster # we define the 0-8 index as suitable place to live based on relative data in san diego, and label as 1, # we define the 8 and above index as not suitable place to live based on relative data in san diego, and label as 0 map_air_pollution = gis.map() map_air_pollution.extent = study_area_extent map_air_pollution.legend=True clrmap5 = [[1, 226, 226, 226], [0, 79, 79, 79]] air_pollution_n = remap(raster=air_pollution_study_area, input_ranges=[0, 8, 8, 20], output_values=[1,0], astype='U8') #TO BE CHANGED air_pollution_n_c = colormap(air_pollution_n, colormap=clrmap5) map_air_pollution.add_layer(air_pollution_n_c) map_air_pollution # 7a. Generate a composite raster layer for your first map combination technique # name of the technique (refer to lecture PPT): # before combine we need to prepare a dummy layer in order to make raster calculator successful usa_elevation = ImageryLayer('https://elevation.arcgis.com/arcgis/rest/services/WorldElevation/Terrain/ImageServer',gis) slope_lyr = slope(dem=usa_elevation, slope_type='DEGREE', z_factor=10) slope_lyr.extent = study_area_extent slope_study_area = clip(raster=slope_lyr, geometry=study_area_geom) # define colors to be maped clrmap2 = [[0, 230, 0, 0], [1, 38, 115, 0]] slope_b = remap(raster=slope_study_area, input_ranges=[0.0,90, 90.0,90.0], output_values=[1,0], astype='U8') slope_b_c = colormap(slope_b, colormap=clrmap2) slope_b_c # YOUR CODE HERE # First method: Exclusionary screening in Dominance rules # define colors to be maped clrmap2 = [[0, 230, 0, 0], [1, 38, 115, 0]] # combine the three rasters into one using raster calculator based on Exclusionary screening in Dominance rules # this is where the error occurs, we will include this error in the report in the extra credit # basically it is because the dataset is too large to be used in raster calculator function rc_out1 = \ raster_calculator(rasters = 
[slope_b_c,rain_n_c,air_pollution_n_c,temperature_n_c], \ input_names = ["D","a","b","c"], expression = "D * a * b * c", extent_type='FirstOf', cellsize_type='FirstOf', astype='UNKNOWN') # assuming the previous function works, the code for showing the raster, 0 for not suitable and 1 for suitable relatively # we use red color for the label 0 and green color for the label 1 rc_out_b_c = colormap(rc_out1, colormap=clrmap2) map_ras = gis.map() map_ras.extent = study_area_extent map_ras.legend=True map_ras.add_layer(rc_out_b_c) map_ras # 7b. Generate a composite raster layer for your second map combination technique # name of the technique (refer to lecture PPT): # YOUR CODE HERE # Second method: Voting Tabulation in Contributory rules # define colors to be maped clrmap4 = [[1, 244, 66, 66], [2, 244, 178, 65], [3, 220, 244, 65], [4, 65, 244, 76]] # combine the three rasters into one using raster calculator based on Voting Tabulation in Contributory rules # this is where the error occurs, we will include this error in the report in the extra credit # basically it is because the dataset is too large to be used in raster calculator function rc_out2 = \ raster_calculator(rasters = [slope_b_c,rain_n_c,air_pollution_n_c,temperature_n_c], \ input_names = ["D","a","b","c"], expression = "D + a + b + c", extent_type='FirstOf', cellsize_type='FirstOf', astype='UNKNOWN') # assuming the previous function works, the code for showing the raster, # 1 for very not suitable and 2 for not suitable, 3 for suitable, 4 for very suitable relatively # we use red color for the label 1 and orange color for the label 2, yellowish green color for the label 3 # green color for the label 4 rc_out_2_b_c = colormap(rc_out2, colormap=clrmap4) map_ras2 = gis.map() map_ras2.extent = study_area_extent map_ras2.legend=True map_ras2.add_layer(rc_out_2_b_c) map_ras2 # 8. Compare the results, and describe how different map combination techniques resulted in different outputs (or not.) 
# 
# YOUR CODE HERE
# Assuming there was no error in the raster calculator functions, we compare the maps below
map_ras

map_ras2
```

In both map combination techniques, we found that there are quite a lot of suitable and unsuitable places in San Diego. Compared to Exclusionary screening in Dominance rules in the first map, we found more suitable raster cells with Voting Tabulation in Contributory rules in the second map, which means that there are more places to choose from for housing in the second map. The reason for this is simply that voting mapping allows people to tell how many suitable features there are in each area. However, the exclusionary screening only presents places that have all suitable features. As a result, a lot of relatively suitable places are ignored by the first mapping technique (Exclusionary screening in Dominance rules).

However, both of the techniques are not that great in our opinion, since everyone values the importance of the features differently; if anyone wants to use this map, we should use weighted voting techniques according to that person's preferences in order to find the most suitable places for personal needs.

### Extra Credit: Issue Report

Our map only works because we add the extra layer (slope) to the map. If we do not add this layer, the map will not show, and we documented this issue. The issue mainly happens in the raster combination.<br>
Here is the problem we found:<br>
We tried different ways to perform the map combination technique, both adding and multiplying.<br>
They output results with the same error message: "Unable to complete operation. Authentication token required. (status code 499)." <br>
After we carefully examined the problem, we found that whenever two large rasters are combined, the server automatically rejects the calculation.<br>
In our scenario we have three layers: A: 'usa_mean_rainfall', B: 'usa_mean_temperature', and C: 'global_air_pollution'.
<br>
We can do the calculation in the following scenarios:<br>
- A times or adds C
- B times or adds C

The server will output an error in the following scenarios: <br>
- A times or adds B
- (A times or adds B) times or adds C
- (A times or adds C) times or adds B
- (B times or adds A) times or adds C
- (B times or adds C) times or adds A
- (C times or adds A) times or adds B
- (C times or adds B) times or adds A

Since 'usa_mean_rainfall' and 'usa_mean_temperature' came from the same source (both under the link: https://landscape3.arcgis.com/arcgis/rest/services/), we can exclude the possibility that the error comes from a problem with the datasets. <br>

The error occurs in the following raster_calculator functions:
- rc_out1 = raster_calculator(rasters = [rain_n_c,air_pollution_n_c,temperature_n_c], \ input_names = ["a","b","c"], expression = "a * b * c", extent_type='FirstOf', cellsize_type='FirstOf', astype='UNKNOWN')
- rc_out2 = raster_calculator(rasters = [rain_n_c,air_pollution_n_c,temperature_n_c], \ input_names = ["a","b","c"], expression = "a + b + c", extent_type='FirstOf', cellsize_type='FirstOf', astype='UNKNOWN')

We performed the following commands to examine the attributes of the error, and the output is:
- >>> temperature_n_c.compute_histograms(geometry=study_area_geom)
- {'histograms': [{'size': 2, 'min': -0.5, 'max': 1.5, 'counts': [4146, 8445]}]}
- ###The histogram summary of our binary transformed raster of 'usa_mean_temperature' in study area###
- ###########################################################################################################
- >>> rain_n_c.compute_histograms(geometry=study_area_geom)
- {'histograms': [{'size': 2, 'min': -0.5, 'max': 1.5, 'counts': [2338, 8728]}]}
- ###The histogram summary of our binary transformed raster of 'usa_mean_rainfall' in study area###
- ###########################################################################################################
- >>> air_pollution_n_c.compute_histograms(geometry=study_area_geom)
- {'histograms':
[{'size': 2, 'min': -0.5, 'max': 1.5, 'counts': [21, 74]}]}
- ###The histogram summary of our binary transformed raster of 'global_air_pollution' in study area###

Based on the observation above, we found that the layers 'usa_mean_temperature' and 'usa_mean_rainfall' are large rasters with significantly large counts: one counts as 4146 and 8445 and the other counts as 2338 and 8728.<br>
Meanwhile, the raster 'air_pollution_n_c' counts as only 21 and 74, which is a significantly smaller layer.<br>
Therefore, we suspect that combining two large rasters demands relatively high computation on the ArcGIS server, which interrupts the raster calculation process.<br>

```
# Please let us know how much time you spent on this project, in hours:
# (we will only examine distributions and won't look at individual responses)
assignment_timespent = 25

extracredit_timespent = 5
```
github_jupyter
<a href="https://colab.research.google.com/github/gumdropsteve/intro_to_python/blob/main/day_08/extra/intro_to_data_science_with_python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Intro to Data Science with Python We're going to use a Dataset built in to `sklearn` - What is `sklearn`? - https://scikit-learn.org/stable/ `sklearn` has 7 built in datasets to easiliy start playing with - check out the whole list [here](https://scikit-learn.org/stable/datasets/index.html) We're going to use the famous [iris datatset](https://en.wikipedia.org/wiki/Iris_flower_data_set) - Which contains data about flowers ``` # Import iris dataset function from sklearn.datasets import load_iris # load iris data set and set as variable (iris) iris = load_iris() # If we didn't know the key value pairs in the data we could find out like this: print(iris.keys()) #150 rows (samples) 4 columns (features: Height & Width of Pedals & Sepals) print(iris.data.shape) # each feature name is 1 column iris.feature_names # here's what the data looks like iris.data # print out the list of lables print("Types of iris: {}".format(iris['target_names'])) # this is the target data iris.target # Print out a list of feature names print("Feature names: \n{}".format(iris['feature_names'])) # print out the first 5 sets of features from each flower print("Sample Data:\n{}".format(iris['data'][:5])) ``` ## Pandas Data Frames [Visit Pandas Docs here](https://pandas.pydata.org/) pandas is a software library written for the Python for data manipulation and analysis.[pa] A really neat feature of Pandas is creating DataFrames Data Frames are 2-dimensional labeled data structure with columns. Very similar looking to a spreadsheet. Lets use a dataframe to look more at our data! 
``` #import Pandas import pandas as pd # Make a Dataframe df = pd.DataFrame(iris.data, columns=iris.feature_names) # a pandas DataFrame is it's own type type(df) # returns first 5 rows(default) df.head() # Change settings to display all of our rows. pd.options.display.max_rows = 200 # see entire dataframe df ``` ## Make a better data frame We're not going to cover much `numpy` in this workshop but I absolutley suggest you read more about it after! - You can do that [here](http://www.numpy.org/) Numpy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays. - You'll come accross it again as you keep learning about data science! We're using a function of it it here to essentially combine our iris `target` data onto our already exsiting iris data - read more about it [here](https://docs.scipy.org/doc/numpy/reference/generated/numpy.c_.html) ``` import numpy as np betterdf = pd.DataFrame(data= np.c_[iris['data'], iris['target']], columns= iris['feature_names'] + ['target']) betterdf.head() # Print out longer list to see target value change betterdf type(betterdf) ``` # Plotting with Pandas Read more about plotting and charts [here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html) Dataframes are great for initally looking at our data, but a chart is often much easier for us humans to see patterns! Lets make a quick chart for exploring our iris dataset! ``` # What are our feature (column) names again? iris.feature_names betterdf.plot(kind='scatter', x = 'sepal length (cm)', y = 'petal width (cm)', c='target') ``` Lets add a more human freindly color! ``` # Add color map at the end betterdf.plot(kind='scatter', x = 'sepal length (cm)', y = 'petal width (cm)', c='target', cmap=('spring')) # Lets clean it up a bit more! # Limit the color to three. 
# Clean up formatting # Adding Semi colon at the end removes the <matplotlib.axes._subplots.AxesSubplot at 0x7fe3904d6748> # Reaching the limitations of whats built into dataframe plots # We can import matplot lib and start using more of custimization. import matplotlib.pyplot as plt betterdf.plot(kind='scatter', x = 'sepal length (cm)', y = 'petal width (cm)', c='target', cmap=plt.cm.get_cmap('spring',3)); print(iris.target_names) ``` Challenge! Do an exploratory analysis of one of the other built in Data Sets! Make a data frame and look at the data Make a chart, play around with different chart types to see what works best Just try things and see what happens! Don't be afraid of breaking anything! ``` # Get started # Import wine dataset from sklearn.datasets import load_wine # just like load_iris() wine = load_wine() ``` ### A Solution / Example | Try on your Own first! ``` # Import wine dataset from sklearn.datasets import load_wine wine = load_wine() # If we didn't know the key value pairs in the data we could find out like this: print(wine.keys()) #150 rows (samples) 4 columns (features: Height & Width of Pedals & Sepals) print(wine.data.shape) print(wine.target_names) # Make a Dataframe winedf = pd.DataFrame(wine.data, columns=wine.feature_names) winedf import numpy as np betterwinedf = pd.DataFrame(data= np.c_[wine['data'], wine['target']], columns= wine['feature_names'] + ['target']) betterwinedf betterwinedf.plot(kind='scatter', x = 'total_phenols', y = 'color_intensity', c='target', cmap=plt.cm.get_cmap('spring',3)); ``` # Visualize Data better with Matplotlib Using the `.plot` feature on a dataframe is a great quick way to look at your data. You probably noticed that it even used the `matplotlib` library. You can take even more control of your visualization using libraries like [Matplotlib](https://matplotlib.org/) or [Seaborn](https://seaborn.pydata.org/index.html). 
``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt # choose x & y index values. # Remember our list looks like this # ['sepal length (cm)', # 'sepal width (cm)', # 'petal length (cm)', # 'petal width (cm)'] # 0 = sepal length (cm) # 3 = petal width (cm) x_index = 0 y_index = 3 # this formatter will label the colorbar with the correct target names # Look up lambda functions in python, they're neat! formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)]) # figsize: width, height Inches #dpi: Dots per inch plt.figure( figsize=(10, 8), dpi=80) plt.scatter(iris.data[:, x_index], iris.data[:, y_index], c=iris.target, cmap=plt.cm.get_cmap('spring', 3)) plt.colorbar(ticks=[0, 1, 2], format=formatter) #plt.clim(-0.5, 2.5) plt.xlabel(iris.feature_names[x_index]) plt.ylabel(iris.feature_names[y_index]); ``` Take a couple minutes and play around with numbers above to see how the chart changes # Machine Learning with Scikit learn We don't have the time to dive super deep into machine learning, but this will give you a quick overview and some ideas of how to start using machine learning We're going to use a machine learning model called `k-nearest neighbors ` or k-NN for short. # Why K-NN We want to predict the PROBABILITY of the flower type We're going to use k-nearest neighbors also referred to as knn knn takes the 'k' nearest categorized values to uncategorized data and assigns the majority value to the data its trying to categorize. You should be able to see why this model makes sense from our data analysis! The example below has the value 4 for 'k'. So we look at the 4 nearest neighboring data points. 3 of of them are labeled versicolor. So we would assign a high probability that our new data point should be labeled versicolor! 
![k-NN](https://github.com/sagecodes/intro-machine-learning/blob/master/img/knn4.png?raw=true) These lines are hand drawn and not 100% accurate but you could imagine the predictions falling into the categories like this ![k-NN](https://github.com/sagecodes/intro-machine-learning/raw/master/img/predictsegments.png) 3d exmaple here: https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html # Training - Build the Classifier: - Import the neighbors model - Data is typically denoted as X while labels are denoted with y - assign our classifier with 'k' value to a variable - train(fit) our model - output the prediction - output the probability behind the prediction ``` from sklearn import neighbors x = iris.data y = iris.target knn = neighbors.KNeighborsClassifier(n_neighbors=3) #pass features and labels into model knn.fit(x, y) # What kind of iris has 3cm x 5cm sepal and 4cm x 2cm petal? # 0 = setosa' 1 = 'versicolor' 2 = 'virginica'] # Data: sepal length, sepal width, petal length, petal width # Comment / Uncoomment different flowers below to see the prediction. # test_flower = [3, 5, 4, 2] test_flower = [5.1, 3.5, 1.4, .2] # test_flower = [5.1, 4.5, 2.8, 2] result = knn.predict([test_flower]) print(result) knn.predict_proba([test_flower]) # Lets make it into a dataframe so we can read it better pred = knn.predict_proba([test_flower]) preddf = pd.DataFrame(pred, columns=iris.target_names) # output DataFrame preddf # Lets make it a chart so we can read even more easily! 
preddf.plot(kind='bar'); ``` Train test Split ``` from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=21, stratify=y) knn = neighbors.KNeighborsClassifier(n_neighbors=3) knn.fit(x_train, y_train) y_pred = knn.predict(x_test) print("Test Predictions: \n {}".format(y_pred)) # Print accuracy knn.score(x_test, y_test) ``` ### Fin - random useful thing - how to check the version of `sklearn` ``` # import sklearn with alias import sklearn as skl # check verson skl.__version__ ``` ## Back to GitHub Repo: [https://github.com/gumdropsteve/intro-data-science](https://github.com/gumdropsteve/intro-data-science) ``` ```
github_jupyter
# Visualizing stationary paths of a functional Last revised: 02-Feb-2019 by Dick Furnstahl [furnstahl.1@osu.edu] Consider the functional $\begin{align} S = \int_{x_1}^{x_2} f[y(x), y'(x), x] \, dx \end{align}$ with $y_1 = y(x_1)$ and $y_2 = y(x_2)$ fixed. We denote by $y^*(x)$ the path that minimizes $S$ (or, more generally, makes it stationary). Then we consider the class of candidate paths $y(x)$ given by $\begin{align} y(x) = y^*(x) + \alpha \eta(x) \end{align}$ where $\eta(x)$ is some function that vanishes at the endpoints: $\eta(x_1) = \eta(x_2) = 0$. We can derive the Euler-Lagrange equations by minimizing $S(\alpha)$ with respect to $\alpha$. Here we visualize this problem by considering a particular $S$, choosing among some possible $\eta(x)$ definitions, and seeing how $S$ is minimized with respect to $\alpha$. We will also allow for an incorrect determination of $y^*(x)$, in which case we expect that the minimum alpha will give us a reasonable reproduction of the true $y^*(x)$. The variation of $\alpha$ and the choice of functions will be made using widgets from `ipywidgets`. ## Looking at a plot of the functional evaluation versus $\alpha$ \[We'll use `%matplotlib notebook` so that we can modify figures without redrawing them.\] ``` %matplotlib notebook import numpy as np import matplotlib.pyplot as plt import ipywidgets as widgets from IPython.display import display ``` ### Functional from Taylor problem 6.9 This problem states: "Find the equation of the path from the origin $O$ to the point $P(1,1)$ in the $xy$ plane that makes the integral $\int_O^P (y'^2 + y y' + y^2)$ stationary. The answer from solving the Euler-Lagrange equation is $y^*(x) = \sinh(x)/\sinh(1)$. ``` def y_star(x): """Path that minimizes the functional in Taylor problem 6.9.""" return np.sinh(x) / np.sinh(1.) 
delta_x = 0.001 x_pts = np.arange(0., 1., delta_x) fig = plt.figure(figsize=(6,3), num='Visualizing stationary paths of a functional') ax1 = fig.add_subplot(1,2,1) ax2 = fig.add_subplot(1,2,2) def setup_figure(): ax1.set_title('Show paths') ax1.plot(x_pts, y_star(x_pts), color='black', lw=2) ax1.set_xlabel('x') ax1.set_ylabel('y(x)') ax2.set_title('Evaluate functional') ax2.set_xlabel(r'$\alpha$') ax2.set_ylabel('functional') ax2.set_xlim(-0.4, 0.4) ax2.set_ylim(1.5, 3.) #ax2.axvline(0., color='black', alpha=0.3) ax2.axhline(evaluate_functional(x_pts, y_star(x_pts)), color='black', alpha=0.3) fig.tight_layout() def evaluate_functional(x_pts, y_pts): """Given arrays of x and y points, evaluate the functional from 6.9.""" # The numpy gradient function takes the derivative of an array y_pts # that is a function of points x in x_pts. y_deriv_pts = np.gradient(y_pts, x_pts) f = y_deriv_pts**2 + y_pts * y_deriv_pts + y_pts**2 # Use the numpy trapezoid rule (trapz) to do the integral over f. return np.trapz(f, x_pts) def make_path(alpha, ax1_passed, ax2_passed, base_function='exact', eta_function='sine'): """Given a base function, which may be the exact y^*(x) or a guess that is not correct, generate and plot the path corresponding to adding alpha*eta(x) to the base function, with eta(x) chosen among some functions that vanish at the endpoints in x. """ # map x_pts to zero to 1 (it may already be there) x_mapped_pts = (x_pts - x_pts[0]) / (x_pts[-1] - x_pts[0]) # Choices for the base function if (base_function == 'exact'): base = lambda x : y_star(x) elif (base_function == 'guess 1'): base = lambda x : np.sinh(2.*x) / np.sinh(2.) elif (base_function == 'guess 2'): base = lambda x : x**3 if (eta_function == 'sine'): eta = lambda x : np.sin(np.pi * x) elif (eta_function == 'parabola'): eta = lambda x : 4. * x * (1. 
- x) y_new_pts = base(x_pts) + alpha * eta(x_mapped_pts) ax1_passed.plot(x_pts, y_new_pts, color='red', lw=1) ax2_passed.plot(alpha, evaluate_functional(x_pts, y_new_pts), '.', color='red') def reset_graph(event): ax1.clear() ax2.clear() setup_figure() button = widgets.Button( description='reset graph' ) button.on_click(reset_graph) widgets.interact(make_path, alpha=widgets.FloatSlider(min=-1., max=1., step=.05, value=0.0, description=r'$\alpha$', continuous_update=False), ax1_passed=widgets.fixed(ax1), ax2_passed=widgets.fixed(ax2), base_function=widgets.Dropdown(options=['exact', 'guess 1', 'guess 2'], value='exact', description='base function'), eta_function=widgets.Dropdown(options=['sine', 'parabola'], value='sine', description=r'$\eta(x)$') ) setup_figure() button ```
github_jupyter
based on: https://towardsdatascience.com/how-to-use-machine-learning-for-drug-discovery-1ccb5fdf81ad ``` # Download the Delaney's dataset #! wget https://raw.githubusercontent.com/dataprofessor/data/master/delaney.csv # import the necessqary libraries import matplotlib.pyplot as plt import numpy as np import pandas as pd from rdkit import Chem from rdkit.Chem import Descriptors from sklearn.model_selection import train_test_split from sklearn import linear_model from sklearn.metrics import mean_squared_error, r2_score import pickle # read the dataset sol = pd.read_csv('delaney.csv') # define the rdkit molecular objects mol_list= [] for element in sol.SMILES: mol = Chem.MolFromSmiles(element) mol_list.append(mol) ``` Delaney's work to calculate the aqueous solubility, $S (mol/l)$, uses four descriptors: $logP$, $MW$, $RB$, and $AP$. The $logP$ is the decimal logarithm of the octanol/water partition coefficient. It is a measure of the relative affinity of the compound for hydrophobic/aqueous solvents. It depends on the polarity of the compound. The $MW$ is the molecular weight of the compound. $RB$ is the number of the rotatable bonds of the compound. It is a measure of the compound entropy. The $AP$ is the aromatic proportion, the ratio between the number of compound's aromatic atoms and the number of heavy atoms. \begin{equation*} LogS = C_0 + C_1 LogP + C_2 MW + C_3 RB + C_4 AP \end{equation*} where the $LogS$ is the decimal logarithm of the compound's aqueous solubility. 
``` # Calculate LogP, MW and RB descriptors def generate(smiles, verbose=False): moldata= [] for elem in smiles: mol=Chem.MolFromSmiles(elem) moldata.append(mol) baseData= np.arange(1,1) i=0 for mol in moldata: desc_MolLogP = Descriptors.MolLogP(mol) desc_MolWt = Descriptors.MolWt(mol) desc_NumRotatableBonds = Descriptors.NumRotatableBonds(mol) row = np.array([desc_MolLogP, desc_MolWt, desc_NumRotatableBonds]) if(i==0): baseData=row else: baseData=np.vstack([baseData, row]) i=i+1 columnNames=["MolLogP","MolWt","NumRotatableBonds"] descriptors = pd.DataFrame(data=baseData,columns=columnNames) return descriptors # Get the SMILES structures from the datqset df = generate(sol.SMILES) # Create a function to calculate the number of aromatic atoms in a molecule def AromaticAtoms(m): aromatic_atoms = [m.GetAtomWithIdx(i).GetIsAromatic() for i in range(m.GetNumAtoms())] aa_count = [] for i in aromatic_atoms: if i==True: aa_count.append(1) sum_aa_count = sum(aa_count) return sum_aa_count # Calculate the number of aromatic atoms desc_AromaticAtoms = [AromaticAtoms(element) for element in mol_list] # Calculate the number of heavy atoms desc_HeavyAtomCount = [Descriptors.HeavyAtomCount(element) for element in mol_list] # Compute the aromatic proportion (AP) descriptor desc_AromaticProportion = [AromaticAtoms(element)/Descriptors.HeavyAtomCount(element) for element in mol_list] df_desc_AromaticProportion = pd.DataFrame(data=desc_AromaticProportion, columns=["AromaticProportion"]) #Create the X matrix by combining the 2 dataframes associated to the four descriptors X = pd.concat([df,df_desc_AromaticProportion], axis=1) #X # Create the Y matrix Y = sol.iloc[:,1] print(Y.shape) # Create the XY matrix (for comparison, mainly) XY = pd.concat([X,Y], axis=1) XY.columns = ['MolLogP', 'MolWt', 'NumRotatableBonds', 'AromaticProportion', 'MeasuredLogSol'] XY.head() # Split the dataset into training and validation X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2) # 
Define the linear regression model model = linear_model.LinearRegression() #model.fit(X_train, Y_train) # Predict the LogS value of X_train data Y_pred_train = model.predict(X_train) print('Coefficients:', model.coef_) print('Intercept:', model.intercept_) print('Mean squared error (MSE): %.2f'% mean_squared_error(Y_train, Y_pred_train)) print('Coefficient of determination (R^2): %.2f' % r2_score(Y_train, Y_pred_train)) # Predict the LogS value of X_val data Y_pred_val = model.predict(X_val) print('Coefficients:', model.coef_) print('Intercept:', model.intercept_) print('Mean squared error (MSE): %.2f'% mean_squared_error(Y_val, Y_pred_val)) print('Coefficient of determination (R^2): %.2f'% r2_score(Y_val, Y_pred_val)) ``` ### Deriving the Linear Regression Equation ``` full = linear_model.LinearRegression() #full.fit(X, Y) full_pred = full.predict(X) print('Coefficients:', full.coef_) print('Intercept:', full.intercept_) print('Mean squared error (MSE): %.2f'% mean_squared_error(Y, full_pred)) print('Coefficient of determination (R^2): %.2f'% r2_score(Y, full_pred)) full_yintercept = '%.2f' % full.intercept_ full_LogP = '%.2f LogP' % full.coef_[0] full_MW = '%.4f MW' % full.coef_[1] full_RB = '+ %.4f RB' % full.coef_[2] full_AP = '%.2f AP' % full.coef_[3] print('LogS = ' + ' ' + full_yintercept + ' ' + full_LogP + ' ' + full_MW + ' ' + full_RB + ' ' + full_AP) ``` ## Scatter plot of experimental vs. 
predicted LogS ### Vertical Plot ``` plt.figure(figsize=(5,11)) # 2 row, 1 column, plot 1 plt.subplot(2, 1, 1) plt.scatter(x=Y_train, y=Y_pred_train, c="#7CAE00", alpha=0.3) # Add trendline # https://stackoverflow.com/questions/26447191/how-to-add-trendline-in-python-matplotlib-dot-scatter-graphs z = np.polyfit(Y_train, Y_pred_train, 1) p = np.poly1d(z) plt.plot(Y_val,p(Y_val),"#F8766D") plt.ylabel('Predicted LogS') # 2 row, 1 column, plot 2 plt.subplot(2, 1, 2) plt.scatter(x=Y_val, y=Y_pred_val, c="#619CFF", alpha=0.3) z = np.polyfit(Y_val, Y_pred_val, 1) p = np.poly1d(z) plt.plot(Y_val,p(Y_val),"#F8766D") plt.ylabel('Predicted LogS') plt.xlabel('Experimental LogS') plt.savefig('plot_vertical_logS.png') plt.savefig('plot_vertical_logS.pdf') plt.show() sol.head() XY.head() SMILES = 'COc1cccc2cc(C(=O)NCCCCN3CCN(c4cccc5nccnc54)CC3)oc21' def predictSingle(smiles, model): mol = Chem.MolFromSmiles(smiles) single_MolLogP = Descriptors.MolLogP(mol) single_MolWt = Descriptors.MolWt(mol) single_NumRotatableBonds = Descriptors.NumRotatableBonds(mol) single_AP = AromaticAtoms(mol)/Descriptors.HeavyAtomCount(mol) single_list = [single_MolLogP, single_MolWt, single_NumRotatableBonds, single_AP] single_df = pd.DataFrame(single_list).T single_df.columns = ['MolLogP', 'MolWt', 'NumRotatableBonds', 'AromaticProportion'] #return single_df return model.predict(single_df)[0] predictSingle(SMILES, full) smiles_new = 'ClCC(Cl)(Cl)Cl' predictSingle(smiles_new, full) smiles_new = 'FC(F)(Cl)C(F)(Cl)Cl' predictSingle(smiles_new, full) # save the model #model = model_rec1 #folder = "01_models" #os.makedirs(folder, exist_ok=True) #model_name = 'model_rec1.h5' #model_path = os.path.join(folder, model_name) #full.save('full.h5') #filename = 'finalized_model.sav' pickle.dump(model, open('full.pkl', 'wb')) ## load the model from disk #loaded_model = pickle.load(open(filename, 'rb')) #result = loaded_model.score(X_test, Y_test) ```
github_jupyter
<!--BOOK_INFORMATION--> <a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a> *This notebook contains an excerpt from the book [Machine Learning for OpenCV](https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv) by Michael Beyeler. The code is released under the [MIT license](https://opensource.org/licenses/MIT), and is available on [GitHub](https://github.com/mbeyeler/opencv-machine-learning).* *Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations. If you find this content useful, please consider supporting the work by [buying the book](https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv)!* <!--NAVIGATION--> < [Evaluating a Model](11.01-Evaluating-a-Model.ipynb) | [Contents](../README.md) | [Tuning Hyperparameters with Grid Search](11.03-Tuning-Hyperparameters-with-Grid-Search.ipynb) > # Understanding Cross-Validation Cross-validation is a method of evaluating the generalization performance of a model that is generally more stable and thorough than splitting the dataset into training and test sets. The most commonly used version of cross-validation is $k$-fold cross-validation, where $k$ is a number specified by the user (usually five or ten). Here, the dataset is partitioned into k parts of more or less equal size, called folds. For a dataset that contains $N$ data points, each fold should thus have approximately $N / k$ samples. Then a series of models is trained on the data, using $k - 1$ folds for training and one remaining fold for testing. The procedure is repeated for $k$ iterations, each time choosing a different fold for testing, until every fold has served as a test set once. 
Refer to the book for an illustration of $k$-fold cross-validation for different values of $k$. Do you know what makes cross-validation different from just splitting the data into training and test sets? ## Manually implementing cross-validation in OpenCV The easiest way to perform cross-validation in OpenCV is to do the data splits by hand. For example, in order to implement two-fold cross-validation, we would follow the following procedure. Load the dataset: ``` from sklearn.datasets import load_iris import numpy as np iris = load_iris() X = iris.data.astype(np.float32) y = iris.target ``` Split the data into two equally sized parts: ``` from sklearn.model_selection import train_test_split X_fold1, X_fold2, y_fold1, y_fold2 = train_test_split( X, y, random_state=37, train_size=0.5 ) ``` Instantiate the classifier: ``` import cv2 knn = cv2.ml.KNearest_create() knn.setDefaultK(1) ``` Train the classifier on the first fold, then predict the labels of the second fold: ``` knn.train(X_fold1, cv2.ml.ROW_SAMPLE, y_fold1) _, y_hat_fold2 = knn.predict(X_fold2) ``` Train the classifier on the second fold, then predict the labels of the first fold: ``` knn.train(X_fold2, cv2.ml.ROW_SAMPLE, y_fold2) _, y_hat_fold1 = knn.predict(X_fold1) ``` Compute accuracy scores for both folds: ``` from sklearn.metrics import accuracy_score accuracy_score(y_fold1, y_hat_fold1) accuracy_score(y_fold2, y_hat_fold2) ``` This procedure will yield two accuracy scores, one for the first fold (92% accuracy), and one for the second fold (88% accuracy). On average, our classifier thus achieved 90% accuracy on unseen data. ## Automating cross-validation using scikit-learn Instantiate the classifier: ``` from sklearn.neighbors import KNeighborsClassifier model = KNeighborsClassifier(n_neighbors=1) ``` Perform cross-validation with the cross_val_score function. This function takes as input a model, the full dataset (`X`), the target labels (`y`) and an integer value for the number of folds (`cv`). 
It is not necessary to split the data by hand—the function will do that automatically depending on the number of folds. After the cross-validation is completed, the function returns the test scores: ``` from sklearn.model_selection import cross_val_score scores = cross_val_score(model, X, y, cv=5) scores ``` In order to get a sense how the model did on average, we can look at the mean and standard deviation of the five scores: ``` scores.mean(), scores.std() ``` With five folds, we have a much better idea about how robust the classifier is on average. We see that $k$-NN with $k=1$ achieves on average 96% accuracy, and this value fluctuates from run to run with a standard deviation of roughly 2.5%. ## Implementing leave-one-out cross-validation Another popular way to implement cross-validation is to choose the number of folds equal to the number of data points in the dataset. In other words, if there are $N$ data points, we set $k=N$. This means that we will end up having to do $N$ iterations of cross-validation, but in every iteration, the training set will consist of only a single data point. The advantage of this procedure is that we get to use all-but-one data point for training. Hence, this procedure is also known as leave-one-out cross-validation. In scikit-learn, this functionality is provided by the `LeaveOneOut` method from the `model_selection` module: ``` from sklearn.model_selection import LeaveOneOut ``` This object can be passed directly to the `cross_val_score` function in the following way: ``` scores = cross_val_score(model, X, y, cv=LeaveOneOut()) ``` Because every test set now contains a single data point, we would expect the scorer to return 150 values—one for each data point in the dataset. Each of these points we could get either right or wrong. 
Thus, we expect `scores` to be a list of ones (1) and zeros (0), which corresponds to correct and incorrect classifications, respectively: ``` scores ``` If we want to know the average performance of the classifier, we would still compute the mean and standard deviation of the scores: ``` scores.mean(), scores.std() ``` We can see this scoring scheme returns very similar results to five-fold cross-validation. # Estimating robustness using bootstrapping An alternative procedure to $k$-fold cross-validation is **bootstrapping**. Instead of splitting the data into folds, bootstrapping builds a training set by drawing samples randomly from the dataset. Typically, a bootstrap is formed by drawing samples with replacement. Imagine putting all of the data points into a bag and then drawing randomly from the bag. After drawing a sample, we would put it back in the bag. This allows for some samples to show up multiple times in the training set, which is something cross-validation does not allow. The classifier is then tested on all samples that are not part of the bootstrap (the so-called **out-of-bag** examples), and the procedure is repeated a large number of times (say, 10,000 times). Thus, we get a distribution of the model's score that allows us to estimate the robustness of the model. Bootstrapping can be implemented with the following procedure. Instantiate the classifier: ``` knn = cv2.ml.KNearest_create() knn.setDefaultK(1) ``` From our dataset with $N$ samples, randomly choose $N$ samples with replacement to form a bootstrap. This can be done most easily with the choice function from NumPy's random module. We tell the function to draw len(`X`) samples in the range `[0, len(X)-1]` with replacement (`replace=True`). 
The function then returns a list of indices, from which we form our bootstrap: ``` idx_boot = np.random.choice(len(X), size=len(X), replace=True) X_boot = X[idx_boot, :] y_boot = y[idx_boot] ``` Put all samples that do not show in the bootstrap in the out-of-bag set (note that we use the builtin `bool` here; the deprecated `np.bool` alias was removed in NumPy 1.24): ``` idx_oob = np.array([x not in idx_boot for x in np.arange(len(X))], dtype=bool) X_oob = X[idx_oob, :] y_oob = y[idx_oob] ``` Train the classifier on the bootstrap samples: ``` knn.train(X_boot, cv2.ml.ROW_SAMPLE, y_boot) ``` Test the classifier on the out-of-bag samples: ``` _, y_hat = knn.predict(X_oob) accuracy_score(y_oob, y_hat) ``` Then we want to repeat these steps up to 10,000 times to get 10,000 accuracy scores, then average the scores to get an idea of the classifier's mean performance. For our convenience, we can build a function so that it is easy to run the procedure for some `n_iter` number of times. We also pass a model (our $k$-NN classifier, `model`), the feature matrix (`X`), and the vector with all class labels (`y`): ``` def yield_bootstrap(model, X, y, n_iter=10000): for _ in range(n_iter): # train the classifier on bootstrap idx_boot = np.random.choice(len(X), size=len(X), replace=True) X_boot = X[idx_boot, :] y_boot = y[idx_boot] model.train(X_boot, cv2.ml.ROW_SAMPLE, y_boot) # test classifier on out-of-bag examples idx_oob = np.array([x not in idx_boot for x in np.arange(len(X))], dtype=bool) X_oob = X[idx_oob, :] y_oob = y[idx_oob] _, y_hat = model.predict(X_oob) # return accuracy yield accuracy_score(y_oob, y_hat) ``` To make sure we all get the same result, let's fix the seed of the random number generator: ``` np.random.seed(42) ``` Now, let's run the procedure for `n_iter=10` times by converting the function output to a list: ``` list(yield_bootstrap(knn, X, y, n_iter=10)) ``` As you can see, for this small sample we get accuracy scores anywhere between 92% and 98%.
To get a more reliable estimate of the model's performance, we repeat the procedure 1,000 times and calculate both mean and standard deviation of the resulting scores: ``` acc = list(yield_bootstrap(knn, X, y, n_iter=1000)) np.mean(acc), np.std(acc) ``` You are always welcome to increase the number of repetitions. But once `n_iter` is large enough, the procedure should be robust to the randomness of the sampling procedure. In this case, we do not expect to see any more changes to the distribution of score values as we keep increasing `n_iter` to, for example, 10,000 iterations: ``` acc = list(yield_bootstrap(knn, X, y, n_iter=10000)) np.mean(acc), np.std(acc) ``` Typically, the scores obtained with bootstrapping would be used in a **statistical test** to assess the **significance** of our result. Let's have a look at how that is done. # Implementing Student's t-test One of the most famous statistical tests is **Student's $t$-test**. You might have heard of it before: it allows us to determine whether two sets of data are significantly different from one another. This was a really important test for William Sealy Gosset, the inventor of the test, who worked at the Guinness brewery and wanted to know whether two batches of stout differed in quality. In practice, the $t$-test allows us to determine whether two data samples come from underlying distributions with the same mean or **expected value**. For our purposes, this means that we can use the $t$-test to determine whether the test scores of two independent classifiers have the same mean value. We start by hypothesizing that the two sets of test scores are identical. We call this the **null hypothesis** because this is the hypothesis we want to nullify, that is, we are looking for evidence to **reject** the hypothesis because we want to ensure that one classifier is significantly better than the other. We accept or reject a null hypothesis based on a parameter known as the $p$-value that the $t$-test returns. 
The $p$-value takes on values between 0 and 1. A $p$-value of 0.05 would mean that, if the null hypothesis were true, a result at least as extreme as the one observed would occur only 5 out of 100 times. A small $p$-value thus indicates strong evidence that the hypothesis can be safely rejected. It is customary to use $p=0.05$ as a cut-off value below which we reject the null hypothesis. If this is all too confusing, think of it this way: when we run a $t$-test for the purpose of comparing classifier test scores, we are looking to obtain a small $p$-value because that means that the two classifiers give significantly different results. We can implement Student's $t$-test with SciPy's `ttest_ind` function from the `stats` module: ``` from scipy.stats import ttest_ind ``` Let's start with a simple example. Assume we ran five-fold cross-validation on two classifiers and obtained the following scores: ``` scores_a = [1, 1, 1, 1, 1] scores_b = [0, 0, 0, 0, 0] ``` This means that Model A achieved 100% accuracy in all five folds, whereas Model B got 0% accuracy. In this case, it is clear that the two results are significantly different. If we run the $t$-test on this data, we should thus find a really small $p$-value: ``` ttest_ind(scores_a, scores_b) ``` And we do! We actually get the smallest possible $p$-value, $p=0.0$. On the other hand, what if the two classifiers got exactly the same numbers, except during different folds. In this case, we would expect the two classifiers to be equivalent, which is indicated by a really large $p$-value: ``` scores_a = [0.9, 0.9, 0.9, 0.8, 0.8] scores_b = [0.8, 0.8, 0.9, 0.9, 0.9] ttest_ind(scores_a, scores_b) ``` Analogous to the aforementioned, we get the largest possible $p$-value, $p=1.0$. To see what happens in a more realistic example, let's return to our $k$-NN classifier from the earlier example. Using the test scores obtained from the ten-fold cross-validation procedure, we can compare two different $k$-NN classifiers with the following procedure. Obtain a set of test scores for Model A.
We choose Model A to be the $k$-NN classifier from earlier ($k=1$): ``` k1 = KNeighborsClassifier(n_neighbors=1) scores_k1 = cross_val_score(k1, X, y, cv=10) np.mean(scores_k1), np.std(scores_k1) ``` Obtain a set of test scores for Model B. Let's choose Model B to be a $k$-NN classifier with $k=3$: ``` k3 = KNeighborsClassifier(n_neighbors=3) scores_k3 = cross_val_score(k3, X, y, cv=10) np.mean(scores_k3), np.std(scores_k3) ``` Apply the $t$-test to both sets of scores: ``` ttest_ind(scores_k1, scores_k3) ``` As you can see, this is a good example of two classifiers giving different cross-validation scores (96.0% and 96.7%) that turn out to be not significantly different! Because we get a large $p$-value ($p=0.777$), we have no evidence to reject the null hypothesis: a score difference at least this large would be expected to arise from chance alone roughly 78 times out of 100 if the classifiers were equivalent, so we treat the two classifiers as equivalent. ## Implementing McNemar's test A more advanced statistical technique is McNemar's test. This test can be used on paired data to determine whether there are any differences between the two samples. As in the case of the $t$-test, we can use McNemar's test to determine whether two models give significantly different classification results. McNemar's test operates on pairs of data points. This means that we need to know, for both classifiers, how they classified each data point. Based on the number of data points that the first classifier got right but the second got wrong and vice versa, we can determine whether the two classifiers are equivalent. ``` from scipy.stats import binom def mcnemar_midp(b, c): """ Compute McNemar's test using the "mid-p" variant suggested by: M.W. Fagerland, S. Lydersen, P. Laake. 2013. The McNemar test for binary matched-pairs data: Mid-p and asymptotic are better than exact conditional. BMC Medical Research Methodology 13: 91. `b` is the number of observations correctly labeled by the first---but not the second---system; `c` is the number of observations correctly labeled by the second---but not the first---system.
""" n = b + c x = min(b, c) dist = binom(n, .5) p = 2. * dist.cdf(x) midp = p - dist.pmf(x) return midp ``` Let's assume the preceding Model A and Model B were applied to the same five data points. Whereas Model A classified every data point correctly (denoted with a 1), Model B got all of them wrong (denoted with a 0): ``` scores_a = np.array([1, 1, 1, 1, 1]) scores_b = np.array([0, 0, 0, 0, 0]) ``` McNemar's test wants to know two things: - How many data points did Model A get right but Model B get wrong? - How many data points did Model A get wrong but Model B get right? We can check which data points Model A got right but Model B got wrong as follows: ``` a1_b0 = scores_a * (1 - scores_b) a1_b0 ``` Of course, this applies to all of the data points. The opposite is true for the data points that Model B got right and Model A got wrong: ``` a0_b1 = (1 - scores_a) * scores_b a0_b1 ``` Feeding these numbers to McNemar's test should return a small $p$-value because the two classifiers are obviously different: ``` mcnemar_midp(a1_b0.sum(), a0_b1.sum()) ``` And it does! We can apply McNemar's test to a more complicated example, but we cannot operate on cross-validation scores anymore. The reason is that we need to know the classification result for every data point, not just an average. Hence, it makes more sense to apply McNemar's test to the leave-one-out cross-validation. Going back to $k$-NN with $k=1$ and $k=3$, we can calculate their scores as follows: ``` scores_k1 = cross_val_score(k1, X, y, cv=LeaveOneOut()) scores_k3 = cross_val_score(k3, X, y, cv=LeaveOneOut()) ``` The number of data points that one of the classifiers got right but the other got wrong are as follows: ``` np.sum(scores_k1 * (1 - scores_k3)) np.sum((1 - scores_k1) * scores_k3) ``` We got no differences whatsoever! Now it becomes clear why the $t$-test led us to believe that the two classifiers are identical.
As a result, if we feed the two sums into McNemar's test function, we get the largest possible $p$-value, $p=1.0$: ``` mcnemar_midp(np.sum(scores_k1 * (1 - scores_k3)), np.sum((1 - scores_k1) * scores_k3)) ``` <!--NAVIGATION--> < [Evaluating a Model](11.01-Evaluating-a-Model.ipynb) | [Contents](../README.md) | [Tuning Hyperparameters with Grid Search](11.03-Tuning-Hyperparameters-with-Grid-Search.ipynb) >
github_jupyter
<a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.07.Colab_Vegetation_Phenology_L8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Downloads the odc-colab Python module and runs it to setup ODC. ``` !wget -nc https://raw.githubusercontent.com/ceos-seo/odc-colab/master/odc_colab.py from odc_colab import odc_colab_init odc_colab_init(install_odc_gee=True) ``` Downloads an existing index and populates the new ODC environment with it. ``` from odc_colab import populate_db populate_db() ``` # Landsat Vegetation Phenology This notebook calculates vegetation phenology changes using Landsat-8 data. To detect changes, the algorithm uses Normalized Difference Vegetation Index (NDVI) which is a common proxy for vegetation growth and health. The outputs of this notebook can be used to assess differences in agriculture fields over time or space and also allow the assessment of growing states such as planting and harvesting. <br> There are two output products. The first output product is a time series boxplot of NDVI with the data binned by week, month, week of year, or month of year. The second output product is a time series lineplot of the mean NDVI for each year, with the data potentially binned by week or month. This product is useful for comparing different years to each other. 
## Load Data Cube Configuration and Import Utilities ``` # Supress Warnings import warnings warnings.filterwarnings('ignore') # Load Data Cube Configuration from odc_gee import earthengine dc = earthengine.Datacube(app='Phenology') # Import Data Cube API import utils.data_cube_utilities.data_access_api as dc_api api = dc_api.DataAccessApi() import xarray as xr import numpy as np import matplotlib.pyplot as plt # Select a Product and Platform product = "ls8_google" platform = "LANDSAT_8" ``` ## <span id="define_extents">Define the Extents of the Analysis [&#9652;](#top)</span> ``` # MODIFY HERE # Select a Latitude-Longitude point for the center of the analysis region # Select the size of the box (in degrees) surrounding the center point # Small Maize Crop in Sudan Savanna, Ghana, Africa lat_long = (11.102, -0.230) box_size_deg = 0.003 # Calculate the latitude and longitude bounds of the analysis box latitude = (lat_long[0]-box_size_deg/2, lat_long[0]+box_size_deg/2) longitude = (lat_long[1]-box_size_deg/2, lat_long[1]+box_size_deg/2) # Define Time Range # Landsat-8 time range: 07-Apr-2013 to current # The format of the time date is YYYY-MM-DD time_extents = ('2017-01-01', '2020-12-31') # The code below renders a map that can be used to view the region. 
from utils.data_cube_utilities.dc_display_map import display_map display_map(latitude,longitude) ``` ## Load bands needed for NDVI and remove clouds and water ``` landsat_dataset = dc.load(latitude = latitude,longitude=longitude,platform=platform,time=time_extents,product=product,measurements=['red','nir','pixel_qa']) from utils.data_cube_utilities.clean_mask import landsat_qa_clean_mask land_mask = landsat_qa_clean_mask(landsat_dataset, platform=platform, cover_types=['clear']) landsat_dataset = landsat_dataset.drop('pixel_qa') cleaned_dataset = landsat_dataset.where(land_mask) ``` ## Define NDVI and add it to the dataset ``` def NDVI(dataset): return (dataset.nir - dataset.red)/(dataset.nir + dataset.red) cleaned_dataset['NDVI'] = NDVI(cleaned_dataset) ``` ## Plot NDVI vs Time in a Box-and-Whisker Plot ``` # MODIFY HERE # Specify the target aggregation type of the curve fit. # Input can be either 'mean' or 'median'. curve_fit_target = 'median' # The maximum number of data points that appear along time in each plot. # If more than this number of data points need to be plotted, a grid of plots will be created. max_times_per_plot = 50 # Select the binning approach for the vegetation index. Choose one from the list below. 
# None = do not bin the data # 'week' = bin the data by week with an extended time axis # 'month' = bin the data by month with an extended time axis # 'weekofyear' = bin the data by week and years using a single year time axis # 'monthofyear' = bin the data by month and years using a single year time axis bin_by = 'month' #@title Create NDVI Boxplot from utils.data_cube_utilities.plotter_utils import xarray_time_series_plot veg_proxy = 'NDVI' aggregated_by_str = None if bin_by is None: plotting_data = cleaned_dataset elif bin_by == 'week': plotting_data = cleaned_dataset.resample(time='1w').mean() aggregated_by_str = 'Week' elif bin_by == 'month': plotting_data = cleaned_dataset.resample(time='1m').mean() aggregated_by_str = 'Month' elif bin_by == 'weekofyear': plotting_data = cleaned_dataset.groupby('time.week').mean(dim=('time')) aggregated_by_str = 'Week of Year' elif bin_by == 'monthofyear': plotting_data = cleaned_dataset.groupby('time.month').mean(dim=('time')) aggregated_by_str = 'Month of Year' params = dict(dataset=plotting_data, plot_descs={veg_proxy:{'none':[ {'box':{'boxprops':{'facecolor':'forestgreen'}}}]}}) params['plot_descs'][veg_proxy][curve_fit_target] = [{'gaussian_filter':{}}] fig, curve_fit_plotting_data = \ xarray_time_series_plot(**params, fig_params=dict(figsize=(8,4), dpi=150), max_times_per_plot=max_times_per_plot) plt.title('Box-and-Whisker Plot of {1} with a Curvefit of {0} {1}' .format(curve_fit_target.capitalize(), veg_proxy)) plt.tight_layout() plt.show() ``` ### Plot NDVI vs. Time for each year Note that the curve fits here do not show where some times have no data, as is shown in the box-and-whisker plot. In cases where there is a lack of data, the NDVI Gaussian curve may not represent an accurate phenology. For example, in the baseline case there is data missing for September and October 2019 and also data missing for August and September 2020. 
The curves for those years do not reflect accurate peak NDVI because of this missing data. Additional Landsat data from the future Landsat-9 mission or more frequent data from Sentinel-2 would likely yield more accurate results. ``` # MODIFY HERE # Select the binning approach for the vegetation index. Set the 'bin_by' parameter. # 'weekofyear' = bin the data by week and years using a single year time axis # 'monthofyear' = bin the data by month and years using a single year time axis bin_by = 'monthofyear' #@title Create NDVI annual Gaussian plots years_with_data = [] plot_descs = {} daysofyear_per_year = {} plotting_data_years = {} time_dim_name = None for year in range(np.datetime64(time_extents[0], 'Y').item().year, np.datetime64(time_extents[1], 'Y').item().year+1): year_data = cleaned_dataset.sel(time=slice('{}-01-01'.format(year), '{}-12-31'.format(year)))[veg_proxy] if len(year_data['time']) == 0: # There is nothing to plot for this year. print("Year {} has no data, so will not be plotted.".format(year)) continue years_with_data.append(year) spec_ind_dayofyear = year_data.groupby('time.dayofyear').mean() daysofyear_per_year[year] = spec_ind_dayofyear[~spec_ind_dayofyear.isnull().sum(dim='dayofyear')].dayofyear aggregated_by_str = None if bin_by == 'weekofyear': plotting_data_year = year_data.groupby('time.week').mean(dim=('time')) time_dim_name = 'week' elif bin_by == 'monthofyear': plotting_data_year = year_data.groupby('time.month').mean(dim=('time')) time_dim_name = 'month' plotting_data_years[year] = plotting_data_year num_time_pts = len(plotting_data_year[time_dim_name]) # Select the curve-fit type. # See the documentation for `xarray_time_series_plot()` regarding the `plot_descs` parameter. 
plot_descs[year] = {'mean':[{'gaussian_filter':{}}]} time_dim_name = 'week' if bin_by == 'weekofyear' else 'month' if bin_by == 'monthofyear' else 'time' num_times = 54 if bin_by == 'weekofyear' else 12 time_coords_arr = np.arange(1, num_times+1) # In xarray, week and month indices start at 1. time_coords_da = xr.DataArray(time_coords_arr, coords={time_dim_name:time_coords_arr}, dims=[time_dim_name], name=time_dim_name) coords = dict(list(plotting_data_years.values())[0].coords) coords[time_dim_name] = time_coords_da plotting_data = xr.Dataset(plotting_data_years, coords=coords) params = dict(dataset=plotting_data, plot_descs=plot_descs) xarray_time_series_plot(**params, fig_params=dict(figsize=(8,4), dpi=150)) plt.title('Line Plot of {0} for Each Year'.format(veg_proxy)) plt.show() ```
github_jupyter
``` # Erasmus+ ICCT project (2018-1-SI01-KA203-047081) # Toggle cell visibility from IPython.display import HTML tag = HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide() } else { $('div.input').show() } code_show = !code_show } $( document ).ready(code_toggle); </script> Promijeni vidljivost <a href="javascript:code_toggle()">ovdje</a>.''') display(tag) # Hide the code completely # from IPython.display import HTML # tag = HTML('''<style> # div.input { # display:none; # } # </style>''') # display(tag) %matplotlib notebook import scipy.signal as signal import matplotlib.pyplot as plt from ipywidgets import widgets from ipywidgets import interact import numpy as np import sympy as sym ``` ## PID regulator - vremenski odziv Proporcionalno-integracijsko-derivacijski (PID) algoritam upravljanja daleko je najpoznatiji i najčešće korišteni algoritam automatskog upravljanja. Njegova prijenosna funkcija je \begin{equation} P(s)=K_p \cdot \left( 1 + \frac{1}{T_i s} + T_d s \right). \end{equation} Funkcija predstavlja zbroj proporcionalnog, integracijskog i derivacijskog kanala. Ne moraju svi nužno biti prisutni, pa se koriste i algoritmi upravljanja PI ili PD. U ovom primjeru prikazuje se vremenski odziv P, PI, PD ili PID regulatora za ulazne signale iz skupa: step-funkcija, impuls, rampa i sinus. --- ### Kako koristiti ovaj interaktivni primjer? 1. Izaberite između *jedinična step funkcija*, *jedinična impulsna funkcija*, *rampa funkcija* i *funkcija sinus* za odabir ulaznog signala. 2. Kliknite na gumb *P*, *PI*, *PD* ili *PID* za odabir između proporcionalnog, proporcionalno-integracijskog, proporcionalno-derivacijskog ili proporcionalno-integracijsko-derivacijskog tipa algoritma upravljanja. 3. Pomičite klizače da biste promijenili vrijednosti proporcionalnog ($K_p$), integracijskog ($T_i$) i derivacijskog ($T_d$) koeficijenta PID regulacije. 4. Pomičite klizač $t_{max}$ za promjenu maksimalne vrijednosti vremena na osi x. 
``` a = 0.1 # make figure fig = plt.figure(figsize=(9.8, 5),num='PID regulator') # add axes ax = fig.add_subplot(111) ax.grid(which='both', axis='both', color='lightgray') ax.set_title('Vremenski odziv') # plot step function and responses (initalisation) input_plot, = ax.plot([],[],'C0', linewidth=1,label='ulaz') response_plot, = ax.plot([],[], 'C1', linewidth=2,label='izlaz') ax.axhline(linewidth=.5, color='k') ax.axvline(linewidth=.5, color='k') ax.legend() ax.set_xlabel('$t$ [s]') ax.set_ylabel('ulaz, izlaz') plt.show() P, I, D, s = sym.symbols('P, I, D, s') input_type = 'jedinična step funkcija' #input function Time_span = 10 # max time on x-axis plot #initialize global variables KP = 1. TI = 1. TD = 1. num = [] den = [] def update_plot(): global num, den, input_type, Time_span num_temp = [float(i.subs(P,KP).subs(I,TI).subs(D,TD)) for i in num] den_temp = [float(i.subs(P,KP).subs(I,TI).subs(D,TD)) for i in den] system = signal.TransferFunction(num_temp, den_temp) #time, response = signal.step(system) #only for setting time borders (for nicer plot. could also calculate dominant frequency) #time = np.linspace(0,time[-1],1000) time = np.linspace(0, Time_span, 600) if input_type == 'jedinična step funkcija': u = np.ones_like(time) u = np.concatenate((np.array([0]),u)) time, response = signal.step(system, T=time) time = np.concatenate((np.array([0]), time)) response = np.concatenate((np.array([0]), response)) elif input_type == 'jedinična impulsna funkcija': u = np.zeros_like(time) u = np.concatenate((np.array([10]), u)) time, response = signal.impulse(system, T=time) time = np.concatenate((np.array([0]), time)) response = np.concatenate((np.array([0]), response)) elif input_type == 'funkcija sinus': u = np.sin(time*2*np.pi) time, response, _ = signal.lsim(system, U=u, T=time) elif input_type == 'rampa funkcija': u = time time, response, _ = signal.lsim(system, U=u, T=time) else: raise Exception("Greška u programu. 
Ponovno pokrenite simulaciju.") response_plot.set_data(time, response) input_plot.set_data(time, u) ax.set_ylim([min([np.min(u), min(response),-.1]),min(100,max([max(response)*1.05, 1, 1.05*np.max(u[1:])]))]) ax.set_xlim([-0.1,max(time)]) plt.show() def transfer_func(controller_type): global num, den proportional = P integral = P/(I*s) differential = P*D*s/(a*D*s+1) if controller_type =='P': controller_func = proportional Kp_widget.disabled=False Ti_widget.disabled=True Td_widget.disabled=True elif controller_type =='PI': controller_func = proportional+integral Kp_widget.disabled=False Ti_widget.disabled=False Td_widget.disabled=True elif controller_type == 'PD': controller_func = proportional+differential Kp_widget.disabled=False Ti_widget.disabled=True Td_widget.disabled=False else: controller_func = proportional+integral+differential Kp_widget.disabled=False Ti_widget.disabled=False Td_widget.disabled=False system_func = controller_func num = [sym.fraction(system_func.factor())[0].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func.factor())[0], gen=s)))] den = [sym.fraction(system_func.factor())[1].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func.factor())[1], gen=s)))] update_plot() def func(Kp, Ti, Td, time_span): global KP, TI, TD, Time_span KP = Kp TI = Ti TD = Td Time_span = time_span update_plot() style = {'description_width': 'initial'} def buttons_controller_clicked(event): controller = buttons_controller.options[buttons_controller.index] transfer_func(controller) buttons_controller = widgets.ToggleButtons( options=['P', 'PI', 'PD', 'PID'], description='Odaberite tip algoritma upravljanja:', disabled=False, style=style) buttons_controller.observe(buttons_controller_clicked) def buttons_input_clicked(event): global input_type input_type = buttons_input.options[buttons_input.index] update_plot() buttons_input = widgets.ToggleButtons( options=['jedinična step funkcija','jedinična impulsna 
funkcija', 'rampa funkcija', 'funkcija sinus'], description='Odaberite ulazni signal:', disabled=False, style=style) buttons_input.observe(buttons_input_clicked) Kp_widget = widgets.IntSlider(value=20,min=1,max=100,step=1,description=r'\(K_p \)', disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.1d') Ti_widget = widgets.FloatSlider(value=.1,min=0.001,max=3.,step=0.001,description=r'\(T_{i} \)', disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.3f') Td_widget = widgets.FloatSlider(value=.1,min=0.001,max=3.,step=0.001,description=r'\(T_{d} \)', disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.3f') time_span_widget = widgets.FloatSlider(value=10.,min=.5,max=50.,step=0.1,description=r'\(t_{max} \)', disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.1f') transfer_func('P') display(buttons_input) display(buttons_controller) interact(func, Kp=Kp_widget, Ti=Ti_widget, Td=Td_widget, time_span=time_span_widget); ```
github_jupyter
# How to use `refgenconf` to manage Refgenie assets in a pipeline Below we present an example use of the `refgenconf` package. It is installed automatically with `refgenie` (or separately installable with `pip install refgenconf`). All the asset fetching functionality is implemented in the `refgenconf` package, so pipelines that just use the Python API do not need to depend on `refgenie`. ## Goal The goal of the code below is to **get a path to the refgenie-managed fasta file for a user-specified genome**. The genome FASTA is a part of the `fasta` asset, accessible as a `fasta` seek key. To retrieve the path to this file on the command line one would say: `refgenie seek <genome>/fasta`. For example: ``` refgenie seek hg38/fasta ``` ## Steps First, let's set the `$REFGENIE` environment variable. It should be set by a pipeline user or the config file path should be provided explicitly, e.g. as an input to the pipeline (here shown as `user_provided_cfg_path = None` -- not provided) ``` import os os.environ["REFGENIE"] = "./refgenie.yaml" user_provided_cfg_path = None user_provided_genome = "rCRSd" ``` Next, let's import components of `refgenconf` that we'll use ``` from refgenconf import RefGenConf, select_genome_config, RefgenconfError, CFG_ENV_VARS, CFG_FOLDER_KEY from yacman import UndefinedAliasError ``` Now, we can use the `select_genome_config` function to determine the preferred path to the config file. If `user_provided_cfg_path` is `None` (not specified) the `$REFGENIE` environment variable is used. ``` refgenie_cfg_path = select_genome_config(filename=user_provided_cfg_path, check_exist=False) ``` The function returns `None` if none of the above point to a valid path. That's why we raise an appropriate error below. Obviously, the name of the `--rfg-config` argument depends on pipeline design. ``` if not refgenie_cfg_path: raise OSError(f"Could not determine path to a refgenie genome configuration file."
f"Use --rfg-config argument or set '{CFG_ENV_VARS}' environment variable to provide it") ``` Otherwise it returns a determined path (`str`). So, we check if it exists and read the object if it does. If it does not, we can initialize the config file ``` if isinstance(refgenie_cfg_path, str) and os.path.exists(refgenie_cfg_path): print(f"Reading refgenie genome configuration file from file: {refgenie_cfg_path}") rgc = RefGenConf(filepath=refgenie_cfg_path) else: print(f"File '{refgenie_cfg_path}' does not exist. Initializing refgenie genome configuration file.") rgc = RefGenConf(entries={CFG_FOLDER_KEY: os.path.dirname(refgenie_cfg_path)}) rgc.initialize_config_file(filepath=refgenie_cfg_path) rgc.subscribe(urls="http://rg.databio.org:82", reset=True) # subscribe to the desired server, if needed ``` Finally, we try to retrieve the path to out asset of interest and pull from `refgenieserver` if the retrieval fails. ``` try: fasta = rgc.seek(genome_name=user_provided_genome, asset_name="fasta", tag_name="default", seek_key="fasta") except (RefgenconfError, UndefinedAliasError): print("Could not determine path to chrom.sizes asset, pulling") rgc.pull(genome=user_provided_genome, asset="fasta", tag="default") fasta = rgc.seek(genome_name=user_provided_genome, asset_name="fasta", tag_name="default", seek_key="fasta") print(f"Determined path to fasta asset: {fasta}") ```
github_jupyter
/* * Copyright 2021 ConsenSys Software Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software dis- * tributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ # Notebook description This notebook contains some basic processing to automate the collection of statistics relating to the Dafny files. By creating functions to perform analysis of Dafny files, additional results can easily be added to the pandas dataframe. The use of a pandas dataframe provides many options for visualisation and the data can easily by stored in a csv. The data can also easily be supplemented with timestamps to faciliate time series analysis. This file is a working file and will be converted to a python script in due course. 
# TODO: Reformat function documentation to standard style used within this repo ## Libraries ``` # import libraries import os import subprocess import pandas as pd import re import numpy as np import time import shutil ``` ## File processing functions ``` # find *.dfy files, within a given local repo path # this function will search all subfolders of dirName # a sorted list of files is returned def getListOfDafnyFiles(dirName,exclude_folders=[]): listOfFile = os.listdir(dirName) allFiles = list() for entry in listOfFile: fullPath = os.path.join(dirName, entry) # if entry is a directory then append the list of files in this directory to allFiles if os.path.isdir(fullPath): if os.path.abspath(fullPath) not in exclude_folders: allFiles = allFiles + getListOfDafnyFiles(fullPath, exclude_folders) # else append file only if it is a Dafny file else: if entry.endswith(".dfy"): allFiles.append(fullPath) return sorted(allFiles) # find folders within the repo that have *.dfy files # a sorted list of folders is returned (i.e. full path of each folder) def getListOfDafnyFolders(dafnyFiles): listOfDirectories = list() for file in dafnyFiles: listOfDirectories.append(os.path.dirname(file)) return sorted(list(set(listOfDirectories))) # get folder for an individual dafny file # i.e. 
for the full path of a dafny file, the filename and repo path are striped def getFolder(repo, dafny_file): repo_path, folder = os.path.dirname(dafny_file).split(repo,1) return folder ``` ### Test file processing functions ``` # test the getListOfDafnyFiles, getListOfDafnyFolders and getFolder functions # local repo path needs to be set prior to running the tests and `if False` # must be changed to `if True` if False: repo_directory = "/home/roberto/projects_offline/lavoro/consensys/content/eth2.0-dafny-for-stats" exclude_folders_rel_path = ["src/dafny/libraries/integers"] exclude_folders_full_path = [os.path.join(repo_directory,f) for f in exclude_folders] print("Test getListOfDafnyFiles: ") files = getListOfDafnyFiles(repo_directory, exclude_folders_full_path) for i in files: print(i) print("Length of returned list: ", len(files)) print("Test getListOfDafnyFolders: ") directories = getListOfDafnyFolders(files) for i in directories: print(i) print("Length of returned list: ", len(directories)) print("Test getFolder for each file in files: ") for file in files: print(getFolder(repo_directory, file)) ``` ## Functions to collect statistics ``` # count the number of "lemmas" in a given dafny file # this function uses a subprocess call # an alternative method would be to read and search the file directly def getLemmas(dafny_file): cmd = "cat " + dafny_file +"| grep lemma | wc -l" result = subprocess.run(['/bin/bash', '-i', '-c', cmd], stdout=subprocess.PIPE) return result.stdout.strip().decode('ascii') # count the number of "function methods" in a given dafny file # this function uses a subprocess call # an alternative method would be to read and search the file directly def getFunctions(dafny_file): cmd = "cat " + dafny_file +"| grep function | grep method | wc -l" result = subprocess.run(['/bin/bash', '-i', '-c', cmd], stdout=subprocess.PIPE) return result.stdout.strip().decode('ascii') # count the number of ghost (= function and lemmas) processes # ignores function 
methods
# to be referred to as "Theorems" in the data display
def getGhost(dafny_file):
    """Count ghost declarations: lines starting with "function" or "lemma".

    A "function method" is compiled (non-ghost) and is therefore excluded.
    """
    count = 0
    # context manager guarantees the file handle is closed even if reading raises
    with open(dafny_file, "r") as tmp_file:
        for line in tmp_file.readlines():
            if line.strip().startswith(("function", "lemma")):
                if not line.strip().startswith("function method"):
                    count += 1
    return count

# count the number of non-ghost (= function methods, methods and predicates) processes
# to be referred to as "Implementations" in the data display
def getNonGhost(dafny_file):
    """Count non-ghost declarations: "function method", "method" or "predicate" lines."""
    count = 0
    with open(dafny_file, "r") as tmp_file:
        for line in tmp_file.readlines():
            if line.strip().startswith(("function method", "method", "predicate")):
                count += 1
    return count

# count the number of lines of code
# the count occurs after the dafny file is printed using the compiler
# the count also occurs after this output has been cleaned
def getLoC(dafny_file):
    """Count non-empty, non-comment lines of the compiler-printed form of dafny_file."""
    show_ghost = True
    executable = "dafny"
    args = []
    args += ['/rprint:-']
    args += ["/noAutoReq"]
    args += ["/noVerify"]
    args += ["/env:0"]
    if show_ghost:
        args += ["/printMode:NoIncludes"]
    else:
        args += ["/printMode:NoGhost"]
    args += [dafny_file]
    cmd = ' '.join([executable] + args)
    result = subprocess.run(['/bin/bash', '-i', '-c', cmd], stdout=subprocess.PIPE)
    output = result.stdout.decode('ascii')

    count = 0
    for line in output.splitlines():
        # clean output i.e. skip the verifier status line and comment lines
        if line.startswith(("Dafny program verifier did not attempt verification", "//")):
            pass
        else:
            if line.strip():
                count += 1
    return count

# count the number of lines included in the license comment
# assumes license comment is at the start of the file and is of format /* ...
*/ # assumes that it has been confirmed that the file has a license comment def getLicenseLineCount(dafny_file): tmp_file = open(dafny_file, "r") count = 0 flag = 0 for line in tmp_file.readlines(): tmp_line = line.strip() cleaned = ' '.join(i for i in tmp_line.split() if i not in ["//", "/*", "/**", "*", "*/"]) if (not flag) and (tmp_line.startswith("/*")): if cleaned: count += 1 flag = 1 elif flag: if cleaned: count += 1 if tmp_line.startswith("*/"): tmp_file.close() return count # count the number of lines of documentation # don't include license comment or empty comment lines def getDocumentation(dafny_file): tmp_file = open(dafny_file, "r") count = 0 license_flag = 0 for line in tmp_file.readlines(): tmp_line = line.strip() if tmp_line.startswith(("//", "/*", "/**", "*", "*/")): cleaned = ' '.join(i for i in tmp_line.split() if i not in ["//", "/*", "/**", "*", "*/"]) if cleaned: #print(cleaned) count += 1 #print(line) if tmp_line.startswith("* Copyright 2021 ConsenSys Software Inc."): license_flag = 1 tmp_file.close() if license_flag: count -= getLicenseLineCount(dafny_file) #print(getLicenseLineCount(dafny_file)) return count # count the number of theorems (getGhost) and implementations (getNonGhost) proved # i.e. 
check that the number of errors when verified is zero # TODO: include arguments for getGhost and getNonGhost to reduce duplicate processing def getProved(dafny_file): cmd = "dafny /dafnyVerify:1 /compile:0 " + dafny_file result = subprocess.run(['/bin/bash', '-i', '-c', cmd], stdout=subprocess.PIPE) output = result.stdout.decode('ascii') for line in output.splitlines(): if line.startswith("Dafny program verifier finished with "): # check no errors #print(line, re.findall(r'\d+', line)[1], type(re.findall(r'\d+', line)[1])) if not int(re.findall(r'\d+', line)[1]): return (getGhost(dafny_file) + getNonGhost(dafny_file)) else: pass # if the verifier doesn't finish, return -1 return 0 ``` ### Test statistics functions ``` # s/False/True if need to run the tests if False: # test file options: test_file = "/Users/joannefuller/Documents/vscode/eth2.0-dafny/src/dafny/ssz/BytesAndBits.dfy" #test_file = "/Users/joannefuller/Documents/vscode/eth2.0-dafny/test/dafny/merkle/Merkleise.test.dfy" #test_file = "/Users/joannefuller/Documents/vscode/eth2.0-dafny/test/dafny/ssz/BitListSeDes.tests.dfy" #test_file = "/Users/joannefuller/Documents/vscode/eth2.0-dafny/src/dafny/ssz/BitListSeDes.dfy" #test_file = "/Users/joannefuller/Documents/vscode/eth2.0-dafny/src/dafny/merkle/Merkleise.dfy" #print("Lemmas ...") #print(getLemmas(test_file)) #print("Function methods ...") #print(getFunctions(test_file)) #print("LoC ...") #print(getLoC(test_file)) #print("Documentation ...") #print(getDocumentation(test_file)) print("Proved (verified from compile) ...") print(getProved(test_file)) #print("Ghost ...") #rint(getGhost(test_file)) #print("NonGhost ...") #print(getNonGhost(test_file)) ``` ## Collate results into a pandas dataframe One row per Dafny file. 
``` import tempfile # create a pandas dataframe to store stats relating to the dafny files column_list = ['Files', 'Folder', '#LoC', 'Theorems', 'Implementations', "Documentation", "#Doc/#LoC (%)", "Proved"] # list here all the directory not to include in the stat collection with path relative to the root of the repo exclude_folders_rel_path = ["src/dafny/libraries/integers"] # performs a clean checkout from GitHub before collecting the stats with tempfile.TemporaryDirectory() as repo_directory: # subprocess.run(['/bin/bash','-c','git clone git@github.com:PegaSysEng/eth2.0-dafny.git ' + repo_directory], stdout=subprocess.PIPE) repo_directory = "/Users/franck/development/eth2.0-dafny/src/dafny/" exclude_folders_full_path = [os.path.join(repo_directory,f) for f in exclude_folders_rel_path] files = getListOfDafnyFiles(repo_directory, exclude_folders_full_path) df = pd.DataFrame(columns=column_list) # collect data for each dafny file for file in files: loc = getLoC(file) ghost = getGhost(file) nonghost = getNonGhost(file) doc = getDocumentation(file) proved = getProved(file) df2 = pd.DataFrame([[os.path.basename(file), getFolder(repo_directory, file), loc , ghost, nonghost, doc, round(doc/loc * 100), proved]], columns=column_list) df = df.append(df2, ignore_index=True) # create and append totals for numeric columns totals = pd.DataFrame([["", "TOTAL", df['#LoC'].sum(), df['Theorems'].sum(), df['Implementations'].sum(), df['Documentation'].sum(), round(df['Documentation'].sum()/df['#LoC'].sum() * 100), df['Proved'].sum()]], columns=column_list) df = df.append(totals, ignore_index=True) # convert numeric columns to int64 numCols = ['#LoC', 'Theorems', 'Implementations', "Documentation", "#Doc/#LoC (%)", "Proved"] df[numCols] = df[numCols].astype("int64") #display a sample of rows df.head(len(df)) # create a pandas dataframe to store stats relating to the dafny files column_list = ['Files', 'Folder', '#LoC', 'Theorems', 'Implementations', "Documentation", "#Doc/#LoC (%)", 
"Proved"] # list here all the directory not to include in the stat collection with path relative to the root of the repo exclude_folders_rel_path = ["src/dafny/libraries/integers"] # performs a clean checkout from GitHub before collecting the stats with tempfile.TemporaryDirectory() as repo_directory: # subprocess.run(['/bin/bash','-c','git clone git@github.com:PegaSysEng/eth2.0-dafny.git ' + repo_directory], stdout=subprocess.PIPE) repo_directory = "/Users/franck/development/eth2.0-dafny/src/dafny/" exclude_folders_full_path = [os.path.join(repo_directory,f) for f in exclude_folders_rel_path] files = getListOfDafnyFiles(repo_directory, exclude_folders_full_path) df = pd.DataFrame(columns=column_list) # collect data for each dafny file for file in files: loc = getLoC(file) ghost = getGhost(file) nonghost = getNonGhost(file) doc = getDocumentation(file) proved = getProved(file) df2 = pd.DataFrame([[os.path.basename(file), getFolder(repo_directory, file), loc , ghost, nonghost, doc, round(doc/loc * 100), proved]], columns=column_list) df = df.append(df2, ignore_index=True) # create and append totals for numeric columns totals = pd.DataFrame([["", "TOTAL", df['#LoC'].sum(), df['Theorems'].sum(), df['Implementations'].sum(), df['Documentation'].sum(), round(df['Documentation'].sum()/df['#LoC'].sum() * 100), df['Proved'].sum()]], columns=column_list) df = df.append(totals, ignore_index=True) ``` ### Alternative format May be useful for github ``` from tabulate import tabulate output = tabulate(df, headers='keys', tablefmt='github') with open('../wiki/stats.md', 'w') as f: f.write(output) timestr = time.strftime("%Y-%m-%d-%H:%M") mdfile = 'data/md/data' + timestr + '.md' with open(mdfile, 'w') as f: f.write(output) # sys.stdout = f # Change the standard output to the file we created. 
# print('This message will be written to a file.') # sys.stdout = original_stdout # ``` | | Files | Folder | #LoC | Theorems | Implementations | Documentation | #Doc/#LoC (%) | Proved | |----|---------------------|----------|--------|------------|-------------------|-----------------|-----------------|----------| | 0 | ForkChoice.dfy | | 226 | 3 | 15 | 172 | 76 | 18 | | 1 | ForkChoiceTypes.dfy | | 7 | 0 | 0 | 17 | 243 | 0 | | 2 | | TOTAL | 233 | 3 | 15 | 189 | 81 | 18 | ## Group data One row per folder. ``` # create a pandas dataframe to store stats relating to the dafny files # stats grouped by folder column_list = ['Folder', '#Files', '#LoC', 'Theorems', 'Implementations', "Documentation", "#Doc/#LoC (%)", "Proved"] df_grouped = pd.DataFrame(columns=column_list) with tempfile.TemporaryDirectory() as repo_directory: subprocess.run(['/bin/bash','-c','git clone git@github.com:PegaSysEng/eth2.0-dafny.git ' + repo_directory], stdout=subprocess.PIPE) exclude_folders_full_path = [os.path.join(repo_directory,f) for f in exclude_folders_rel_path] # TODO: We currently get the list of folders out of the list of files and then in the `for` loop # we retrieve the list of files again for each folder. We may want to think of a more elegant # implementation. 
allFiles = getListOfDafnyFiles(repo_directory, exclude_folders_full_path) folders = getListOfDafnyFolders(allFiles) for folder in folders: files = getListOfDafnyFiles(folder) nFiles = 0 nLoc = 0 nGhost = 0 nNonGhost = 0 nDoc = 0 nProved = 0 for file in files: nFiles += 1 nLoc += getLoC(file) nGhost += getGhost(file) nNonGhost += getNonGhost(file) nDoc += getDocumentation(file) nProved += getProved(file) df2 = pd.DataFrame([[getFolder(repo_directory, files[0]), nFiles, nLoc , nGhost, nNonGhost, nDoc, round(nDoc/nLoc * 100), nProved]], columns=column_list) df_grouped = df_grouped.append(df2, ignore_index=True) #display a sample of rows df_grouped.head(len(df_grouped)) ``` ### Print dataframe to .csv, .tex and .pdf ``` # create filenames that include the current data string timestr = time.strftime("%Y-%m-%d-%H:%M") rawfile = 'data' + timestr + '.csv' grouped_rawfile = 'dataGrouped' + timestr + '.csv' filename = 'data' + timestr + '.tex' pdffile = 'data' + timestr + '.pdf' # check if data directory already exists and create if necessary if not os.path.exists('data'): os.makedirs('data') #print to csv file without an index df.to_csv("data/csv/" + rawfile, index = False) df_grouped.to_csv("data/csv/" + grouped_rawfile, index = False) #print to pdf via latex template = r'''\documentclass[a4paper, 12pt]{{article}} \usepackage[landscape]{{geometry}} \usepackage{{booktabs}} \begin{{document}} \section*{{https://github.com/ConsenSys/eth2.0-dafny}} \subsection*{{Data collected: {}}} \scriptsize {} \vspace{{2em}} {} \end{{document}} ''' with open(filename, 'w') as f: f.write(template.format(time.strftime("%Y-%m-%d-%H:%M"), df.to_latex(index=False), df_grouped.to_latex(index=False))) subprocess.call(['pdflatex', filename]) # remove surplus files and move .csv, .tex and .pdf files to the data folder os.remove('data' + timestr + '.log') os.remove('data' + timestr + '.aux') shutil.move(filename, "data/tex/" + filename) shutil.move(pdffile, "data/pdf/" + pdffile) ```
github_jupyter
# Convolutional Autoencoder Sticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data. ``` %matplotlib inline import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', validation_size=0) img = mnist.train.images[2] plt.imshow(img.reshape((28, 28)), cmap='Greys_r') ``` ## Network Architecture The encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below. <img src='assets/convolutional_autoencoder.png' width=500px> Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data. ### What's going on with the decoder Okay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers *aren't*. Usually, you'll see **transposed convolution** layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. 
For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 path in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, [`tf.nn.conv2d_transpose`](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d_transpose). However, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In [this Distill article](http://distill.pub/2016/deconv-checkerboard/) from Augustus Odena, *et al*, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with [`tf.image.resize_images`](https://www.tensorflow.org/versions/r1.1/api_docs/python/tf/image/resize_images), followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling. > **Exercise:** Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by 2. Odena *et al* claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in `tf.image.resize_images` or use [`tf.image.resize_nearest_neighbor`]( `https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor). For convolutional layers, use [`tf.layers.conv2d`](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d). 
For example, you would write `conv1 = tf.layers.conv2d(inputs, 32, (5,5), padding='same', activation=tf.nn.relu)` for a layer with a depth of 32, a 5x5 kernel, stride of (1,1), padding is 'same', and a ReLU activation. Similarly, for the max-pool layers, use [`tf.layers.max_pooling2d`](https://www.tensorflow.org/api_docs/python/tf/layers/max_pooling2d). ``` inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs') targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets') ### Encoder conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x16 maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same') # Now 14x14x16 conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x8 maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same') # Now 7x7x8 conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x8 encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same') # Now 4x4x8 ### Decoder upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7)) # Now 7x7x8 conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x8 upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14)) # Now 14x14x8 conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x8 upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28)) # Now 28x28x8 conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x16 logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None) #Now 28x28x1 decoded = tf.nn.sigmoid(logits, name='decoded') loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001).minimize(cost) ``` ## Training As before, here wi'll train the network. 
Instead of flattening the images though, we can pass them in as 28x28x1 arrays. ``` sess = tf.Session() epochs = 20 batch_size = 200 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) imgs = batch[0].reshape((-1, 28, 28, 1)) batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs, targets_: imgs}) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))}) for images, row in zip([in_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) sess.close() ``` ## Denoising As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images. ![Denoising autoencoder](assets/denoising.png) Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before. > **Exercise:** Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. 
I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers. ``` inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs') targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets') ### Encoder conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x32 maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same') # Now 14x14x32 conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x32 maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same') # Now 7x7x32 conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x16 encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same') # Now 4x4x16 ### Decoder upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7)) # Now 7x7x16 conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x16 upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14)) # Now 14x14x16 conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x32 upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28)) # Now 28x28x32 conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x32 logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None) #Now 28x28x1 decoded = tf.nn.sigmoid(logits, name='decoded') loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001).minimize(cost) sess = tf.Session() epochs = 100 batch_size = 200 # Set's how much noise we're adding to the MNIST images noise_factor = 0.5 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) # Get images from the batch imgs = batch[0].reshape((-1, 28, 28, 1)) 
# Add random noise to the input images noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape) # Clip the images to be between 0 and 1 noisy_imgs = np.clip(noisy_imgs, 0., 1.) # Noisy images as inputs, original images as targets batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs, targets_: imgs}) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) ``` ## Checking out the performance Here I'm adding noise to the test images and passing them through the autoencoder. It does a suprising great job of removing the noise, even though it's sometimes difficult to tell what the original number is. ``` fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape) noisy_imgs = np.clip(noisy_imgs, 0., 1.) reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))}) for images, row in zip([noisy_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) ```
github_jupyter
<a href="https://colab.research.google.com/github/j3nguyen/jupyter_notebooks/blob/master/Forming_Teams.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #Forming Teams to Create Synergy ## Background As the saying goes, "two heads are better than one". [Research](https://scholar.google.ca/scholar?q=effectiveness+of+teamwork+in+the+workplace) has shown that individuals working in teams are more effective together than working alone. Knowing this insight, how can we form the right teams to maximize overall productivity? While this idea can be applied to any domain, e.g., sales associates, research groups, etc., we'll form teams of comic book superheroes for this demonstration. ![Superheroes](https://ap2hyc.com/wp-content/uploads/2016/11/12444766_why-marvel-had-to-pull-one-of-its-comic_5138a63e_m.jpg) ### Conditions Before we start to assign heroes to teams, there are certain conditions that the teams must satisfy: 1. **Balance**: Each team must have at least one high performer 2. **Diversity:** Each team must have at least one female and one non-human 3. **Completeness**: All superheroes must be assigned to a team. (We don't want anyone to feel excluded.) 4. **Team Size**: Teams have a maximum of 8 superheroes ## Solution This is an example of constraint optimization (aka constraint programming), where the main goal is to find a solution that meets the given conditions. Although we are still interested in maximizing overall productivity, it is a secondary priority. More details about constraint optimization can be found [here](https://developers.google.com/optimization/cp). For this problem, we will use Google's OR Tools [CP-SAT solver](https://developers.google.com/optimization/reference/python/sat/python/cp_model). The following code is adapted from this wedding guest [example](https://github.com/google/or-tools/blob/stable/examples/notebook/examples/wedding_optimal_chart_sat.ipynb). 
``` # install Google's OR tools library %pip install --upgrade --user ortools ``` ### 1. Load and prepare the data To keep things simple, we'll work with only the "good" super heroes from the Marvel and DC comics from this [superhero dataset](https://www.kaggle.com/claudiodavi/superhero-set). Any superhero with above average powers is considered a "high performer". ``` import pandas as pd superheroes = pd.read_csv("heroes_information.csv") powers = pd.read_csv("super_hero_powers.csv") ## keep only good superheroes and DC/Marvel characters for simplicity superheroes = superheroes[(superheroes.Publisher == "Marvel Comics") | (superheroes.Publisher == "DC Comics")] superheroes = superheroes[superheroes.Alignment == "good"] superheroes.head() ## create female indicator column superheroes["is_female"] = superheroes["Gender"] == "Female" superheroes["Gender"].value_counts() ## create non-human indicator column superheroes["is_non_human"] = superheroes["Race"] != "Human" powers.head() ## merge powers with the superheros data superheroes = pd.merge(superheroes, powers, how="left", left_on="name", right_on="hero_names") # sum up each hero's powers: superheroes.replace(to_replace={False: 0, True: 1}, inplace=True) superheroes["powers"] = superheroes.loc[:,"Agility":"Omniscient"].sum(axis=1).map(int) superheroes["powers"].describe() # for this demo, we'll assume if superhero has more than 8 powers, they are a high performer superheroes["is_high_performer"] = superheroes["powers"] > 8 superheroes["is_high_performer"].value_counts() # for time, we'll randomly select 17 superheroes superheroes = superheroes.sample(17) superheroes = superheroes.reset_index() superheroes["is_female"].value_counts() import math MAX_TEAM_SIZE = 8 num_superheroes = superheroes.shape[0] num_teams = math.ceil(num_superheroes / MAX_TEAM_SIZE) all_superheroes = range(superheroes.shape[0]) all_teams = range(num_teams) print("There are %d superheroes." 
% num_superheroes) print("We can make at most %d teams of size %d" % (num_teams, MAX_TEAM_SIZE)) print(superheroes.loc[:,["name","is_high_performer","Gender","Publisher"]]) ``` ### 2. Instantiate the solver ``` from ortools.sat.python import cp_model model = cp_model.CpModel() ``` ### 3. Declare decision variables Decision variables allow the solver to assign superheroes to teams. The solver will set the variables to `1` if a superhero is on a certain team. 1. Let $t_{ij} = 1$ if superhero $j$ is on team $i$ and $t_{ij} = 0$ otherwise. 2. Let $m_{jk} = 1$ if superhero $j$ is on the same team as superhero $k$ and $m_{jk} = 0$ otherwise. 3. Let $s_{ijk} = 1$ if superhero $j$ and $k$ are on team $i$, and $s_{ijk} = 0$ otherwise. We add these variables using the `NewBoolVar` [method](https://developers.google.com/optimization/reference/python/sat/python/cp_model#newboolvar). ``` # decision variables # superhero a is in team t if team[(a,t)] = 1 teams = {} for a in all_superheroes: for t in all_teams: teams[(t, a)] = model.NewBoolVar('team:%i superhero:%i' % (t, a)) team_members = {} for a1 in range(num_superheroes - 1): for a2 in range(a1 + 1, num_superheroes): team_members[(a1, a2)] = model.NewBoolVar('superhero %i is teamed with superhero %i' % (a1, a2)) same_team = {} for a1 in range(num_superheroes - 1): for a2 in range(a1 + 1, num_superheroes): for t in all_teams: same_team[(a1, a2, t)] = model.NewBoolVar( 'superhero %i is teamed with superhero %i on team %i' % (a1, a2, t)) ``` ### 3. Add constraints Now that we have the decision variables, we can add the constraints using the solver's `Add` [method](https://developers.google.com/optimization/reference/python/sat/python/cp_model#add). The solver will decide which of the decision variables to set to `1`, i.e., which superhero to assign to which team, and satisfy the constraints. 
``` # set constants MIN_TEAM_SIZE = 1 MIN_HIGH_PERFORMER = 1 MIN_FEMALE = 1 MIN_NONHUMAN = 1 ``` Let $S$ be the set of superheroes and $T$ the set of teams. 1. Every superhero can only be on one team. $$\forall j \in S, \sum_{i\in T} t_{ij} = 1$$ ``` # each superhero is assigned to only one team for a in all_superheroes: model.Add(sum(teams[(t, a)] for t in all_teams) == 1) ``` 2. Teams can have at most 8 superheroes. $$\forall i \in T, \sum_{j \in S} t_{ij} \le 8 $$ ``` # teams can have at most MAX_TEAM_SIZE superheroes for t in all_teams: model.Add(sum(teams[(t, a)] for a in all_superheroes) <= MAX_TEAM_SIZE) ``` 3. Teams have at least one high performer. $$\forall i \in T, \sum_{j \in S} I(j=\text{high performer})*t_{ij} \ge 1 $$ 4. Teams have at least one female. $$\forall i \in T, \sum_{j \in S} I(j=\text{female})*t_{ij} \ge 1 $$ 5. Teams have at least one non-human superhero. $$\forall i \in T, \sum_{j \in S} I(j=\text{non-human})*t_{ij} \ge 1 $$ ``` for t in all_teams: # each team has at least one high performer, i.e., all high performers can't be in the same team model.Add(sum(superheroes.loc[a, 'is_high_performer'] * teams[(t, a)] for a in all_superheroes) >= MIN_HIGH_PERFORMER) # each team has at least one female and one non-human superhero model.Add(sum(superheroes.loc[a, 'is_female'] * teams[(t, a)] for a in all_superheroes) >= MIN_FEMALE) model.Add(sum(superheroes.loc[a, 'is_non_human'] * teams[(t, a)] for a in all_superheroes) >= MIN_NONHUMAN) # add one final constraint to link it all together # Link team members with teams for a1 in range(num_superheroes - 1): for a2 in range(a1 + 1, num_superheroes): for t in all_teams: # Link same_team and teams, i.e., one of the following is true model.AddBoolOr([ teams[(t, a1)].Not(), teams[(t, a2)].Not(), same_team[(a1, a2, t)] ]) # a1 and a2 being on team t means a1 is on team t and a2 is on team t model.AddImplication(same_team[(a1, a2, t)], teams[(t, a1)]) model.AddImplication(same_team[(a1, a2, t)], 
teams[(t, a2)]) # Link team_members and same_team. model.Add(sum(same_team[(a1, a2, t)] for t in all_teams) == team_members[(a1, a2)]) ``` **Adding an objective function** Optionally, we can also add an objective function to maximize overall team synergy. In practice, the synergies of individuals is estimated or known in advance of applying CO. For this scenario, let's assume that when on the same team, superheroes from the same universe complement each other's powers so that the resulting output is twice their combined powers. On the otherhand, when superheroes from different universes are teamed, they have trouble working together so there is no productivity gained. Let $p_j$ represent the power of superhero $j$ and $u_j$ represent the comic universe of superhero $j$ $$synergy(j,k) = \begin{cases} 2(p_j+p_k) & u_j = u_k \\ 0 & u_j \ne u_k \\ \end{cases} \ $$ We want to maximize the overall synergy of all teams $$ \sum_{j}\sum_{k}\text{synergy}(j,k)*m_{jk} $$ ``` def synergy(universe1, universe2, power1, power2): synergy_factor = 2 # superheroes of the same universe create synergy if universe1 == universe2: return (power1+power2)*synergy_factor #superheroes from different universe detract else: return (power1+power2)*0 # Objective model.Maximize( sum(synergy(superheroes.loc[a1,'Publisher'], superheroes.loc[a2,'Publisher'],superheroes.loc[a1,'powers'],superheroes.loc[a2,'powers']) * team_members[a1, a2] for a1 in range(num_superheroes - 1) for a2 in range(a1 + 1, num_superheroes))) ``` ### 4. Run the solver We can now run the solver to find our teams! 
``` # call the solver solver = cp_model.CpSolver() status = solver.Solve(model) solution_printer = SuperheroesPartialSolutionPrinter(teams, num_superheroes, num_teams, range(5)) if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL: for t in all_teams: print('\nTeam %i' % t) for a in all_superheroes: if solver.Value(teams[t, a]): name = superheroes.loc[a, 'name'] gender = superheroes.loc[a,'Gender'] race = superheroes.loc[a,'Race'] num_powers = superheroes.loc[a,'powers'] universe = superheroes.loc[a,'Publisher'] print("%s - %s - %s - %s Powers: %d" % (name, gender, race, universe, num_powers)) print('Statistics') print(' - conflicts : %i' % solver.NumConflicts()) print(' - branches : %i' % solver.NumBranches()) print(' - wall time : %f s' % solver.WallTime()) print(' - solutions found : %i' % solution_printer.solution_count()) ``` As we can see, the solver chose to make teams homogeneous in terms of the comic book universe to maximize productivity. This makes sense since we specified that being from the same universe is more productive. ## Summary We saw how to create teams to potentially maximize productivity (that is, if every superhero puts their egos aside and works cooperatively). Although this was a fun, example, it can be applied to sports, business, even a [wedding](https://www.improbable.com/2012/02/12/finding-an-optimal-seating-chart-for-a-wedding/). ``` ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/2_run_experiment.png) # Running AutoML experiments See the `auto-ml-forecasting-univariate-recipe-experiment-settings` notebook on how to determine settings for seasonal features, target lags and whether the series needs to be differenced or not. To make experimentation user-friendly, the user has to specify several parameters: DIFFERENCE_SERIES, TARGET_LAGS and STL_TYPE. Once these parameters are set, the notebook will generate correct transformations and settings to run experiments, generate forecasts, compute inference set metrics and plot forecast vs actuals. It will also convert the forecast from first differences to levels (original units of measurement) if the DIFFERENCE_SERIES parameter is set to True before calculating inference set metrics. <br/> The output generated by this notebook is saved in the `experiment_output`folder. ### Setup ``` import os import logging import pandas as pd import numpy as np import azureml.automl.runtime from azureml.core.compute import AmlCompute from azureml.core.compute import ComputeTarget import matplotlib.pyplot as plt from helper_functions import ts_train_test_split, compute_metrics import azureml.core from azureml.core.workspace import Workspace from azureml.core.experiment import Experiment from azureml.train.automl import AutoMLConfig # set printing options np.set_printoptions(precision=4, suppress=True, linewidth=100) pd.set_option("display.max_columns", 500) pd.set_option("display.width", 1000) ``` As part of the setup you have already created a **Workspace**. You will also need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. 
In this tutorial, you create AmlCompute as your training compute resource. > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. ``` ws = Workspace.from_config() amlcompute_cluster_name = "recipe-cluster" found = False # Check if this compute target already exists in the workspace. cts = ws.compute_targets if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == "AmlCompute": found = True print("Found existing compute target.") compute_target = cts[amlcompute_cluster_name] if not found: print("Creating a new compute target...") provisioning_config = AmlCompute.provisioning_configuration( vm_size="STANDARD_D2_V2", max_nodes=6 ) # Create the cluster.\n", compute_target = ComputeTarget.create( ws, amlcompute_cluster_name, provisioning_config ) print("Checking cluster status...") # Can poll for a minimum number of nodes and for a specific timeout. # If no min_node_count is provided, it will use the scale settings for the cluster. compute_target.wait_for_completion( show_output=True, min_node_count=None, timeout_in_minutes=20 ) ``` ### Data Here, we will load the data from the csv file and drop the Covid period. ``` main_data_loc = "data" train_file_name = "S4248SM144SCEN.csv" TARGET_COLNAME = "S4248SM144SCEN" TIME_COLNAME = "observation_date" COVID_PERIOD_START = ( "2020-03-01" # start of the covid period. To be excluded from evaluation. 
) # load data df = pd.read_csv(os.path.join(main_data_loc, train_file_name)) df[TIME_COLNAME] = pd.to_datetime(df[TIME_COLNAME], format="%Y-%m-%d") df.sort_values(by=TIME_COLNAME, inplace=True) # remove the Covid period df = df.query('{} <= "{}"'.format(TIME_COLNAME, COVID_PERIOD_START)) ``` ### Set parameters The first set of parameters is based on the analysis performed in the `auto-ml-forecasting-univariate-recipe-experiment-settings` notebook. ``` # set parameters based on the settings notebook analysis DIFFERENCE_SERIES = True TARGET_LAGS = None STL_TYPE = None ``` Next, define additional parameters to be used in the <a href="https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig?view=azure-ml-py"> AutoML config </a> class. <ul> <li> FORECAST_HORIZON: The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 quarters). For more discussion of forecast horizons and guiding principles for setting them, please see the <a href="https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand"> energy demand notebook </a>. </li> <li> TIME_SERIES_ID_COLNAMES: The names of columns used to group a timeseries. It can be used to create multiple series. If time series identifier is not defined, the data set is assumed to be one time-series. This parameter is used with task type forecasting. Since we are working with a single series, this list is empty. </li> <li> BLOCKED_MODELS: Optional list of models to be blocked from consideration during model selection stage. At this point we want to consider all ML and Time Series models. 
<ul> <li> See the following <a href="https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py"> link </a> for a list of supported Forecasting models</li> </ul> </li> </ul> ``` # set other parameters FORECAST_HORIZON = 12 TIME_SERIES_ID_COLNAMES = [] BLOCKED_MODELS = [] ``` To run AutoML, you also need to create an **Experiment**. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem. ``` # choose a name for the run history container in the workspace if isinstance(TARGET_LAGS, list): TARGET_LAGS_STR = ( "-".join(map(str, TARGET_LAGS)) if (len(TARGET_LAGS) > 0) else None ) else: TARGET_LAGS_STR = TARGET_LAGS experiment_desc = "diff-{}_lags-{}_STL-{}".format( DIFFERENCE_SERIES, TARGET_LAGS_STR, STL_TYPE ) experiment_name = "alcohol_{}".format(experiment_desc) experiment = Experiment(ws, experiment_name) output = {} output["SDK version"] = azureml.core.VERSION output["Subscription ID"] = ws.subscription_id output["Workspace"] = ws.name output["SKU"] = ws.sku output["Resource Group"] = ws.resource_group output["Location"] = ws.location output["Run History Name"] = experiment_name pd.set_option("display.max_colwidth", -1) outputDf = pd.DataFrame(data=output, index=[""]) print(outputDf.T) # create output directory output_dir = "experiment_output/{}".format(experiment_desc) if not os.path.exists(output_dir): os.makedirs(output_dir) # difference data and test for unit root if DIFFERENCE_SERIES: df_delta = df.copy() df_delta[TARGET_COLNAME] = df[TARGET_COLNAME].diff() df_delta.dropna(axis=0, inplace=True) # split the data into train and test set if DIFFERENCE_SERIES: # generate train/inference sets using data in first differences df_train, df_test = ts_train_test_split( df_input=df_delta, n=FORECAST_HORIZON, time_colname=TIME_COLNAME, ts_id_colnames=TIME_SERIES_ID_COLNAMES, ) else: df_train, df_test = 
ts_train_test_split( df_input=df, n=FORECAST_HORIZON, time_colname=TIME_COLNAME, ts_id_colnames=TIME_SERIES_ID_COLNAMES, ) ``` ### Upload files to the Datastore The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace) is paired with the storage account, which contains the default data store. We will use it to upload the bike share data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation. ``` df_train.to_csv("train.csv", index=False) df_test.to_csv("test.csv", index=False) datastore = ws.get_default_datastore() datastore.upload_files( files=["./train.csv"], target_path="uni-recipe-dataset/tabular/", overwrite=True, show_progress=True, ) datastore.upload_files( files=["./test.csv"], target_path="uni-recipe-dataset/tabular/", overwrite=True, show_progress=True, ) from azureml.core import Dataset train_dataset = Dataset.Tabular.from_delimited_files( path=[(datastore, "uni-recipe-dataset/tabular/train.csv")] ) test_dataset = Dataset.Tabular.from_delimited_files( path=[(datastore, "uni-recipe-dataset/tabular/test.csv")] ) # print the first 5 rows of the Dataset train_dataset.to_pandas_dataframe().reset_index(drop=True).head(5) ``` ### Config AutoML ``` time_series_settings = { "time_column_name": TIME_COLNAME, "forecast_horizon": FORECAST_HORIZON, "target_lags": TARGET_LAGS, "use_stl": STL_TYPE, "blocked_models": BLOCKED_MODELS, "time_series_id_column_names": TIME_SERIES_ID_COLNAMES, } automl_config = AutoMLConfig( task="forecasting", debug_log="sample_experiment.log", primary_metric="normalized_root_mean_squared_error", experiment_timeout_minutes=20, iteration_timeout_minutes=5, enable_early_stopping=True, training_data=train_dataset, label_column_name=TARGET_COLNAME, 
n_cross_validations=5, verbosity=logging.INFO, max_cores_per_iteration=-1, compute_target=compute_target, **time_series_settings, ) ``` We will now run the experiment, you can go to Azure ML portal to view the run details. ``` remote_run = experiment.submit(automl_config, show_output=False) remote_run.wait_for_completion() ``` ### Retrieve the best model Below we select the best model from all the training iterations using get_output method. ``` best_run, fitted_model = remote_run.get_output() fitted_model.steps ``` ### Inference We now use the best fitted model from the AutoML Run to make forecasts for the test set. We will do batch scoring on the test dataset which should have the same schema as training dataset. The inference will run on a remote compute. In this example, it will re-use the training compute. ``` test_experiment = Experiment(ws, experiment_name + "_inference") ``` ## Retreiving forecasts from the model We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and expecuted on the remote compute. ``` from run_forecast import run_remote_inference remote_run = run_remote_inference( test_experiment=test_experiment, compute_target=compute_target, train_run=best_run, test_dataset=test_dataset, target_column_name=TARGET_COLNAME, ) remote_run.wait_for_completion(show_output=False) remote_run.download_file("outputs/predictions.csv", f"{output_dir}/predictions.csv") ``` ### Download the prediction result for metrics calcuation The test data with predictions are saved in artifact `outputs/predictions.csv`. We will use it to calculate accuracy metrics and vizualize predictions versus actuals. 
``` X_trans = pd.read_csv(f"{output_dir}/predictions.csv", parse_dates=[TIME_COLNAME]) X_trans.head() # convert forecast in differences to levels def convert_fcst_diff_to_levels(fcst, yt, df_orig): """Convert forecast from first differences to levels.""" fcst = fcst.reset_index(drop=False, inplace=False) fcst["predicted_level"] = fcst["predicted"].cumsum() fcst["predicted_level"] = fcst["predicted_level"].astype(float) + float(yt) # merge actuals out = pd.merge( fcst, df_orig[[TIME_COLNAME, TARGET_COLNAME]], on=[TIME_COLNAME], how="inner" ) out.rename(columns={TARGET_COLNAME: "actual_level"}, inplace=True) return out if DIFFERENCE_SERIES: # convert forecast in differences to the levels INFORMATION_SET_DATE = max(df_train[TIME_COLNAME]) YT = df.query("{} == @INFORMATION_SET_DATE".format(TIME_COLNAME))[TARGET_COLNAME] fcst_df = convert_fcst_diff_to_levels(fcst=X_trans, yt=YT, df_orig=df) else: fcst_df = X_trans.copy() fcst_df["actual_level"] = y_test fcst_df["predicted_level"] = y_predictions del X_trans ``` ### Calculate metrics and save output ``` # compute metrics metrics_df = compute_metrics(fcst_df=fcst_df, metric_name=None, ts_id_colnames=None) # save output metrics_file_name = "{}_metrics.csv".format(experiment_name) fcst_file_name = "{}_forecst.csv".format(experiment_name) plot_file_name = "{}_plot.pdf".format(experiment_name) metrics_df.to_csv(os.path.join(output_dir, metrics_file_name), index=True) fcst_df.to_csv(os.path.join(output_dir, fcst_file_name), index=True) ``` ### Generate and save visuals ``` plot_df = df.query('{} > "2010-01-01"'.format(TIME_COLNAME)) plot_df.set_index(TIME_COLNAME, inplace=True) fcst_df.set_index(TIME_COLNAME, inplace=True) # generate and save plots fig, ax = plt.subplots(dpi=180) ax.plot(plot_df[TARGET_COLNAME], "-g", label="Historical") ax.plot(fcst_df["actual_level"], "-b", label="Actual") ax.plot(fcst_df["predicted_level"], "-r", label="Forecast") ax.legend() ax.set_title("Forecast vs Actuals") ax.set_xlabel(TIME_COLNAME) 
ax.set_ylabel(TARGET_COLNAME) locs, labels = plt.xticks() plt.setp(labels, rotation=45) plt.savefig(os.path.join(output_dir, plot_file_name)) ```
github_jupyter
**In the given kernel, I am using 50 x 50 reduced images of the original Malaria Cell Images Dataset for the purpose of classification.** The reason behind this decision was to see the effect of scaling on the classification algorithm being used. ``` import os import cv2 import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split import keras from keras.utils import np_utils from keras.models import Sequential from keras import optimizers from keras.preprocessing.image import ImageDataGenerator from keras.layers import Dense, Conv2D, Flatten from keras.layers import MaxPooling2D, GlobalAveragePooling2D from keras.layers import Activation, BatchNormalization from keras.layers import Dropout # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) # Any results you write to the current directory are saved as output. parasitized_data = os.listdir('../input/cell_images/Parasitized_VGG/') uninfected_data = os.listdir('../input/cell_images/Uninfected_VGG/') ``` Use the following code to find whether Thumbs.db exists in your list of images. It consequently removes the concerned file. 
``` # images_1 = os.listdir(base_address_1) # images_0 = os.listdir(base_address_0) # # Removing 'Thumbs.db' file from the list of images # for img in images_1: # if 'Thumbs.db' in img: # idx = images_1.index(img) # images_1.pop(idx) # for img in images_0: # if 'Thumbs.db' in img: # idx = images_0.index(img) # images_0.pop(idx) print('The image files for Uninfected are: '+str(len(uninfected_data))) print('The image files for Infected are: '+str(len(parasitized_data))) data = [] labels = [] for img in parasitized_data: try: img_array = cv2.imread('../input/cell_images/Parasitized_VGG/' + img) data.append(img_array) labels.append(1) except: print("Runtime Exception due to image at index "+ str(parasitized_data.index(img))) for img in uninfected_data: try: img_array = cv2.imread('../input/cell_images/Uninfected_VGG' + "/" + img) data.append(img_array) labels.append(0) except: print("Runtime Exception due to image at index "+ str(uninfected_data.index(img))) print(len(data)) print(len(labels)) # Visualization an uninfected image img = cv2.imread('../input/cell_images/Uninfected_VGG' + "/" + uninfected_data[0]) plt.imshow(img) # Visualization an infected image img = cv2.imread('../input/cell_images/Parasitized_VGG' + "/" + parasitized_data[0]) plt.imshow(img) ``` **Conversion of the lists into arrays** ``` image_data = np.array(data) labels = np.array(labels) # Shuffling the data idx = np.arange(image_data.shape[0]) np.random.shuffle(idx) image_data = image_data[idx] labels = labels[idx] ``` **Train- Test Split** ``` # Training and Test data split X_train, X_test, y_train, y_test = train_test_split(image_data,labels, test_size=0.20, random_state = 101) # One- Hot Encoding the lables y_train = np_utils.to_categorical(y_train, num_classes = 2) y_test = np_utils.to_categorical(y_test, num_classes = 2) # Normalizing the data X_train = X_train.astype('float32')/255 X_test = X_test.astype('float32')/255 
######################################################################################################################## # Defining the model ########################################################################################################################### model = Sequential() model.add(Conv2D(filters=16,kernel_size=2,padding="same",activation="relu",input_shape=(50,50,3))) model.add(MaxPooling2D(pool_size=2)) model.add(BatchNormalization(axis = -1)) model.add(Dropout(0.3)) model.add(Conv2D(filters=32,kernel_size=2,padding="same",activation="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Conv2D(filters=64,kernel_size=2,padding="same",activation="relu")) model.add(MaxPooling2D(pool_size=2)) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(500,activation="relu")) model.add(Dropout(0.2)) model.add(Dense(2, activation = 'softmax')) model.summary() # Defining the optimizer, loss, performance of the metrcis of the model batches = 50 optim = optimizers.Adam(lr = 0.001, decay = 0.001 / batches) model.compile(loss='categorical_crossentropy', optimizer=optim, metrics=['accuracy']) model.fit(X_train,y_train,batch_size=batches,epochs=25,verbose=1) ``` * We get an accuracy of 98.27% on the training data with 25 epochs. ``` model.evaluate(X_test,y_test, steps = 1) ``` ...and a validation accuracy of > 95%.
github_jupyter
``` import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data # set random seed for comparing the two result calculations tf.set_random_seed(1) # this is data mnist = input_data.read_data_sets('MNIST_data', one_hot=True) # hyperparameters lr = 0.001 training_iters = 100000 batch_size = 128 n_inputs = 28 # MNIST data input (img shape: 28*28) n_steps = 28 # time steps n_hidden_units = 128 # neurons in hidden layer n_classes = 10 # MNIST classes (0-9 digits) # tf Graph input x = tf.placeholder(tf.float32, [None, n_steps, n_inputs]) y = tf.placeholder(tf.float32, [None, n_classes]) # Define weights weights = { # (28, 128) 'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])), # (128, 10) 'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes])) } biases = { # (128, ) 'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])), # (10, ) 'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ])) } print ("parameters ready") # tf Graph input x = tf.placeholder(tf.float32, [None, n_steps, n_inputs]) y = tf.placeholder(tf.float32, [None, n_classes]) # Define weights weights = { # (28, 128) 'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])), # (128, 10) 'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes])) } biases = { # (128, ) 'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])), # (10, ) 'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ])) } def RNN(X, weights, biases): # hidden layer for input to cell ######################################## # transpose the inputs shape from # X ==> (128 batch * 28 steps, 28 inputs) X = tf.reshape(X, [-1, n_inputs]) # into hidden # X_in = (128 batch * 28 steps, 128 hidden) X_in = tf.matmul(X, weights['in']) + biases['in'] # X_in ==> (128 batch, 28 steps, 128 hidden) X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units]) # cell ########################################## # basic LSTM Cell. 
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True) else: cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units) # lstm cell is divided into two parts (c_state, h_state) init_state = cell.zero_state(batch_size, dtype=tf.float32) # You have 2 options for following step. # 1: tf.nn.rnn(cell, inputs); # 2: tf.nn.dynamic_rnn(cell, inputs). # If use option 1, you have to modified the shape of X_in, go and check out this: # https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/recurrent_network.py # In here, we go for option 2. # dynamic_rnn receive Tensor (batch, steps, inputs) or (steps, batch, inputs) as X_in. # Make sure the time_major is changed accordingly. outputs, final_state = tf.nn.dynamic_rnn(cell, X_in, initial_state=init_state, time_major=False) # hidden layer for output as the final results ############################################# # results = tf.matmul(final_state[1], weights['out']) + biases['out'] # # or # unpack to list [(batch, outputs)..] 
* steps if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: outputs = tf.unpack(tf.transpose(outputs, [1, 0, 2])) # states is the last outputs else: outputs = tf.unstack(tf.transpose(outputs, [1,0,2])) results = tf.matmul(outputs[-1], weights['out']) + biases['out'] # shape = (128, 10) return results pred = RNN(x, weights, biases) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) train_op = tf.train.AdamOptimizer(lr).minimize(cost) correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) print ("Network ready") with tf.Session() as sess: # tf.initialize_all_variables() no long valid from # 2017-03-02 if using tensorflow >= 0.12 if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1: init = tf.initialize_all_variables() else: init = tf.global_variables_initializer() sess.run(init) step = 0 while step * batch_size < training_iters: batch_xs, batch_ys = mnist.train.next_batch(batch_size) batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs]) _, acc, loss=sess.run([train_op,accuracy,cost], feed_dict={ x: batch_xs, y: batch_ys, }) if step % 20 == 0: print ("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \ "{:.6f}".format(loss) + ", Training Accuracy= " + \ "{:.5f}".format(acc)) step += 1 ```
github_jupyter
``` #hide #skip ! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab #hide #all_slow ``` To use `fastai.medical.imaging` you'll need to: ```bash conda install pyarrow pip install pydicom kornia opencv-python scikit-image ``` To run this tutorial on Google Colab, you'll need to uncomment the following two lines and run the cell: ``` #!conda install pyarrow #!pip install pydicom kornia opencv-python scikit-image nbdev from fastai.basics import * from fastai.callback.all import * from fastai.vision.all import * from fastai.medical.imaging import * import pydicom import pandas as pd #hide from nbdev.showdoc import * ``` # Tutorial - Binary classification of chest X-rays > In this tutorial we will build a classifier that distinguishes between chest X-rays with pneumothorax and chest X-rays without pneumothorax. The image data is loaded directly from the DICOM source files, so no prior DICOM data handling is needed. This tutorial also goes through what DICOM images are and review at a high level how to evaluate the results of the classifier. ## Download and import of X-ray DICOM files First, we will use the `untar_data` function to download the _siim_small_ folder containing a subset (250 DICOM files, \~30MB) of the [SIIM-ACR Pneumothorax Segmentation](https://doi.org/10.1007/s10278-019-00299-9) \[1\] dataset. The downloaded _siim_small_ folder will be stored in your _\~/.fastai/data/_ directory. The variable `pneumothorax-source` will store the absolute path to the _siim_small_ folder as soon as the download is complete. ``` pneumothorax_source = untar_data(URLs.SIIM_SMALL) ``` The _siim_small_ folder has the following directory/file structure: ![siim_folder_structure.jpg](images/siim_folder_structure.jpeg) ## What are DICOMs? 
**DICOM**(**D**igital **I**maging and **CO**mmunications in **M**edicine) is the de-facto standard that establishes rules that allow medical images(X-Ray, MRI, CT) and associated information to be exchanged between imaging equipment from different vendors, computers, and hospitals. The DICOM format provides a suitable means that meets health infomation exchange (HIE) standards for transmision of health related data among facilites and HL7 standards which is the messaging standard that enables clinical applications to exchange data DICOM files typically have a `.dcm` extension and provides a means of storing data in separate ‘tags’ such as patient information as well as image/pixel data. A DICOM file consists of a header and image data sets packed into a single file. By extracting data from these tags one can access important information regarding the patient demographics, study parameters, etc. 16 bit DICOM images have values ranging from `-32768` to `32768` while 8-bit greyscale images store values from `0` to `255`. The value ranges in DICOM images are useful as they correlate with the [Hounsfield Scale](https://en.wikipedia.org/wiki/Hounsfield_scale) which is a quantitative scale for describing radiodensity ### Plotting the DICOM data To analyze our dataset, we load the paths to the DICOM files with the `get_dicom_files` function. When calling the function, we append _train/_ to the `pneumothorax_source` path to choose the folder where the DICOM files are located. We store the path to each DICOM file in the `items` list. ``` items = get_dicom_files(pneumothorax_source/f"train/") ``` Next, we split the `items` list into a train `trn` and validation `val` list using the `RandomSplitter` function: ``` trn,val = RandomSplitter()(items) ``` Pydicom is a python package for parsing DICOM files, making it easier to access the `header` of the DICOM as well as coverting the raw `pixel_data` into pythonic structures for easier manipulation. 
`fastai.medical.imaging` uses `pydicom.dcmread` to load the DICOM file. To plot an X-ray, we can select an entry in the `items` list and load the DICOM file with `dcmread`. ``` patient = 7 xray_sample = items[patient].dcmread() ``` To view the `header` ``` xray_sample ``` Explanation of each element is beyond the scope of this tutorial but [this](http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_C.7.6.3.html#sect_C.7.6.3.1.4) site has some excellent information about each of the entries Some key pointers on the tag information above: - **Pixel Data** (7fe0 0010) - This is where the raw pixel data is stored. The order of pixels encoded for each image plane is left to right, top to bottom, i.e., the upper left pixel (labeled 1,1) is encoded first - **Photometric Interpretation** (0028, 0004) - also known as color space. In this case it is `MONOCHROME2` where pixel data is represented as a single monochrome image plane where low values=dark, high values=bright. If the colorspace was `MONOCHROME` then the low values=bright and high values=dark info. - **Samples per Pixel** (0028, 0002) - This should be 1 as this image is monochrome. This value would be 3 if the color space was RGB for example - **Bits Stored** (0028 0101) - Number of bits stored for each pixel sample. Typical 8 bit images have a pixel range between `0` and `255` - **Pixel Represenation**(0028 0103) - can either be unsigned(0) or signed(1) - **Lossy Image Compression** (0028 2110) - `00` image has not been subjected to lossy compression. `01` image has been subjected to lossy compression. - **Lossy Image Compression Method** (0028 2114) - states the type of lossy compression used (in this case `ISO_10918_1` represents JPEG Lossy Compression) - **Pixel Data** (7fe0, 0010) - Array of 161452 elements represents the image pixel data that pydicom uses to convert the pixel data into an image. What does `PixelData` look like? 
``` xray_sample.PixelData[:200] ``` Because of the complexity in interpreting `PixelData`, pydicom provides an easy way to get it in a convenient form: `pixel_array` which returns a `numpy.ndarray` containing the pixel data: ``` xray_sample.pixel_array, xray_sample.pixel_array.shape ``` You can then use the `show` function to view the image ``` xray_sample.show() ``` You can also conveniently create a dataframe with all the `tag` information as columns for all the images in a dataset by using `from_dicoms` ``` dicom_dataframe = pd.DataFrame.from_dicoms(items) dicom_dataframe[:5] ``` Next, we need to load the labels for the dataset. We import the _labels.csv_ file using pandas and print the first five entries. The **file** column shows the relative path to the _.dcm_ file and the **label** column indicates whether the chest x-ray has a pneumothorax or not. ``` df = pd.read_csv(pneumothorax_source/f"labels.csv") df.head() ``` Now, we use the `DataBlock` class to prepare the DICOM data for training. As we are dealing with DICOM images, we need to use `PILDicom` as the `ImageBlock` category. This is so the `DataBlock` will know how to open the DICOM images. As this is a binary classification task we will use `CategoryBlock` ``` pneumothorax = DataBlock(blocks=(ImageBlock(cls=PILDicom), CategoryBlock), get_x=lambda x:pneumothorax_source/f"{x[0]}", get_y=lambda x:x[1], batch_tfms=aug_transforms(size=224)) dls = pneumothorax.dataloaders(df.values, num_workers=0) ``` Additionally, we plot a first batch with the specified transformations: ``` dls = pneumothorax.dataloaders(df.values) dls.show_batch(max_n=16) ``` ## Training We can then use the `cnn_learner` function and initiate the training. ``` learn = cnn_learner(dls, resnet34, metrics=accuracy) ``` Note that if you do not select a loss or optimizer function, fastai will try to choose the best selection for the task. 
You can check the loss function by calling `loss_func` ``` learn.loss_func ``` And you can do the same for the optimizer by calling `opt_func` ``` learn.opt_func ``` Use `lr_find` to try to find the best learning rate ``` learn.lr_find() learn.fit_one_cycle(1) learn.predict(pneumothorax_source/f"train/Pneumothorax/000004.dcm") ``` When predicting on an image `learn.predict` returns a tuple (class, class tensor and [probabilities of each class]).In this dataset there are only 2 classes `No Pneumothorax` and `Pneumothorax` hence the reason why each probability has 2 values, the first value is the probability whether the image belongs to `class 0` or `No Pneumothorax` and the second value is the probability whether the image belongs to `class 1` or `Pneumothorax` ``` tta = learn.tta(use_max=True) learn.show_results(max_n=16) interp = Interpretation.from_learner(learn) interp.plot_top_losses(2) ``` ## Result Evaluation Medical models are predominantly high impact so it is important to know how good a model is at detecting a certain condition. This model has an accuracy of 56%. Accuracy can be defined as the number of correctly predicted data points out of all the data points. 
However in this context we can define accuracy as the probability that the model is correct and the patient has the condition **PLUS** the probability that the model is correct and the patient does not have the condition There are some other key terms that need to be used when evaluating medical models: **False Positive & False Negative** - **False Positive** is an error in which a test result improperly indicates presence of a condition, such as a disease (the result is positive), when in reality it is not present - **False Negative** is an error in which a test result improperly indicates no presence of a condition (the result is negative), when in reality it is present **Sensitivity & Specificity** - **Sensitivity or True Positive Rate** is where the model classifies a patient has the disease given the patient actually does have the disease. Sensitivity quantifies the avoidance of false negatives Example: A new test was tested on 10,000 patients, if the new test has a sensitivity of 90% the test will correctly detect 9,000 (True Positive) patients but will miss 1000 (False Negative) patients that have the condition but were tested as not having the condition - **Specificity or True Negative Rate** is where the model classifies a patient as not having the disease given the patient actually does not have the disease. Specificity quantifies the avoidance of false positives [Understanding and using sensitivity, specificity and predictive values](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2636062/) is a great paper if you are interested in learning more about understanding sensitivity, specificity and predictive values. **PPV and NPV** Most medical testing is evaluated via **PPV** (Positive Predictive Value) or **NPV** (Negative Predictive Value). 
**PPV** - if the model predicts a patient has a condition what is the probability that the patient actually has the condition **NPV** - if the model predicts a patient does not have a condition what is the probability that the patient actually does not have the condition The ideal value of the PPV, with a perfect test, is 1 (100%), and the worst possible value would be zero The ideal value of the NPV, with a perfect test, is 1 (100%), and the worst possible value would be zero **Confusion Matrix** The confusion matrix is plotted against the `valid` dataset ``` interp = ClassificationInterpretation.from_learner(learn) losses,idxs = interp.top_losses() len(dls.valid_ds)==len(losses)==len(idxs) interp.plot_confusion_matrix(figsize=(7,7)) ``` You can also reproduce the results interpreted from `plot_confusion_matrix` like so: ``` upp, low = interp.confusion_matrix() tn, fp = upp[0], upp[1] fn, tp = low[0], low[1] print(tn, fp, fn, tp) ``` Note that **Sensitivity = True Positive/(True Positive + False Negative)** ``` sensitivity = tp/(tp + fn) sensitivity ``` In this case the model has a sensitivity of 40% and hence is only capable of correctly detecting 40% True Positives (i.e. patients who have Pneumothorax) but will miss 60% of False Negatives (patients that actually have Pneumothorax but were told they did not! Not a good situation to be in). This is also known as a **Type II error** **Specificity = True Negative/(False Positive + True Negative)** ``` specificity = tn/(fp + tn) specificity ``` The model has a specificity of 63% and hence can correctly detect 63% of the time that a patient does **not** have Pneumothorax but will incorrectly classify that 37% of the patients have Pneumothorax (False Positive) but actually do not. 
This is also known as a **Type I error** **Positive Predictive Value (PPV)** ``` ppv = tp/(tp+fp) ppv ``` In this case the model performs poorly in correctly predicting patients with Pneumothorax **Negative Predictive Value (NPV)** ``` npv = tn/(tn+fn) npv ``` This model is better at predicting patients with No Pneumothorax **Calculating Accuracy** The accuracy of this model as mentioned before was 56% but how was this calculated? We can consider accuracy as: **accuracy = sensitivity x prevalence + specificity * (1 - prevalence)** Where **prevalence** is a statistical concept referring to the number of cases of a disease that are present in a particular population at a given time. The prevalence in this case is how many patients in the valid dataset have the condition compared to the total number. To view the files in the valid dataset you call `dls.valid_ds.cat` ``` val = dls.valid_ds.cat #val[0] ``` There are 15 Pneumothorax images in the valid set (which has a total of 50 images and can be checked by using `len(dls.valid_ds)`) so the prevalence here is 15/50 = 0.3 ``` prevalence = 15/50 prevalence accuracy = (sensitivity * prevalence) + (specificity * (1 - prevalence)) accuracy ``` _**Citations:**_ \[1\] _Filice R et al. Crowdsourcing pneumothorax annotations using machine learning annotations on the NIH chest X-ray dataset. J Digit Imaging (2019). https://doi.org/10.1007/s10278-019-00299-9_
github_jupyter
<img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> # Qiskit Aer: Noise Transformation The latest version of this notebook is available on https://github.com/Qiskit/qiskit-tutorials. ## Introduction This notebook shows how to use the Qiskit Aer `noise.utils.approximate_quantum_error` and `noise.utils.approximate_noise_model` to transform quantum noise channels into a different, more suitable, noise channel. Our guiding example is Clifford simulation. A Clifford simulator can efficiently simulate quantum computations which include gates only from a limited, non-universal set of gates (the Clifford gates). Not all quantum noises can be added to such simulations; hence, we aim to find a "close" noise channel which can be simulated in a Clifford simulator. We begin by importing the transformation functions. ``` from qiskit.providers.aer.noise.utils import approximate_quantum_error from qiskit.providers.aer.noise.utils import approximate_noise_model ``` The name "approximate" suggests that these functions generate the closest (in the Hilbert-Schmidt metric) error possible to the given one. We demonstrate the approximation using several standard error channels defined in Qiskit. ``` from qiskit.providers.aer.noise.errors.standard_errors import amplitude_damping_error from qiskit.providers.aer.noise.errors.standard_errors import reset_error from qiskit.providers.aer.noise.errors.standard_errors import pauli_error import numpy as np ``` ## Overview A 1-qubit quantum channel is a function $\mathcal{C}:\mathbb{C}^{2\times2}\to\mathbb{C}^{2\times2}$ mapping density operators to density operators (to ensure the image is a density operator $\mathcal{C}$ is required to be completely positive and trace preserving, **CPTP**). 
Given quantum channels $\mathcal{E}_{1},\dots,\mathcal{E}_{r}$, and probabilities $p_1, p_2, \dots, p_r$ such that $0\le p_i \le 1$ and $p_1+\dots +p_r = 1$, a new quantum channel $\mathcal{C}_\mathcal{E}$ can be constructed such that $\mathcal{C}_\mathcal{E}(\rho)$ has the effect of choosing the channel $\mathcal{E}_i$ with probability $p_i$ and applying it to $\rho$. The noise transformation module solves the following optimization problem: Given a channel $\mathcal{C}$ ("goal") and a list of channels $\mathcal{E}_{1},\dots,\mathcal{E}_{r}$, find the probabilities $p_1, p_2, \dots, p_r$ minimizing $D(\mathcal{C}, \mathcal{C}_\mathcal{E})$ according to some distance metric $D$ (the Hilbert-Schmidt metric is currently used). To ensure the approximation is honest, in the sense that the approximate error channel serves as an "upper bound" for the actual error channel, we add the additional honesty constraint $$\text{F}(I,\mathcal{C})\ge F(I,\mathcal{C}_\mathcal{E})$$ Where $\text{F}$ is a fidelity measure and $I$ is the identity channel. ## Example: Approximating amplitude damping noise with reset noise. **Amplitude damping** noise is described by a single parameter $0\le \gamma \le 1$ and given by the Kraus operators: $$\left(\begin{array}{cc} 1 & 0\\ 0 & \sqrt{1-\gamma} \end{array}\right),\left(\begin{array}{cc} 0 & \sqrt{\gamma}\\ 0 & 0 \end{array}\right)$$ **Reset** error is described by probabilities $0\le p, q\le 1$ such that $p+q\le 1$ and given by the Kraus operators: $$\left(\begin{array}{cc} \sqrt{p} & 0\\ 0 & 0 \end{array}\right),\left(\begin{array}{cc} 0 & \sqrt{p}\\ 0 & 0 \end{array}\right),\left(\begin{array}{cc} 0 & 0\\ \sqrt{q} & 0 \end{array}\right),\left(\begin{array}{cc} 0 & 0\\ 0 & \sqrt{q} \end{array}\right)$$ This can be thought of as "resetting" the quantum state of the affected qubit to $\left|0\right\rangle$ with probability $p$, to $\left|1\right\rangle$ with probability $q$, and do nothing with probability $1-(p+q)$. 
It is not too difficult to determine analytically the best values of $p,q$ to approximate a $\gamma$ amplitude damping channel; see the details __[here](https://arxiv.org/abs/1207.0046)__. The best approximation is $$p=\frac{1}{2}\left(1+\gamma-\sqrt{1-\gamma}\right), q=0$$ ``` gamma = 0.23 error = amplitude_damping_error(gamma) results = approximate_quantum_error(error, operator_string="reset") ``` We only needed the above code to perform the actual approximation. ``` print(results) p = (1 + gamma - np.sqrt(1 - gamma)) / 2 q = 0 print("") print("Expected results:") print("P(0) = {}".format(1-(p+q))) print("P(1) = {}".format(p)) print("P(2) = {}".format(q)) ``` We got the results predicted analytically. ## Different input types The approximation function is given two inputs: The error channel to approximate, and a set of error channels that can be used in constructing the approximation. The **error channel** to approximate can be given as any input that can be converted to the `QuantumError` object. As an example, we explicitly construct the Kraus matrices of amplitude damping and pass them to the same approximation function as before: ``` gamma = 0.23 K0 = np.array([[1,0],[0,np.sqrt(1-gamma)]]) K1 = np.array([[0,np.sqrt(gamma)],[0,0]]) results = approximate_quantum_error((K0, K1), operator_string="reset") print(results) ``` The **error operators** that are used to construct the approximating channel can be either given as a list, a dictionary or a string indicating hard-coded channels. Any channel can be either a list of Kraus operators, or `QuantumError` objects. The identity channel does not need to be passed directly; it is always implicitly used. 
As an example, we approximate amplitude damping using an explicit Kraus representation for reset noises: ``` reset_to_0 = [np.array([[1,0],[0,0]]), np.array([[0,1],[0,0]])] reset_to_1 = [np.array([[0,0],[1,0]]), np.array([[0,0],[0,1]])] reset_kraus = (reset_to_0, reset_to_1) gamma = 0.23 error = amplitude_damping_error(gamma) results = approximate_quantum_error(error, operator_list=reset_kraus) print(results) ``` Note the difference in the output channel: The probabilities are the same, but the input Kraus operators were converted to general Kraus channels, which cannot be used in a Clifford simulator. Hence, it is always better to pass a `QuantumError` object instead of the Kraus matrices, when possible.
github_jupyter
``` import os os.chdir("/Users/patrick/Documents/skole/current_courses/VeriNet") import matplotlib.pyplot as plt import numpy as np import torch import matplotlib.pyplot as plt from src.algorithm.verinet import VeriNet from src.data_loader.input_data_loader import load_neurify_mnist from src.data_loader.nnet import NNET from src.algorithm.verification_objectives import ArbitraryAdversarialObjective from src.algorithm.verinet_util import Status %load_ext autoreload %autoreload 2 ``` ### Cifar 10 samples ``` num_images = 100 images = load_neurify_mnist("./data/mnist_neurify", range(num_images)).reshape(num_images, -1) plt.subplot(2,5,1) plt.imshow(images[3].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,2) plt.imshow(images[2].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,3) plt.imshow(images[1].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,4) plt.imshow(images[18].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,5) plt.imshow(images[4].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,6) plt.imshow(images[8].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,7) plt.imshow(images[-2].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,8) plt.imshow(images[0].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,9) plt.imshow(images[-16].reshape(28,28), cmap="gray") plt.axis("off") plt.subplot(2,5,10) plt.imshow(images[-1].reshape(28,28), cmap="gray") plt.axis("off") ``` ### Plot Relu linear relaxation ``` x = np.linspace(-2, 2, 1000) x_to_zero = np.linspace(-2, 0, 1000) x_zero_up = np.linspace(0, 2, 1000) x_long = np.linspace(-4, 4, 1000) relu = x_long.copy() relu[relu < 0] = 0 relax_low_1 = np.zeros_like(x_to_zero) relax_low_2 = x_zero_up relax_up = 0.5 * x + 1 v_line = np.linspace(-0.1, 0.1, 10) plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.xticks([]) plt.yticks([]) plt.plot(x_long, relu, "black", x_to_zero, relax_low_1, x_zero_up, 
relax_low_2, x, relax, np.zeros_like(v_line) - 2, v_line, "red", np.zeros_like(v_line) + 2, v_line, "red") plt.axis((-3, 3, -1, 3)) plt.fill_between(x, np.concatenate((relax_low_1, relax_low_2))[::2], relax_up, alpha=0.5) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") ``` ### Neurify Relu linear relaxation ``` x = np.linspace(-2, 2, 1000) x_long = np.linspace(-4, 4, 1000) relu = x_long.copy() relu[relu < 0] = 0 relax_low = 0.5 * x relax_up = 0.5 * x + 1 v_line = np.linspace(-0.1, 0.1, 10) plt.xticks([]) plt.yticks([]) v_line = np.linspace(-0.1, 0.1, 10) plt.plot(x_long, relu, "k", x, relax_low, "b", x, relax_up, "g", np.zeros_like(v_line) - 2, v_line, "r", np.zeros_like(v_line) + 2, v_line, "r") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-3, 3, -1.5, 2.5)) plt.fill_between(x, 0.5*x, 0.5*x + 1, alpha=0.5) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") ``` ### Sigmoid naive linear relaxation ``` start, end = -4, 0.5 x_long = np.linspace(-6, 2, 1000) x = np.linspace(start, end, 1000) sigmoid = lambda x: 1/(1 + np.exp(-x)) d_sigmoid = lambda x: sigmoid(x)*(1 - sigmoid(x)) s = sigmoid(x) min_d = d_sigmoid(x[0]) relax_low = min_d * x + (s[0] - min_d*start) relax_up = min_d * x + (s[-1] - min_d*end) v_line = np.linspace(-0.1, 0.1, 10) relax_low_opt = d_tangent*x - d_tangent * tangent_point + sigmoid(tangent_point) relax_up_opt = ((s[-1] - s[0])/(x[-1] - x[0]))*(x - x[-1]) + (s[-1]) plt.xticks([]) plt.yticks([]) plt.plot(x_long, sigmoid(x_long), "k", x, relax_low, "r", x, relax_up, "r", x, relax_low_opt, "b", x, relax_up_opt, "b") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-5, 1.5, -0.2, 0.75)) plt.fill_between(x, relax_low, relax_up, alpha=0.5, facecolor="red") plt.fill_between(x, relax_low_opt, relax_up_opt, alpha=0.5) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") start, end = -4, 0.5 x_long = np.linspace(-6, 4, 1000) x = 
np.linspace(start, end, 1000) sigmoid = lambda x: 1/(1 + np.exp(-x)) d_sigmoid = lambda x: sigmoid(x)*(1 - sigmoid(x)) s = sigmoid(x) min_d = d_sigmoid(x[0]) relax_low = min_d * x + (s[0] - min_d*start) relax_up = min_d * x + (s[-1] - min_d*end) tangent_point = (x[0]**2 - x[-1]**2)/(2*(x[0] - x[-1])) d_tangent = d_sigmoid(tangent_point) relax_low_opt = d_tangent*x - d_tangent * tangent_point + sigmoid(tangent_point) relax_up_opt = ((s[-1] - s[0])/(x[-1] - x[0]))*(x - x[-1]) + (s[-1]) v_line = np.linspace(-0.1, 0.1, 10) plt.xticks([]) plt.yticks([]) plt.plot(x_long, sigmoid(x_long), "k", x, relax_low, "b", x, relax_up, "g") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-6, 4, -0.2, 1)) plt.fill_between(x, relax_low, relax_up, alpha=0.5) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") ``` ### Naive vs. symbolic interval propagation ``` x = np.linspace(-2, 2, 1000) relu = x.copy() relu[relu < 0] = 0 v_line = np.linspace(-0.1, 0.1, 10) plt.plot(x, relu, "k") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-3, 3, -1, 3)) plt.fill_between(x, np.zeros_like(x), np.zeros_like(x) + 2, facecolor="red", alpha=0.4) plt.fill_between(x, 0.5*x, 0.5*x + 1, facecolor="blue", alpha=0.4) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") plt.legend(["Relu", "_no_label_", "_no_label", "Symbolic relaxation", "Naive relaxation"]) ``` ### Adversarial example ``` nnet = NNET("./data/models_nnet/neurify/mnist24.nnet") model = nnet.from_nnet_to_verinet_nn() solver = VeriNet(model, max_procs=20) image = load_neurify_mnist("./data/mnist_neurify/test_images_100/", list(range(100))).reshape((100, -1))[2] correct_class = int(model(torch.Tensor(image)).argmax(dim=1)) eps = 5 input_bounds = np.zeros((*image.shape, 2), dtype=np.float32) input_bounds[:, 0] = image - eps input_bounds[:, 1] = image + eps input_bounds = nnet.normalize_input(input_bounds) objective = 
ArbitraryAdversarialObjective(correct_class, input_bounds, output_size=10) solver.verify(objective, timeout=3600, no_split=False, verbose=False) counter_example = np.array(solver.counter_example) diff = (abs(counter_example - nnet.normalize_input(image)) * 255).astype(np.int32) * 10 print(correct_class) print(int(model(torch.Tensor(counter_example)).argmax(dim=1))) plt.subplot(1,3,1) plt.imshow(image.reshape((28,28)), cmap="gray") plt.axis('off') plt.subplot(1,3,2) plt.imshow(diff.reshape((28, 28)), cmap="gray", vmin=0, vmax=255) plt.axis('off') plt.subplot(1,3,3) plt.imshow(counter_example.reshape(28, 28), cmap="gray") plt.axis('off') #print(counter_example) start, end = -4, 0.5 x_long = np.linspace(-6, 6, 1000) x = np.linspace(start, end, 1000) sigmoid = lambda x: 1/(1 + np.exp(-x)) d_sigmoid = lambda x: sigmoid(x)*(1 - sigmoid(x)) s = sigmoid(x) min_d = d_sigmoid(x[0]) relax_up_opt = ((s[-1] - s[0])/(x[-1] - x[0]))*(x - x[-1]) + (s[-1]) v_line_start = np.linspace(0, s[0], 100) v_line_end = np.linspace(0, s[-1], 100) plt.xticks([]) plt.yticks([]) plt.plot(x_long, sigmoid(x_long), "k", x, relax_up_opt, "b", np.zeros_like(v_line_start)+start, v_line_start, "r--", np.zeros_like(v_line_end)+end, v_line_end, "r--") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-5, 5, -0.2, 1.2)) plt.fill_between(x, s, relax_up_opt, alpha=0.5) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") start, end = -0.5, 4 x_long = np.linspace(-6, 6, 1000) x = np.linspace(start, end, 1000) sigmoid = lambda x: 1/(1 + np.exp(-x)) d_sigmoid = lambda x: sigmoid(x)*(1 - sigmoid(x)) s = sigmoid(x) min_d = d_sigmoid(x[0]) v_line = np.linspace(-0.1, 0.1, 10) tangent_point = (x[-1]**2 - x[0]**2)/(2*(x[-1] - x[0])) d_tangent = d_sigmoid(tangent_point) relax_up_opt = d_tangent*x - d_tangent * tangent_point + sigmoid(tangent_point) v_line_start = np.linspace(0, relax_up_opt[0], 100) v_line_end = np.linspace(0, relax_up_opt[-1], 100) v_line_tan = 
np.linspace(0, sigmoid(tangent_point), 100) plt.xticks([]) plt.yticks([]) plt.plot(x_long, sigmoid(x_long), "k", x, relax_up_opt, "b", np.zeros_like(v_line_start)+start, v_line_start, "r--", np.zeros_like(v_line_end)+end, v_line_end, "r--", np.zeros_like(v_line_tan)+tangent_point, v_line_tan, "g--") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-5, 5, -0.2, 1.2)) plt.fill_between(x, s, relax_up_opt, alpha=0.5) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") start, end = -2, 3 x_long = np.linspace(-6, 6, 1000) x = np.linspace(start, end, 1000) sigmoid = lambda x: 1/(1 + np.exp(-x)) d_sigmoid = lambda x: sigmoid(x)*(1 - sigmoid(x)) s = sigmoid(x) min_d = d_sigmoid(x[0]) v_line = np.linspace(-0.1, 0.1, 10) tangent_point = (x[-1]**2 - x[0]**2)/(2*(x[-1] - x[0])) d_tangent = d_sigmoid(tangent_point) relax_up_opt = d_tangent*x - d_tangent * tangent_point + sigmoid(tangent_point) v_line_start = np.linspace(0, relax_up_opt[0], 100) v_line_end = np.linspace(0, relax_up_opt[-1], 100) v_line_tan = np.linspace(0, sigmoid(tangent_point), 100) plt.xticks([]) plt.yticks([]) plt.plot(x_long, sigmoid(x_long), "k", x, relax_up_opt, "b", np.zeros_like(v_line_start)+start, v_line_start, "r--", np.zeros_like(v_line_end)+end, v_line_end, "r--", np.zeros_like(v_line_tan)+tangent_point, v_line_tan, "g--") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-5, 5, -0.2, 1.2)) #plt.fill_between(x, s, relax_up_opt, alpha=0.5) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") start, end = -2, 3 x_long = np.linspace(-6, 6, 1000) x = np.linspace(start, end, 1000) sigmoid = lambda x: 1/(1 + np.exp(-x)) d_sigmoid = lambda x: sigmoid(x)*(1 - sigmoid(x)) s = sigmoid(x) min_d = d_sigmoid(x[0]) v_line = np.linspace(-0.1, 0.1, 10) tangent_point = 0.9 tangent_point_opt = (x[-1]**2 - x[0]**2)/(2*(x[-1] - x[0])) d_tangent = d_sigmoid(tangent_point) relax_up_opt = d_tangent*x - d_tangent * 
tangent_point + sigmoid(tangent_point) v_line_start = np.linspace(0, relax_up_opt[0], 100) v_line_end = np.linspace(0, relax_up_opt[-1], 100) v_line_tan = np.linspace(0, sigmoid(tangent_point), 100) v_line_tan_opt = np.linspace(0, sigmoid(tangent_point_opt), 100) plt.xticks([]) plt.yticks([]) plt.plot(x_long, sigmoid(x_long), "k", x, relax_up_opt, "b", np.zeros_like(v_line_start)+start, v_line_start, "r--", np.zeros_like(v_line_end)+end, v_line_end, "r--", np.zeros_like(v_line_tan)+tangent_point, v_line_tan, "g--", np.zeros_like(v_line_tan) + tangent_point_opt, v_line_tan_opt, "y--") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-5, 5, -0.2, 1.2)) plt.fill_between(x, s, relax_up_opt, alpha=0.5) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") start, end = -2, 3 x_long = np.linspace(-6, 6, 1000) x = np.linspace(start, end, 1000) sigmoid = lambda x: 1/(1 + np.exp(-x)) d_sigmoid = lambda x: sigmoid(x)*(1 - sigmoid(x)) s = sigmoid(x) min_d = d_sigmoid(x[0]) v_line = np.linspace(-0.1, 0.1, 10) d_tangent = (s[-1] - s[0]) / (x[-1] - x[0]) intercept = d_tangent*x - d_tangent * x[0] + s[0] v_line_start = np.linspace(0, s[0], 100) v_line_end = np.linspace(0, s[-1], 100) plt.xticks([]) plt.yticks([]) plt.plot(x_long, sigmoid(x_long), "k", x, intercept, "b", np.zeros_like(v_line_start)+start, v_line_start, "r--", np.zeros_like(v_line_end)+end, v_line_end, "r--") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-5, 5, -0.2, 1.2)) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") start, end = -2, 3 x_long = np.linspace(-6, 6, 1000) x = np.linspace(start, end, 1000) sigmoid = lambda x: 1/(1 + np.exp(-x)) d_sigmoid = lambda x: sigmoid(x)*(1 - sigmoid(x)) s = sigmoid(x) min_d = d_sigmoid(x[0]) v_line = np.linspace(-0.1, 0.1, 10) d_tangent = (s[-1] - s[0]) / (x[-1] - x[0]) intercept = d_tangent*x - d_tangent * x[0] + s[0] tangent = intercept + 0.12 v_line_tan = np.linspace(0, 
sigmoid(1.3), 100) v_line_start = np.linspace(0, s[0], 100) v_line_end = np.linspace(0, s[-1], 100) plt.xticks([]) plt.yticks([]) plt.plot(x_long, sigmoid(x_long), "k", x, intercept, "b--", x, tangent, "b", np.zeros_like(v_line_start)+start, v_line_start, "r--", np.zeros_like(v_line_tan)+1.3, v_line_tan, "g--", np.zeros_like(v_line_end)+end, v_line_end, "r--") plt.axhline(y=0, color='k', alpha=0.25) plt.axvline(x=0, color='k', alpha=0.25) plt.axis((-5, 5, -0.2, 1.2)) plt.grid(True, which='both') plt.xlabel("z") plt.ylabel("y") ```
github_jupyter
<!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* <!--NAVIGATION--> < [Machine Learning](05.00-Machine-Learning.ipynb) | [Contents](Index.ipynb) | [Introducing Scikit-Learn](05.02-Introducing-Scikit-Learn.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.01-What-Is-Machine-Learning.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a> # What Is Machine Learning? Before we take a look at the details of various machine learning methods, let's start by looking at what machine learning is, and what it isn't. Machine learning is often categorized as a subfield of artificial intelligence, but I find that categorization can often be misleading at first brush. The study of machine learning certainly arose from research in this context, but in the data science application of machine learning methods, it's more helpful to think of machine learning as a means of *building models of data*. Fundamentally, machine learning involves building mathematical models to help understand data. "Learning" enters the fray when we give these models *tunable parameters* that can be adapted to observed data; in this way the program can be considered to be "learning" from the data. 
Once these models have been fit to previously seen data, they can be used to predict and understand aspects of newly observed data. I'll leave to the reader the more philosophical digression regarding the extent to which this type of mathematical, model-based "learning" is similar to the "learning" exhibited by the human brain. Understanding the problem setting in machine learning is essential to using these tools effectively, and so we will start with some broad categorizations of the types of approaches we'll discuss here. ## Categories of Machine Learning At the most fundamental level, machine learning can be categorized into two main types: supervised learning and unsupervised learning. *Supervised learning* involves somehow modeling the relationship between measured features of data and some label associated with the data; once this model is determined, it can be used to apply labels to new, unknown data. This is further subdivided into *classification* tasks and *regression* tasks: in classification, the labels are discrete categories, while in regression, the labels are continuous quantities. We will see examples of both types of supervised learning in the following section. *Unsupervised learning* involves modeling the features of a dataset without reference to any label, and is often described as "letting the dataset speak for itself." These models include tasks such as *clustering* and *dimensionality reduction.* Clustering algorithms identify distinct groups of data, while dimensionality reduction algorithms search for more succinct representations of the data. We will see examples of both types of unsupervised learning in the following section. In addition, there are so-called *semi-supervised learning* methods, which falls somewhere between supervised learning and unsupervised learning. Semi-supervised learning methods are often useful when only incomplete labels are available. 
## Qualitative Examples of Machine Learning Applications To make these ideas more concrete, let's take a look at a few very simple examples of a machine learning task. These examples are meant to give an intuitive, non-quantitative overview of the types of machine learning tasks we will be looking at in this chapter. In later sections, we will go into more depth regarding the particular models and how they are used. For a preview of these more technical aspects, you can find the Python source that generates the following figures in the [Appendix: Figure Code](06.00-Figure-Code.ipynb). ### Classification: Predicting discrete labels We will first take a look at a simple *classification* task, in which you are given a set of labeled points and want to use these to classify some unlabeled points. Imagine that we have the data shown in this figure: ![](figures/05.01-classification-1.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Classification-Example-Figure-1) Here we have two-dimensional data: that is, we have two *features* for each point, represented by the *(x,y)* positions of the points on the plane. In addition, we have one of two *class labels* for each point, here represented by the colors of the points. From these features and labels, we would like to create a model that will let us decide whether a new point should be labeled "blue" or "red." There are a number of possible models for such a classification task, but here we will use an extremely simple one. We will make the assumption that the two groups can be separated by drawing a straight line through the plane between them, such that points on each side of the line fall in the same group. Here the *model* is a quantitative version of the statement "a straight line separates the classes", while the *model parameters* are the particular numbers describing the location and orientation of that line for our data. 
The optimal values for these model parameters are learned from the data (this is the "learning" in machine learning), which is often called *training the model*. The following figure shows a visual representation of what the trained model looks like for this data: ![](figures/05.01-classification-2.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Classification-Example-Figure-2) Now that this model has been trained, it can be generalized to new, unlabeled data. In other words, we can take a new set of data, draw this model line through it, and assign labels to the new points based on this model. This stage is usually called *prediction*. See the following figure: ![](figures/05.01-classification-3.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Classification-Example-Figure-3) This is the basic idea of a classification task in machine learning, where "classification" indicates that the data has discrete class labels. At first glance this may look fairly trivial: it would be relatively easy to simply look at this data and draw such a discriminatory line to accomplish this classification. A benefit of the machine learning approach, however, is that it can generalize to much larger datasets in many more dimensions. For example, this is similar to the task of automated spam detection for email; in this case, we might use the following features and labels: - *feature 1*, *feature 2*, etc. $\to$ normalized counts of important words or phrases ("Viagra", "Nigerian prince", etc.) - *label* $\to$ "spam" or "not spam" For the training set, these labels might be determined by individual inspection of a small representative sample of emails; for the remaining emails, the label would be determined using the model. For a suitably trained classification algorithm with enough well-constructed features (typically thousands or millions of words or phrases), this type of approach can be very effective. 
We will see an example of such text-based classification in [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb). Some important classification algorithms that we will discuss in more detail are Gaussian naive Bayes (see [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb)), support vector machines (see [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)), and random forest classification (see [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb)). ### Regression: Predicting continuous labels In contrast with the discrete labels of a classification algorithm, we will next look at a simple *regression* task in which the labels are continuous quantities. Consider the data shown in the following figure, which consists of a set of points each with a continuous label: ![](figures/05.01-regression-1.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Regression-Example-Figure-1) As with the classification example, we have two-dimensional data: that is, there are two features describing each data point. The color of each point represents the continuous label for that point. There are a number of possible regression models we might use for this type of data, but here we will use a simple linear regression to predict the points. This simple linear regression model assumes that if we treat the label as a third spatial dimension, we can fit a plane to the data. This is a higher-level generalization of the well-known problem of fitting a line to data with two coordinates. We can visualize this setup as shown in the following figure: ![](figures/05.01-regression-2.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Regression-Example-Figure-2) Notice that the *feature 1-feature 2* plane here is the same as in the two-dimensional plot from before; in this case, however, we have represented the labels by both color and three-dimensional axis position. 
From this view, it seems reasonable that fitting a plane through this three-dimensional data would allow us to predict the expected label for any set of input parameters. Returning to the two-dimensional projection, when we fit such a plane we get the result shown in the following figure: ![](figures/05.01-regression-3.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Regression-Example-Figure-3) This plane of fit gives us what we need to predict labels for new points. Visually, we find the results shown in the following figure: ![](figures/05.01-regression-4.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Regression-Example-Figure-4) As with the classification example, this may seem rather trivial in a low number of dimensions. But the power of these methods is that they can be straightforwardly applied and evaluated in the case of data with many, many features. For example, this is similar to the task of computing the distance to galaxies observed through a telescope—in this case, we might use the following features and labels: - *feature 1*, *feature 2*, etc. $\to$ brightness of each galaxy at one of several wave lengths or colors - *label* $\to$ distance or redshift of the galaxy The distances for a small number of these galaxies might be determined through an independent set of (typically more expensive) observations. Distances to remaining galaxies could then be estimated using a suitable regression model, without the need to employ the more expensive observation across the entire set. In astronomy circles, this is known as the "photometric redshift" problem. Some important regression algorithms that we will discuss are linear regression (see [In Depth: Linear Regression](05.06-Linear-Regression.ipynb)), support vector machines (see [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)), and random forest regression (see [In-Depth: Decision Trees and Random Forests](05.08-Random-Forests.ipynb)). 
### Clustering: Inferring labels on unlabeled data The classification and regression illustrations we just looked at are examples of supervised learning algorithms, in which we are trying to build a model that will predict labels for new data. Unsupervised learning involves models that describe data without reference to any known labels. One common case of unsupervised learning is "clustering," in which data is automatically assigned to some number of discrete groups. For example, we might have some two-dimensional data like that shown in the following figure: ![](figures/05.01-clustering-1.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Clustering-Example-Figure-1) By eye, it is clear that each of these points is part of a distinct group. Given this input, a clustering model will use the intrinsic structure of the data to determine which points are related. Using the very fast and intuitive *k*-means algorithm (see [In Depth: K-Means Clustering](05.11-K-Means.ipynb)), we find the clusters shown in the following figure: ![](figures/05.01-clustering-2.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Clustering-Example-Figure-2) *k*-means fits a model consisting of *k* cluster centers; the optimal centers are assumed to be those that minimize the distance of each point from its assigned center. Again, this might seem like a trivial exercise in two dimensions, but as our data becomes larger and more complex, such clustering algorithms can be employed to extract useful information from the dataset. We will discuss the *k*-means algorithm in more depth in [In Depth: K-Means Clustering](05.11-K-Means.ipynb). Other important clustering algorithms include Gaussian mixture models (See [In Depth: Gaussian Mixture Models](05.12-Gaussian-Mixtures.ipynb)) and spectral clustering (See [Scikit-Learn's clustering documentation](http://scikit-learn.org/stable/modules/clustering.html)).
### Dimensionality reduction: Inferring structure of unlabeled data Dimensionality reduction is another example of an unsupervised algorithm, in which labels or other information are inferred from the structure of the dataset itself. Dimensionality reduction is a bit more abstract than the examples we looked at before, but generally it seeks to pull out some low-dimensional representation of data that in some way preserves relevant qualities of the full dataset. Different dimensionality reduction routines measure these relevant qualities in different ways, as we will see in [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb). As an example of this, consider the data shown in the following figure: ![](figures/05.01-dimesionality-1.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Dimensionality-Reduction-Example-Figure-1) Visually, it is clear that there is some structure in this data: it is drawn from a one-dimensional line that is arranged in a spiral within this two-dimensional space. In a sense, you could say that this data is "intrinsically" only one dimensional, though this one-dimensional data is embedded in higher-dimensional space. A suitable dimensionality reduction model in this case would be sensitive to this nonlinear embedded structure, and be able to pull out this lower-dimensionality representation. The following figure shows a visualization of the results of the Isomap algorithm, a manifold learning algorithm that does exactly this: ![](figures/05.01-dimesionality-2.png) [figure source in Appendix](06.00-Figure-Code.ipynb#Dimensionality-Reduction-Example-Figure-2) Notice that the colors (which represent the extracted one-dimensional latent variable) change uniformly along the spiral, which indicates that the algorithm did in fact detect the structure we saw by eye. As with the previous examples, the power of dimensionality reduction algorithms becomes clearer in higher-dimensional cases. 
For example, we might wish to visualize important relationships within a dataset that has 100 or 1,000 features. Visualizing 1,000-dimensional data is a challenge, and one way we can make this more manageable is to use a dimensionality reduction technique to reduce the data to two or three dimensions. Some important dimensionality reduction algorithms that we will discuss are principal component analysis (see [In Depth: Principal Component Analysis](05.09-Principal-Component-Analysis.ipynb)) and various manifold learning algorithms, including Isomap and locally linear embedding (See [In-Depth: Manifold Learning](05.10-Manifold-Learning.ipynb)). ## Summary Here we have seen a few simple examples of some of the basic types of machine learning approaches. Needless to say, there are a number of important practical details that we have glossed over, but I hope this section was enough to give you a basic idea of what types of problems machine learning approaches can solve. In short, we saw the following: - *Supervised learning*: Models that can predict labels based on labeled training data - *Classification*: Models that predict labels as two or more discrete categories - *Regression*: Models that predict continuous labels - *Unsupervised learning*: Models that identify structure in unlabeled data - *Clustering*: Models that detect and identify distinct groups in the data - *Dimensionality reduction*: Models that detect and identify lower-dimensional structure in higher-dimensional data In the following sections we will go into much greater depth within these categories, and see some more interesting examples of where these concepts can be useful. All of the figures in the preceding discussion are generated based on actual machine learning computations; the code behind them can be found in [Appendix: Figure Code](06.00-Figure-Code.ipynb). 
<!--NAVIGATION--> < [Machine Learning](05.00-Machine-Learning.ipynb) | [Contents](Index.ipynb) | [Introducing Scikit-Learn](05.02-Introducing-Scikit-Learn.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.01-What-Is-Machine-Learning.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
github_jupyter
### Imports ``` # Change tensorflow version to 1.x %tensorflow_version 1 import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data #Read MNIST Data mnist_data = input_data.read_data_sets('MNIST_data/',one_hot=True) ``` ### Dataset ``` import matplotlib.pyplot as plt import numpy as np import math inp_batch, gt_batch = mnist_data.train.next_batch(10) x,y = inp_batch[0], gt_batch[0] #Checking one image and one label shapes print(x.shape, y.shape) #Checking a batch of images and a batch of labels shapes print(inp_batch.shape, gt_batch.shape) #Formatting images to matrix, from vector def imformat(x): horlen = int(math.sqrt(len(x))) verlen = horlen x_imformat = x.reshape((horlen,verlen)) return x_imformat x_imformat = imformat(x) plt.imshow(x_imformat,cmap = 'gray') print(x.max(),x.min()) print(np.amax(x),np.amin(x)) ``` ###Network ``` #Definin hyperparameters batch_num = 50 input_shape = 784 label_shape = 10 lr = 0.003 layer_1_neurons = 200 layer_2_neurons = 80 layer_3_neurons = 10 # Placeholders are the things that we FEED to our tensorflow graph when # we run our graph inp = tf.placeholder(dtype = tf.float32 , shape = (None,input_shape)) lab = tf.placeholder(dtype = tf.float32, shape = (None, label_shape)) # We define our variables that we will use in our graph. 
# Think of this like we define some nodes on the graph, but we didnt define the edges yet W1 = tf.Variable(tf.random_normal(shape = [input_shape, layer_1_neurons])) b1 = tf.Variable(tf.random_normal(shape = [layer_1_neurons])) W2 = tf.Variable(tf.random_normal(shape = [layer_1_neurons, layer_2_neurons])) b2 = tf.Variable(tf.random_normal(shape = [layer_2_neurons])) W3 = tf.Variable(tf.random_normal(shape = [layer_2_neurons, layer_3_neurons])) b3 = tf.Variable(tf.random_normal(shape = [layer_3_neurons])) # Here we finish defining everything in our computational graph y1 = tf.nn.sigmoid(tf.matmul(inp,W1) + b1) y2 = tf.nn.sigmoid(tf.matmul(y1,W2) + b2) y3 = tf.nn.sigmoid(tf.matmul(y2,W3) + b3) pred = y3 # We need loss in our comp graph to optimize it loss = tf.nn.softmax_cross_entropy_with_logits_v2(lab,pred) # We need tstep in our comp graph to obtain the gradients tstep = tf.train.AdamOptimizer(lr).minimize(loss) #if this is an interactive session, I won't be needing python contexts after. sess = tf.InteractiveSession() tf.global_variables_initializer().run() # Our training loop itnum = 1000 epnum = 25 for epoch in range(epnum): aggloss = 0 for itr in range(1,itnum): xbatch,ybatch = mnist_data.train.next_batch(batch_num) # I run my computational graph to obtain LOSS and TSTEP objects residing in my graph # I assign the obtained values to itrloss variable and _ variable (i will not use _ variable) # I feed my graph the INP and LAB objects. inp object is xbatch here, lab object is ybatch here itrloss, _ = sess.run([loss,tstep], feed_dict = {inp:xbatch, lab:ybatch}) aggloss = aggloss + np.mean(itrloss) print(epoch,aggloss/itnum) #Checking accuracy acc = 0 sample_size = 5000 for _ in range(sample_size): xtest, ytest = mnist_data.test.next_batch(50) # I run my graph to obtain my prediction this time. Same things apply as in the previous cell. 
testpred = sess.run([pred], feed_dict={inp:xtest, lab:ytest}) acc = acc + int(np.argmax(ytest)==np.argmax(testpred)) acc = acc/sample_size print(acc) ```
github_jupyter
<img width=180 align="center" src="https://raw.githubusercontent.com/asc-community/AngouriMath/9d3fd623e1ea0a32193a4de9424379d8f41135c5/Sources/Wrappers/AngouriMath.Interactive/icon.png"/> <hr> <div align="center"> <h1>AngouriMath.<span style="color: #FF8800">Interactive</span></h1> </div> <br> This notebook provides an example of using symbol algebra [AngouriMath](https://am.angouri.org) in an interactive notebook. Software, made it possible: - [Jupyter](https://jupyter.org) - web-based editor for these notebooks - [dotnet/interactive](https://github.com/dotnet/interactive/) - package, which allowed to make formatted values - [AngouriMath.Interactive](https://www.nuget.org/packages/AngouriMath.Interactive) - package, wrapping AM to work in Jupyter [Try this notebook online](https://mybinder.org/v2/gh/asc-community/Try/main?filepath=HelloBook.AngouriMath.Interactive.ipynb) Check [website](https://am.angouri.org) and [wiki](https://github.com/asc-community/AngouriMath/wiki) for more information. ### Setting up Here we install the latest package from NuGet. ``` #r "nuget:AngouriMath.Interactive, *-*" open AngouriMath.FSharp.Core open AngouriMath.FSharp.Functions open AngouriMath.FSharp.Shortcuts ``` ### Building a simple expression ``` let x = symbol "x" let y = symbol "y" x / y + (sqrt x) ``` ### Parsing ``` parsed "x ^ 2 + 3" ``` Because of how F# works, you need to parse numbers when using them along with other expressions ``` x / (parsed 3) parsed "Hello + A gamma gamma omicron upsilon rho iota Math!" ``` ### Variable substitution ``` ("x", 3) -|> "x ^ 2" "x ^ 2" <|- ("x", 3) "6 / y + x" <|- ("x", 3) ``` # I. Calculus \`\`d/dx\`\` is a shortcut, allowing to write expressions even easier ``` ``d/dx`` ((sin x) + y ** x) ``` But if you need differentiating over a custom variable... 
``` derivative y (y ** x + y + (atan y)) ``` Same way, integrals: ``` ``int [dx]`` "x + 2" integral "x" "y + z + zeta" ``` Limits: ``` ``lim x->0`` "sin(a x) / (b x)" ``lim x->+oo`` "(a x2 + b) / (c x2 - c)" limit "x" "0" "sin(a x) / (b x)" ``` # II. Algebra ``` open AngouriMath.FSharp.MatrixOperators ``` ## Vectors Vector of numbers ``` let v1 = vector [1; 2; 3] v1 ``` Vector of expressions, parsed from strings ``` let v2 = vector ["x + 2"; "y"; "pi + lambda"] v2 ``` Matrix of numbers ``` let m1 = matrix [[1; 2; 3]; [4; 5; 6]] m1 ``` Transposed matrix ``` m1.T ``` Matrix of expressions, parsed from strings ``` let m2 = matrix [["x_y"; "3"]; ["gamma + Omega"; "2"]] m2 ``` Matrix product ``` m1.T |* m2 ``` Tensor product ``` m1 *** m2 ``` Matrix power ``` m2 |** 2 open AngouriMath.FSharp.Constants simplified (m2 - (parsed "lambda") * I_2) ``` ## Sets ``` let int1 = leftInclusive "3" int1 let int1 = leftExclusive "3" int1 let int1 = leftExclusiveRightInclusive "3" "lambda" int1 parsed "{1, 2, 3} \/ RR" ``` # III. Aggressive operators They will override existing arithmetic operators for `obj` and `obj` ``` open AngouriMath.Interactive.AggressiveOperators 16 ** 60 e ** (i * pi) 1038 - 3.4m ```
github_jupyter
``` import tensorflow as tf # things we need for NLP import nltk from nltk.stem.lancaster import LancasterStemmer stemmer = LancasterStemmer() # things we need for Tensorflow import numpy as np from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.optimizers import SGD import pandas as pd import pickle import random nltk.download('punkt') # import our chat-bot intents file import json with open('intents edited.json') as json_data: intents = json.load(json_data) words = [] classes = [] documents = [] ignore_words = ['?'] # loop through each sentence in our intents patterns for intent in intents['intents']: for pattern in intent['patterns']: # tokenize each word in the sentence w = nltk.word_tokenize(pattern) # add to our words list words.extend(w) # add to documents in our corpus documents.append((w, intent['tag'])) # add to our classes list if intent['tag'] not in classes: classes.append(intent['tag']) # stem and lower each word and remove duplicates words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words] words = sorted(list(set(words))) # sort classes classes = sorted(list(set(classes))) # documents = combination between patterns and intents print (len(documents), "documents") # classes = intents print (len(classes), "classes", classes) # words = all words, vocabulary print (len(words), "unique stemmed words", words) # create our training data training = [] # create an empty array for our output output_empty = [0] * len(classes) # training set, bag of words for each sentence for doc in documents: # initialize our bag of words bag = [] # list of tokenized words for the pattern pattern_words = doc[0] # stem each word - create base word, in attempt to represent related words pattern_words = [stemmer.stem(word.lower()) for word in pattern_words] # create our bag of words array with 1, if word match found in current pattern for w in words: bag.append(1) if w in pattern_words else bag.append(0) # output is a 
'0' for each tag and '1' for current tag (for each pattern) output_row = list(output_empty) output_row[classes.index(doc[1])] = 1 training.append([bag, output_row]) # shuffle our features and turn into np.array random.shuffle(training) training = np.array(training) # create train and test lists. X - patterns, Y - intents train_x = list(training[:,0]) train_y = list(training[:,1]) # Create model - 3 layers. First layer 128 neurons, second layer 64 neurons and 3rd output layer contains number of neurons # equal to number of intents to predict output intent with softmax model = Sequential() model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu')) model.add(Dropout(0.5)) model.add(Dense(64, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(len(train_y[0]), activation='softmax')) # Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss="categorical_crossentropy", optimizer = sgd, metrics = ['accuracy']) #fit the model model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size= 5, verbose=1) def clean_up_sentence(sentence): # tokenize the pattern - split words into array sentence_words = nltk.word_tokenize(sentence) # stem each word - create short form for word sentence_words = [stemmer.stem(word.lower()) for word in sentence_words] return sentence_words # return bag of words array: 0 or 1 for each word in the bag that exists in the sentence def bow(sentence, words, show_details=True): # tokenize the pattern sentence_words = clean_up_sentence(sentence) # bag of words - matrix of N words, vocabulary matrix bag = [0]*len(words) for s in sentence_words: for i,w in enumerate(words): if w == s: # assign 1 if current word is in the vocabulary position bag[i] = 1 return(np.array(bag)) p = bow("hi,how are you?", words) print (p) print (classes) inputvar = pd.DataFrame([p], dtype=float, index=['input']) 
print(model.predict(inputvar)) # save model to file pickle.dump(model, open("HC-assistant-model.pkl", "wb")) # save all of our data structures pickle.dump( {'words':words, 'classes':classes, 'train_x':train_x, 'train_y':train_y}, open( "HC-assistant-data.pkl", "wb" ) ) def classify_local(sentence): ERROR_THRESHOLD = 0.7 # generate probabilities from the model input_data = pd.DataFrame([bow(sentence, words)], dtype=float, index=['input']) results = model.predict([input_data])[0] # filter out predictions below a threshold, and provide intent index results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD] # sort by strength of probability results.sort(key=lambda x: x[1], reverse=True) return_list = [] for r in results: return_list.append((classes[r[0]], str(r[1]))) # return tuple of intent and probability return return_list classify_local('What are your rates?') def chat(): print("Chat with HEIGHTS COMMUNICATIONS bot assistant! (type quit to exit)") while True: inp = input("You: ") if inp.lower() == "quit": break input_data = pd.DataFrame([bow(inp, words)], dtype=float, index=['input']) results = model.predict([input_data])[0] results_index = np.argmax(results) tag = classes[results_index] if results[results_index] > 0.7: for tg in intents["intents"]: if tg['tag'] == tag: responses = tg["responses"] print("Assistant:", random.choice(responses)) else: print("Assistant:","I am sorry. I am not aware of what you are asking. Please call us at +923232779999") chat() import time saved_model_path = "./hc_model.h5".format(int(time.time())) model.save(saved_model_path) !tensorflowjs_converter --input_format=keras {saved_model_path} ./ ```
github_jupyter
# Solving combinatorial optimization problems using QAOA In this tutorial, we introduce combinatorial optimization problems, explain approximate optimization algorithms, explain how the Quantum Approximate Optimization Algorithm (QAOA) works and present the implementation of an example that can be run on a simulator or on a 5 qubit quantum chip ## Contents 1. [Introduction](#introduction) 2. [Examples](#examples) 3. [Approximate optimization algorithms](#approximateOPT) 4. [The QAOA algorithm](#QAOA) 5. [Qiskit Implementation](#implementation) 5.1 [Running QAOA on a simulator](#implementationsim) 5.2 [Running QAOA on a real quantum device](#implementationdev) 6. [Problems](#problems) 7. [References](#references) ## 1. Introduction <a id='introduction'></a> Combinatorial optimization [1](#references) means searching for an optimal solution in a finite or countably infinite set of potential solutions. Optimality is defined with respect to some criterion function, which is to be minimized or maximized, which is typically called the cost function. There are various types of optimization problems. These include Minimization: cost, distance, length of a traversal, weight, processing time, material, energy consumption, number of objects. Maximization: profit, value, output, return, yield, utility, efficiency, capacity, number of objects. Any maximization problem can be cast in terms of a minimization problem and vice versa. Hence the general form a combinatorial optimization problem is given by $$ \text{maximize } \;\; C(x)$$ $$ \text{subject to } \;\; x \in S $$ where $x \in S$, is a discrete variable and $C : D \rightarrow \mathbb{R}$ is the cost function, that maps from some domain $S$ in to the real numbers $\mathbb{R}$. The variable $x$ can be subject to a set of constraints and lies within the set $S \subset D$ of feasible points. 
In binary combinatorial optimization problems, the cost function $C$ can typically be expressed as a sum of terms that only involve a subset $Q \subset[n]$ of the $n$ bits in the string $x \in \{0,1\}^n$ and is written in the canonical form $$ C(x) = \sum_{(Q,\overline{Q}) \subset [n]} w_{(Q,\overline{Q})} \; \prod_{i\in Q} x_i \; \prod_{j\in \overline{Q}} (1- x_j), $$ where $x_i \in \{0,1\}$ and $w_{(Q,\overline{Q})}\in \mathbb{R}$. We want to find the n-bit string $x$ for which $C(x)$ is the maximal. ### 1.1 Diagonal Hamiltonians This cost function can be mapped to a Hamiltonian that is diagonal in the computational basis. Given the cost-function $C$ this Hamiltonian is then written as $$ H = \sum_{x \in \{0,1\}^n} C(x) |x \rangle\langle x| $$ where $x \in \{0,1\}^n$ labels the computational basis states $|x \rangle \in \mathbb{C}^{2^n}$. If the cost function only has at most weight $k$ terms, i.e. when only $Q$ contribute that involve at most $Q \leq k$ bits, then this diagonal Hamiltonian is also only a sum of weight $k$ Pauli $Z$ operators. The expansion of $H$ in to Pauli $Z$ operators can be obtained from the canonical expansion of the cost-function $C$ by substituting for every binary variable $x_i \in \{0,1\}$ the matrix $x_i \rightarrow 2^{-1}(1 - Z_i)$. Here $Z_i$ is read as the Pauli $Z$ operator that acts on qubit $i$ and trivial on all others, i.e. $$ Z_i = \left(\begin{array}{cc} 1 & 0 \\ 0 & -1 \end{array}\right). $$ This means that the spin Hamiltonian encoding the classical cost function is written as a $|Q|$ - local quantum spin Hamiltonian only involving Pauli $Z$- operators. $$ H = \sum_{(Q,\overline{Q}) \subset [n]} w_{(Q,\overline{Q})} \; \frac{1}{2^{|Q| + |\overline{Q}|}}\prod_{i\in Q} \left(1 - Z_i\right) \; \prod_{j\in \overline{Q}} \left(1 + Z_j\right).$$ Now, we will assume that only a few (polynomially many in $n$) $w_{(Q,\overline{Q})}$ will be non-zero. 
Moreover we will assume that the set $|(Q,\overline{Q})|$ is bounded and not too large. This means we can write the cost function as well as the Hamiltonian $H$ as the sum of $m$ local terms $\hat{C}_k$, $$ H = \sum_{k = 1}^m \hat{C}_k, $$ where both $m$ and the support of $\hat{C}_k$ is reasonably bounded. ## 2 Examples: <a id='examples'></a> We consider 2 examples to illustrate combinatorial optimization problems. We will only implement the first example as in Qiskit, but provide a sequence of exercises that give the instructions to implement the second example as well. ### 2.1 (weighted) $MAXCUT$ Consider an $n$-node non-directed graph *G = (V, E)* where *|V| = n* with edge weights $w_{ij}>0$, $w_{ij}=w_{ji}$, for $(j,k)\in E$. A cut is defined as a partition of the original set V into two subsets. The cost function to be optimized is in this case the sum of weights of edges connecting points in the two different subsets, *crossing* the cut. By assigning $x_i=0$ or $x_i=1$ to each node $i$, one tries to maximize the global profit function (here and in the following summations run over indices 0,1,...n-1) $$C(\textbf{x}) = \sum_{i,j = 1}^n w_{ij} x_i (1-x_j).$$ To simplify notation, we assume uniform weights $ w_{ij} = 1$ for $(i,j) \in E$. In order to find a solution to this problem on a quantum computer, one needs first to map it to a diagonal Hamiltonian as discussed above. We write the sum as a sum over edges in the set $(i,j) \in E$ $$C(\textbf{x}) = \sum_{i,j = 1}^n w_{ij} x_i (1-x_j) = \sum_{(i,j) \in E} \left( x_i (1-x_j) + x_j (1-x_i)\right)$$ To map this to a spin Hamiltonian we make the assignment $x_i\rightarrow (1-Z_i)/2$, where $Z_i$ is the Pauli Z operator that has eigenvalues $\pm 1$ and obtain $X \rightarrow H$ $$ H = \sum_{(j,k) \in E} \frac{1}{2}\left(1 - Z_j Z_k \right).$$ This means that the Hamiltonian can be written as a sum of $m = |E|$ local terms $\hat{C}_e = \frac{1}{2}\left(1 - Z_{e1}Z_{e2}\right)$ with $e = (e1,e2) \in E$.
### 2.2 Constraint satisfaction problems and $MAX \; 3-SAT$. Another example of a combinatorial optimization problem is $3-SAT$. Here the cost function $C(\textbf{x}) = \sum_{k = 1}^m c_k(\textbf{x})$ is a sum of clauses $c_k(\textbf{x})$ that constrain the values of $3$ bits of some $\textbf{x} \in \{0,1\}^n$ that participate in the clause. Consider for instance this example of a $3-SAT$ clause $$ c_1(\textbf{x}) = (1-x_1)(1-x_3)x_{132} $$ for a bit string $\textbf{x} \in \{0,1\}^{133}$. The clause can only be satisfied by setting the bits $x_1 = 0$,$x_3 = 0$ and $x_{132} = 1$. The $3-SAT$ problem now asks whether there is a bit string that satisfies all of the $m$ clauses or whether no such string exists. This decision problem is the prime example of a problem that is $NP$-complete. The closely related optimization problem $MAX \; 3-SAT$ asks to find the bit string $\textbf{x}$ that satisfies the maximal number of clauses in $C(\textbf{x})$. This can of course be turned again into a decision problem if we ask whether there exists a bit string that satisfies more than $\tilde{m}$ of the $m$ clauses, which is again $NP$-complete. ## 3. Approximate optimization algorithms <a id='approximateOPT'></a> Both the previously considered problems $MAXCUT$ and $MAX \; 3-SAT$ are actually known to be NP-hard problems [1](#references). In fact it turns out that many combinatorial optimization problems are computationally hard to solve in general. In light of this fact, we can't expect to find a provably efficient algorithm, i.e. an algorithm with polynomial runtime in the problem size, that solves these problems. This also applies to quantum algorithms. There are two main approaches to dealing with such problems. The first approach is approximation algorithms that are guaranteed to find a solution of specified quality in polynomial time.
The second approach is heuristic algorithms that don't have a polynomial runtime guarantee but appear to perform well on some instances of such problems. Approximate optimization algorithms are efficient and provide a provable guarantee on how close the approximate solution is to the actual optimum of the problem. The guarantee typically comes in the form of an approximation ratio, $\alpha \leq 1$. A probabilistic approximate optimization algorithm guarantees that it produces a bit-string $\textbf{x}^* \in \{0,1\}^n$ so that *with high probability* we have that with a positive $C_{max} = \max_{\textbf{x}}C(\textbf{x})$ $$ C_{max} \geq C(\textbf{x}^*) \geq \alpha C_{max}. $$ For the $MAXCUT$ problem there is a famous approximate algorithm due to Goemans and Williamson [2](#references). This algorithm is based on an SDP relaxation of the original problem combined with a probabilistic rounding technique that yields, with high probability, an approximate solution $\textbf{x}^*$ that has an approximation ratio of $\alpha \approx 0.868$. This approximation ratio is actually believed to be optimal so we do not expect to see an improvement by using a quantum algorithm. ## 4. The QAOA algorithm <a id="QAOA"></a> The Quantum approximate optimization algorithm (QAOA) by Farhi, Goldstone and Gutmann [3](#references) is an example of a heuristic algorithm. Unlike the Goemans-Williamson algorithm, QAOA does not come with performance guarantees. QAOA takes the approach of classical approximate algorithms and looks for a quantum analogue that will likewise produce a classical bit string $x^*$ that with high probability is expected to have a good approximation ratio $\alpha$. Before discussing the details, let us first present the general idea of this approach.
### 4.1 Overview: We want to find a quantum state $|\psi_p(\vec{\gamma},\vec{\beta})\rangle$, that depends on some real parameters $\vec{\gamma},\vec{\beta} \in \mathbb{R}^p$, which has the property that it maximizes the expectation value with respect to the problem Hamiltonian $H$. Given this trial state we search for parameters $\vec{\gamma}^*,\vec{\beta}^*$ that maximize $F_p(\vec{\gamma},\vec{\beta}) = \langle \psi_p(\vec{\gamma},\vec{\beta})|H|\psi_p(\vec{\gamma},\vec{\beta})\rangle$. Once we have such a state and the corresponding parameters we prepare the state $|\psi_p(\vec{\gamma}^*,\vec{\beta}^*)\rangle$ on a quantum computer and measure the state in the $Z$ basis $|x \rangle = |x_1,\ldots x_n \rangle$ to obtain a random outcome $x^*$. We will see that this random $x^*$ is going to be a bit string that is with high probability close to the expected value $M_p = F_p(\vec{\gamma}^*,\vec{\beta}^*)$. Hence, if $M_p$ is close to $C_{max}$ so is $C(x^*)$. ### 4.2 The components of the QAOA algorithm. ### 4.2.1 The QAOA trial state <a id="section_421"></a> Central to QAOA is the trial state $|\psi_p(\vec{\gamma},\vec{\beta})\rangle$ that will be prepared on the quantum computer. Ideally we want this state to give rise to a large expectation value $F_p(\vec{\gamma},\vec{\beta}) = \langle \psi_p(\vec{\gamma},\vec{\beta})|H|\psi_p(\vec{\gamma},\vec{\beta})\rangle$ with respect to the problem Hamiltonian $H$. In Farhi [3](#references), the trial states $|\psi_p(\vec{\gamma},\vec{\beta})\rangle$ are constructed from the problem Hamiltonian $H$ together with single qubit Pauli $X$ rotations.
That means, given a problem Hamiltonian $$ H = \sum_{k = 1}^m \hat{C}_k $$ diagonal in the computational basis and a transverse field Hamiltonian $$ B = \sum_{i = 1}^n X_i $$ the trial state is prepared by applying $p$ alternating unitaries $$ |\psi_p(\vec{\gamma},\vec{\beta})\rangle = e^{ -i\beta_p B } e^{ -i\gamma_p H } \ldots e^{ -i\beta_1 B } e^{ -i\gamma_1 H } |+\rangle^n $$ to the product state $|+\rangle^n$ with $ X |+\rangle = |+\rangle$. This particular ansatz has the advantage that there exists an explicit choice for the vectors $\vec{\gamma}^*,\vec{\beta}^*$ such that for $M_p = F_p(\vec{\gamma}^*,\vec{\beta}^*)$ when we take the limit $\lim_{p \rightarrow \infty} M_p = C_{max}$. This follows by viewing the trial state $|\psi_p(\vec{\gamma},\vec{\beta}) \rangle$ as the state that follows from Trotterizing the adiabatic evolution with respect to $H$ and the transverse field Hamiltonian $B$, c.f. Ref [3](#references). Conversely the disadvantage of this trial state is that one would typically want a state that has been generated from a quantum circuit that is not too deep. Here depth is measured with respect to the gates that can be applied directly on the quantum chip. Hence there are other proposals that suggest using ansatz trial states that are more tailored to the hardware of the quantum chip Ref. [4](#references), Ref. [5](#references).
This happens for instance when one considers $MAXCUT$ for graphs with bounded degree and one considers a circuit with $p=1$. We will see an example of this in the Qiskit implementation below (section 5.2) and provide an exercise to compute the expectation value. To illustrate the idea, recall that the Hamiltonian can be written as a sum of individual terms $H = \sum_{k = 1}^m \hat{C}_k$. Due to the linearity of the expectation value, it is sufficient to consider the expectation values of the individual summands. For $p = 1$ one has that $$ \langle \psi_1(\vec{\gamma},\vec{\beta})|\hat{C}_k|\psi_1(\vec{\gamma},\vec{\beta})\rangle = \langle +^n | e^{ i\gamma_1 H } e^{ i\beta_1 B } \hat{C}_k e^{ -i\beta_1 B } e^{ -i\gamma_1 H } |+^n\rangle.$$ Observe that with $B = \sum_{i = 1}^n X_i$ the unitary $e^{ -i\beta_1 B }$ is actually a product of single qubit rotations about $X$ with an angle $\beta$ for which we will write $X(\beta)_k = \exp(i\beta X_k)$. All the individual rotations that don't act on the qubits where $\hat{C}_k$ is supported commute with $\hat{C}_k$ and therefore cancel. This does not increase the support of the operator $\hat{C}_k$. This means that the second set of unitary gates $e^{ -i\gamma_1 H } = \prod_{l=1}^m U_l(\gamma)$ have a large set of gates $U_l(\gamma) = e^{ -i\gamma_1 \hat{C}_l }$ that commute with the operator $e^{ i\beta_1 B } \hat{C}_k e^{ -i\beta_1 B }$. The only gates $U_l(\gamma) = e^{ -i\gamma_1 \hat{C}_l }$ that contribute to the expectation value are those which involve qubits in the support of the original $\hat{C}_k$. Hence, for bounded degree interaction the support of $e^{ i\gamma_1 H } e^{ i\beta_1 B } \hat{C}_k e^{ -i\beta_1 B } e^{ -i\gamma_1 H }$ only expands by an amount given by the degree of the interaction in $H$ and is therefore independent of the system size. This means that for these smaller subproblems the expectation values are independent of $n$ and can be evaluated classically.
The case of a general degree $3$ is considered in [3](#references). This is a general observation, which means that if we have a problem where the circuit used for the trial state preparation only increases the support of each term in the Hamiltonian by a constant amount the cost function can be directly evaluated. When this is the case, and only a few parameters $\beta, \gamma$ are needed in the preparation of the trial state, these can be found easily by a simple grid search. Furthermore, an exact optimal value of $M_p$ can be used to bound the approximation ratio $$ \frac{M_p}{C_{max}} \geq \alpha $$ to obtain an estimate of $\alpha$. For this case the QAOA algorithm has the same characteristics as a conventional approximate optimization algorithm that comes with a guaranteed approximation ratio that can be obtained with polynomial efficiency in the problem size. #### Evaluation on a quantum computer When the quantum circuit becomes too deep to be evaluated classically, or when the connectivity of the Problem Hamiltonian is too high we can resort to other means of estimating the expectation value. This involves directly estimating $F_p(\vec{\gamma},\vec{\beta})$ on the quantum computer. The approach here follows the path of the conventional expectation value estimation as used in VQE [4](#references), where a trial state $| \psi(\vec{\gamma},\vec{\beta})\rangle$ is prepared directly on the quantum computer and the expectation value is obtained from sampling. Since QAOA has a diagonal Hamiltonian $H$ it is actually straightforward to estimate the expectation value. We only need to obtain samples from the trial state in the computational basis.
Recall that $H = \sum_{x \in \{0,1\}^n} C(x) |x \rangle\langle x|$ so that we can obtain the sampling estimate of $$ \langle \psi_p(\vec{\gamma},\vec{\beta})|H|\psi_p(\vec{\gamma},\vec{\beta})\rangle = \sum_{x \in \{0,1\}^n} C(x) |\langle x| \psi_p(\vec{\gamma},\vec{\beta}) \rangle |^2$$ by repeated single qubit measurements of the state $| \psi_p(\vec{\gamma},\vec{\beta}) \rangle $ in the $Z$ basis. For every bit string $x$ obtained from the distribution $|\langle x| \psi_p(\vec{\gamma},\vec{\beta}) \rangle |^2$ we evaluate the cost function $C(x)$ and average it over the total number of samples. The resulting empirical average approximates the expectation value up to an additive sampling error that lies within the variance of the state. The variance will be discussed below. With access to the expectation value, we can now run a classical optimization algorithm, such as [6](#references), to optimize the $F_p$. While this approach does not lead to an a-priori approximation guarantee for $x^*$, the optimized function value can be used later to provide an estimate for the approximation ratio $\alpha$. ### 4.2.3 Obtaining a solution with a given approximation ratio with high probability The algorithm is probabilistic in nature and produces random bit strings from the distribution $|\langle x| \psi_p(\vec{\gamma},\vec{\beta}) \rangle |^2$. So how can we be sure that we will sample an approximation $x^*$ that is close to the value of the optimized expectation value $M_p$? Note that this question is also relevant to the estimation of $M_p$ on a quantum computer in the first place. If the samples drawn from $|\langle x| \psi_p(\vec{\gamma},\vec{\beta}) \rangle |^2$ have too much variance, many samples are necessary to determine the mean. We will draw a bit string $x^*$ that is close to the mean $M_p$ with high probability when the energy, viewed as a random variable, has little variance. Note that the number of terms in the Hamiltonian $H = \sum_{k=1}^m \hat{C}_k$ is bounded by $m$.
Say each individual summand $\hat{C}_k$ has an operator norm that can be bounded by a universal constant $\|\hat{C}_k\| \leq \tilde{C}$ for all $k = 1\ldots m$. Then consider $$ \begin{eqnarray} \langle \psi_p(\vec{\gamma},\vec{\beta})|H^2|\psi_p(\vec{\gamma},\vec{\beta})\rangle - \langle \psi_p(\vec{\gamma},\vec{\beta})|H|\psi_p(\vec{\gamma},\vec{\beta})\rangle^2 &\leq & \langle \psi_p(\vec{\gamma},\vec{\beta})|H^2|\psi_p(\vec{\gamma},\vec{\beta})\rangle \\\nonumber &=& \sum_{k,l =1}^m \langle \psi_p(\vec{\gamma},\vec{\beta})|\hat{C}_k \hat{C}_l |\psi_p(\vec{\gamma},\vec{\beta})\rangle \\\nonumber &\leq& m^2 \tilde{C}^2 \\\nonumber \end{eqnarray} $$ where we have used that $\langle \psi_p(\vec{\gamma},\vec{\beta})|\hat{C}_k \hat{C}_l |\psi_p(\vec{\gamma},\vec{\beta})\rangle \leq \tilde{C}^2$. This means that the variance of any expectation $F_p(\vec{\gamma},\vec{\beta})$ is bounded by $m^2 \tilde{C}^2$. Hence this in particular applies for $M_p$. Furthermore if $m$ only grows polynomially in the number of qubits $n$, we know that taking polynomially growing number of samples $s = O\left(\frac{\tilde{C}^2 m^2}{\epsilon^2}\right)$ from $|\langle x| \psi_p(\vec{\gamma},\vec{\beta}) \rangle |^2$ will be sufficient to obtain a $x^*$ that leads to an $C(x^*)$ that will be close to $M_p$. ## 5. Qiskit Implementation<a id='implementation'></a> As the example implementation we consider the $MAXCUT$ problem on the butterfly graph of the openly available IBMQ 5-qubit chip. The graph will be defined below and corresponds to the native connectivity of the device. This allows us to implement the original version of the $QAOA$ algorithm, where the cost function $C$ and the Hamiltonian $H$ that is used to generate the state coincide. Moreover, for such a simple graph the exact cost function can be calculated analytically, avoiding the need to find optimal parameters variationally [7](#references). 
To implement the circuit, we follow the notation and gate definitions from the [Qiskit Documentation](https://qiskit.org/documentation/). As the first step we will load Qiskit and additional python packages. ``` import numpy as np import networkx as nx # tool to handle general Graphs import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from qiskit import Aer, IBMQ from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, transpile, assemble from qiskit.providers.ibmq import least_busy from qiskit.tools.monitor import job_monitor from qiskit.visualization import plot_histogram ``` ### 5.1 Problem definition We define the cost function in terms of the butterfly graph of the superconducting chip. The graph has $n = 5$ vertices $ V = \{0,1,2,3,4\}$ and six edges $E = \{(0,1),(0,2),(1,2),(3,2),(3,4),(4,2)\}$, which will all carry the same unit weight $w_{ij} = 1$. We load an additional network package to encode the graph and plot connectivity below. ``` # Generating the butterfly graph with 5 nodes n = 5 V = np.arange(0,n,1) E =[(0,1,1.0),(0,2,1.0),(1,2,1.0),(3,2,1.0),(3,4,1.0),(4,2,1.0)] G = nx.Graph() G.add_nodes_from(V) G.add_weighted_edges_from(E) # Generate plot of the Graph colors = ['r' for node in G.nodes()] default_axes = plt.axes(frameon=True) pos = nx.spring_layout(G) nx.draw_networkx(G, node_color=colors, node_size=600, alpha=1, ax=default_axes, pos=pos) ``` ### 5.2 Optimal trial state parameters<a id="implementation_sec52"></a> In this example we consider the case for $p = 1$, i.e. only one layer of gates. The expectation value $F_1(\gamma,\beta) = \langle \psi_1(\gamma,\beta)|H|\psi_1(\gamma,\beta) \rangle$ can be calculated analytically for this simple setting. Let us discuss the steps explicitly for the Hamiltonian $H = \sum_{(j,k) \in E} \frac{1}{2}\left(1 - Z_j Z_k\right)$.
Due to the linearity of the expectation value we can compute the expectation value for the edges individually $$f_{(i,k)}(\gamma,\beta) = \langle \psi_1(\gamma,\beta)|\;\frac{1}{2}\left(1 - Z_i Z_k\right)\;|\psi_1(\gamma,\beta)\rangle. $$ For the butterfly graph as plotted above, we observe that there are only two kinds of edges $A = \{(0,1),(3,4)\}$ and $B = \{(0,2),(1,2),(2,3),(2,4)\}$. The edges in $A$ only have two neighboring edges, while the edges in $B$ have four. You can convince yourself that we only need to compute the expectation of a single edge in each set since the other expectation values will be the same. This means that we can compute $F_1(\gamma,\beta) = 2 f_A(\gamma,\beta) + 4f_B(\gamma,\beta)$ by computing only two expectation values. Note, that following the argument as outlined in [section 4.2.2](#section_422), all the gates that do not intersect with the Pauli operator $Z_0Z_1$ or $Z_0Z_2$ commute and cancel out so that we only need to compute $$f_A(\gamma,\beta) = \frac{1}{2}\left(1 - \langle +^3|U_{21}(\gamma)U_{02}(\gamma)U_{01}(\gamma)X_{0}(\beta)X_{1}(\beta)\;Z_0Z_1\; X^\dagger_{1}(\beta)X^\dagger_{0}(\beta)U^\dagger_{01}(\gamma)U^\dagger_{02}(\gamma)U^\dagger_{12}(\gamma) | +^3 \rangle \right)$$ and $$f_B(\gamma,\beta) = \frac{1}{2}\left(1 - \langle +^5|U_{21}(\gamma)U_{24}(\gamma)U_{23}(\gamma)U_{01}(\gamma)U_{02}(\gamma)X_{0}(\beta)X_{2}(\beta)\;Z_0Z_2\; X^\dagger_{0}(\beta)X^\dagger_{2}(\beta)U^\dagger_{02}(\gamma)U^\dagger_{01}(\gamma)U^\dagger_{12}(\gamma)U^\dagger_{23}(\gamma)U^\dagger_{24}(\gamma) | +^5 \rangle \right)$$ How complex these expectation values become in general depends only on the degree of the graph we are considering and is independent of the size of the full graph if the degree is bounded.
A direct evaluation of this expression with $U_{k,l}(\gamma) = \exp\frac{i\gamma}{2}\left(1 - Z_kZ_l\right)$ and $X_k(\beta) = \exp(i\beta X_k)$ yields $$f_A(\gamma,\beta) = \frac{1}{2}\left(\sin(4\gamma)\sin(4\beta) + \sin^2(2\beta)\sin^2(2\gamma)\right)$$ and $$f_B(\gamma,\beta) = \frac{1}{2}\left(1 - \sin^2(2\beta)\sin^2(2\gamma)\cos^2(4\gamma) - \frac{1}{4}\sin(4\beta)\sin(4\gamma)(1+\cos^2(4\gamma))\right) $$ These results can now be combined as described above, and the expectation value is therefore given by $$ F_1(\gamma,\beta) = 3 - \left(\sin^2(2\beta)\sin^2(2\gamma)- \frac{1}{2}\sin(4\beta)\sin(4\gamma)\right)\left(1 + \cos^2(4\gamma)\right),$$ We plot the function $F_1(\gamma,\beta)$ and use a simple grid search to find the parameters $(\gamma^*,\beta^*)$ that maximize the expectation value. ``` # Evaluate the function step_size = 0.1; a_gamma = np.arange(0, np.pi, step_size) a_beta = np.arange(0, np.pi, step_size) a_gamma, a_beta = np.meshgrid(a_gamma,a_beta) F1 = 3-(np.sin(2*a_beta)**2*np.sin(2*a_gamma)**2-0.5*np.sin(4*a_beta)*np.sin(4*a_gamma))*(1+np.cos(4*a_gamma)**2) # Grid search for the minimizing variables result = np.where(F1 == np.amax(F1)) a = list(zip(result[0],result[1]))[0] gamma = a[0]*step_size; beta = a[1]*step_size; # Plot the expetation value F1 fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(a_gamma, a_beta, F1, cmap=cm.coolwarm, linewidth=0, antialiased=True) ax.set_zlim(1,4) ax.zaxis.set_major_locator(LinearLocator(3)) ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f')) plt.show() #The smallest parameters and the expectation can be extracted print('\n --- OPTIMAL PARAMETERS --- \n') print('The maximal expectation value is: M1 = %.03f' % np.amax(F1)) print('This is attained for gamma = %.03f and beta = %.03f' % (gamma,beta)) ``` ### 5.3 Quantum circuit<a id="implementation_sec53"></a> With these parameters we can now construct the circuit that prepares the trial state for the graph $G = (V,E)$ described
above with vertex set $V = \{0,1,2,3,4\}$ and the edges are $E = \{(0,1),(0,2),(1,2),(3,2),(3,4),(4,2)\}$. The circuit is going to require $n = 5$ qubits and we prepare the state $$ |\psi_1(\gamma ,\beta)\rangle = e^{ -i\beta B } e^{ -i\gamma H } |+\rangle^n. $$ Recall that the terms are given by $B = \sum_{k \in V} X_k$ and $H = \sum_{(k,m) \in E} \frac{1}{2}\left(1 - Z_kZ_m\right)$. To generate the circuit we follow these steps: - We first implement 5 Hadamard $H$ gates to generate the uniform superposition. - This is followed by $6$ Ising type gates $U_{k,l}(\gamma)$ with angle $\gamma$ along the edges $(k,l) \in E$. This gate can be expressed in terms of the native Qiskit gates as $$ U_{k,l}(\gamma) = C_{u1}(-2\gamma)_{k,l}u1(\gamma)_k u1(\gamma)_l$$ - Lastly we apply single qubit $X$ rotations $X_k(\beta)$ for every vertex $k \in V$ with $\beta$ as angle. This gate is directly parametrized as $X_k(\beta) = R_x(2\beta)_k$ in Qiskit. - In the last step we measure the qubits in the computational basis, i.e. we perform a $Z$ measurement and record the resulting bit-string $x \in \{0,1\}^5$. ``` # prepare the quantum and classical resisters QAOA = QuantumCircuit(len(V), len(V)) # apply the layer of Hadamard gates to all qubits QAOA.h(range(len(V))) QAOA.barrier() # apply the Ising type gates with angle gamma along the edges in E for edge in E: k = edge[0] l = edge[1] QAOA.cp(-2*gamma, k, l) QAOA.p(gamma, k) QAOA.p(gamma, l) # then apply the single qubit X rotations with angle beta to all qubits QAOA.barrier() QAOA.rx(2*beta, range(len(V))) # Finally measure the result in the computational basis QAOA.barrier() QAOA.measure(range(len(V)),range(len(V))) ### draw the circuit for comparison QAOA.draw() ``` ### 5.4 Cost function evaluation<a id="implementation_sec54"></a> Finally, we need a routine to compute the cost function value from the bit string.
This is necessary to decide whether we have found a "good candidate" bit string $x$ but could also be used to estimate the expectation value $F_1(\gamma,\beta)$ in settings where the expectation value can not be evaluated directly. ``` # Compute the value of the cost function def cost_function_C(x,G): E = G.edges() if( len(x) != len(G.nodes())): return np.nan C = 0; for index in E: e1 = index[0] e2 = index[1] w = G[e1][e2]['weight'] C = C + w*x[e1]*(1-x[e2]) + w*x[e2]*(1-x[e1]) return C ``` ## 5a. Running QAOA on a simulator<a id="implementationsim"></a> We first run the algorithm on a local QASM simulator. ``` # run on local simulator backend = Aer.get_backend("qasm_simulator") shots = 10000 TQAOA = transpile(QAOA, backend) qobj = assemble(TQAOA) QAOA_results = backend.run(qobj).result() plot_histogram(QAOA_results.get_counts(),figsize = (8,6),bar_labels = False) ``` #### Evaluate the data from the simulation Let us now proceed to calculate the relevant information from the simulated data.
We will use the obtained results to - Compute the mean energy and check whether it agrees with the theoretical prediction - Report the sampled bit string $x^*$ with the largest observed cost function $C(x^*)$ - Plot the Histogram of the energies to see whether it indeed concentrates around the predicted mean ``` # Evaluate the data from the simulator counts = QAOA_results.get_counts() avr_C = 0 max_C = [0,0] hist = {} for k in range(len(G.edges())+1): hist[str(k)] = hist.get(str(k),0) for sample in list(counts.keys()): # use sampled bit string x to compute C(x) x = [int(num) for num in list(sample)] tmp_eng = cost_function_C(x,G) # compute the expectation value and energy distribution avr_C = avr_C + counts[sample]*tmp_eng hist[str(round(tmp_eng))] = hist.get(str(round(tmp_eng)),0) + counts[sample] # save best bit string if( max_C[1] < tmp_eng): max_C[0] = sample max_C[1] = tmp_eng M1_sampled = avr_C/shots print('\n --- SIMULATION RESULTS ---\n') print('The sampled mean value is M1_sampled = %.02f while the true value is M1 = %.02f \n' % (M1_sampled,np.amax(F1))) print('The approximate solution is x* = %s with C(x*) = %d \n' % (max_C[0],max_C[1])) print('The cost function is distributed as: \n') plot_histogram(hist,figsize = (8,6),bar_labels = False) ``` ## 5b. Running QAOA on a real quantum device<a id="implementationdev"></a> We then see how the same circuit can be executed on real-device backends. ``` # Use the IBMQ essex device provider = IBMQ.load_account() backend = provider.get_backend('ibmq_santiago') shots = 2048 TQAOA = transpile(QAOA, backend) qobj = assemble(TQAOA, shots=shots) job_exp = backend.run(qobj) job_monitor(job_exp) exp_results = job_exp.result() plot_histogram(exp_results.get_counts(),figsize = (10,8),bar_labels = False) ``` #### Evaluate the data from the experiment We can now repeat the same analysis as before and compare the experimental result. 
``` # Evaluate the data from the experiment counts = exp_results.get_counts() avr_C = 0 max_C = [0,0] hist = {} for k in range(len(G.edges())+1): hist[str(k)] = hist.get(str(k),0) for sample in list(counts.keys()): # use sampled bit string x to compute C(x) x = [int(num) for num in list(sample)] tmp_eng = cost_function_C(x,G) # compute the expectation value and energy distribution avr_C = avr_C + counts[sample]*tmp_eng hist[str(round(tmp_eng))] = hist.get(str(round(tmp_eng)),0) + counts[sample] # save best bit string if( max_C[1] < tmp_eng): max_C[0] = sample max_C[1] = tmp_eng M1_sampled = avr_C/shots print('\n --- EXPERIMENTAL RESULTS ---\n') print('The sampled mean value is M1_sampled = %.02f while the true value is M1 = %.02f \n' % (M1_sampled,np.amax(F1))) print('The approximate solution is x* = %s with C(x*) = %d \n' % (max_C[0],max_C[1])) print('The cost function is distributed as: \n') plot_histogram(hist,figsize = (8,6),bar_labels = False) ``` ## 6. Problems<a id="problems"></a> 0. The QAOA algorithm produces a bit string, is this string the optimal solution for this graph? Compare the experimental results from the superconducting chip with the results from the local QASM simulation. 1. We have computed the cost function $F_1$ analytically in [section 5.2](#implementation_sec52). Verify the steps and compute $f_A(\gamma,\beta)$ as well $f_B(\gamma,\beta)$. 2. We have given an exact expression for $F_1$ in the Qiskit implementation. -Write a routine to estimate the expectation value $F_1(\gamma,\beta)$ from the samples obtained in the result (hint: use the function cost_function_C(x,G) from [section 5.4](#implementation_sec54) and the evaluation of the data in both section [5.a / 5.b](#implementationsim)) -Use an optimization routine,e.g. SPSA from the VQE example in this tutorial, to optimize the parameters in the sampled $F_1(\gamma,\beta)$ numerically. Do you find the same values for $\gamma^*,\beta^*$ ? 3. 
The Trial circuit in [section 5.3](#implementation_sec53) corresponds to depth $p=1$ and was directly aimed at being compatible with the Hardware. -Use the routine from exercise 2 to evaluate the cost functions $F_p(\gamma,\beta)$ for $p=2,3$. What do you expect to see in the actual Hardware? -Generalize this class of trial state to other candidate wave functions, such as the Hardware efficient ansatz of Ref. [4](#references). 4. Consider an example of $MAX \;\; 3-SAT$ as discussed in the example section and modify the function cost_function_C(c,G) from [section 5.4](#implementation_sec54) you have used to compute $F_p$ accordingly. Run the QAOA algorithm for this instance of $MAX \; 3-SAT$ using the hardware efficient algorithm and analyze the results. ## 7. References<a id="references"></a> 1. Garey, Michael R.; David S. Johnson (1979). Computers and Intractability: A Guide to the Theory of NP-Completeness. W. H. Freeman. ISBN 0-7167-1045-5 2. Goemans, Michel X., and David P. Williamson. [Journal of the ACM (JACM) 42.6 (1995): 1115-1145](http://www-math.mit.edu/~goemans/PAPERS/maxcut-jacm.pdf). 3. Farhi, Edward, Jeffrey Goldstone, and Sam Gutmann. "A quantum approximate optimization algorithm." arXiv preprint [arXiv:1411.4028 (2014)](https://arxiv.org/abs/1411.4028). 4. Kandala, Abhinav, et al. "Hardware-efficient variational quantum eigensolver for small molecules and quantum magnets." [Nature 549.7671 (2017): 242](https://www.nature.com/articles/nature23879). 5. Farhi, Edward, et al. "Quantum algorithms for fixed qubit architectures." arXiv preprint [arXiv:1703.06199 (2017)](https://arxiv.org/abs/1703.06199). 6. Spall, J. C. (1992), [IEEE Transactions on Automatic Control, vol. 37(3), pp. 332–341](https://ieeexplore.ieee.org/document/119632). 7. Michael Streif and Martin Leib "Training the quantum approximate optimization algorithm without access to a quantum processing unit" (2020) [Quantum Sci. Technol. 
5 034008](https://doi.org/10.1088/2058-9565/ab8c2b) ``` import qiskit qiskit.__qiskit_version__ ```
github_jupyter
This framework is concerned with comparing two sets of data, for instance source brain and target brain. It does not take care of trying multiple combinations of source data (such as multiple layers in models), but only makes direct comparisons. ## Metrics A metric tells us how similar two assemblies (sets of data) are to each other. For comparison, they might be re-mapped (neural predictivity) or compared in sub-spaces (RDMs). ### Pre-defined metrics Brain-Score comes with many standard metrics used in the field. One standardly used metric is neural predictivity: (1) it uses linear regression to linearly map between two systems (e.g. from model activations to neural firing rates), (2) it computes the correlation between predicted firing rates on held-out images, and (3) wraps all of that in cross-validation to estimate generalization. #### Neural Predictivity with Pearson Correlation ``` from brainscore.metrics.regression import CrossRegressedCorrelation, pls_regression, pearsonr_correlation regression = pls_regression() # 1: define the regression correlation = pearsonr_correlation() # 2: define the correlation metric = CrossRegressedCorrelation(regression, correlation) # 3: wrap in cross-validation ``` We can then run this metric on some datasets to obtain a score: ``` import numpy as np from numpy.random import RandomState from brainio_base.assemblies import NeuroidAssembly rnd = RandomState(0) # for reproducibility assembly = NeuroidAssembly((np.arange(30 * 25) + rnd.standard_normal(30 * 25)).reshape((30, 25)), coords={'image_id': ('presentation', np.arange(30)), 'object_name': ('presentation', ['a', 'b', 'c'] * 10), 'neuroid_id': ('neuroid', np.arange(25)), 'region': ('neuroid', [0] * 25)}, dims=['presentation', 'neuroid']) prediction, target = assembly, assembly # we're testing how well the metric can predict the dataset itself score = metric(source=prediction, target=target) print(score) ``` The score values above are aggregates over splits and neuroids.
We can also check the raw values, i.e. the value per split and per neuroid. ``` print(score.raw) ``` #### RDM Brain-Score also includes comparison methods not requiring any fitting, such as the Representational Dissimilarity Matrix (RDM). ``` from brainscore.metrics.rdm import RDMCrossValidated metric = RDMCrossValidated() rdm_score = metric(assembly1=assembly, assembly2=assembly) print(rdm_score) ``` ### Custom metrics A metric simply returns a Score for the similarity of two assemblies. For instance, the following computes the Euclidean distance of regressed and target neuroids. ``` from brainio_base.assemblies import DataAssembly from brainscore.metrics.transformations import CrossValidation from brainscore.metrics.xarray_utils import XarrayRegression from brainscore.metrics.regression import LinearRegression class DistanceMetric: def __init__(self): regression = LinearRegression() self._regression = XarrayRegression(regression=regression) self._cross_validation = CrossValidation() def __call__(self, source, target): return self._cross_validation(source, target, apply=self._apply, aggregate=self._aggregate) def _apply(self, source_train, target_train, source_test, target_test): self._regression.fit(source_train, target_train) prediction = self._regression.predict(source_test) score = self._compare(prediction, target_test) return score def _compare(self, prediction, target): prediction, target = prediction.sortby('image_id').sortby('neuroid_id'), target.sortby('image_id').sortby('neuroid_id') assert all(prediction['image_id'].values == target['image_id'].values) assert all(prediction['neuroid_id'].values == target['neuroid_id'].values) difference = np.abs(target.values - prediction.values) # lower is better return DataAssembly(difference, coords=target.coords, dims=target.dims) def _aggregate(self, scores): return scores.median('neuroid').mean('presentation') metric = DistanceMetric() score = metric(assembly, assembly) print(score) ```
github_jupyter
``` # default_exp gen_cb ``` Contains utilities for generating calibration board images # Import ``` #export import copy import descartes import matplotlib.pyplot as plt import numpy as np import skimage.draw import skimage.filters import skimage.transform from IPython.core.debugger import set_trace from shapely import affinity from shapely.geometry import Point, Polygon ``` # Utility ``` #export def meshgrid2ps(r_x, r_y, order='C'): xs, ys = np.meshgrid(r_x, r_y) return np.c_[xs.ravel(order), ys.ravel(order)] #export def _xform_ps(ps, mat): ps, mat = map(np.array, [ps,mat]) ps_aug = np.concatenate([ps, np.ones((ps.shape[0], 1))], axis=1) return (mat@ps_aug.T).T #export def affine_ps(ps, mat): # Assumes last row of mat is [0, 0, 1] return _xform_ps(ps, mat)[:, 0:2] #export def homography_ps(ps, mat): ps = _xform_ps(ps, mat) return ps[:, 0:2]/ps[:, 2:] #export def rotate_ps(ps, deg): theta = np.radians(deg) R = [[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [ 0, 0, 1]] return affine_ps(ps, R) #export def get_circle_poly(p, r): return Point(p).buffer(r) #export def rotate_poly(poly, deg): return affinity.rotate(poly, deg, origin=(0,0)) #export def affine_poly(poly, mat): mat = np.array(mat) return affinity.affine_transform(poly, np.r_[mat[0:2,0:2].ravel(), mat[0,2], mat[1,2]]) #export def poly2coords(poly): if isinstance(poly, Polygon): poly = [poly] coords = [] for idx, p in enumerate(poly): coord = {} coord['ext'] = np.array(p.exterior.coords) coord['int'] = [] for i in p.interiors: coord['int'].append(np.array(i.coords)) coords.append(coord) return coords #export def _xform_coords(coords, mat, f_ps): coords = copy.deepcopy(coords) for coord in coords: coord['ext'] = f_ps(coord['ext'], mat) for idx in range(len(coord['int'])): coord['int'][idx] = f_ps(coord['int'][idx], mat) return coords #export def affine_coords(coords, mat): return _xform_coords(coords, mat, affine_ps) #export def homography_coords(coords, mat): return 
_xform_coords(coords, mat, homography_ps) #export def plot_coords(coords): plt.figure(figsize=(10,10)) for coord in coords: plt.scatter(coord['ext'][:,0], coord['ext'][:,1], c='g') for ps_int in coord['int']: plt.scatter(ps_int[:,0], ps_int[:,1], c='r') plt.axis('equal') #export def euler2R(euler): theta_x, theta_y, theta_z = euler R_x = [[1, 0, 0], [0, np.cos(theta_x), -np.sin(theta_x)], [0, np.sin(theta_x), np.cos(theta_x)]] R_y = [[ np.cos(theta_y), 0, np.sin(theta_y)], [ 0, 1, 0], [-np.sin(theta_y), 0, np.cos(theta_y)]] R_z = [[np.cos(theta_z), -np.sin(theta_z), 0], [np.sin(theta_z), np.cos(theta_z), 0], [ 0, 0, 1]]; R_x, R_y, R_z = map(np.array, [R_x, R_y, R_z]) return R_z@R_y@R_x #export def ARt2H(A, R, t): A, R, t = map(np.array,[A, R, t]) return A@np.c_[R[:,0:2], t] #export def get_bb(ps): return np.array([[ps[:,0].min(), ps[:,1].min()], [ps[:,0].max(), ps[:,1].max()]]) ``` # Fiducal Marker Polygons ``` #export def get_fiducial_poly(num): # Returns fiducial marker normalized and centered around (0, 0) deg_pad = 46.1564 # Get "outer" and "center" base polygons poly_o = get_circle_poly((0,0), 0.5).difference(get_circle_poly((0,0), 1/3)) poly_c = get_circle_poly((0,0), 1/6) def _triangle(deg=0): p1 = np.array([[0, 0]]) p2 = rotate_ps([[0, 1]], -deg_pad/2) p3 = rotate_ps([[0, 1]], deg_pad/2) return rotate_poly(Polygon(np.concatenate([p1, p2, p3])), deg) def _circles(deg=0): poly_c1 = get_circle_poly(rotate_ps([[0, 5/12]], -deg_pad/2).ravel(), 1/12) poly_c2 = get_circle_poly(rotate_ps([[0, 5/12]], deg_pad/2).ravel(), 1/12) return rotate_poly(poly_c1, deg), rotate_poly(poly_c2, deg) def _split(poly_o, deg): poly_t = _triangle(deg) poly_c1, poly_c2 = _circles(deg) return (poly_o.difference(poly_t) .union(poly_c1) .union(poly_c2)) # Modify based on marker num if num == 1: pass # First marker has no splits elif 2 <= num <= 4: for deg in np.linspace(0, 360, num+1)[:-1]: poly_o = _split(poly_o, deg) else: raise RuntimeError(f'Invalid fiducial marker number: {num}') # 
Return polygon return poly_o.union(poly_c) # NOTE: I *think* this ordering may matter for drawing ``` ### Test ``` get_fiducial_poly(num=1) get_fiducial_poly(num=2) get_fiducial_poly(num=3) get_fiducial_poly(num=4) ``` # Target polygons ``` #export def get_checker_poly(i, j): # Returns checker target normalized and centered around (0, 0) poly_s1 = Polygon([[-.5, 0.5], [0.0, 0.5], [0.0, 0.0], [-.5, 0.0]]) poly_s2 = Polygon([[0.0, 0.0], [0.5, 0.0], [0.5, -.5], [0.0, -.5]]) poly = poly_s1.union(poly_s2) if np.mod(i+j, 2) == 1: poly = rotate_poly(poly, 90) return poly ``` ### Test ``` get_checker_poly(0,0) get_checker_poly(1,0) get_checker_poly(0,1) get_checker_poly(1,1) ``` # Calibration board polygon ``` #export def get_ps_b(opts): h_cb, w_cb = opts['height_cb'], opts['width_cb'] return meshgrid2ps([-w_cb/2, w_cb/2], [-h_cb/2, h_cb/2], 'F') #export def get_ps_fp(opts): h_fp, w_fp = opts['height_fp'], opts['width_fp']; return meshgrid2ps([-w_fp/2, w_fp/2], [-h_fp/2, h_fp/2], 'F') #export def get_ps_t(opts): s_t, num_t_w, num_t_h = opts['spacing_target'], opts['num_targets_width'], opts['num_targets_height'] w_t, h_t = s_t*(num_t_w-1), s_t*(num_t_h-1) return meshgrid2ps(np.linspace(-w_t/2, w_t/2, num_t_w), np.linspace(h_t/2, -h_t/2, num_t_h)) #export def get_poly_cb(f_fiducial_poly, f_target_poly, opts): # Get board ps_b = get_ps_b(opts) poly_cb = Polygon(ps_b[[0,2,3,1], :]) # Subtract fiducial markers s_f = opts['size_fiducial'] ps_fp = get_ps_fp(opts) for idx, p_fp in enumerate(ps_fp): fiducial_poly = f_fiducial_poly(idx+1) poly_cb = poly_cb.difference(affine_poly(fiducial_poly, [[s_f, 0, p_fp[0]], [ 0, s_f, p_fp[1]], [ 0, 0, 1]])) # Subtract targets sz_t, num_t_w, num_t_h = opts['size_target'], opts['num_targets_width'], opts['num_targets_height'] ps_t = get_ps_t(opts) for idx, p_t in enumerate(ps_t): target_poly = f_target_poly(*np.unravel_index(idx, (num_t_h, num_t_w))) poly_cb = poly_cb.difference(affine_poly(target_poly, [[sz_t, 0, p_t[0]], [ 0, sz_t, p_t[1]], [ 
#export
def draw_ps(ps, img, val):
    """Rasterize the polygon with vertices ``ps`` (Nx2, x/y order) into ``img``.

    Filled pixels are set to ``val``. ``img`` is mutated in place and also
    returned. An empty vertex array leaves the image untouched.
    """
    rows, cols = [], []
    if ps.shape[0] > 0:
        # skimage.draw.polygon works in (row, col) order while our points are
        # (x, y) — hence the swapped outputs and the transposed clip shape.
        cols, rows = skimage.draw.polygon(ps[:, 0], ps[:, 1], img.T.shape)
    img[rows, cols] = val
    return img
# Create a random synthetic camera view of the calibration board, the fiducial
# bounding boxes, and a per-fiducial instance mask.

# Checkerboard geometry
opts = {}
opts['height_cb'] = 50.8
opts['width_cb'] = 50.8
opts['num_targets_height'] = 16
opts['num_targets_width'] = 16
opts['spacing_target'] = 2.032
opts['height_fp'] = 42.672
opts['width_fp'] = 42.672
opts['size_fiducial'] = 2.5*opts['spacing_target']
opts['size_target'] = opts['spacing_target']

poly_cb = get_poly_cb(get_fiducial_poly, get_checker_poly, opts)
coords_cb_w = poly2coords(poly_cb)

# Camera intrinsics: principal point at the image center
h, w = 1536, 2048
x_o, y_o = (w-1)/2, (h-1)/2
alpha = 3650
A = [[alpha,     0, x_o],
     [    0, alpha, y_o],
     [    0,     0,   1]]

# Random extrinsics: translation direction (small tilt) and board orientation
t_z = np.random.uniform(150, 300)
theta_t_x = np.random.uniform(-np.pi/64, np.pi/64)
theta_t_y = np.random.uniform(-np.pi/32, np.pi/32)
theta_x = np.random.uniform(-np.pi/4, np.pi/4)
theta_y = np.random.uniform(-np.pi/4, np.pi/4)
theta_z = np.random.uniform(-np.pi, np.pi)
t = euler2R([theta_t_x, theta_t_y, 0])@np.array([[0], [0], [t_z]])
R = euler2R([theta_x, theta_y, theta_z])
H = ARt2H(A, R, t)

# Project world-frame board coordinates into the image and render the board
coords_cb_p = homography_coords(coords_cb_w, H)
img = draw_coords(coords_cb_p, np.zeros((h, w), dtype=np.float32))

# Fiducial positions, bounding boxes, and instance mask (label idx+1 per marker)
ps_fp_w = get_ps_fp(opts)
ps_fp_p = homography_ps(ps_fp_w, H)
s_f = opts['size_fiducial']
bbs_f_p = []
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int gives the same platform-default integer dtype.
mask_f = np.zeros((h, w), dtype=int)
for idx, p_fp_w in enumerate(ps_fp_w):
    ps_f_p = homography_ps(np.array(get_circle_poly(p_fp_w, s_f/2).exterior.coords), H)
    bbs_f_p.append(get_bb(ps_f_p))
    draw_ps(ps_f_p, mask_f, idx+1)

# Visualize the rendered board with fiducial labels/boxes, then the mask
plt.figure(1, figsize=(10,10))
plt.imshow(img, vmin=0, vmax=1, cmap='gray', interpolation='bilinear')
for idx, (p_fp_p, bb_f_p) in enumerate(zip(ps_fp_p, bbs_f_p)):
    plt.text(p_fp_p[0], p_fp_p[1], str(idx+1),
             horizontalalignment='center', verticalalignment='center',
             fontsize=14, weight='bold', color='red')
    plt.plot(bb_f_p[[0,0,1,1,0],0], bb_f_p[[0,1,1,0,0],1], 'r')
plt.figure(2, figsize=(10,10))
plt.imshow(mask_f)
github_jupyter
``` %matplotlib inline ``` Transfer Learning tutorial ========================== Source : http://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html **Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_ In this tutorial, you will learn how to train your network using transfer learning. You can read more about the transfer learning at `cs231n notes <http://cs231n.github.io/transfer-learning/>`__ Quoting this notes, In practice, very few people train an entire Convolutional Network from scratch (with random initialization), because it is relatively rare to have a dataset of sufficient size. Instead, it is common to pretrain a ConvNet on a very large dataset (e.g. ImageNet, which contains 1.2 million images with 1000 categories), and then use the ConvNet either as an initialization or a fixed feature extractor for the task of interest. These two major transfer learning scenarios looks as follows: - **Finetuning the convnet**: Instead of random initializaion, we initialize the network with a pretrained network, like the one that is trained on imagenet 1000 dataset. Rest of the training looks as usual. - **ConvNet as fixed feature extractor**: Here, we will freeze the weights for all of the network except that of the final fully connected layer. This last fully connected layer is replaced with a new one with random weights and only this layer is trained. ``` # License: BSD # Author: Sasank Chilamkurthy from __future__ import print_function, division import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler from torch.autograd import Variable import numpy as np import torchvision from torchvision import datasets, models, transforms import matplotlib.pyplot as plt import time import os import copy plt.ion() # interactive mode ``` Load Data --------- We will use torchvision and torch.utils.data packages for loading the data. 
The problem we're going to solve today is to train a model to classify **ants** and **bees**. We have about 120 training images each for ants and bees. There are 75 validation images for each class. Usually, this is a very small dataset to generalize upon, if trained from scratch. Since we are using transfer learning, we should be able to generalize reasonably well. This dataset is a very small subset of imagenet. .. Note :: Download the data from `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_ and extract it to the current directory. ``` # Data augmentation and normalization for training # Just normalization for validation data_transforms = { 'train': transforms.Compose([ transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'val': transforms.Compose([ transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir = '../hymenoptera_data' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes use_gpu = torch.cuda.is_available() ``` Visualize a few images ^^^^^^^^^^^^^^^^^^^^^^ Let's visualize a few training images so as to understand the data augmentations. 
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` and return it loaded with the best-validation weights.

    Each epoch runs a training pass followed by a validation pass over the
    module-level ``dataloaders``; the LR ``scheduler`` is stepped once per
    epoch. Loss and accuracy are printed per phase, and a deep copy of the
    weights with the highest validation accuracy seen so far is kept.

    Parameters: criterion is the loss function; optimizer updates the model
    parameters; scheduler is an LR scheduler from torch.optim.lr_scheduler.
    """
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()
                model.train(True)  # Set model to training mode
            else:
                model.train(False)  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for data in dataloaders[phase]:
                # get the inputs
                inputs, labels = data

                # wrap them in Variable
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)

                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()

                # statistics
                # BUG FIX: loss.data[0] indexes a 0-dim tensor and raises on
                # PyTorch >= 0.5; .item() extracts the Python scalar instead.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data).item()

            epoch_loss = running_loss / dataset_sizes[phase]
            # Cast explicitly so integer division can never truncate accuracy.
            epoch_acc = float(running_corrects) / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
connected layer. ``` model_ft = models.resnet18(pretrained=True) num_ftrs = model_ft.fc.in_features model_ft.fc = nn.Linear(num_ftrs, 2) if use_gpu: model_ft = model_ft.cuda() criterion = nn.CrossEntropyLoss() # Observe that all parameters are being optimized optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9) # Decay LR by a factor of 0.1 every 7 epochs exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1) ``` Train and evaluate ^^^^^^^^^^^^^^^^^^ It should take around 15-25 min on CPU. On GPU though, it takes less than a minute. ``` model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25) visualize_model(model_ft) ``` ConvNet as fixed feature extractor ---------------------------------- Here, we need to freeze all the network except the final layer. We need to set ``requires_grad == False`` to freeze the parameters so that the gradients are not computed in ``backward()``. You can read more about this in the documentation `here <http://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__. ``` model_conv = torchvision.models.resnet18(pretrained=True) for param in model_conv.parameters(): param.requires_grad = False # Parameters of newly constructed modules have requires_grad=True by default num_ftrs = model_conv.fc.in_features model_conv.fc = nn.Linear(num_ftrs, 2) if use_gpu: model_conv = model_conv.cuda() criterion = nn.CrossEntropyLoss() # Observe that only parameters of final layer are being optimized as # opoosed to before. optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9) # Decay LR by a factor of 0.1 every 7 epochs exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1) ``` Train and evaluate ^^^^^^^^^^^^^^^^^^ On CPU this will take about half the time compared to previous scenario. This is expected as gradients don't need to be computed for most of the network. However, forward does need to be computed. 
``` model_conv = train_model(model_conv, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=25) visualize_model(model_conv) plt.ioff() plt.show() ```
github_jupyter
``` %pylab inline from pyiron import Project data_pr = Project("../../datasets") if len(data_pr.job_table()) == 0: data_pr.unpack("Cu_training_archive") data_pr.job_table() ``` We will use smalles dataset for fitting, as real dataset took much more time, and go outside of the scope of the workshop ``` data_job = data_pr.load('df1_A1_A2_A3_EV_elast_phon') data_job.to_pandas() ``` # Fitting project ``` fit_pr = Project("pacemaker_fit") job = fit_pr.create_job(job_type=fit_pr.job_type.PaceMakerJob, job_name="df1_cut5_pyace") ``` ## Fit ``` cutoff = 5.0 # potential cutoff ``` Potential specification ``` job.input["cutoff"] = cutoff # global potential cutoff job.input["potential"]= { "deltaSplineBins": 0.001, # spline bins, used for fast radial functions evaluations "element": "Cu", # element - copper "npot": "FinnisSinclairShiftedScaled", # embedding functions "ndensity": 2, # number of densities: rho_1 and rho_2 "fs_parameters": [1, 1, 1, 0.5], # parameters of embedding functions (for two densities) #this embedding function corresponds to rho_1 + sqrt(abs(rho_2)) , with some modifications # radial base functions specification "radbase": "ChebExpCos", "radparameters": [5.25], "rcut": cutoff, "dcut": 0.01, "NameOfCutoffFunction": "cos", # MOST IMPORTANT: potential "shape" i.e.: # number of orders (rank), maximum index of radial functions (nradmax) and orbital moments (lmax) for each rank "rankmax": 3, "nradmax": [7,2,1], "lmax": [0,2,1], } ``` Fitting settings: ``` job.input["fit"]= { # loss function specification 'loss': { 'kappa': 0.5, # relative weight of forces residuals # coefficients L1-L2 regularization 'L1_coeffs': 5e-7, # L1-regularization 'L2_coeffs': 5e-7, # L2-regularization 'w1_coeffs': 1, 'w2_coeffs': 1, #radial smoothness regularization 'w0_rad': 1e-4, # for radial functions values 'w1_rad': 1e-4, # for radial functions first derivatives 'w2_rad': 1e-4, # for radial functions second derivatives }, # minimization setup: 'optimizer': 'BFGS', # scipy BFGS 
algorithm 'maxiter': 150, # max number of iterations } ``` Assign the dataset to fit on ``` job.structure_data=data_job ``` Run fitting job ``` job.run() fit_pr.job_table(full_table=True) ``` # Analyse the fitting results loss function ``` plt.plot(job["output/log/loss"]) plt.yscale('log') plt.xlabel("# iter") plt.ylabel("Loss"); ``` RMSE of energies per atoms ``` plt.plot(job["output/log/rmse_energy"]) plt.yscale('log') plt.xlabel("# iter") plt.ylabel("Energy RMSE, meV/atom"); ``` RMSE of forces norm ``` plt.plot(job["output/log/rmse_forces"]) plt.yscale('log') plt.xlabel("# iter") plt.ylabel("Forces RMSE, meV/Ang/structure"); ``` # Overview of the fitted potential internals ``` from pyace import * final_potential = job.get_final_potential() final_basis_set = ACEBBasisSet(final_potential) ``` For single-species potential there is only one **species block** for *Cu*: ``` len(final_potential.funcspecs_blocks) Cu_block = final_potential.funcspecs_blocks[0] ``` Basic definitions and notations: * Radial functions: $R_{nl}(r) = \sum_k c_{nlk} g_k(r)$ * Spherical harmonics: $ Y_{lm}(\hat{\pmb{r}}_{ji}) $ * Basis function: $\phi_{\mu_j \mu_i nlm}(\pmb{r}_{ji}) = R_{nl}^{\mu_j \mu_i}(r_{ji}) Y_{lm}(\hat{\pmb{r}}_{ji}) $ * Atomic base (A-functions): $ A_{i \mu n l m} = \sum_j \delta_{\mu \mu_j} \phi_{\mu_j\mu_i nlm}(\pmb{r}_{ji}) $ * Product of atomic base: $ \pmb{A}_{i\pmb{\mu n l m}} = \prod_{t = 1}^{\nu} A_{i \mu_t n_t l_t m_t} $ * Equivariant basis (B-functions): $ {B}_{i\pmb{\mu n l L}} = \sum_{\pmb{m}} \left( \begin{array}{c} \pmb{l m} \\ \pmb{L M} \end{array} L_R \right) \pmb{A}_{i\pmb{\mu n l m}} $ , where $ \left(\begin{array}{c} \pmb{l m} \\ \pmb{L M}\end{array} L_R\right) $ is *generalized Clebsh-Gordan coefficients* * Atomic property (densities) $ \rho_i^{(p)} = \sum_{\pmb{\mu n l L}} {c}^{(p)}_{\mu_i\pmb{\mu n l L}} {B}_{i\pmb{\mu n l L}} $ * Atomic energy: $ E_i = F(\rho_i^{(1)}, \dots,\rho_i^{(P)} ) $, where $F$ is embedding function radial coefficients 
$c_{nlk}$: ``` np.shape(Cu_block.radcoefficients) ``` Visualize the radial basis functions ($g_k$) and radial functions ($R_{nl}$) and their derivatives: ``` RadialFunctionsVisualization(final_basis_set).plot() ``` Total number of basis functions ``` len(Cu_block.funcspecs) ``` List of B-basis functions $ {B}_{i\pmb{\mu n l L}}$: $\pmb{\mu} = $ `elements`, $\pmb{n} = $ `ns`, $\pmb{l} = $ `ls`, $\pmb{L} = $ `LS`) and corresponding coefficients $ {c}^{(p)}_{\mu_i\pmb{\mu n l L}} =$ `coeffs` for two densities ``` Cu_block.funcspecs ``` Compare to potential "shape" i.e.: * "rankmax": 3 * "nradmax": [7,2,1] * "lmax": [0,2,1] ``` print("Trainable parameters number") print() print("B-functions coefficients: ",Cu_block.ndensityi * len(Cu_block.funcspecs)) print("Radial functions coefficients: ",len(np.array(Cu_block.radcoefficients).flatten())) print("-"*40) print("Total number of trainable parameters: ",len(final_potential.get_all_coeffs())) ``` # Test fitted potential ``` test_pr = Project("test_ace_potential") test_pr.remove_jobs_silently() test_pr.job_table() cu_ace_potential = job.get_lammps_potential() cu_ace_potential ``` ## Optimization ``` lammps_job = test_pr.create.job.Lammps("opt_lammps", delete_existing_job=True) lammps_job.potential = cu_ace_potential lammps_job.structure = test_pr.create.structure.ase_bulk("Cu","fcc",cubic=True) lammps_job.calc_minimize(pressure=0.0) lammps_job.run() test_pr.job_table() ``` ## E-V curve ``` ev_job = test_pr.create.job.Murnaghan("murn", delete_existing_job=True) ref_job = test_pr.create.job.Lammps("ref_job", delete_existing_job=True) ref_job.potential = cu_ace_potential ref_job.structure = lammps_job.get_structure() ev_job.ref_job = ref_job ev_job.run() ev_job.plot() ev_job["output/equilibrium_bulk_modulus"] ev_job["output/equilibrium_b_prime"] ev_job["output/equilibrium_energy"]/4 ev_job["output/equilibrium_volume"]/4 ``` # Advanced (already trained) potential More advanced Cu potential trained on `df3` and longer range 
(`cutoff=7.5`) ``` advanced_potential = BBasisConfiguration("good_potentials/df3_cut75_large_body_order.yaml") advanced_basis_set = ACEBBasisSet(advanced_potential) ``` For single-species potential there is only one **species block** for *Cu*: ``` len(advanced_potential.funcspecs_blocks) Cu_block = advanced_potential.funcspecs_blocks[0] ``` radial coefficients $c_{nlk}$: ``` np.shape(Cu_block.radcoefficients) print("Cutoff: ",Cu_block.rcutij, " Ang") ``` Visualize the radial basis functions ($g_k$) and radial functions ($R_{nl}$) and their derivatives: ``` RadialFunctionsVisualization(advanced_basis_set).plot() ``` Total number of basis functions ``` len(Cu_block.funcspecs) ``` List of B-basis functions $ {B}_{i\pmb{\mu n l L}}$: $\pmb{\mu} = $ `elements`, $\pmb{n} = $ `ns`, $\pmb{l} = $ `ls`, $\pmb{L} = $ `LS`) and corresponding coefficients $ {c}^{(p)}_{\mu_i\pmb{\mu n l L}} =$ `coeffs` for two densities first 20 B-functions ``` Cu_block.funcspecs[:20] ``` last 20 B-functions ``` Cu_block.funcspecs[-20:] print("Trainable parameters number") print() print("B-functions coefficients: ",Cu_block.ndensityi * len(Cu_block.funcspecs)) print("Radial functions coefficients: ",len(np.array(Cu_block.radcoefficients).flatten())) print("-"*40) print("Total number of trainable parameters: ",len(advanced_potential.get_all_coeffs())) ```
github_jupyter
**Note**: Click on "*Kernel*" > "*Restart Kernel and Clear All Outputs*" in [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/) *before* reading this notebook to reset its output. If you cannot run this file on your machine, you may want to open it [in the cloud <img height="12" style="display: inline-block" src="../static/link/to_mb.png">](https://mybinder.org/v2/gh/webartifex/intro-to-python/develop?urlpath=lab/tree/09_mappings/05_appendix.ipynb). # Chapter 9: Mappings & Sets (Appendix) The [collections <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/collections.html) module in the [standard library <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/index.html) provides specialized mapping types for common use cases. ## The `defaultdict` Type The [defaultdict <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/collections.html#collections.defaultdict) type allows us to define a factory function that creates default values whenever we look up a key that does not yet exist. Ordinary `dict` objects would throw a `KeyError` exception in such situations. Let's say we have a `list` with *records* of goals scored during a soccer game. The records consist of the fields "Country," "Player," and the "Time" when a goal was scored. Our task is to group the goals by player and/or country. ``` goals = [ ("Germany", "Müller", 11), ("Germany", "Klose", 23), ("Germany", "Kroos", 24), ("Germany", "Kroos", 26), ("Germany", "Khedira", 29), ("Germany", "Schürrle", 69), ("Germany", "Schürrle", 79), ("Brazil", "Oscar", 90), ] ``` Using a normal `dict` object, we have to tediously check if a player has already scored a goal before. If not, we must create a *new* `list` object with the first time the player scored. Otherwise, we append the goal to an already existing `list` object. 
``` goals_by_player = {} for _, player, minute in goals: if player not in goals_by_player: goals_by_player[player] = [minute] else: goals_by_player[player].append(minute) goals_by_player ``` Instead, with a `defaultdict` object, we can portray the code fragment's intent in a concise form. We pass a reference to the [list() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#func-list) built-in to `defaultdict`. ``` from collections import defaultdict goals_by_player = defaultdict(list) for _, player, minute in goals: goals_by_player[player].append(minute) goals_by_player type(goals_by_player) ``` A reference to the factory function is stored in the `default_factory` attribute. ``` goals_by_player.default_factory ``` If we want this code to produce a normal `dict` object, we pass `goals_by_player` to the [dict() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#func-dict) constructor. ``` dict(goals_by_player) ``` Being creative, we use a factory function, created with a `lambda` expression, that returns another [defaultdict <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/collections.html#collections.defaultdict) with [list() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/functions.html#func-list) as its factory to group on the country and the player level simultaneously. ``` goals_by_country_and_player = defaultdict(lambda: defaultdict(list)) for country, player, minute in goals: goals_by_country_and_player[country][player].append(minute) goals_by_country_and_player ``` Conversion into a normal and nested `dict` object is now a bit tricky but can be achieved in one line with a comprehension. 
``` {country: dict(by_player) for country, by_player in goals_by_country_and_player.items()} ``` ## The `Counter` Type A common task is to count the number of occurrences of elements in an iterable. The [Counter <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/collections.html#collections.Counter) type provides an easy-to-use interface that can be called with any iterable and returns a `dict`-like object of type `Counter` that maps each unique elements to the number of times it occurs. To continue the previous example, let's create an overview that shows how many goals a player scorred. We use a generator expression as the argument to `Counter`. ``` goals from collections import Counter scorers = Counter(x[1] for x in goals) scorers type(scorers) ``` Now we can look up individual players. `scores` behaves like a normal dictionary with regard to key look-ups. ``` scorers["Müller"] ``` By default, it returns `0` if a key is not found. So, we do not have to handle a `KeyError`. ``` scorers["Lahm"] ``` `Counter` objects have a [.most_common() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/collections.html#collections.Counter.most_common) method that returns a `list` object containing $2$-element `tuple` objects, where the first element is the element from the original iterable and the second the number of occurrences. The `list` object is sorted in descending order of occurrences. ``` scorers.most_common(2) ``` We can increase the count of individual entries with the [.update() <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/collections.html#collections.Counter.update) method: That takes an *iterable* of the elements we want to count. 
Imagine if [Philipp Lahm <img height="12" style="display: inline-block" src="../static/link/to_wiki.png">](https://en.wikipedia.org/wiki/Philipp_Lahm) had also scored against Brazil. ``` scorers.update(["Lahm"]) scorers ``` If we use a `str` object as the argument instead, each individual character is treated as an element to be updated. That is most likely not what we want. ``` scorers.update("Lahm") scorers ``` ## The `ChainMap` Type Consider `to_words`, `more_words`, and `even_more_words` below. Instead of merging the items of the three `dict` objects together into a *new* one, we want to create an object that behaves as if it contained all the unified items in it without materializing them in memory a second time. ``` to_words = { 0: "zero", 1: "one", 2: "two", } more_words = { 2: "TWO", # to illustrate a point 3: "three", 4: "four", } even_more_words = { 4: "FOUR", # to illustrate a point 5: "five", 6: "six", } ``` The [ChainMap <img height="12" style="display: inline-block" src="../static/link/to_py.png">](https://docs.python.org/3/library/collections.html#collections.ChainMap) type allows us to do precisely that. ``` from collections import ChainMap ``` We simply pass all mappings as positional arguments to `ChainMap` and obtain a **proxy** object that occupies almost no memory but gives us access to the union of all the items. ``` chain = ChainMap(to_words, more_words, even_more_words) ``` Let's loop over the items in `chain` and see what is "in" it. The order is obviously *unpredictable* but all seven items we expected are there. Keys of later mappings do *not* overwrite earlier keys. ``` for number, word in chain.items(): print(number, word) ``` When looking up a non-existent key, `ChainMap` objects raise a `KeyError` just like normal `dict` objects would. ``` chain[10] ```
github_jupyter
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); </script> # IDScalarWaveNRPy: An Einstein Toolkit Initial Data Thorn for the Scalar Wave Equation ## Author: Terrence Pierre Jacques & Zach Etienne ### Formatting improvements courtesy Brandon Clark [comment]: <> (Abstract: TODO) [comment]: <> (Notebook Status and Validation Notes: TODO) ### NRPy+ Source Code for this module: [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) [\[**tutorial**\]](Tutorial-ScalarWave.ipynb) Contructs the SymPy expressions for spherical gaussian and plane-wave initial data ## Introduction: In this part of the tutorial, we will construct an Einstein Toolkit (ETK) thorn (module) that will set up *initial data* for the scalar wave initial value problem. In a [previous tutorial notebook](Tutorial-ScalarWave.ipynb), we used NRPy+ to contruct the SymPy expressions for either spherical gaussian or plane-wave initial data. This thorn is largely based on and should function similarly to the $\text{IDScalarWaveC}$ thorn included in the Einstein Toolkit (ETK) $\text{CactusWave}$ arrangement. We will construct this thorn in two steps. 1. Call on NRPy+ to convert the SymPy expressions for the initial data into one C-code kernel. 1. Write the C code and linkages to the Einstein Toolkit infrastructure (i.e., the .ccl files) to complete this Einstein Toolkit module. <a id='toc'></a> # Table of Contents $$\label{toc}$$ This notebook is organized as follows 1. [Step 1](#initializenrpy): Call on NRPy+ to convert the SymPy expression for the scalar wave initial data into a C-code kernel 1. [Step 2](#einstein): Interfacing with the Einstein Toolkit 1. [Step 2.a](#einstein_c): Constructing the Einstein Toolkit C-code calling functions that include the C code kernels 1. 
[Step 2.b](#einstein_ccl): CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure 1. [Step 2.c](#einstein_list): Add the C code to the Einstein Toolkit compilation list 1. [Step 3](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file <a id='initializenrpy'></a> # Step 1: Initialize needed Python/NRPy+ modules \[Back to [top](#toc)\] $$\label{initializenrpy}$$ ``` # Step 1: Import needed core NRPy+ modules from outputC import lhrh # NRPy+: Core C code output module import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import loop as lp # NRPy+: Generate C code loops import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support import reference_metric as rfm # NRPy+: Reference metric support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface import os, sys # Standard Python modules for multiplatform OS-level functions import time # Standard Python module; useful for benchmarking # Step 1a: Create directories for the thorn if they don't exist. # Create directory for WaveToyNRPy thorn & subdirectories in case they don't exist. outrootdir = "IDScalarWaveNRPy/" cmd.mkdir(os.path.join(outrootdir)) outdir = os.path.join(outrootdir,"src") # Main C code output directory cmd.mkdir(outdir) # Step 1b: This is an Einstein Toolkit (ETK) thorn. Here we # tell NRPy+ that gridfunction memory access will # therefore be in the "ETK" style. 
par.set_parval_from_str("grid::GridFuncMemAccess","ETK") ``` <a id='einstein'></a> # Step 2: Interfacing with the Einstein Toolkit \[Back to [top](#toc)\] $$\label{einstein}$$ <a id='einstein_c'></a> ## Step 2.a: Constructing the Einstein Toolkit C-code calling functions that include the C code kernels \[Back to [top](#toc)\] $$\label{einstein_c}$$ Using sympy, we construct the exact expressions for all scalar wave initial data currently supported in NRPy, documented in [Tutorial-ScalarWave.ipynb](Tutorial-ScalarWave.ipynb). We write the generated C codes into different C files, corresponding to the type of initial data the may want to choose at run time. Note that the code below can be easily extensible to include other types of initial data. ``` # Step 1c: Call the InitialData() function from within the # ScalarWave/InitialData.py module. import ScalarWave.InitialData as swid # Step 1e: Call the InitialData() function to set up initial data. # Options include: # "PlaneWave": monochromatic (single frequency/wavelength) plane wave # "SphericalGaussian": spherically symmetric Gaussian, with default stdev=3 ID_options = ["PlaneWave", "SphericalGaussian"] for ID in ID_options: gri.glb_gridfcs_list = [] # Within the ETK, the 3D gridfunctions x, y, and z store the # Cartesian grid coordinates. Setting the gri.xx[] arrays # to point to these gridfunctions forces NRPy+ to treat # the Cartesian coordinate gridfunctions properly -- # reading them from memory as needed. x,y,z = gri.register_gridfunctions("AUX",["x","y","z"]) rfm.xx[0] = x rfm.xx[1] = y rfm.xx[2] = z swid.InitialData(Type=ID, default_sigma=0.25, default_k0=1.0, default_k1=0., default_k2=0.) # Step 1f: Register uu and vv gridfunctions so they can be written to by NRPy. uu,vv = gri.register_gridfunctions("EVOL",["uu","vv"]) # Step 1g: Set the uu and vv gridfunctions to the uu_ID & vv_ID variables # defined by InitialData_PlaneWave(). uu = swid.uu_ID vv = swid.vv_ID # Step 1h: Create the C code output kernel. 
ScalarWave_ID_SymbExpressions = [\ lhrh(lhs=gri.gfaccess("out_gfs","uu"),rhs=uu),\ lhrh(lhs=gri.gfaccess("out_gfs","vv"),rhs=vv),] ScalarWave_ID_CcodeKernel = fin.FD_outputC("returnstring",ScalarWave_ID_SymbExpressions) ScalarWave_ID_looped = lp.loop(["i2","i1","i0"],["0","0","0"],["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],\ ["1","1","1"],["#pragma omp parallel for","",""],"",\ ScalarWave_ID_CcodeKernel.replace("time","cctk_time")) # Write the C code kernel to file. with open(os.path.join(outdir,"ScalarWave_"+ID+"ID.h"), "w") as file: file.write(str(ScalarWave_ID_looped)) ``` <a id='einstein_ccl'></a> ## Step 2. b: CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure \[Back to [top](#toc)\] $$\label{einstein_ccl}$$ Writing a module ("thorn") within the Einstein Toolkit requires that three "ccl" files be constructed, all in the root directory of the thorn: 1. `interface.ccl`: defines the gridfunction groups needed, and provides keywords denoting what this thorn provides and what it should inherit from other thorns. Specifically, this file governs the interaction between this thorn and others; more information can be found in the [official Einstein Toolkit documentation](https://einsteintoolkit.org/usersguide/UsersGuide.html#x1-179000D2.2). With "implements", we give our thorn its unique name. By "inheriting" other thorns, we tell the Toolkit that we will rely on variables that exist and are declared "public" within those functions. ``` evol_gfs_list = [] for i in range(len(gri.glb_gridfcs_list)): if gri.glb_gridfcs_list[i].gftype == "EVOL": evol_gfs_list.append( gri.glb_gridfcs_list[i].name+"GF") # NRPy+'s finite-difference code generator assumes gridfunctions # are alphabetized; not sorting may result in unnecessary # cache misses. evol_gfs_list.sort() with open(os.path.join(outrootdir,"interface.ccl"), "w") as file: file.write(""" # With "implements", we give our thorn its unique name. 
def keep_param__return_type(paramtuple):
    """Decide whether an NRPy+ parameter should be emitted into param.ccl.

    :param paramtuple: NRPy+ parameter record with attributes
        ``module``, ``parname``, ``type``, and ``defaultval``.
    :return: tuple ``(keep_param, typestring)``: ``keep_param`` is True when
        the parameter should appear in param.ccl; ``typestring`` is the CCTK
        type-declaration prefix (e.g. ``"CCTK_REAL "``), built regardless of
        whether the parameter is kept.
    :raises SystemExit: if the parameter has an unsupported type
        (including, for now, char/string types).
    """
    keep_param = True  # We'll not set some parameters in param.ccl;
                       #   e.g., those that should be #define'd like M_PI.
    typestring = ""
    # Separate thorns within the ETK take care of grid/coordinate parameters;
    # thus we ignore NRPy+ grid/coordinate parameters:
    if paramtuple.module in ("grid", "reference_metric") or paramtuple.parname == "wavespeed":
        keep_param = False

    partype = paramtuple.type
    if partype == "bool":
        typestring += "BOOLEAN "
    elif partype == "REAL":
        if paramtuple.defaultval != 1e300:  # 1e300 is a magic value indicating that the C parameter should be mutable
            typestring += "CCTK_REAL "
        else:
            keep_param = False
    elif partype == "int":
        typestring += "CCTK_INT "
    elif partype == "#define":
        keep_param = False
    else:
        # FIXME: char/string parameter types should in principle be supported;
        # until then they deliberately fall through to the same
        # unsupported-type error as any unrecognized type (the original code
        # had two byte-identical branches for "char" and the catch-all).
        print("Error: parameter "+paramtuple.module+"::"+paramtuple.parname+
              " has unsupported type: \""+ paramtuple.type + "\"")
        sys.exit(1)
    return keep_param, typestring
shares: grid USES KEYWORD type shares: WaveToyNRPy USES REAL wavespeed restricted: CCTK_KEYWORD initial_data "Type of initial data" {""" for ID in ID_options: paramccl_str +=''' "'''+ID+'''" :: "'''+ID+'"' paramccl_str +=''' } "'''+ID+'''" ''' paramccl_str +=""" restricted: """ for i in range(len(par.glb_Cparams_list)): # keep_param is a boolean indicating whether we should accept or reject # the parameter. singleparstring will contain the string indicating # the variable type. keep_param, singleparstring = keep_param__return_type(par.glb_Cparams_list[i]) if keep_param: parname = par.glb_Cparams_list[i].parname partype = par.glb_Cparams_list[i].type singleparstring += parname + " \""+ parname +" (see NRPy+ for parameter definition)\"\n" singleparstring += "{\n" if partype != "bool": singleparstring += " *:* :: \"All values accepted. NRPy+ does not restrict the allowed ranges of parameters yet.\"\n" singleparstring += "} "+str(par.glb_Cparams_list[i].defaultval)+"\n\n" paramccl_str += singleparstring with open(os.path.join(outrootdir,"param.ccl"), "w") as file: file.write(paramccl_str) ``` 3. `schedule.ccl`: allocates storage for gridfunctions, defines how the thorn's functions should be scheduled in a broader simulation, and specifies the regions of memory written to or read from gridfunctions. More information on this file's syntax can be found in the [official Einstein Toolkit documentation](https://einsteintoolkit.org/usersguide/UsersGuide.html#x1-187000D2.4). We specify here the standardized ETK "scheduling bins" in which we want each of our thorn's functions to run. ``` with open(os.path.join(outrootdir,"schedule.ccl"), "w") as file: file.write(""" # This schedule.ccl file was automatically generated by NRPy+. # You are advised against modifying it directly; instead # modify the Python code that generates it. 
if (CCTK_EQUALS (initial_data, "PlaneWave")) { schedule IDScalarWaveNRPy_param_check at CCTK_PARAMCHECK { LANG: C OPTIONS: global } "Check sanity of parameters" } schedule IDScalarWaveNRPy_InitialData at CCTK_INITIAL as WaveToy_InitialData { STORAGE: WaveToyNRPy::scalar_fields[3] LANG: C } "Initial data for 3D wave equation" """) ``` <a id='einstein_list'></a> ## Step 2.c: Add the C code to the Einstein Toolkit compilation list \[Back to [top](#toc)\] $$\label{einstein_list}$$ We will also need `make.code.defn`, which indicates the list of files that need to be compiled. This thorn only has the one C file to compile. ``` make_code_defn_list = [] def append_to_make_code_defn_list(filename): if filename not in make_code_defn_list: make_code_defn_list.append(filename) return os.path.join(outdir,filename) with open(append_to_make_code_defn_list("InitialData.c"),"w") as file: file.write(""" #include <math.h> #include <stdio.h> #include <string.h> #include "cctk.h" #include "cctk_Parameters.h" #include "cctk_Arguments.h" void IDScalarWaveNRPy_param_check(CCTK_ARGUMENTS) { DECLARE_CCTK_ARGUMENTS; DECLARE_CCTK_PARAMETERS; if (kk0 == 0 && kk1 == 0 && kk2 == 0) { CCTK_WARN(0,"kk0==kk1==kk2==0: Zero wave vector cannot be normalized. 
def append_to_make_code_defn_list(filename):
    """Register *filename* for make.code.defn (at most once) and return its path.

    Relies on the module-level ``make_code_defn_list`` accumulator and the
    ``outdir`` output directory defined earlier in the notebook.
    """
    already_listed = filename in make_code_defn_list
    if not already_listed:
        make_code_defn_list.append(filename)
    return os.path.join(outdir, filename)
github_jupyter
# Cryptocurrency Word Mover's Distance Semantic Analysis ### Authors | Student Name | Student Number | |---------------------------------|--------------------| | Raj Sandhu | 101111960 | | Akaash Kapoor | 101112895 | | Ali Alvi | 101114940 | | Hassan Jallad | 101109334 | | Areeb Ul Haq | 101115337 | | Ahmad Abuoudeh | 101072636 | ## Libraries to Import ``` import pandas as pd import gensim.downloader as api ``` ## Read In Processed Coin Dataset ``` coin_df = pd.read_csv("coin-info.csv") #Read in the processed dataframe generated in phase 2. coin_df.head() #Print first 5 rows of dataframe to assess validity. ``` ## Load In Pretrained Word Embedding Model ``` model = api.load("word2vec-google-news-300") #Load in the pretrained word embedding model which is used to perform word mover's distance between pairs of documents. ``` ## Generate Similarity Matrix for the Word Mover's Distance Metric ``` coin_similarity_matrix = pd.DataFrame([[model.wmdistance(p1, p2) for p2 in coin_df.iloc[:, -1].str.split()] for p1 in coin_df.iloc[:, -1].str.split()], columns = coin_df.iloc[:, 0], index= coin_df.iloc[:, 0]) #Performs pairwise computations over all possible pairwise combinations of provided descriptions, and stores these computations in a similarity matrix. #Descriptions are split using split() function because the wmdistance function requires a list of string tokens for proper results. ``` ## Display Similarity Matrix and Check Validity ``` coin_similarity_matrix #Display computed similarity matrix. #Obtain information of first two coins to perform a sanity check of calculations performed. coin_desc_1 = coin_df["Description"][0] coin_desc_2 = coin_df["Description"][1] coin_name_1 = coin_df["Name"][0] coin_name_2 = coin_df["Name"][1] coin_similarity_1 = coin_similarity_matrix.iloc[0].iloc[1] assert model.wmdistance(coin_desc_1.split(), coin_desc_2.split()) == coin_similarity_1, "Coins " + coin_name_1 + " and " + coin_name_2 + " fail unit test. 
Computed word mover's distances do not match." print("Coins " + coin_name_1 + " and " + coin_name_2 + " pass the unit test. They have a word mover's distance of: " + str(coin_similarity_1)) #Verifies that the word mover's distance computed for the first two coins is correct. ``` In the above cell, a sanity check is performed to ensure that word mover's distance calculations were performed correctly, ensuring the obtained similarity matrix is of the highest quality. This is done through the assert statement. A manual computation of word mover's distance of the first two coins is performed, and this computation is also retrieved from the similarity matrix. These computations are then compared for equality with the assert statement. If this unit test is passed, a success message is printed, otherwise an error is thrown with the provided error message. ## Download Similarity Matrix as a CSV File ``` coin_similarity_matrix.to_csv("coin-similarity-matrix-description.csv") #exports similarity matrix to a csv file. ``` From here, the csv file should be downloaded and you should be able to see it on the left side of the screen in the Files section. From here, simply right click it and download it and then save it in the models folder of the repo.
github_jupyter
<a href="https://www.skills.network/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01"><img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DL0120ENedX/labs/Template%20for%20Instructional%20Hands-on%20Labs/images/IDSNlogo.png" width="400px" align="center"></a> <h1 align="center"><font size="5">RESTRICTED BOLTZMANN MACHINES</font></h1> <h3>Introduction</h3> <b>Restricted Boltzmann Machine (RBM):</b> RBMs are shallow neural nets that learn to reconstruct data by themselves in an unsupervised fashion. <h4>Why are RBMs important?</h4> An RBM are a basic form of autoencoder. It can automatically extract <b>meaningful</b> features from a given input. <h4>How does it work?</h4> RBM is a 2 layer neural network. Simply, RBM takes the inputs and translates those into a set of binary values that represents them in the hidden layer. Then, these numbers can be translated back to reconstruct the inputs. Through several forward and backward passes, the RBM will be trained, and a trained RBM can reveal which features are the most important ones when detecting patterns. <h4>What are the applications of an RBM?</h4> RBM is useful for <a href='http://www.cs.utoronto.ca/~hinton/absps/netflixICML.pdf?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01'> Collaborative Filtering</a>, dimensionality reduction, classification, regression, feature learning, topic modeling and even <b>Deep Belief Networks</b>. <h4>Is RBM a generative or Discriminative model?</h4> RBM is a generative model. 
Let me explain it by first, see what is different between discriminative and generative models: <b>Discriminative:</b> Consider a classification problem where we want to learn to distinguish between Sedan cars (y = 1) and SUV cars (y = 0), based on some features of cars. Given a training set, an algorithm like logistic regression tries to find a straight line, or <i>decision boundary</i>, that separates the suv and sedan. <b>Generative:</b> looking at cars, we can build a model of what Sedan cars look like. Then, looking at SUVs, we can build a separate model of what SUV cars look like. Finally, to classify a new car, we can match the new car against the Sedan model, and match it against the SUV model, to see whether the new car looks more like the SUV or Sedan. Generative Models specify a probability distribution over a dataset of input vectors. We can carry out both supervised and unsupervised tasks with generative models: <ul> <li>In an unsupervised task, we try to form a model for $P(x)$, where $P$ is the probability given $x$ as an input vector.</li> <li>In the supervised task, we first form a model for $P(x|y)$, where $P$ is the probability of $x$ given $y$(the label for $x$). For example, if $y = 0$ indicates that a car is an SUV, and $y = 1$ indicates that a car is a sedan, then $p(x|y = 0)$ models the distribution of SUV features, and $p(x|y = 1)$ models the distribution of sedan features. If we manage to find $P(x|y)$ and $P(y)$, then we can use <b>Bayes rule</b> to estimate $P(y|x)$, because: $$p(y|x) = \frac{p(x|y)p(y)}{p(x)}$$</li> </ul> Now the question is, can we build a generative model, and then use it to create synthetic data by directly sampling from the modeled probability distributions? Lets see. 
def printmd(string):
    """Render *string* as large red Markdown text in the notebook output."""
    markup = '# <span style="color:red">' + string + '</span>'
    display(Markdown(markup))
The first layer of the RBM is called the <b>visible</b> (or input layer). Imagine that our toy example, has only vectors with 7 values, so the visible layer must have $V=7$ input nodes. The second layer is the <b>hidden</b> layer, which has $H$ neurons in our case. Each hidden node takes on values of either 0 or 1 (i.e., $h_i = 1$ or $h_i$ = 0), with a probability that is a logistic function of the inputs it receives from the other $V$ visible units, called for example, $p(h_i = 1)$. For our toy sample, we'll use 2 nodes in the hidden layer, so $H = 2$. <center><img src="https://ibm.box.com/shared/static/eu26opvcefgls6vnwuo29uwp0nudmokh.png" alt="RBM Model" style="width: 400px;"></center> Each node in the first layer also has a <b>bias</b>. We will denote the bias as $v\_{bias}$, and this single value is shared among the $V$ visible units. The <b>bias</b> of the second is defined similarly as $h\_{bias}$, and this single value among the $H$ hidden units. ``` v_bias = tf.Variable(tf.zeros([7]), tf.float32) h_bias = tf.Variable(tf.zeros([2]), tf.float32) ``` We have to define weights among the input layer and hidden layer nodes. In the weight matrix, the number of rows are equal to the input nodes, and the number of columns are equal to the output nodes. We define a tensor $\mathbf{W}$ of shape = (7,2), where the number of visible neurons = 7, and the number of hidden neurons = 2. ``` W = tf.constant(np.random.normal(loc=0.0, scale=1.0, size=(7, 2)).astype(np.float32)) ``` <hr> <a id="ref3"></a> <h3>What RBM can do after training?</h3> Think of RBM as a model that has been trained based on images of a dataset of many SUV and sedan cars. Also, imagine that the RBM network has only two hidden nodes, where one node encodes the weight and, and the other encodes the size. In a sense, the different configurations represent different cars, where one is an SUV and the other is Sedan. 
In a training process, through many forward and backward passes, the RBM adjust its weights to send a stronger signal to either the SUV node (0, 1) or the sedan node (1, 0) in the hidden layer, given the pixels of images. Now, given an SUV in hidden layer, which distribution of pixels should we expect? RBM can give you 2 things. First, it encodes your images in hidden layer. Second, it gives you the probability of observing a case, given some hidden values. <h3>The Inference Process</h3> RBM has two phases: <ul> <li>Forward Pass</li> <li>Backward Pass or Reconstruction</li> </ul> <b>Phase 1) Forward pass:</b> Input one training sample (one image) $\mathbf{x}$ through all visible nodes, and pass it to all hidden nodes. Processing happens in each node in the hidden layer. This computation begins by making stochastic decisions about whether to transmit that input or not (i.e. to determine the state of each hidden layer). First, the probability vector is computed using the input feature vector $\mathbf{x}$, the weight matrix $\mathbf{W}$, and the bias term $h\_{bias}$, as $$p({h_j}|\mathbf x)= \sigma( \sum\_{i=1}^V W\_{ij} x_i + h\_{bias} )$$, where $\sigma(z) = (1+e^{-z})^{-1}$ is the logistic function. So, what does $p({h_j})$ represent? It is the <b>probability distribution</b> of the hidden units. That is, RBM uses inputs $x_i$ to make predictions about hidden node activations. For example, imagine that the hidden node activation values are \[0.51 0.84] for the first training item. It tells you that the conditional probability for each hidden neuron for Phase 1 is: $$p(h\_{1} = 1|\mathbf{v}) = 0.51$$ $$p(h\_{2} = 1|\mathbf{v}) = 0.84$$ As a result, for each row in the training set, vector of probabilities is generated. In TensorFlow, this is referred to as a `tensor` with a shape of (1,2). 
We then turn unit $j$ with probability $p(h\_{j}|\mathbf{v})$, and turn it off with probability $1 - p(h\_{j}|\mathbf{v})$ by generating a uniform random number vector $\mathbf{\xi}$, and comparing it to the activation probability as <center>If $\xi_j>p(h_{j}|\mathbf{v})$, then $h_j=1$, else $h_j=0$.</center> Therefore, the conditional probability of a configuration of $\mathbf{h}$ given $\mathbf{v}$ (for a training sample) is: $$p(\mathbf{h} \mid \mathbf{v}) = \prod\_{j=1}^H p(h_j \mid \mathbf{v})$$ where $H$ is the number of hidden units. Before we go further, let's look at a toy example for one case out of all input. Assume that we have a trained RBM, and a very simple input vector, such as \[1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0].\ Let's see what the output of forward pass would look like: ``` X = tf.constant([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]], tf.float32) v_state = X print ("Input: ", v_state) h_bias = tf.constant([0.1, 0.1]) print ("hb: ", h_bias) print ("w: ", W) # Calculate the probabilities of turning the hidden units on: h_prob = tf.nn.sigmoid(tf.matmul(v_state, W) + h_bias) #probabilities of the hidden units print ("p(h|v): ", h_prob) # Draw samples from the distribution: h_state = tf.nn.relu(tf.sign(h_prob - tf.random.uniform(tf.shape(h_prob)))) #states print ("h0 states:", h_state) ``` <b>Phase 2) Backward Pass (Reconstruction):</b> The RBM reconstructs data by making several forward and backward passes between the visible and hidden layers. So, in the second phase (i.e. reconstruction phase), the samples from the hidden layer (i.e. $\mathbf h$) becomes the input in the backward pass. The same weight matrix and visible layer biases are used to passed to the sigmoid function. The reproduced output is a reconstruction which is an approximation of the original input. 
``` vb = tf.constant([0.1, 0.2, 0.1, 0.1, 0.1, 0.2, 0.1]) print ("b: ", vb) v_prob = tf.nn.sigmoid(tf.matmul(h_state, tf.transpose(W)) + vb) print ("p(vi∣h): ", v_prob) v_state = tf.nn.relu(tf.sign(v_prob - tf.random.uniform(tf.shape(v_prob)))) print ("v probability states: ", v_state) ``` RBM learns a probability distribution over the input, and then, after being trained, the RBM can generate new samples from the learned probability distribution. As you know, <b>probability distribution</b>, is a mathematical function that provides the probabilities of occurrence of different possible outcomes in an experiment. The (conditional) probability distribution over the visible units v is given by $$p(\mathbf{v} \mid \mathbf{h}) = \prod\_{i=1}^V p(v_i \mid \mathbf{h}),$$ where, $$p(v_i \mid \mathbf{h}) = \sigma\left(\sum\_{j=1}^H W\_{ji} h_j + v\_{bias} \right)$$ so, given current state of hidden units and weights, what is the probability of generating \[1. 0. 0. 1. 0. 0. 0.] in reconstruction phase, based on the above <b>probability distribution</b> function? ``` inp = X print("input X:" , inp.numpy()) print("probablity vector:" , v_prob[0].numpy()) v_probability = 1 for elm, p in zip(inp[0],v_prob[0]) : if elm ==1: v_probability *= p else: v_probability *= (1-p) print("probability of generating X: " , v_probability.numpy()) ``` How similar are vectors $\mathbf{x}$ and $\mathbf{v}$? Of course, the reconstructed values most likely will not look anything like the input vector, because our network has not been trained yet. Our objective is to train the model in such a way that the input vector and reconstructed vector to be same. Therefore, based on how different the input values look to the ones that we just reconstructed, the weights are adjusted. <hr> <h2>MNIST</h2> We will be using the MNIST dataset to practice the usage of RBMs. The following cell loads the MNIST dataset. 
def hidden_layer(v0_state, W, hb):
    """Sample binary hidden-unit states given the visible state ``v0_state``.

    Computes p(h=1|v) = sigmoid(v W + hb), then draws Bernoulli samples by
    comparing the probabilities against uniform noise (sample_h_given_X).
    """
    activation = tf.matmul([v0_state], W) + hb
    prob_h_given_v = tf.nn.sigmoid(activation)  # probabilities of the hidden units
    noise = tf.random.uniform(tf.shape(prob_h_given_v))
    # sign(p - u) is +1 where u < p and -1 (or 0) otherwise; relu maps that to {1, 0}
    return tf.nn.relu(tf.sign(prob_h_given_v - noise))
def error(v0_state, v1_state):
    """Mean squared difference between an input state and its reconstruction."""
    squared_diff = tf.square(v0_state - v1_state)
    return tf.reduce_mean(squared_diff)
$$p({h_j}|\mathbf v)= \sigma\left(\sum_{i=1}^V W_{ij} v_i + h_{bias} \right)$$ The samples are generated from this distribution by generating the uniform random variate vector $\mathbf{\xi} \sim U[0,1]$ of length $H$ and comparing to the computed probabilities as <center>If $\xi_j>p(h_{j}|\mathbf{v})$, then $h_j=1$, else $h_j=0$.</center> <h4>Gibbs Sampling Step 2</h4> Then, knowing the hidden values, we use $p(\mathbf v| \mathbf h)$ for reconstructing of new input values v. $$p({v_i}|\mathbf h)= \sigma\left(\sum\_{j=1}^H W^{T}*{ij} h_j + v*{bias} \right)$$ The samples are generated from this distribution by generating a uniform random variate vector $\mathbf{\xi} \sim U\[0,1]$ of length $V$ and comparing to the computed probabilities as <center>If $\xi_i>p(v_{i}|\mathbf{h})$, then $v_i=1$, else $v_i=0$.</center> Let vectors $\mathbf v_k$ and $\mathbf h_k$ be for the $k$th iteration. In general, the $kth$ state is generrated as: <b>Iteration</b> $k$: $$\mathbf v\_{k-1} \Rightarrow p(\mathbf h\_{k-1}|\mathbf v\_{k-1})\Rightarrow \mathbf h\_{k-1}\Rightarrow p(\mathbf v\_{k}|\mathbf h\_{k-1})\Rightarrow \mathbf v_k$$ <h3>Contrastive Divergence (CD-k)</h3> The update of the weight matrix is done during the Contrastive Divergence step. Vectors v0 and vk are used to calculate the activation probabilities for hidden values h0 and hk. The difference between the outer products of those probabilities with input vectors v0 and vk results in the update matrix: $$\Delta \mathbf W_k =\mathbf v_k \otimes \mathbf h_k - \mathbf v\_{k-1} \otimes \mathbf h\_{k-1}$$ Contrastive Divergence is actually matrix of values that is computed and used to adjust values of the $\mathbf W$ matrix. Changing $\mathbf W$ incrementally leads to training of the $\mathbf W$ values. 
Then, on each step (epoch), $\mathbf W$ is updated using the following: $$\mathbf W_k = \mathbf W\_{k-1} + \alpha \* \Delta \mathbf W_k$$ Reconstruction steps: <ul> <li> Get one data point from data set, like <i>x</i>, and pass it through the following steps:</li> <b>Iteration</b> $k=1$: Sampling (starting with input image) $$\mathbf x = \mathbf v\_0 \Rightarrow p(\mathbf h\_0|\mathbf v\_0)\Rightarrow \mathbf h\_0 \Rightarrow p(\mathbf v\_1|\mathbf h\_0)\Rightarrow \mathbf v\_1$$\ followed by the CD-k step $$\Delta \mathbf W\_1 =\mathbf v\_1 \otimes \mathbf h\_1 - \mathbf v\_{0} \otimes \mathbf h\_{0}$$\ $$\mathbf W\_1 = \mathbf W\_{0} + \alpha \* \Delta \mathbf W\_1$$ <li> $\mathbf v_1$ is the reconstruction of $\mathbf x$ sent to the next iteration).</li> <b>Iteration</b> $k=2$: Sampling (starting with $\mathbf v\_1$) $$\mathbf v\_1 \Rightarrow p(\mathbf h\_1|\mathbf v\_1)\Rightarrow \mathbf h\_1\Rightarrow p(\mathbf v\_2|\mathbf h\_1)\Rightarrow \mathbf v\_2$$ followed by the CD-k step $$\Delta \mathbf W\_2 =\mathbf v\_2 \otimes \mathbf h\_2 - \mathbf v\_{1} \otimes \mathbf h\_{1}$$\ $$\mathbf W\_2 = \mathbf W\_{1} + \alpha \* \Delta \mathbf W\_2$$ <li> $\mathbf v_2$ is the reconstruction of $\mathbf v_1$ sent to the next iteration).</li> <b>Iteration</b> $k=K$: Sampling (starting with $\mathbf v\_{K-1}$) $$\mathbf v\_{K-1} \Rightarrow p(\mathbf h\_{K-1}|\mathbf v\_{K-1})\Rightarrow \mathbf h\_{K-1}\Rightarrow p(\mathbf v_K|\mathbf h\_{K-1})\Rightarrow \mathbf v_K$$ followed by the CD-k step $$\Delta \mathbf W_K =\mathbf v_K \otimes \mathbf h_K - \mathbf v\_{K-1} \otimes \mathbf h\_{K-1}$$\ $$\mathbf W_K = \mathbf W\_{K-1} + \alpha \* \Delta \mathbf W_K$$ <b>What is $\alpha$?</b>\ Here, alpha is some small step size, and is also known as the "learning rate". $K$ is adjustable, and good performance can be achieved with $K=1$, so that we just take one set of sampling steps per image. 
``` h1_prob = tf.nn.sigmoid(tf.matmul([v1_state], W) + hb) h1_state = tf.nn.relu(tf.sign(h1_prob - tf.random.uniform(tf.shape(h1_prob)))) #sample_h_given_X ``` Lets look at the error of the first run: ``` print("error: ", error(v0_state, v1_state)) #Parameters alpha = 0.01 epochs = 1 batchsize = 200 weights = [] errors = [] batch_number = 0 K = 1 #creating datasets train_ds = \ tf.data.Dataset.from_tensor_slices((trX, trY)).batch(batchsize) for epoch in range(epochs): for batch_x, batch_y in train_ds: batch_number += 1 for i_sample in range(batchsize): for k in range(K): v0_state = batch_x[i_sample] h0_state = hidden_layer(v0_state, W, hb) v1_state = reconstructed_output(h0_state, W, vb) h1_state = hidden_layer(v1_state, W, hb) delta_W = tf.matmul(tf.transpose([v0_state]), h0_state) - tf.matmul(tf.transpose([v1_state]), h1_state) W = W + alpha * delta_W vb = vb + alpha * tf.reduce_mean(v0_state - v1_state, 0) hb = hb + alpha * tf.reduce_mean(h0_state - h1_state, 0) v0_state = v1_state if i_sample == batchsize-1: err = error(batch_x[i_sample], v1_state) errors.append(err) weights.append(W) print ( 'Epoch: %d' % epoch, "batch #: %i " % batch_number, "of %i" % int(60e3/batchsize), "sample #: %i" % i_sample, 'reconstruction error: %f' % err) ``` Let's take a look at the errors at the end of each batch: ``` plt.plot(errors) plt.xlabel("Batch Number") plt.ylabel("Error") plt.show() ``` What is the final weight matrix $W$ after training? ``` print(W.numpy()) # a weight matrix of shape (50,784) ``` <a id="ref5"></a> <h3>Learned features</h3> We can take each hidden unit and visualize the connections between that hidden unit and each element in the input vector. In our case, we have 50 hidden units. Lets visualize those. Let's plot the current weights: <b>tile_raster_images</b> helps in generating an easy to grasp image from a set of samples or weights. 
It transforms the weight matrix <b>W</b>
Lets see if our trained network can fix it: First we plot the image: ``` !wget -O destructed3.jpg https://ibm.box.com/shared/static/vvm1b63uvuxq88vbw9znpwu5ol380mco.jpg img = Image.open('destructed3.jpg') img ``` Now let's pass this image through the neural net: ``` # convert the image to a 1d numpy array sample_case = np.array(img.convert('I').resize((28,28))).ravel().reshape((1, -1))/255.0 sample_case = tf.cast(sample_case, dtype=tf.float32) ``` Feed the sample case into the network and reconstruct the output: ``` hh0_p = tf.nn.sigmoid(tf.matmul(sample_case, W) + hb) hh0_s = tf.round(hh0_p) print("Probability nodes in hidden layer:" ,hh0_p) print("activated nodes in hidden layer:" ,hh0_s) # reconstruct vv1_p = tf.nn.sigmoid(tf.matmul(hh0_s, tf.transpose(W)) + vb) print(vv1_p) #rec_prob = sess.run(vv1_p, feed_dict={ hh0_s: hh0_s_val, W: prv_w, vb: prv_vb}) ``` Here we plot the reconstructed image: ``` img = Image.fromarray(tile_raster_images(X=vv1_p.numpy(), img_shape=(28, 28),tile_shape=(1, 1), tile_spacing=(1, 1))) plt.rcParams['figure.figsize'] = (4.0, 4.0) imgplot = plt.imshow(img) imgplot.set_cmap('gray') ``` <hr> ## Want to learn more? Also, you can use **Watson Studio** to run these notebooks faster with bigger datasets.**Watson Studio** is IBM’s leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, **Watson Studio** enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of **Watson Studio** users today with a free account at [Watson Studio](https://cocl.us/ML0120EN_DSX).This is the end of this lesson. Thank you for reading this notebook, and good luck on your studies. ### Thanks for completing this lesson! 
Notebook created by: <a href = "https://ca.linkedin.com/in/saeedaghabozorgi?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01">Saeed Aghabozorgi</a> Updated to TF 2.X by <a href="https://ca.linkedin.com/in/nilmeier?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01"> Jerome Nilmeier</a><br /> ### References: [https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine](https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01)\ [http://deeplearning.net/tutorial/rbm.html](http://deeplearning.net/tutorial/rbm.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01)\ [http://www.cs.utoronto.ca/\~hinton/absps/netflixICML.pdf](http://www.cs.utoronto.ca/\~hinton/absps/netflixICML.pdf?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01)<br> <http://imonad.com/rbm/restricted-boltzmann-machine/> <hr> Copyright © 2018 [Cognitive Class](https://cocl.us/DX0108EN_CC). 
This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01).
github_jupyter
# Stochastic Volatility model ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns sns.set_context('talk') import pymc3 as pm from pymc3.distributions.timeseries import GaussianRandomWalk from scipy import optimize ``` Asset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21. $$ \sigma \sim Exponential(50) $$ $$ \nu \sim Exponential(.1) $$ $$ s_i \sim Normal(s_{i-1}, \sigma^{-2}) $$ $$ log(r_i) \sim t(\nu, 0, exp(-2 s_i)) $$ Here, $r$ is the daily return series and $s$ is the latent log volatility process. ## Build Model First we load some daily returns of the S&P 500. ``` n = 400 returns = pd.read_csv(pm.get_data("SP500.csv"), index_col='date')['change'] returns[:5] ``` As you can see, the volatility seems to change over time quite a bit but cluster around certain time-periods. Around time-points 2500-3000 you can see the 2009 financial crash. ``` fig, ax = plt.subplots(figsize=(14, 8)) returns.plot(label='S&P500') ax.set(xlabel='time', ylabel='returns') ax.legend(); ``` Specifying the model in `PyMC3` mirrors its statistical specification. ``` with pm.Model() as model: step_size = pm.Exponential('sigma', 50.) s = GaussianRandomWalk('s', sigma=step_size, shape=len(returns)) nu = pm.Exponential('nu', .1) r = pm.StudentT('r', nu=nu, lam=pm.math.exp(-2*s), observed=returns) ``` ## Fit Model For this model, the full maximum a posteriori (MAP) point is degenerate and has infinite density. NUTS, however, gives the correct posterior. 
``` with model: trace = pm.sample(tune=2000, target_accept=0.9) pm.traceplot(trace, var_names=['sigma', 'nu']); fig, ax = plt.subplots() plt.plot(trace['s'].T, 'b', alpha=.03); ax.set(title=str(s), xlabel='time', ylabel='log volatility'); ``` Looking at the returns over time and overlaying the estimated standard deviation we can see how the model tracks the volatility over time. ``` fig, ax = plt.subplots(figsize=(14, 8)) returns.plot(ax=ax) ax.plot(np.exp(trace[s].T), 'r', alpha=.03); #ax.set(xlabel='time', ylabel='returns') ax.legend(['S&P500', 'stoch vol']); ``` ## References 1. Hoffman & Gelman. (2011). [The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo](http://arxiv.org/abs/1111.4246).
github_jupyter
##### Copyright 2019 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # TFP Probabilistic Layers: Variational Auto Encoder <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/probability/examples/Probabilistic_Layers_VAE"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In this example we show how to fit a Variational Autoencoder using TFP's "probabilistic layers." 
### Dependencies & Prerequisites ``` #@title Import { display-mode: "form" } import numpy as np import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import tensorflow_datasets as tfds import tensorflow_probability as tfp tfk = tf.keras tfkl = tf.keras.layers tfpl = tfp.layers tfd = tfp.distributions ``` ### Make things Fast! Before we dive in, let's make sure we're using a GPU for this demo. To do this, select "Runtime" -> "Change runtime type" -> "Hardware accelerator" -> "GPU". The following snippet will verify that we have access to a GPU. ``` if tf.test.gpu_device_name() != '/device:GPU:0': print('WARNING: GPU device not found.') else: print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name())) ``` Note: if for some reason you cannot access a GPU, this colab will still work. (Training will just take longer.) ### Load Dataset ``` datasets, datasets_info = tfds.load(name='mnist', with_info=True, as_supervised=False) def _preprocess(sample): image = tf.cast(sample['image'], tf.float32) / 255. # Scale to unit interval. image = image < tf.random.uniform(tf.shape(image)) # Randomly binarize. return image, image train_dataset = (datasets['train'] .map(_preprocess) .batch(256) .prefetch(tf.data.AUTOTUNE) .shuffle(int(10e3))) eval_dataset = (datasets['test'] .map(_preprocess) .batch(256) .prefetch(tf.data.AUTOTUNE)) ``` Note that _preprocess() above returns `image, image` rather than just `image` because Keras is set up for discriminative models with an (example, label) input format, i.e. $p_\theta(y|x)$. Since the goal of the VAE is to recover the input x from x itself (i.e. $p_\theta(x|x)$), the data pair is (example, example). ### VAE Code Golf #### Specify model. 
``` input_shape = datasets_info.features['image'].shape encoded_size = 16 base_depth = 32 prior = tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1), reinterpreted_batch_ndims=1) encoder = tfk.Sequential([ tfkl.InputLayer(input_shape=input_shape), tfkl.Lambda(lambda x: tf.cast(x, tf.float32) - 0.5), tfkl.Conv2D(base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2D(base_depth, 5, strides=2, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2D(2 * base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2D(2 * base_depth, 5, strides=2, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2D(4 * encoded_size, 7, strides=1, padding='valid', activation=tf.nn.leaky_relu), tfkl.Flatten(), tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size), activation=None), tfpl.MultivariateNormalTriL( encoded_size, activity_regularizer=tfpl.KLDivergenceRegularizer(prior)), ]) decoder = tfk.Sequential([ tfkl.InputLayer(input_shape=[encoded_size]), tfkl.Reshape([1, 1, encoded_size]), tfkl.Conv2DTranspose(2 * base_depth, 7, strides=1, padding='valid', activation=tf.nn.leaky_relu), tfkl.Conv2DTranspose(2 * base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2DTranspose(2 * base_depth, 5, strides=2, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2DTranspose(base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2DTranspose(base_depth, 5, strides=2, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2DTranspose(base_depth, 5, strides=1, padding='same', activation=tf.nn.leaky_relu), tfkl.Conv2D(filters=1, kernel_size=5, strides=1, padding='same', activation=None), tfkl.Flatten(), tfpl.IndependentBernoulli(input_shape, tfd.Bernoulli.logits), ]) vae = tfk.Model(inputs=encoder.inputs, outputs=decoder(encoder.outputs[0])) ``` #### Do inference. 
def display_imgs(x, y=None):
    """Show a single row of grayscale images.

    When ``y`` (one-hot labels) is given, the figure title shows the argmax
    label per image. Interactive mode is suspended while drawing and
    restored afterwards.
    """
    if not isinstance(x, (np.ndarray, np.generic)):
        x = np.array(x)
    plt.ioff()
    num_images = x.shape[0]
    fig, axs = plt.subplots(1, num_images, figsize=(num_images, 1))
    if y is not None:
        fig.suptitle(np.argmax(y, axis=1))
    # Pair each axis with its image instead of indexing by position.
    for axis, img in zip(axs.flat, x):
        axis.imshow(img.squeeze(), interpolation='none', cmap='gray')
        axis.axis('off')
    plt.show()
    plt.close()
    plt.ion()
github_jupyter
def is_pareto_efficient_dumb(costs):
    """Identify Pareto-efficient points by exhaustive comparison.

    A point is Pareto efficient (for minimization) when every other point
    exceeds it in at least one cost dimension. Slow for many points, but
    simple and readable.

    :param costs: An (n_points, n_costs) array
    :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
    """
    n_points = costs.shape[0]
    flags = np.empty(n_points, dtype=bool)
    for idx in range(n_points):
        point = costs[idx]
        # Efficient iff every earlier AND every later point is worse
        # (strictly greater) in at least one cost dimension.
        beaten_by_earlier = np.all(np.any(costs[:idx] > point, axis=1))
        beaten_by_later = np.all(np.any(costs[idx + 1:] > point, axis=1))
        flags[idx] = beaten_by_earlier and beaten_by_later
    return flags
== len(gibbs) == len(y) indices = is_pareto_efficient_dumb(-y) sum(indices) fig, ax = plt.subplots(1,2, sharey=True) ax[0].scatter(y[:,1], y[:,0], label='all polymers') ax[0].scatter(y[indices,1], y[indices,0], label='Pareto optimal') ax[1].scatter(y[:,1], y[:,2], label='all polymers') ax[1].scatter(y[indices,1], y[indices,2], label='Pareto optimal') for a in ax: a.spines['left'].set_smart_bounds(True) a.spines['bottom'].set_smart_bounds(True) ax[0].set_xlabel('$-\Delta G_\mathrm{ads}$ / $k_\mathrm{B}T$') ax[0].set_ylabel('$R_\mathrm{g}$ / nm') ax[1].set_xlabel('$\Delta G_\mathrm{rep}$ / $k_\mathrm{B}T$') ax[0].legend(loc='lower left') fig.tight_layout() fig.savefig('design_space.pdf', bbox_inches='tight') from sklearn.decomposition import PCA X = StandardScaler().fit_transform(df_full_factorial_feat) pca = PCA(2) X_pca = pca.fit_transform(X) fig, ax = plt.subplots(1,1) ax.scatter(X_pca[:,0], X_pca[:,1], label='all polymers') ax.scatter(X_pca[indices,0], X_pca[indices,1], label='Pareto optimal') ax.spines['left'].set_smart_bounds(True) ax.spines['bottom'].set_smart_bounds(True) ax.set_xlabel('first principal component') ax.set_ylabel('second principal component') ax.legend() fig.tight_layout() fig.savefig('pca.pdf', bbox_inches='tight') import umap reducer = umap.UMAP() embedding = reducer.fit_transform(X) fig, ax = plt.subplots(1,1) ax.scatter(embedding[:,0], embedding[:,1], label='all polymers') ax.scatter(embedding[indices,0], embedding[indices,1], label='Pareto optimal') ax.spines['left'].set_smart_bounds(True) ax.spines['bottom'].set_smart_bounds(True) ax.legend() fig.tight_layout() fig.savefig('umap.pdf', bbox_inches='tight') ```
github_jupyter
``` import numpy as np import pandas as pd from statsmodels.tsa.arima_model import ARIMA %matplotlib inline import matplotlib.pyplot as plt from pmdarima.arima.utils import ndiffs from statsmodels.tsa.stattools import adfuller from sklearn.metrics import mean_squared_error import warnings # import temperature time series df = pd.read_csv('full_external_temperatures.csv', usecols=['dateTime', 'data']) display(df.columns) # convert to datetime object df['dateTime'] = pd.to_datetime(df['dateTime']) # # convert to pandas time series ts = df.set_index('dateTime') # plot results plt.plot(ts) # let's take a slice of our data, e.g., 3 weeks ts = ts.loc['2014-12-01':'2014-12-07'] ts_train = ts.loc['2014-12-01':'2014-12-06'] ts_test = ts.loc['2014-12-06':'2014-12-07'] # plot plt.plot(ts_train) plt.plot(ts_test) # let's check the order of parameter d def test_stationarity(timeseries, window = 12, cutoff = 0.01): #Determing rolling statistics rolmean = timeseries.rolling(window).mean() rolstd = timeseries.rolling(window).std() #Plot rolling statistics: fig = plt.figure(figsize=(12, 8)) orig = plt.plot(timeseries, color='blue',label='Original') mean = plt.plot(rolmean, color='red', label='Rolling Mean') std = plt.plot(rolstd, color='black', label = 'Rolling Std') plt.legend(loc='best') plt.title('Rolling Mean & Standard Deviation') plt.show() #Perform Dickey-Fuller test: print('Results of Dickey-Fuller Test:') dftest = adfuller(timeseries.iloc[:,0].values) pvalue = dftest[1] if pvalue < cutoff: print('p-value = %.4f. The series is likely stationary.' % pvalue) else: print('p-value = %.4f. The series is likely non-stationary.' 
def get_arima_order(ts, p_vector, q_vector, d=0):
    """Grid-search (p, d, q) ARIMA orders and report the best one by MSE.

    :param ts: 1-D array-like time series handed to ``check_arima_model``.
    :param p_vector: iterable of candidate AR orders ``p``.
    :param q_vector: iterable of candidate MA orders ``q``.
    :param d: differencing order used for every candidate (default 0).
    :return: tuple ``(best_cfg, best_score)`` — the best (p, d, q) order
        (``None`` when no candidate could be fit) and its out-of-sample MSE
        (``inf`` when none fit). Previously nothing was returned, so callers
        that ignore the return value are unaffected.
    """
    best_score, best_cfg = float("inf"), None
    for p in p_vector:
        for q in q_vector:
            order = (p, d, q)
            # Some orders fail to converge or are non-invertible; skip those.
            # `except Exception` (not a bare `except:`) so KeyboardInterrupt
            # and SystemExit still propagate.
            try:
                mse = check_arima_model(ts, order)
            except Exception:
                continue
            if mse < best_score:
                best_score, best_cfg = mse, order
            print('ARIMA%s MSE=%.3f' % (order, mse))
    print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score))
    return best_cfg, best_score
lower_series, upper_series, color='k', alpha=.15) plt.title('Forecast vs Actuals') plt.legend(loc='upper left', fontsize=8) plt.show() # auto arima import pmdarima as pm model = pm.auto_arima(ts_train.iloc[:,0].values, start_p=1, start_q=1, test='adf', max_p=6, max_q=6, m=1,d=None, seasonal=False, # No Seasonality start_P=0, D=0, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True) print(model.summary()) n_periods = len(ts_test) fc, confint = model.predict(n_periods=n_periods, return_conf_int=True) index_of_fc = ts_test.index fc_series = pd.Series(fc, index=index_of_fc) lower_series = pd.Series(confint[:, 0], index=ts_test.index) upper_series = pd.Series(confint[:, 1], index=ts_test.index) # Plot auto arima results plt.figure() plt.plot(ts_train, label='training') plt.plot(ts_test, label='actual') plt.plot(fc_series, label='forecast') plt.fill_between(lower_series.index, lower_series, upper_series, color='k', alpha=.15) plt.title('Forecast vs Actuals') plt.legend(loc='upper left', fontsize=8) plt.show() # seasonality model = pm.auto_arima(ts_train.iloc[:,0].values, start_p=1, start_q=1, test='adf', max_p=6, max_q=6, m=1,d=None, seasonal=True, # Seasonality start_P=0, D=0, trace=True, error_action='ignore', suppress_warnings=True, stepwise=True) print(model.summary()) n_periods = len(ts_test) fc, confint = model.predict(n_periods=n_periods, return_conf_int=True) index_of_fc = ts_test.index fc_series = pd.Series(fc, index=index_of_fc) lower_series = pd.Series(confint[:, 0], index=ts_test.index) upper_series = pd.Series(confint[:, 1], index=ts_test.index) # Plot auto sarima results plt.figure() plt.plot(ts_train, label='training') plt.plot(ts_test, label='actual') plt.plot(fc_series, label='forecast') plt.fill_between(lower_series.index, lower_series, upper_series, color='k', alpha=.15) plt.title('Forecast vs Actuals') plt.legend(loc='upper left', fontsize=8) plt.show() ``` # **My Test and fit :** ``` # Build Model model_arima = 
ARIMA(ts_train.iloc[:,0].values, order=(2, 0, 1)) fitted = model_arima.fit(disp=-1) print(type(ts_test)) print(len(ts_test)) result, se, conf = fitted.forecast(len(ts_test),alpha=0.05) print(result, " ",se," ", conf) fc_series = pd.Series(result,index=ts_test.index) plt.figure() plt.plot(ts_train, label='training') plt.plot(ts_test, label='actual') plt.plot(fc_series, label='forecast') plt.show() # Build Model model_arima = ARIMA(ts_train.iloc[:,0].values, order=(2, 0, 1)) fitted = model_arima.fit(disp=0) result2 = fitted.forecast(len(ts_test)) fc_series = pd.Series(result2[0],index=ts_test.index) plt.figure() plt.plot(ts_train, label='training') plt.plot(ts_test, label='actual') plt.plot(fc_series, label='forecast') plt.show() ```
github_jupyter
``` import numpy as np from embiggen.embedders.layers import GraphAttention from ensmallen.datasets.linqs import Cora from ensmallen.datasets.linqs.parse_linqs import get_words_data from plot_keras_history import plot_history cora = Cora() features = get_words_data(cora) cora = cora.filter_from_names(node_type_name_to_filter=['Word']).remove_edge_weights().remove_edge_types() features = features.loc[cora.get_node_names()] #cora = cora.generate_new_edges_from_node_features(features.values, neighbours_number=3, max_degree=3) cora nodes_number = cora.get_nodes_number() train_graph, validation_graph = cora.node_label_holdout(0.8, use_stratification=True) A = np.zeros((cora.get_nodes_number(), cora.get_nodes_number()), dtype=float) A[cora.get_source_node_ids(True), cora.get_destination_node_ids(True)] = 1.0 train_node_ids = np.array([ node_id for node_id, node_type in enumerate(train_graph.get_node_type_ids()) if node_type is not None ]) test_node_ids = np.array([ node_id for node_id, node_type in enumerate(validation_graph.get_node_type_ids()) if node_type is not None ]) validation_data = ( test_node_ids, validation_graph.get_one_hot_encoded_node_types()[test_node_ids] ) """Graph Convolutional Neural Network (GCNN) model for graph embedding.""" from typing import Dict, List, Union import pandas as pd import numpy as np from sklearn.utils import shuffle import tensorflow as tf from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau from extra_keras_metrics import get_minimal_multiclass_metrics from tensorflow.keras.layers import Input, GlobalAveragePooling1D, Dense, Attention, Embedding, Reshape, Dropout from tensorflow.keras.initializers import Initializer from tensorflow.keras.regularizers import Regularizer from tensorflow.keras.constraints import Constraint from tqdm.keras import TqdmCallback from tensorflow.keras.models import Model # pylint: disable=import-error from tensorflow.keras.optimizers import \ Optimizer # pylint: disable=import-error from 
ensmallen import Graph node_ids = Input( shape=(1,), name="NodeIds", dtype=tf.int64, ) const_A = tf.constant(A, dtype=float) const_A = const_A * const_A * const_A all_node_features = tf.constant(features.values, dtype=float) central_node_features = Embedding( weights=[all_node_features], trainable=False, input_length=1, input_dim=features.shape[0], output_dim=features.shape[1] )(node_ids) k = 30 reduced_central_node_features = Dense(k, activation='relu')( Dropout(0.25)(central_node_features) ) reduced_all_node_features = Dense(k, activation='relu')( Dropout(0.25)(all_node_features) ) smax = tf.reshape( tf.nn.softmax( tf.matmul( reduced_central_node_features, tf.transpose(reduced_all_node_features) ) ), [-1, 2708] ) mask = tf.reshape( tf.cast( tf.nn.embedding_lookup(const_A, node_ids), dtype=float ), [-1, 2708] ) att = tf.matmul( smax, # * mask, reduced_all_node_features, ) output = Dense(7, activation="softmax")(att) model = Model( inputs=node_ids, outputs=output, name="GAT" ) model.compile( loss="categorical_crossentropy", optimizer="nadam", weighted_metrics=get_minimal_multiclass_metrics() ) model.summary() history = pd.DataFrame(model.fit( train_node_ids, train_graph.get_one_hot_encoded_node_types()[train_node_ids], validation_data=validation_data, epochs=1024, verbose=False, shuffle=True, batch_size=256, callbacks=[ EarlyStopping( monitor="loss", min_delta=0.001, patience=40, mode="min", ), ReduceLROnPlateau( monitor="loss", min_delta=0.001, patience=10, factor=0.9, mode="min", ), TqdmCallback() ] ).history) _ = plot_history(history) ```
github_jupyter
# Linear State-Space Models Provided are two examples of linear state-space models on which one can perform Bayesian filtering and smoothing in order to obtain a posterior distribution over a latent state trajectory based on noisy observations. In order to understand the theory behind these methods in detail we refer to [1] and [2]. We provide examples for two different types of state-space model: 1. [Linear, Discrete State-Space Model](#1.-Linear-Discrete-State-Space-Model:-Car-Tracking): Car Tracking 2. [Linear, Continuous-Discrete State-Space Model](#2.-Linear-Continuous-Discrete-State-Space-Model:-Ornstein-Uhlenbeck-Process): The Ornstein-Uhlenbeck Process **References**: > [1] Särkkä, Simo, and Solin, Arno. Applied Stochastic Differential Equations. Cambridge University Press, 2019. > > [2] Särkkä, Simo. Bayesian Filtering and Smoothing. Cambridge University Press, 2013. ``` import numpy as np import probnum as pn from probnum import filtsmooth, randvars, statespace np.random.seed(12345) # Make inline plots vector graphics instead of raster graphics %matplotlib inline from IPython.display import set_matplotlib_formats set_matplotlib_formats('pdf', 'svg') # Plotting import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec plt.style.use('../../probnum.mplstyle') ``` ## 1. **Linear Discrete** State-Space Model: Car Tracking --- We begin showcasing the arguably most simple case in which we consider the following state-space model. Consider matrices $A \in \mathbb{R}^{d \times d}$ and $H \in \mathbb{R}^{m \times d}$ where $d$ is the state dimension and $m$ is the dimension of the measurements. 
Then we define the dynamics and the measurement model as follows: For $k = 1, \dots, K$ and $x_0 \sim \mathcal{N}(\mu_0, \Sigma_0)$: $$ \begin{align} \boldsymbol{x}_k &\sim \mathcal{N}(\boldsymbol{A} \, \boldsymbol{x}_{k-1}, \boldsymbol{Q}) \\ \boldsymbol{y}_k &\sim \mathcal{N}(\boldsymbol{H} \, \boldsymbol{x}_k, \boldsymbol{R}) \end{align} $$ This defines a dynamics model that assumes a state $\boldsymbol{x}_k$ in a **discrete** sequence of states arising from a linear projection of the previous state $x_{k-1}$ corrupted with additive Gaussian noise under a **process noise** covariance matrix $Q$. Similarly, the measurements $\boldsymbol{y}_k$ are assumed to be linear projections of the latent state under additive Gaussian noise according to a **measurement noise** covariance $R$. In the following example we consider projections and covariances that are constant over the state and measurement trajectories (linear time invariant, or **LTI**). Note that this can be generalized to a linear time-varying state-space model, as well. Then $A$ is a function $A: \mathbb{T} \rightarrow \mathbb{R}^{d \times d}$ and $H$ is a function $H: \mathbb{T} \rightarrow \mathbb{R}^{m \times d}$ where $\mathbb{T}$ is the "time dimension". In other words, here, every relationship is linear and every distribution is a Gaussian distribution. Under these simplifying assumptions it is possible to obtain a filtering posterior distribution over the state trajectory $(\boldsymbol{x}_k)_{k=1}^{K}$ by using a **Kalman Filter**. The example is taken from Example 3.6 in [2]. ### Define State-Space Model #### I. 
Discrete Dynamics Model: Linear, Time-Invariant, Gaussian Transitions ``` state_dim = 4 observation_dim = 2 delta_t = 0.2 # Define linear transition operator dynamics_transition_matrix = np.eye(state_dim) + delta_t * np.diag(np.ones(2), 2) # Define process noise (covariance) matrix process_noise_matrix = ( np.diag(np.array([delta_t ** 3 / 3, delta_t ** 3 / 3, delta_t, delta_t])) + np.diag(np.array([delta_t ** 2 / 2, delta_t ** 2 / 2]), 2) + np.diag(np.array([delta_t ** 2 / 2, delta_t ** 2 / 2]), -2) ) ``` To create a discrete, LTI Gaussian dynamics model, `probnum` provides the `DiscreteLTIGaussian` class that takes - `state_trans_mat` : the linear transition matrix (above: $A$) - `shift_vec` : a force vector for _affine_ transformations of the state (here: zero) - `proc_noise_cov_mat` : the covariance matrix for the Gaussian process noise ``` # Create discrete, Linear Time-Invariant Gaussian dynamics model dynamics_model = statespace.DiscreteLTIGaussian( state_trans_mat=dynamics_transition_matrix, shift_vec=np.zeros(state_dim), proc_noise_cov_mat=process_noise_matrix, ) ``` #### II. Discrete Measurement Model: Linear, Time-Invariant, Gaussian Measurements ``` measurement_marginal_variance = 0.5 measurement_matrix = np.eye(observation_dim, state_dim) measurement_noise_matrix = measurement_marginal_variance * np.eye(observation_dim) measurement_model = statespace.DiscreteLTIGaussian( state_trans_mat=measurement_matrix, shift_vec=np.zeros(observation_dim), proc_noise_cov_mat=measurement_noise_matrix, ) ``` #### III. Initial State Random Variable ``` mu_0 = np.zeros(state_dim) sigma_0 = 0.5 * measurement_marginal_variance * np.eye(state_dim) initial_state_rv = randvars.Normal(mean=mu_0, cov=sigma_0) ``` ### Generate Data for the State-Space Model `statespace.generate_samples()` is used to sample both latent states and noisy observations from the specified state space model. 
``` time_grid = np.arange(0., 20., step=delta_t) latent_states, observations = statespace.generate_samples( dynmod=dynamics_model, measmod=measurement_model, initrv=initial_state_rv, times=time_grid, ) ``` ### Kalman Filtering #### I. Kalman Filter ``` kalman_filter = filtsmooth.Kalman( dynamics_model=dynamics_model, measurement_model=measurement_model, initrv=initial_state_rv ) ``` #### II. Perform Kalman Filtering + Rauch-Tung-Striebel Smoothing ``` state_posterior = kalman_filter.filtsmooth( dataset=observations, times=time_grid, ) ``` The method `filtsmooth` returns a `KalmanPosterior` object which provides convenience functions for e.g. sampling and interpolation. We can also extract the just computed posterior smoothing state variables by querying the `.state_rvs` property. This yields a list of Gaussian Random Variables from which we can extract the statistics in order to visualize them. ``` posterior_state_rvs = state_posterior.state_rvs # List of <num_time_points> Normal Random Variables posterior_state_means = posterior_state_rvs.mean # Shape: (num_time_points, state_dim) posterior_state_covs = posterior_state_rvs.cov # Shape: (num_time_points, state_dim, state_dim) ``` ### Visualize Results ``` state_fig = plt.figure() state_fig_gs = gridspec.GridSpec(ncols=2, nrows=2, figure=state_fig) ax_00 = state_fig.add_subplot(state_fig_gs[0, 0]) ax_01 = state_fig.add_subplot(state_fig_gs[0, 1]) ax_10 = state_fig.add_subplot(state_fig_gs[1, 0]) ax_11 = state_fig.add_subplot(state_fig_gs[1, 1]) # Plot means mu_x_1, mu_x_2, mu_x_3, mu_x_4 = [posterior_state_means[:, i] for i in range(state_dim)] ax_00.plot(time_grid, mu_x_1, label="posterior mean"); ax_01.plot(time_grid, mu_x_2); ax_10.plot(time_grid, mu_x_3); ax_11.plot(time_grid, mu_x_4); # Plot marginal standard deviations std_x_1, std_x_2, std_x_3, std_x_4 = [np.sqrt(posterior_state_covs[:, i, i]) for i in range(state_dim)] ax_00.fill_between(time_grid, mu_x_1 - 1.96 * std_x_1, mu_x_1 + 1.96 * std_x_1, alpha=0.2, 
label="1.96 marginal stddev"); ax_01.fill_between(time_grid, mu_x_2 - 1.96 * std_x_2, mu_x_2 + 1.96 * std_x_2, alpha=0.2); ax_10.fill_between(time_grid, mu_x_3 - 1.96 * std_x_3, mu_x_3 + 1.96 * std_x_3, alpha=0.2); ax_11.fill_between(time_grid, mu_x_4 - 1.96 * std_x_4, mu_x_4 + 1.96 * std_x_4, alpha=0.2); # Plot groundtruth obs_x_1, obs_x_2 = [observations[:, i] for i in range(observation_dim)] ax_00.scatter(time_grid, obs_x_1, marker=".", label="measurements"); ax_01.scatter(time_grid, obs_x_2, marker="."); # Add labels etc. ax_00.set_xlabel("t") ax_01.set_xlabel("t") ax_10.set_xlabel("t") ax_11.set_xlabel("t") ax_00.set_title(r"$x_1$") ax_01.set_title(r"$x_2$") ax_10.set_title(r"$x_3$") ax_11.set_title(r"$x_4$") handles, labels = ax_00.get_legend_handles_labels() state_fig.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5)) state_fig.tight_layout() ``` ## 2. **Linear Continuous-Discrete** State-Space Model: Ornstein-Uhlenbeck Process --- Now, consider we have a look at **continuous** dynamics. We assume that there is a continuous process that defines the dynamics of our latent space from which we collect discrete linear-Gaussian measurements (as above). Only the dynamics model becomes continuous. In particular, we formulate the dynamics as a stochastic process in terms of a linear time-invariant stochastic differential equation (LTISDE). We refer to [1] for more details. Consider matrices $\boldsymbol{F} \in \mathbb{R}^{d \times d}$, $\boldsymbol{L} \in \mathbb{R}^{s \times d}$ and $H \in \mathbb{R}^{m \times d}$ where $d$ is the state dimension and $m$ is the dimension of the measurements. We define the following **continuous-discrete** state-space model: Let $x(t_0) \sim \mathcal{N}(\mu_0, \Sigma_0)$. 
$$ \begin{align} d\boldsymbol{x} &= \boldsymbol{F} \, \boldsymbol{x} \, dt + \boldsymbol{L} \, d \boldsymbol{\omega} \\ \boldsymbol{y}_k &\sim \mathcal{N}(\boldsymbol{H} \, \boldsymbol{x}(t_k), \boldsymbol{R}), \qquad k = 1, \dots, K \end{align} $$ where $\boldsymbol{\omega} \in \mathbb{R}^s$ denotes a vector of driving forces (often Brownian Motion). Note that this can be generalized to a linear time-varying state-space model, as well. Then $\boldsymbol{F}$ is a function $\mathbb{T} \rightarrow \mathbb{R}^{d \times d}$, $\boldsymbol{L}$ is a function $\mathbb{T} \rightarrow \mathbb{R}^{s \times d}$, and $H$ is a function $\mathbb{T} \rightarrow \mathbb{R}^{m \times d}$ where $\mathbb{T}$ is the "time dimension". In the following example, however, we consider a LTI SDE, namely, the Ornstein-Uhlenbeck Process from which we observe discrete linear Gaussian measurements. ### Define State-Space Model #### I. Continuous Dynamics Model: Linear, Time-Invariant Stochastic Differential Equation (LTISDE) ``` state_dim = 1 observation_dim = 1 delta_t = 0.2 # Define Linear, time-invariant Stochastic Differential Equation that models # the (scalar) Ornstein-Uhlenbeck Process drift_constant = 0.21 dispersion_constant = np.sqrt(0.5) drift = -drift_constant * np.eye(state_dim) force = np.zeros(state_dim) dispersion = dispersion_constant * np.eye(state_dim) ``` The _continuous_ counterpart to the discrete LTI Gaussian model from above is provided via the `LTISDE` class. It is initialized by the state space components - `driftmat` : the drift matrix $\boldsymbol{F}$ - `forcevec` : a force vector that is added to the state (note that this is **not** $\boldsymbol{\omega}$.) Here: zero. - `dispmat` : the dispersion matrix $\boldsymbol{L}$ ``` # Create dynamics model dynamics_model = statespace.LTISDE( driftmat=drift, forcevec=force, dispmat=dispersion, ) ``` #### II. 
Discrete Measurement Model: Linear, Time-Invariant Gaussian Measurements ``` measurement_marginal_variance = 0.1 measurement_matrix = np.eye(observation_dim, state_dim) measurement_noise_matrix = measurement_marginal_variance * np.eye(observation_dim) ``` As above, the measurement model is discrete, LTI Gaussian. Only the dymanics are continuous (i.e. continuous-discrete). ``` measurement_model = statespace.DiscreteLTIGaussian( state_trans_mat=measurement_matrix, shift_vec=np.zeros(observation_dim), proc_noise_cov_mat=measurement_noise_matrix, ) ``` #### III. Initial State Random Variable ``` mu_0 = 10. * np.ones(state_dim) sigma_0 = np.eye(state_dim) initial_state_rv = randvars.Normal(mean=mu_0, cov=sigma_0) ``` ### Generate Data for the State-Space Model `statespace.generate_samples()` is used to sample both latent states and noisy observations from the specified state space model. ``` time_grid = np.arange(0., 20., step=delta_t) latent_states, observations = statespace.generate_samples( dynmod=dynamics_model, measmod=measurement_model, initrv=initial_state_rv, times=time_grid, ) ``` ### Kalman Filtering In fact, since we still consider a **linear** model, we can apply Kalman Filtering in this case again. According to Section 10 in [1], the moments of the filtering posterior in the continuous-discrete case are solutions to linear differential equations, which `probnum` solves for us when invoking the `<Kalman_object>.filtsmooth(...)` method. #### I. Kalman Filter ``` kalman_filter = filtsmooth.Kalman( dynamics_model=dynamics_model, measurement_model=measurement_model, initrv=initial_state_rv, ) ``` #### II. Perform Kalman Filtering + Rauch-Tung-Striebel Smoothing ``` state_posterior = kalman_filter.filtsmooth( dataset=observations, times=time_grid, ) ``` The method `filtsmooth` returns a `KalmanPosterior` object which provides convenience functions for e.g. sampling and prediction. 
We can also extract the just computed posterior smoothing state variables by querying the `.state_rvs` property. This yields a list of Gaussian Random Variables from which we can extract the statistics in order to visualize them. ``` posterior_state_rvs = state_posterior.state_rvs # List of <num_time_points> Normal Random Variables posterior_state_means = posterior_state_rvs.mean.squeeze() # Shape: (num_time_points, ) posterior_state_covs = posterior_state_rvs.cov # Shape: (num_time_points, ) ``` ### Visualize Results ``` state_fig = plt.figure() ax = state_fig.add_subplot() # Plot means ax.plot(time_grid, posterior_state_means, label="posterior mean") # Plot marginal standard deviations std_x = np.sqrt(posterior_state_covs).squeeze() ax.fill_between( time_grid, posterior_state_means - 1.96 * std_x, posterior_state_means + 1.96 * std_x, alpha=0.2, label="1.96 marginal stddev", ) ax.scatter(time_grid, observations, marker=".", label="measurements") # Add labels etc. ax.set_xlabel("t") ax.set_title(r"$x$") ax.legend() state_fig.tight_layout() ```
github_jupyter
# Text Preprocessing

In the previous section we discussed some properties that make language unique. The key is that the number of tokens (aka words) is large and very unevenly distributed. Hence, a naive multiclass classification approach to predict the next symbol doesn't always work very well. Moreover, we need to turn text into a format that we can optimize over, i.e. we need to map it to vectors. At its extreme we have two alternatives. One is to treat each word as a unique entity, e.g. `Salton.Wong.Yang.1975`. The problem with this strategy is that we might well have to deal with 100,000 to 1,000,000 vectors for very large and diverse corpora. At the other extreme lies the strategy to predict one character at a time, as suggested e.g. by [Ling et al., 2015](https://arxiv.org/pdf/1508.02096.pdf). A good balance in between both strategies is [byte-pair encoding](https://en.wikipedia.org/wiki/Byte_pair_encoding), as described by [Sennrich, Haddow and Birch, 2015](https://arxiv.org/abs/1508.07909) for the purpose of neural machine translation. It decomposes text into syllable-like fragments that occur frequently. This allows for models that are able to generate words like `heteroscedastic` or `pentagram` based on previously viewed words, e.g. `heterogeneous`, `homoscedastic`, `diagram`, and `pentagon`. Going into details of these models is beyond the scope of the current chapter. We will address this later when discussing natural language processing (`chapter_nlp`) in much more detail. Suffice it to say that it can contribute significantly to the accuracy of natural language processing models. For the sake of simplicity we will limit ourselves to pure character sequences. We use H.G. Wells' *The Time Machine* as before. We begin by filtering the text and converting it into a sequence of character IDs.

## Data Loading

We begin, as before, by loading the data and by mapping it into a sequence of whitespaces, punctuation signs and regular characters.
Preprocessing is minimal and we limit ourselves to removing multiple whitespaces. ``` import sys sys.path.insert(0, '..') import torch import random import collections with open('../data/timemachine.txt', 'r') as f: raw_text = f.read() print(raw_text[0:110]) ``` ## Tokenization Next we need to split the dataset, a string, into tokens. A token is a data point the model will train and predict. We common use a word or a character as a token. ``` lines = raw_text.split('\n') text = ' '.join(' '.join(lines).lower().split()) print('# of chars:', len(text)) print(text[0:70]) ``` ## Vocabulary Then we need to map tokens into numerical indices. We often call it a vocabulary. Its input is a list of tokens, called a corpus. Then it counts the frequency of each token in this corpus, and then assigns an numerical index to each token according to its frequency. Rarely appeared tokens are often removed to reduce the complexity. A token doesn't exist in corpus or has been removed is mapped into a special unknown (“&lt;unk&gt;”) token. We optionally add another three special tokens: “&lt;pad&gt;” a token for padding, “&lt;bos&gt;” to present the beginning for a sentence, and “&lt;eos&gt;” for the ending of a sentence. ``` class Vocab(object): # This class is saved in d2l. 
def __init__(self, tokens, min_freq=0, use_special_tokens=False): # sort by frequency and token counter = collections.Counter(tokens) token_freqs = sorted(counter.items(), key=lambda x: x[0]) token_freqs.sort(key=lambda x: x[1], reverse=True) if use_special_tokens: # padding, begin of sentence, end of sentence, unknown self.pad, self.bos, self.eos, self.unk = (0, 1, 2, 3) tokens = ['<pad>', '<bos>', '<eos>', '<unk>'] else: self.unk = 0 tokens = ['<unk>'] tokens += [token for token, freq in token_freqs if freq >= min_freq] self.idx_to_token = [] self.token_to_idx = dict() for token in tokens: self.idx_to_token.append(token) self.token_to_idx[token] = len(self.idx_to_token) - 1 def __len__(self): return len(self.idx_to_token) def __getitem__(self, tokens): if not isinstance(tokens, (list, tuple)): return self.token_to_idx.get(tokens, self.unk) else: return [self.__getitem__(token) for token in tokens] def to_tokens(self, indices): if not isinstance(indices, (list, tuple)): return self.idx_to_token[indices] else: return [self.idx_to_token[index] for index in indices] ``` We construct a vocabulary with the time machine dataset as the corpus, and then print the map between tokens to indices. ``` vocab = Vocab(text) print(vocab.token_to_idx) ``` After that, each character in the training data set is converted into an index ID. To illustrate things we print the first 20 characters and their corresponding indices. ``` corpus_indices = [vocab[char] for char in text] sample = corpus_indices[:15] print('chars:', [vocab.idx_to_token[idx] for idx in sample]) print('indices:', sample) ``` We packaged the above code in the `(corpus_indices, vocab) = load_data_time_machine()` function of the `d2l` package to make it easier to call it in later chapters. ## Training Data Preparation During training, we need to read mini-batches of examples and labels at random. Since sequence data is by its very nature sequential, we need to address the issue of processing it. 
We did so in a rather ad-hoc manner when we introduced it in `chapter_sequence`. Let's formalize this a bit. Consider the beginning of the book we just processed. If we want to split it up into sequences of 5 symbols each, we have quite some freedom since we could pick an arbitrary offset.

```
from IPython.display import SVG
SVG(filename= '../img/timemachine-5gram.svg')
```

Fig: Different offsets lead to different subsequences when splitting up text.

In fact, any one of these offsets is fine. Hence, which one should we pick? In fact, all of them are equally good. But if we pick all offsets we end up with rather redundant data due to overlap, particularly if the sequences are long. Picking just a random set of initial positions is no good either, since it does not guarantee uniform coverage of the array. For instance, if we pick $n$ elements at random out of a set of $n$ with replacement, the probability for a particular element not being picked is $(1-1/n)^n \to e^{-1}$. This means that we cannot expect uniform coverage this way. Even randomly permuting a set of all offsets does not offer good guarantees. Instead we can use a simple trick to get both *coverage* and *randomness*: use a random offset, after which one uses the terms sequentially. We describe how to accomplish this for both random sampling and sequential partitioning strategies below.

### Random sampling

The following code randomly generates a minibatch from the data each time. Here, the batch size `batch_size` indicates the number of examples in each mini-batch and `num_steps` is the length of the sequence (or time steps if we have a time series) included in each example. In random sampling, each example is a sequence arbitrarily captured from the original sequence. The positions of two adjacent random mini-batches on the original sequence are not necessarily adjacent.
The target is to predict the next character based on what we've seen so far, hence the labels are the original sequence, shifted by one character. Note that this is not recommended for latent variable models, since we do not have access to the hidden state *prior* to seeing the sequence. We packaged the above code in the `load_data_time_machine` function of the `d2l` package to make it easier to call it in later chapters. It returns four variables: `corpus_indices`, `char_to_idx`, `idx_to_char`, and `vocab_size`. ``` # This function is saved in the d2l package for future use def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None): # Offset for the iterator over the data for uniform starts offset = int(random.uniform(0,num_steps)) corpus_indices = corpus_indices[offset:] # Subtract 1 extra since we need to account for the sequence length num_examples = ((len(corpus_indices) - 1) // num_steps) - 1 # Discard half empty batches num_batches = num_examples // batch_size example_indices = list(range(0, num_examples * num_steps, num_steps)) random.shuffle(example_indices) # This returns a sequence of the length num_steps starting from pos def _data(pos): return corpus_indices[pos: pos + num_steps] for i in range(0, batch_size * num_batches, batch_size): # Batch_size indicates the random examples read each time batch_indices = example_indices[i:(i+batch_size)] X = [_data(j) for j in batch_indices] Y = [_data(j + 1) for j in batch_indices] yield torch.Tensor(X, device=ctx), torch.Tensor(Y, device=ctx) ``` Let us generate an artificial sequence from 0 to 30. We assume that the batch size and numbers of time steps are 2 and 5 respectively. This means that depending on the offset we can generate between 4 and 5 $(x,y)$ pairs. With a minibatch size of 2 we only get 2 minibatches. 
```
my_seq = list(range(30))
for X, Y in data_iter_random(my_seq, batch_size=2, num_steps=5):
    print('X: ', X, '\nY:', Y)
```

### Sequential partitioning

In addition to random sampling of the original sequence, we can also make the positions of two adjacent mini-batches adjacent on the original sequence. Now, we can use the hidden state of the last time step of a mini-batch to initialize the hidden state of the next mini-batch, so that the output of the next mini-batch is also dependent on the input of the current mini-batch, with this pattern continuing in subsequent mini-batches. This has two effects on the implementation of a recurrent neural network. On the one hand, when training the model, we only need to initialize the hidden state at the beginning of each epoch. On the other hand, when multiple adjacent mini-batches are concatenated by passing hidden states, the gradient calculation of the model parameters will depend on all the mini-batch sequences that are concatenated. Within the same epoch, as the number of iterations increases, the cost of the gradient calculation rises. So that the model parameter gradient calculations only depend on the mini-batch sequence read by one iteration, we can separate the hidden state from the computational graph before reading the mini-batch (this can be done by detaching the graph). We will gain a deeper understanding of this approach in the following sections.
``` # This function is saved in the d2l package for future use def data_iter_consecutive(corpus_indices, batch_size, num_steps, ctx=None): # Offset for the iterator over the data for uniform starts offset = int(random.uniform(0,num_steps)) # Slice out data - ignore num_steps and just wrap around num_indices = ((len(corpus_indices) - offset) // batch_size) * batch_size indices = torch.Tensor(corpus_indices[offset:(offset + num_indices)], device=ctx) indices = indices.reshape((batch_size,-1)) # Need to leave one last token since targets are shifted by 1 num_epochs = ((num_indices // batch_size) - 1) // num_steps for i in range(0, num_epochs * num_steps, num_steps): X = indices[:,i:(i+num_steps)] Y = indices[:,(i+1):(i+1+num_steps)] yield X, Y ``` Using the same settings, print input `X` and label `Y` for each mini-batch of examples read by random sampling. The positions of two adjacent random mini-batches on the original sequence are adjacent. ``` for X, Y in data_iter_consecutive(my_seq, batch_size=2, num_steps=6): print('X: ', X, '\nY:', Y) ``` Sequential partitioning decomposes the sequence into `batch_size` many strips of data which are traversed as we iterate over minibatches. Note that the $i$-th element in a minibatch matches with the $i$-th element of the next minibatch rather than within a minibatch. ## Summary * Documents are preprocessed by tokenizing the words and mapping them into IDs. There are multiple methods: * Character encoding which uses individual characters (good e.g. for Chinese) * Word encoding (good e.g. for English) * Byte-pair encoding (good for languages that have lots of morphology, e.g. German) * The main choices for sequence partitioning are whether we pick consecutive or random sequences. In particular for recurrent networks the former is critical. * Given the overall document length, it is usually acceptable to be slightly wasteful with the documents and discard half-empty minibatches. ## Exercises 1. 
Which other mini-batch data sampling methods can you think of?
1. Why is it a good idea to have a random offset?
    * Does it really lead to a perfectly uniform distribution over the sequences in the document?
    * What would you have to do to make things even more uniform?
1. If we want a sequence example to be a complete sentence, what kinds of problems does this introduce in mini-batch sampling? Why would we want to do this anyway?
github_jupyter
# The rightful Data Science workflow ## Learn the complete Data Science pipeline while building a real world capstone project. # PREDICTING STUDENTS PERFORMANCE WITH LINEAR REGRESSION The statistics of the number of data science content on the web. What makes this unique. Table of content. i. Introducing the problem statement, the objectives and the various libraries to be used for the project. ii. Understanding the project through Exploratory Data Analysis. iii. Data preprocessing and feature engineering. iv. Machine learning section; predictive modelling. ## 1. Introduction Linear Regression is unique in that it belongs to the field of Mathematics but it also adapts well to machine learning. With Linear Regression, we build a model that combines specific set of input numeric values, and its solution being the predicted numeric output. Because we deal with Numeric values, this helps in problems solving scale, number, statistical probability, grades of which student performance evaluation is an essential need. ### Objectives **The main goal of this project is to use Linear Regression, a machine learning algorithm to develop a model that will predict the grade of a student based on a given student attributes.** we will begin by importing various python libraries that will help us in our data explorations. **Data wrangling libraries** 1. `Pandas` is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool, built on top of the Python programming language.[Read more about pandas](https://pandas.pydata.org/) 2. `Numpy` is one of the most powerful open source scientific computational tool used by scientist, statisticians and other people in the quantitative field. [You can read about numpy ease-of-use and its magic here](https://numpy.org) **Data Visualization libraries** 3. 
`Matplotlib` is a comprehensive library for creating static, animated, and interactive visualizations in Python.[Read about matplotlib here](https://matplotlib.org/) 4. `Seaborn` is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics.[Read more about seaborn](https://seaborn.pydata.org/) **Model creation and evaluation libraries** * These libraries with it's operations would be explained into details as we move along the project. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline import matplotlib.gridspec as gridspec import seaborn as sns sns.set_style('darkgrid') plt.style.use('fivethirtyeight') # for data preprocessing from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import StandardScaler from statsmodels.stats.outliers_influence import variance_inflation_factor from scipy import stats # library for model creation from sklearn.linear_model import LinearRegression,SGDRegressor from sklearn.linear_model import Lasso from sklearn.dummy import DummyRegressor from sklearn.model_selection import train_test_split,GridSearchCV # model evaluation from sklearn.metrics import mean_squared_error,r2_score # silent warnings import warnings warnings.filterwarnings('ignore') ``` The script will add borderlines to the dataframes ``` %%HTML <style type='text/css'> table.dataframe td,table.dataframe th{ border: 1px solid black !important; color: solid black !important } </style> ``` ## 2. Exploratory data analysis We will first of all, have to understand the data and explore how various attributes of each student may directly or indirectly affects his/her score. The data set is in the `data` subdirectory folder. We will use the pandas `read` method to read both data sets and pass `;` for the `sep` parameter because each column is separated by a semi-colons. 
```
# read the datasets
stu_mat = pd.read_csv('data/student-mat.csv',sep=';')
stu_por = pd.read_csv('data/student-por.csv',sep=';')
```

Let's preview the first five rows of both datasets.

* We have two different datasets, `maths course` and `portuguese language course`, each having the same attributes.
* For simplicity's sake, we will combine and analyze both datasets together with the help of the pandas `concat` method.

```
stu_mat.head()
stu_por.head()
# concatenate both datasets
comb_df = pd.concat([stu_mat,stu_por],axis=0)
```

### Description of the dataset

* There are $1044$ data entries with only two distinct schools.
* The percentages of female and male students are 53% and 47% respectively.
* The average age of a student is $17$ years, with a minimum of 15 years and a maximum of 22 years.
* A student is, on average, absent from school twice.
* The total number of features is 33, of which 16 are integer type and 17 string type.

```
comb_df.describe(include='all').T
# comb_df['school']
# comb_df.info()
```

Let's combine the two datasets and remove duplicate rows by checking against a list of attributes.

* There are 382 duplicate rows.
* Now, let's use the pandas `duplicated` method to remove duplicates from the data.
* The `~` sign simply means the opposite of whatever outcome.
* In this context, the sign is used to negate the outcome of boolean values.

**NB:** Since we have combined the datasets and gotten rid of the duplicate rows, we can now roll on with the exploratory analysis by doing visualization.
``` # list of students attributes sim_attr = ["school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet"] print('We have {} duplicate rows in the dataset'.format(comb_df[comb_df.duplicated(sim_attr,keep='first')].shape[0])) # Lets drop the duplicates and maintain its first occurance df = comb_df[~comb_df.duplicated(sim_attr,keep='first')] # The shape of the data after dropping duplicates df.shape ``` ### Data Visualization #### Let check how `sex` and `age` affects the student's grade. - In average, male students with the age less than or equal to $19$ years perform better than those more than $20$ years. - Female students with the age more than $19$ performs far better than those less than or equal to $19$ which is directly opposite to their male mates. ###### Therefore we can say that, the older the age of a female student, the better her performance and the otherwise to male students. ``` # defining matplotlib figure plt.figure(figsize=(12,6),dpi=80) # this is matplotlib colormaps you can get your prefered color from matplotlib.cm.get_cmap color = plt.cm.Purples_r(np.linspace(0,1,4)) # We use seaborn to create a barplot with x and y axis being age and target variable respectively ax = sns.barplot(x='age',y='G3',data=df,hue='sex',ci=None,palette=color) # removing the spines from the plot ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) # setting x and y labels ax.set_ylabel('Final grade') ax.set_title('Age and Sex vs Final grade') # renaming the default legend plt.legend(labels=('Female','Male'),loc=1) plt.savefig('age_and_sex.png') ``` #### Can student relationship status affects his/her grade? 
* It observed that both genders with no romantic lives perform much better that those in relationship * Averagely, Male students perform slightly better than female students in both the romantic status ``` fig,ax = plt.subplots(figsize=(12,6)) pl = sns.barplot('romantic','G3',data=df,hue='sex',ci=None,palette=plt.cm.summer(np.linspace(0, 1, 3))) ax.set_title('Relationship Status VS Final Grade') ax.set_ylabel('Final Grade'); ax.legend(labels=('Female','Male'),bbox_to_anchor=(0.1, 1.05, 0.9, 0)) plt.savefig('relationship.png') ``` #### Is there any impact parental educational level have on students performance. * Upon visualizing both the Father and Mother educational level, it turned out consistently that, parents with no educational background and those with higher educational background have the best performing students. * The rest of the students having parents in the other categories performs similarly and lower. > A study on the Correlation Between Parents’ Education Level and Children’s Success published on the [lamar university website](https://degree.lamar.edu/articles/undergraduate/parents-education-level-and-childrens-success/#:~:text=But%20parents%20influence%20their%20children,high%20value%20on%20educational%20attainment.) shows how significant parents educational level affects their wards performance. The findings suggest that people whose parents did not hold a degree who entered the workforce straight out of high school were more likely to believe that a college degree was not worth the cost or that they did not need further education to pursue their desired career. Parents who didn't get the privilege to attend high school finds it neccessary to encourage their children to go to school and motives them to go extra miles on what they couldn't do. 
``` fig,(ax1,ax2) = plt.subplots(nrows=2,ncols=1,figsize=(12,6)) sns.barplot('Medu','G3',data=df,ci=None,palette=plt.cm.Blues(np.linspace(0, 1, 6)),ax=ax1) sns.barplot('Fedu','G3',data=df,ci=None,palette=plt.cm.Blues(np.linspace(0, 1, 6)),ax=ax2) x_labels = ['None','Primary School','5th to 9th grade','Secondary school','Higher Education'] ax1.set_title("Mother's educational level effects on student grade") ax1.set_ylabel('Final Grade') ax1.set_xlabel('Mother Education') ax1.set_xticklabels(x_labels) ax2.set_title("Father's educational level impact on student grade") ax2.set_ylabel('Final Grade') ax2.set_xlabel('Father Education') ax2.set_xticklabels(x_labels) plt.tight_layout(); plt.savefig('parent.png') ``` #### How does financial needs of a student affects his/her grade? * The simple heatmap below shows that student with both family supports and school support tend to perform lower than all the other categories. Kirabo Jackson, a professor at Northwestern University, has [published several studies](https://www.carolinajournal.com/news-article/does-more-money-for-education-lead-to-better-student-performance/) showing a connection between higher spending and improved education outcomes. His research has shown additional funding to low-wealth school districts can make a difference in student performance. Wealthier school districts don’t benefit as much with an extra influx of cash. * It is also surprising that, students with no financial support performs extremely better securing the second highest position on the map. This may be as a results of other non-physical factors such as commitment and determination to make a difference. > We can conclude that lack of finance in this case is not a major contributor to students performance. **map** i.Define a matplotlib figure ii. Use the `pivot_table` method to create an excel style pivot table. iii. Plot the heatmap and apply the necessary arguments. iv. 
Set the various labels ``` plt.figure(figsize=(12,6)) sup_corr = pd.pivot_table(df,values='G3',index='famsup',columns='schoolsup',aggfunc='mean') # Plot a heatmap ax = sns.heatmap(sup_corr,annot=True,center=0,cbar=False,cmap='nipy_spectral',fmt='f',linewidths=0.5) # add title to the plot plt.title('Correlation of Family support and School support heatmap',pad=20,fontweight='bold') #add axis labels ax.set_xlabel('School Support') ax.set_ylabel=('Family Support'); plt.savefig('family.png') ``` # Section Two: Data Preprocessing and Feature Engineering. Feature engineering is the process of transforming raw data into features that better represent the underlying problem to the predictive models, resulting in improved model accuracy on unseen data. #### Why feature engineering ##### Better features means flexibility. You can choose “the wrong models” (less than optimal) and still get good results. Most models can pick up on good structure in data. The flexibility of good features will allow you to use less complex models that are faster to run, easier to understand and easier to maintain. This is very desirable. ##### Better features means simpler models. With well engineered features, you can choose “the wrong parameters” (less than optimal) and still get good results, for much the same reasons. You do not need to work as hard to pick the right models and the most optimized parameters. With good features, you are closer to the underlying problem and a representation of all the data you have available and could use to best characterize that underlying problem. ##### Better features means better results. The algorithms we used are very standard for Kagglers.We spent most of our efforts in feature engineering. Xavier Conort, on “Q&A with Xavier Conort” on winning the Flight Quest challenge on Kaggle. Lets check the shape of the dataframe. 
``` df.shape ``` ### *Let's begin with binarization, discretization and normalization of the data set* ### `age feature` * We will then create the `age_bin` feature by grouping the age into `teenager`,`young_adult` and `adult`. * We will create `can_vote`, a binary feature showing whether a student is eligible to vote or not. * We will then create the `is_teenager` feature. This binary feature shows whether a student is a teenager or not. The `cut` method categorizes the continuous values into a given range. ``` # break the age into categories according to a given range df.loc[:,'age_bin'] = pd.cut(x=df['age'],bins=[-np.inf,18,20,np.inf],labels=['teenager','young_adult','adult']) # Lets visualize the effect of the categorized age on the target variable sns.violinplot('age_bin','G3',data=df); plt.savefig('age.png') # Create a binary feature making true for those greater or equal to >= 18 and false the otherwise df.loc[:,'can_vote'] = (df['age']>=18).astype(int) sns.barplot('can_vote','G3',data=df); # You are teenager if your age is less than or equal to 19 df.loc[:,'is_teenager'] = (df['age']<=19).astype(int) # sns.barplot('is_teenager','G3',data=df); plt.savefig('age2.png') ``` ### `Medu` and `Fedu` feature numeric: 0 - none 1 - primary education (4th grade) 2 – 5th to 9th grade 3 – secondary education 4 – higher education) * We will derive the features `higher_fedu` and `higher_medu` to contain a boolean of whether the parent attained at least secondary education or not. * The higher the parent's education, the higher their ward's grade, per our observation. 
``` # Create the two features df.loc[:,'higher_fedu'] = (df['Fedu']>2).astype(int) df.loc[:,'higher_medu'] = (df['Medu']>2).astype(int) # Lets visualize the two newly created features against the target variable fig,ax = plt.subplots(1,2,figsize=(12,6)) sns.barplot('higher_fedu','G3',data=df,ax=ax[0]) sns.barplot('higher_medu','G3',data=df,ax=ax[1]) plt.savefig('medu.png',bbox_inches='tight') ``` ### `Fjob` and `Mjob` feature * Create the `Fjob_cat` feature combining the `at_home`,`teacher`,`health` into one category * We will leave the `Mjob` as-is ``` # df['Fjob'].value_counts().plot(kind='bar'); # use lambda expression to create category of three;'other','services' and 'employee' df.loc[:,'Fjob_cat'] = df['Fjob'].apply(lambda x: x if (x == 'other' or x == 'services') else 'employee') ``` `guardian` feature * We will create `has_parent`; a boolean feature of student having or not having a parent. * We could see that those having their parents as their guardians performs much better than those without. ``` # creating their feature df.loc[:,'has_parent'] = df['guardian'].apply(lambda x: True if (x=='mother' or x =='father') else False).astype(int) # plot sns.boxplot('has_parent','G3',data=df); plt.savefig('guardian.png',bbox_inches='tight') ``` ### `absence` feature *We will use discretization to form a new feature named `absent_cat`* *We will create `absented` feature which will be a binary feature representing either a student once absented or not.* ``` # Create categories of absence df.loc[:,'absent_cat'] = pd.cut(x=df['absences'],bins=[-np.inf,0,5,10,np.inf], labels=['no_abs','low','moderate','high'],right=True) # create the binary feature df.loc[:,'absented'] = (df['absences']>0).astype(int) ``` ### Label encoding Ordinal-Categorical variables Label Encoding is a popular encoding technique for handling categorical variables. 
In this technique, each label is assigned a unique integer based on alphabetical ordering ``` # list of features to label encode label_encode_feats = ['famsize','studytime','traveltime','freetime','age_bin','absent_cat'] # initialize label encoder le = LabelEncoder() for label in label_encode_feats: df[label] = le.fit_transform(df[label]) ``` ### One-hot-encoding of non-ordinal category Encode categorical features as a one-hot numeric array. The input to this transformer should be an array-like of integers or strings, denoting the values taken on by categorical (discrete) features. The features are encoded using a one-hot (aka ‘one-of-K’ or ‘dummy’) encoding scheme. This creates a binary column for each category and returns a sparse matrix or dense array (depending on the sparse parameter) ``` # Select categorical non-ordinal features to one-hot-encode def select_label(label): return label not in (label_encode_feats + ['absences','age','G1','G2','G3']) # return selected features filtered = [i for i in filter(select_label,df.columns.tolist())] # function to transform data into one-hot-encodings def get_hot_encode(df): encoder_list = list() # iterate through the column labels for label in filtered: encoder_list.append(pd.get_dummies(df[label],prefix=label,drop_first=True)) return encoder_list # initialize and transform the dataset with the function hot_encoded = get_hot_encode(df) df = pd.concat([df,*hot_encoded],axis=1).drop(filtered,axis=1) ``` ## Continuous Variables ### Handling skewness and outliers in countinous variables In statistics, an outlier is an observation point that is distant from other observations. We wouldn't want to have outliers in our dataset since that have significant effects on the model performance. So we will first of all, visualize the `continous variables` then we will decide either to cap it or remove it with statistical method. * Some data beyond the age $21$ appears to be an outlier. 
* The distribution of the number of absences is positively skewed. * On the bottom, plot between G1 and G2 has outlier grades. ``` # use gridspec to partition the figure into subplots plt.figure(figsize=(15,10)) gspec = gridspec.GridSpec(4, 4) # defining the axes top_left = plt.subplot(gspec[:2, :2]) top_right = plt.subplot(gspec[:2, 2:]) lower_middle = plt.subplot(gspec[2:, 1:3]) # plot maps top_left.boxplot(df['age'],vert=False) top_left.set_xlabel('age') top_right.hist(df['absences'],bins='auto') top_right.set_xlabel('absences') top_right.set_ylabel('Frequency') lower_middle.scatter('G1','G2',data=df) lower_middle.set_xlabel('G1') lower_middle.set_ylabel('G2') plt.legend(loc='best') plt.tight_layout() plt.savefig('skewness.png',bbox_inches='tight') ``` <!-- #### Handling skewness and outliers In statistics, an outlier is an observation point that is distant from other observations. We wouldn't want to have outliers in our dataset since that have significant effects on the model performance. So we will first of all, visualize the `continous variables` then we validate it with some statiscal measurements. * Some data beyond the age $21$ appears to be an outlier. * The distribution of the number of absences is positively skewed. * On the bottom, plot between G1 and G2 has outlier grades. As mentioned earlier, we probably should verify this by taking some measurements. Two good metrics we can use are the kurtosis and skew. where kurtosis measure the height of our distribution and skew measures whether it is positively or negatively skewed. We will use the `scipy.stats` module to do the measurements. *** **Kurtosis** - Any distribution with **kurtosis ≈3 (excess ≈0)** is called mesokurtic. This is a normal distribution - Any distribution with **kurtosis <3 (excess kurtosis <0)** is called platykurtic. Tails are shorter and thinner, and often its central peak is lower and broader. - Any distribution with **kurtosis >3 (excess kurtosis >0)** is called leptokurtic. 
Tails are longer and fatter, and often its central peak is higher and sharper. *** **Skewness** - If skewness is **less than −1 or greater than +1**, the distribution is highly skewed. - If skewness is **between −1 and −½ or between +½ and +1**, the distribution is moderately skewed. - If skewness is **between −½ and +½**, the distribution is approximately symmetric. --> It is either we use Z-score function defined in scipy library to detect the outliers or scale our data with `Standardization`. *The Z-score is the signed number of standard deviations by which the value of an observation or data point is above the mean value of what is being observed or measured* The intuition behind Z-score is to describe any data point by finding their relationship with the Standard Deviation and Mean of the group of data points. Z-score is finding the distribution of data where mean is 0 and standard deviation is 1 i.e. normal distribution. In most of the cases a threshold of 3 or -3 is used i.e if the Z-score value is greater than or less than 3 or -3 respectively, that data point will be identified as outliers. Standardization Standardization (or z-score normalization) scales the values while taking into account standard deviation. If the standard deviation of features is different, their range also would differ from each other. This reduces the effect of the outliers in the features. **We will use the standard scaling at the model creation time** ## Checking for Multicollinearity Multicollinearity is the correlation between independent variables. It is considered as disturbance in the data, if present will weaken the statistical power of the regression model. The Variance Inflation Factor (VIF) is a measure of collinearity among predictor variables within a multiple regression. It is calculated by taking the the ratio of the variance of all a given model’s betas divide by the variance of a single beta if it were fit alone. `V.I.F. 
= 1 / (1 – R^2)` * VIF value <= 4 suggests no multicollinearity * VIF value of >= 10 implies serious multicollinearity ``` df.values # create a dataframe vif = pd.DataFrame() vif['vif'] = [variance_inflation_factor(df.values,i) for i in range(df.shape[1])] vif['feature'] = df.columns vif_filter_columns = (vif[vif['vif']>=10].feature.values).tolist() df = df.drop(vif_filter_columns,axis=1) # get the copy of preprocessed dataframe prep_df = df.copy() # save the preprocessed version to file prep_df.drop('G3',axis=1).to_csv('data/prep_df.csv',index=False) # covMat = np.array(covMat, dtype=float) prep_df ``` # 3. MODEL CREATION Finally, we have come to the model creation section. * We will split the data into `train` and `test` using the `train_test_split` method from the scikit-learn library. >**Sklearn (or Scikit-learn):** Is a Python library that offers various features for data processing that can be used for classification, clustering, and model selection. >**Train_test_split:** Is a function in Sklearn model selection for splitting data arrays into two subsets: for training data and for testing data. ### Train_test_split parameters explained **X, y:** The first parameter is the dataset you're selecting to use. **train_size:** This parameter sets the size of the training dataset. **test_size:** This parameter specifies the size of the testing dataset. **random_state:** The default mode performs a random split using np.random. Alternatively, you can add an integer using an exact number. 
``` # drop the target variable X = prep_df.drop('G3',axis=1) # get target y = prep_df['G3'] # calling the splitting function X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=31) ``` ### Scaling the data This takes into account the standard deviation hence reducing the effect of outliers ``` # initialize the function scaler = StandardScaler() # apply the transformation X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) ``` ### Linear regression Linear regression uses traditional slope-intercept form, where m and b are the variables our algorithm will try to “learn” to produce the most accurate predictions. ### `y = wx + b` `x` represents our input data `y` represents our prediction. `w` is the slope of the line `b` is the intercept, that is the value of y when `x = 0` ### *Multivariable regression* A multi-variable linear regression might look like this: > ### `f(x,y,z)=w1x + w2y + w3z` where `w` represents the coefficients, or weights, our model will try to learn. The variables `x,y,z` represent the attributes, or distinct pieces of information, we have about each observation. ### *Derivation* $ y = w x + b $ y = $ ( y^1 y^2 ... 
y^n )$ y^i = $ (w_1x_1^i + w_2x_2^i + \dots + w_nx_n^i + b) $ ``` # Check shapes print('X_train shape = {} and y_train shape = {}'.format(X_train.shape,y_train.shape)) print('X_test shape = {} and y_test shape = {}'.format(X_test.shape,y_test.shape)) # initialize regression = LinearRegression() # fit to data lin_reg = regression.fit(X_train,y_train) # generate prediction prediction = lin_reg.predict(X_test) # use the DummyRegressor as baseline in comparism to the real regressor lm_dummy_mean = DummyRegressor(strategy = 'mean').fit(X_train, y_train) y_predict_dummy_mean = lm_dummy_mean.predict(X_test) print('\n\n Dummy train score ',lm_dummy_mean.score(X_train,y_train)) print(' Dummy test score ',lm_dummy_mean.score(X_test,y_test)) print('\n\ntrain score ',lin_reg.score(X_train,y_train)) print('test score ',lin_reg.score(X_test,y_test)) len(lin_reg.coef_) X.columns.tolist() fig,ax = plt.subplots(figsize=(14,6)) sns.barplot(X.columns.tolist(),lin_reg.coef_) plt.xticks(rotation=90) plt.ylabel('Feature Coefficient') plt.xlabel('Feature names') # plt.savefig('feature_imp.png') plt.savefig('feature_imp.png',bbox_inches='tight') ``` # Model Evaluation ### `mean_squared_error` `MSE` measures the average squared difference between an observation’s actual and predicted values. The output is a single number representing the cost, or score, associated with our current set of weights. ``` print('test mse ',mean_squared_error(y_test,prediction)) print('\n\n dummy test mse ',mean_squared_error(y_test,y_predict_dummy_mean)) ``` ### `R-Squared` The R-Squared metric provides us with a way to measure the goodness of fit, or how well our data fits the model. The higher the R-Squared metric, the better the data fit our model. 
``` print('dummy test r2_score',r2_score(y_test,y_predict_dummy_mean)) print('test r2_score',r2_score(y_test,prediction)) # lin_reg.predict(X_train) # print('dummy test r2_score',mean_squared_error(y_train,lin_reg.predict(X_train))) ``` ## Making predictions ``` # create a dataframe for ground truth and prediction pred_df = pd.DataFrame([prediction,np.round(prediction),y_test], index=['prediction','round_pred','ground_truth']) # pd.crosstab(pred_df['ground_truth'],pred_df['prediction']) pred_df2 = pred_df.T.groupby('ground_truth').mean().reset_index() pred_df2['prediction'] = pred_df2.apply(lambda x:np.round(x['prediction']),axis=1,result_type='expand') pred_df2 = pred_df2.astype(int) width = 0.4 fig,ax = plt.subplots(figsize=(15,6)) x_axis = np.arange(len(pred_df2['ground_truth'])) bar1 = plt.bar(x_axis - (width/2),pred_df2['ground_truth'],width,label='Target Variable') bar2 = plt.bar(x_axis + (width/2),pred_df2['prediction'],width,label='Predictions') ax.set_xticks(x_axis) ax.set_xticklabels(pred_df2['ground_truth']) ax.set_xlabel('Grades') ax.set_ylabel('Average Grade') ax.set_title('Students Grade vs Average grade prediction and average target value') # removing the spines from the plot ax.spines['left'].set_visible(False) ax.legend(title='90% accuracy score') def labeller(bars): for bar in bars: height = bar.get_height() ax.annotate('{}'.format(height),xy=(bar.get_x() + bar.get_width() / 2, height), xytext=(0, 3),textcoords="offset points",ha='center', va='bottom') labeller(bar1) labeller(bar2) fig.tight_layout() plt.savefig('prediction_vs_gt',dpi=80) ``` ## Plotting `ground_truth` vs `prediction` ``` fig,ax = plt.subplots(figsize=(12,6)) sns.regplot(pred_df.loc['ground_truth',:],pred_df.loc['prediction',:],data=pred_df,color='g',line_kws={'color':'red'},marker='*'); plt.xlabel('Ground Truth') plt.ylabel('Prediction') plt.title('Linear model giving up to {} percent test accuracy'.format(np.round(lin_reg.score(X_test,y_test)*100))); 
plt.savefig('ground_truth.png',bbox_inches='tight') ``` ### Saving model for future use ``` import pickle # save the model with open('model/multi_linear_reg.sav','wb') as f: pickle.dump(lin_reg, f) ``` ## Custom prediction ``` from predictor import Predictor predict = Predictor('model/multi_linear_reg.sav','data/prep_df.csv',scaler) first_grade = 5 second_grade = 4 absences = 0 index = 6 predict.get_prediction(first_grade,second_grade,absences,index) # Establish a model from sklearn.linear_model import SGDRegressor model = SGDRegressor() param_grid = { 'alpha': 10.0 ** -np.arange(-0.1,5,0.1), 'loss': ['squared_loss', 'huber', 'epsilon_insensitive'], 'penalty': ['l2', 'l1', 'elasticnet'], 'learning_rate': ['constant', 'optimal', 'invscaling'], } clf = GridSearchCV(model, param_grid,return_train_score=True) clf.fit(X_train, y_train) print("Best score: " + str(clf.best_score_)) plt.subplots(figsize=(12,6)) ax = sns.lineplot((clf.cv_results_['param_alpha']).data,(clf.cv_results_['mean_train_score'])) ax.set_xlabel('Apha value') ax.set_ylabel('Train Score') ax.set_title('The relationship of the alpha value vs the training score') plt.ylim(-0.4,1) plt.xlim(0,) plt.savefig('relationship.png',bbox_inches='tight') # clf.best_estimator_ # # 10 ** np.arange(-1,5,0.1) # 10.0 ** -np.arange(-0.1,5,0.1) # # (clf.cv_results_['param_alpha']).data # from datetime import datetime as dt # import pandas as pd # dt.today() - pd.to_datetime('2024-09-27') ```
github_jupyter
Notebook for developing functions in analyze.py ``` # figures.py imports from __future__ import division #from cStringIO import StringIO import datetime import glob import os import arrow from dateutil import tz import matplotlib.dates as mdates import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import matplotlib.cm as cm import netCDF4 as nc import numpy as np import pandas as pd import requests from scipy import interpolate as interp from salishsea_tools import ( nc_tools, viz_tools, stormtools, tidetools, ) #from salishsea_tools.nowcast import figures #from salishsea_tools.nowcast import analyze #from salishsea_tools.nowcast import residuals %matplotlib inline t_orig=datetime.datetime(2015, 1, 22); t_final=datetime.datetime(2015, 1, 29) bathy = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc') ``` # Constants ``` paths = {'nowcast': '/data/dlatorne/MEOPAR/SalishSea/nowcast/', 'forecast': '/ocean/sallen/allen/research/MEOPAR/SalishSea/forecast/', 'forecast2': '/ocean/sallen/allen/research/MEOPAR/SalishSea/forecast2/'} colours = {'nowcast': 'DodgerBlue', 'forecast': 'ForestGreen', 'forecast2': 'MediumVioletRed', 'observed': 'Indigo', 'predicted': 'ForestGreen', 'model': 'blue', 'residual': 'DimGray'} ``` # Functions in module ``` def create_path(mode, t_orig, file_part): """ Creates a path to a file associated with a simulation for date t_orig. E.g. create_path('nowcast',datatime.datetime(2015,1,1), 'SalishSea_1h*grid_T.nc') gives /data/dlatorne/MEOPAR/SalishSea/nowcast/01jan15/SalishSea_1h_20150101_20150101_grid_T.nc :arg mode: Mode of results - nowcast, forecast, forecast2. :type mode: string :arg t_orig: The simulation start date. :type t_orig: datetime object :arg file_part: Identifier for type of file. E.g. SalishSea_1h*grif_T.nc or ssh*.txt :type grid: string :returns: filename, run_date filename is the full path of the file or an empty list if the file does not exist. 
run_date is a datetime object that represents the date the simulation ran """ run_date = t_orig if mode == 'nowcast': results_home = paths['nowcast'] elif mode == 'forecast': results_home = paths['forecast'] run_date = run_date + datetime.timedelta(days=-1) elif mode == 'forecast2': results_home = paths['forecast2'] run_date = run_date + datetime.timedelta(days=-2) results_dir = os.path.join(results_home, run_date.strftime('%d%b%y').lower()) filename = glob.glob(os.path.join(results_dir, file_part)) try: filename = filename[-1] except IndexError: pass return filename, run_date create_path('forecast2', t_orig, 'SalishSea*.nc') def verified_runs(t_orig): """ Compiles a list of run types (nowcast, forecast, and/or forecast 2) that have been verified as complete by checking if their corresponding .nc files for that day (generated by create_path) exist. :arg t_orig: :type t_orig: datetime object :returns: runs_list, list strings representing the runs that completed """ runs_list = [] for mode in ['nowcast', 'forecast', 'forecast2']: files, run_date = create_path(mode, t_orig, 'SalishSea*grid_T.nc') if files: runs_list.append(mode) return runs_list def truncate_data(data,time, sdt, edt): """ Truncates data for a desired time range: sdt <= time <= edt data and time must be numpy arrays. sdt, edt, and times in time must all have a timezone or all be naive. :arg data: the data to be truncated :type data: numpy array :arg time: array of times associated with data :type time: numpy array :arg sdt: the start time of the tuncation :type sdt: datetime object :arg edt: the end time of the truncation :type edt: datetime object :returns: data_trun, time_trun, the truncated data and time arrays """ inds = np.where(np.logical_and(time <=edt, time >=sdt)) return data[inds], time[inds] def calculate_residual(ssh, time_ssh, tides, time_tides): """ Calculates the residual of the model sea surface height or observed water levels with respect to the predicted tides. 
:arg ssh: Sea surface height (observed or modelled). :type ssh: numpy array :arg time_ssh: Time component for sea surface height (observed or modelled) :type time_ssh: numpy array :arg tides: Predicted tides. :type tides: dataFrame object :arg time_tides: Time component for predicted tides. :type time_tides: dataFrame object :returns: res, the residual """ tides_interp = figures.interp_to_model_time(time_ssh, tides, time_tides) res = ssh - tides_interp return res def plot_residual_forcing(ax, runs_list, t_orig): """ Plots the observed water level residual at Neah Bay against forced residuals from existing ssh*.txt files for Neah Bay. Function may produce none, any, or all (nowcast, forecast, forecast 2) forced residuals depending on availability for specified date (runs_list). :arg ax: The axis where the residuals are plotted. :type ax: axis object :arg runs_list: Runs that are verified as complete. :type runs_list: list :arg t_orig: Date being considered. :type t_orig: datetime object """ # truncation times sdt = t_orig.replace(tzinfo=tz.tzutc()) edt = sdt + datetime.timedelta(days=1) # retrieve observations, tides and residual start_date = t_orig.strftime('%d-%b-%Y'); end_date = start_date stn_no = figures.SITES['Neah Bay']['stn_no'] obs = figures.get_NOAA_wlevels(stn_no, start_date, end_date) tides = figures.get_NOAA_tides(stn_no, start_date, end_date) res_obs = calculate_residual(obs.wlev, obs.time, tides.pred, tides.time) # truncate and plot res_obs_trun, time_trun = truncate_data(np.array(res_obs),np.array(obs.time), sdt, edt) ax.plot(time_trun, res_obs_trun, colours['observed'], label='observed', linewidth=2.5) # plot forcing for each simulation for mode in runs_list: filename_NB, run_date = create_path(mode, t_orig, 'ssh*.txt') if filename_NB: data = residuals._load_surge_data(filename_NB) surge, dates = residuals._retrieve_surge(data, run_date) surge_t, dates_t = truncate_data(np.array(surge),np.array(dates),sdt,edt) ax.plot(dates_t, surge_t, label=mode, 
linewidth=2.5, color=colours[mode]) ax.set_title('Comparison of observed and forced sea surface height residuals at Neah Bay:' '{t_forcing:%d-%b-%Y}'.format(t_forcing=t_orig)) def plot_residual_model(axs, names, runs_list, grid_B, t_orig): """ Plots the observed sea surface height residual against the sea surface height model residual (calculate_residual) at specified stations. Function may produce none, any, or all (nowcast, forecast, forecast 2) model residuals depending on availability for specified date (runs_list). :arg ax: The axis where the residuals are plotted. :type ax: list of axes :arg names: Names of station. :type names: list of names :arg runs_list: Runs that have been verified as complete. :type runs_list: list :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model. :type grid_B: :class:`netCDF4.Dataset` :arg t_orig: Date being considered. :type t_orig: datetime object """ bathy, X, Y = tidetools.get_bathy_data(grid_B) t_orig_obs = t_orig + datetime.timedelta(days=-1) t_final_obs = t_orig + datetime.timedelta(days=1) # truncation times sdt = t_orig.replace(tzinfo=tz.tzutc()) edt = sdt + datetime.timedelta(days=1) for ax, name in zip(axs, names): lat = figures.SITES[name]['lat']; lon = figures.SITES[name]['lon']; msl = figures.SITES[name]['msl'] j, i = tidetools.find_closest_model_point(lon, lat, X, Y, bathy, allow_land=False) ttide = figures.get_tides(name) wlev_meas = figures.load_archived_observations(name, t_orig_obs.strftime('%d-%b-%Y'), t_final_obs.strftime('%d-%b-%Y')) res_obs = calculate_residual(wlev_meas.wlev, wlev_meas.time, ttide.pred_all + msl, ttide.time) # truncate and plot res_obs_trun, time_obs_trun = truncate_data(np.array(res_obs), np.array(wlev_meas.time), sdt, edt) ax.plot(time_obs_trun, res_obs_trun, color=colours['observed'], linewidth=2.5, label='observed') for mode in runs_list: filename, run_date = create_path(mode, t_orig, 'SalishSea_1h_*_grid_T.nc') grid_T = nc.Dataset(filename) ssh_loc = 
grid_T.variables['sossheig'][:, j, i] t_start, t_final, t_model = figures.get_model_time_variables(grid_T) res_mod = calculate_residual(ssh_loc, t_model, ttide.pred_8, ttide.time) # truncate and plot res_mod_trun, t_mod_trun = truncate_data(res_mod, t_model, sdt, edt) ax.plot(t_mod_trun, res_mod_trun, label=mode, color=colours[mode], linewidth=2.5) ax.set_title('Comparison of modelled sea surface height residuals at {station}: {t:%d-%b-%Y}'.format(station=name, t=t_orig)) def calculate_error(res_mod, time_mod, res_obs, time_obs): """ Calculates the model or forcing residual error. :arg res_mod: Residual for model ssh or NB surge data. :type res_mod: numpy array :arg time_mod: Time of model output. :type time_mod: numpy array :arg res_obs: Observed residual (archived or at Neah Bay) :type res_obs: numpy array :arg time_obs: Time corresponding to observed residual. :type time_obs: numpy array :return: error """ res_obs_interp = figures.interp_to_model_time(time_mod, res_obs, time_obs) error = res_mod - res_obs_interp return error def calculate_error_model(names, runs_list, grid_B, t_orig): """ Sets up the calculation for the model residual error. :arg names: Names of station. :type names: list of strings :arg runs_list: Runs that have been verified as complete. :type runs_list: list :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model. :type grid_B: :class:`netCDF4.Dataset` :arg t_orig: Date being considered. 
:type t_orig: datetime object :returns: error_mod_dict, t_mod_dict, t_orig_dict """ bathy, X, Y = tidetools.get_bathy_data(grid_B) t_orig_obs = t_orig + datetime.timedelta(days=-1) t_final_obs = t_orig + datetime.timedelta(days=1) # truncation times sdt = t_orig.replace(tzinfo=tz.tzutc()) edt = sdt + datetime.timedelta(days=1) error_mod_dict = {}; t_mod_dict = {}; t_orig_dict = {} for name in names: error_mod_dict[name] = {}; t_mod_dict[name] = {}; t_orig_dict[name] = {} lat = figures.SITES[name]['lat']; lon = figures.SITES[name]['lon']; msl = figures.SITES[name]['msl'] j, i = tidetools.find_closest_model_point(lon, lat, X, Y, bathy, allow_land=False) ttide = figures.get_tides(name) wlev_meas = figures.load_archived_observations(name, t_orig_obs.strftime('%d-%b-%Y'), t_final_obs.strftime('%d-%b-%Y')) res_obs = calculate_residual(wlev_meas.wlev, wlev_meas.time, ttide.pred_all + msl, ttide.time) for mode in runs_list: filename, run_date = create_path(mode, t_orig, 'SalishSea_1h_*_grid_T.nc') grid_T = nc.Dataset(filename) ssh_loc = grid_T.variables['sossheig'][:, j, i] t_start, t_final, t_model = figures.get_model_time_variables(grid_T) res_mod = calculate_residual(ssh_loc, t_model, ttide.pred_8, ttide.time) # truncate res_mod_trun, t_mod_trun = truncate_data(res_mod, t_model, sdt, edt) error_mod = calculate_error(res_mod_trun, t_mod_trun, res_obs, wlev_meas.time) error_mod_dict[name][mode] = error_mod; t_mod_dict[name][mode] = t_mod_trun; t_orig_dict[name][mode] = t_orig return error_mod_dict, t_mod_dict, t_orig_dict def calculate_error_forcing(name, runs_list, t_orig): """ Sets up the calculation for the forcing residual error. :arg names: Name of station. :type names: string :arg runs_list: Runs that have been verified as complete. :type runs_list: list :arg t_orig: Date being considered. 
:type t_orig: datetime object :returns: error_frc_dict, t_frc_dict """ # truncation times sdt = t_orig.replace(tzinfo=tz.tzutc()) edt = sdt + datetime.timedelta(days=1) # retrieve observed residual start_date = t_orig.strftime('%d-%b-%Y'); end_date = start_date stn_no = figures.SITES['Neah Bay']['stn_no'] obs = figures.get_NOAA_wlevels(stn_no, start_date, end_date) tides = figures.get_NOAA_tides(stn_no, start_date, end_date) res_obs_NB = calculate_residual(obs.wlev, obs.time, tides.pred, tides.time) # calculate forcing error error_frc_dict = {}; t_frc_dict = {}; error_frc_dict[name] = {}; t_frc_dict[name] = {} for mode in runs_list: filename_NB, run_date = create_path(mode, t_orig, 'ssh*.txt') if filename_NB: data = residuals._load_surge_data(filename_NB) surge, dates = residuals._retrieve_surge(data, run_date) surge_t, dates_t = truncate_data(np.array(surge),np.array(dates), sdt, edt) error_frc = calculate_error(surge_t, dates_t, res_obs_NB, obs.time) error_frc_dict[name][mode] = error_frc; t_frc_dict[name][mode] = dates_t return error_frc_dict, t_frc_dict def plot_error_model(axs, names, runs_list, grid_B, t_orig): """ Plots the model residual error. :arg axs: The axis where the residual errors are plotted. :type axs: list of axes :arg names: Names of station. :type names: list of strings :arg runs_list: Runs that have been verified as complete. :type runs_list: list of strings :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model. :type grid_B: :class:`netCDF4.Dataset` :arg t_orig: Date being considered. 
:type t_orig: datetime object """ error_mod_dict, t_mod_dict, t_orig_dict = calculate_error_model(names, runs_list, grid_B, t_orig) for ax, name in zip(axs, names): ax.set_title('Comparison of modelled residual errors at {station}: {t:%d-%b-%Y}'.format(station=name, t=t_orig)) for mode in runs_list: ax.plot(t_mod_dict[name][mode], error_mod_dict[name][mode], label=mode, color=colours[mode], linewidth=2.5) def plot_error_forcing(ax, runs_list, t_orig): """ Plots the forcing residual error. :arg ax: The axis where the residual errors are plotted. :type ax: axis object :arg runs_list: Runs that have been verified as complete. :type runs_list: list :arg t_orig: Date being considered. :type t_orig: datetime object """ name = 'Neah Bay' error_frc_dict, t_frc_dict = calculate_error_forcing(name, runs_list, t_orig) for mode in runs_list: ax.plot(t_frc_dict[name][mode], error_frc_dict[name][mode], label=mode, color=colours[mode], linewidth=2.5) ax.set_title('Comparison of observed and forced residual errors at Neah Bay: {t_forcing:%d-%b-%Y}'.format(t_forcing=t_orig)) def plot_residual_error_all(subject ,grid_B, t_orig, figsize=(20,16)): """ Sets up and combines the plots produced by plot_residual_forcing and plot_residual_model or plot_error_forcing and plot_error_model. This function specifies the stations for which the nested functions apply. Figure formatting except x-axis limits and titles are included. :arg subject: Subject of figure, either 'residual' or 'error' for residual error. :type subject: string :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model. :type grid_B: :class:`netCDF4.Dataset` :arg t_orig: Date being considered. :type t_orig: datetime object :arg figsize: Figure size (width, height) in inches. 
:type figsize: 2-tuple :returns: fig """ # set up axis limits - based on full 24 hour period 0000 to 2400 sax = t_orig eax = t_orig +datetime.timedelta(days=1) runs_list = verified_runs(t_orig) fig, axes = plt.subplots(4, 1, figsize=figsize) axs_mod = [axes[1], axes[2], axes[3]] names = ['Point Atkinson', 'Victoria', 'Campbell River'] if subject == 'residual': plot_residual_forcing(axes[0], runs_list, t_orig) plot_residual_model(axs_mod, names, runs_list, grid_B, t_orig) elif subject == 'error': plot_error_forcing(axes[0], runs_list, t_orig) plot_error_model(axs_mod, names, runs_list, grid_B, t_orig) for ax in axes: ax.set_ylim([-0.4, 0.4]) ax.set_xlabel('[hrs UTC]') ax.set_ylabel('[m]') hfmt = mdates.DateFormatter('%m/%d %H:%M') ax.xaxis.set_major_formatter(hfmt) ax.legend(loc=2, ncol=4) ax.grid() ax.set_xlim([sax,eax]) return fig def compare_errors(name, mode, start, end, grid_B, figsize=(20,12)): """ compares the model and forcing error at a station between dates start and end for a simulation mode.""" # array of dates for iteration numdays = (end-start).days dates = [start + datetime.timedelta(days=num) for num in range(0, numdays+1)] dates.sort() # intiialize figure and arrays fig,axs = plt.subplots(3,1,figsize=figsize) e_frc=np.array([]) t_frc=np.array([]) e_mod=np.array([]) t_mod=np.array([]) # mean daily error frc_daily= np.array([]) mod_daily = np.array([]) t_daily = np.array([]) ttide=figures.get_tides(name) for t_sim in dates: # check if the run happened if mode in verified_runs(t_sim): # retrieve forcing and model error e_frc_tmp, t_frc_tmp = calculate_error_forcing('Neah Bay', [mode], t_sim) e_mod_tmp, t_mod_tmp, _ = calculate_error_model([name], [mode], grid_B, t_sim) e_frc_tmp= figures.interp_to_model_time(t_mod_tmp[name][mode],e_frc_tmp['Neah Bay'][mode],t_frc_tmp['Neah Bay'][mode]) # append to larger array e_frc = np.append(e_frc,e_frc_tmp) t_frc = np.append(t_frc,t_mod_tmp[name][mode]) e_mod = np.append(e_mod,e_mod_tmp[name][mode]) t_mod = 
np.append(t_mod,t_mod_tmp[name][mode]) # append daily mean error frc_daily=np.append(frc_daily, np.mean(e_frc_tmp)) mod_daily=np.append(mod_daily, np.mean(e_mod_tmp[name][mode])) t_daily=np.append(t_daily,t_sim+datetime.timedelta(hours=12)) else: print '{mode} simulation for {start} did not occur'.format(mode=mode, start=t_sim) # Plotting time series ax=axs[0] ax.plot(t_frc, e_frc, 'b', label = 'Forcing error', lw=2) ax.plot(t_mod, e_mod, 'g', lw=2, label = 'Model error') ax.set_title(' Comparison of {mode} error at {name}'.format(mode=mode,name=name)) ax.set_ylim([-.4,.4]) hfmt = mdates.DateFormatter('%m/%d %H:%M') # Plotting daily means ax=axs[1] ax.plot(t_daily, frc_daily, 'b', label = 'Forcing daily mean error', lw=2) ax.plot([t_frc[0],t_frc[-1]],[np.mean(e_frc),np.mean(e_frc)], '--b', label='Mean forcing error', lw=2) ax.plot(t_daily, mod_daily, 'g', lw=2, label = 'Model daily mean error') ax.plot([t_mod[0],t_mod[-1]],[np.mean(e_mod),np.mean(e_mod)], '--g', label='Mean model error', lw=2) ax.set_title(' Comparison of {mode} daily mean error at {name}'.format(mode=mode,name=name)) ax.set_ylim([-.2,.2]) # Plot tides ax=axs[2] ax.plot(ttide.time,ttide.pred_all, 'k', lw=2, label='tides') ax.set_title('Tidal predictions') ax.set_ylim([-3,3]) # format axes hfmt = mdates.DateFormatter('%m/%d %H:%M') for ax in axs: ax.xaxis.set_major_formatter(hfmt) ax.legend(loc=2, ncol=4) ax.grid() ax.set_xlim([start,end+datetime.timedelta(days=1)]) ax.set_ylabel('[m]') return fig ``` * Clear tidal signal in model errors. I don't think we are removing the tidal energy in the residual calculation. * Bizarre forcing behavior on Jan 22. Looked at the ssh text file in run directory and everything was recorded as a forecast. Weird!! Is it possible that this text file did not generate the forcing for the Jan 22 nowcast run? * Everything produced by Jan 22 (18hr) text file is a fcst * worker links forcing in obs and fcst. So the obs/Jan21 was not related to this text file. 
But does that matter? This is a nowcast so it should only use Jan 22 forcing data fcst. There are 4 Jan 22 ssh text files in /ocean/nsoontie/MEOPAR/sshNeahBay/txt/ * ssh-2015-02-22_12.txt is a forecast2 file * '' 18, 19, 21 are all in forecast/22jan15 * '' 18 are is also in nowcast/22jan15 So it appears that the forecast had to be restarted several times. What about the nowcast? Did that run smoothly? ``` def get_filenames(t_orig, t_final, period, grid, model_path): """Returns a list with the filenames for all files over the defined period of time and sorted in chronological order. :arg t_orig: The beginning of the date range of interest. :type t_orig: datetime object :arg t_final: The end of the date range of interest. :type t_final: datetime object :arg period: Time interval of model results (eg. 1h or 1d). :type period: string :arg grid: Type of model results (eg. grid_T, grid_U, etc). :type grid: string :arg model_path: Defines the path used (eg. nowcast) :type model_path: string :returns: files, a list of filenames """ numdays = (t_final-t_orig).days dates = [t_orig + datetime.timedelta(days=num) for num in range(0, numdays+1)] dates.sort() allfiles = glob.glob(model_path+'*/SalishSea_'+period+'*_'+grid+'.nc') sdt = dates[0].strftime('%Y%m%d') edt = dates[-1].strftime('%Y%m%d') sstr = 'SalishSea_{}_{}_{}_{}.nc'.format(period, sdt, sdt, grid) estr = 'SalishSea_{}_{}_{}_{}.nc'.format(period, edt, edt, grid) files = [] for filename in allfiles: if os.path.basename(filename) >= sstr: if os.path.basename(filename) <= estr: files.append(filename) files.sort(key=os.path.basename) return files def combine_files(files, var, depth, j, i): """Returns the value of the variable entered over multiple files covering a certain period of time. :arg files: Multiple result files in chronological order. 
:type files: list :arg var: Name of variable (sossheig = sea surface height, vosaline = salinity, votemper = temperature, vozocrtx = Velocity U-component, vomecrty = Velocity V-component). :type var: string :arg depth: Depth of model results ('None' if var=sossheig). :type depth: integer or string :arg j: Latitude (y) index of location (<=897). :type j: integer :arg i: Longitude (x) index of location (<=397). :type i: integer :returns: var_ary, time - array of model results and time. """ time = np.array([]) var_ary = np.array([]) for f in files: G = nc.Dataset(f) if depth == 'None': var_tmp = G.variables[var][:, j, i] else: var_tmp = G.variables[var][:, depth, j, i] var_ary = np.append(var_ary, var_tmp, axis=0) t = nc_tools.timestamp(G, np.arange(var_tmp.shape[0])) for ind in range(len(t)): t[ind] = t[ind].datetime time = np.append(time, t) return var_ary, time def plot_files(ax, grid_B, files, var, depth, t_orig, t_final, name, label, colour): """Plots values of variable over multiple files covering a certain period of time. :arg ax: The axis where the variable is plotted. :type ax: axis object :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model. :type grid_B: :class:`netCDF4.Dataset` :arg files: Multiple result files in chronological order. :type files: list :arg var: Name of variable (sossheig = sea surface height, vosaline = salinity, votemper = temperature, vozocrtx = Velocity U-component, vomecrty = Velocity V-component). :type var: string :arg depth: Depth of model results ('None' if var=sossheig). :type depth: integer or string :arg t_orig: The beginning of the date range of interest. :type t_orig: datetime object :arg t_final: The end of the date range of interest. :type t_final: datetime object :arg name: The name of the station. :type name: string :arg label: Label for plot line. :type label: string :arg colour: Colour of plot lines. :type colour: string :returns: axis object (ax). 
""" bathy, X, Y = tidetools.get_bathy_data(grid_B) lat = figures.SITES[name]['lat']; lon = figures.SITES[name]['lon'] [j, i] = tidetools.find_closest_model_point(lon, lat, X, Y, bathy, allow_land=False) # Call function var_ary, time = combine_files(files, var, depth, j, i) # Plot ax.plot(time, var_ary, label=label, color=colour, linewidth=2) # Figure format ax_start = t_orig ax_end = t_final + datetime.timedelta(days=1) ax.set_xlim(ax_start, ax_end) hfmt = mdates.DateFormatter('%m/%d %H:%M') ax.xaxis.set_major_formatter(hfmt) return ax def compare_ssh_tides(grid_B, files, t_orig, t_final, name, PST=0, MSL=0, figsize=(20, 5)): """ :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model. :type grid_B: :class:`netCDF4.Dataset` :arg files: Multiple result files in chronological order. :type files: list :arg t_orig: The beginning of the date range of interest. :type t_orig: datetime object :arg t_final: The end of the date range of interest. :type t_final: datetime object :arg name: Name of station. :type name: string :arg PST: Specifies if plot should be presented in PST. 1 = plot in PST, 0 = plot in UTC. :type PST: 0 or 1 :arg MSL: Specifies if the plot should be centred about mean sea level. 1=centre about MSL, 0=centre about 0. :type MSL: 0 or 1 :arg figsize: Figure size (width, height) in inches. :type figsize: 2-tuple :returns: matplotlib figure object instance (fig). 
""" # Figure fig, ax = plt.subplots(1, 1, figsize=figsize) # Model ax = plot_files(ax, grid_B, files, 'sossheig', 'None', t_orig, t_final, name, 'Model', colours['model']) # Tides figures.plot_tides(ax, name, PST, MSL, color=colours['predicted']) # Figure format ax.set_title('Modelled Sea Surface Height versus Predicted Tides at {station}: {t_start:%d-%b-%Y} to {t_end:%d-%b-%Y}'.format(station=name, t_start=t_orig, t_end=t_final)) ax.set_ylim([-3.0, 3.0]) ax.set_xlabel('[hrs]') ax.legend(loc=2, ncol=2) ax.grid() return fig def plot_wlev_residual_NOAA(t_orig, elements, figsize=(20, 5)): """ Plots the water level residual as calculated by the function calculate_residual_obsNB and has the option to also plot the observed water levels and predicted tides over the course of one day. :arg t_orig: The beginning of the date range of interest. :type t_orig: datetime object :arg elements: Elements included in figure. 'residual' for residual only and 'all' for residual, observed water level, and predicted tides. :type elements: string :arg figsize: Figure size (width, height) in inches. :type figsize: 2-tuple :returns: fig """ res_obs_NB, obs, tides = calculate_residual_obsNB('Neah Bay', t_orig) # Figure fig, ax = plt.subplots(1, 1, figsize=figsize) # Plot ax.plot(obs.time, res_obs_NB, 'Gray', label='Obs Residual', linewidth=2) if elements == 'all': ax.plot(obs.time, obs.wlev, 'DodgerBlue', label='Obs Water Level', linewidth=2) ax.plot(tides.time, tides.pred[tides.time == obs.time], 'ForestGreen', label='Pred Tides', linewidth=2) if elements == 'residual': pass ax.set_title('Residual of the observed water levels at Neah Bay: {t:%d-%b-%Y}'.format(t=t_orig)) ax.set_ylim([-3.0, 3.0]) ax.set_xlabel('[hrs]') hfmt = mdates.DateFormatter('%m/%d %H:%M') ax.xaxis.set_major_formatter(hfmt) ax.legend(loc=2, ncol=3) ax.grid() return fig def feet_to_metres(feet): """ Converts feet to metres. 
:returns: metres """ metres = feet*0.3048 return metres def load_surge_data(filename_NB): """Loads the textfile with surge predictions for Neah Bay. :arg filename_NB: Path to file of predicted water levels at Neah Bay. :type filename_NB: string :returns: data (data structure) """ # Loading the data from that text file. data = pd.read_csv(filename_NB, skiprows=3, names=['date', 'surge', 'tide', 'obs', 'fcst', 'anom', 'comment'], comment='#') # Drop rows with all Nans data = data.dropna(how='all') return data def to_datetime(datestr, year, isDec, isJan): """ Converts the string given by datestr to a datetime object. The year is an argument because the datestr in the NOAA data doesn't have a year. Times are in UTC/GMT. :arg datestr: Date of data. :type datestr: datetime object :arg year: Year of data. :type year: datetime object :arg isDec: True if run date was December. :type isDec: Boolean :arg isJan: True if run date was January. :type isJan: Boolean :returns: dt (datetime representation of datestr) """ dt = datetime.datetime.strptime(datestr, '%m/%d %HZ') # Dealing with year changes. if isDec and dt.month == 1: dt = dt.replace(year=year+1) elif isJan and dt.month == 12: dt = dt.replace(year=year-1) else: dt = dt.replace(year=year) dt = dt.replace(tzinfo=tz.tzutc()) return dt def retrieve_surge(data, run_date): """ Gathers the surge information a forcing file from on run_date. :arg data: Surge predictions data. :type data: data structure :arg run_date: Simulation run date. 
:type run_date: datetime object :returns: surges (meteres), times (array with time_counter) """ surge = [] times = [] isDec, isJan = False, False if run_date.month == 1: isJan = True if run_date.month == 12: isDec = True # Convert datetime to string for comparing with times in data for d in data.date: dt = _to_datetime(d, run_date.year, isDec, isJan) times.append(dt) daystr = dt.strftime('%m/%d %HZ') tide = data.tide[data.date == daystr].item() obs = data.obs[data.date == daystr].item() fcst = data.fcst[data.date == daystr].item() if obs == 99.90: # Fall daylight savings if fcst == 99.90: # If surge is empty, just append 0 if not surge: surge.append(0) else: # Otherwise append previous value surge.append(surge[-1]) else: surge.append(_feet_to_metres(fcst-tide)) else: surge.append(_feet_to_metres(obs-tide)) return surge, times ``` # Close up ``` def compare_errors1(name, mode, start, end, grid_B, figsize=(20,3)): """ compares the model and forcing error at a station between dates start and end for a simulation mode.""" # array of dates for iteration numdays = (end-start).days dates = [start + datetime.timedelta(days=num) for num in range(0, numdays+1)] dates.sort() # intiialize figure and arrays fig,ax = plt.subplots(1,1,figsize=figsize) e_frc=np.array([]) t_frc=np.array([]) e_mod=np.array([]) t_mod=np.array([]) ttide=figures.get_tides(name) for t_sim in dates: # check if the run happened if mode in verified_runs(t_sim): # retrieve forcing and model error e_frc_tmp, t_frc_tmp = calculate_error_forcing('Neah Bay', [mode], t_sim) e_mod_tmp, t_mod_tmp, _ = calculate_error_model([name], [mode], grid_B, t_sim) e_frc_tmp= figures.interp_to_model_time(t_mod_tmp[name][mode],e_frc_tmp['Neah Bay'][mode],t_frc_tmp['Neah Bay'][mode]) # append to larger array e_frc = np.append(e_frc,e_frc_tmp) t_frc = np.append(t_frc,t_mod_tmp[name][mode]) e_mod = np.append(e_mod,e_mod_tmp[name][mode]) t_mod = np.append(t_mod,t_mod_tmp[name][mode]) else: print '{mode} simulation for {start} did 
not occur'.format(mode=mode, start=t_sim) # Plotting time series ax.plot(t_mod, e_mod*5, 'g', lw=2, label = 'Model error x 5') ax.plot(ttide.time,ttide.pred_all, 'k', lw=2, label='tides') ax.set_title(' Comparison of {mode} error at {name}'.format(mode=mode,name=name)) ax.set_ylim([-3,3]) hfmt = mdates.DateFormatter('%m/%d %H:%M') ax.xaxis.set_major_formatter(hfmt) ax.legend(loc=2, ncol=4) ax.grid() ax.set_xlim([start,end+datetime.timedelta(days=1)]) ax.set_ylabel('[m]') return fig t_orig=datetime.datetime(2015,1,10) t_final = datetime.datetime(2015,1,19) mode = 'nowcast' fig = compare_errors1('Point Atkinson', mode, t_orig,t_final,bathy) fig = compare_errors1('Victoria', mode, t_orig,t_final,bathy) fig = compare_errors1('Campbell River', mode, t_orig,t_final,bathy) def compare_errors2(ax, name, mode, start, end, grid_B, cf, cm): """ compares the model and forcing error at a station between dates start and end for a simulation mode.""" # array of dates for iteration numdays = (end-start).days dates = [start + datetime.timedelta(days=num) for num in range(0, numdays+1)] dates.sort() # intiialize figure and arrays e_frc=np.array([]) t_frc=np.array([]) e_mod=np.array([]) t_mod=np.array([]) # mean daily error frc_daily= np.array([]) mod_daily = np.array([]) t_daily = np.array([]) ttide=figures.get_tides(name) for t_sim in dates: # check if the run happened if mode in verified_runs(t_sim): # retrieve forcing and model error e_frc_tmp, t_frc_tmp = calculate_error_forcing('Neah Bay', [mode], t_sim) e_mod_tmp, t_mod_tmp, _ = calculate_error_model([name], [mode], grid_B, t_sim) e_frc_tmp= figures.interp_to_model_time(t_mod_tmp[name][mode],e_frc_tmp['Neah Bay'][mode],t_frc_tmp['Neah Bay'][mode]) # append to larger array e_frc = np.append(e_frc,e_frc_tmp) t_frc = np.append(t_frc,t_mod_tmp[name][mode]) e_mod = np.append(e_mod,e_mod_tmp[name][mode]) t_mod = np.append(t_mod,t_mod_tmp[name][mode]) # append daily mean error frc_daily=np.append(frc_daily, np.mean(e_frc_tmp)) 
mod_daily=np.append(mod_daily, np.mean(e_mod_tmp[name][mode])) t_daily=np.append(t_daily,t_sim+datetime.timedelta(hours=12)) else: print '{mode} simulation for {start} did not occur'.format(mode=mode, start=t_sim) # Plotting daily means ax.plot(t_daily, frc_daily, cf, label = 'Forcing, ' + mode, lw=2) ax.plot(t_daily, mod_daily, cm, lw=2, label = 'Model, ' + mode) ax.set_title(' Comparison of daily mean error at {name}'.format(mode=mode,name=name)) ax.set_ylim([-.35,.35]) # format axes hfmt = mdates.DateFormatter('%m/%d %H:%M') ax.xaxis.set_major_formatter(hfmt) ax.legend(loc=2, ncol=6) ax.grid() ax.set_xlim([start,end+datetime.timedelta(days=1)]) ax.set_ylabel('[m]') return fig t_orig=datetime.datetime(2015,1,1) t_final = datetime.datetime(2015,1,31) fig,axs = plt.subplots(3,1,figsize=(20,12)) for name, n in zip (['Point Atkinson','Victoria','Campbell River'], np.arange(3)): fig = compare_errors2(axs[n], name, 'nowcast', t_orig,t_final,bathy,'DeepSkyBlue','YellowGreen') fig = compare_errors2(axs[n], name, 'forecast', t_orig,t_final,bathy,'DodgerBlue','OliveDrab') fig = compare_errors2(axs[n], name, 'forecast2', t_orig,t_final,bathy,'SteelBlue','DarkGreen') def compare_errors3(name, mode, start, end, grid_B, figsize=(20,3)): """ compares the model and forcing error at a station between dates start and end for a simulation mode.""" # array of dates for iteration numdays = (end-start).days dates = [start + datetime.timedelta(days=num) for num in range(0, numdays+1)] dates.sort() fig,ax = plt.subplots(1,1,figsize=figsize) # intiialize figure and arrays e_frc=np.array([]) t_frc=np.array([]) e_mod=np.array([]) t_mod=np.array([]) # mean daily error frc_daily= np.array([]) mod_daily = np.array([]) t_daily = np.array([]) ttide=figures.get_tides(name) for t_sim in dates: # check if the run happened if mode in verified_runs(t_sim): # retrieve forcing and model error e_frc_tmp, t_frc_tmp = calculate_error_forcing('Neah Bay', [mode], t_sim) e_mod_tmp, t_mod_tmp, _ = 
calculate_error_model([name], [mode], grid_B, t_sim) e_frc_tmp= figures.interp_to_model_time(t_mod_tmp[name][mode],e_frc_tmp['Neah Bay'][mode],t_frc_tmp['Neah Bay'][mode]) # append to larger array e_frc = np.append(e_frc,e_frc_tmp) t_frc = np.append(t_frc,t_mod_tmp[name][mode]) e_mod = np.append(e_mod,e_mod_tmp[name][mode]) t_mod = np.append(t_mod,t_mod_tmp[name][mode]) # append daily mean error frc_daily=np.append(frc_daily, np.mean(e_frc_tmp)) mod_daily=np.append(mod_daily, np.mean(e_mod_tmp[name][mode])) t_daily=np.append(t_daily,t_sim+datetime.timedelta(hours=12)) # stdev stdev_mod = (max(np.cumsum((mod_daily-np.mean(e_mod))**2))/len(mod_daily))**0.5 else: print '{mode} simulation for {start} did not occur'.format(mode=mode, start=t_sim) # Plotting daily means ax.plot(t_daily, frc_daily, 'b', label = 'Forcing, ' + mode, lw=2) ax.plot(t_daily, mod_daily, 'g', lw=2, label = 'Model, ' + mode) #ax.plot([t_frc[0],t_frc[-1]],[np.mean(e_frc),np.mean(e_frc)], '--b', label='Mean forcing error', lw=2) #ax.plot([t_mod[0],t_mod[-1]],[np.mean(e_mod),np.mean(e_mod)], '--g', label='Mean model error', lw=2) ax.set_title(' Comparison of daily mean error at {name}'.format(mode=mode,name=name)) ax.set_ylim([-.35,.35]) # format axes hfmt = mdates.DateFormatter('%m/%d %H:%M') ax.xaxis.set_major_formatter(hfmt) ax.legend(loc=2, ncol=6) ax.grid() ax.set_xlim([start,end+datetime.timedelta(days=1)]) ax.set_ylabel('[m]') print stdev_mod return fig t_orig=datetime.datetime(2015,1,22) t_final = datetime.datetime(2015,1,24) fig = compare_errors3('Victoria', 'nowcast', t_orig,t_final,bathy) fig = compare_errors3('Victoria', 'forecast', t_orig,t_final,bathy) fig = compare_errors3('Victoria', 'forecast2', t_orig,t_final,bathy) ```
github_jupyter
# An Introduction to Algorithms in Qiskit This is an introduction to algorithms in Qiskit and provides a high-level overview to help understand the various aspects of the functionality to get started. Other tutorials will provide more in-depth material, on given algorithms, and ways to use them etc. ## How is the algorithm library structured? Qiskit provides a number of [Algorithms](https://qiskit.org/documentation/apidoc/qiskit.aqua.algorithms.html) and they are grouped by category according to the task they can perform. For instance `Minimum Eigensolvers` to find the smallest eigenvalue of an operator, for example ground state energy of a chemistry Hamiltonian or a solution to an optimization problem when expressed as an Ising Hamiltonian. There are `Classifiers` for machine learning classification problems and `Amplitude Estimators` for value estimation that can be used say in financial applications. The full set of categories can be seen in the Algorithms documentation link above. Algorithms are configurable and often part of the configuration will be in the form of smaller building blocks, of which different instances of the building block type can be given. For instance with `VQE`, the Variational Quantum Eigensolver, it takes a trial wavefunction, in the form of a `QuantumCircuit` and a classical optimizer among other things. Such building blocks can be found as [Components](https://qiskit.org/documentation/apidoc/qiskit.aqua.components.html) and as circuits from the [Circuit Library](https://qiskit.org/documentation/apidoc/circuit_library.html). Let's take a look at an example to construct a VQE instance. Here `TwoLocal` is the variational form (trial wavefunction), a parameterized circuit which can be varied, and `SLSQP` a classical optimizer. These are created as separate instances and passed to VQE when it is constructed. 
Trying, for example, a different classical optimizer, or variational form is simply a case of creating an instance of the one you want and passing it into VQE. ``` from qiskit.aqua.algorithms import VQE from qiskit.aqua.components.optimizers import SLSQP from qiskit.circuit.library import TwoLocal num_qubits = 2 ansatz = TwoLocal(num_qubits, 'ry', 'cz') opt = SLSQP(maxiter=1000) vqe = VQE(var_form=ansatz, optimizer=opt) ``` Let's draw the ansatz so we can see it's a QuantumCircuit where θ\[0\] through θ\[7\] will be the parameters that are varied as the VQE optimizer finds the minimum eigenvalue. We'll come back to the parameters later in a working example below. ``` ansatz.draw() ``` But more is needed before we can run the algorithm so let's get to that next. ## How to run an algorithm? In order to run an algorithm we need to have a backend, a simulator or real device, on which the circuits that comprise the algorithm can be run. So for example we can use the `statevector_simulator` from the BasicAer provider. ``` from qiskit import BasicAer backend = BasicAer.get_backend('statevector_simulator') ``` Now a backend on its own does not have information on how you might want to run the circuits etc. For example how many shots, do you want a noise model, even options around transpiling the circuits. For this Qiskit Aqua has a [QuantumInstance](https://qiskit.org/documentation/stubs/qiskit.aqua.QuantumInstance.html) which is provided both the backend as well as various settings around the circuit processing and execution so for instance: ``` from qiskit.aqua import QuantumInstance backend = BasicAer.get_backend('qasm_simulator') quantum_instance = QuantumInstance(backend=backend, shots=800, seed_simulator=99) ``` Note: if you provide the backend directly then internally a QuantumInstance will be created from it, with default settings, so at all times the algorithms are working through a QuantumInstance. 
So now we would be able to call the [run()](https://qiskit.org/documentation/stubs/qiskit.aqua.algorithms.VQE.html#qiskit.aqua.algorithms.VQE.run) method, which is common to all algorithms and returns a result specific for the algorithm. In this case since VQE is a MinimumEigensolver we could use the [compute_minimum_eigenvalue()](https://qiskit.org/documentation/stubs/qiskit.aqua.algorithms.VQE.html#qiskit.aqua.algorithms.VQE.compute_minimum_eigenvalue) method. The latter is the interface of choice for the application modules, such as Chemistry and Optimization, in order that they can work interchangeably with any algorithm within the specific category. ## A complete working example Let's put what we have learned from above together and create a complete working example. VQE will find the minimum eigenvalue, i.e. minimum energy value of a Hamiltonian operator and hence we need such an operator for VQE to work with. Such an operator is given below. This was originally created by the Chemistry application module as the Hamiltonian for an H2 molecule at 0.735A interatomic distance. It's a sum of Pauli terms as below, but for now I am not going to say anything further about it since the goal is to run the algorithm, but further information on operators can be found in other tutorials. ``` from qiskit.aqua.operators import X, Z, I H2_op = (-1.052373245772859 * I ^ I) + \ (0.39793742484318045 * I ^ Z) + \ (-0.39793742484318045 * Z ^ I) + \ (-0.01128010425623538 * Z ^ Z) + \ (0.18093119978423156 * X ^ X) ``` So let's build a VQE instance passing the operator, and a backend using a QuantumInstance, to the constructor. Now VQE does have setters so the operator and backend can also be passed later. Setting them later can be useful when creating an algorithm without this problem specific information and then later using it, say with the chemistry application module, which would create the operator for the specific chemistry problem being solved. 
Note: In order that you can run this notebook and see the exact same output the random number generator used throughout Aqua in aqua_globals, as well as the transpiler and simulator, via the QuantumInstance, are seeded. You do not have to do this but if you want to be able to reproduce the exact same outcome each time then this is how it's done. So let's run VQE and print the result object it returns. ``` from qiskit.aqua import aqua_globals seed = 50 aqua_globals.random_seed = seed qi = QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_transpiler=seed, seed_simulator=seed) ansatz = TwoLocal(rotation_blocks='ry', entanglement_blocks='cz') slsqp = SLSQP(maxiter=1000) vqe = VQE(operator=H2_op, var_form=ansatz, optimizer=slsqp, quantum_instance=qi) result = vqe.run() import pprint pp = pprint.PrettyPrinter(indent=4) pp.pprint(result) ``` From the above result we can see it took the optimizer `72` evaluations of parameter values until it found the minimum eigenvalue of `-1.85727` which is the electronic ground state energy of the given H2 molecule. The optimal parameters of the ansatz can also be seen which are the values that were in the ansatz at the minimum value. ## Using VQE as a MinimumEigensolver To close off let's use the MinimumEigensolver interface and also create a VQE instance without supplying either the operator or QuantumInstance. We later set the QuantumInstance and finally pass the operator on the `compute_minimum_eigenvalue` method (though we could have passed it in earlier via the setter instead, as long as by the time VQE runs it has an operator to work on). 
``` aqua_globals.random_seed = seed qi = QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_transpiler=seed, seed_simulator=seed) ansatz = TwoLocal(rotation_blocks='ry', entanglement_blocks='cz') slsqp = SLSQP(maxiter=1000) vqe = VQE(var_form=ansatz, optimizer=slsqp) vqe.quantum_instance = qi result = vqe.compute_minimum_eigenvalue(operator=H2_op) pp.pprint(result) ``` As the identical seeding was used as the prior example the result can be seen to be the same. This concludes this introduction to algorithms in Qiskit. Please check out the other algorithm tutorials in this series for both broader as well as more in depth coverage of the algorithms. ``` import qiskit.tools.jupyter %qiskit_version_table %qiskit_copyright ```
github_jupyter
<table align="left" width="100%"> <tr> <td style="background-color:#ffffff;"><a href="https://qsoftware.lu.lv/index.php/qworld/" target="_blank"><img src="..\images\qworld.jpg" width="35%" align="left"></a></td> <td align="right" style="background-color:#ffffff;vertical-align:bottom;horizontal-align:right"> prepared by Özlem Salehi (<a href="http://qworld.lu.lv/index.php/qturkey/" target="_blank">QTurkey</a>) </td> </tr></table> <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table> $\newcommand{\Mod}[1]{\ (\mathrm{mod}\ #1)}$ $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ 
\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ <h1> <font color="blue"> Solutions for </font> Order Finding Algorithm</h1> <a id="task1"></a> <h3>Task 1</h3> Let $x=5$ and $N=21$. Plot $x^ i \Mod{N}$ for $i$ values between $1$ and $50$ and find the order of $x$. <h3>Solution</h3> ``` import matplotlib.pyplot as plt #Create an empty list l = [] x=5 N=21 #Append x^i mod N values for i in range (50): l.append((x**i) % N) plt.plot(l) plt.show() ``` Let's check each integer and stop when we find an integer $r$ satisfying $x^r = 1 \Mod{N}$ $ 5^1 = 5 ~\mod 21 $ $ 5^2 = 4 ~\mod 21 $ $ 5^3 = 20 \mod 21 $ $ 5^4 = 16 \mod 21 $ $ 5^5 = 17 \mod 21 $ $ 5^6 = 1 ~\mod 21 $ Hence the order $r$ is equal to 6. <a id="task2"></a> <h3>Task 2 (on paper)</h3> Let $\ket{\psi_0}=\ket{1 \Mod{N}}+\ket{x\Mod{N}}+\ket{x^2\Mod{N}}+ \cdots + \ket{x^{r-1}\Mod{N}}$. What is $U_x \ket{\psi_0}$? What can you conclude about $\ket{\psi_0}$? Repeat the same task for $\ket{\psi_1}=\ket{1}+ \omega^{-1}\ket{x\Mod{N}}+\omega^{-2}\ket{x^2\Mod{N}}+ \cdots + \omega^{-(r-1)} \ket{x^{r-1}\Mod{N}}$ where $\omega=e^{-\frac{2{\pi}i}{r}}$. <h3>Solution</h3> \begin{align*} U_x\ket{\psi_0} &= U_x\ket{1 \Mod{N}} + U_x \ket{x \Mod{N}}+ \dots + U_x \ket{x^{r-1} \Mod{N}}\\ &= \ket{x \Mod{N}} + \ket{x^2\Mod{N}}+ \dots + \ket{1\Mod{N}} \\ &= \ket{\psi_0} \end{align*} Hence we can conclude that $\ket{\psi_0}$ is an eigenvector of the operator $U_x$ with eigenvalue 1. Let's repeat the same for $\ket{\psi_1}$. 
\begin{align*} U_x\ket{\psi_1} &= U_x\ket{1 \Mod{N}} + \omega^{-1}U_x \ket{x \Mod{N}}+ \dots +\omega^{-(r-1)} U_x \ket{x^{r-1} \Mod{N}}\\ &= \ket{x \Mod{N}} + \omega^{-1}\ket{x^2 \Mod{N}}+ \dots + \omega^{-(r-1)}\ket{1 \Mod{N}} \\ &= \omega( \omega^{-1}\ket{x \Mod{N}} + \omega^{-2}\ket{x^2 \Mod{N}}+ \dots + \omega^{-(r)}\ket{1 \Mod{N}}) \\ &= \omega\ket{\psi_1} \end{align*} Hence, $\ket{\psi_1}$ is an eigenvector of the operator $U_x$ with an eigenvalue $\omega$. <a id="task3"></a> <h3>Task 3 (on paper)</h3> Show that $\displaystyle \frac{1}{\sqrt{r}}\sum_{s=0}^{r-1}\ket{u_s}= \ket{1}$. <h3>Solution</h3> Let's replace $\ket{u_s}$ in the sum above. We have the following expression: $ \displaystyle \frac{1}{\sqrt{r}}\sum_{s=0}^{r-1}\ket{u_s}= \frac{1}{\sqrt{r}}\sum_{s=0}^{r-1}\frac{1}{\sqrt{r}}\sum_{k=0}^{r-1}e^{\frac{-2{\pi}i s k}{r}}\ket{{x^k} \Mod{N}}$ If $ k=0 $, $ \displaystyle \sum_{s=0}^{r-1}e^{\frac{-2{\pi}i s k}{r}} = r $ and the state $ \ket{1} $ has amplitude 1 in which case for all other $ k $, all amplitudes are equal to 0. Hence $ \displaystyle \frac{1}{\sqrt{r}}\sum_{s=0}^{r-1}\ket{u_s}=\ket{1}.$ <a id="task4"></a> <h3>Task 4</h3> Find the continued fractions expression for $\frac{31}{13}$ and find the convergents first using pen and paper and then using the functions defined above. <h3>Solution</h3> $\frac{31}{13}$ can be expressed as $\frac{31}{13}=2+\frac{5}{13}$. Continuing like this, \begin{align*} =2+\frac{1}{2+\frac{3}{5}} = 2+\frac{1}{2+\frac{1}{\frac{5}{3}}} \end{align*} The resulting expression will be \begin{align*} 2+\frac{1}{2+\frac{1}{1 + \frac{1}{1+ \frac{1}{2}}}} \end{align*} with the continued fraction expression $[2,2,1,1,2]$. 
The convergents are $c_1=2$, $c_2=2 + \frac{1}{2} = \frac{5}{2} $, $c_3 = 2 + \frac{1}{2 + \frac{1}{1}} = \frac{7}{3}$, $c_4 = 2+ \frac{ 1}{2 + \frac{1}{1 + \frac{1}{1}}} = \frac{12}{5}$, $c_5 = 2+\frac{1}{2+\frac{1}{1 + \frac{1}{1+ \frac{1}{2}}}} = \frac{31}{13}$ Let's find the continued fractions expression and convergents for $\frac{31}{13}$ using the functions defined in the notebook. ``` %run ../include/helpers.py cf=contFrac(31/13) print(cf) cv=convergents(cf) print(cv) ``` <a id="task5"></a> <h3>Task 5</h3> You are given a function named $U_x$ which implements $ U_x \ket{y} \rightarrow \ket{xy {\Mod{N}}}$ and returns its controlled version. Run the following cell to load the function. ``` %run operator.py ``` In order to use the function you should pass $x$ and $N$ as parameter. <pre>CU=Ux(x,N)</pre> Let $x=3$ and $N=20$. Use phase estimation procedure to find the estimates for $\frac{s}{r}$. Pick the correct values for $t$ and $L$. You can use the <i>qpe</i> function you have already implemented. Plot your results using a histogram. Where do the peaks occur? 
<h3>Solution</h3>

```
# %load qpe.py
import cirq

def qpe(t,control, target, circuit, CU):
    """Append the quantum phase estimation circuit to `circuit` (modified in place).

    t       -- number of control (precision) qubits
    control -- list of control qubits; control[0] is the most significant
    target  -- list of target qubits holding the eigenstate of CU
    circuit -- cirq.Circuit being built
    CU      -- controlled unitary gate whose eigenphase is being estimated
    """
    #Apply Hadamard to control qubits
    circuit.append(cirq.H.on_each(control))

    #Apply CU gates
    for i in range(t):
        #Obtain the power of CU gate
        CUi = CU**(2**i)
        #Apply CUi gate where t-i-1 is the control
        circuit.append(CUi(control[t-i-1],*target))

    #Apply inverse QFT
    iqft(t,control,circuit)

# %load iqft.py
import cirq
from cirq.circuits import InsertStrategy
from cirq import H, SWAP, CZPowGate

def iqft(n,qubits,circuit):
    """Append the n-qubit inverse quantum Fourier transform to `circuit` in place."""
    #Swap the qubits
    for i in range(n//2):
        circuit.append(SWAP(qubits[i],qubits[n-i-1]), strategy = InsertStrategy.NEW)

    #For each qubit
    for i in range(n-1,-1,-1):
        #Apply CR_k gates where j is the control and i is the target
        k=n-i #We start with k=n-i
        for j in range(n-1,i,-1):
            #Define and apply CR_k gate
            crk = CZPowGate(exponent = -2/2**(k))
            circuit.append(crk(qubits[j],qubits[i]),strategy = InsertStrategy.NEW)
            k=k-1 #Decrement at each step
        #Apply Hadamard to the qubit
        circuit.append(H(qubits[i]),strategy = InsertStrategy.NEW)
```

$t$ should be picked as $2L + 1 + \big \lceil \log \big( 2 + \frac{1}{2\epsilon} \big) \big \rceil$. Let $\epsilon=0.1$. First let's find $L$.
``` import math L=math.ceil(math.log2(20)) print(L) ``` $ L= \big \lceil \log N \big \rceil $ = $\big \lceil \log 20 \big \rceil =5.$ ``` e = 0.1 num_t = 2*L+1+math.ceil(math.log2(2+1/(2*e))) print(num_t) import cirq import matplotlib #Create a circuit circuit = cirq.Circuit() #Assign the size of the registers t=num_t n=L #Create control and target qubits control = [cirq.LineQubit(i) for i in range(1,t+1) ] target = [cirq.LineQubit(i) for i in range(t+1,t+1+n) ] circuit.append(cirq.X(target[n-1])) #Create operator CU x=3 N=20 CU=Ux(x,N) #Call phase estimation circuit qpe(t,control, target, circuit, CU) #Measure the control register circuit.append(cirq.measure(*control, key='result')) #Sample the circuit s=cirq.Simulator() print('Sample the circuit:') samples=s.run(circuit, repetitions=1000) # Print a histogram of results results= samples.histogram(key='result') print(results) import matplotlib.pyplot as plt plt.bar([str(key) for key in results.keys()], results.values()) plt.show() ``` <a id="task6"></a> <h3>Task 6</h3> For each one of the possible outcomes in Task 5, try to find out the value of $r$ using continued fractions algorithm. You can use the functions defined above. <h3>Solution</h3> The outcomes are 0, 4096, 8192 and 12288. - From 0, we don't get any meaningful result. - Let's check $\frac{4096}{2^{14}}$. ``` %run ../include/helpers.py cf = contFrac(4096/2**14) cv = convergents(cf) print(cv) ``` The candidate is $s'=1$ and $r'=4$. Indeed 4 is the answer. Let's check the other cases as well. ``` cf = contFrac(8192/2**14) cv = convergents(cf) print(cv) ``` From 8192, we can not get the correct result. The reason is that $s=2$ and $r=4$ which are not relatively prime and as a result we get $s'=1$ and $r'=2$. ``` cf = contFrac(12288/2**14) cv = convergents(cf) print(cv) ``` The candidate is $s'=3$ and $r'=4$. We get the correct result. <a id="task7"></a> <h3>Task 7</h3> Repeat Task 5 and Task 6 with $x$=5 and $N=42$. 
<h3>Solution</h3>

```
%run operator.py

# %load qpe.py
import cirq

def qpe(t,control, target, circuit, CU):
    """Append the quantum phase estimation circuit to `circuit` (modified in place).

    t       -- number of control (precision) qubits
    control -- list of control qubits; control[0] is the most significant
    target  -- list of target qubits holding the eigenstate of CU
    circuit -- cirq.Circuit being built
    CU      -- controlled unitary gate whose eigenphase is being estimated
    """
    #Apply Hadamard to control qubits
    circuit.append(cirq.H.on_each(control))

    #Apply CU gates
    for i in range(t):
        #Obtain the power of CU gate
        CUi = CU**(2**i)
        #Apply CUi gate where t-i-1 is the control
        circuit.append(CUi(control[t-i-1],*target))

    #Apply inverse QFT
    iqft(t,control,circuit)

# %load iqft.py
import cirq
from cirq.circuits import InsertStrategy
from cirq import H, SWAP, CZPowGate

def iqft(n,qubits,circuit):
    """Append the n-qubit inverse quantum Fourier transform to `circuit` in place."""
    #Swap the qubits
    for i in range(n//2):
        circuit.append(SWAP(qubits[i],qubits[n-i-1]), strategy = InsertStrategy.NEW)

    #For each qubit
    for i in range(n-1,-1,-1):
        #Apply CR_k gates where j is the control and i is the target
        k=n-i #We start with k=n-i
        for j in range(n-1,i,-1):
            #Define and apply CR_k gate
            crk = CZPowGate(exponent = -2/2**(k))
            circuit.append(crk(qubits[j],qubits[i]),strategy = InsertStrategy.NEW)
            k=k-1 #Decrement at each step
        #Apply Hadamard to the qubit
        circuit.append(H(qubits[i]),strategy = InsertStrategy.NEW)
```

$t$ should be picked as $2L + 1 + \big \lceil \log \big( 2 + \frac{1}{2\epsilon} \big) \big \rceil$. Let $\epsilon=0.1$. First let's find $L$.
``` import math L=math.ceil(math.log2(42)) print(L) ``` $ L= \big \lceil \log N \big \rceil $ = $ L= \big \lceil \log 42 \big \rceil =6.$ ``` e = 0.1 num_t = 2*L+1+math.ceil(math.log2(2+1/(2*e))) print(num_t) import cirq import matplotlib #Create a circuit circuit = cirq.Circuit() #Assign the size of the registers t=num_t n=L #Create control and target qubits control = [cirq.LineQubit(i) for i in range(1,t+1) ] target = [cirq.LineQubit(i) for i in range(t+1,t+1+n) ] circuit.append(cirq.X(target[n-1])) #Create operator CU x=5 N=42 CU=Ux(x,N) #Call phase estimation circuit qpe(t,control, target, circuit, CU) #Measure the control register circuit.append(cirq.measure(*control, key='result')) #Sample the circuit s=cirq.Simulator() print('Sample the circuit:') samples=s.run(circuit, repetitions=1000) # Print a histogram of results results= samples.histogram(key='result') print(results) import matplotlib.pyplot as plt plt.bar(results.keys(), results.values()) plt.show() ``` The peaks occur at 0, 10923, 21845 32768, 43691, 54613. - From 0, we don't get any meaningful result. - Let's check $\frac{10923}{2^{16}}$. ``` cf = contFrac(10923/2**16) cv = convergents(cf) print(cv) ``` The candidates are $s'=1$, $r'=5$ and $s''=1$, $r''=6$. Indeed the answer is 6. ``` cf = contFrac(21845/2**16) cv = convergents(cf) print(cv) ``` From 21845, we can not get the correct result. The reason is that $s=2$ and $r=6$ which are not relatively prime and as a result we get $s'=1$ and $r'=3$. ``` cf = contFrac(32768/2**16) cv = convergents(cf) print(cv) ``` From 32768, we can not get the correct result. The reason is that $s=3$ and $r=6$ which are not relatively prime and as a result we get $s'=1$ and $r'=2$. ``` cf = contFrac(43691/2**16) cv = convergents(cf) print(cv) ``` From 43691, we can not get the correct result. The reason is that $s=4$ and $r=6$ which are not relatively prime and as a result we get $s'=2$ and $r'=3$. 
```
cf = contFrac(54613/2**16)
cv = convergents(cf)
print(cv)
```

The candidates are $s'=4$, $r'=5$ and $s''=5$, $r''=6$. Checking both, $5^5 \equiv 17 \Mod{42}$ while $5^6 \equiv 1 \Mod{42}$, so $r'=5$ is eliminated and the correct order is $r=6$.
github_jupyter
# Updates and GDPR using Delta Lake - PySpark In this notebook, we will review Delta Lake's end-to-end capabilities in PySpark. You can also look at the original Quick Start guide if you are not familiar with [Delta Lake](https://github.com/delta-io/delta) [here](https://docs.delta.io/latest/quick-start.html). It provides code snippets that show how to read from and write to Delta Lake tables from interactive, batch, and streaming queries. In this notebook, we will cover the following: - Creating sample mock data containing customer orders - Writing this data into storage in Delta Lake table format (or in short, Delta table) - Querying the Delta table using functional and SQL - The Curious Case of Forgotten Discount - Making corrections to data - Enforcing GDPR on your data - Oops, enforced it on the wrong customer! - Looking at the audit log to find mistakes in operations - Rollback all the way! - Closing the loop - 'defrag' your data # Creating sample mock data containing customer orders For this tutorial, we will setup a sample file containing customer orders with a simple schema: (order_id, order_date, customer_name, price). ``` spark.sql("DROP TABLE IF EXISTS input"); spark.sql(""" CREATE TEMPORARY VIEW input AS SELECT 1 order_id, '2019-11-01' order_date, 'Saveen' customer_name, 100 price UNION ALL SELECT 2, '2019-11-01', 'Terry', 50 UNION ALL SELECT 3, '2019-11-01', 'Priyanka', 100 UNION ALL SELECT 4, '2019-11-02', 'Steve', 10 UNION ALL SELECT 5, '2019-11-03', 'Rahul', 10 UNION ALL SELECT 6, '2019-11-03', 'Niharika', 75 UNION ALL SELECT 7, '2019-11-03', 'Elva', 90 UNION ALL SELECT 8, '2019-11-04', 'Andrew', 70 UNION ALL SELECT 9, '2019-11-05', 'Michael', 20 UNION ALL SELECT 10, '2019-11-05', 'Brigit', 25""") orders = spark.sql("SELECT * FROM input") orders.show() orders.printSchema() ``` # Writing this data into storage in Delta Lake table format (or in short, Delta table) To create a Delta Lake table, you can write a DataFrame out in the **delta** format. 
You can use existing Spark SQL code and change the format from parquet, csv, json, and so on, to delta. These operations create a new Delta Lake table using the schema that was inferred from your DataFrame. If you already have existing data in Parquet format, you can do an "in-place" conversion to Delta Lake format. The code would look like following: DeltaTable.convertToDelta(spark, $"parquet.`{path_to_data}`"); //Confirm that the converted data is now in the Delta format DeltaTable.isDeltaTable(parquetPath) ``` import random session_id = random.randint(0,1000) path = "/delta/delta-table-{0}".format(session_id) path # Here's how you'd do this in Parquet: # orders.repartition(1).write().format("parquet").save(path) orders.repartition(1).write.format("delta").save(path) ``` # Querying the Delta table using functional and SQL ``` ordersDataFrame = spark.read.format("delta").load(path) ordersDataFrame.show() ordersDataFrame.createOrReplaceTempView("ordersDeltaTable") spark.sql("SELECT * FROM ordersDeltaTable").show ``` # Understanding Meta-data In Delta Lake, meta-data is no different from data i.e., it is stored next to the data. Therefore, an interesting side-effect here is that you can peek into meta-data using regular Spark APIs. ``` [log_line.value for log_line in spark.read.text(path + "/_delta_log/").collect()] ``` # The Curious Case of Forgotten Discount - Making corrections to data Now that you are able to look at the orders table, you realize that you forgot to discount the orders that came in on November 1, 2019. Worry not! You can quickly make that correction. 
``` from delta.tables import * from pyspark.sql.functions import * table = DeltaTable.forPath(spark, path) # Update every transaction that took place on November 1, 2019 and apply a discount of 10% table.update( condition = expr("order_date == '2019-11-01'"), set = {"price": expr("price - price*0.1") }) table.toDF() ``` When you now inspect the meta-data, what you will notice is that the original data is over-written. Well, not in a true sense but appropriate entries are added to Delta's transaction log so it can provide an "illusion" that the original data was deleted. We can verify this by re-inspecting the meta-data. You will see several entries indicating reference removal to the original data. ``` [log_line.value for log_line in spark.read.text(path + "/_delta_log/").collect()] ``` # Enforcing GDPR on your data One of your customers wanted their data to be deleted. But wait, you are working with data stored on an immutable file system (e.g., HDFS, ADLS, WASB). How would you delete it? Using Delta Lake's Delete API. Delta Lake provides programmatic APIs to conditionally update, delete, and merge (upsert) data into tables. For more information on these operations, see [Table Deletes, Updates, and Merges](https://docs.delta.io/latest/delta-update.html). ``` # Delete the appropriate customer table.delete(condition = expr("customer_name == 'Saveen'")) table.toDF().show() ``` # Oops, enforced it on the wrong customer! - Looking at the audit/history log to find mistakes in operations Delta's most powerful feature is the ability to allow looking into history i.e., the changes that were made to the underlying Delta Table. The cell below shows how simple it is to inspect the history. ``` table.history().drop("userId", "userName", "job", "notebook", "clusterId", "isolationLevel", "isBlindAppend").show(20, 1000, False) ``` # Rollback all the way using Time Travel! You can query previous snapshots of your Delta Lake table by using a feature called Time Travel. 
If you want to access the data that you overwrote, you can query a snapshot of the table before you overwrote the first set of data using the versionAsOf option. Once you run the cell below, you should see the first set of data, from before you overwrote it. Time Travel is an extremely powerful feature that takes advantage of the power of the Delta Lake transaction log to access data that is no longer in the table. Removing the version 0 option (or specifying version 1) would let you see the newer data again. For more information, see [Query an older snapshot of a table (time travel)](https://docs.delta.io/latest/delta-batch.html#deltatimetravel).

```
# Restore the table to version 1 by re-writing that snapshot over the current state.
spark.read.format("delta").option("versionAsOf", "1").load(path).write.mode("overwrite").format("delta").save(path)

# Delete the correct customer this time
table.delete(condition = expr("customer_name == 'Rahul'"))
table.toDF().show()

# Inspect the history again to confirm the rollback and the corrected delete.
table.history().drop("userId", "userName", "job", "notebook", "clusterId", "isolationLevel", "isBlindAppend").show(20, 1000, False)
```

# Closing the loop - 'defrag' your data

```
# Disable the retention-duration safety check so vacuum accepts a very short horizon.
spark.conf.set("spark.databricks.delta.retentionDurationCheck.enabled", "false")
table.vacuum(0.01)
# Alternate syntax (SQL): spark.sql("VACUUM delta.`{}`".format(path)).show()
```
github_jupyter
<a href="https://colab.research.google.com/github/vksriharsha/Causal-Inference-on-Gene-Expression-Data/blob/main/PCA_CMI_HumanCancer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` from google.colab import drive drive.mount('/content/drive') !pip install pycm import pandas as pd from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn import preprocessing from sklearn.feature_selection import VarianceThreshold import numpy as np from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import keras from keras.callbacks import EarlyStopping, ModelCheckpoint from keras.layers.advanced_activations import LeakyReLU from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import precision_recall_curve, roc_curve, auc, average_precision_score from sklearn.metrics import confusion_matrix from sklearn.model_selection import StratifiedKFold from pycm import * from matplotlib.pyplot import figure import seaborn as sn import time import os import numpy as np import pandas as pd import argparse import matplotlib.pyplot as plt from copy import deepcopy from scipy import interpolate from sklearn.feature_selection import mutual_info_regression from scipy.stats import pearsonr import scipy.sparse import sys import pickle import re from scipy import stats from numpy import savetxt from numpy import genfromtxt import networkx as nx from scipy.stats import norm import itertools import math import copy from sklearn.metrics import roc_curve, roc_auc_score from sklearn.metrics import precision_recall_curve, roc_curve, auc, average_precision_score from sklearn.metrics import confusion_matrix from pycm import * tcga_data_df = pd.read_csv('/content/drive/MyDrive/Thesis/Human-Cancer-Prediction/TCGA_GTEX_Data_18212_7142.tsv', delimiter='\t') tcga_metadata_df = 
pd.read_csv('/content/drive/MyDrive/Thesis/Human-Cancer-Prediction/TCGA_GTEX_MetaData_7142_23.tsv', delimiter='\t') tcga_data_df = tcga_data_df.drop(['NCBI_description','NCBI_other_designations','NCBI_chromosome', 'NCBI_map_location', 'NCBI_OMIM', 'CGC_Tumour Types(Somatic)', 'CGC_Tumour Types(Germline)', 'CGC_Role in Cancer', 'CGC_Translocation Partner', 'CGC_Somatic', 'CGC_Germline', 'CGC_Mutation Types', 'CGC_Molecular Genetics', 'CGC_Tissue Type', 'CGC_Cancer Syndrome', 'CGC_Other Syndrome', 'OMIM_Comments', 'OMIM_Phenotypes', 'Hugo_RefSeq IDs', 'Hugo_Ensembl gene ID', 'Hugo_Enzyme IDs', 'Hugo_Pubmed IDs', 'Hugo_Locus group', 'Hugo_Gene group name'],axis=1) tcga_data_df = tcga_data_df.T tcga_data_df.columns = tcga_data_df.iloc[0] tcga_data_df = tcga_data_df.drop(tcga_data_df.index[0]) def x(a): return np.log2(a.astype('float32') + 1) tcga_data_df = tcga_data_df.apply(x, axis = 1) tcga_data_df tcga_metadata_df = tcga_metadata_df[['portions.analytes.aliquots.submitter_id', 'clinical.disease']] tcga_metadata_df['clinical.disease'] = tcga_metadata_df['clinical.disease'].fillna('normal') tcga_metadata_df = tcga_metadata_df.set_index('portions.analytes.aliquots.submitter_id') tcga_metadata_df tcga_data_df = pd.merge(tcga_data_df, tcga_metadata_df, left_index=True, right_index=True) tcga_data_df some_values = ['BRCA', 'Breast_normal'] tcga_data_breast_df = tcga_data_df.loc[tcga_data_df['clinical.disease'].isin(some_values)] tcga_data_breast_df tcga_data_breast_df = tcga_data_breast_df[['CD300LG','COL10A1','CA4','ADH1B','SCARA5','AQP7','FABP4','RBP4','MMP13','CIDEC', 'clinical.disease']] tcga_data_breast_df tcga_data_brca_df = tcga_data_breast_df.loc[tcga_data_breast_df['clinical.disease'] == 'BRCA'] tcga_data_brca_df = tcga_data_brca_df[['CD300LG','COL10A1','CA4','ADH1B','SCARA5','AQP7','FABP4','RBP4','MMP13','CIDEC']] tcga_data_breastnormal_df = tcga_data_breast_df.loc[tcga_data_breast_df['clinical.disease'] == 'Breast_normal'] tcga_data_breastnormal_df = 
tcga_data_breastnormal_df[['CD300LG','COL10A1','CA4','ADH1B','SCARA5','AQP7','FABP4','RBP4','MMP13','CIDEC']] tcga_data_breastnormal_df def conditional_mutual_info(X,Y,Z=np.array(1)): if X.ndim == 1: X = np.reshape(X, (-1, 1)) if Y.ndim == 1: Y = np.reshape(Y, (-1, 1)) if Z.ndim == 0: c1 = np.cov(X) if c1.ndim != 0: d1 = np.linalg.det(c1) else: d1 = c1.item() c2 = np.cov(Y) if c2.ndim != 0: d2 = np.linalg.det(c2) else: d2 = c2.item() c3 = np.cov(X,Y) if c3.ndim != 0: d3 = np.linalg.det(c3) else: d3 = c3.item() cmi = (1/2)*np.log((d1*d2)/d3) else: if Z.ndim == 1: Z = np.reshape(Z, (-1, 1)) c1 = np.cov(np.concatenate((X, Z), axis=0)) if c1.ndim != 0: d1 = np.linalg.det(c1) else: d1 = c1.item() c2 = np.cov(np.concatenate((Y, Z), axis=0)) if c2.ndim != 0: d2 = np.linalg.det(c2) else: d2 = c2.item() c3 = np.cov(Z) if c3.ndim != 0: d3 = np.linalg.det(c3) else: d3 = c3.item() c4 = np.cov(np.concatenate((X, Y, Z), axis=0)) if c4.ndim != 0: d4 = np.linalg.det(c4) else: d4 = c4.item() cmi = (1/2)*np.log((d1*d2)/(d3*d4)) if math.isinf(cmi): cmi = 0 return cmi def pca_cmi(data, theta, max_order,filename): genes = list(data.columns) predicted_graph = nx.complete_graph(genes) num_edges = predicted_graph.number_of_edges() L = -1 nochange = False while L < max_order and nochange == False: L = L+1 predicted_graph, nochange = remove_edges(predicted_graph, data, L, theta) print() print() print("Final Prediction:") print("-----------------") print("Order : {}".format(L)) print("Number of edges in the predicted graph : {}".format(predicted_graph.number_of_edges())) f = plt.figure() nx.draw(predicted_graph, with_labels=True, font_weight='bold') plt.savefig('/content/drive/MyDrive/COM S 673/DREAM3 in silico challenge/Results_HumanCancer/Undirected_'+filename+'_'+str(theta)+'.png') plt.show() print() return predicted_graph def remove_edges(predicted_graph, data, L, theta): initial_num_edges = predicted_graph.number_of_edges() edges = predicted_graph.edges() for edge in edges: neighbors = 
nx.common_neighbors(predicted_graph, edge[0], edge[1]) nhbrs = copy.deepcopy(sorted(neighbors)) T = len(nhbrs) if T < L and L != 0: continue else: x = data[edge[0]].to_numpy() if x.ndim == 1: x = np.reshape(x, (-1, 1)) y = data[edge[1]].to_numpy() if y.ndim == 1: y = np.reshape(y, (-1, 1)) K = list(itertools.combinations(nhbrs, L)) if L == 0: cmiVal = conditional_mutual_info(x.T, y.T) if cmiVal < theta: predicted_graph.remove_edge(edge[0], edge[1]) else: maxCmiVal = 0 for zgroup in K: z = data[list(zgroup)].to_numpy() if z.ndim == 1: z = np.reshape(z, (-1, 1)) cmiVal = conditional_mutual_info(x.T, y.T, z.T) if cmiVal > maxCmiVal: maxCmiVal = cmiVal if maxCmiVal < theta: predicted_graph.remove_edge(edge[0], edge[1]) final_num_edges = predicted_graph.number_of_edges() if final_num_edges < initial_num_edges: return predicted_graph, False return predicted_graph, True def get_chains(graph): adj_list = nx.generate_adjlist(graph, delimiter=" ") mapping = {} for idx,line in enumerate(adj_list): line = line.split(" ") mapping[line[0]] = set(line[1:]) for element in mapping: for adjacent_element in mapping[element]: mapping[adjacent_element].add(element) triples = [] for element in mapping: for adjacent_element in mapping[element]: for adj_adj_element in mapping[adjacent_element]: if adj_adj_element != element: triple = [element, adjacent_element, adj_adj_element] triples.append(triple) return triples def forms_v_shape(adjMatrix, point1, point2): length = adjMatrix.shape[0] for i in range(0,length): if adjMatrix[i][point2] == 1 and adjMatrix[point2][i] == 0 and i != point1: return True return False def forms_cycle(adjMatrix, point1, point2): len = adjMatrix.shape[0] for i in range(0,len): for j in range(0,len): if adjMatrix[i][j] == 1 and adjMatrix[j][i] == 1: adjMatrix[i][j] = 0 adjMatrix[j][i] = 0 adjMatrix[point1][point2] = 1 adjMatrix[point2][point1] = 0 G = nx.from_numpy_matrix(adjMatrix,create_using=nx.DiGraph) return not(nx.is_directed_acyclic_graph(G)) def 
align_edges(graph, data, theta): num_nodes = graph.number_of_nodes() directed_graph = nx.to_numpy_array(graph) #Step 1: Align the v-structure mapping = {} for i in range(0,num_nodes): mapping[i] = 'G'+str(i+1) non_edge_pairs = list(nx.non_edges(graph)) for non_edge in non_edge_pairs: common_neighbors = sorted(nx.common_neighbors(graph, non_edge[0], non_edge[1])) x = data[non_edge[0]].to_numpy() if x.ndim == 1: x = np.reshape(x, (-1, 1)) y = data[non_edge[1]].to_numpy() if y.ndim == 1: y = np.reshape(y, (-1, 1)) for neighbor in common_neighbors: z = data[neighbor].to_numpy() if z.ndim == 1: z = np.reshape(z, (-1, 1)) cmiVal = conditional_mutual_info(x.T, y.T, z.T) xind = data.columns.get_loc(non_edge[0]) yind = data.columns.get_loc(non_edge[1]) zind = data.columns.get_loc(neighbor) if directed_graph[xind][zind] == 1 and directed_graph[zind][xind] == 1 and directed_graph[yind][zind] == 1 and directed_graph[zind][yind] == 1: if not cmiVal < theta: directed_graph[xind][zind] = 1 directed_graph[zind][xind] = 0 directed_graph[yind][zind] = 1 directed_graph[zind][yind] = 0 # Step 2: Use Rule 1 of edge alignments to orient edges a -> b - c to a -> b -> c if adding the edge does not form a cycle or v-structure triples = get_chains(graph) for triple in triples: xind = data.columns.get_loc(triple[0]) yind = data.columns.get_loc(triple[1]) zind = data.columns.get_loc(triple[2]) if directed_graph[xind][zind] == 0 and directed_graph[zind][xind] == 0 : frozen_graph = np.copy(directed_graph) forms_v = forms_v_shape(frozen_graph, yind, zind) forms_cyc = forms_cycle(frozen_graph, yind, zind) if not ( forms_v or forms_cyc ): if directed_graph[xind][yind] == 1 and directed_graph[yind][xind] == 0 and directed_graph[yind][zind] == 1 and directed_graph[zind][yind] == 1: directed_graph[yind][zind] = 1 directed_graph[zind][yind] = 0 # Step 3: Use Rule 2 of edge alignments to orient edges that form a cycle if oriented the other way. 
frozen_graph = np.copy(directed_graph) for i in range(0,num_nodes): for j in range(0,num_nodes): if frozen_graph[i][j] == 1 and frozen_graph[j][i] == 1: if forms_cycle(frozen_graph, i, j) and not(forms_cycle(frozen_graph, j, i)): directed_graph[j][i] = 1 directed_graph[i][j] = 0 G = nx.from_numpy_matrix(directed_graph,create_using=nx.DiGraph) G = nx.relabel_nodes(G, mapping) return G predicted_graph_brca = pca_cmi(tcga_data_brca_df, 0.05, 20, "HumanCancer_BRCA") predicted_graph_breastnormal = pca_cmi(tcga_data_breastnormal_df, 0.05, 20, "HumanCancer_BreastNormal") ```
github_jupyter
``` # # https://github.com/crflynn/fbm - Christopher Flynn # """Generate realizations of fractional Brownian motion.""" # import warnings # import numpy as np # class FBM(object): # """The FBM class. # After instantiating with n = number of increments, hurst parameter, length # of realization (default = 1) and method of generation # (default daviesharte), call fbm() for fBm, fgn() # for fGn, or times() to get corresponding time values. # """ # def __init__(self, n, hurst, length=1, method="daviesharte"): # """Instantiate the FBM.""" # self._methods = {"daviesharte": self._daviesharte, "cholesky": self._cholesky, "hosking": self._hosking} # self.n = n # self.hurst = hurst # self.length = length # self.method = method # self._fgn = self._methods[self.method] # # Some reusable values to speed up Monte Carlo. # self._cov = None # self._eigenvals = None # self._C = None # # Flag if some params get changed # self._changed = False # def __str__(self): # """Str method.""" # return ( # "fBm (" # + str(self.method) # + ") on [0, " # + str(self.length) # + "] with Hurst value " # + str(self.hurst) # + " and " # + str(self.n) # + " increments" # ) # def __repr__(self): # """Repr method.""" # return ( # "FBM(n=" # + str(self.n) # + ", hurst=" # + str(self.hurst) # + ", length=" # + str(self.length) # + ', method="' # + str(self.method) # + '")' # ) # @property # def n(self): # """Get the number of increments.""" # return self._n # @n.setter # def n(self, value): # if not isinstance(value, int) or value <= 0: # raise TypeError("Number of increments must be a positive int.") # self._n = value # self._changed = True # @property # def hurst(self): # """Hurst parameter.""" # return self._hurst # @hurst.setter # def hurst(self, value): # if not isinstance(value, float) or value <= 0 or value >= 1: # raise ValueError("Hurst parameter must be in interval (0, 1).") # self._hurst = value # self._changed = True # @property # def length(self): # """Get the length of process.""" # return 
self._length # @length.setter # def length(self, value): # if not isinstance(value, (int, float)) or value <= 0: # raise ValueError("Length of fbm must be greater than 0.") # self._length = value # self._changed = True # @property # def method(self): # """Get the algorithm used to generate.""" # return self._method # @method.setter # def method(self, value): # if value not in self._methods: # raise ValueError("Method must be 'daviesharte', 'hosking' or 'cholesky'.") # self._method = value # self._fgn = self._methods[self.method] # self._changed = True # def fbm(self): # """Sample the fractional Brownian motion.""" # return np.insert(self.fgn().cumsum(), [0], 0) # def fgn(self): # """Sample the fractional Gaussian noise.""" # scale = (1.0 * self.length / self.n) ** self.hurst # gn = np.random.normal(0.0, 1.0, self.n) # # If hurst == 1/2 then just return Gaussian noise # if self.hurst == 0.5: # return gn * scale # else: # fgn = self._fgn(gn) # # Scale to interval [0, L] # return fgn * scale # def times(self): # """Get times associated with the fbm/fgn samples.""" # return np.linspace(0, self.length, self.n + 1) # def _autocovariance(self, k): # """Autocovariance for fgn.""" # return 0.5 * (abs(k - 1) ** (2 * self.hurst) - 2 * abs(k) ** (2 * self.hurst) + abs(k + 1) ** (2 * self.hurst)) # def _daviesharte(self, gn): # """Generate a fgn realization using Davies-Harte method. # Uses Davies and Harte method (exact method) from: # Davies, Robert B., and D. S. Harte. "Tests for Hurst effect." # Biometrika 74, no. 1 (1987): 95-101. # Can fail if n is small and hurst close to 1. Falls back to Hosking # method in that case. See: # Wood, Andrew TA, and Grace Chan. "Simulation of stationary Gaussian # processes in [0, 1] d." Journal of computational and graphical # statistics 3, no. 4 (1994): 409-432. 
# """ # # Monte carlo consideration # if self._eigenvals is None or self._changed: # # Generate the first row of the circulant matrix # row_component = [self._autocovariance(i) for i in range(1, self.n)] # reverse_component = list(reversed(row_component)) # row = [self._autocovariance(0)] + row_component + [0] + reverse_component # # Get the eigenvalues of the circulant matrix # # Discard the imaginary part (should all be zero in theory so # # imaginary part will be very small) # self._eigenvals = np.fft.fft(row).real # self._changed = False # # If any of the eigenvalues are negative, then the circulant matrix # # is not positive definite, meaning we cannot use this method. This # # occurs for situations where n is low and H is close to 1. # # Fall back to using the Hosking method. See the following for a more # # detailed explanation: # # # # Wood, Andrew TA, and Grace Chan. "Simulation of stationary Gaussian # # processes in [0, 1] d." Journal of computational and graphical # # statistics 3, no. 4 (1994): 409-432. # if np.any([ev < 0 for ev in self._eigenvals]): # warnings.warn( # "Combination of increments n and Hurst value H " # "invalid for Davies-Harte method. Reverting to Hosking method." # " Occurs when n is small and Hurst is close to 1. " # ) # # Set method to hosking for future samples. # self.method = "hosking" # # Don"t need to store eigenvals anymore. # self._eigenvals = None # return self._hosking(gn) # # Generate second sequence of i.i.d. standard normals # gn2 = np.random.normal(0.0, 1.0, self.n) # # Resulting sequence from matrix multiplication of positive definite # # sqrt(C) matrix with fgn sample can be simulated in this way. 
# w = np.zeros(2 * self.n, dtype=complex) # for i in range(2 * self.n): # if i == 0: # w[i] = np.sqrt(self._eigenvals[i] / (2 * self.n)) * gn[i] # elif i < self.n: # w[i] = np.sqrt(self._eigenvals[i] / (4 * self.n)) * (gn[i] + 1j * gn2[i]) # elif i == self.n: # w[i] = np.sqrt(self._eigenvals[i] / (2 * self.n)) * gn2[0] # else: # w[i] = np.sqrt(self._eigenvals[i] / (4 * self.n)) * (gn[2 * self.n - i] - 1j * gn2[2 * self.n - i]) # # Resulting z is fft of sequence w. Discard small imaginary part (z # # should be real in theory). # z = np.fft.fft(w) # fgn = z[: self.n].real # return fgn # def _cholesky(self, gn): # """Generate a fgn realization using the Cholesky method. # Uses Cholesky decomposition method (exact method) from: # Asmussen, S. (1998). Stochastic simulation with a view towards # stochastic processes. University of Aarhus. Centre for Mathematical # Physics and Stochastics (MaPhySto)[MPS]. # """ # # Monte carlo consideration # if self._C is None or self._changed: # # Generate covariance matrix # G = np.zeros([self.n, self.n]) # for i in range(self.n): # for j in range(i + 1): # G[i, j] = self._autocovariance(i - j) # # Cholesky decomposition # self._C = np.linalg.cholesky(G) # self._changed = False # # Generate fgn # fgn = np.dot(self._C, np.array(gn).transpose()) # fgn = np.squeeze(fgn) # return fgn # def _hosking(self, gn): # """Generate a fGn realization using Hosking's method. # Method of generation is Hosking's method (exact method) from his paper: # Hosking, J. R. (1984). Modeling persistence in hydrological time series # using fractional differencing. Water resources research, 20(12), # 1898-1908. 
# """ # fgn = np.zeros(self.n) # phi = np.zeros(self.n) # psi = np.zeros(self.n) # # Monte carlo consideration # if self._cov is None or self._changed: # self._cov = np.array([self._autocovariance(i) for i in range(self.n)]) # self._changed = False # # First increment from stationary distribution # fgn[0] = gn[0] # v = 1 # phi[0] = 0 # # Generate fgn realization with n increments of size 1 # for i in range(1, self.n): # phi[i - 1] = self._cov[i] # for j in range(i - 1): # psi[j] = phi[j] # phi[i - 1] -= psi[j] * self._cov[i - j - 1] # phi[i - 1] /= v # for j in range(i - 1): # phi[j] = psi[j] - phi[i - 1] * psi[i - j - 2] # v *= 1 - phi[i - 1] * phi[i - 1] # for j in range(i): # fgn[i] += phi[j] * fgn[i - j - 1] # fgn[i] += np.sqrt(v) * gn[i] # return fgn # def fbm(n, hurst, length=1, method="daviesharte"): # """One off sample of fBm.""" # f = FBM(n, hurst, length, method) # return f.fbm() # def fgn(n, hurst, length=1, method="daviesharte"): # """One off sample of fGn.""" # f = FBM(n, hurst, length, method) # return f.fgn() # def times(n, length=1): # """Generate the times associated with increments n and length.""" # return np.linspace(0, length, n + 1) # # General use # # Estimate std scaling factor for h=[0.1,0.8] , n=1024 # import numpy as np # import matplotlib.pyplot as plt # import scipy.stats as stats # data_std=[] # h_arr= np.arange(0.1,0.8,0.05) # for h in h_arr: # # generate some random data # data=[] # print('h =',h) # for j in range(1000): # f = FBM(1024, h) # fbm_sample = f.fbm() # data.append(fbm_sample[4]-fbm_sample[3]) # # calc mean # meanval = np.mean(data) # print('mean =',meanval) # # calc standard deviation # data_cen = data - np.mean(data) # # or use numpy function # stdval = np.std(data,ddof=1) # note the second input to provide an unbiased estimate # print('std =',stdval) # # calc variance # varval1 = stdval**2 # # or use numpy function # varval2 = np.var(data,ddof=1) # print('varval1 =',varval1) # print('varval2 =',varval2) # 
data_std.append(stdval) # print('scale correction array h=',h_arr,'=> std_arr =',data_std) # plt.plot(data_std) # plt.plot(0.5**(h_arr*10)) ``` # fBm - 1D ``` # import matplotlib.pyplot as plt # x=[] # y=[] # for i in range(3): # h=0.25*(i+1) # f = FBM(n=1024, hurst=h, length=1, method='daviesharte') # x.append(f.times()) # y.append(f.fbm()) # plt.subplot(3, 1, 1) # plt.plot(x[0], y[0], '-') # plt.title('1D fBm - H=0.25 ; H=0.5 ; H=0.75') # plt.ylabel('H=0.25') # plt.subplot(3, 1, 2) # plt.plot(x[1], y[1], '-') # plt.xlabel('time (s)') # plt.ylabel('H=0.5') # plt.subplot(3, 1, 3) # plt.plot(x[2], y[2], '-') # plt.xlabel('time (s)') # plt.ylabel('H=0.75') # plt.tight_layout() # plt.show() # # fBm simulation below is based on the davies-harte method # # Davies, Robert B., and D. S. Harte. "Tests for Hurst effect." Biometrika 74, no. 1 (1987): 95-101 # import warnings # import numpy as np # from fbm import FBM # from fbm import fbm # from fbm import times # import matplotlib.pyplot as plt # alpha_arr=np.array([1.67,0.8]) # h_arr=0.5*alpha_arr # x=[] # y=[] # for h in h_arr: # f = FBM(n=1024, hurst=h, length=1, method='daviesharte') # scale=0.5**(h*10) # x.append(f.fbm()/scale) # y.append(f.fbm()/scale) # fig_min,fig_max=-0.5,0.5 # plt.figure(figsize=(5,10)) # plt.subplot(2, 1, 1) # plt.plot(x[0], y[0], '-') # plt.title('2D fBm') # plt.ylabel('alpha=1.67 ; H=0.835') # # plt.xlim(fig_min,fig_max) # # plt.ylim(fig_min,fig_max) # plt.subplot(2, 1, 2) # plt.plot(x[1], y[1], '-') # plt.ylabel('alpha=0.8 ; H=0.4') # # plt.xlim(fig_min,fig_max) # # plt.ylim(fig_min,fig_max) # plt.tight_layout() # plt.show() ``` # MBM - Multifractional Brownian Motion # 1D MBM ## alpha persistent = 1.67 , alpha antipersistent = 0.8 ### A self-avoiding walk with neural delays as a model of fxational eye movements Carl J. J. 
Herrmann, Ralf Metzler & Ralf Engbert ``` # import inspect # from math import gamma # import numpy as np # from mbm import MBM # alpha_arr=np.array([1.67,0.8]) # trans_time=0.02 # the transition time from persistent to antipersistent motion # h_arr=alpha_arr/2 # # Hurst function with respect to time. # def h(t): # return h_arr[0]+(h_arr[1]-h_arr[0])*(np.tanh(t-trans_time)+1)/2 # m = MBM(n=1024, hurst=h, length=1, method='riemannliouville') # t_values = m.times() # x=m.mbm() # plt.plot(t_values,x) # plt.show() ``` # 2D MBM ## According to Engbert 2017, Nature: ## alpha persistent = 1.67 , alpha antipersistent = 0.8 ## h = 0.835 , h = 0.4 ``` # import inspect # from math import gamma # import numpy as np # from mbm import MBM # alpha_arr=np.array([1.67,0.8]) # trans_time=0.02 # the transition time from persistent to antipersistent motion # h_arr=alpha_arr/2 # # Hurst function with respect to time. # def h(t): # return h_arr[0]+(h_arr[1]-h_arr[0])*(np.tanh(t-trans_time)+1)/2 # m = MBM(n=1024, hurst=h, length=1, method='riemannliouville') # # Get the times associated with the mBm # t_values = m.times() # x=[] # y=[] # for i in range(3): # x.append(m.mbm()) # y.append(m.mbm()) # fig_min,fig_max=-0.5,0.5 # plt.figure(figsize=(5,10)) # plt.subplot(3, 1, 1) # plt.plot(x[0], y[0], '-') # plt.title('Traj 1') # plt.ylabel('') # plt.subplot(3, 1, 2) # plt.plot(x[1], y[1], '-') # plt.title('Traj 2') # plt.ylabel('') # plt.subplot(3, 1, 3) # plt.plot(x[2], y[2], '-') # plt.title('Traj 3') # plt.ylabel('') # plt.tight_layout() # plt.show() # # test time # import time # # fBm simulation below is based on the davies-harte method # # Davies, Robert B., and D. S. Harte. "Tests for Hurst effect." Biometrika 74, no. 
1 (1987): 95-101 # import warnings # import numpy as np # from fbm import FBM # from fbm import fbm # from fbm import times # import matplotlib.pyplot as plt # # Function output - x,y coordinates [float] arrays of size n+1 # # h = alpha/2 ; Engbert 2017 alpha values were: alpha=1.67 for 20 ms persistent traj and alpha=0.8 for 100-400ms anti-persistent trajectory # def ocdr_fbm(bm_steps=1024,h=0.5,ocdr_period_sec=0.5,n_samples=10,fov_len_pix=8,scale_sample_step=1): # f = FBM(n=bm_steps, hurst=h, length=ocdr_period_sec, method='daviesharte') # scale_std = (ocdr_period_sec/bm_steps)**h # scale_bm_step = n_samples/bm_steps # x,y = scale_sample_step*scale_bm_step*f.fbm()/scale_std, scale_sample_step*scale_bm_step*f.fbm()/scale_std # scale to normal gausian distribution of simulation step size # max_pos_dis_arr = np.ones(x.shape[0])*fov_len_pix/2 # max_neg_dis_arr = -np.ones(x.shape[0])*fov_len_pix/2 # x = np.minimum(x,max_pos_dis_arr) # x = np.maximum(x,max_neg_dis_arr) # y = np.minimum(y,max_pos_dis_arr) # y = np.maximum(y,max_neg_dis_arr) # sample_ind=np.arange(0,bm_steps,bm_steps//n_samples) # # return x,y # return the full trajectory n+1 points # return x[sample_ind],y[sample_ind] # returns sampled trajectory array of size (n_samples+1) # # User input: # h = 0.4 # set 0.1<h<0.9 ; brownian motion: h=0.5 ; persistent motion: h>0.5 ; anti-persistent motion: h<0.5 # bm_steps = 1024 # number of small brownian motion steps # n_samples = 10 # number of samples from the brownian motion trajectory # ocdr_period_sec=0.5 # ocular drift period [sec] # fov_len_pix = 8 # fov_len_pix value corresponds with foveal pixl length and sets the motion max displacment to be +-(fov_len_pix/2) # scale_sample_step = 1 # scales the brownian motion step size std # print(t_end-t_start) # data=[] # for j in range(1000): # t_start=time.time() # # Generate 2D fractional brownian motion trajectory # x , y = ocdr_fbm(bm_steps,h,ocdr_period_sec,n_samples,max_dis_pix,scale_sample_step) # 
t_end=time.time() # data.append((t_end-t_start)*1000) # # calc mean # meanval = np.mean(data) # print('mean =',meanval) # # calc standard deviation # data_cen = data - np.mean(data) # # or use numpy function # stdval = np.std(data,ddof=1) # note the second input to provide an unbiased estimate # print('std =',stdval) # # calc variance # varval1 = stdval**2 # # or use numpy function # varval2 = np.var(data,ddof=1) # print('varval1 =',varval1) # print('varval2 =',varval2) # plt.plot(x,y) # plt.show() ``` # ICLR Ocular drift simulation with fBm # Load the FBM class - by Christopher Flynn https://github.com/crflynn/fbm MIT License Copyright (c) 2017-2018 Christopher Flynn Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # fBm - 2D # Sasha please use h = 0.4 for the 0.5 sec ocular drift period ``` # Sasha - This the function that you use to produce random fractional brownian motion trajectories # fBm simulation below is based on the davies-harte method # Davies, Robert B., and D. S. Harte. "Tests for Hurst effect." 
# Biometrika 74, no. 1 (1987): 95-101
import warnings
import numpy as np
# from fbm import FBM
# from fbm import fbm
# from fbm import times
import matplotlib.pyplot as plt
import warnings
import sys
import os
# Make the local copy of the fbm/mbm package (fbm_mbm_lic) importable.
sys.path.insert(1, os.getcwd()+'/fbm_mbm_lic')
import numpy as np
from fbm import FBM
from fbm import fbm
from fbm import times
import matplotlib.pyplot as plt

# Function output - x,y coordinate [float] arrays (sub-sampled trajectory)
# h = alpha/2 ; Engbert 2017 alpha values were: alpha=1.67 for 20 ms persistent traj and alpha=0.8 for 100-400ms anti-persistent trajectory
def ocdr_fbm(bm_steps=1024,h=0.5,ocdr_period_sec=0.5,n_samples=10,fov_len_pix=8,scale_sample_step=1):
    # Simulate one 2D ocular-drift trajectory as two independent 1D fractional
    # Brownian motions (Davies-Harte method), then sub-sample it.
    #
    # bm_steps          - number of fBm increments simulated
    # h                 - Hurst exponent (h=0.5 ordinary Brownian motion,
    #                     h>0.5 persistent, h<0.5 anti-persistent)
    # ocdr_period_sec   - total simulated drift period [sec]
    # n_samples         - requested number of samples from the fine trajectory
    # fov_len_pix       - intended max displacement bound; NOTE(review): the
    #                     clipping that used it is commented out below, so this
    #                     argument currently has no effect
    # scale_sample_step - extra multiplicative scale on the step size
    f = FBM(n=bm_steps, hurst=h, length=ocdr_period_sec, method='daviesharte')
    # (dt)**h is the per-increment std of the fBm; dividing by it normalizes
    # each simulation step to unit std before applying the user scales.
    scale_std = (ocdr_period_sec/bm_steps)**h
    scale_bm_step = n_samples/bm_steps
    x,y = scale_sample_step*scale_bm_step*f.fbm()/scale_std, scale_sample_step*scale_bm_step*f.fbm()/scale_std # scale to normal gaussian distribution of simulation step size
    # max_pos_dis_arr = np.ones(x.shape[0])*fov_len_pix/2
    # max_neg_dis_arr = -np.ones(x.shape[0])*fov_len_pix/2
    # x = np.minimum(x,max_pos_dis_arr)
    # x = np.maximum(x,max_neg_dis_arr)
    # y = np.minimum(y,max_pos_dis_arr)
    # y = np.maximum(y,max_neg_dis_arr)
    # Indices of the sub-sampled points.  NOTE(review): the number of returned
    # points is len(range(0, bm_steps, bm_steps//n_samples)), which equals
    # n_samples+1 only when bm_steps is NOT a multiple of bm_steps//n_samples
    # (e.g. bm_steps=64, n_samples=10 -> 11 points, but bm_steps=100,
    # n_samples=10 -> 10 points) - confirm this is intended.
    sample_ind=np.arange(0,bm_steps,bm_steps//n_samples)
    # return x,y # return the full trajectory n+1 points
    return x[sample_ind],y[sample_ind] # returns the sampled trajectory

# Demo: one trajectory per Hurst exponent, from anti-persistent to persistent.
for h in [0.25, 0.5,0.75,0.98]:
    # User input:
    # h = 0.4 # set 0.1<h<0.9 ; brownian motion: h=0.5 ; persistent motion: h>0.5 ; anti-persistent motion: h<0.5
    bm_steps = 64 # number of small brownian motion steps
    n_samples = 10 # number of samples from the brownian motion trajectory
    ocdr_period_sec=0.5 # ocular drift period [sec]
    fov_len_pix = 8 # fov_len_pix value corresponds with foveal pixel length and sets the motion max displacement to be +-(fov_len_pix/2)
    scale_sample_step = 1 # scales the brownian motion step size std
    # Generate 2D fractional brownian motion trajectory
    x , y = ocdr_fbm(bm_steps,h,ocdr_period_sec,n_samples,fov_len_pix,scale_sample_step)
    plt.figure()
    plt.plot(x,y,'-o') # uncomment to plot the trajectory
    plt.title(str(h))
    plt.show()

# Timing experiments (IPython cell magics - only runnable inside IPython/Jupyter).
%timeit x , y = ocdr_fbm(bm_steps,h,ocdr_period_sec,n_samples,fov_len_pix,scale_sample_step)
30e-3*5e4
7e-3*5e4
%timeit x , y = ocdr_fbm(bm_steps,h,ocdr_period_sec,5,fov_len_pix,scale_sample_step)
%timeit nana=np.sin(38)
```
github_jupyter
# Resumo, Teoria e Prática - Power Iteration Method > Gil Miranda<br> > Fontes bibliográficas: * Bernardo Costa. (2018). <i>Notebooks de aula</i>. * Trefethen, L. & Bau, D. (1997) <i>Numerical Linear Algebra</i>. SIAM --- ``` import numpy as np import matplotlib.pyplot as plt ``` ## Calculando autovetores > Um dos algoritmos mais clássicos de cálculo de autovetores é "multiplicar e normalizar": tome um vetor "qualquer" $u_0$ e aplique a matriz $A$, obtendo $w_1 = Au_0$. Normalize $w_1$, ou seja, divida pela sua norma para obter um vetor unitário de mesma direção, e chame-o de $u_1$. Repita: $w_2 = Au_1$, e $u_2 = \frac{w_2}{N(w_2)}$. E assim por diante. Em geral (isso depende de $u_0$), a sequência dos $u_n$ converge para um autovetor $u$ correspondente ao autovalor de $A$ de maior módulo. Semana14-Parte1-Autovetores e autovalores Completo.ipynb - [Bernardo Freitas Paulo da Costa](http://www.im.ufrj.br/bernardofpc) Esse metódo é conhecido como Power Iteration Method, ou Metódo das Potências. O metódo converge sob certas condições, como a matriz $\boldsymbol{A}$ ser simétrica e possuir um maior autovalor em módulo.<br> Dada estas condições, o metódo consegue fornecer uma boa aproximação do 'maior' autovetor e seu autovalor. ## Preparativos $\renewcommand{\blacksquare}{\texttt{Q.E.D.}}$ ###### Definição: Autovalor e autovetor dominante Seja $\{\lambda_i\}_{i \in \{1,\dots,n\}}$ autovalores de uma matriz $\boldsymbol{A} \, \scriptsize{n \times n}$.<br> $\lambda_k \in \{\lambda_i\}_{i \in \{1,\dots,n\}}$ é dito autovalor dominante de $\boldsymbol{A}$ se: $$ |\lambda_k| > |\lambda_i|, \forall i \in \{1,\dots,n\}\setminus \{k\} $$ O autovetor $\boldsymbol{v_k}$ correspondente ao autovalor $\lambda_k$ dominante é dito autovetor dominante. 
###### Produto de vetor linha e sua transposta
Tomando um vetor $\boldsymbol{v} \in \mathbb{R}^n$, vamos utilizar o seguinte fato $$ \boldsymbol{v} \cdot \boldsymbol{v^T} = ||\boldsymbol{v}||^2 $$ Onde $\odot$ denota a operação de produto de matrizes, para facilitar a notação escreveremos apenas $\boldsymbol{vv^T}$
---
Proof: $\boldsymbol{v}$ é uma matriz $\scriptsize{1 \times n}$, $\boldsymbol{v^T}$ é então uma matriz $\scriptsize{n \times 1}$, logo o produto será uma matriz $\scriptsize{1\times1}$, que pode ser visto como um escalar.<br> Como estamos lidando com uma matriz e sua transposta, teremos a seguinte relação: seja $a_{i,j}$ o elemento associado a linha $i$ e coluna $j$ de $\boldsymbol{v}$, então este mesmo elemento estará em $b_{j,i}$, elemento de $\boldsymbol{v^T}$.<br> Seja $u_{1,1}$ o elemento único da matriz produto $\boldsymbol{U}$ e utilizando o fato que $a_{i,j} = b_{j,i}$, temos: $$ \boldsymbol{U} = \boldsymbol{vv^T}\\ u_{1,1} = a_{1,1} \cdot b_{1,1} + a_{1,2} \cdot b_{2,1} + \dots + a_{1,n} \cdot b_{n,1}\\ u_{1,1} = \sum_{j = 1}^{n} a_{1,j}^2 \\ \therefore u_{1,1} = ||\boldsymbol{v}||^2\\ $$ <div style="text-align: right">$\blacksquare$</div>
---
#### Calculando o autovalor - O quociente de Rayleigh
Seja $\boldsymbol{v}$ um autovetor associado a uma matriz $\boldsymbol{A}$ de transformação linear, vamos olhar para seu autovalor $$ \boldsymbol{Av} = \lambda \boldsymbol{v}\\ \boldsymbol{v^T A v} = \boldsymbol{v^T} \lambda \boldsymbol{v} \\ \boldsymbol{v^T A v} = \lambda (\boldsymbol{v^T v}) \\ \boldsymbol{v^T A v} = \lambda ||\boldsymbol{v}||^2 \\ \therefore \lambda = \frac{\boldsymbol{v^T A v}}{||\boldsymbol{v}||^2} $$ Esta aproximação, chamada de quociente de Rayleigh provém de um teorema creditado a John William Rayleigh (1842-1919).
Esta fórmula será útil para estimarmos o autovalor e criar um critério de parada ``` ## Implementação do algoritmo acima para estimar um valor aproximado para o autovalor associado ## ao autovetor v da matriz A def evalue_estimate(A,v): norm2 = np.linalg.norm(v)**2 # Norma ao quadrado do vetor v r = v.T@A@v # v^T A v return (r / norm2) ``` ###### Um critério de parada Tendo em mãos um candidato a autovetor $\boldsymbol{v}$ da matriz $\boldsymbol{A}$ como podemos verificar se este é de fato um autovetor?<br> Bom, se for um autovetor, então a seguinte relação é verdadeira: $$ \boldsymbol{Av} = \lambda \boldsymbol{v} \\ \boldsymbol{Av} - \lambda \boldsymbol{v} = 0 \\ $$ Assumindo que sabemos quem é a matriz, e tendo o candidato a autovetor, podemos calcular seu autovalor pelo quociente de Rayleigh, caso seja realmente um autovetor então a segunda relação descrita nos fornece um bom critério de parada, como estamos lidando com metódos numéricos, assumimos uma tolerância a erro $\epsilon$ $$ ||\boldsymbol{Av} - \lambda \boldsymbol{v}|| < \epsilon \\ $$ Temos agora um critério de parada para o algoritmo a ser construído ## O Algoritmo Seja $\boldsymbol{A} \in \mathbb{R^{n\times n}}$ uma matriz que admita uma base de autovetores, vamos tomar uma matriz simétrica e que possua autovetor e autovalor dominantes, então é possível construir uma sequência que converge para o autovetor e autovalor dominante da transformação linear<br> <br>Seja $\boldsymbol{u_0} \in \mathbb{R}$ um vetor qualquer, tomamos $\boldsymbol{x_0} = \frac{ \boldsymbol{u_0}}{||\boldsymbol{u_0}||}$, o próximo termo da sequência é dado aplicando a transformação em $\boldsymbol{x_0}$ e normalizando $$ \boldsymbol{x_1} = \boldsymbol{A}\frac{\boldsymbol{x_0}}{||\boldsymbol{x_0}||}\\ \vdots\\ \boldsymbol{x_{k+1}} = \boldsymbol{A}\frac{\boldsymbol{x_k}}{||\boldsymbol{x_k}||} = \boldsymbol{A^k}\frac{\boldsymbol{x_0}}{||\boldsymbol{x_0}||}\\ k \to \infty \implies \boldsymbol{x_{k+1}} \to \boldsymbol{v} $$ Onde 
$\boldsymbol{v}$ é o autovetor dominante --- Proof: Como a matriz admite base de autovetores, tomamos então esta base $\{\boldsymbol{v_1},\dots , \boldsymbol{v_n}\}$<br> Dado um vetor $\boldsymbol{x_0}$, podemos escrever como combinação linear dos vetores da base $$ \boldsymbol{x_k} = c_1 \boldsymbol{v_1} + c_2 \boldsymbol{v_2} + c_3 \boldsymbol{v_3} + \dots + c_n \boldsymbol{v_n} $$ Aplicando a transformação linear $k$ vezes e utilizando o fato que $\boldsymbol{v_i}$ é autovetor $$ \boldsymbol{A^k} \boldsymbol{x_k} = \lambda_1^k c_1 \boldsymbol{v_1} + \lambda_2^k c_2 \boldsymbol{v_2} + \lambda_3^k c_3 \boldsymbol{v_3} + \dots + \lambda_n^k c_n \boldsymbol{v_n} $$ Como a soma de reais é comutativa, podemos reorganizar os termos de modo que $\lambda_1$ seja o autovalor dominante, e fatorando este autovalor teremos $$ \boldsymbol{A^k} \boldsymbol{x_k} = \lambda_1^k \left(c_1 \boldsymbol{v_1} + \frac{\lambda_2^k}{\lambda_1^k} c_2 \boldsymbol{v_2} + \frac{\lambda_3^k}{\lambda_3^k} c_3 \boldsymbol{v_3} + \dots + \frac{\lambda_n^k}{\lambda_1^k} c_n \boldsymbol{v_n}\right) $$ Como $\lambda_1$ é autovalor dominante, temos: $$ lim_{k\to \infty} \left|\frac{\lambda_i^k}{\lambda_1^k}\right| = 0 $$ Portanto $$ \boldsymbol{A^k} \boldsymbol{x_k} = \lambda_1^k \left(c_1 \boldsymbol{v_1} + 0 c_2 \boldsymbol{v_2} + 0 c_3 \boldsymbol{v_3} + \dots + 0 c_n \boldsymbol{v_n}\right) \\ \boldsymbol{A^k} \boldsymbol{x_k} = \lambda_1^k c_1 \boldsymbol{v_1} $$ E $$ \boldsymbol{x_{k+1}} = \boldsymbol{A} \frac{\boldsymbol{x_k}}{||\boldsymbol{x_k}||} = \lambda \frac{\boldsymbol{x_k}}{||\boldsymbol{x_k}||} $$ $ \therefore \boldsymbol{x_{k+1}}$ é um autovetor com módulo igual a seu autovalor <div style="text-align: right">$\blacksquare$</div> --- ``` # Implementação númerica def power_iter(A, tol=1e-12): # Recebe uma matriz A quadrada e uma tolerância de erro n,m = np.shape(A) assert n==m, 'Matriz A deve ser quadrada' u = np.random.rand(n) # Gera um vetor qualquer control = False while control == 
False: u = A@u # Aplica a transformação linear u_norm = np.linalg.norm(u) u *= 1/u_norm # Normaliza l = evalue_estimate(A,u) # Estimador de autovalor err = np.linalg.norm(A@u - l*u) if(err < tol): control = True return u,l ``` ## Testando o metódo Para testar se nosso metódo está funcionando bem, podemos utilizar da função `np.linalg.eig` da biblioteca `numpy`, por ser uma biblioteca númerica muito usada, podemos depositar uma 'confiança' e usar como uma referência para testar nosso algoritmo.<br> A ideia aqui é simples, gerar aleatoriamente uma matriz $\boldsymbol{A}$ e encontrar um autovetor e um autovalor $p\_v$ e $p\_l$ pelo metódo das potências, e utilizar `np.linalg.eig` para retornar uma lista de todos os autovetores e autovalores da matriz, após isso pegamos o maior autovalor em módulo e seu autovetor associado e comparamos com $p\_v$ e $p\_l$, dada uma tolerância de $\scriptsize{10 ^{-12}}$ ``` ## Gerando 500 matrizes de tamanhos diferentes e comparando com o metódo do numpy, tol = 10^-8 for i in range(500): n = np.random.rand(1) t = np.random.rand(1)*100 n = int(n*t) if n == 0: n = 1 A = np.random.rand(n,n) # Gera a matriz p_v, p_l = power_iter(A) # autovetor e autovalor dominante l, w = np.linalg.eig(A) # numpy l_i = np.argmax(l) # indice do autovalor dominante l = l[l_i] # autovalor dominante pelo numpy w = w[:,l_i] # autovetor dominante pelo numpy assert np.allclose(abs(w), abs(p_v), rtol=1e-8, atol=0) ```
github_jupyter
```
#import the library used to query a website
# NOTE(review): this cell is Python 2 code (urllib2, statement-form print);
# it will not run under Python 3 (urllib2 -> urllib.request, print()).
import urllib2
from bs4 import BeautifulSoup
#wiki = "https://en.wikipedia.org/wiki/List_of_state_and_union_territory_capitals_in_India"
wiki = "https://en.wikipedia.org/wiki/List_of_World_Heritage_Sites_in_South_Korea"
#Query the website and return the html to the variable 'page'
page = urllib2.urlopen(wiki)
#Parse the html in the 'page' variable, and store it in Beautiful Soup format
soup = BeautifulSoup(page)
print soup.prettify()
##Find all the links within page’s <a> tags. <a href> </a> stores web links
soup.find_all("a")
all_links=soup.find_all("a")
for l in all_links:
    print l.get("href")
all_tables=soup.find_all('table')
#extact information between <table> </table> tags
right_table=soup.find('table', class_='content')
right_table
wiki = "http://en.wikipedia.org/wiki/List_of_postcode_districts_in_the_United_Kingdom"
header = {'User-Agent': 'Mozilla/5.0'} #Needed to prevent 403 error on Wikipedia
req = urllib2.Request(wiki,headers=header)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
area = ""
district = ""
town = ""
county = ""
table = soup.find("table", { "class" : "wikitable sortable" })
print table
# First pass: just pull the four columns of each row into variables.
for row in table.findAll("tr"):
    cells = row.findAll("td") #For each "tr", assign each "td" to a variable.
    if len(cells) == 4:
        area = cells[0].find(text=True)
        district = cells[1].findAll(text=True)
        town = cells[2].find(text=True)
        county = cells[3].find(text=True)
# Second pass: same extraction, but write one CSV line per postcode district.
f = open('output.csv', 'w')
for row in table.findAll("tr"):
    cells = row.findAll("td") #For each "tr", assign each "td" to a variable.
    if len(cells) == 4:
        area = cells[0].find(text=True)
        district = cells[1].findAll(text=True)
        town = cells[2].find(text=True)
        county = cells[3].find(text=True)
        #district can be a list of lists, so we want to iterate through the top level lists first...
        for x in range(len(district)):
            #For each list, split the string
            postcode_list = district[x].split(",")
            #For each item in the split list...
            for i in range(len(postcode_list)):
                #Check it's a postcode and not other text
                if (len(postcode_list[i]) > 2) and (len(postcode_list[i]) <= 5):
                    #Strip out the "\n" that seems to be at the start of some postcodes
                    write_to_file = area + "," + postcode_list[i].lstrip('\n').strip() + "," + town + "," + county + "\n"
                    print write_to_file
                    f.write(write_to_file)
f.close()
# Same fetch as above, but for the South Korea World Heritage Sites page.
wiki = "https://en.wikipedia.org/wiki/List_of_World_Heritage_Sites_in_South_Korea"
header = {'User-Agent': 'Mozilla/5.0'} #Needed to prevent 403 error on Wikipedia
req = urllib2.Request(wiki,headers=header)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
area = ""
district = ""
town = ""
county = ""
table = soup.find("table", { "class" : "wikitable sortable" })
print table
import pandas as pd
import json
#examine the json file online: https://codebeautify.org/jsonviewer
df = pd.read_json("skorea.json")
df.head()
df.shape
web = pd.read_json('https://api.github.com/repos/pydata/pandas/issues?per_page=5')
web.head()
#http://pandas.pydata.org/pandas-docs/version/0.20/io.html#io-read-html
#http://www.basketball-reference.com/draft/NBA_2014.html
import pandas as pd
#import the library used to query a website
import urllib2
from bs4 import BeautifulSoup
import scipy as sp
import numpy as np
# NOTE(review): sp.randn relies on the old top-level scipy namespace; modern
# scipy removed it (use np.random.randn instead).
s = sp.randn(100) # Hundred random numbers from a standard Gaussian
print len(s)
print(s.mean())
#https://oneau.wordpress.com/2011/02/28/simple-statistics-with-scipy/#descriptive-statistics
from scipy import stats
n, min_max, mean, var, skew, kurt = stats.describe(s)
print("Minimum: {0:8.6f} Maximum: {1:8.6f}".format(min_max[0], min_max[1]))
print(mean)
import pandas as pd
import html5lib
uss=pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states')
print(type(uss)) # #this is a list of dataframes
u = uss[0] #grab dataframe from index 0
print(type(u))
url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
dfs = pd.read_html(url) #this is a list of dataframes
print(type(dfs))
df = dfs[0] #list of dataframes
print(df)
whsSK=pd.read_html('https://en.wikipedia.org/wiki/List_of_World_Heritage_Sites_in_South_Korea')
whs_sk = whsSK[0] #grab dataframe from index 0
print(whs_sk)
#page with 1+ table
whsPH=pd.read_html('https://en.wikipedia.org/wiki/List_of_World_Heritage_Sites_in_the_Philippines')
whs_phl = whsPH[2] #grab dataframe from index 1, table 2 of the tentative WHS sites
print(whs_phl)
whs_phl.shape
```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt from qucat import Network, L,J,C,R from numpy import pi ``` # Modelling a distributed multi-mode resonator In this section we use QuCAT to study the convergence of parameters in the first order Hamiltonian $\hat{H} = \sum_m\sum_{n\ne m} (\hbar\omega_m-A_m-\frac{\chi_{mn}}{2})\hat{a}_m^\dagger\hat{a}_m -\frac{A_m}{2}\hat{a}_m^\dagger\hat{a}_m^\dagger\hat{a}_m\hat{a}_m -\chi_{mn}\hat{a}_m^\dagger\hat{a}_m\hat{a}_n^\dagger\hat{a}_n$ of a transmon qubit coupled to a multi-mode resonator. As done experimentally in https://arxiv.org/pdf/1704.06208.pdf and theoretically in https://arxiv.org/pdf/1701.05095.pdf Using a length of coplanar waveguide terminated with engineered boundary conditions is a common way of building a microwave resonator. One implementation is a $\lambda/4$ resonator terminated on one end by a large shunt capacitor, acting as a near-perfect short circuit for microwaves such that only a small amount of radiation may enter or leave the resonator. The shunt capacitor creates a voltage node, and at the open end the voltage is free to oscillate, and can couple, as shown below to a transmon qubit. ![alt text](graphics/MMUSC_circuit.png "") *optical micrograph from https://arxiv.org/abs/1704.06208* This resonator hosts a number of normal modes, justifying its lumped element equivalent circuit shown in **(c)**, a series of LC oscillators with increasing resonance frequency. We will use QuCAT to track the evolution of different characteristics of the system as the number of considered resonator modes $N$ increases. 
We start by defining some constants
```
# fundamental mode frequency of the resonator
f0 = 4.603e9
w0 = f0*2.*pi
# characteristic impedance of the resonator
Z0 = 50
# Josephson energy (in Hertz)
Ej = 18.15e9
# Coupling capacitance
Cc = 40.3e-15
# Capacitance to ground
Cj = 5.13e-15
# Capacitance of all resonator modes
C0 = pi/4/w0/Z0
# Inductance of first resonator mode
L0 = 4*Z0/pi/w0
```
we can then generate a list of Qcircuits, each one corresponding to a different number of resonator modes $N$
```
# initialize list of Qcircuits for different number of resonator modes
qcircuits = []
# Maximum number of resonator modes we will be considering
N_max = 6
for N in range(1,N_max+1):
    # Initialize list of components for Transmon and coupling capacitor
    # (node N+2 is the ground node)
    netlist = [
    J(N+2,1,Ej,use_E=True),
    C(N+2,1,Cj),
    C(1,2,Cc)]
    for m in range(N):
        # Nodes of m-th oscillator
        node_minus = 2+m
        node_plus = (2+m+1)
        # Inductance of m-th oscillator: with the shared C0 this places the
        # m-th mode at (2m+1) times the fundamental frequency, as expected
        # for the odd harmonics of a lambda/4 resonator.
        Lm = L0/(2*m+1)**2
        # Add oscillator to netlist
        netlist = netlist + [
        L(node_minus,node_plus,Lm),
        C(node_minus,node_plus,C0)]
    # Add Qcircuit to our list
    qcircuits.append(Network(netlist))
```
Note that $N+2$ is the index of the ground node. We can now access some parameters of the system. To get an initial idea of the mode distribution in the circuit, let us display the mode resonance frequencies and anharmonicities of the transmon coupled to 6 resonator modes
```
f,k,A,chi = qcircuits[-1].f_k_A_chi(pretty_print=True)
```
Mode `1`, is the only one with a significant anharmonicity and is thus the qubit-like mode.
A list of this modes frequencies, for varying number of resonator modes is given by
```
# Index 1 selects the transmon-like mode identified in the previous cell.
transmon_frequencies = [cir.eigenfrequencies()[1] for cir in qcircuits]
```
and the anharmonicity of the transmon, computed from first order perturbation theory is
```
transmon_anharmonicities = [cir.anharmonicities()[1] for cir in qcircuits]
```
Finally the Lamb shift, or shift in the transmon frequency resulting from the zero-point fluctuations of the resonator modes, is given, following the equation shown at the top of the page, by the sum of half the cross-Kerr couplings between the transmon mode and the others
```
lamb_shifts = []
for cir in qcircuits:
    lamb_shift = 0
    # K[m][n] is used below as the cross-Kerr coupling between modes m and n.
    K = cir.kerr()
    N_modes = len(K[0])
    for m in range(N_modes):
        if m!=1:
            # Accumulate half the cross-Kerr of the transmon (mode 1) with
            # every other mode, per the equation at the top of the page.
            lamb_shift = lamb_shift + K[1][m]/2
    lamb_shifts.append(lamb_shift)
```
Finally, we can plot these parameters
```
# Define plot with 3 subplots
fig,ax = plt.subplots(3,1,figsize = (4,8), sharex = True)
# plot frequency of the transmon
ax[0].plot(range(1,N_max+1),transmon_frequencies,'o')
# plot anharmonicity of the transmon
ax[1].plot(range(1,N_max+1),transmon_anharmonicities,'o')
# plot Lamb shift of the transmon
ax[2].plot(range(1,N_max+1),lamb_shifts,'o')
ax[2].set_xlabel("Number of modes")
ax[0].set_ylabel(" Freq (Hz)")
ax[1].set_ylabel("Anharm (Hz)")
ax[2].set_ylabel("Lamb shift (Hz)")
plt.show()
```
github_jupyter
## Imports and parameters ``` import os from scipy.io import loadmat,savemat,mmread import numpy as np import matplotlib.pyplot as plt from scipy.stats import mode from matplotlib.colors import LinearSegmentedColormap,LogNorm from voxnet.plotting import * from voxnet.utilities import h5read from skimage.measure import find_contours import h5py inj_site='VISp' # for virtual injections inj_radius=1 # units of voxels inj_stride=2 int_axis=1 plot_type = 'low_rank' #save_stem='allvis_sdk_free_noshell' save_stem='extra_vis_friday_harbor' contour_list=[425,533,402] lambda_str = '1e5' output_dir='integrated_gaussian_%s_norm' % lambda_str do_int_plots=True base_dir=os.path.join('../connectivities',save_stem) fn_matrices=os.path.join(base_dir, save_stem + '.mat') fig_dir=os.path.join(base_dir, "figures") int_plot_dir=os.path.join(fig_dir,output_dir) try: os.makedirs(int_plot_dir) except OSError: pass fout_virt=os.path.join(int_plot_dir,'virt_0.90_gaussian_lambda100.vtk') fout_real=os.path.join(int_plot_dir,'real_0.90_gaussian_lambda100.vtk') select_injections=[74, 76, 89, 234, 236, 238] select_colors=["myred", "mygreen", "myblue", "myred", "mygreen","myblue"] view_paths_fn = r'/home/kameron/work/allen/data/TopView/top_view_paths_10.h5' bbox_100 = [132, 80, 114] ``` ## Load data ``` print "Making plots for " + save_stem print output_dir ## Load data mat=loadmat(fn_matrices) locals().update(mat) #X=h5read(os.path.join(base_dir, save_stem + '_X.h5')) X=h5read(os.path.join(base_dir, save_stem + '_X_norm_by_Y.h5')) #X=mmread(os.path.join(base_dir, save_stem + '_X.mtx')) #Y_ipsi=h5read(os.path.join(base_dir, save_stem + '_Y_ipsi.h5')) Y_ipsi=h5read(os.path.join(base_dir, save_stem + '_Y_norm_by_Y.h5')) #Y_ipsi=mmread(os.path.join(base_dir, save_stem + '_Y_ipsi.mtx')) W_ipsi=h5read(os.path.join(base_dir, 'W_ipsi_%s.h5' % lambda_str)) # antero #W_ipsi=h5read(os.path.join(base_dir, 'W_ipsi_%s.h5' % lambda_str)).T # retro #W_ipsi=h5read(os.path.join(base_dir, 'W_low_rank_res_160.h5')) 
# residual #W_ipsi=h5read(os.path.join(base_dir, 'W_low_rank_160.h5')) # low rank #W_ipsi=h5read(os.path.join(base_dir, 'W_norm_ipsi_10.h5')) # normalized view_paths_file = h5py.File(view_paths_fn, 'r') view_lut = view_paths_file['view lookup'][:] view_paths = view_paths_file['paths'][:] view_paths_file.close() print "W dims: %d x %d" % (W_ipsi.shape[0], W_ipsi.shape[1]) print "Data all loaded" def map_to_surface(im, lut, paths, scale = 1, fun = np.max, set_nan = True): ''' maps a gridded voxel image onto the cortical surface ''' old_dims = im.shape # deal with scaling through re-indexing def remap_coord(c, old_dims, scale): new_dims = (1320, 800, 1140) # hard-coded in because for i,dim in enumerate(new_dims): assert np.floor(old_dims[i] * scale).astype(int) == dim, \ "dimension mismatch" #new_dims = tuple(np.round(np.array(old_dims) * scale).astype(int)) (I,J,K) = np.unravel_index(c, new_dims) I = np.floor(I / scale).astype(int) J = np.floor(J / scale).astype(int) K = np.floor(K / scale).astype(int) return np.ravel_multi_index((I,J,K), old_dims) # calculate output array output_pd = np.zeros(lut.shape, dtype=im.dtype) # all pixels in surface view with a stream line ind = np.where(lut > -1) ind = zip(ind[0], ind[1]) for curr_ind in ind: curr_path_id = lut[curr_ind] curr_path = paths[curr_path_id, :] if scale != 1: curr_path_rescale = remap_coord(curr_path[np.nonzero(curr_path)], old_dims, scale) else: curr_path_rescale = curr_path[np.nonzero(curr_path)] # image along path curr_pd_line = im.flat[curr_path_rescale] output_pd[curr_ind] = fun(curr_pd_line) if set_nan is True: output_pd[lut == -1] = np.nan return output_pd ``` ## Wavelets! 
``` test_col = map_to_regular_grid(W_ipsi[:,124], voxel_coords_target_ipsi, bbox_100) test_sfc = map_to_surface(test_col, view_lut, view_paths, scale=10) test_col.shape %matplotlib inline # fig = plt.figure(figsize = (10,10)) ax = fig.add_subplot(121) h = ax.imshow(test_sfc) ax = fig.add_subplot(122) h = ax.imshow(np.max(test_col, axis=int_axis)) from numpy.fft import rfftn, irfftn test_fft = rfftn(test_col) (nx,ny,nz) = test_fft.shape t = 1 for (x,y,z), value in np.ndenumerate(test_fft): l = np.power(2,2*(x/float(nx) + y/float(ny) + z/float(nz))) test_fft[x,y,z] = value * np.exp(-t*l) test_fft_r = irfftn(test_fft) print test_col.max() print test_fft_r.max() fft_sfc = map_to_surface(test_fft_r, view_lut, view_paths, scale=10, set_nan=True) %matplotlib inline # fig = plt.figure(figsize = (10,10)) ax = fig.add_subplot(121) h = ax.imshow(test_sfc) ax = fig.add_subplot(122) h = ax.imshow(fft_sfc) type(test_fft_r) import pywt db1 = pywt.Wavelet('db8') A = pywt.wavedecn(test_col, db1, mode = 'zero') pywt.dwt_max_level(test_col.shape[2], db1) def wthresh(a, thresh): #Soft threshold res = np.abs(a) - thresh return np.sign(a) * ((res > 0) * res) eps = 1e-4 for i in range(len(B)): if type(B[i]) is dict: for key in B[i].keys(): #B[i][key] = wthresh(B[i][key], eps) thresh_data = pywt.threshold(B[i][key], value=eps) B[i][key] = ndsparse.sparse(thresh_data).compress() elif type(B[i]) is np.ndarray: pass #B[i] = wthresh(B[i], eps) test_recn = pywt.waverecn(B, db1, mode = 'zero') test_recn.shape def wavelet_laplacian(coeffs): new_coeffs = list() # initialize but doesn't deep copy for i in range(len(coeffs)): new_coeffs.append(coeffs[i].copy()) # need to copy if i == 0: pass else: for key in new_coeffs[i].keys(): new_coeffs[i][key] = new_coeffs[i][key] * np.power(2, 2*i) return new_coeffs def wavelet_add(coeffs1, coeffs2): new_coeffs = list() # initialize but doesn't deep copy for i in range(len(new_coeffs)): new_coeffs[i].append(coeffs1[i].copy()) # need to copy if i == 0: 
new_coeffs[i] = coeffs1[i] + coeffs2[i] # ndarray else: for key in new_coeffs[i].keys(): new_coeffs[i][key] = coeffs1[i][key] + coeffs2[i][key] return new_coeffs try: del A except Exception: pass A = pywt.wavedecn(test_col, db1, mode = 'zero') # iterate the heat-like equation u(t) = exp(L t) u(0) t = 10 for i in range(len(A)): l = np.power(2,2*(i+1)) # eigenvalue of "wavelet laplacian" if i == 0: #A[i] = A[i] * (1 - 0.1 * np.power(2, 2*(i+1))) A[i] = A[i] * np.exp(-t * l) else: for key in A[i].keys(): A[i][key] = A[i][key] * np.exp(-t * l) #L = wavelet_laplacian(A) #A = wavelet_add(A, L) Lu_recn = pywt.waverecn(A, db1, mode = 'zero') Lu_recn.max() test_Lu_sfc_r_2 = map_to_surface(Lu_recn, view_lut, view_paths, scale=10) fig = plt.figure(figsize = (10,10)) ax = fig.add_subplot(111) h = ax.imshow(test_sfc) fig.colorbar(h) fig = plt.figure(figsize = (10,10)) ax = fig.add_subplot(111) h = ax.imshow(fft_sfc) fig.colorbar(h) fig = plt.figure(figsize = (10,10)) ax = fig.add_subplot(111) h = ax.imshow(test_Lu_sfc_r_2) fig.colorbar(h) fig = plt.figure(figsize = (10,10)) ax = fig.add_subplot(111) h = ax.imshow(test_Lu_sfc_r_2 - test_Lu_sfc_r) fig.colorbar(h) np.max(np.abs(test_recn - test_col)) test_sfc = map_to_surface(test_col, view_lut, view_paths, scale=10) test_sfc_r = map_to_surface(test_recn, view_lut, view_paths, scale=10) %matplotlib inline fig = plt.figure(figsize = (10,10)) ax = fig.add_subplot(111) h = ax.imshow(test_sfc) fig.colorbar(h) fig2 = plt.figure(figsize = (10,10)) ax2 = fig2.add_subplot(111) h2 = ax2.imshow(test_sfc_r) fig2.colorbar(h2) fig3 = plt.figure(figsize = (10,10)) ax3 = fig3.add_subplot(111) h3 = ax3.imshow(test_sfc_r - test_sfc) fig3.colorbar(h3) np.nanmax(np.abs(test_sfc_r - test_sfc)) ``` ## Start plotting ``` ## Voxel lookup tables inj_site_id = source_ids[np.where(source_acro==inj_site)] coord_vox_map_source = index_lookup_map(voxel_coords_source) coord_vox_map_target_contra = index_lookup_map(voxel_coords_target_ipsi) 
coord_vox_map_target_ipsi = index_lookup_map(voxel_coords_target_contra) ## Compute region annotation rearrange_2d = lambda(arr): arr #rearrange_2d=lambda(arr): np.fliplr(arr) #rearrange_2d=lambda(arr): np.swapaxes(arr,0,1) # remap labels new_labels = col_label_list_source.copy() new_label_map = [] for i,label in enumerate(np.unique(new_labels)): new_labels[new_labels == label] = i+1 new_label_map.append(label) label_grid = map_to_regular_grid(new_labels, voxel_coords_source).squeeze() label_grid[label_grid==0] = np.nan label_grid_2d = mode(label_grid, axis=int_axis)[0].squeeze() label_grid_2d[label_grid_2d==0] = np.nan label_unique = np.unique(new_labels) label_grid_2d = rearrange_2d(label_grid_2d) ## Compute some region contours and setup helper functions contours = find_contours(label_grid_2d, 385.5) # this threshold just happens # to work ok for visual areas def plot_region_contours(): ''' Convenience function that plots some region boundaries ''' for n, contour in enumerate(contours): ax.plot(contour[:, 1], contour[:, 0], linewidth=1, c='gray') def draw_region_labels(): ''' Convenience function that draws region labels ''' for i,label in enumerate(np.unique(col_label_list_source)): x,y = centroid_of_region_2d(label_grid_2d, i+1) # region_name=source_acro[source_ids==label_lookup[newlab]][0][0] region_name=source_acro[source_ids==label][0][0] # print "%s centroid at (%d, %d)" % (region_name,x,y) plt.annotate(region_name, xy=(x-1., y)) ## Plot region annotation fig,ax=plt.subplots() ax.imshow(label_grid_2d, cmap=plt.get_cmap('Accent'), interpolation='none') plt.hold(True) draw_region_labels() plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off') plt.xlabel('center - right', fontsize=24) plt.ylabel('posterior - anterior', fontsize=24) plt.savefig(os.path.join(int_plot_dir,"region_names.png")) plt.close() ## Build virtual injections Xvirt,inj_centers = 
build_injection_vectors(voxel_coords_source, coord_vox_map_source, col_label_list_source, inj_site_id, inj_radius, inj_stride) num_virt=Xvirt.shape[1] if num_virt < 1: raise Exception("No virtual injections fit!") ## Compute virtual projections Yvirt_ipsi=np.dot(W_ipsi,Xvirt) ## Map to 3d grid Xvirt_grid = map_to_regular_grid(Xvirt, voxel_coords_source) Yvirt_ipsi_grid = map_to_regular_grid(Yvirt_ipsi, voxel_coords_target_ipsi) Xreal_grid = map_to_regular_grid(X, voxel_coords_source) Yreal_ipsi_grid = map_to_regular_grid(Y_ipsi, voxel_coords_target_ipsi) Xvirt_int_grid = np.sum(Xvirt_grid, axis=int_axis) Yvirt_ipsi_int_grid = np.sum(Yvirt_ipsi_grid, axis=int_axis) ## Save VTKs --- volumetric data print "Saving VTKs" save_as_vtk_old(fout_virt,Xvirt_grid,Yvirt_ipsi_grid, voxel_coords_source,voxel_coords_target_ipsi) save_as_vtk_old(fout_real,Xreal_grid,Yreal_ipsi_grid, voxel_coords_source,voxel_coords_target_ipsi) print "VTKs saved." ## Plot virtual injections def plot_integrated(fig,ax,inj,proj_cname,inj_cname,Xgrid,Ygrid): cax=ax.imshow(rearrange_2d(Ygrid[:,:,inj]), cmap=plt.get_cmap(proj_cname), clim=(0.0,0.03), #clim=(-0.003,0.003), #norm=LogNorm(vmin=1e-3), interpolation='none') cbar = fig.colorbar(cax) tmp=rearrange_2d(Xgrid[:,:,inj]) masked_tmp=np.ma.masked_where(tmp==0.0,tmp) ax.imshow(masked_tmp, cmap=plt.get_cmap(inj_cname), clim=(0.0,0.3), interpolation='none') return if do_int_plots: for inj in range(num_virt): y_inj=inj_centers[1,inj] fig,ax=plt.subplots() # Reds: linear colormap for connectivities plot_integrated(fig, ax, inj, 'Reds', 'Blues', Xvirt_int_grid,Yvirt_ipsi_int_grid) # PuOr: diverging colormap for resids # plot_integrated(fig,ax,inj,'PuOr','Blues', # Xvirt_int_grid,Yvirt_ipsi_int_grid) plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off') plt.xlabel('center - right', fontsize=24) plt.ylabel('posterior - anterior', fontsize=24) plt.title('depth y = %d' % y_inj) 
plt.hold(True) plot_region_contours() draw_region_labels() # ax.imshow(edges, # cmap=plt.get_cmap('gray_r'),interpolation='none', # alpha=0.1) fig_file=os.path.join(int_plot_dir, "int_virt_inj%d.png" % inj) plt.savefig(fig_file) plt.close() print fig_file ## Setup select injection colors cdictred={'red': [(0., 0., 0.), (1., 1., 1.)], 'green': [(0., 0., 0.), (1., 0., 0.)], 'blue': [(0., 0., 0.), (1., 0., 0.)]} cdictgreen={'red': [(0., 0., 0.), (1., 0., 0.)], 'green': [(0., 0., 0.), (1., 1., 1.)], 'blue': [(0., 0., 0.), (1., 0., 0.)]} cdictblue={'red': [(0., 0., 0.), (1., 0., 0.)], 'green': [(0., 0., 0.), (1., 0., 0.)], 'blue': [(0., 0., 0.), (1., 1., 1.)]} red=LinearSegmentedColormap('myred',cdictred) plt.register_cmap(cmap=red) green=LinearSegmentedColormap('mygreen',cdictgreen) plt.register_cmap(cmap=green) blue=LinearSegmentedColormap('myblue',cdictblue) plt.register_cmap(cmap=blue) ## Plot select injections for i, inj in enumerate(select_injections): fig,ax=plt.subplots() color=select_colors[i] plot_integrated(fig, ax, inj, color, color, Xvirt_int_grid, Yvirt_ipsi_int_grid) plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off') plt.xlabel('center - right', fontsize=24) plt.ylabel('posterior - anterior', fontsize=24) fig_file=os.path.join(int_plot_dir, "select_virt_inj%d.png" % i) plt.savefig(fig_file) plt.close() ```
github_jupyter
# Simulated Sky Signal in map domain This lesson is about simulating the input sky signal using PySM 3. ## PySM 3 If you used `PySM` in the past, you most probably used `PySM 2` from https://github.com/bthorne93/PySM_public. `PySM 3` is a rewrite of `PySM` which offers all the same functionality and the same models of `PySM 2` but is focused on: * improving performance using just-in-time compilation and multi-threading with `numba` * lowering memory requirements by reworking the underlying algorithms * improved capability of running in parallel with MPI It is available from https://github.com/healpy/pysm, it is still missing a few models and the documentation but is already integrated into TOAST to overcame the strong performance limits of `PySM 2`. If anyone is interested in learning more about PySM 3, check the [PySM 3 tutorial](https://github.com/zonca/pysm_tutorial), we can work through this during the hack day. ## PySMSky The lower level TOAST class is `PySMSky`, it performs the following operations: * initialize `PySM` with the input sky configuration * loop through all channels and for each calls `PySM` to generate the sky emission at all frequencies in the bandpass and integrate ``` # Load common tools for all lessons import sys sys.path.insert(0, "..") from lesson_tools import ( fake_focalplane ) # Capture C++ output in the jupyter cells %reload_ext wurlitzer import toast import healpy as hp import numpy as np env = toast.Environment.get() env.set_log_level("DEBUG") focal_plane = fake_focalplane() from toast.todmap import PySMSky PySMSky? 
NSIDE = 64 npix = hp.nside2npix(NSIDE) ``` ### PySM models You can find out details about all the models available in PySM 2 and 3 at: https://pysm-public.readthedocs.io/en/latest/models.html ``` pysm_sky_config = ["s1", "f1", "a1", "d1"] pysm_sky = PySMSky(comm=None, pixel_indices=None, nside=NSIDE, units="uK_RJ", pysm_sky_config=pysm_sky_config ) pysm_sky ``` ### PySM Sky object We can directly access the underlying `PySM.Sky` object as the `sky` attribute of the `PySMSky` object. ``` pysm_sky.sky import pysm.units as u pysm_sky.sky.get_emission(12 * u.GHz) %matplotlib inline ## _ refers to the output of the previous cell, so this works only if you run cells in sequence hp.mollview(_[0], cmap="coolwarm", min=-100, max=1e4) ``` ### Execute the PySMSky object First we need bandpasses for the channels, first element of the tuple is frequency, second element are weights, we define a top-hat of 10 points with a bandwidth of 10 GHz: ``` bandpasses = {} for ch in focal_plane: # loops through dict keys bandpasses[ch] = (np.linspace(65, 75, 10), np.ones(10)) bandpasses local_maps = {} pysm_sky.exec(local_maps, out="sky", bandpasses=bandpasses) hp.mollview(local_maps["sky_0A"][0], cmap="coolwarm", min=0, max=1e3, unit="uK_RJ") local_maps["sky_0B"][0]-local_maps["sky_0A"][0] bandpasses["0B"] = (np.linspace(63, 73, 10), np.ones(10)) local_maps = {} pysm_sky.exec(local_maps, out="sky", bandpasses=bandpasses) hp.mollview(local_maps["sky_0A"][0]-local_maps["sky_0B"][0], cmap="coolwarm", unit="uK_RJ") hp.gnomview(local_maps["sky_0A"][0]-local_maps["sky_0B"][0], rot=(0,0), xsize=5000, ysize=2000, cmap="coolwarm") ```
github_jupyter
Recurrent Neural networks ===== ### RNN <img src="../imgs/rnn.png" width="20%"> A recurrent neural network (RNN) is a class of artificial neural network where connections between units form a directed cycle. This creates an internal state of the network which allows it to exhibit dynamic temporal behavior. ```python keras.layers.recurrent.SimpleRNN(units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0) ``` #### Arguments: <ul> <li><strong>units</strong>: Positive integer, dimensionality of the output space.</li> <li><strong>activation</strong>: Activation function to use (see <a href="http://keras.io/activations/">activations</a>). If you pass None, no activation is applied (ie. "linear" activation: <code>a(x) = x</code>).</li> <li><strong>use_bias</strong>: Boolean, whether the layer uses a bias vector.</li> <li><strong>kernel_initializer</strong>: Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs. (see <a href="https://keras.io/initializers/">initializers</a>).</li> <li><strong>recurrent_initializer</strong>: Initializer for the <code>recurrent_kernel</code> weights matrix, used for the linear transformation of the recurrent state. 
(see <a href="https://keras.io/initializers/">initializers</a>).</li> <li><strong>bias_initializer</strong>: Initializer for the bias vector (see <a href="https://keras.io/initializers/">initializers</a>).</li> <li><strong>kernel_regularizer</strong>: Regularizer function applied to the <code>kernel</code> weights matrix (see <a href="https://keras.io/regularizers/">regularizer</a>).</li> <li><strong>recurrent_regularizer</strong>: Regularizer function applied to the <code>recurrent_kernel</code> weights matrix (see <a href="https://keras.io/regularizers/">regularizer</a>).</li> <li><strong>bias_regularizer</strong>: Regularizer function applied to the bias vector (see <a href="https://keras.io/regularizers/">regularizer</a>).</li> <li><strong>activity_regularizer</strong>: Regularizer function applied to the output of the layer (its "activation"). (see <a href="https://keras.io/regularizers/">regularizer</a>).</li> <li><strong>kernel_constraint</strong>: Constraint function applied to the <code>kernel</code> weights matrix (see <a href="https://keras.io/constraints/">constraints</a>).</li> <li><strong>recurrent_constraint</strong>: Constraint function applied to the <code>recurrent_kernel</code> weights matrix (see <a href="https://keras.io/constraints/">constraints</a>).</li> <li><strong>bias_constraint</strong>: Constraint function applied to the bias vector (see <a href="https://keras.io/constraints/">constraints</a>).</li> <li><strong>dropout</strong>: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</li> <li><strong>recurrent_dropout</strong>: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</li> </ul> #### Backprop Through time Contrary to feed-forward neural networks, the RNN is characterized by the ability of encoding longer past information, thus very suitable for sequential models. 
The BPTT extends the ordinary BP algorithm to suit the recurrent neural architecture. <img src="../imgs/rnn2.png" width="45%"> **Reference**: [Backpropagation through Time](http://ir.hit.edu.cn/~jguo/docs/notes/bptt.pdf) ``` %matplotlib inline import numpy as np import pandas as pd import theano import theano.tensor as T import keras import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split # -- Keras Import from keras.models import Sequential from keras.layers import Dense, Activation from keras.preprocessing import image from keras.datasets import imdb from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import np_utils from keras.preprocessing import sequence from keras.layers.embeddings import Embedding from keras.layers.recurrent import LSTM, GRU, SimpleRNN from keras.layers import Activation, TimeDistributed, RepeatVector from keras.callbacks import EarlyStopping, ModelCheckpoint ``` ## IMDB sentiment classification task This is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. IMDB provided a set of 25,000 highly polar movie reviews for training, and 25,000 for testing. There is additional unlabeled data for use as well. Raw text and already processed bag of words formats are provided. 
http://ai.stanford.edu/~amaas/data/sentiment/ ### Data Preparation - IMDB ``` max_features = 20000 maxlen = 100 # cut texts after this number of words (among top max_features most common words) batch_size = 32 print("Loading data...") (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features) print(len(X_train), 'train sequences') print(len(X_test), 'test sequences') print('Example:') print(X_train[:1]) print("Pad sequences (samples x time)") X_train = sequence.pad_sequences(X_train, maxlen=maxlen) X_test = sequence.pad_sequences(X_test, maxlen=maxlen) print('X_train shape:', X_train.shape) print('X_test shape:', X_test.shape) ``` #### Model building ``` print('Build model...') model = Sequential() model.add(Embedding(max_features, 128, input_length=maxlen)) model.add(SimpleRNN(128)) model.add(Dropout(0.5)) model.add(Dense(1)) model.add(Activation('sigmoid')) # try using different optimizers and different optimizer configs model.compile(loss='binary_crossentropy', optimizer='adam') print("Train...") model.fit(X_train, y_train, batch_size=batch_size, epochs=1, validation_data=(X_test, y_test)) ``` ### LSTM A LSTM network is an artificial neural network that contains LSTM blocks instead of, or in addition to, regular network units. A LSTM block may be described as a "smart" network unit that can remember a value for an arbitrary length of time. Unlike traditional RNNs, an Long short-term memory network is well-suited to learn from experience to classify, process and predict time series when there are very long time lags of unknown size between important events. 
<img src="../imgs/gru.png" width="60%"> ```python keras.layers.recurrent.LSTM(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0) ``` #### Arguments <ul> <li><strong>units</strong>: Positive integer, dimensionality of the output space.</li> <li><strong>activation</strong>: Activation function to use If you pass None, no activation is applied (ie. "linear" activation: <code>a(x) = x</code>).</li> <li><strong>recurrent_activation</strong>: Activation function to use for the recurrent step.</li> <li><strong>use_bias</strong>: Boolean, whether the layer uses a bias vector.</li> <li><strong>kernel_initializer</strong>: Initializer for the <code>kernel</code> weights matrix, used for the linear transformation of the inputs.</li> <li><strong>recurrent_initializer</strong>: Initializer for the <code>recurrent_kernel</code> weights matrix, used for the linear transformation of the recurrent state.</li> <li><strong>bias_initializer</strong>: Initializer for the bias vector.</li> <li><strong>unit_forget_bias</strong>: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force <code>bias_initializer="zeros"</code>. 
This is recommended in <a href="http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf">Jozefowicz et al.</a></li> <li><strong>kernel_regularizer</strong>: Regularizer function applied to the <code>kernel</code> weights matrix.</li> <li><strong>recurrent_regularizer</strong>: Regularizer function applied to the <code>recurrent_kernel</code> weights matrix.</li> <li><strong>bias_regularizer</strong>: Regularizer function applied to the bias vector.</li> <li><strong>activity_regularizer</strong>: Regularizer function applied to the output of the layer (its "activation").</li> <li><strong>kernel_constraint</strong>: Constraint function applied to the <code>kernel</code> weights matrix.</li> <li><strong>recurrent_constraint</strong>: Constraint function applied to the <code>recurrent_kernel</code> weights matrix.</li> <li><strong>bias_constraint</strong>: Constraint function applied to the bias vector.</li> <li><strong>dropout</strong>: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs.</li> <li><strong>recurrent_dropout</strong>: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state.</li> </ul> ### GRU Gated recurrent units are a gating mechanism in recurrent neural networks. Much similar to the LSTMs, they have fewer parameters than LSTM, as they lack an output gate. <img src="../imgs/gru.png" /> ```python keras.layers.recurrent.GRU(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0) ``` ### Your Turn! - Hands on Rnn ``` print('Build model...') model = Sequential() model.add(Embedding(max_features, 128, input_length=maxlen)) # !!! 
Play with those! try and get better results! #model.add(SimpleRNN(128)) #model.add(GRU(128)) #model.add(LSTM(128)) model.add(Dropout(0.5)) model.add(Dense(1)) model.add(Activation('sigmoid')) # try using different optimizers and different optimizer configs model.compile(loss='binary_crossentropy', optimizer='adam') print("Train...") model.fit(X_train, y_train, batch_size=batch_size, epochs=4, validation_data=(X_test, y_test)) score, acc = model.evaluate(X_test, y_test, batch_size=batch_size) print('Test score:', score) print('Test accuracy:', acc) ``` --- ## Convolutional LSTM > This section demonstrates the use of a **Convolutional LSTM network**. > This network is used to predict the next frame of an artificially generated movie which contains moving squares. #### Artificial Data Generation Generate movies with `3` to `7` moving squares inside. The squares are of shape $1 \times 1$ or $2 \times 2$ pixels, which move linearly over time. For convenience we first create movies with bigger width and height (`80x80`) and at the end we select a $40 \times 40$ window. ``` # Artificial Data Generation def generate_movies(n_samples=1200, n_frames=15): row = 80 col = 80 noisy_movies = np.zeros((n_samples, n_frames, row, col, 1), dtype=np.float) shifted_movies = np.zeros((n_samples, n_frames, row, col, 1), dtype=np.float) for i in range(n_samples): # Add 3 to 7 moving squares n = np.random.randint(3, 8) for j in range(n): # Initial position xstart = np.random.randint(20, 60) ystart = np.random.randint(20, 60) # Direction of motion directionx = np.random.randint(0, 3) - 1 directiony = np.random.randint(0, 3) - 1 # Size of the square w = np.random.randint(2, 4) for t in range(n_frames): x_shift = xstart + directionx * t y_shift = ystart + directiony * t noisy_movies[i, t, x_shift - w: x_shift + w, y_shift - w: y_shift + w, 0] += 1 # Make it more robust by adding noise. 
# The idea is that if during inference, # the value of the pixel is not exactly one, # we need to train the network to be robust and still # consider it as a pixel belonging to a square. if np.random.randint(0, 2): noise_f = (-1)**np.random.randint(0, 2) noisy_movies[i, t, x_shift - w - 1: x_shift + w + 1, y_shift - w - 1: y_shift + w + 1, 0] += noise_f * 0.1 # Shift the ground truth by 1 x_shift = xstart + directionx * (t + 1) y_shift = ystart + directiony * (t + 1) shifted_movies[i, t, x_shift - w: x_shift + w, y_shift - w: y_shift + w, 0] += 1 # Cut to a 40x40 window noisy_movies = noisy_movies[::, ::, 20:60, 20:60, ::] shifted_movies = shifted_movies[::, ::, 20:60, 20:60, ::] noisy_movies[noisy_movies >= 1] = 1 shifted_movies[shifted_movies >= 1] = 1 return noisy_movies, shifted_movies ``` ### Model ``` from keras.models import Sequential from keras.layers.convolutional import Conv3D from keras.layers.convolutional_recurrent import ConvLSTM2D from keras.layers.normalization import BatchNormalization import numpy as np from matplotlib import pyplot as plt %matplotlib inline ``` We create a layer which take as input movies of shape `(n_frames, width, height, channels)` and returns a movie of identical shape. 
``` seq = Sequential() seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), input_shape=(None, 40, 40, 1), padding='same', return_sequences=True)) seq.add(BatchNormalization()) seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same', return_sequences=True)) seq.add(BatchNormalization()) seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same', return_sequences=True)) seq.add(BatchNormalization()) seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same', return_sequences=True)) seq.add(BatchNormalization()) seq.add(Conv3D(filters=1, kernel_size=(3, 3, 3), activation='sigmoid', padding='same', data_format='channels_last')) seq.compile(loss='binary_crossentropy', optimizer='adadelta') ``` ### Train the Network #### Beware: This takes time (~3 mins per epoch on my hardware) ``` # Train the network noisy_movies, shifted_movies = generate_movies(n_samples=1200) seq.fit(noisy_movies[:1000], shifted_movies[:1000], batch_size=10, epochs=20, validation_split=0.05) ``` ### Test the Network ``` # Testing the network on one movie # feed it with the first 7 positions and then # predict the new positions which = 1004 track = noisy_movies[which][:7, ::, ::, ::] for j in range(16): new_pos = seq.predict(track[np.newaxis, ::, ::, ::, ::]) new = new_pos[::, -1, ::, ::, ::] track = np.concatenate((track, new), axis=0) # And then compare the predictions # to the ground truth track2 = noisy_movies[which][::, ::, ::, ::] for i in range(15): fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(121) if i >= 7: ax.text(1, 3, 'Predictions !', fontsize=20, color='w') else: ax.text(1, 3, 'Inital trajectory', fontsize=20) toplot = track[i, ::, ::, 0] plt.imshow(toplot) ax = fig.add_subplot(122) plt.text(1, 3, 'Ground truth', fontsize=20) toplot = track2[i, ::, ::, 0] if i >= 2: toplot = shifted_movies[which][i - 1, ::, ::, 0] plt.imshow(toplot) plt.savefig('imgs/convlstm/%i_animate.png' % (i + 1)) ```
github_jupyter
# Programming Microblaze Subsystems from Jupyter In the Base I/O overlays that accompany the PYNQ release Microblazes are used to control peripherals attached to the various connectors. These can either be programmed with existing programs compiled externally or from within Jupyter. This notebook explains how the Microblazes can be integrated into Jupyter and Python. The Microblaze is programmed in C as the limited RAM available (64 KB) limits what runtimes can be loaded - as an example, the MicroPython runtime requires 256 KB of code and data space. The PYNQ framework provides a mechanism to write the C code inside Jupyter, compile it, load it on to the Microblaze and then execute and interact with it. The first stage is to load an overlay. ``` from pynq.overlays.base import BaseOverlay base = BaseOverlay('base.bit') ``` Now we can write some C code. The `%%microblaze` magic provides an environment where we can write the code and it takes a single argument - the Microblaze we wish to target this code at. This first example simply adds two numbers together and returns the result. ``` %%microblaze base.ARDUINO int add(int a, int b) { return a + b; } ``` The functions we defined in the magic are now available for us to interact with in Python as any other function. ``` add(4,6) ``` ## Data Motion The main purpose of the Python bindings it to transfer data between the host and slave processors. For simple cases, any primitive C type can be used as function parameters and return values and Python values will be automatically converted as necessary. ``` %%microblaze base.ARDUINO float arg_passing(float a, char b, unsigned int c) { return a + b + c; } arg_passing(1, 2, 3) ``` Arrays can be passed in two different way. If a type other than `void` is provided then the data will be copied to the microblaze and if non-`const` the data will be copied back as well. And iterable and modifiable object can be used as the argument in this case. 
``` %%microblaze base.ARDUINO int culm_sum(int* val, int len) { int sum = 0; for (int i = 0; i < len; ++i) { sum += val[i]; val[i] = sum; } return sum; } numbers = [i for i in range(10)] culm_sum(numbers, len(numbers)) print(numbers) ``` Finally we can pass a `void` pointer which will allow the Microblaze to directly access the memory of the host processing system for transferring large quantities of data. In Python these blocks of memory should be allocated using the `Xlnk.cma_array` function and it is the responsibility of the programmer to make sure that the Python and C code agree on the types used. ``` %%microblaze base.ARDUINO long long big_sum(void* data, int len) { int* int_data = (int*)data; long long sum = 0; for (int i = 0; i < len; ++i) { sum += int_data[i]; } return sum; } from pynq import Xlnk allocator = Xlnk() buffer = allocator.cma_array(shape=(1024 * 1024), dtype='i4') buffer[:] = range(1024*1024) big_sum(buffer, len(buffer)) ``` ## Debug printing One unique feature of the PYNQ Microblaze environment is the ability to print debug information directly on to the Jupyter or Python console using the new `pyprintf` function. This function acts like `printf` and `format` in Python and allows for a format string and variables to be passed back to Python for printing. In this release only the `%d` format specifier is supported but this will increase over time. To use `pyprintf` first the appropriate header needs to be included ``` %%microblaze base.ARDUINO #include <pyprintf.h> int debug_sum(int a, int b) { int sum = a + b; pyprintf("Adding %d and %d to get %d\n", a, b, sum); return sum; } debug_sum(1,2) ``` ## Long running processes So far all of the examples presented have been synchronous with the Python code with the Python code blocking until a result is available. Some applications call instead for a long-running process which is periodically queried by other functions. 
If a C function returns `void` then the Python process will resume immediately leaving the function running on its own. Other functions can be run while the long-running process is active but as there is no pre-emptive multithreading the persistent process will have to `yield` at non-timing critical points to allow other queued functions to run. In this example we launch a simple counter process and then pull the value using a second function. ``` %%microblaze base.ARDUINO #include <yield.h> static int counter = 0; void start_counter() { while (1) { ++counter; yield(); } } int counter_value() { return counter; } ``` We can now start the counter going. ``` start_counter() ``` And interrogate its current value ``` counter_value() ``` There are some limitations with using `pyprintf` inside a persistent function in that the output will not be displayed until a subsequent function is called. If the buffer fills in the meantime this can cause the process to deadlock. Only one persistent process can be called at once - if another is started it will block the first until it returns. If too many processes are stacked in this way a stack overflow may occur leading to undefined results. ## Creating class-like objects In the C code `typedef`s can be used to create pseudo classes in Python. If you have a `typedef` called `my_class` then any functions that begin with `my_class_` are assumed to be associated with it. If one of those functions takes `my_class` as the first argument it is taken to be equivalent to `self`. Note that the `typedef` can only ultimately refer to a primitive type. The following example does some basic modular arithmetic base 53 using this idiom. ``` %%microblaze base.ARDUINO typedef unsigned int mod_int; mod_int mod_int_create(int val) { return val % 53; } mod_int mod_int_add(mod_int lhs, int rhs) { return (lhs + rhs) % 53; } ``` We can now create instances using our `create` function and call the `add` method on the returned object. 
The underlying value of the typedef instance can be retrieved from the `.val` attribute. ``` a = mod_int_create(63) b = a.add(4) print(b) print(b.val) ``` ## Coding Guidelines for Microblaze Interfacing Code There are some limitations to be aware of in the Jupyter integration with the Microblaze subsystem; in particular the following things are unsupported and will result in the function not being available. * `struct`s or `union`s of any kind * Pointers to pointers * Returning pointers All non `void*` parameters are passed on the stack so beware of passing large arrays in this fashion or a stack overflow will result.
github_jupyter
<a href="https://colab.research.google.com/github/mdsalem17/scientific-guide-notebooks/blob/main/question-answering/T5_SQuAD_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # T5 SQuAD Model This time we will perform decoding using the T5 SQuAD model. In this notebook we'll perform Question Answering by providing a "Question", its "Context" and see how well we get the "Target" answer. --- ## Downloading and loading dependencies We need to download these dependencies once every time we open the colab. We can ignore the `kfac` error. ``` #Install trax if needed !pip -q install trax #Install t5 !pip install t5 #check trax version !pip list | grep trax import string import t5 import numpy as np import trax from trax.supervised import decoding import textwrap # Will come handy later. wrapper = textwrap.TextWrapper(width=70) ``` # Mounting drive for data accessibility We need to run the cells below to mount drive. ``` from google.colab import drive drive.mount('/content/drive/', force_remount=True) ``` The directory is accessible [here](https://drive.google.com/drive/folders/1-CqJkrX61O9-rs6-XJozdYEQdffqtNea?usp=sharing) ``` path = "/content/drive/MyDrive/scientific-guide-notebooks/question-answering/" !ls $path ``` ## Getting things ready Run the cell below to ready some functions which will later help us in decoding. This code and the functions are the same as the ones we have in `model-description.ipynb`. ``` PAD, EOS, UNK = 0, 1, 2 def detokenize(np_array): return trax.data.detokenize( np_array, vocab_type = 'sentencepiece', vocab_file = 'sentencepiece.model', vocab_dir = path + "/models/") def tokenize(s): # The trax.data.tokenize function operates on streams, # that's why we have to create 1-element stream with iter # and later retrieve the result with next. 
return next(trax.data.tokenize( iter([s]), vocab_type = 'sentencepiece', vocab_file = 'sentencepiece.model', vocab_dir = path + "/models/")) vocab_size = trax.data.vocab_size( vocab_type = 'sentencepiece', vocab_file = 'sentencepiece.model', vocab_dir = path + "/models/") def get_sentinels(vocab_size): sentinels = {} for i, char in enumerate(reversed(string.ascii_letters), 1): decoded_text = detokenize([vocab_size - i]) # Sentinels, ex: <Z> - <a> sentinels[decoded_text] = f'<{char}>' return sentinels sentinels = get_sentinels(vocab_size) def pretty_decode(encoded_str_list, sentinels=sentinels): # If already a string, just do the replacements. if isinstance(encoded_str_list, (str, bytes)): for token, char in sentinels.items(): encoded_str_list = encoded_str_list.replace(token, char) return encoded_str_list # We need to decode and then prettyfy it. return pretty_decode(detokenize(encoded_str_list)) ``` ## Fine-tuning on SQuAD Now let's try to fine tune on SQuAD and see what becomes of the model. For this, we need to write a function that will create and process the SQuAD `tf.data.Dataset`. Below is how T5 pre-processes SQuAD dataset as a text2text example. Before we jump in, we will have to first load in the data. ### Loading in the data and preprocessing We first start by loading in the dataset. The text2text example for a SQuAD example looks like: ``` { 'inputs': 'question: <question> context: <article>', 'targets': '<answer_0>', } ``` The squad pre-processing function takes in the dataset and processes it using the sentencePiece vocabulary we have seen above. It generates the features from the vocab and encodes the string features. It takes on question, context, and answer, and returns "question: Q context: C" as input and "A" as target. ``` # Retrieve Question, C, A and return "question: Q context: C" as input and "A" as target. 
def squad_preprocess_fn(dataset, mode='train'): return t5.data.preprocessors.squad(dataset) # train generator, this takes about 1 minute train_generator_fn, eval_generator_fn = trax.data.tf_inputs.data_streams( 'squad/v1.1:3.0.0', data_dir= path + '/data/', bare_preprocess_fn=squad_preprocess_fn, input_name='inputs', target_name='targets' ) train_generator = train_generator_fn() next(train_generator) #print example from train_generator (inp, out) = next(train_generator) print(inp.decode('utf8').split('context:')[0]) print() print('context:', inp.decode('utf8').split('context:')[1]) print() print('target:', out.decode('utf8')) ``` <a name='3.2'></a> ### 3.2 Decoding from a fine-tuned model We will initialize, then load in a model that we trained earlier, and then try with our own input. ``` # Initialize the model model = trax.models.Transformer( d_ff = 4096, d_model = 1024, max_len = 2048, n_heads = 16, dropout = 0.1, input_vocab_size = 32000, n_encoder_layers = 24, n_decoder_layers = 24, mode='predict') # Change to 'eval' for slow decoding. # load in the model # this will take few minutes shape11 = trax.shapes.ShapeDtype((1, 1), dtype=np.int32) model.init_from_file(path + '/models/model_squad.pkl.gz', weights_only=True, input_signature=(shape11, shape11)) # Uncomment to see the transformer's structure. print(model) # create inputs # a simple example # inputs = 'question: She asked him where is john? context: John was at the game' # an extensive example inputs = 'question: What are some of the colours of a rose? context: A rose is a woody perennial flowering plant of the genus Rosa, in the family Rosaceae, or the flower it bears.There are over three hundred species and tens of thousands of cultivars. They form a group of plants that can be erect shrubs, climbing, or trailing, with stems that are often armed with sharp prickles. Flowers vary in size and shape and are usually large and showy, in colours ranging from white through yellows and reds. 
Most species are native to Asia, with smaller numbers native to Europe, North America, and northwestern Africa. Species, cultivars and hybrids are all widely grown for their beauty and often are fragrant.' # tokenizing the input so we could feed it for decoding print(tokenize(inputs)) test_inputs = tokenize(inputs) # Temperature is a parameter for sampling. # # * 0.0: same as argmax, always pick the most probable token # # * 1.0: sampling from the distribution (can sometimes say random things) # # * values in between can trade off diversity and quality, try it out! output = decoding.autoregressive_sample(model, inputs=np.array(test_inputs)[None, :], temperature=0.0, max_length=10) print(wrapper.fill(pretty_decode(output[0]))) ``` We can notice that the RAM is almost full; this is because the model and the decoding are memory heavy. ## Reference 1. Coursera. ‘Natural Language Processing with Attention Models’. https://www.coursera.org/specializations/natural-language-processing. 2. ‘Trax Quick Intro — Trax Documentation’. https://trax-ml.readthedocs.io/en/latest/notebooks/trax_intro.html#Supervised-training. 3. ‘Trax.Data — Trax Documentation’. https://trax-ml.readthedocs.io/en/latest/trax.data.html.
github_jupyter
``` import pathlib import tensorflow as tf import tensorflow.keras.backend as K import skimage import imageio import numpy as np import matplotlib.pyplot as plt # Makes it so any changes in pymedphys is automatically # propagated into the notebook without needing a kernel reset. from IPython.lib.deepreload import reload %load_ext autoreload %autoreload 2 from pymedphys._experimental.autosegmentation import unet structure_uids = [ path.name for path in pathlib.Path('data').glob('*') ] structure_uids split_num = len(structure_uids) - 2 training_uids = structure_uids[0:split_num] testing_uids = structure_uids[split_num:] training_uids testing_uids def get_image_paths_for_uids(uids): image_paths = [ str(path) for path in pathlib.Path('data').glob('**/*_image.png') if path.parent.name in uids ] np.random.shuffle(image_paths) return image_paths def mask_paths_from_image_paths(image_paths): mask_paths = [ f"{image_path.split('_')[0]}_mask.png" for image_path in image_paths ] return mask_paths training_image_paths = get_image_paths_for_uids(training_uids) training_mask_paths = mask_paths_from_image_paths(training_image_paths) len(training_image_paths), len(training_mask_paths) testing_image_paths = get_image_paths_for_uids(testing_uids) testing_mask_paths = mask_paths_from_image_paths(testing_image_paths) len(testing_image_paths), len(testing_mask_paths) def _centre_crop(image): shape = image.shape cropped = image[ shape[0]//4:3*shape[0]//4, shape[1]//4:3*shape[1]//4, ... 
] return cropped def _process_mask(png_mask): normalised_mask = png_mask / 255 cropped = _centre_crop(normalised_mask) return cropped # def _remove_mask_weights(weighted_mask): # return weighted_mask / mask_weights for mask_path in testing_mask_paths[0:1]: png_mask = imageio.imread(mask_path) processed_mask = _process_mask(png_mask) plt.imshow(png_mask) plt.show() plt.imshow(processed_mask) plt.colorbar() plt.show() processed_mask.shape def _process_image(png_image): normalised_image = png_image[:,:,None].astype(float) / 255 cropped = _centre_crop(normalised_image) return cropped for image_path in testing_image_paths[0:1]: png_image = imageio.imread(image_path) processed_image = _process_image(png_image) plt.imshow(png_image) plt.colorbar() plt.show() plt.imshow(processed_image) plt.colorbar() plt.show() def get_datasets(image_paths, mask_paths): input_arrays = [] output_arrays = [] for image_path, mask_path in zip(image_paths, mask_paths): input_arrays.append(_process_image(imageio.imread(image_path))) output_arrays.append(_process_mask(imageio.imread(mask_path))) images = tf.cast(np.array(input_arrays), tf.float32) masks = tf.cast(np.array(output_arrays), tf.float32) return images, masks training_images, training_masks = get_datasets(training_image_paths, training_mask_paths) testing_images, testing_masks = get_datasets(testing_image_paths, testing_mask_paths) # dir(K) mask_dims = training_masks.shape mask_dims testing_masks.shape def display(display_list): plt.figure(figsize=(18, 5)) title = ['Input Image', 'True Mask', 'Predicted Mask'] for i in range(len(display_list)): plt.subplot(1, len(display_list), i+1) plt.title(title[i]) plt.imshow(display_list[i]) plt.colorbar() plt.axis('off') plt.show() display([testing_images[0,:,:,:], testing_masks[0,:,:,:]]) has_brain = np.sum(testing_masks[:,:,:,1], axis=(1,2)) has_eyes = np.sum(testing_masks[:,:,:,0], axis=(1,2)) brain_sort = 1 - np.argsort(has_brain) / len(has_brain) eyes_sort = 1 - np.argsort(has_eyes) / 
len(has_eyes) max_combo = np.argmax(brain_sort * eyes_sort * has_brain * has_eyes) sample_index = max_combo # eyes_sort # brain_sort sample_image = testing_images[max_combo,:,:,:] sample_mask = testing_masks[max_combo,:,:,:] plt.imshow(sample_image) plt.imshow(sample_mask) assert mask_dims[1] == mask_dims[2] grid_size = int(mask_dims[2]) output_channels = int(mask_dims[-1]) tf.keras.backend.clear_session() model = unet.unet( grid_size=grid_size, output_channels=output_channels, number_of_filters_start=32, max_filter_num=64, min_grid_size=8, num_of_fc=2, batch_normalisation=False, use_dropout=False, ) model.summary() def show_prediction(): predicted_masks = model.predict(testing_images) display( [ sample_image, sample_mask, predicted_masks[sample_index,:,:,:] ] ) class DisplayCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): show_prediction() print ('\nSample Prediction after epoch {}\n'.format(epoch+1)) show_prediction() model.compile( optimizer=tf.keras.optimizers.Adam( # learning_rate=0.0001 ), loss=tf.keras.losses.BinaryCrossentropy(), metrics=[ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.Recall(), tf.keras.metrics.Precision() ] ) # model.load_weights('./checkpoints/binomial-cross-entropy') history = model.fit( training_images, training_masks, epochs=100, # batch_size=training_masks.shape[0]//3, validation_data=(testing_images, testing_masks), callbacks=[DisplayCallback()] ) # checkpoints_dir = pathlib.Path('checkpoints') # checkpoints_dir.mkdir(exist_ok=True) # model.save_weights(checkpoints_dir.joinpath('final')) ```
github_jupyter
``` suppressWarnings({ library(lmerTest) library(lme4) library(ggplot2) library(dplyr) library(tidyr) library(sjPlot) library(IRdisplay) library(car) library(ggeffects) library(gridExtra) }) ``` # Investigating MHWs + Phytoplankton Performance Response via LMMs Tony Cannistra, May 2020 **Purpose**: To experiment with linear mixed effects models to examine whether a significant relationship exists between latitude, sea surface temperature, and performance ratio during marine heatwave events. **Method**: Build a linear mixed model of performance ratio with the following effects: *Fixed*: * latitude + Latitude ^2 * mean_sst_event_year_local *Random*: * phytoplankton isolate * season? Perhaps season should be a fixed effect, especially since it likely does have control over the direction + shape of the curve. ## Data We compute individual isolate performance for all local heatwaves in [this notebook](../Individual_Isolate_performance.ipynb), and use these data below. ``` mhwPerformance <- read.csv("../isolate_performance.csv") mhwPerformance = mhwPerformance %>% mutate(isolate = factor(isolate)) head(mhwPerformance) ``` We need to adjust for the effect of hemisphere to "align" seasons and convert to "season" factor variables from "peak_doy" column: ``` mhwPerformance[between(mhwPerformance$lat, -90, 0), 'peak_doy'] = (mhwPerformance[between(mhwPerformance$lat, -90, 0), 'peak_doy'] - 180) %% 365 mhwPerformance = mhwPerformance %>% mutate(season = case_when( (between(peak_doy, 0, 77) | between(peak_doy, 355, 366)) ~ "winter", # January 1 - March XX or December XX - December 31 between(peak_doy, 78, 170) ~ "spring", # March XX - June XX between(peak_doy, 171, 295) ~ "summer", # June XX - September XX between(peak_doy, 296, 354) ~ "fall" # September XX - December XX ) ) head(mhwPerformance %>% select(peak_doy, season)) ``` We'll clean up by dropping NAs and `Inf`s ``` mhwPerformance = mhwPerformance %>% filter(!is.na(performance_ratio_mean)) %>% 
filter(is.finite(performance_ratio_mean)) perfratio_quantile <- quantile(mhwPerformance$performance_ratio_mean, probs=c(.25, .75), na.rm = FALSE) perfratio_iqr <- IQR(mhwPerformance$performance_ratio_mean) perfratio_low <- perfratio_quantile[2]+1.5*perfratio_iqr # Upper Range perfratio_high <- perfratio_quantile[1]-1.5*perfratio_iqr # Lower Range mhwPerformace = mhwPerformance %>% filter(performance_ratio_mean > perfratio_low & performance_ratio_mean < perfratio_high) ``` We'll also **scale the input variables** for later, but the initial models will use the raw values: ``` mhwPerformance = mhwPerformance %>% mutate( lat_scaled = scale(lat), sst_scaled = scale(current_year_sst_mean), abslat_scaled = scale(abs(lat)) ) ``` ## Is `season` a random or fixed effect? ``` ggplot(mhwPerformance, aes(x = season, y = performance_ratio_mean)) + geom_boxplot() + ylim(0, 3) ``` ## Models We'll start with a simple linear model with all terms, **excluding** interaction terms: ``` simple_lm = lm(performance_ratio_mean ~ (poly(lat_scaled, 2) + sst_scaled + season) , data=mhwPerformance) summary(simple_lm) baseline_hline = geom_hline(yintercept=1, linetype='dashed', color='blue', size=0.6, alpha=0.6) crPlots(simple_lm) ``` ## Simple Model with Random Effects Only `isolate` as random effect for now: ``` simple_re = lmer(performance_ratio_mean ~ (poly(lat_scaled, 2) + sst_scaled + season)^2 + (1|isolate), data=mhwPerformance) summary(simple_re) simple_re_plots = plot_model(simple_re, type='pred') simple_re_plots$lat + baseline_hline simple_re_plots$current_year_sst_mean + baseline_hline simple_re_plots$season + baseline_hline plot_model(simple_re, type='re') + xlab("Isolate ID") + ylab('random effect') ``` **Random Effects model with Season** ``` season_re = lmer(performance_ratio_mean ~ poly(lat,2) + current_year_sst_mean + poly(lat,2)*current_year_sst_mean + season-1 + (1 | isolate), data=mhwPerformance) summary(season_re) plot_model(season_re, type='pred') ``` ## More Intense 
Heatwaves =? Stronger Signal? There's probably a lot of noise from shorter events here. Lets take a look at the distribution of heatwaves by intensity: ``` median(mhwPerformance$intensity_mean) ggplot(mhwPerformance, aes(x=intensity_mean) ) + geom_histogram() + geom_vline(xintercept=median(mhwPerformance$intensity_mean)) ``` We can use ~1.5 C as our threshold (median?) ``` intensity_threshold = 1.5 intenseMhwPerformance = mhwPerformance %>% filter(intensity_mean > intensity_threshold) head(intenseMhwPerformance) nrow(intenseMhwPerformance) ``` **Simple Model** ``` simple_intense_lm = lm(performance_ratio_mean ~ (abs(lat) +current_year_sst_mean + season)^2, data=intenseMhwPerformance) summary(simple_intense_lm) plot_model(simple_intense_lm)+ ylim(-10, 10) plot_model(simple_intense_lm, type='slope',) ``` **Simple Random Effects** ``` simple_intense_re = lmer(performance_ratio_mean ~ (poly(lat,2) + current_year_sst_mean + season)^2 + (1 | isolate), data=intenseMhwPerformance, ) summary(simple_intense_re) simple_intense_scaled_re = lmer(performance_ratio_mean ~ (poly(lat_scaled,2) + sst_scaled + season)^2 + (1 | isolate), data=intenseMhwPerformance, ) summary(simple_intense_scaled_re) anova(simple_intense_re, simple_intense_scaled_re) plot_model(simple_intense_re, type='pred', terms=c("season")) plot(ggeffect(simple_intense_scaled_re, terms='lat_scaled')) ``` ## Absolute Latitude instead of Poly(2) ``` abslat_re = lmer(performance_ratio_mean ~ (abslat_scaled + sst_scaled + season)^2 + (1 | isolate), data=mhwPerformance, ) summary(abslat_re) abslat_re = lmer(performance_ratio_mean ~ (abslat_scaled + sst_scaled + season)^2 + (1 | isolate), data=intenseMhwPerformance, ) summary(abslat_re) plot(ggeffect(abslat_re, terms=c('abslat_scaled', 'season [summer, winter]'))) + baseline_hline plot() # png("./lat_season.png", width=1440, height=700, res=180) seas = plot(ggeffect(simple_intense_scaled_re, terms='season')) + baseline_hline + xlab("Season") + ylab("Performance Ratio") 
+ ggtitle("A) Season") + ylim(0, 5) seaslat = plot(ggeffect(simple_intense_scaled_re, terms=c('lat_scaled', 'season [summer, winter]'))) + baseline_hline + xlab("Latitude [scaled]") + ylab("Performance Ratio") + ggtitle("B) Latitude") + ylim(0, 5) grid.arrange(seas, seaslat, nrow=1, widths=c(1,1.3)) # dev.off() # seaslat summary(simple_intense_re) tab_model(simple_re, simple_intense_scaled_re, show.stat=TRUE, use.viewer=FALSE, dv.labels=c("Performance Ratio [all events]", "Performance Ratio [intense events]"), file = "simple_intense_re.html") # png("./lat_season_all.png", width=1440, height=700, res=180) seas = plot(ggeffect(simple_re, terms='season')) + baseline_hline + xlab("Season") + ylab("Performance Ratio") + ggtitle("A) Season") + ylim(0, 5) seaslat = plot(ggeffect(simple_re, terms='lat_scaled')) + baseline_hline + xlab("Latitude [scaled]") + ylab("Performance Ratio") + ggtitle("B) Latitude") + ylim(0, 5) grid.arrange(seas, seaslat, nrow=1, widths=c(1,1.3)) # dev.off() # seaslat anova(simple_re, simple_intense_re) png("./compare_coefs.png", width=1440, height=950, res=180) simple_coefs = plot_model(simple_re, ci.lvl=NA, show.values = TRUE, value.offset=.4) + ylim(-20, 20) + ggtitle("A) All MHWs") intense_coefs = plot_model(simple_intense_scaled_re,show.values = TRUE, value.offset=.4, ci.lvl=NA)+ theme(axis.text.y = element_blank()) + ylim(-20, 20) + ggtitle("B) Intense MHWs") grid.arrange(simple_coefs, intense_coefs, nrow=1, widths=c(2., 1) ) dev.off() ```
github_jupyter
## Deploy a simple S3 dispersed storage archive solution ### Prerequisites In order to execute this example you have to have one of the following: - have the TF Grid SDK installed and are looking at this content in the Jupyter Lab setup in SDK. `Insert How to get here` - have the TF Grid SDK installed and are building / executing python scripts. `Insert Link to the SDK installation doc` ### Overview To design a simple S3 archive solution we need to follow a few simple steps: - create (or identify and use) an overlay network that spans all of the nodes needed in the solution - identify which nodes are involved in the archive for storage and which nodes are running the storage software - create reservations on the storage nodes for low level storage. Create and deploy zero-DB's - collect information of how to access and use the low level storage devices to be passed on to the S3 storage software - design the architecture, data and parity disk design - deploy the S3 software in a container #### Create an overlay network or identify a previously deployed overlay network Each overlay network is private and contains private IP addresses. Each overlay network is deployed in such a way that it has no connection to the public (IPv4 or IPv6) network directly. In order to work with such a network a tunnel needs to be created between the overlay network on the grid and the private overlay network. **Required unique parameters**: - a unique port used to build the tunnel termination between your laptop / server and the overlay network - a TF 3Node with an IPv4 IP address if your location does not have IPv6 connectivity available - a unique network name #### Set up the capacity environment to find, reserve and configure Make sure that your SDK points to the mainnet explorer for deploying this capacity example. Also make sure you have an identity loaded. The example code uses the default identity. Multiple identities could be stored in the TF Grid SDK. 
To check your available identities you could request the number of identities available for you by typing `j.tools.threebot.me` in the kosmos shell. ``` # Which identities are available in you SDK j.tools.threebot.me # Make sure I have an identity (set default one for mainnet of testnet) me = j.tools.threebot.me.default j.clients.threebot.explorer_addr_set('explorer.grid.tf') # Load the zero-os sal and reate empty reservation method zos = j.sal.zosv2 r = zos.reservation_create() ``` #### Setup your overlay network (skip this step if you have a network setup and available) An overlay network creates a private peer2peer network over selected nodes. These overlay network will launch encrypted peer2peer tunnels between each and every individual nodes providing unlimited access and connectivity between nodes. The nodes (3Nodes) could be anywhere in the TF Grid and by reserving capacity on these nodes ``` # Set the unique parameters for this network deployment u_port=int(8101) u_networkname=str("weynand_new") print("Port is set to:", u_port) print("Unique network name is set to:", u_networkname) # create overlay network definition in datastructure called "network" network = zos.network.create(r, ip_range="172.20.0.0/16", network_name=u_networkname) nodes_mazraa = zos.nodes_finder.nodes_search(farm_id=123428) # (IPv6 nodes) nodes_salzburg = zos.nodes_finder.nodes_search(farm_id=12775) # (IPv6 nodes) nodes_vienna_1 = zos.nodes_finder.nodes_search(farm_id=82872) # (IPv6 nodes) nodes_belgium = zos.nodes_finder.nodes_search(farm_id=1) # (IPv4 nodes, to be used as ingress/egress point. 
These are not Web Gatewaysm, just nodes connected to the internet with IPv4 addresses) nodes_munich = zos.nodes_finder.nodes_search(farm_id=50669) #(IPv6 nodes) # Make a network spanning Salzburg, Vienna, Lichristu and Munich nodes_all = nodes_salzburg[:5] + nodes_vienna_1[:5] + nodes_belgium[:5]+nodes_mazraa[:5] # make sure to set a new port,empty for i, node in enumerate(nodes_all): if zos.nodes_finder.filter_is_up(node): iprange = f"172.20.{i+10}.0/24" zos.network.add_node(network, node.node_id , iprange, wg_port=u_port) print("Node: ", i, " ID: ", node.node_id, " IPv4 address: ", iprange) # Enter here the node_id for the node that is the IPv4 bridge to create the wireguard config. wg_config = zos.network.add_access(network, 'CBDY1Fu4CuxGpdU3zLL9QT5DGaRkxjpuJmzV6V5CBWg4', '172.20.100.0/24', ipv4=True) print("wireguard configuration") print(wg_config) # Set the duration for the reservation import time expiration = j.data.time.epoch + (10*60) #expiration = j.data.time.epoch + (5*60) # register the reservation rid = zos.reservation_register(r, expiration, identity=me) time.sleep(5) # inspect the result of the reservation provisioning result = zos.reservation_result(rid) print("provisioning result") print(result) ``` ### Wireguard config network (8037) reservation id: 9011 ``` [Interface] Address = 100.64.20.100/32 PrivateKey = HQwOoFyXqyDb5YbvQrINiaoKhzYKPTRUIAIHIAbkAgM= [Peer] PublicKey = ptEPaC6eq9ek/fLa6DV4tRL3wYqYBfDlBVIavrmcMFs= AllowedIPs = 172.20.0.0/16, 100.64.20.0/32 PersistentKeepalive = 25 Endpoint = 185.69.166.242:8037 ``` ### Node networks network "weynand_testnet_37" ``` Node: 0 ID: 7fHSAHEvUGtUcYSqLtpGq8ANssPikTyyHC52FddDYF4Y IPv4 address: 172.20.10.0/24 Node: 1 ID: FjwyHVvfATkVb4Puh4x6jCMS79TVVgSYagAuZTxWrsbj IPv4 address: 172.20.11.0/24 Node: 2 ID: 9211BFV7MFwktD2b8jHE9Ub3fHRtaYQyBBfwT9kEKA7q IPv4 address: 172.20.12.0/24 Node: 3 ID: HugtVL51BFNLbZbbxnWu2GEe8hV97YVPac19zy5wwNpT IPv4 address: 172.20.13.0/24 Node: 4 ID: 
9KAbX21NGbZYupBJ6EeeWx3ZTKDx7ADevr8qtmEa5WkC IPv4 address: 172.20.14.0/24 Node: 5 ID: 9kcLeTuseybGHGWw2YXvdu4kk2jZzyZCaCHV9t6Axqqx IPv4 address: 172.20.15.0/24 Node: 6 ID: 3h4TKp11bNWjb2UemgrVwayuPnYcs2M1bccXvi3jPR2Y IPv4 address: 172.20.16.0/24 Node: 7 ID: FUq4Sz7CdafZYV2qJmTe3Rs4U4fxtJFcnV6mPNgGbmRg IPv4 address: 172.20.17.0/24 Node: 8 ID: 5Pb5NMBQWLTWhXK2cCM8nS6JZrnP2HaTP452TfMMYT9p IPv4 address: 172.20.18.0/24 Node: 9 ID: DUF2knurdMuX2eJVp9o7tXq4eNBy2fbxBoWhrMXWPEtF IPv4 address: 172.20.19.0/24 Node: 10 ID: 8zdqjFD7GLsSSfsTgFYcGusw91gQ3tdx7jbUhJep2a5X IPv4 address: 172.20.20.0/24 Node: 11 ID: 6chi1iSczxfF4U2iyCcJwkwWnwzcDgQHzCRExK9r4V1j IPv4 address: 172.20.21.0/24 Node: 15 ID: BvJzAiQTqTJoBZ1F5WzYoPpWUBoyRWp7agXSWnY7SBre IPv4 address: 172.20.25.0/24 Node: 16 ID: CpssVPA4oh455qDxakYhiazgG6t2FT6gAGvmPJMKJL2d IPv4 address: 172.20.26.0/24 Node: 17 ID: HkfruwpT1yjx3TTiKn5PVBGFDmnTEqrzz6S36e4rFePb IPv4 address: 172.20.27.0/24 Node: 18 ID: 9LmpYPBhnrL9VrboNmycJoGfGDjuaMNGsGQKeqrUMSii IPv4 address: 172.20.28.0/24 Node: 19 ID: 3FPB4fPoxw8WMHsqdLHamfXAdUrcRwdZY7hxsFQt3odL IPv4 address: 172.20.29.0/24 Node: 20 ID: CrgLXq3w2Pavr7XrVA7HweH6LJvLWnKPwUbttcNNgJX7 IPv4 address: 172.20.30.0/24 Node: 21 ID: 9TeVx6vtivk65GGf7QSAfAuEPy5GBDJe3fByNmxt73eT IPv4 address: 172.20.31.0/24 Node: 22 ID: Dv127zmU6aVkS8LFUMgvsptgReokzGj9pNwtz1ZLgcWf IPv4 address: 172.20.32.0/24 Node: 23 ID: HXRB7qxBwMp1giM3fzRDRGYemSfTDiLUhteqtAvmWiBh IPv4 address: 172.20.33.0/24 Node: 24 ID: GiSqnwbuvQagEiqMoexkq582asC8MattsjbFFuMdsaCz IPv4 address: 172.20.34.0/24 Node: 25 ID: 6mVGwQ41R9f7VJpNoJ6QLs4V15dsfMNXfEmQYhVEwCz6 IPv4 address: 172.20.35.0/24 Node: 26 ID: CayXiccrTd1uudPtBi1y6YusEXFFTENX3TShPJ85FnLJ IPv4 address: 172.20.36.0/24 Node: 27 ID: 8rDpKs6gEru87Lk3zsap1zG6nFLMHCufoWA7WY2KEg3q IPv4 address: 172.20.37.0/24 Node: 28 ID: CLbt5He2JibpLb4VQtBEeYz3r7j1YYopeNSGAtjZKPPQ IPv4 address: 172.20.38.0/24 Node: 29 ID: J1Xb2piba1vZM2pSmWP24CPgREnoTce7EuUJysttCyz6 IPv4 address: 172.20.39.0/24 Node: 30 ID: 
A34YUGenHKyhjDMAUKZe4cVDtJM2wQ4n4XRkfGUUEYdy IPv4 address: 172.20.40.0/24 Node: 31 ID: HYwvrxCy5z1QPALC5gTAiAMYWLH7orQ8xWEDbEyLiJV7 IPv4 address: 172.20.41.0/24 Node: 32 ID: CBDY1Fu4CuxGpdU3zLL9QT5DGaRkxjpuJmzV6V5CBWg4 IPv4 address: 172.20.42.0/24 Node: 33 ID: Hb6oVe2B5v9UBzDcDeQfZGn5bwFeM2R3rJh6U93AWfiN IPv4 address: 172.20.43.0/24 Node: 34 ID: 54S1qFXxWgnjmvEFVvqUbR7dHhvCshLbQrp2UpmE7GhZ IPv4 address: 172.20.44.0/24 Node: 35 ID: 9WhwSbM2xBNb9E3ws3PNJfyeajnKXWDZAMBLZMXCA9jf IPv4 address: 172.20.45.0/24 Node: 36 ID: 2hgRioV9ZKe8Apnm84TZn8Bn5XczyPU2nkPmozUY4rYw IPv4 address: 172.20.46.0/24 Node: 37 ID: 6gBWuYT8MTCJiRJp3mLiPLSXDJpDCGrLyi4HpVissb5j IPv4 address: 172.20.47.0/24 Node: 39 ID: HARGRBPyxF315bXDBCdaTnAD9cqMpwxvQnnzkZjhpVCA IPv4 address: 172.20.49.0/24 Node: 40 ID: Ddstj2hPbGip8Ci26TUaEzY52DHZs3JphLFXy9UgWSRS IPv4 address: 172.20.50.0/24 Node: 41 ID: FZZxePXsMfWpoepiPtdmc7EjRs3i8cmFF8xXJsFHKFB4 IPv4 address: 172.20.51.0/24 Node: 42 ID: Aiux6s3V8wg398FHzWWy3LC81C8vuP4JLkck7uR9T3pD IPv4 address: 172.20.52.0/24 Node: 43 ID: D9pVi21QMozECuZ6inVKQZP5UFrtusKUhW5mfW5fRxbK IPv4 address: 172.20.53.0/24 Node: 44 ID: 35A85gq6FRBFjMfdHDE6d7j5BYr786h4afy1wNcmdzre IPv4 address: 172.20.54.0/24 Node: 45 ID: 4TksThCbMYo8THcsADEckaCwurHSbn6EvDapfXvga97r IPv4 address: 172.20.55.0/24 Node: 46 ID: JE3GQ7LhStnoTR5mSfJZE1gcLhTr2oMiURVH1C1ipsgo IPv4 address: 172.20.56.0/24 Node: 47 ID: 7Qa7fxRPtMc5R72cnNms4XWD9PJkeQJ6iD8iTU9T8p2o IPv4 address: 172.20.57.0/24 Node: 48 ID: 9if6GSLuz1awA9EAywgYWDdgyH7Aq79X68kv9adJNAFL IPv4 address: 172.20.58.0/24 Node: 49 ID: 59R1GU6qh4RiWQRvUFm4GzwBbTM2JStyTFmgpzruq9Gt IPv4 address: 172.20.59.0/24 ``` ### Deploy Min.io simple storage solution Now that we have a network that spans 49 nodes - let's build a S3 server that uses disks in a number of these servers ``` # ---------------------------------------------------------------------------------- # Parameters and Variables # 
---------------------------------------------------------------------------------- # load the zero-os sal zos = j.sal.zosv2 day=24*60*60 hour=60*60 # Node: 49 ID: 59R1GU6qh4RiWQRvUFm4GzwBbTM2JStyTFmgpzruq9Gt IPv4 address: 172.20.59.0/24 minio_node_id = '59R1GU6qh4RiWQRvUFm4GzwBbTM2JStyTFmgpzruq9Gt' minio_node_ip = '172.20.59.16' # ---------------------------------------------------------------------------------- reservation_network = zos.reservation_create() reservation_zdbs = zos.reservation_create() reservation_storage = zos.reservation_create() rid_network=0 rid_zdbs=0 rid_storage=0 password = "supersecret" u_port=int(8037) u_networkname=str("weynand_testnet_37") print("Port is set to:", u_port) print("Unique network name is set to:", u_networkname) # ---------------------------------------------------------------------------------- # Setup the environment # ---------------------------------------------------------------------------------- # make sure I have an identity (set default one for mainnet of testnet) me = j.tools.threebot.me.mainnet j.clients.threebot.explorer_addr_set('explorer.grid.tf') # ---------------------------------------------------------------------------------- # Overview of the nodes and network # ---------------------------------------------------------------------------------- nodes_salzburg = zos.nodes_finder.nodes_search(farm_id=12775) # (IPv6 nodes) nodes_vienna_1 = zos.nodes_finder.nodes_search(farm_id=82872) # (IPv6 nodes) nodes_munich = zos.nodes_finder.nodes_search(farm_id=50669) #(IPv6 nodes) nodes_all = nodes_salzburg[5:8] + nodes_vienna_1[5:8] + nodes_munich # make sure to set a new port,empty for i, node in enumerate(nodes_all): if zos.nodes_finder.filter_is_up(node): print("Node: ", i, " ID: ", node.node_id) # ---------------------------------------------------------------------------------- # Overview of the nodes and network - OUTPUT # ---------------------------------------------------------------------------------- 
Salzburg Node: 5 ID: 9kcLeTuseybGHGWw2YXvdu4kk2jZzyZCaCHV9t6Axqqx IPv4 address: 172.20.15.0/24 Node: 6 ID: 3h4TKp11bNWjb2UemgrVwayuPnYcs2M1bccXvi3jPR2Y IPv4 address: 172.20.16.0/24 Node: 7 ID: FUq4Sz7CdafZYV2qJmTe3Rs4U4fxtJFcnV6mPNgGbmRg IPv4 address: 172.20.17.0/24 Vienna (1) Node: 18 ID: 9LmpYPBhnrL9VrboNmycJoGfGDjuaMNGsGQKeqrUMSii IPv4 address: 172.20.28.0/24 Node: 19 ID: 3FPB4fPoxw8WMHsqdLHamfXAdUrcRwdZY7hxsFQt3odL IPv4 address: 172.20.29.0/24 Node: 20 ID: CrgLXq3w2Pavr7XrVA7HweH6LJvLWnKPwUbttcNNgJX7 IPv4 address: 172.20.30.0/24 Munich Node: 49 ID: 59R1GU6qh4RiWQRvUFm4GzwBbTM2JStyTFmgpzruq9Gt IPv4 address: 172.20.59.0/24 # ---------------------------------------------------------------------------------- # Select and create a reservation for nodes to deploy a ZDB # first find the node where to reserve 0-DB namespaces. Select all the salzburg nodes # ---------------------------------------------------------------------------------- nodes_salzburg = zos.nodes_finder.nodes_search(farm_id=12775) # (IPv6 nodes) nodes_vienna_1 = zos.nodes_finder.nodes_search(farm_id=82872) # (IPv6 nodes) nodes_munich = zos.nodes_finder.nodes_search(farm_id=50669) #(IPv6 nodes) # ---------------------------------------------------------------------------------- # Definition of functional nodes # ---------------------------------------------------------------------------------- nodes_all = nodes_salzburg[5:8] + nodes_vienna_1[5:8] # ---------------------------------------------------------------------------------- # Create ZDB reservation for the selected nodes # ---------------------------------------------------------------------------------- for node in nodes_all: zos.zdb.create( reservation=reservation_zdbs, node_id=node.node_id, size=10, mode='seq', password='supersecret', disk_type="SSD", public=False) # ---------------------------------------------------------------------------------- # Attach persistant storage to container - for storing metadata # 
---------------------------------------------------------------------------------- volume = zos.volume.create(reservation_storage,minio_node_id,size=10,type='SSD') volume_rid = zos.reservation_register(reservation_storage, j.data.time.epoch+(1*hour), identity=me) results = zos.reservation_result(volume_rid) # ---------------------------------------------------------------------------------- # Actuate the reservation for the ZDB's The IP addresses are going to be selfassigned. # ---------------------------------------------------------------------------------- expiration = j.data.time.epoch + (1*hour) # register the reservation rid_zdb = zos.reservation_register(reservation_zdbs, expiration, identity=me) time.sleep(5) results = zos.reservation_result(rid_zdb) while len(results) < len(nodes_all): time.sleep(2) results = zos.reservation_result(rid_zdb) # ---------------------------------------------------------------------------------- # Read the IP address of the 0-DB namespaces after they are deployed # we will need these IPs when creating the minio container # ---------------------------------------------------------------------------------- namespace_config = [] for result in results: data = result.data_json cfg = f"{data['Namespace']}:{password}@[{data['IPs']}]:{data['Port']}" namespace_config.append(cfg) # All IP's for the zdb's are now known and stored in the namespace_config structure. 
print(namespace_config) ''' rid = 9012 (30 minutes) workload_id = "9012-1" JS-NG> results category = "ZDB" data_json = "{\n \"IPs\": \"2a04:7700:1003:1:54f0:edff:fe87:2c48\",\n \"Namespace\": \"9012-4\",\n \"Port\": 9900\n}" epoch = "2020/03/25 07:38:33" message = "" signature = "db9ffc8b89702887575ae1c54481a916bafea6036ce85419ab95302756c3ca45955fd8961901d87ccb3f0a92eca31bc202106fe3d1d746e32d0b01017c0b220e" state = "OK" workload_id = "9012-4" category = "ZDB" data_json = "{\n \"IPs\": \"2a02:16a8:1000:0:5c2f:ddff:fe5a:1a70\",\n \"Namespace\": \"9012-1\",\n \"Port\": 9900\n}" epoch = "2020/03/25 07:38:35" message = "" signature = "0cded492a91fc54c862a79a56b4e41372ee4a7bd298ba01b94134b63679f35856a697fae8d9aa53d3b9de3aeb324b3ddea034eadeea708df0bf8e3d30176540a" state = "OK" workload_id = "9012-1" category = "ZDB" data_json = "{\n \"IPs\": \"2a02:16a8:1000:0:1083:59ff:fe38:ce71\",\n \"Namespace\": \"9012-2\",\n \"Port\": 9900\n}" epoch = "2020/03/25 07:38:38" message = "" signature = "caf5c78a314e4673abadf2a53a79e20939598ef9c4dab07cd461c82cc195c8df940b0d7bb05544c409e5a3e695220c432d2c31e2366f595d46f4141b106dbc09" state = "OK" workload_id = "9012-2" category = "ZDB" data_json = "{\n \"IPs\": \"2003:d6:2f32:8500:dc78:d6ff:fe04:7368\",\n \"Namespace\": \"9012-7\",\n \"Port\": 9900\n}" epoch = "2020/03/25 07:38:40" message = "" signature = "8eca8bc3feff37997f0a1958ab9c7b563932c7c4fc05fab9c95a4d353fb79e12ea1b1f3e355a8d13c790edc5e4fabe139970346a0fccbc9c32f4da91a7f7f20f" state = "OK" workload_id = "9012-7" category = "ZDB" data_json = "{\n \"IPs\": \"2a02:16a8:1000:0:fc7c:4aff:fec8:baf\",\n \"Namespace\": \"9012-3\",\n \"Port\": 9900\n}" epoch = "2020/03/25 07:38:43" message = "" signature = "707ac7ed6a3930175b12488857a08a67b5d64dbc431fa19d3ccc1cea097b6c6bbbaac3a54de19360ca405079123f5f3f089e8ea3623a83e561fad5137dfa1507" state = "OK" workload_id = "9012-3" category = "ZDB" data_json = "{\n \"IPs\": \"2a04:7700:1003:1:acc0:2ff:fed3:1692\",\n \"Namespace\": \"9012-5\",\n \"Port\": 
9900\n}" epoch = "2020/03/25 07:38:44" message = "" signature = "4023a55eaf26a02dddb61004334c5324d4f880d31327eec4ad0884c6a0b66eaeff4b5e0f14725953ac45074abe6c984f71e06f8e2b37d3a341e4fe9d7a7e500f" state = "OK" workload_id = "9012-5" category = "ZDB" data_json = "{\n \"IPs\": \"2a04:7700:1003:1:ac9d:f3ff:fe6a:47a9\",\n \"Namespace\": \"9012-6\",\n \"Port\": 9900\n}" epoch = "2020/03/25 07:38:44" message = "" signature = "86992453291b9c6dbf19965248ecc23a55f1b0379546b2fa41aa7476fde84e15e63174a0f8ee9e2e622d7e3986ecd15e07cba81d98d5a54f8bdc722b1fe64705" state = "OK" workload_id = "9012-6" JS-NG> namespace_config ['9012-4:supersecret@[2a04:7700:1003:1:54f0:edff:fe87:2c48]:9900', '9012-1:supersecret@[2a02:16a8:1000:0:5c2f:ddff:fe5a:1a70]:9900', '9012-2:supersecret@[2a02:16a8:1000:0:1083:59ff:fe38:ce71]:9900', '9012-7:supersecret@[2003:d6:2f32:8500:dc78:d6ff:fe04:7368]:9900', '9012-3:supersecret@[2a02:16a8:1000:0:fc7c:4aff:fec8:baf]:9900', '9012-5:supersecret@[2a04:7700:1003:1:acc0:2ff:fed3:1692]:9900', '9012-6:supersecret@[2a04:7700:1003:1:ac9d:f3ff:fe6a:47a9]:9900'] ``` # ---------------------------------------------------------------------------------- # With the low level disk managers done and the IP adresses discovered we could now build # the reservation for the min.io S3 interface. 
# ---------------------------------------------------------------------------------- reservation_minio = zos.reservation_create() # Make sure to adjust the node_id and network name to the appropriate in copy / paste mode :-) minio_container=zos.container.create(reservation=reservation_minio, node_id=minio_node_id, network_name=u_networkname, ip_address=minio_node_ip, Flist='https://hub.grid.tf/azmy.3Bot/minio.Flist', interactive=False, entrypoint='/bin/entrypoint', cpu=2, memory=2048, env={ "SHARDS":','.join(namespace_config), "DATA":"3", "PARITY":"2", "ACCESS_KEY":"minio", "SECRET_KEY":"passwordpassword", }) # ---------------------------------------------------------------------------------- # Attach persistant storage to container - for storing metadata # ---------------------------------------------------------------------------------- zos.volume.attach_existing( container=minio_container, volume_id=f'{volume_rid}-{volume.workload_id}', mount_point='/data') # ---------------------------------------------------------------------------------- # Write reservation for min.io container in BCDB - end user interface # ---------------------------------------------------------------------------------- expiration = j.data.time.epoch + (1*hour) # register the reservation rid = zos.reservation_register(reservation_minio, expiration, identity=me) time.sleep(5) results = zos.reservation_result(rid) ```
github_jupyter
# [ATM 623: Climate Modeling](../index.ipynb) [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany # Assignment 6: Orbital variations and insolation ## Warning: content out of date and not maintained You really should be looking at [The Climate Laboratory book](https://brian-rose.github.io/ClimateLaboratoryBook) by Brian Rose, where all the same content (and more!) is kept up to date. ***Here you are likely to find broken links and broken code.*** The purpose of this assignment is two-fold: 1. Get you familiar with doing orbital and insolation calculations with `climlab` 2. Introduce you to some modern twists on the Astronomical Theory of the ice ages. This assignment is due **Friday April 7 2017.** ## Early Pleistocene Glacial Cycles and the Integrated Summer Insolation Forcing Although the causes of the ice ages have been discussed for over 150 years, there are still many unsolved problems. Go back and look at the timeseries of global ice volume we showed in Section 1 of the [notes on Orbital variations](../Lectures/Lecture13 -- Orbital variations.ipynb) The most recent glaciations (the "Late Pleistocene") have been large in amplitude and occur on a roughly 100 kyr timescale. The earlier glaciations were smaller in magnitude and occurred on shorter 40 kyr timescales -- apparently in sync with obliquity variations. One very big outstanding question is this: **Why did the dominant frequency for the ice ages change from 40 kyr to 100 kyr roughly 800 kyr ago?** We are not going to answer that here. There is, however, a very nice clear explanation for the obliquity-pacing of the Early Pleistocene. This has been presented by **Peter Huybers** in this paper: > Huybers, P. (2006). Early Pleistocene glacial cycles and the integrated summer insolation forcing. Science, 313:508–511. First, **read the paper**. I will distribute a pdf of the paper through the class web page. 
Here is Figure 2 from the paper: <img src='../images/Huybers_Fig2.png' width=800> Your tasks is to reproduce these orbital / insolation calculations using the `climlab` tools. Specifically: 1. **Reproduce the blue curve in Fig. 2A**: number of days per year that insolation is above 275 W m$^{-2}$ at 65ºN, between 2000 and 1000 kyrs before present. 2. **Reproduce the red curve in Fig. 2C**: Integrated summer insolation: the total accumulated insolation for every day for which this insolation is above the threshold of 275 W m$^{-2}$. 3. **Reproduce the red curve in Fig. 2A**: Average summer insolation intensity. This is the integrated summer insolation above the intensity threshold divided by the number of seconds during which the insolation threshold is exceeded. (Preferably plot this on the same graph as the number of days to reproduce Fig. 2A completely). <div class="alert alert-success"> [Back to ATM 623 notebook home](../index.ipynb) </div> ____________ ## Credits The author of this notebook is [Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany. It was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php) Development of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to Brian Rose. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation. ____________
github_jupyter
# 2. Introducing Mechanical Vibrations By Investigating a Book Oscillating on a Cylindrical Cup ## Introduction This notebook introduces a single degree of freedom vibratory system of a textbook balancing on a cylindrical coffee cup. The system is implemented as a model that students can interact with in order to visualize its free response and compare the computer simulation to a demonstration in the classroom. ### Learning Objectives After the completion of this assignment students will be able to: - load a specific system - view and set parameters (constants, coordinates) on a system - create a visualization that depicts the system's configuration - compute the system's free response - visualize the change in configuration wit respect to the free response - interactively adjust the book shape to see the affect on system response - understand the concept of natural frequency and its relationship to mass/inertia - estimate the period of a sinusoidal vibration from a time series - compare a computer simulation result to experimental result ## What are vibrations? Here we will study a simple vibratory system. A vibrating mechanical system is typically defined as a collection of rigid and flexible objects that interact in a closed envelope. If you set a cylindrical cup on its side and place a book lying flat on top of it, the book will oscillate if initially displaced at a small non-horizontal angle. Note that it oscillates about a horizontal position. This position is called an *equilibrium point*, *equilibrium state*, or *equilibrium configuration* which is a natural position the system comes to when there is no motion. Vibration is formally defined as an *oscillation about an equilibrium*. The video below shows the real system in action: ``` from IPython.display import YouTubeVideo YouTubeVideo('B12HbAOKnqI') ``` During this class, we will examine and explore many different vibratory systems, such as this simple book on a cup system. 
We will have some live demos, as we are showing now, but in general we will work with computational representations of systems to experiment and learn about the nature of vibration. For example, the following code loads a system that represents the book on a cup. To execute the following Jupyter code cell press the shift and enter key simultaneously: ``` from resonance.linear_systems import BookOnCupSystem ``` The line above loads (imports) the command, `BookOnCupSystem`, from the appropriate resonance package. This command can create systems that behave like the live demo. To create a new system append parentheses the command and assign the result that it returns it to a variable called `sys`, execute the following cell: ``` sys = BookOnCupSystem() ``` `sys` is now a new system object that you can interact with. This system has many variables and functions associated with it. You can see what the system has and can do by using the Python dot notation. Type `sys.` and press the tab key to see a list of all the variables and functions that are associated with this system. ## Constants One thing that systems have are different constants, for example this system has geometry, such as the book's thickness and length and the cup's radius. The book also has a mass and, in this case, an underlying assumption is that the book is uniformly dense. Note that all of these do not change with time, i.e. they are constant. You can view all of the constants, which are stored in a Python dictionary by typing: ``` sys.constants ``` A Python dictionary maps keys, in this case the constant's names, to values, the numerical values you'd like to assign to the constant. For example the key `'thickness'` is associated with a value `0.029`. 
An individual constant's value can be accessed by using square brackets: ``` sys.constants['radius'] ``` You can set the values of the constants as such: ``` sys.constants['length'] = 0.184 # m, short side of book ``` *Note that you will be responsible for ensuring that the units are consistent and that all angles should be in radians.* Overwrite the `sys` variable by loading the system again to get back the default constants. ``` sys = BookOnCupSystem() sys.constants ``` All systems will have different sets of constants. There is no unique way to define the constants either. For example, we could have chosen area and one linear dimension instead of three linear dimensions. This system could also have more constants, for example what if you were on the moon? Maybe the acceleration due to gravity should be an explicit constant. Or what if the book to cup connection was very slippery? Maybe the coefficient of friction would be a specific constant. It is important to note that the system constants, as we've defined here, are all constant with respect to time. ## Coordinates There are other system values of interest too. Another very important type are those that vary with time. > **Exercise**: What might be the time varying parameters of this system? There are are an infinite number of time varying parameters, but it is often preferable to choose a uniquely simple set of time varying parameters, often called *generalized coordinates*. These coordinates define the **configuration** of the system. In our case, the vertical and horizontal location of the book's mass center could uniquely describe the configuration of the system (if the book can't slip on the cup). But a better choice would be to use the single time varying angle of the books surface relative to horizontal to define the configuration. 
<img src="balancing-book.svg" alt="image" width="600" /> The angle of the book is thus a generalized coordinate because no fewer number of time varying parameters can possibly be used to describe the configuration. For simple systems, the number of generalized coordinates corresponds to the number of *degrees of freedom* of a system. The degrees of freedom are the number of independent parameters that define the configuration. The non-slipping book on a cup has 1 degree of freedom which is described by the single generalized coordinate, the book's angle. The system's generalized coordinates can be accessed as such: ``` sys.coordinates sys.coordinates['book_angle'] ``` ## Measurements Another type of time varying parameter that can be extracted from systems are non-generalized coordinates or other combinations of constant parameters. We will call these *measurements*. For example, maybe you are interested in the vertical and horizontal location of the book's lower left corner. If you had an appropriate dynamics distance sensor you could measure this location as it changes with time. Or you can define how these two measurement parameters are a function of the book angle and the system's geometry. You can add measurement parameters to a system by defining Python functions that compute the geometric relationship. Given the value of a generalized coordinate and the values of the system's constant parameters, we can define a function that computes the measurement parameter. 
These functions need to be able to accept scalars and arrays,
In this case we need a two dimensional drawing similar to the diagram above. The package `matplotlib` provides "patch" objects that represent a circle and a rectangle that should be able to make a reasonable diagram. First import these functions: ``` import matplotlib.pyplot as plt from matplotlib.patches import Circle, Rectangle ``` And for nice interactive plotting in the Jupyter notebook, use this command to turn it on: ``` %matplotlib notebook ``` Just as we did with the measurement functions, you can create a function that generates the matplotlib figure using the system's various constants, coordinates, and measurements: ``` def create_plot(radius, length, thickness, book_angle, bottom_left_x, bottom_left_y): # create a blank figure and set basic settings on the axis fig, ax = plt.subplots(1, 1) ax.set_xlim((-0.15, 0.15)) ax.set_ylim((0.0, 0.2)) ax.set_xlabel('x [m]') ax.set_ylabel('y [m]') ax.set_aspect('equal') # circles are created by supplying an (x, y) pair and the radius circ = Circle((0.0, radius), radius=radius) # rectangles are created by supplying the (x, y) pair locating the # bottom left corner, the width, the thickness, and the to rotation # angle. notice that the rotation angle is defined in the opposite # direction as we have and it is supposed to be in degrees not radians rect = Rectangle((bottom_left_x, bottom_left_y), length, thickness, angle=-np.rad2deg(book_angle), color='black') ax.add_patch(circ) ax.add_patch(rect) return fig ``` A system can have a single configuration plot function and you let the system know about it by assigning it to the `config_plot_func` variable. ``` sys.config_plot_func = create_plot ``` Now, have a look at the configuration plot by calling the function `plot_configuration()` : ``` sys.plot_configuration(); ``` > **Exercise:** Change the system's constants and the book angle and see how the plot reflects these changes. 
## Free Response Now that we have a system with defined constant parameters we can make it vibrate. There are two ways to create this motion: apply perturbing forces to the system or set the coordinate to an initial angle other than the equilibrium angle. We will do the later here. The resulting motion is called the *free response* of the system, meaning that no external forces are causing the motion. To simulate the free response of the system, some values of time are needed. In this case a final time value, effectively the duration, is passed into the `free_response()` function. First, set the initial angle of the book and then call `free_repsonse()`, storing the returned result in a variable named `trajectories` : ``` sys.coordinates['book_angle'] = np.deg2rad(1) trajectories = sys.free_response(5.0) ``` This creates what is called a [data frame](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html). Data frames are defined in the [Pandas](http://pandas.pydata.org/) Python package and are one of the most common Python data types. They are essentially a 2D table with labels for each column and an index for each row. 
In our case the index is the time value and the columns are the values of the coordinates and the measurements at that particular time: ``` type(trajectories) trajectories ``` The data frames have useful plotting functions associated with them, so it is rather easy to plot the various coordinates and measurements versus time: ``` trajectories.plot(); ``` It is often a little clearer if each column is in a subplots, especially if they have different units, as these do: ``` trajectories.plot(subplots=True); ``` A single column can be accessed and plotted too: ``` plt.figure() trajectories['book_angle'].plot(); ``` Maybe you want to use degrees for the book angle instead, just make a new column: ``` trajectories['book_angle_deg'] = np.rad2deg(trajectories['book_angle']) plt.figure() trajectories['book_angle_deg'].plot(); ``` > **Exercise:** Create the free response of the system with different initial coordinate values and parameter values. > > - Does the simulation always work, if not what doesn't work? *Hint: try a tall stack of books, can you find a stack height that is significant?* > - What mathematical function can be used describe the change in the book angle? > - Why does the book corner x position seem to oscillate faster? ## Animate The Motion Now that we we have a time varying response, we can animate the configuration figure to visualize how the system moves. There is one minor change that needs to be made to the configuration plot function first. We need to make sure that it also returns any of the objects that change with time. Update the function by add the `Rectangle` as a second returned value: ``` def create_plot(radius, length, thickness, book_angle, bottom_left_x, bottom_left_y): fig, ax = plt.subplots(1, 1) ax.set_xlim((-0.15, 0.15)) ax.set_ylim((0.0, 0.2)) ax.set_xlabel('x [m]') ax.set_ylabel('y [m]') ax.set_aspect('equal') circ = Circle((0.0, radius), radius=radius) # NOTE : The rectangle's position and angle will change with time. 
rect = Rectangle((bottom_left_x, bottom_left_y), length, thickness, angle=-np.rad2deg(book_angle), color='black') ax.add_patch(circ) ax.add_patch(rect) # make sure to return the rectangle, which moves at each time step! return fig, rect sys.config_plot_func = create_plot ``` Now, an animation update function can be created which updates the bottom left corner's x and y coordinate at each time step. The last argument in the function signature must be the object(s) that changes. ``` def update_frame(book_angle, bottom_left_x, bottom_left_y, rect): rect.set_xy((bottom_left_x, bottom_left_y)) rect.angle = -np.rad2deg(book_angle) ``` Lastly, add this function to the system: ``` sys.config_plot_update_func = update_frame ``` The visualization can now be animated with: ``` sys.animate_configuration(interval=8) ``` The interval parameter helps speed it up closer to real time. See the documentation for matplotlib's [FuncAnimation](https://matplotlib.org/api/_as_gen/matplotlib.animation.FuncAnimation.html#matplotlib.animation.FuncAnimation) for more options. > **Exercise:** There is a special variable `time` that can be specified in the plot setup and update functions. Add this variable to the function signatures and create some text on the plot that displays the current time using: > > - `text = ax.text(-0.125, 0.025, 'Time = {:0.3f} s'.format(time))` > - `text.set_text('Time = {:0.3f} s'.format(time))` ## Time Series Analysis Now that we have some data produced from the simulation we can see how it compares to what we can measure in real life. > **Exercise:** Either using the video of the oscillation or the demo available in the classroom, count the number of oscillations in a few seconds and compute the period of the oscillation. > > **Exercise:** From the above plots you can see that the oscillation is periodic and sinusoidal. 
Using your program, create a function that calculates the period of the oscillations to three significant figures when the initial book angle is 2 degrees. Compare the period predicted by the system to the period measured in class. You can also compare it to the value given from `sys.period()`. > > *Hint: Look for sign changes with np.sign(), use boolean indexing to extract important times, and finally np.diff() and np.mean() can be useful for finding the delta times and averaging. Note that np.diff() returns one fewer item in the array it operates on.* ``` def find_period(time, theta): """Computes the period of oscillation based on the trajectory of theta. Parameters ========== time : array_like, shape(n,) An array of monotonically increasing time values. theta : array_like, shape(n,) An array of values for theta at each time in ``t``. Returns ======= period : float An estimate of the period of oscillation. """ # delete the following line and replace with your code period = None return period find_period(trajectories.index, trajectories.book_angle) ``` > **Exercise:** Plot the period versus change in mass, length, and radius. Is there anything interesting about these plots? Explain you interpretations in a markdown cell.
github_jupyter
# Chainer basic module introduction 2 Advanced memo is written as "Note". You can skip reading this for the first time reading. In previous tutorial, we learned * Variable * Link * Function * Chain Let's try training the model (Chain) in this tutorial. In this section, we will learn * Optimizer - Optimizes/tunes the internal parameter to fit to the target function * Serializer - Handle save/load the model (Chain) For other chainer modules are explained in later tutorial. ## Training What we want to do here is regression analysis (Wikipedia). Given set of input `x` and its output `y`, we would like to construct a model (function) which estimates `y` as close as possible from given input `x`. This is done by tuning an internal parameters of model (this is represented by Chain class in Chainer). And the procedure to tune this internal parameters of model to get a desired model is often denoted as "training". ## Initial setup Below is typecal `import` statement of chainer modules. ``` # Initial setup following http://docs.chainer.org/en/stable/tutorial/basic.html import numpy as np import chainer from chainer import cuda, Function, gradient_check, report, training, utils, Variable from chainer import datasets, iterators, optimizers, serializers from chainer import Link, Chain, ChainList import chainer.functions as F import chainer.links as L from chainer.training import extensions import matplotlib.pyplot as plt # define target function def target_func(x): """Target function to be predicted""" return x ** 3 - x ** 2 + x - 3 # create efficient function to calculate target_func of numpy array in element wise target_func_elementwise = np.frompyfunc(target_func, 1, 1) # define data domain [xmin, xmax] xmin = -3 xmax = 3 # number of training data sample_num = 20 x_data = np.array(np.random.rand(sample_num) * (xmax - xmin) + xmin) # create 20 y_data = target_func_elementwise(x_data) x_detail_data = np.array(np.arange(xmin, xmax, 0.1)) y_detail_data = 
target_func_elementwise(x_detail_data) # plot training data plt.clf() plt.scatter(x_data, y_data, color='r') plt.show() #print('x', x_data, 'y', y_data) # plot target function plt.clf() plt.plot(x_detail_data, y_detail_data) plt.show() ``` Our task is to build a regression model that fits this data. ## Linear regression using sklearn You can skip this section if you are only interested in Chainer or deep learning. First, let's try a linear regression approach using the sklearn library. Reference: [http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) ``` from sklearn import linear_model # create a linear regression model model = linear_model.LinearRegression() model.fit(x_data.reshape(-1, 1), y_data) y_predict_data = model.predict(x_detail_data.reshape(-1, 1)) plt.clf() plt.scatter(x_data, y_data, color='r') plt.plot(x_detail_data, y_predict_data) plt.show() ``` ## Optimizer Chainer optimizer manages the optimization process of model fit. Concretely, current deep learning works based on the technique of Stochastic Gradient Descent (SGD) based methods. Chainer provides several optimizers in the `chainer.optimizers` module, which includes the following * SGD * MomentumSGD * AdaGrad * AdaDelta * Adam Around my community, MomentumSGD and Adam are more used these days. ## Construct model - implement your own Chain `Chain` is to construct neural networks. Let's see an example, ``` from chainer import Chain, Variable # Defining your own neural networks using `Chain` class class MyChain(Chain): def __init__(self): super(MyChain, self).__init__( l1=L.Linear(None, 30), l2=L.Linear(None, 30), l3=L.Linear(None, 1) ) def __call__(self, x): h = self.l1(x) h = self.l2(F.sigmoid(h)) return self.l3(F.sigmoid(h)) ``` Here `L.Linear` is defined with `None` in its first argument, the input size. When `None` is used, the Linear Link will determine its input size at the first time when it gets the input Variable. 
In other words, Link's input size can be dynamically defined and you don't need to fix the size at the declaration timing. This flexibility comes from the Chainer's concept "define by run". ``` # Setup a model model = MyChain() # Setup an optimizer optimizer = chainer.optimizers.MomentumSGD() optimizer.use_cleargrads() # this is for performance efficiency optimizer.setup(model) x = Variable(x_data.reshape(-1, 1).astype(np.float32)) y = Variable(y_data.reshape(-1, 1).astype(np.float32)) def lossfun(x, y): loss = F.mean_squared_error(model(x), y) return loss # this iteration is "training", to fit the model into desired function. for i in range(300): optimizer.update(lossfun, x, y) # above one code can be replaced by below 4 codes. # model.cleargrads() # loss = lossfun(x, y) # loss.backward() # optimizer.update() y_predict_data = model(x_detail_data.reshape(-1, 1).astype(np.float32)).data plt.clf() plt.scatter(x_data, y_data, color='r') plt.plot(x_detail_data, np.squeeze(y_predict_data, axis=1)) plt.show() ``` Notes for data shape: `x_data` and `y_data` are reshaped when Variable is made. `Linear` function input and output is of the form (batch_index, feature_index). In this example, `x_data` and `y_data` have 1 dimensional feature with the batch_size = sample_num (20). At first, optimizer is set up as following code. We can choose which kind of optimizing method is used during training (in this case, MomentumSGD is used). ``` # Setup an optimizer optimizer = chainer.optimizers.MomentumSGD() optimizer.use_cleargrads() # this is for performance efficiency optimizer.setup(model) ``` Once optimizer is setup, training proceeds with iterating following code. ``` optimizer.update(lossfun, x, y) ``` By the update, optimizer tries to tune internal parameters of model by decreasing the loss defined by `lossfun`. 
In this example, squared error is used as the loss ``` def lossfun(x, y): loss = F.mean_squared_error(model(x), y) return loss ``` ## Serializer Serializer supports save/load of Chainer's classes. After training has finished, we want to save the model so that we can load it in the inference stage. Another use case is that we want to save the optimizer together with the model so that we can abort and restart the training. The code below is almost the same as the training code above. The only difference is that `serializers.load_npz()` (or `serializers.load_hdf5()`) and `serializers.save_npz()` (or `serializers.save_hdf5()`) are implemented. So now it supports resuming training, by implementing save/load. Note that `model` and `optimizer` need to be instantiated to the appropriate class before load. ``` # Execute with resume = False at first time # Then execute this code again and again by with resume = True resume = False # Setup a model model = MyChain() # Setup an optimizer optimizer = chainer.optimizers.MomentumSGD() optimizer.setup(model) x = Variable(x_data.reshape(-1, 1).astype(np.float32)) y = Variable(y_data.reshape(-1, 1).astype(np.float32)) model_save_path = 'mlp.model' optimizer_save_path = 'mlp.state' # Init/Resume if resume: print('Loading model & optimizer') # --- use NPZ format --- serializers.load_npz(model_save_path, model) serializers.load_npz(optimizer_save_path, optimizer) # --- use HDF5 format (need h5py library) --- #%timeit serializers.load_hdf5(model_save_path, model) #serializers.load_hdf5(optimizer_save_path, optimizer) def lossfun(x, y): loss = F.mean_squared_error(model(x), y) return loss # this iteration is "training", to fit the model into desired function. # Only 20 iteration is not enough to finish training, # please execute this code several times by setting resume = True for i in range(20): optimizer.update(lossfun, x, y) # above one code can be replaced by below 4 codes. 
# model.cleargrads() # loss = lossfun(x, y) # loss.backward() # optimizer.update() # Save the model and the optimizer print('saving model & optimizer') # --- use NPZ format --- serializers.save_npz(model_save_path, model) serializers.save_npz(optimizer_save_path, optimizer) # --- use HDF5 format (need h5py library) --- #%timeit serializers.save_hdf5(model_save_path, model) # serializers.save_hdf5(optimizer_save_path, optimizer) y_predict_data = model(x_detail_data.reshape(-1, 1).astype(np.float32)).data plt.clf() plt.scatter(x_data, y_data, color='r', label='training data') plt.plot(x_detail_data, np.squeeze(y_predict_data, axis=1), label='model') plt.legend(loc='lower right') plt.show() ``` Please execute the above by setting `resume = False` the first time, and then please execute the same code several times by setting `resume = True`. You can see "the dynamics" of how the model fits to the data as training proceeds. ### Save format Chainer supports two formats, *NPZ* and *HDF5*. - NPZ : Supported in numpy. So it does not require additional environment setup. - HDF5 : Supported in the h5py library. It is usually **faster** than the npz format, but you need to install the library. In my environment, it took - NPZ : load 2.5ms, save 22ms - HDF5: load 2.0ms, save 15ms In short, I recommend using the HDF5 format version, `serializers.save_hdf5()` and `serializers.load_hdf5()`. Just run `pip install h5py` if you haven't installed the library. ## Predict Once the model is trained, you can apply this model to new data. Compared to "training", this is often called "predict" or "inference". 
``` # Setup a model model = MyChain() model_save_path = 'mlp.model' print('Loading model') # --- use NPZ format --- serializers.load_npz(model_save_path, model) # --- use HDF5 format (need h5py library) --- #%timeit serializers.load_hdf5(model_save_path, model) # calculate new data from model (predict value) x_test_data = np.array(np.random.rand(sample_num) * (xmax - xmin) + xmin) # create 20 x_test = Variable(x_test_data.reshape(-1, 1).astype(np.float32)) y_test_data = model(x_test).data # this is predicted value # calculate target function (true value) x_detail_data = np.array(np.arange(xmin, xmax, 0.1)) y_detail_data = target_func_elementwise(x_detail_data) plt.clf() # plot model predict data plt.scatter(x_test_data, y_test_data, color='k', label='Model predict value') # plot target function plt.plot(x_detail_data, y_detail_data, label='True value') plt.legend(loc='lower right') plt.show() ``` Compare with the black dot and blue line. It is preferable if the black dot is as close as possible to the blue line. If you train the model with enough iteration, black dot should be shown almost on the blue line in this easy example. ## Summary You learned Optimizers and Serializers module, and how these are used in training code. Optimizers update the model (Chain instance) to fit to the data. Serializers provides save/load functionality to chainer module, especially model and optimizer. Now you understand the very basic modules of Chainer. So let's proceed to MNIST example, this is considered as "hello world" program in machine learning community. ``` ```
github_jupyter
## Flow(Total, TCP and UDP) per application type ``` import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import os class Trace: def __init__(self, trace_prefix, type): self._trace_prefix=trace_prefix self._type=type @property def trace_prefix(self): return self._trace_prefix @property def type(self): return self._type class ApplicationType: STREAMING_VIDEO='Streaming Video' VOIP='VoIP' APP_DOWNLOAD='Application Downloads' ONLINE_GAME='Online Game' class FeatureLevel: FLOW='flow' PACKET='pkt' class Protocol: UDP='udp' TCP='tcp' LOCAL_DATA_PATH='data' FEATURE_FILE_TYPE = 'csv' youtube = Trace(trace_prefix='youtube', type=ApplicationType.STREAMING_VIDEO) bilibili = Trace(trace_prefix='bilibili', type=ApplicationType.STREAMING_VIDEO) skype = Trace(trace_prefix='Skype_HongKong', type=ApplicationType.VOIP) wechat = Trace(trace_prefix='wechat_video', type=ApplicationType.VOIP) mac_app_store = Trace(trace_prefix='APP_DOWNLOAD', type=ApplicationType.APP_DOWNLOAD) google_drive = Trace(trace_prefix='google_drive_download', type=ApplicationType.APP_DOWNLOAD) lol = Trace(trace_prefix='LOL_AI', type=ApplicationType.ONLINE_GAME) netease = Trace(trace_prefix='netease_game', type=ApplicationType.ONLINE_GAME) tencent = Trace(trace_prefix='tencent_game_na', type=ApplicationType.ONLINE_GAME) TRACES=[youtube, bilibili, skype, wechat, mac_app_store, google_drive, lol, netease, tencent] def get_tcp_udp_info(trace_name, feature_level, file_type): udp_filename = os.path.join(LOCAL_DATA_PATH, '{trace_name}_{udp}_{feature_level}.{file_type}' .format(trace_name=trace_name, udp=Protocol.UDP, feature_level=feature_level, file_type=file_type)) tcp_filename = os.path.join(LOCAL_DATA_PATH, '{trace_name}_{tcp}_{feature_level}.{file_type}' .format(trace_name=trace_name, tcp=Protocol.TCP, feature_level=feature_level, file_type=file_type)) udp_info, tcp_info = pd.read_csv(udp_filename).dropna(axis=0, how='any'), pd.read_csv(tcp_filename).dropna(axis=0, how='any') return 
tcp_info.shape[0], udp_info.shape[0] def get_app_type_feature(trace, file_type): tcp_flow_num, udp_flow_num = get_tcp_udp_info(trace.trace_prefix, FeatureLevel.FLOW, FEATURE_FILE_TYPE) category = trace.type return { 'category':category, 'total number flow': tcp_flow_num+udp_flow_num, 'tcp flow number': tcp_flow_num, 'udp flow number': udp_flow_num } flow_per_app_type = pd.DataFrame(columns=['category', 'total number flow', 'tcp flow number', 'udp flow number']) for trace in TRACES: flow_feature = get_app_type_feature(trace, FEATURE_FILE_TYPE) flow_per_app_type = flow_per_app_type.append(flow_feature, ignore_index=True) flow_per_app_type = flow_per_app_type.groupby('category').sum().reset_index() flow_per_app_type[['total number flow','tcp flow number','udp flow number']] = flow_per_app_type[['total number flow','tcp flow number','udp flow number']].astype(int) flow_per_app_type ```
github_jupyter
# Introduction to Python ## Pandas Intro Some examples from [here](https://towardsdatascience.com/40-examples-to-master-pandas-c69d058f434e) ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import os %matplotlib inline datapath = "../Data/" ``` ## Pandas Data Structures: [Series](https://pandas.pydata.org/pandas-docs/stable/reference/series.html) Creating Series ``` obj = pd.Series([4, 12, -5, 3, 5]) obj #dir(obj) ``` Exporting to Numpy array ``` obj.values ``` Examining the index object ``` obj.index ``` Redefining the index ``` obj.index = ['Bob', 'Steve', 'Jeff', 'Ryan', 'Fernie'] obj ``` Creating series and passing the inidex as parameter ``` obj2 = pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c']) obj2 ``` Slicing and accessing elements in the series ``` obj2['c'] obj2[['c', 'a', 'd']] ``` Slicing using boolean expressions ``` obj2[obj2 < 0] ``` Scalar operations with the series ``` obj2 * 2 np.exp(obj2) ``` Creating Series from dictionaries ``` sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000} obj3 = pd.Series(sdata) obj3 states = ['California', 'Ohio', 'Oregon', 'Texas'] obj4 = pd.Series(sdata, index=states) obj4 ``` isnull() and notnull() methods ``` pd.isnull(obj4) pd.notnull(obj4) obj3.add(obj4, fill_value=10) obj4.name = 'Population' obj4.index.name = 'State' obj4 ``` ## Pandas Data Structures: [Date Time Range](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html) Specify start and end, with the default daily frequency. ``` ts_idx1 = pd.date_range(start='1/1/2018', end='1/08/2018') ts_idx1 ``` Specify start and periods, the number of periods (days). ``` ts_idx2 = pd.date_range("20160101", periods=10, freq='D') ts_idx2 ``` Specify end and periods, the number of periods (days). ``` ts_idx3 = pd.date_range(end='1/1/2018', periods=8) ts_idx3 ``` Specify start, end, and periods; the frequency is generated automatically (linearly spaced). 
``` ts_idx4 = pd.date_range(start='2018-04-24', end='2018-04-27', periods=3) ts_idx4 ``` Other parameters ``` pd.date_range(start='1/1/2018', periods=5, freq='M') pd.date_range(start='1/1/2018', periods=5, freq='3M') pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo') pd.date_range(start='2017-01-01', end='2017-01-04', closed=None) pd.date_range(start='2017-01-01', end='2017-01-04', closed='left') pd.date_range(start='2017-01-01', end='2017-01-04', closed='right') ``` ## Pandas Data Structures: [Dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/frame.html) Creating Dataframe from a dictionary of lists ``` data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'], 'year': [2000, 2001, 2002, 2001, 2002], 'pop': [1.5, 1.7, 3.6, 2.4, 2.9]} frame = pd.DataFrame(data) frame #dir(frame) print(type(frame)) print(type(frame['state'])) ``` Specifying the names of columns ``` d = pd.DataFrame(data, columns=['year', 'state', 'pop']) d ``` Making a column as the index ``` d.set_index('year', inplace=True, drop=False) d ``` Dropping a column ``` d.drop('year', axis=1, inplace=True) d ``` Dropping one or some lines ``` d.drop(2000, axis=0, inplace=False) d d.drop(2000, axis=0, inplace=True) d ``` Slicing by column / index ``` d['pop'] d.loc[:,'pop'] d.loc[2001] d.loc[[2001,2002], ["state", "pop"]] d.iloc[[0,1],[1]] d.iloc[0:3,[0,1]] ``` Accessing index and columns objects ``` d.index d.columns ``` Using alternate notation to access columns as a property ``` d['state'] d.state ``` Slicing with boolean conditions (&, |, ==, !=) ``` d = d[(d["state"] != 'Nevada') | (d.index != 2002)] d ``` Creating Dataframes from existing structures ``` frame2 = pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'], index=['one', 'two', 'three', 'four', 'five']) frame2 ``` Creating new column ``` frame2['new'] = 13 frame2 frame2.loc['three'] frame2.loc['three', 'pop'] frame2.iloc[2] frame2.iloc[2,1] pop = {'Nevada': {2001: 2.4, 2002: 2.9},'Ohio': {2000: 1.5, 2001: 
1.7, 2002: 3.6}} frame3 = pd.DataFrame(pop) frame3 frame2['debt'] = 16.5 frame2 frame2['debt'] = np.arange(5.) frame2 ``` Creating a column from a series ``` val = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five']) frame2['debt'] = val frame2 ``` Creating a column from a bolean expression ``` frame2['eastern'] = frame2.state == 'Ohio' frame2 ``` Deleting a column ``` del frame2['eastern'] frame2.columns pd.DataFrame(pop, index=[2001, 2002, 2003]) pdata = {'Ohio': frame3['Ohio'][:-1],'Nevada': frame3['Nevada'][:2]} pd.DataFrame(pdata) frame3.index.name = 'year' frame3.columns.name = 'state' frame3 pop = {'Nevada': {2001: 2.4, 2002: 2.9},'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}} frame4 = pd.DataFrame(pop) frame4 frame4.loc[2000,'Nevada'] = 2 frame4 frame5 = pd.concat([frame4, frame4], axis=0) frame5.iloc[3:,1] = 32 frame5 frame5.drop_duplicates(['Nevada'], inplace=True) frame5 dates = pd.date_range("20160101", periods=10, freq='D') dates data = np.random.random((10,3)) data column_names = ['Column1', 'Column2', 'Column3'] df = pd.DataFrame(data, index=dates, columns=column_names) df.head(10) df[1:3] df['20160104':'20160107'] df.loc['2016-01-01':'2016-01-11','Column2'] df.iloc[0:11,1] df[(df.index > '20160102') & (df.index < '20160106')] df.query('(Column1 < Column2) & (Column1 < Column3)') df.loc['20160101':'20160102',['Column1','Column3']] df.iloc[3:5, 0:2] df.info() df.describe() df.sort_index(axis=0, ascending=True,) # inplace=True) df[sorted(df.columns)] df.sort_values(by='Column2') dates1 = pd.date_range("20160101", periods=6) data1 = np.random.random((6,2)) column_names1 = ['ColumnA', 'ColumnB'] dates2 = pd.date_range("20160104", periods=7) data2 = np.random.random((7,2)) column_names2 = ['ColumnC', 'ColumnD'] df1 = pd.DataFrame(data1, index=dates1, columns=column_names1) df2 = pd.DataFrame(data2, index=dates2, columns=column_names2) df1 df2 #https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.join.html df1.join(df2, how='left') 
df1.join(df2, how='right') df1.join(df2, how='outer') df1.join(df2, how='inner') df2['ColumnA'] = df1.ColumnA+1 #Example when columns have the same name df1.join(df2, how='left', rsuffix='_df2') del df2['ColumnA'] df3 = df1.join(df2) # add a column to df to group on df3['ProfitLoss'] = pd.Series(['Profit', 'Loss', 'Profit', 'Same', 'Profit', 'Loss', 'Profit', 'Profit', 'Same', 'Loss'], index=dates) df3['Student'] = pd.Series(['Alex', 'Alex', 'Alex', 'Marcos', 'Hannah', 'Hannah', 'Marcos', 'Hannah', 'Hannah', 'Barbara'], index=dates) df3 grupos = df3.groupby('ProfitLoss')#.mean() grupos.mean() ``` Verifying Python's ordering heuristics ``` max(['name1', 'name2', 'Name3']) df4 = df3.groupby(['Student','ProfitLoss']).max() df4 df4.index.get_level_values('Student') df4.loc[('Hannah','Profit'), 'ColumnA'] ``` ### [Pandas Useful Functions](https://pandas.pydata.org/pandas-docs/stable/reference/general_functions.html) ``` df3 ``` ### Transpose ``` df3.T df3.transpose() ``` ### [idmin & idmax](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.idxmax.html) ``` df3['ColumnA'].idxmax() df3.loc[df3['ColumnA'].idxmax()] max(df3['ColumnA']) df3['ColumnA'].idxmin() df3.loc[df3['ColumnA'].idxmin()] min(df3['ColumnA']) ``` ### [Not Equal - ne](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ne.html) ``` df = pd.DataFrame() df['x'] = [0,0,0,0,0,0,1,2,3,4,5,6,7] df['x'].ne(0) ``` ### Nsmallest ``` df['x'].nsmallest(3) df.nsmallest(3, 'x') ``` ### Nlargest ``` df['x'].nlargest(3) df.nlargest(3, 'x') ``` ### Is in ``` df.isin([0,2]).head(10) df[df.isin([0,2])] #.dropna() df[~df.isin([0,2])].dropna() # Create a test dataframe # Untidy dataframe # x : Subjects # y : Student names marks = pd.DataFrame(np.random.randint(0, 100, size = (20,5)), columns = ['Maths', 'Physics','Chemistry', 'Biology', 'Computer_Science']) marks['Student'] = ['Student ' + str(i) for i in range(1,21)] marks['Test'] = np.random.choice(['T1', 'T2', 'T3'], size=len(marks)) 
marks = marks[['Test','Student','Maths', 'Physics','Chemistry', 'Biology', 'Computer_Science']] display(marks.head()) ``` ### [Agg](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.agg.html) ``` marks[['Maths', 'Physics','Chemistry', 'Biology', 'Computer_Science']].agg(['sum', 'min', 'mean']) ``` ### Group By ``` grouped = marks.groupby('Test') for group in grouped: print(group[0]) display(group[1].head()) grouped['Maths'].mean() grouped2 = grouped.agg({"Student": "nunique"}) grouped2 = grouped2.reset_index() print(grouped2) ``` ### Pivot ``` pivot = marks.pivot(index='Test', columns='Student', values='Chemistry') pivot.head() ``` ### [Melt](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.melt.html) ``` marks.head(10) tidy = pd.melt(marks, id_vars = 'Student', value_name = 'Frequency') tidy.tail(10) tidy.info() ```
github_jupyter
(MPRIMALDUALBARRERALOG)= # 4.5 Método primal-dual de barrera logarítmica (BL) ```{admonition} Notas para contenedor de docker: Comando de docker para ejecución de la nota de forma local: nota: cambiar `<ruta a mi directorio>` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker. `docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_optimizacion_2 -p 8888:8888 -p 8787:8787 -d palmoreck/jupyterlab_optimizacion_2:3.0.0` password para jupyterlab: `qwerty` Detener el contenedor de docker: `docker stop jupyterlab_optimizacion_2` Documentación de la imagen de docker `palmoreck/jupyterlab_optimizacion_2:3.0.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/optimizacion_2). ``` --- Nota generada a partir de [liga1](https://drive.google.com/file/d/16-_PvWNaO0Zc9x04-SRsxCRdn5fxebf2/view), [liga2](https://drive.google.com/file/d/1oulU1QAKyLyYrkpJBLSPlbWnKFCWpllX/view), [liga3](https://drive.google.com/file/d/1RMwUXEN_SOHKue-J9Cx3Ldvj9bejLjiM/view) ```{admonition} Al final de esta nota el y la lectora: :class: tip * Conocerá el método primal dual de barrera logarítmica para resolver programas lineales. * Aprenderá que tal método: * es un caso particular de métodos de penalización para resolver problemas de optimización con restricciones. * tiene la capacidad de resolver problemas convexos. * trabaja con bloques de matrices lo que ayuda a aprovechar operaciones vectoriales o matriciales. * puede implementarse con cómputo en paralelo. ``` El método primal-dual de barrera logarítmica (BL) es un método iterativo que realiza un manejo de las variables primales y duales del problema de optimización a resolver. Se le clasifica como un método por puntos interiores. ```{sidebar} Un poco de historia ... El [método símplex](https://en.wikipedia.org/wiki/Simplex_algorithm) desarrollado por Dantzig en los $40$'s hizo posible que se formularan y analizaran modelos grandes en una forma sistemática y eficiente. 
Hoy en día continúa siendo uno de los métodos más utilizados para resolver programas lineales. No obstante puede ser ineficiente en problemas lineales "patológicos" (ver [Klee-Minty cube](https://en.wikipedia.org/wiki/Klee%E2%80%93Minty_cube)) pues el tiempo para resolver tales problemas es exponencial respecto al tamaño del problema (medido como el número de variables y la cantidad de almacenamiento para los datos del problema). Para la mayoría de problemas prácticos el método símplex es mucho más eficiente que estos casos "patológicos" pero esto motivó la investigación y desarrollo de nuevos algoritmos con mejor desempeño. En 1984 Karmarkar publicó el [algoritmo](https://en.wikipedia.org/wiki/Karmarkar%27s_algorithm) que lleva su mismo nombre que tiene una complejidad polinomial y en la práctica resultó ser eficiente. Pertenece a la clase de métodos con el nombre de [puntos interiores](https://en.wikipedia.org/wiki/Interior-point_method). Hay diferentes tipos de métodos por puntos interiores siendo los de la clase primal-dual ampliamente usados en la práctica. ``` ## Métodos por puntos interiores (PI) Los métodos por puntos interiores (PI) son esquemas iterativos que en un inicio se utilizaron para resolver PL's, sin embargo, se ha extendido su uso al caso no lineal. Por ejemplo, distintos tipos de métodos por PI han sido usados para resolver problemas de optimización convexos, ver {ref}`problemas de optimización convexa en su forma estándar o canónica <PROBOPTCONVEST>`. ```{margin} Recuérdese que nombramos problemas de optimización con restricciones *large scale* a aquellos problemas de optimización que tienen un número de variables y restricciones mayor o igual a $10^5$ (ambas). ``` En cada iteración de los métodos PI las restricciones de desigualdad del problema de optimización se satisfacen de forma estricta. 
Cada iteración es costosa de calcular y realiza avance significativo a la solución en contraste con el método símplex que requiere un gran número de iteraciones no costosas. Una característica que tienen los métodos PI es que los problemas *large scale* no requieren muchas más iteraciones que los problemas *small scale* a diferencia del método símplex. Sin embargo para problemas *small scale* en general realizan más iteraciones que el método símplex. En cada iteración el método símplex se mueve de la solución FEV actual a una solución FEV adyacente por una arista de la frontera de la región factible, ver {ref}`método símplex <METODOSIMPLEX>`. Los problemas del tipo *large scale* tienen una cantidad enorme de soluciones FEV. Para ver esto piénsese en un PL al que se le van añadiendo restricciones funcionales. Entonces se añadirán aristas y por tanto soluciones FEV. Los métodos PI evitan tal comportamiento pues avanzan por el interior de la región factible hacia los puntos óptimos y tiene muy poco efecto el ir añadiedo restricciones funcionales al PL para el desempeño de los métodos PI. ```{admonition} Observación :class: tip Los métodos PI han mostrado "buena" eficiencia (en términos del número de iteraciones realizadas) en resolver problemas de optimización *large scale*. Además son métodos que pueden implementarse para procesamiento con cómputo en paralelo. ``` Los métodos PI conforme avanzan en las interaciones aproximan a los puntos óptimos en el límite. 
Por ejemplo, para el {ref}`ejemplo prototipo <EJPROTOTIPO>` de un programa lineal (PL) a continuación se presenta una trayectoria obtenida por un método PI que se aproxima a la solución óptima $(2, 6)$: $$\displaystyle \max_{x \in \mathbb{R}^2} 3x_1 + 5x_2$$ $$\text{sujeto a: }$$ $$x_1 \leq 4$$ $$2x_2 \leq 12$$ $$3x_1 + 2x_2 \leq 18$$ $$x_1 \geq 0, x_2 \geq 0$$ ``` import numpy as np import matplotlib.pyplot as plt np.set_printoptions(precision=3, suppress=True) #x_1 ≤ 4 point1_x_1 = (4,0) point2_x_1 = (4, 10) point1_point2_x_1 = np.row_stack((point1_x_1, point2_x_1)) #x_1 ≥ 0 point3_x_1 = (0,0) point4_x_1 = (0, 10) point3_point4_x_1 = np.row_stack((point3_x_1, point4_x_1)) #2x_2 ≤ 12 or x_2 ≤ 6 point1_x_2 = (0, 6) point2_x_2 = (8, 6) point1_point2_x_2 = np.row_stack((point1_x_2, point2_x_2)) #x_2 ≥ 0 point3_x_2 = (0, 0) point4_x_2 = (8, 0) point3_point4_x_2 = np.row_stack((point3_x_2, point4_x_2)) #3x_1 + 2x_2 ≤ 18 x_1_region_1 = np.linspace(0,4, 100) x_2_region_1 = 1/2*(18 - 3*x_1_region_1) x_1 = np.linspace(0,6, 100) x_2 = 1/2*(18 - 3*x_1) plt.plot(point1_point2_x_1[:,0], point1_point2_x_1[:,1], point3_point4_x_1[:,0], point3_point4_x_1[:,1], point1_point2_x_2[:,0], point1_point2_x_2[:,1], point3_point4_x_2[:,0], point3_point4_x_2[:,1], x_1, x_2) optimal_point = (2, 6) plt.scatter(optimal_point[0], optimal_point[1], marker='o', s=150, facecolors="none", edgecolors='b') plt.legend(["$x_1 = 4$", "$x_1 = 0$", "$2x_2 = 12$", "$x_2 = 0$", "$3x_1+2x_2 = 18$", "(óptimo coordenada 1, óptimo coordenada 2)"], bbox_to_anchor=(1, 1)) point_1_interior_points = (1, 2) point_2_interior_points = (1.27, 4) point_3_interior_points = (1.38, 5) point_4_interior_points = (1.56, 5.5) points_interior_points = np.row_stack((point_1_interior_points, point_2_interior_points, point_3_interior_points, point_4_interior_points)) plt.plot(points_interior_points[:, 0], points_interior_points[:, 1], marker='o', color="blue" ) plt.fill_between(x_1_region_1, 0, x_2_region_1, 
where=x_2_region_1<=6, color="plum") x_1_region_2 = np.linspace(0,2, 100) plt.fill_between(x_1_region_2, 0, 6, color="plum") plt.title("Región factible del PL") plt.show() ``` ```{margin} Recuérdese que los parámetros de un PL son $b_i, c_i, a_{ij}$. ``` Aunque los métodos PI son una buena alternativa para resolver PL's perdemos ventajas que tiene el método símplex como es el análisis de sensibilidad y el análisis posterior que puede realizarse al modificar los parámetros del PL. Ver las referencias al final de la nota para tales análisis. ## Método primal-dual Se describirán dos ideas que se utilizan en los métodos primal dual y posteriormente una tercera idea que utiliza la función de barrera logarítmica (FBL). Para esto, considérese la forma estándar de un PL (PLE): $$ \displaystyle \min_{x \in \mathbb{R}^n} c^Tx\\ \text{sujeto a:} \\ Ax=b\\ x \geq 0 $$ donde: $A \in \mathbb{R}^{m \times n}, b \in \mathbb{R}^m$, $m < n$ con *rank* completo por renglones y las restricciones se interpretan de una forma *pointwise*. ```{margin} Las restricciones $Ax = b$ se pueden escribir con funciones $h: \mathbb{R}^n \rightarrow \mathbb{R}$ , $h_i(x) = b_i-a_i ^Tx$, $a_i$ $i$-ésimo renglón de $A \in \mathbb{R}^{m \times n}$ y $b_i$ $i$-ésima entrada de $b$ para $i=1, \cdots, m$ ``` La función Lagrangiana del problema anterior es: $$\mathcal{L}(x, \lambda, \nu) = f_o(x) + \displaystyle \sum_{i=1}^n \lambda_i f_i(x) + \sum_{i=1}^m \nu_i h_i(x) = c^Tx + \lambda^T(-x) + \nu^T(b-Ax)$$ donde: $\mathcal{L}: \mathbb{R}^n \times \mathbb{R}^n \times \mathbb{R}^m \rightarrow \mathbb{R}$. El problema dual asociado es: ```{margin} El problema primal es: $\displaystyle \min_{x \in \mathbb{R}^n} c^Tx \\ \text{sujeto a:}\\ Ax = b \\ x \geq 0 $ $A \in \mathbb{R}^{m \times n}$ y *rank* de A igual a $m < n$. 
``` $$\displaystyle \max_{\nu \in \mathbb{R}^m, \lambda \in \mathbb{R}^n} b^T \nu \\ \text{sujeto a :} \\ c - A^T \nu - \lambda = 0 \\ \lambda \geq 0 $$ Las condiciones KKT son: ```{margin} Ver {ref}`las condiciones KKT para un PL en su forma estándar<CONDKKTPLESTANDAR>`. ``` $$ \begin{eqnarray} \nabla_x \mathcal{L}(x, \lambda, \nu) &=& c - A^T\nu - \lambda = 0 \nonumber \\ \lambda^Tx &=& 0 \nonumber \\ Ax &=& b \nonumber \\ -x &\leq& 0 \nonumber \\ \lambda &\geq& 0 \end{eqnarray} $$ Los métodos de la clase primal-dual encuentran soluciones $(x^*, \lambda^*, \nu^*)$ para las igualdades anteriores y modifican las direcciones de búsqueda y tamaños de paso para que las desigualdades se satisfagan de forma **estricta** en cada iteración. En los métodos de la clase primal-dual reescribimos las condiciones KKT de optimalidad anteriores mediante una función $F: \mathbb{R}^{2n + m} \rightarrow \mathbb{R}^{2n+m}$ dada por: $$F(x, \lambda, \nu ) = \left [ \begin{array}{c} c - A^T \nu - \lambda \\ X \Lambda e \\ b - Ax \end{array} \right ]$$ y resolvemos la ecuación **no lineal** $F(x, \lambda, \nu )=0$ para $(x, \lambda) \geq 0$, donde: $X = \text{diag}(x_1, \dots, x_n)$, $\Lambda = \text{diag}(\lambda_1, \dots, \lambda_n)$ y $e$ es un vector de $1$'s en $\mathbb{R}^n$. Además en cada iteración se cumple $x^{(k)} > 0$ y $\lambda^{(k)} > 0$ para $(x^{(k)}, \lambda^{(k)}, \nu^{(k)})$, por esto tales métodos son considerados como puntos interiores. Como la mayoría de los métodos iterativos en optimización, los métodos primal-dual tienen un procedimiento para determinar la dirección de búsqueda y una cantidad que debe ser monitoreada cuyo valor alcance un valor objetivo. En el caso de los PLE's tal cantidad es la *duality gap* medida como: $\lambda^Tx$, ver {ref}`brecha dual <BRECHADUAL>`. 
(PRIMIDEAMETPRIMDUAL)= ### Primera idea: determinar la dirección de búsqueda ```{margin} Sistema de ecuaciones no lineales a resolver: $F(x, \lambda, \nu ) = \left [ \begin{array}{c} c - A^T \nu - \lambda \\ X \Lambda e \\ b - Ax \end{array} \right ] = 0$ ``` La dirección de búsqueda se determina aplicando el método de Newton al sistema de ecuaciones no lineales que se muestra en el margen del PLE. Por tanto, se resuelve el sistema de ecuaciones lineales: $$J_F(x, \lambda, \nu) \left [ \begin{array}{c} \Delta x \\ \Delta \lambda \\ \Delta \nu \end{array} \right ] = - F(x, \lambda, \nu)$$ donde: $J_F$ es la Jacobiana de $F$ cuya expresión es: $$J_F(x, \lambda, \nu) = \left [ \begin{array}{ccc} 0 & I & -A^T \\ \Lambda & X & 0 \\ -A & 0 & 0 \end{array} \right ].$$ para el vector de incógnitas $\left [ \begin{array}{c} \Delta x \\ \Delta \lambda \\ \Delta \nu \end{array} \right ]$. Una vez calculado tal vector de incógnitas se realiza la actualización: $$\left [ \begin{array}{c} x \\ \lambda \\ \nu \end{array} \right ]^{(k+1)} = \left [ \begin{array}{c} x \\ \lambda \\ \nu \end{array} \right ]^{(k)} + \left [ \begin{array}{c} \Delta x \\ \Delta \lambda \\ \Delta \nu \end{array} \right ]$$ donde: $k$ hace referencia a la $k$-ésima iteración. Ver {ref}`Sistema de ecuaciones no lineales<SISTECNOLINEALES>`. ```{admonition} Observación :class: tip Si bien podría elegirse otra dirección de búsqueda, la dirección de Newton (o variantes de ésta) se prefiere por sus propiedades de convergencia e invarianza ante transformaciones afín. 
``` Si denotamos $r_d = c - A^T \nu - \lambda, r_p = b - Ax$ como el residual para factibilidad dual y residual para factibilidad primal respectivamente entonces el sistema de ecuaciones lineales a resolver es: $$\left [ \begin{array}{ccc} 0 & I & -A^T \\ \Lambda & X & 0 \\ -A & 0 & 0 \end{array} \right ] \left [ \begin{array}{c} \Delta x \\ \Delta \lambda \\ \Delta \nu \end{array} \right ] = - \left [ \begin{array}{c} r_d \\ X \Lambda e \\ r_p \end{array} \right ]$$ ```{admonition} Comentarios * El sistema de ecuaciones lineales anterior para problemas *large scale* no se construye pues es un sistema cuadrado de tamaño $2n + m \times 2n + m$ y se resuelve reduciéndolo a sistemas de ecuaciones equivalentes. Representa el paso más costoso del método primal-dual. * Se pueden eliminar los signos negativos que están en los bloques de la matriz del sistema de ecuaciones lineales anterior que contienen $A, A^T$ pero por consistencia con lo desarrollado en la nota de dualidad para un PL se mantienen los signos negativos (si se eliminan también debe de ajustarse el lado derecho del sistema). Esta modificación se relaciona con la definición de la función Lagrangiana. ``` (SEGUNIDEAPRIMDUAL)= ### Segunda idea: cortar el paso Si se toma un paso completo es muy posible que en la siguiente iteración se encuentre muy cerca de alguna de las fronteras de restricción o bien se salga de la región factible. Para esto se define un parámetro $t^{(k)} \in (0, 1]$ y por tanto la actualización es: $$\left [ \begin{array}{c} x \\ \lambda \\ \nu \end{array} \right ]^{(k+1)} = \left [ \begin{array}{c} x \\ \lambda \\ \nu \end{array} \right ]^{(k)} + t^{(k)} \left [ \begin{array}{c} \Delta x \\ \Delta \lambda \\ \Delta \nu \end{array} \right ]$$ donde: $k$ hace referencia a la $k$-ésima iteración. 
```{admonition} Comentario El parámetro $t^{(k)}$ se calcula con metodologías como búsqueda de línea o regiones de confianza, ver [line search](https://en.wikipedia.org/wiki/Line_search), {ref}`método de búsqueda de línea por backtracking <MBUSLINBACK>`, [trust region](https://en.wikipedia.org/wiki/Trust_region). ``` ## Método primal-dual de barrera logarítmica (BL) ### Tercera idea: reducir la *duality gap* y centrar. Uso de la función de barrera logarítmica (FBL) En cada iteración los métodos primal-dual buscan reducir la *duality gap* o bien mantenerse "cerca" de la trayectoria nombrada trayectoria central. ```{margin} Sistema de ecuaciones lineales $\left [ \begin{array}{ccc} 0 & I & -A^T \\ \Lambda & X & 0 \\ -A & 0 & 0 \end{array} \right ] \left [ \begin{array}{c} \Delta x \\ \Delta \lambda \\ \Delta \nu \end{array} \right ] = - \left [ \begin{array}{c} r_d \\ X \Lambda e \\ r_p \end{array} \right ]$ ``` ```{admonition} Comentario Recuérdese que la *duality gap* en un PLE para el par $(x, \nu)$ primal-dual factible está dada por la diferencia: $c^Tx - b^T \nu$. La *duality gap* en un PLE es igual a $\lambda^Tx$. En el sistema de ecuaciones lineales que se muestra en el margen se representa cada sumando de $\lambda^Tx$ con el producto $X \Lambda e$ (recuérdese $X, \Lambda$ son matrices diagonales). ``` La trayectoria central se define a partir de la FBL, ver [Barrier function](https://en.wikipedia.org/wiki/Barrier_function). La definición siguiente se da para un POCE de forma general. 
```{margin} Las restricciones $Ax = b$ se pueden escribir con funciones $h_i: \mathbb{R}^n \rightarrow \mathbb{R}$ , $h_i(x) = b_i-a_i ^Tx$, $a_i$ $i$-ésimo renglón de $A \in \mathbb{R}^{p \times n}$ y $b_i$ $i$-ésima entrada de $b$ para $i=1, \cdots, p$ ``` ```{admonition} Definición Considérese el problema de optimización convexa en la forma estándar (POCE): $$ \begin{eqnarray} \displaystyle \min_{x \in \mathbb{R}^n} &f_o(x)& \nonumber \\ &\text{sujeto a:}& \nonumber\\ f_i(x) &\leq& 0 \quad i=1,\dots,m \nonumber \\ Ax &=& b \end{eqnarray} $$ con $A \in \mathbb{R}^{p \times n}$ y *rank* de $A$ igual a $p < n$. Se define la función de barrera logarítmica (FBL) como: $$\phi(x) =-\displaystyle \sum_{i=1}^m \log(-f_i(x))$$ ``` ```{sidebar} Un poco de historia ... La metodología para resolver el problema de barrera logarítmica (PBL) está fundamentada en la *sequential unconstrained minimization technique (SUMT)*, [A. V. Fiacco, G. P. McCormick, 1965](https://www.jstor.org/stable/168637?seq=1). Es una técnica para resolver problemas no lineales sin restricciones que genera una secuencia de puntos interiores factibles que convergen a la solución del problema. Se eligen funciones de barrera con propiedades como la convexidad. Hay versiones de la SUMT para puntos exteriores que inician con puntos no factibles y vía la penalización se busca la convergencia hacia la región factible. En cada iteración de SUMT se define un valor del parámetro de barrera y se resuelve un problema de optimización más sencillo que el original con el método de Newton. La solución de tal problema se utiliza para definir puntos iniciales del siguiente problema a resolver con un valor del parámetro de barrera diferente. A medida que se avanza en las iteraciones la función objetivo del PBL se aproxima cada vez más a $f_o$, al valor óptimo y al conjunto óptimo. 
``` El POCE se resuelve planteando el siguiente problema: $$ \displaystyle \min_{x \in \mathbb{R}^n} f_B(x|t_B) \\ \text{sujeto a:} \\ Ax = b $$ donde: $f_B(x|t_B) = f_o(x) + \frac{1}{t_B} \phi(x) = f_o(x) - \frac{1}{t_B} \displaystyle \sum_{i=1}^m \log(-f_i(x))$, $\phi: \mathbb{R}^n \rightarrow \mathbb{R}$ con $t_B$ un parámetro positivo que nombramos **parámetro de barrera**. Denotamos a este problema como **problema de barrera logarítmica (PBL)**. ``` x = np.linspace(-2, -.1, 100) log_barrier = -np.log(-x) t_B1 = 0.2 t_B2 = 0.5 t_B3 = 1 t_B4 = 2 t_B5 = 10 plt.plot(x, 1/t_B1*log_barrier, "r", x, 1/t_B2*log_barrier, "b", x, 1/t_B3*log_barrier, "g", x, 1/t_B4*log_barrier, "m", x, 1/t_B5*log_barrier, "c") plt.legend(["$t_{B1}=0.2$", "$t_{B2}=0.5$", "$t_{B3}=1$", "$t_{B4}=2$", "$t_{B5}=10$"], bbox_to_anchor=(1,1)) plt.axhline(color="black") plt.axvline(color="black") plt.title("Gráfica de la FBL variando el parámetro $t_B$") plt.show() ``` Valores más grandes de $t_B$ hacen que $f_B(x|t_B)$ tienda a $f_o(x)$. Como se observa en la gráfica anterior al elegir un valor de $t_B$ cada vez más grande se tiene: $f_B(x|t_B) = f_o(x) + \frac{1}{t_B} \phi(x) \approx f_o(x)$. ```{margin} El POCE recuérdese es: $ \begin{eqnarray} \displaystyle \min_{x \in \mathbb{R}^n} &f_o(x)& \nonumber \\ &\text{sujeto a:}& \nonumber\\ f_i(x) &\leq& 0 \quad i=1,\dots,m \nonumber \\ Ax &=& b \end{eqnarray} $ y el PBL es: $ \displaystyle \min_{x \in \mathbb{R}^n} f_B(x|t_B) \\ \text{sujeto a:} \\ Ax = b $ con $A \in \mathbb{R}^{p \times n}$ y *rank* de $A$ igual a $p < n$, $\begin{eqnarray} f_B(x|t_B) &=& f_o(x) + \frac{1}{t_B} \phi(x) \nonumber \\ &=& f_o(x) - \frac{1}{t_B} \displaystyle \sum_{i=1}^m \log(-f_i(x)) \end{eqnarray} $. 
``` ```{admonition} Comentarios * La función $\phi(x) = -\frac{1}{t_B} \log(-x)$ para $x < 0$ es convexa, diferenciable y aproxima a la función indicadora: $$I(x) = \begin{cases} \infty \text{ si } x > 0 \\ 0 \text{ si } x \leq 0 \end{cases}$$ la cual es una función discontinua: <img src="https://dl.dropboxusercontent.com/s/i4t3j8c9amzue1n/indicator_func.png?dl=0" heigth="250" width="250"> En el dibujo anterior sólo se visualiza para el eje horizontal el intervalo $(-\infty, 0)$. * La función indicadora del punto anterior ayuda a reescribir el POCE como: $$\displaystyle \min_{x \in \mathbb{R}^n} f_o(x) + \displaystyle \sum_{i=1}^m I(f_i(x))$$ $$\text{sujeto a:}$$ $$Ax = b$$ por esto resolver el PBL es equivalente a resolver el POCE para valores más grandes de $t_B$. * La FBL es un caso particular de funciones de barrera que penalizan al no satisfacer las restricciones de desigualdad, ver [Penalty method](https://en.wikipedia.org/wiki/Penalty_method). * En general las funciones de barrera deben tener las siguientes propiedades para $x$ primal factibles: 1. Tener valores "pequeños" si $x$ está "lejos" de la frontera de la región factible. 2. Tener valores "grandes" si $x$ está "cerca" de la frontera de la región factible. 3. Tener propiedades como convexidad o diferenciabilidad (ventajas al tener tales propiedades). Por lo anterior las funciones de barrera evitan que se cruce o llegue a la frontera de la región factible del problema primal. * Otra función de barrera para un PL es: $\phi(x) = -\displaystyle \sum_{i=1}^m \frac{1}{f_i(x)}$ para $x$ factibles. * Los problemas de optimización convexos con únicamente restricciones de igualdad pueden resolverse aplicando extensiones del método de Newton. 
``` ### Trayectoria central determinada por los puntos centrales ```{margin} El PBL para un POCE recuérdese es: $ \displaystyle \min_{x \in \mathbb{R}^n} f_B(x|t_B) \\ \text{sujeto a:} \\ Ax = b $ con $A \in \mathbb{R}^{p \times n}$ y *rank* de $A$ igual a $p < n$, $\begin{eqnarray} f_B(x|t_B) &=& f_o(x) + \frac{1}{t_B} \phi(x) \nonumber \\ &=& f_o(x) - \frac{1}{t_B} \displaystyle \sum_{i=1}^m \log(-f_i(x)) \end{eqnarray} $. ``` ```{admonition} Definición Para cada valor del parámetro de barrera $t_B$, se definen los **puntos centrales** $x^*(t_B)$ como la solución del PBL y el conjunto de puntos centrales se le nombra trayectoria central, *central path*. ``` Revisemos las condiciones KKT de optimalidad que deben cumplir los puntos centrales para un PLE: $$ \displaystyle \min_{x \in \mathbb{R}^n} c^Tx\\ \text{sujeto a:} \\ Ax=b\\ x \geq 0 $$ con $A \in \mathbb{R}^{m \times n}$ y *rank* de $A$ igual a $m < n$. Se tiene: ```{margin} Recuérdese que en un PLE $f_i(x) = - x_i \forall i=1, \cdots, n$. ``` $$ \begin{eqnarray} \phi(x) &=& -\displaystyle \sum_{i=1}^n \log(-f_i(x)) \nonumber \\ &=& - \sum_{i=1}^n \log(x_i) \end{eqnarray} $$ y por tanto el PBL para el PLE (PBL-PLE) es: $$\displaystyle \min_{x \in \mathbb{R}^n} c^Tx - \frac{1}{t_B} \displaystyle \sum_{i=1}^n \log(x_i)$$ $$\text{sujeto a:}$$ $$Ax=b$$ La función Lagrangiana del PBL-PLE es: $$ \begin{eqnarray} \mathcal{L}_B(x, \nu) &=& f_B(x|t_B) + \sum_{i=1}^m \nu_i h_i(x) \nonumber \\ &=& c^Tx - \frac{1}{t_B} \displaystyle \sum_{i=1}^n \log(x_i) + \sum_{i=1}^m \nu_i(b_i-a_i ^Tx) \nonumber \\ &=& c^Tx - \frac{1}{t_B} \displaystyle \sum_{i=1}^n \log(x_i) + \nu^T(b-Ax) \nonumber \end{eqnarray} $$ con $a_i$ $i$-ésimo renglón de $A$ y $b_i$ $i$-ésima entrada de $b$. 
Las condiciones necesarias y suficientes KKT de optimalidad del PBL-PLE son: ```{margin} Recuérdese que las condiciones de KKT para un PLE son: $ \begin{eqnarray} \nabla_x \mathcal{L}(x, \lambda, \nu) &=& c - A^T\nu - \lambda = 0 \nonumber \\ \lambda^Tx &=& 0 \nonumber \\ Ax &=& b \nonumber \\ -x &\leq& 0 \nonumber \\ \lambda &\geq& 0 \end{eqnarray} $ ``` $$ \begin{eqnarray} \nabla_x \mathcal{L}_B(x, \nu) &=& c - A^T\nu - \frac{1}{t_B}d = 0 \nonumber \\ Ax &=& b \nonumber \\ \end{eqnarray} $$ donde: $d = X^{-1}e = \left [ \begin{array}{c} \frac{1}{x_1} \\ \vdots \\ \frac{1}{x_n} \\ \end{array} \right ]$. Ver {ref}`condiciones KKT para un PL en su forma estándar <CONDKKTPLESTANDAR>` (se muestran en el margen). Los puntos centrales $x^*(t_B)$ resuelven el PBL-PLE y por tanto satisfacen: $$ \begin{eqnarray} \nabla_x \mathcal{L}_B(x^*(t_B), \nu) &=& c - A^T\nu - \frac{1}{t_B} d(t_B) = 0 \nonumber \\ Ax^*(t_B) &=& b \nonumber \\ \end{eqnarray} $$ donde: $d(t_B) = X^{*-1}(t_B)e = \left [ \begin{array}{c} \frac{1}{x_1^*(t_B)} \\ \vdots \\ \frac{1}{x_n^*(t_B)} \\ \end{array} \right ]$. ### Relación entre las condiciones KKT de optimalidad del PLE y las del PBL-PLE Para establecer la relación entre las condiciones KKT de optimalidad del PLE y las del PBL-PLE considérese **sólo** en esta sección que el PBL-PLE es: $$\displaystyle \min_{x \in \mathbb{R}^n} t_B c^Tx - \displaystyle \sum_{i=1}^n \log(x_i)$$ $$\text{sujeto a:}$$ $$Ax=b$$ ```{margin} La FBL en el PBL-PLE recuérdese es: $ \begin{eqnarray} \phi(x) &=& -\displaystyle \sum_{i=1}^n \log(-f_i(x)) \nonumber \\ &=& - \sum_{i=1}^n \log(x_i) \end{eqnarray} $ ``` ```{admonition} Observación :class: tip Esta forma del PBL-PLE es equivalente a la revisada anteriormente en la que la FBL se divide por el parámetro $t_B$. Es una cuestión sólo de escritura matemática lo que se realiza a continuación. 
``` Las condiciones KKT son iguales a las revisadas en la sección anterior salvo la posición en la que se tiene el parámetro $t_B$: $$ \begin{eqnarray} \nabla_x \mathcal{L}_B(x^*(t_B), \hat{\nu}) &=& t_Bc - A^T\hat{\nu} - d(t_B) = 0 \nonumber \\ Ax^*(t_B) &=& b \nonumber \\ \end{eqnarray} $$ donde: $\hat{\nu} = t_B \nu$. ```{margin} Recuérdese que las condiciones de KKT para un PLE son: $ \begin{eqnarray} \nabla_x \mathcal{L}(x, \lambda, \nu) &=& c - A^T\nu - \lambda = 0 \nonumber \\ \lambda^Tx &=& 0 \nonumber \\ Ax &=& b \nonumber \\ -x &\leq& 0 \nonumber \\ \lambda &\geq& 0 \end{eqnarray} $ ``` Las condiciones KKT para un PLE se muestran en el margen y obsérvese que si $\lambda_i^*(t_B) = - \frac{1}{t_B f_i(x^*(t_B))} = - \frac{1}{t_B (-x_i^*(t_B))}$ con $x_i^*(t_B)$ $i$-ésima componente de $x^*(t_B)$ $\forall i = 1, \dots, n$ entonces se cumple: $$\lambda_i^*(t_B) > 0$$ pues por la definición de la FBL en el PBL-PLE debe cumplirse: $f_i(x) = -x_i < 0$ o bien para los puntos centrales $-x_i^*(t_B) < 0 \forall i=1, \dots, n$. Lo anterior resulta del dominio de la función $\log$ la cual está definida únicamente en $\mathbb{R}_{++}$ (reales positivos). Esto satisface las desigualdades de factibilidad primal y de factibilidad dual de las condiciones KKT de optimalidad. 
La holgura complementaria de las condiciones KKT de optimalidad para un PBL-PLE son: $$ \begin{eqnarray} \lambda^*(t_B)^Tx^*(t_B) &=& \displaystyle \sum_{i=1}^n \lambda_i^*(t_B) x_i^*(t_B) \nonumber \\ &=& \displaystyle \sum_{i=1}^n - \frac{x_i^*(t_B)}{t_B (-x_i^*(t_B))} \nonumber \\ &=& \displaystyle \sum_{i=1}^n \frac{1}{t_B} = \frac{n}{t_B} \end{eqnarray} $$ ```{margin} El POCE recuérdese es: $ \begin{eqnarray} \displaystyle \min_{x \in \mathbb{R}^n} &f_o(x)& \nonumber \\ &\text{sujeto a:}& \nonumber\\ f_i(x) &\leq& 0 \quad i=1,\dots,m \nonumber \\ Ax &=& b \end{eqnarray} $ y el PBL es: $ \displaystyle \min_{x \in \mathbb{R}^n} f_B(x|t_B) \\ \text{sujeto a:} \\ Ax = b $ con $A \in \mathbb{R}^{p \times n}$ y *rank* de $A$ igual a $p < n$, $\begin{eqnarray} f_B(x|t_B) &=& f_o(x) + \frac{1}{t_B} \phi(x) \nonumber \\ &=& f_o(x) - \frac{1}{t_B} \displaystyle \sum_{i=1}^m \log(-f_i(x)) \end{eqnarray} $. ``` Por tanto la *duality gap* asociada con $x^*(t_B), \lambda^*(t_B), \nu^*(t_B)$ es: $\frac{n}{t_B}$ donde: $\nu^*(t_B) = \frac{\hat{\nu}}{t_B}$. ```{margin} Recuérdese que las condiciones de KKT para un PLE son: $ \begin{eqnarray} \nabla_x \mathcal{L}(x, \lambda, \nu) &=& c - A^T\nu - \lambda = 0 \nonumber \\ \lambda^Tx &=& 0 \nonumber \\ Ax &=& b \nonumber \\ -x &\leq& 0 \nonumber \\ \lambda &\geq& 0 \end{eqnarray} $ ``` ```{admonition} Comentarios * Por la forma de la *duality gap* anterior para los puntos centrales si $t_B$ se incrementa entonces la *duality gap* tiende a cero en el método primal dual de BL. * Para un PBL que se obtiene de un POCE la *duality gap* anterior para los puntos centrales es $\frac{m}{t_B}$ pues se tienen $m$ funciones $f_i$ de desigualdad. 
* Las condiciones KKT de optimalidad del PBL-PLE son las condiciones KKT de optimalidad del PLE (que se muestran en el margen) pero perturbadas por el parámetro $t_B$: $$ \begin{eqnarray} \nabla_x \mathcal{L}_B(x(t_B), \hat{\nu}) &=& t_Bc - A^T\hat{\nu} - d(t_B) = 0 \nonumber \\ Ax(t_B) &=& b \nonumber \\ \lambda_i(t_B)x_i(t_B) &=& \frac{1}{t_B} \end{eqnarray} $$ donde: $\hat{\nu} = t_B \nu$, $d(t_B) = X^{-1}(t_B)e = \left [ \begin{array}{c} \frac{1}{x_1(t_B)} \\ \vdots \\ \frac{1}{x_n(t_B)} \\ \end{array} \right ]$ y la *duality gap* se estima como: $\lambda(t_B)^Tx(t_B) = \frac{n}{t_B}$. ``` ### ¿Cómo calcular los puntos centrales? Para calcular los puntos centrales del PBL-PLE se utiliza la {ref}`primera idea: determinar la dirección de búsqueda <PRIMIDEAMETPRIMDUAL>` en la que se resuelve el siguiente sistema de ecuaciones no lineales con el método de Newton: ```{margin} El PBL-PLE recuérdese es: $ \displaystyle \min_{x \in \mathbb{R}^n} c^Tx - \frac{1}{t_B} \displaystyle \sum_{i=1}^n \log(x_i) \nonumber \\ \text{sujeto a:} \nonumber \\ Ax=b $ con $A \in \mathbb{R}^{m \times n}$ y *rank* de $A$ igual a $m < n$. Las condiciones KKT son: $ \begin{eqnarray} \nabla_x \mathcal{L}_B(x, \nu) &=& c - A^T\nu - \frac{1}{t_B}d = 0 \nonumber \\ Ax &=& b \nonumber \\ \end{eqnarray} $ donde: $d = X^{-1}e = \left [ \begin{array}{c} \frac{1}{x_1} \\ \vdots \\ \frac{1}{x_n} \\ \end{array} \right ]$. ``` $$F(x, \nu) = \left [ \begin{array}{c} c - A^T\nu - \frac{1}{t_B}d(t_B) \\ b- Ax(t_B) \end{array} \right ] = 0$$ donde: $d(t_B) = X^{*-1}(t_B)e = \left [ \begin{array}{c} \frac{1}{x_1^*(t_B)} \\ \vdots \\ \frac{1}{x_n^*(t_B)} \\ \end{array} \right ]$. 
Este sistema de ecuaciones no lineales conduce a resolver el sistema de ecuaciones lineales: $$J_F(x, \nu) \left [ \begin{array}{c} \Delta x \\ \Delta \nu \end{array} \right ] = - F(x, \nu)$$ donde: $J_F(x, \nu) = \left [ \begin{array}{cc} \nabla_{xx} ^2 \mathcal{L}_B(x, \nu) & \nabla_{\nu x} \mathcal{L}_B(x,\nu) \\ -A & 0\end{array} \right ] = \left [ \begin{array}{cc} \frac{1}{t_B} D^2(t_B) & -A^T \\ -A & 0\end{array} \right ]$ y $D^2(t_B) = \text{diag}^2(d(t_B)) \in \mathbb{R}^{n \times n}$. La actualización en el método de Newton es: $$\left [ \begin{array}{c} x \\ \nu \end{array} \right ]^{(k+1)} = \left [ \begin{array}{c} x \\ \nu \end{array} \right ]^{(k)} + t^{(k)} \left [ \begin{array}{c} \Delta x \\ \Delta \nu \end{array} \right ]$$ donde se utilizó la {ref}`segunda idea: cortar el paso <SEGUNIDEAPRIMDUAL>`. ```{admonition} Comentarios * Se pueden eliminar los signos negativos que están en los bloques de la matriz del sistema de ecuaciones lineales anterior que contienen $A, A^T$ pero por consistencia con lo desarrollado en la nota de dualidad para un PL se mantienen los signos negativos (si se eliminan también debe de ajustarse el lado derecho del sistema). Esta modificación se relaciona con la definición de la función Lagrangiana. * También una modificación que se realiza para que en el primer bloque del sistema de ecuaciones lineales anterior no tengamos del lado izquierdo y del lado derecho $\frac{1}{t_B}$ se puede trabajar con el problema de optimización equivalente: $$ \displaystyle \min_{x \in \mathbb{R}^n} f_B(x|t_B) \\ \text{sujeto a:} \\ Ax = b $$ donde: $f_B(x|t_B) = t_Bf_o(x) + \phi(x) = t_Bf_o(x) - \displaystyle \sum_{i=1}^m \log(-f_i(x))$. **Estas dos modificaciones se utilizan para implementar el método primal-dual de BL**. 
``` ## Método primal-dual de BL aplicado al ejemplo prototipo ``` !pip install --quiet "git+https://github.com/ITAM-DS/analisis-numerico-computo-cientifico.git#egg=opt&subdirectory=src" ``` Se utiliza el paquete de *Python* [opt](https://analisis-numerico-computo-cientifico.readthedocs.io/) en los siguientes `import`'s. ``` from opt.utils_logarithmic_barrier import log_barrier_aux_eval_constraints, \ constraint_inequalities_funcs_generator, constraint_inequalities_funcs_eval, \ phi, logarithmic_barrier, line_search_for_log_barrier_by_backtracking ``` Problema de optimización: $$ \displaystyle \max_{x \in \mathbb{R}^2} 3x_1 + 5x_2\\ \text{sujeto a: } \\ x_1 \leq 4 \nonumber \\ 2x_2 \leq 12 \\ 3x_1 + 2x_2 \leq 18 \\ x_1 \geq 0 \\ x_2 \geq 0 \\ $$ Definimos la función $\phi: \mathbb{R}^n \rightarrow \mathbb{R}$ como $\phi(x) = - \displaystyle \sum_{i=1}^m \log(-(a_i ^Tx-b_i)) - \sum_{i=1}^n \log(-{e}_i ^T(-x))$ con $a_i$ $i$-ésimo renglón de $A = \left [ \begin{array}{cc} 1 & 0 \\0 & 2 \\ 3 & 2 \end{array} \right ]$, $b_i$ $i$-ésima entrada del vector $b = \left [ \begin{array}{c} 4 \\ 12 \\ 18 \end{array} \right ]$ y $e_i$ $i$-ésimo vector canónico. Para este problema las curvas de nivel de la función $\phi$ se ven como sigue: ``` const_ineq_two_pars = {0: lambda x1,x2: x1 - 4, 1: lambda x1,x2: 2*x2 - 12, 2: lambda x1,x2: 3*x1 + 2*x2 - 18, 3: lambda x1,x2: -x1, 4: lambda x1,x2: -x2 } def const_ineq_funcs_eval_two_pars(x1,x2, const_ineq): """ Auxiliary function for the evaluation of constraint inequalities in logarithmic barrier function using two parameters as input. """ const_ineq_funcs_eval = np.array([const(x1,x2) for const in \ constraint_inequalities_funcs_generator(const_ineq)]) return const_ineq_funcs_eval def phi_two_pars(x1,x2, const_ineq): """ Implementation of phi function for logarithmic barrier using two parameters as input. 
""" const_ineq_funcs_eval = -const_ineq_funcs_eval_two_pars(x1, x2, const_ineq) log_barrier_const_eval = np.log(const_ineq_funcs_eval) return -np.sum(log_barrier_const_eval, axis=0) density=1e-1 x1l=0.1 x2d=0.1 x1r=4 x2u=8 x1_p=np.arange(x1l,x1r,density) x2_p=np.arange(x2d,x2u,density) x1_mesh,x2_mesh = np.meshgrid(x1_p,x2_p) z = phi_two_pars(x1_mesh,x2_mesh,const_ineq_two_pars) plt.plot(point1_point2_x_1[:,0], point1_point2_x_1[:,1], "--", color="black", label="_nolegend_") plt.plot(point3_point4_x_1[:,0], point3_point4_x_1[:,1], "--", color="black", label="_nolegend_") plt.plot(point1_point2_x_2[:,0], point1_point2_x_2[:,1], "--", color="black", label="_nolegend_") plt.plot(point3_point4_x_2[:,0], point3_point4_x_2[:,1], "--", color="black", label="_nolegend_") plt.plot(x_1, x_2, "--", color="black", label="_nolegend_") plt.contour(x1_p, x2_p, z) plt.title("Curvas de nivel de $\phi$") plt.show() ``` Reescribimos el problema anterior sin las restricciones $A x \leq b$ como: $ \displaystyle \min_{x \in \mathbb{R}^2} t_B(-3x_1 -5x_2) - [\log(4-x_1) + \log(12 - 2x_2) + \log(18 - (3 x_1 + 2 x_2)) + \log(x_1) + \log(x_2) ]\\ $ Realizamos la actualización: $$\left [ \begin{array}{c}x_1 \\ x_2 \end{array} \right ] = \left [ \begin{array}{c}x_1 \\ x_2 \end{array} \right ] + t \left [ \begin{array}{c}\Delta x_1 \\ \Delta x_2 \end{array} \right ]$$ donde: $t$ es parámetro de *backtracking* y $\left [ \begin{array}{c}\Delta x_1 \\ \Delta x_2 \end{array} \right ]$ es solución del sistema de ecuaciones lineales: $$ \nabla^2f_{B}(x) \left [ \begin{array}{c}\Delta x_1 \\ \Delta x_2 \end{array} \right ] = - \nabla f_{B}(x) \nonumber \\ $$ que para este problema es: $$ \begin{eqnarray} \tilde{A}^T \text{diag}^2(d(t_B))\tilde{A} \left [ \begin{array}{c}\Delta x_1 \\ \Delta x_2 \end{array} \right ] &=& -(t_Bc + \tilde{A}^Td(t_B)) \nonumber \end{eqnarray} $$ donde: $c = \left [ \begin{array}{c}-3 \\ -5 \end{array} \right ], \tilde{A} = \left [ \begin{array}{c} A \\ I \end{array} 
\right ] = \left [ \begin{array}{cc} 1 & 0 \\0 & 2 \\ 3 & 2 \\ 1 & 0 \\ 0 & 1 \end{array} \right ]$, $b_i$ $i$-ésima entrada del vector $b = \left [ \begin{array}{c} 4 \\ 12 \\ 18 \end{array} \right ]$. Y el vector $d(t_B) = \left [ \begin{array}{c} \frac{1}{b_1 - a_1^Tx(t_B)} \\ \frac{1}{b_2 - a_2^Tx(t_B)} \\ \frac{1}{b_3 - a_3^Tx(t_B)} \\ \frac{1}{-x_1(t_B)} \\ \frac{1}{-x_2(t_B)} \end{array}\right ] = \left [ \begin{array}{c} \frac{1}{4-x_1(t_B)} \\ \frac{1}{12-2x_2(t_B)} \\ \frac{1}{18-3x_1(t_B)-2x_2(t_B)} \\ \frac{1}{-x_1(t_B)} \\ \frac{1}{-x_2(t_B)} \end{array}\right ]$. ````{admonition} Observación :class: tip Aunque podríamos definir las siguientes líneas de acuerdo al desarrollo matemático anterior: ```python A = np.array([[1, 0], [0, 2], [3, 2]]) m = 3 n = 2 b = np.array([4, 12, 18]) A_tilde = np.row_stack((A, np.eye(n))) d = np.array([1/(b[0]-A[0,:].dot(x)), 1/(b[1]-A[1,:].dot(x)), 1/(b[2]-A[2,:].dot(x)), 1/(-x[0]), 1/(-x[1])]) system_matrix = (A_tilde.T*(d*d))@A_tilde rhs = -(t_B*c +A_tilde.T@d) ``` usamos *SymPy* para uso de diferenciación simbólica (no se recomienda el uso de *SymPy* para problemas *medium* o *large scale*). ```` ```{admonition} Comentario Valores más "grandes" de $t_B$ hacen que la Hessiana de la función objetivo del PBL varíe rápidamente cerca de la frontera del conjunto factible. En este ejemplo prototipo algunas de las entradas de $\text{diag}^2(d(t_B))$ serán muy grandes en tales valores de $t_B$. 
``` ``` import sympy from sympy.tensor.array import derive_by_array x1, x2 = sympy.symbols("x1, x2") c = np.array([-3, -5]) fo_sympy = c[0]*x1 + c[1]*x2 phi_sympy = -(sympy.log(4-x1) + sympy.log(12-2*x2) + sympy.log(18-3*x1-2*x2) + sympy.log(x1) + sympy.log(x2)) gf_sympy = derive_by_array(fo_sympy, (x1, x2)) Hf_sympy = derive_by_array(gf_sympy, (x1, x2)) gphi_sympy = derive_by_array(phi_sympy, (x1, x2)) Hphi_sympy = derive_by_array(gphi_sympy, (x1, x2)) constraints_ineq = {0: lambda x: x[0] - b[0], 1: lambda x: 2*x[1] - b[1], 2: lambda x: 3*x[0] + 2*x[1] - b[2], 3: lambda x: -x[0], 4: lambda x: -x[1] } x_0 = np.array([1, 2], dtype=float) x = x_0 fo = lambda x: np.dot(c, x) t_B_0 = 10 b = np.array([4, 12, 18], dtype=float) n = x_0.size gf_B = lambda x, t_B: np.array([component.subs({"x1": x[0], "x2": x[1], "t_B": t_B}) for component in t_B*gf_sympy + gphi_sympy], dtype = float) Hf_B = lambda x, t_B: np.array([second_partial_derivative.subs({"x1": x[0], "x2": x[1], "t_B": t_B}) for second_partial_derivative in t_B*Hf_sympy + Hphi_sympy], dtype=float).reshape(n,n) ``` ### Primera iteración ```{margin} Aquí evaluamos la FBL del ejemplo prototipo: $\begin{eqnarray} f_B(x|t_B) &=& t_B(-3x_1 -5x_2) \nonumber \\ &-& \log(4-x_1) - \log(12 - 2x_2) \nonumber \\ &-& \log(18 - (3 x_1 + 2 x_2)) \nonumber \\ &-& \log(x_1) - \log(x_2) \end{eqnarray} $ ``` ``` log_barrier_eval = logarithmic_barrier(fo,x,t_B_0,constraints_ineq) print(log_barrier_eval) const_ineq_funcs_eval = -constraint_inequalities_funcs_eval(x,constraints_ineq) print(const_ineq_funcs_eval) ``` ```{margin} Aquí revisamos que al evaluar las restricciones esté dentro del dominio de la función $\log$ (valores estrictamente positivos). Si no están los puntos en el dominio debemos devolver un mensaje y detener el método. 
``` ``` if(sum(const_ineq_funcs_eval < -np.nextafter(0,1)) >=1): print("Some constraint inequalities evaluated in x were nonpositive, check approximations") fo_eval = fo(x) print(fo_eval) ``` ```{margin} Resolvemos el sistema de ecuaciones lineales para calcular la dirección de Newton. ``` ``` system_matrix = Hf_B(x, t_B_0) rhs = -gf_B(x, t_B_0) dir_Newton = np.linalg.solve(system_matrix, rhs) print(dir_Newton) ``` ```{margin} Aquí calculamos el decremento de Newton cuya definición se da al finalizar las iteraciones. ``` ``` dec_Newton_squared = rhs.dot(dir_Newton) print(dec_Newton_squared) stopping_criteria = dec_Newton_squared/2 print(stopping_criteria) der_direct = -dec_Newton_squared ``` ```{margin} Aquí cortamos el paso con la metodología de búsqueda de línea por *backtracking*. ``` ``` t = line_search_for_log_barrier_by_backtracking(fo,dir_Newton,x_0,t_B_0, constraints_ineq, der_direct) print(t) x = x + t*dir_Newton print(x) ``` ### Segunda iteración ``` system_matrix = Hf_B(x, t_B_0) rhs = -gf_B(x, t_B_0) log_barrier_eval = logarithmic_barrier(fo,x,t_B_0,constraints_ineq) print(log_barrier_eval) const_ineq_funcs_eval = -constraint_inequalities_funcs_eval(x,constraints_ineq) print(const_ineq_funcs_eval) if(sum(const_ineq_funcs_eval < -np.nextafter(0,1)) >=1): print("Some constraint inequalities evaluated in x were nonpositive, check approximations") fo_eval = fo(x) print(fo_eval) dir_Newton = np.linalg.solve(system_matrix, rhs) print(dir_Newton) dec_Newton_squared = rhs.dot(dir_Newton) print(dec_Newton_squared) stopping_criteria = dec_Newton_squared/2 print(stopping_criteria) der_direct = -dec_Newton_squared t = line_search_for_log_barrier_by_backtracking(fo,dir_Newton,x,t_B_0, constraints_ineq, der_direct) print(t) x = x + t*dir_Newton print(x) ``` ### Tercera iteración ``` system_matrix = Hf_B(x, t_B_0) rhs = -gf_B(x, t_B_0) log_barrier_eval = logarithmic_barrier(fo,x,t_B_0,constraints_ineq) print(log_barrier_eval) const_ineq_funcs_eval = 
-constraint_inequalities_funcs_eval(x,constraints_ineq) print(const_ineq_funcs_eval) if(sum(const_ineq_funcs_eval < -np.nextafter(0,1)) >=1): print("Some constraint inequalities evaluated in x were nonpositive, check approximations") fo_eval = fo(x) print(fo_eval) dir_Newton = np.linalg.solve(system_matrix, rhs) print(dir_Newton) dec_Newton_squared = rhs.dot(dir_Newton) print(dec_Newton_squared) stopping_criteria = dec_Newton_squared/2 print(stopping_criteria) der_direct = -dec_Newton_squared t = line_search_for_log_barrier_by_backtracking(fo,dir_Newton,x,t_B_0, constraints_ineq, der_direct) print(t) x = x + t*dir_Newton print(x) ``` ````{admonition} Ejercicio :class: tip Utiliza las definiciones: ```python A = np.array([[1, 0], [0, 2], [3, 2]]) m = 3 n = 2 b = np.array([4, 12, 18]) A_tilde = np.row_stack((A, np.eye(n))) d = np.array([1/(b[0]-A[0,:].dot(x)), 1/(b[1]-A[1,:].dot(x)), 1/(b[2]-A[2,:].dot(x)), 1/(-x[0]), 1/(-x[1])]) system_matrix = (A_tilde.T*(d*d))@A_tilde rhs = -(t_B*c +A_tilde.T@d) ``` y realiza cuatro iteraciones recalculando lo necesario para el sistema de ecuaciones lineales con `system_matrix` y `rhs` dadas por las últimas dos líneas del código que está en este ejercicio. Corrobora que obtienes los mismos resultados que con *SymPy*. ```` ```{margin} El POCE recuérdese es: $ \begin{eqnarray} \displaystyle \min_{x \in \mathbb{R}^n} &f_o(x)& \nonumber \\ &\text{sujeto a:}& \nonumber\\ f_i(x) &\leq& 0 \quad i=1,\dots,m \nonumber \\ Ax &=& b \end{eqnarray} $ y el PBL es: $ \displaystyle \min_{x \in \mathbb{R}^n} f_B(x|t_B) \\ \text{sujeto a:} \\ Ax = b $ con $A \in \mathbb{R}^{p \times n}$ y *rank* de $A$ igual a $p < n$, $\begin{eqnarray} f_B(x|t_B) &=& f_o(x) + \frac{1}{t_B} \phi(x) \nonumber \\ &=& f_o(x) - \frac{1}{t_B} \displaystyle \sum_{i=1}^m \log(-f_i(x)) \end{eqnarray} $. 
``` ```{admonition} Comentarios * La forma general de la condición de KKT de optimalidad $\nabla_x \mathcal{L}_B(x, \nu) = 0$ para un PBL que se obtuvo de un POCE es: $$ \begin{eqnarray} \nabla_x \mathcal{L}_B(x, \nu) &=& \nabla f_o(x) + \frac{1}{t_B}\nabla \phi(x) - A^T \nu = 0 \nonumber \\ &=& \nabla f_o(x) + \frac{1}{t_B} \displaystyle \sum_{i=1}^m \frac{\nabla f_i(x)}{-f_i(x)} - A^T \nu \nonumber \\ \end{eqnarray} $$ con: $\mathcal{L}: \mathbb{R}^n \times \mathbb{R}^p \rightarrow \mathbb{R}$, $$\begin{eqnarray} \mathcal{L}_B(x, \nu) &=& f_{B}(x|t_B) + \sum_{i=1}^p \nu_i h_i(x) \nonumber \\ &=& f_o(x) + \frac{1}{t_B} \phi(x) + \nu^T(b-Ax) \end{eqnarray} $$ y al aplicar el método de Newton al sistema de ecuaciones no lineales conduce a resolver el sistema de ecuaciones lineales siguiente: $$\left [ \begin{array}{cc} \nabla^2f_o(x) + \frac{1}{t_B} \nabla^2 \phi(x) & -A^T \\ -A & 0\end{array} \right ] \left [ \begin{array}{c} \Delta x \\ \Delta \nu \end{array} \right ] = -\left [ \begin{array}{c} \nabla f_o(x) + \frac{1}{t_B} \nabla \phi(x) \\ r_p \end{array} \right ]$$ donde: $r_p = b - Ax$ es el residual para factibilidad primal. Ver {ref}`la función Lagrangiana <FUNLAGRANGIANA>`. ``` ## Definición decremento de Newton Para problemas de optimización convexos sin restricciones: $$\min_{x \in \mathbb{R}^n} f_o(x)$$ en los que utilizamos el método de Newton para resolverlos, se utiliza una cantidad en criterios de paro y en resultados de convergencia nombrada el decremento de Newton. ```{admonition} Definición El decremento de Newton para $f_o: \mathbb{R}^n \rightarrow \mathbb{R}$ en $x$ es la cantidad: $$\lambda(x) = (\nabla f_o(x)^T \nabla^2f_o(x)^{-1} \nabla f_o(x))^{1/2}$$ en donde se asume que $f_o \in \mathcal{C}^2(\text{dom}f_o)$ y su Hessiana es definida positiva. 
``` ```{admonition} Comentarios * Asumiendo que existe un punto óptimo $x^*$ y el valor óptimo se denota por $p^* = f_o(x^*)$ el decremento de Newton tiene propiedades como son: * $\frac{1}{2} \lambda ^2 (x)$ estima $f_o(x)-p^*$. * $|| \nabla^2 f_o(x)^{-1} \nabla f_o(x)||_{\nabla ^2f_o(x)} = \left ( \nabla f_o(x)^T \nabla^2 f_o(x)^{-1} \nabla ^2 f(x) \nabla ^2 f_o(x)^{-1} \nabla f_o(x) \right )^{1/2} = \lambda(x) $ que indica que $\lambda(x)$ es la norma del paso de Newton en la norma cuadrática definida por la Hessiana. * En el método de búsqueda de línea por *backtracking* $-\lambda (x) ^2$ es la derivada direccional de $f_o$ en $x$ en la dirección de $\Delta x_{\text{nt}}$: $$\frac{df(x+t \Delta x_{\text{nt}})}{dt} \Bigr|_{t=0} = \nabla f_o(x)^T \Delta x_{\text{nt}} = \nabla f_o(x)^T (-\nabla^2 f_o(x)^{-1} \nabla f_o(x)) = -\lambda(x)^2$$ donde: $t$ es el parámetro de búsqueda de línea por *backtracking*, $\Delta x_{\text{nt}} = -\nabla ^2 f_o(x)^{-1} \nabla f_o(x)$ para $x \in \text{dom}f_o$ es la dirección de Newton para $f_o$ en $x$. * En el método primal-dual para resolver un PBL el decremento de Newton se utiliza en las *inner iterations*. ``` ## Algoritmo primal-dual de BL para un PL con únicamente desigualdades Para un problema de la forma: $$\displaystyle \min_{x \in \mathbb{R}^n} c^Tx$$ $$\text{sujeto a:}$$ $$Ax \leq b$$ $$x \geq 0$$ >**Dados** $x$ un punto estrictamente factible, esto es: $x > 0$, $Ax < b$ (todas las entradas de $x$ son positivas y $a_i^Tx < b_i$), $t_B^{(0)}$ parámetro de barrera, $\mu > 1$, $tol > 0$. > >$t_B:= t_B^{(0)}$. > >**Repetir** el siguiente bloque para $k=1,2,\dots$ > >***Outer iterations***: >>**Paso de centrado o *inner iterations***: >> >>Calcular $x^*(t_B)$ que resuelva: $\displaystyle \min_{x \in \mathbb{R}^n} t_Bf_o(x) + \phi(x)$ iniciando con $x$. >> >>Utilizar criterio de paro para *inner iterations*. > >Actualizar $x:=x^*(t_B)$. > > Incrementar $t_B$ por $t_B=\mu t_B$. 
> > **hasta** convergencia: satisfacer criterio de paro en el que se utiliza $tol$ y $maxiter$.

```{admonition} Observación
:class: tip

Para un PL únicamente con desigualdades recuérdese:

$$t_Bf_o(x) + \phi(x) = t_B c^Tx - \displaystyle \sum_{i=1}^m \log(-(a_i ^Tx-b_i)) - \sum_{i=1}^n \log(-{e}_i ^T(-x))$$

con $a_i$ $i$-ésimo renglón de $A$.
```

````{admonition} Comentarios

* $\mu$ es un parámetro que realiza un *trade-off* en el número de *inner* y *outer iterations*. Controla el seguimiento de la trayectoria central en las *inner iterations*. Valores grandes causan un mayor número de *inner iterations* y valores cercanos a $1$ causan un mayor número de *outer iterations*.

* La elección de $t_B^{(0)}$ ayuda a dar una estimación del recíproco de la *duality gap* (recuérdese que la estimación en un PBL-PLE es $\frac{n}{t_B}$). Su efecto es similar al del parámetro $\mu$: valores grandes causan que se realicen mayor número de *inner iterations* y valores pequeños un mayor número de *outer iterations*.

* El criterio de paro de las *outer iterations* en un PBL-PLE es de la forma:

```
while n/t_B > tol && iterations < max_iters
```

y el de las *inner iterations* es de la forma:

```
while dec_Newton/2 > tol && iterations < max_iters
```

con `dec_Newton/2` la mitad del decremento de Newton al cuadrado, $\frac{\lambda(x)^2}{2}$, `tol` una cantidad pequeña y positiva (comúnmente menor o igual a $10^{-8}$), `iterations` un contador de iteraciones.

* El algoritmo también puede regresar estimaciones para $\lambda$ con $\lambda^*(t_B)$ y $\nu$ dada por $\nu^*(t_B)$.
```` ## Método primal-dual de BL aplicado al ejemplo prototipo (completo) Problema de optimización: $$ \displaystyle \max_{x \in \mathbb{R}^2} 3x_1 + 5x_2\\ \text{sujeto a: } \\ x_1 \leq 4 \nonumber \\ 2x_2 \leq 12 \\ 3x_1 + 2x_2 \leq 18 \\ x_1 \geq 0 \\ x_2 \geq 0 \\ $$ ``` tol_outer_iter = 1e-6 tol=1e-8 tol_backtracking=1e-12 max_inner_iter=30 mu=10 x_ast = np.array([2, 6], dtype=float) p_ast = fo(x_ast) ``` Se utiliza la función [primal_dual_method](https://analisis-numerico-computo-cientifico.readthedocs.io/en/latest/_autosummary/opt.logarithmic_barrier.linear_program_inequalities.primal_dual_method.html#opt.logarithmic_barrier.linear_program_inequalities.primal_dual_method) del paquete [opt](https://analisis-numerico-computo-cientifico.readthedocs.io/en/latest/) en la siguiente celda: ``` from opt.logarithmic_barrier.linear_program_inequalities import primal_dual_method [x,total_iter,t,x_plot] = primal_dual_method(fo, constraints_ineq, x_0, tol, tol_backtracking, t_B_0, x_ast=x_ast, p_ast=p_ast, max_inner_iter=max_inner_iter, mu=mu, tol_outer_iter=tol_outer_iter, gf_B=gf_B, Hf_B=Hf_B, ) ``` ```{admonition} Observación :class: tip Obsérvese que se realizan más iteraciones con el método primal-dual para este ejemplo prototipo que con el método símplex. 
```

```
from opt.utils_logarithmic_barrier import plot_central_path
plt.contour(x1_p, x2_p, z)
plt.xlim(-0.1, 5)
plt.ylim(-0.1,6.5)
#level curves for fo
x_1_line_1 = np.linspace(0, 6, 100)
x_2_line_1 = 1/5*(-3*x_1_line_1 + 23)
x_1_line_2 = np.linspace(0, 6, 100)
x_2_line_2 = 1/5*(-3*x_1_line_2 + 29)
x_1_line_3 = np.linspace(0, 6, 100)
x_2_line_3 = 1/5*(-3*x_1_line_3 + 36)
plt.plot(x_1_line_1, x_2_line_1, "green",label="_nolegend_")
plt.plot(x_1_line_2, x_2_line_2, "indigo",label="_nolegend_")
plt.plot(x_1_line_3, x_2_line_3, "darkturquoise", label="_nolegend_")
#central path
plot_central_path(x_plot)
```

```{admonition} Comentario

En **este ejemplo** las curvas de nivel de la función objetivo $f_o$ representadas con rectas en la gráfica anterior son tangentes a las curvas de nivel de $\phi$ en $x^*(t_B)$ pues: $t_B \nabla f_o(x^*(t_B)) + \nabla \phi(x^*(t_B)) = 0$ por lo que:

$$\nabla \phi(x^*(t_B)) = -t_B \nabla f_o(x^*(t_B)) = -t_Bc$$
```

```{admonition} Ejercicio
:class: tip

Resolver el siguiente problema con el método primal-dual de BL y corrobora con algún software tu respuesta:

$$
\displaystyle \min_{x \in \mathbb{R}^3} x_1 + x_2 - 4x_3\\
\text{sujeto a:} \\
x_1 + x_2 + 2x_3 \leq 9 \nonumber \\
x_1 + x_2 - x_3 \leq 2 \nonumber \\
-x_1 + x_2 + x_3 \leq 4 \nonumber \\
x_1 \geq 0, x_2 \geq 0, x_3 \geq 0
$$
```

```{admonition} Comentario

El método primal-dual puede modificarse para el caso en el que se tengan puntos no primal-dual factibles. En este caso se le nombra *path following method*.
```

```{admonition} Ejercicios
:class: tip

1. Resuelve los ejercicios y preguntas de la nota.
```

**Preguntas de comprensión**

1)¿Qué es un método por puntos interiores?

2)¿Qué se busca con el método primal-dual y cuáles son las ideas que se utilizan para su desarrollo?

3)¿Por qué al método primal-dual se le nombra así?

4)¿Qué efecto y ventajas tienen añadir funciones de barrera que penalizan al no satisfacer las restricciones de desigualdad de un problema de optimización?
5)¿Qué propiedades se buscan que satisfagan las funciones de barrera? 6)¿Por qué se elige el método de Newton para resolver el problema PBL? 7)¿Qué son los puntos centrales y la trayectoria central? 8)¿Qué relación existe entre las condiciones KKT de optimalidad del PLE y las del PBL-PLE? 9)¿Cómo se define y qué propiedades tiene el decremento de Newton? 10)Explica la tarea que tienen los parámetros $\mu$ y $t_B$ en el problema PBL. Puedes apoyar tu respuesta considerando el efecto que resulta de elegir valores grandes, pequeños de tales parámetros. **Referencias:** 1. S. P. Boyd, L. Vandenberghe, Convex Optimization, Cambridge University Press, 2009. 2. J. Nocedal, S. J. Wright, Numerical Optimization, Springer, 2006. 3. F. Hillier, G. Lieberman, Introduction to Operations Research, Mc Graw Hill, 2014.
github_jupyter
```
# Load the co-crystal (ECC) tables and build graph datasets for fine-tuning.
from ccgnet import experiment as exp
from ccgnet.finetune import *
from ccgnet import layers
from ccgnet.layers import *
import tensorflow as tf
import numpy as np
import time
from sklearn.metrics import balanced_accuracy_score
from sklearn.model_selection import KFold
from ccgnet.Dataset import Dataset, DataLoader
from ccgnet.finetune import Finetuning
import glob
import random

# Plain ECC table -> graph dataset (molecular descriptors enabled, adjacency
# from covalent bonds only; no H-bond / pi-pi / contact edges).
data1 = Dataset('data/CC_Table/ECC_Table.tab', mol_blocks_dir='data/Mol_Blocks.dir')
data1.make_graph_dataset(Desc=1, A_type='OnlyCovalentBond', hbond=0, pipi_stack=0, contact=0, make_dataframe=True, max_graph_size=160)

# Data-augmented variant of the same table, built with identical settings.
data2 = Dataset('data/CC_Table/ECC_Table-DataAug.tab', mol_blocks_dir='data/Mol_Blocks.dir')
data2.make_graph_dataset(Desc=1, A_type='OnlyCovalentBond', hbond=0, pipi_stack=0, contact=0, make_dataframe=True, max_graph_size=160)

# NOTE(review): eval() on file contents executes arbitrary code; these .list
# files are trusted project artifacts, but ast.literal_eval would be safer.
cl20_test = eval(open('data/Test/Test_Samples/CL-20_Test.list').read())
tnt_test = eval(open('data/Test/Test_Samples/TNT_Test.list').read())
cv_set = eval(open('data/ECC_Finetuning_Set.list').read())
# The negative samples in this ECC set were randomly selected from
# data/MEPS.csv, which was used in our work.
class CCGNet(object):
    """Co-crystal graph network: 4 message-passing blocks, attention
    readout, then a 3-layer fully-connected prediction head."""

    def build_model(self, inputs, is_training, global_step):
        """Build the TF graph and return (logits, labels).

        `inputs` packs, in order: V (node features), A (adjacency), labels,
        mask, graph_size, tags, global_state, subgraph_size.
        """
        V = inputs[0]
        A = inputs[1]
        labels = inputs[2]
        mask = inputs[3]
        graph_size = inputs[4]
        tags = inputs[5]
        global_state = inputs[6]
        subgraph_size = inputs[7]
        # message passing
        V, global_state = CCGBlock(V, A, global_state, subgraph_size, no_filters=64, mask=mask, num_updates=global_step, is_training=is_training)
        V, global_state = CCGBlock(V, A, global_state, subgraph_size, no_filters=16, mask=mask, num_updates=global_step, is_training=is_training)
        V, global_state = CCGBlock(V, A, global_state, subgraph_size, no_filters=64, mask=mask, num_updates=global_step, is_training=is_training)
        V, global_state = CCGBlock(V, A, global_state, subgraph_size, no_filters=16, mask=mask, num_updates=global_step, is_training=is_training)
        # readout
        V = ReadoutFunction(V, global_state, graph_size, num_head=2, is_training=is_training)
        # predict
        with tf.compat.v1.variable_scope('Predictive_FC_1') as scope:
            V = layers.make_embedding_layer(V, 256)
            V = layers.make_bn(V, is_training, mask=None, num_updates=global_step)
            V = tf.nn.relu(V)
            V = tf.compat.v1.layers.dropout(V, 0.457, training=is_training)
        with tf.compat.v1.variable_scope('Predictive_FC_2') as scope:
            V = layers.make_embedding_layer(V, 1024)
            V = layers.make_bn(V, is_training, mask=None, num_updates=global_step)
            V = tf.nn.relu(V)
            V = tf.compat.v1.layers.dropout(V, 0.457, training=is_training)
        with tf.compat.v1.variable_scope('Predictive_FC_3') as scope:
            V = layers.make_embedding_layer(V, 256)
            V = layers.make_bn(V, is_training, mask=None, num_updates=global_step)
            V = tf.nn.relu(V)
            V = tf.compat.v1.layers.dropout(V, 0.457, training=is_training)
        out = layers.make_embedding_layer(V, 2, name='final')
        return out, labels


# 5-fold fine-tuning starting from each pretrained CC_Dataset snapshot.
start = time.time()
restore_path = './snapshot/CCGNet/CC_Dataset/*/'
for p in glob.glob(restore_path):
    restore_file = tf.train.latest_checkpoint(p)
    random.shuffle(cv_set)
    cv_samples = np.array(cv_set)
    kf = KFold(n_splits=5, shuffle=True)
    fold_5 = {}
    n = 0
    for train_ix, test_ix in kf.split(cv_samples):
        fold_5['fold-{}'.format(n)] = {}
        fold_5['fold-{}'.format(n)]['train'] = cv_samples[train_ix]
        fold_5['fold-{}'.format(n)]['valid'] = cv_samples[test_ix]
        n += 1
    dataset_name = 'ECC-'+p.split('/')[-2][-1]
    snapshot_path = 'finetuning_snapshot'
    model_name = 'CCGNet'
    for fold in ['fold-{}'.format(i) for i in range(5)]:
        print('\n################ {} ################'.format(fold))
        train_data1, valid_data1, test_data1 = data1.split(train_samples=fold_5[fold]['train'], valid_samples=fold_5[fold]['valid'], with_test=True, test_samples=list(set(cl20_test+tnt_test)))
        train_data2, valid_data2, test_data2 = data2.split(train_samples=fold_5[fold]['train'], valid_samples=fold_5[fold]['valid'], with_test=True, test_samples=list(set(cl20_test+tnt_test)))
        # Train on plain + augmented data; validate/test on the plain split only.
        train_data = []
        for ix, i in enumerate(train_data1):
            train_data.append(np.concatenate([i, train_data2[ix]]))
        tf.reset_default_graph()
        model = CCGNet()
        # The last FC layer and the output layer are re-initialized
        # (remove_keywords) before fine-tuning on ECC data.
        model = Finetuning(model, train_data, valid_data1, with_test=True, test_data=test_data1, snapshot_path=snapshot_path, use_subgraph=True, restore_file=restore_file, model_name=model_name, dataset_name=dataset_name+'/time_{}'.format(fold[-1]), remove_keywords=['Predictive_FC_3', 'final'])
        history = model.fit(save_info=True, save_att=True, silence=0, metric='loss', early_stop=0, early_stop_cutoff=20)
end = time.time()
# Wall-clock report for the whole fine-tuning run.
time_gap = end-start
h = time_gap//3600
h_m = time_gap%3600
m = h_m//60
s = h_m%60
print('{}h {}m {}s'.format(int(h),int(m),round(s,2)))

# Per-snapshot ensemble reports over the validation logs.
from ccgnet.parselog import ParseTestLog, ParseTestLogEnsemble, get_info
import glob
PATH = glob.glob('{}/{}/*'.format(snapshot_path,model_name))
ENS = []
for i in PATH:
    print('#### '+i.split('/')[-1]+' ####')
    val_list_ = glob.glob(i+'/*/*val*')
    ens_ = ParseTestLogEnsemble([ParseTestLog(j) for j in val_list_])
    ens_.Reports  # notebook display

from ccgnet.parselog import ParseValidLog
val_list = glob.glob('{}/{}/*/*'.format(snapshot_path,model_name))
l = []
for i in val_list:
    l.append(ParseValidLog(i))

def get_test_log(p):
    """Map a validation-log path to its sibling model-val-info.txt path."""
    length = len(p.split('/'))
    l = p.split('/')[:length-1]
    # NOTE(review): the appended element starts with '/', so the join yields
    # a double slash; harmless on POSIX paths but looks unintended.
    l.append('/model-val-info.txt')
    return '/'.join(l)

# Pick the 10 models with the lowest validation loss -> mean/bagging ensembles.
best10 = [get_test_log(i[1]) for i in sorted([(i.loss, i.logfile) for i in l])[:10]]
best10
ens = ParseTestLogEnsemble([ParseTestLog(i) for i in best10])
print('####### Mean ########')
ens.Reports
print('####### Bagging ########')
ens_bagging = ens.Bagging

# Per-molecule bagging accuracy on the TNT and CL-20 hold-out sets.
from ccgnet.parselog import TestAccForEachMol
import pandas as pd
dic = {}
#print('\n######## TNT ########')
tnt_bagging = TestAccForEachMol(tnt_test, best10, is_return=1, is_print=0)
dic['TNT'] = tnt_bagging[1]
#print('\n######## CL-20 ########')
cl20_bagging = TestAccForEachMol(cl20_test, best10, is_return=1, is_print=0)
dic['CL-20'] = cl20_bagging[1]
pd.DataFrame(dic)
```
github_jupyter
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import shap
#import utils
# we define some useful shortcuts here
np.random.seed(0)
pd.options.display.max_rows = 10
```

## 1. Import Dataset

Concrete Compressive Strength Data Set

```
df = pd.read_excel('./data/Concrete_Data.xls', sheet_name='Sheet1')
df
```

#### Train-test split

```
from sklearn.model_selection import train_test_split

# Last column is the target (compressive strength); the rest are features.
X, y = df[df.columns[:-1]], df[df.columns[-1]]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2
)

from sklearn.preprocessing import MinMaxScaler
# Scale inputs (fit on train only; re-use the fitted scaler on test).
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
```

## 2. Build a Prediction Model

```
from sklearn.metrics import mean_squared_error

def plot_predictions(y, y_hat, labels):
    """Scatter target vs. prediction, one panel per (y, y_hat, label) set,
    annotating each panel's title with its RMSE.

    Scalars/arrays are wrapped into single-element lists so the function
    accepts either one series or a list of series per argument.
    """
    plt.rcParams.update({'font.size': 18, 'font.sans-serif':'arial'})
    y, y_hat, labels = list(
        map(
            lambda l: l if isinstance(l, list) else [l],
            [y, y_hat, labels]))
    color_list = ['blue', 'red']
    n_plots = len(y)
    # Shared axis limits across all panels so the y = x line is comparable.
    y_min = min([min(z) for z in y])
    y_max = max([max(z) for z in y])
    lims = (y_min, y_max)
    fig, ax = plt.subplots(
        1, n_plots, figsize=(6*n_plots, 5), squeeze=False, sharex=True, sharey=True)
    for axis, target, prediction, label, color in zip(ax[0, :], y, y_hat, labels, color_list):
        # Scatter plot
        axis.scatter(target, prediction, alpha=0.3, c = color)
        # Title and labels
        rmse_value = np.sqrt(mean_squared_error(target, prediction))
        title = label + " (RMSE=%.3f)" % rmse_value
        axis.set_title(title)
        axis.set_xlabel('Target Compressive Strength (MPa)')
        axis.set_ylabel('Predicted Compressive Strength (MPa)')
        # Identity line: points on it are perfect predictions.
        axis.plot(lims, lims, 'k--', alpha=0.75, zorder=0)
        axis.yaxis.set_tick_params(which='both', labelleft=True)
    plt.subplots_adjust(wspace = 0.3)
    plt.show()

# Baseline: gradient-boosted trees.
from sklearn.ensemble import GradientBoostingRegressor
regressor = GradientBoostingRegressor()
regressor.fit(X_train_scaled, y_train)
y_hat_train = regressor.predict(X_train_scaled)
y_hat_test = regressor.predict(X_test_scaled)
plot_predictions([y_train, y_test], [y_hat_train, y_hat_test], labels=['Train', 'Test'])

# Interpretable alternative: symbolic regression via genetic programming.
from gplearn.genetic import SymbolicRegressor
from sklearn.utils.random import check_random_state
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import graphviz

est_gp = SymbolicRegressor(population_size=5000,
                           generations=30, stopping_criteria=0.01,
                           p_crossover=0.7, p_subtree_mutation=0.1,
                           p_hoist_mutation=0.05, p_point_mutation=0.1,
                           max_samples=0.9, verbose=1,
                           parsimony_coefficient=0.01)
est_gp.fit(X_train_scaled, y_train)
print(est_gp._program)
y_hat_train = est_gp.predict(X_train_scaled)
y_hat_test = est_gp.predict(X_test_scaled)
plot_predictions([y_train, y_test], [y_hat_train, y_hat_test], labels=['Train', 'Test'])

# Render the evolved expression tree.
dot_data = est_gp._program.export_graphviz()
graph = graphviz.Source(dot_data)
import pydotplus
graph = pydotplus.graph_from_dot_data(dot_data)
from IPython.display import Image
# Show graph
Image(graph.create_png())
```
github_jupyter
```
import collections
import datasets
import functools
import mininlp
import random
import spacy
import torch
import torch.nn as nn
import torch.optim as optim
import tqdm

# Fix all RNG sources for reproducibility.
seed = 1234
torch.manual_seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
```

## Loading the Dataset

```
imdb = datasets.load_dataset('imdb')
imdb
imdb['train'][0]
train_data, test_data = datasets.load_dataset('imdb', split=['train', 'test'])
print(len(train_data), len(test_data))
train_data[0]

def get_train_valid_split(train_data, valid_ratio=0.2, shuffle=True):
    """Split a HuggingFace dataset into train/validation subsets."""
    data = train_data.train_test_split(test_size=valid_ratio, shuffle=shuffle)
    train_data = data['train']
    valid_data = data['test']
    return train_data, valid_data

valid_ratio = 0.2
shuffle = True
train_data, valid_data = get_train_valid_split(train_data, valid_ratio, shuffle)
print(len(train_data), len(valid_data), len(test_data))
```

## Initializing the Tokenizer

```
# Naive whitespace tokenizer first, as a baseline.
tokenize_fn = lambda x : x.split()
tokenizer = mininlp.tokenizer.Tokenizer(tokenize_fn)
example_string = 'Hello world! How is everyone doing today?'
tokenizer.tokenize(example_string)

# Then swap in spaCy's tokenizer (handles punctuation etc.).
nlp = spacy.load('en_core_web_sm')

def spacy_tokenize(s: str, nlp: spacy.lang):
    """Tokenize `s` with spaCy and return the token strings."""
    return [t.text for t in nlp.tokenizer(s)]

spacy_tokenize(example_string, nlp)
# Bind the pipeline so the tokenizer takes a single string argument.
_spacy_tokenize = functools.partial(spacy_tokenize, nlp=nlp)
_spacy_tokenize(example_string)
tokenizer = mininlp.tokenizer.Tokenizer(_spacy_tokenize)
tokenizer.tokenize(example_string)
```

## Building the Vocabulary

```
field = 'text'
counter = mininlp.vocab.build_vocab_counter(train_data, field, tokenizer)
counter.most_common(10)
# Keep tokens seen >= 6 times, capped at 30k entries.
min_freq = 6
max_size = 30_000
vocab = mininlp.vocab.Vocab(counter, min_freq, max_size)
len(vocab)
vocab.stoi('Hello')
vocab.stoi('hello')
vocab.itos(11977)
vocab.stoi('Cthulhu')  # out-of-vocabulary -> unk index
vocab.itos(0)
example_string = 'Hello world! How is everyone doing today?'
example_tokens = tokenizer.tokenize(example_string) print(example_tokens) vocab.stoi(example_tokens) vocab.itos(vocab.stoi(example_tokens)) example_string = 'My best friend is named Cthulhu' example_tokens = tokenizer.tokenize(example_string) vocab.itos(vocab.stoi(example_tokens)) ``` ## Creating the DataLoader ``` text_transforms = mininlp.transforms.sequential_transforms(tokenizer.tokenize, vocab.stoi, mininlp.transforms.to_longtensor) label_transforms = mininlp.transforms.sequential_transforms(mininlp.transforms.to_longtensor) train_dataset = mininlp.dataset.TextClassificationDataset(train_data, text_transforms, label_transforms) train_dataset[0] train_dataset.data[0] vocab.stoi('Soylent') valid_dataset = mininlp.dataset.TextClassificationDataset(valid_data, text_transforms, label_transforms) test_dataset = mininlp.dataset.TextClassificationDataset(test_data, text_transforms, label_transforms) pad_idx = vocab.stoi(vocab.pad_token) print(pad_idx) collator = mininlp.collator.TextClassificationCollator(pad_idx) batch_size = 256 train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collator.collate, num_workers=torch.get_num_threads()) valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, collate_fn=collator.collate, num_workers=torch.get_num_threads()) test_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False, collate_fn=collator.collate, num_workers=torch.get_num_threads()) ``` ## Creating the NBOW model ``` class NBOW(nn.Module): def __init__(self, input_dim: int, emb_dim: int, output_dim: int, pad_idx: int): super().__init__() self.embedding = nn.Embedding(input_dim, emb_dim, padding_idx = pad_idx) self.fc = nn.Linear(emb_dim, output_dim) def forward(self, text: torch.LongTensor) -> torch.FloatTensor: # text = [seq len, batch size] embedded = self.embedding(text) # embedded = [seq len, batch size, emb dim] pooled = embedded.mean(0) # 
pooled = [batch size, emb dim] prediction = self.fc(pooled) # prediction = [batch size, output dim] return prediction input_dim = len(vocab) emb_dim = 100 output_dim = 2 model = NBOW(input_dim, emb_dim, output_dim, pad_idx) print(f'The model has {mininlp.utils.count_parameters(model):,} trainable parameters') optimizer = optim.Adam(model.parameters()) criterion = nn.CrossEntropyLoss() device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print(f'Using: {device}') model = model.to(device) criterion = criterion.to(device) def train(model, data_loader, optimizer, criterion, device): epoch_loss = 0 epoch_acc = 0 model.train() for text, labels in data_loader: text = text.to(device) labels = labels.to(device) optimizer.zero_grad() predictions = model(text) loss = criterion(predictions, labels) acc = mininlp.utils.calculate_accuracy(predictions, labels) loss.backward() optimizer.step() epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(data_loader), epoch_acc / len(data_loader) def evaluate(model, data_loader, criterion, device): epoch_loss = 0 epoch_acc = 0 model.eval() with torch.no_grad(): for text, labels in data_loader: text = text.to(device) labels = labels.to(device) predictions = model(text) loss = criterion(predictions, labels) acc = mininlp.utils.calculate_accuracy(predictions, labels) epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(data_loader), epoch_acc / len(data_loader) n_epochs = 10 best_valid_loss = float('inf') for epoch in range(n_epochs): train_loss, train_acc = train(model, train_loader, optimizer, criterion, device) valid_loss, valid_acc = evaluate(model, valid_loader, criterion, device) if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), 'nbow-model.pt') print(f'Epoch: {epoch:2}') print(f' Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%') print(f' Valid Loss: {valid_loss:.3f} | Valid Acc: {valid_acc*100:.2f}%') 
# Restore the best checkpoint and report held-out test performance.
model.load_state_dict(torch.load('nbow-model.pt'))
test_loss, test_acc = evaluate(model, test_loader, criterion, device)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')

def predict(sentence, text_transforms, model, device):
    """Return the model's probability that `sentence` is positive.

    The transformed token ids get a trailing batch dimension of 1
    (model expects [seq len, batch size]); softmax over the logits, then
    the last class's probability is taken as the positive-class score.
    """
    model.eval()
    tensor = text_transforms(sentence).unsqueeze(-1).to(device)
    prediction = model(tensor)
    probabilities = nn.functional.softmax(prediction, dim=-1)
    pos_probability = probabilities.squeeze(0)[-1].item()
    return pos_probability

# Sanity checks: clearly negative, clearly positive, and two "turnaround"
# sentences that a bag-of-words model (no word order) cannot distinguish.
sentence = 'the absolute worst movie of all time.'
predict(sentence, text_transforms, model, device)
sentence = 'one of the greatest films i have ever seen in my life.'
predict(sentence, text_transforms, model, device)
sentence = "i thought it was going to be one of the greatest films i have ever seen in my life, \
but it was actually the absolute worst movie of all time."
predict(sentence, text_transforms, model, device)
sentence = "i thought it was going to be the absolute worst movie of all time, \
but it was actually one of the greatest films i have ever seen in my life."
predict(sentence, text_transforms, model, device)
```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import matplotlib.font_manager as font_manager plt.style.use('seaborn-darkgrid') %matplotlib inline prop = font_manager.FontProperties('Segoe UI') ##### define parameter arrays ###### #capture rate (CR), #efficiency (eta), #ratio of power plant to CCS construction energy (r_PP_CCS), and #ratio of CCS construction to operation energy (r) as arrays CR_array=list(np.arange(0.0,1.05,0.05)) eta_array=np.arange(0.25,0.7,0.05) r_PP_CCS_array=[0.15]#np.arange(0.05,2.05,0.05) r_array=[0.15]#np.arange(0.0,10.1,0.1) # b is fraction of electricity used for CCS [kWh/kWh = dmnl] # EROI_1 is EROI where CCS construction energy is a ratio of power plant construction energy [MJ_e/MW/(MJ_p/MW) = dmnl] # EROI_1_adj makes adjustment for electrical output [MJ_p/MJ_p = dmnl] # EROI_2 is EROI where CCS cons. energy is a ratio of CCS operation energy [MJ_e/MJ_p = dmnl] # EROI_2_adj makes adjustment for electrical output [MJ_p/MJ_p = dmnl] # E is emissions [kg CO2/kWh] b = np.zeros((len(CR_array), len(eta_array))) CCS_op = np.zeros((len(CR_array))) Elec_CO2 = np.zeros((len(eta_array))) EROI_1 =np.zeros((len(CR_array), len(eta_array), len(r_PP_CCS_array))) #EROI using r_PP_CCS, units [dmnl] EROI_1_adj =np.zeros((len(CR_array), len(eta_array), len(r_PP_CCS_array))) EROI_2 =np.zeros((len(CR_array), len(eta_array), len(r_array))) #EROI using r, units [dmnl] EROI_2_adj =np.zeros((len(CR_array), len(eta_array), len(r_array))) EROI_2x =np.zeros((len(CR_array_x), len(eta_array), len(r_array))) #EROI using r, units [dmnl] EROI_2x_adj =np.zeros((len(CR_array_x), len(eta_array), len(r_array))) E =np.zeros((len(CR_array), len(eta_array))) #Emissions, units [kg CO2/kWh] ##### Define parameters ##### PP_L = 30 #power plant lifetime [yrs] PP_CF = 0.5 #power plant capacity factor [dmnl] CCS_L = 30 #CCS plant lifetime [yrs] C_CO2 = 0.088 #carbon dioxide content of coal [kg/MJ] PP_cons_energy = 40000 #energy cost of power plant 
construction [MJ/MW], does not include energy embodied in materials ##### loop through calculations ##### for i in range(len(CR_array)): #Define electricity penalty (b) curve from Sanpasternich (2009) Figure 8, units [kWh/kg CO2] CCS_op[i] = 25.957*CR_array[i]**6 - 85.031*CR_array[i]**5 + \ 114.5*CR_array[i]**4 - 80.385*CR_array[i]**3 + \ 31.47*CR_array[i]**2 - 6.7725*CR_array[i] + 1.1137 for j in range(len(eta_array)): PP_op = 0.01 #operational cost of coal fuel cycle [MJ/MJ coal = dmnl] PP_op_L = 1*8760*PP_L*PP_CF*PP_op/eta_array[j] #operational cost of coal fuel cycle over plant lifetime output [kWh] #define carbon intensity of electricity, units [kg CO2/kWh] Elec_CO2[j] = C_CO2*3.6/eta_array[j] #energy penalty of CCS, units [dmnl] b[i,j] = CCS_op[i]*Elec_CO2[j]*CR_array[i] #emissions E[i,j] = Elec_CO2[j]*(1-CR_array[i]) for k in range(len(r_PP_CCS_array)): #energy cost of constructing CCS plant CCS_cons_energy = r_PP_CCS_array[k]*PP_cons_energy EROI_1[i,j,k] = 1*8760*PP_L*PP_CF*(1-b[i,j])*3.6/\ (PP_cons_energy + PP_op_L + CCS_cons_energy*PP_L/CCS_L) EROI_1_adj[i,j,k] = EROI_1[i,j,k]/0.3 for l in range(len(r_array)): #energy cost of operating CCS plant EROI_2[i,j,l] = 1*8760*PP_L*PP_CF*(1-b[i,j])/\ (PP_cons_energy + PP_op_L + r_array[l]*CCS_op[i]) EROI_2_adj[i,j,l] = EROI_2[i,j,l]/0.3 plt.pcolor(b,cmap='viridis',vmax=0.8) plt.colorbar() xk=1 x=eta_array yk=2 y=CR_array ax=plt.gca() ax.set_xticks(range(len(x)+1)[::xk]) ax.set_xticklabels(x[::xk]) ax.set_yticks(range(len(y)+1)[::yk]) ax.set_yticklabels(y[::yk]) plt.xlim(0,len(x)-1) plt.ylim(1,len(y)-1) plt.xlabel('Powerplant efficiency $\eta$') plt.ylabel('Capture ratio $f_{cap}$') plt.title('Fraction of electricity used for CCS') plt.show() fig=plt.figure(figsize=(5,4)) ax=fig.gca() names = ['CR', 'eta', 'r'] A=EROI_1_adj r=0 print(r_PP_CCS_array[r]) index = pd.MultiIndex.from_product([range(s)for s in A.shape], names=names) df = pd.DataFrame({'A': A.flatten()}, index=index)['A'] df = 
df.unstack(level='CR').swaplevel().sort_index() df.loc[r].plot(ax=ax,color='grey',legend=None) plt.ylim(0,30) xk=4 x=eta_array ax.set_xticks(range(len(x)+1)[::xk]) ax.set_xticklabels(x[::xk]) plt.xlim(1,len(x)-2) plt.xlabel('Powerplant efficiency $\eta$') plt.ylabel('EROEI$_{CCS}$') plt.show() fig=plt.figure(figsize=(5,4)) ax=fig.gca() names = ['CR', 'eta', 'r'] A=EROI_2_adj r=0 print(r_array[r]) index = pd.MultiIndex.from_product([range(s)for s in A.shape], names=names) df = pd.DataFrame({'A': A.flatten()}, index=index)['A'] df = df.unstack(level='CR').swaplevel().sort_index() df.loc[r].plot(ax=ax,color='grey',legend=None) plt.ylim(0,30) xk=4 x=eta_array ax.set_xticks(range(len(x)+1)[::xk]) ax.set_xticklabels(x[::xk]) plt.xlim(1,len(x)-2) plt.xlabel('Powerplant efficiency $\eta$') plt.ylabel('EROEI$_{CCS}$') plt.show() ```
github_jupyter
```
from __future__ import print_function
import sys
sys.path.append('../build/')
%pylab inline
np.set_printoptions(precision=4, suppress=True)
import versor as vsr
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('paper')
from versor.drawing import *
from motor_estimation import MotorEstimationSolver
from game import VDMotorEstimationSolver
from icp import best_fit_transform
import pickle
```

# Generate motors

```
def create_motors(n_motors=10, d_lims=(0,10), th_lims=(-pi,pi)):
    """Sample random motors: a translation of length in d_lims along a random
    direction composed with a rotation of angle in th_lims about a random
    bivector axis."""
    motors = [((vsr.Vec(*np.random.random(3)).unit() * np.random.uniform(*d_lims)).trs() * vsr.Rot(vsr.Biv(*np.random.uniform(-1,1,3)).unit() * np.random.uniform(*th_lims) * -0.5)) for i in range(n_motors)]
    return motors
```

# Planes

```
def rms_motor(results):
    """RMS errors between true and estimated motors.

    Each item of `results` is (data, true_motor, estimated_motor, info).
    Returns (rms motor-translation error, rms rotation-angle error,
    rms translation error), the first two computed from the relative motor
    em.rev() * m (identity when the estimate is exact).
    """
    rms_rot = []
    rms_mot = []
    rms_trs = []
    for result in results:
        (_, m, em, _) = result
#         rot_error = np.min([np.linalg.norm(np.array(m.rot()) - np.array(em.rot())),
#                             np.linalg.norm(np.array(m.rot()) + np.array(em.rot()))])
#         rot = np.inner(rot_error, rot_error)
        emm = em.rev() * m
        angle_error = 2 * np.arccos(np.abs(emm[0]))
        rot = angle_error**2
#         angle_error = 2 * np.arccos(np.abs(m[0])) - 2 * np.arccos(np.abs(em[0]))
        angle_error2 = 2 * np.arctan2(emm.rot().biv().norm(), emm[0])
        rot2 = angle_error2**2
        d1 = np.array(m.trs() - em.trs())
        trs2 = np.inner(d1,d1)
        d = emm.trs()
        trs = np.inner(d,d)
        rms_mot.append(trs2)
        rms_rot.append(rot)
        rms_trs.append(trs)
#         print('m_during: ', m)
#         print()
    motmean = np.sqrt(np.sum(rms_mot) / len(rms_mot))
#     rotmean = np.mean(rms_rot)
    rotmean = np.sqrt(np.sum(rms_rot) / len(rms_rot))
    trsmean = np.sqrt(np.sum(rms_trs) / len(rms_trs))
    return (motmean, rotmean, trsmean)

def compare_rms_motor(results, results_vd, results_tin):
    """Tabulate rms_motor for the three estimators ('Ti' difference residual,
    'VD' solver, 'Ti2' commutator residual) as a DataFrame."""
    RMS = {'Ti': {}, "VD": {}, 'Ti2': {}}
    our_mot, our_rot, our_trs = rms_motor(results)
    Ti = RMS['Ti']
    Ti['mot'] = our_mot
    Ti['rot'] = our_rot
    Ti['trs'] = our_trs
    vd_mot, vd_rot, _ = rms_motor(results_vd)
    VD = RMS['VD']
    VD['rot'] = vd_rot
    VD['mot'] = vd_mot
    our_mot2, our_rot2, our_trs2 = rms_motor(results_tin)
    Ti2 = RMS['Ti2']
    Ti2['mot'] = our_mot2
    Ti2['rot'] = our_rot2
    Ti2['trs'] = our_trs2
    return pd.DataFrame(RMS)

def run_planes(n_planes=10, n_sets=10, n_motors=10, trs_noise_std=0.0, rot_noise_std=0.0, noisy=False):
    """Monte-Carlo experiment: for n_sets repetitions, generate noisy plane
    correspondences for each random motor and estimate with all three
    solvers. Returns (results, results_vd, results_tin)."""
    motors = create_motors(n_motors, d_lims=(0, 1), th_lims=(0,pi/2))
    results = []
    results_vd = []
    results_tin = []
    for n_set in range(n_sets):
        datasets = [create_random_planes(motor, n_planes, rot_noise_std=rot_noise_std, trs_noise_std=trs_noise_std) for motor in motors]
        for dataset in datasets:
            results.append(estimate_motors_planes(dataset, noisy=noisy))
            results_vd.append(estimate_motors_planes_vd(dataset, noisy=noisy))
            results_tin.append(estimate_motors_planes(dataset, noisy=noisy, commutator=True))
    return (results, results_vd, results_tin)

def create_random_planes(motor, n_planes=10, trs_noise_std=0.09, rot_noise_std=0.09):
    """Random dual planes a, their exact images b = a.spin(motor), and noisy
    images bn obtained by spinning with a small random noise motor first.
    Returns (list of (a, b, bn), motor)."""
    planes = []
    for i in range(n_planes):
        dir_vec = vsr.Vec(*np.random.uniform(-1,1,3)).unit()
        distance = np.random.uniform(0,1)
        a = vsr.Dlp(dir_vec,distance)
        b = a.spin(motor)
        if trs_noise_std > 0.0:
            t = vsr.Vec(*np.random.random(3)).unit() * np.random.normal(0.0,trs_noise_std,1)
        else:
            t = vsr.Vec(0,0,0)
        if rot_noise_std > 0.0:
            R = vsr.Rot(vsr.Biv(*np.random.uniform(-1,1,3)).unit() * np.random.normal(0, rot_noise_std) * -0.5)
        else:
            R = vsr.Rot(1,0,0,0)
        noise_motor = t.trs() * R
        bn = a.spin(noise_motor).spin(motor)
        planes.append((a,b,bn))
    return (planes, motor)

def estimate_motors_planes(dataset, initial_motor = vsr.Mot(1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), noisy=False, commutator=False):
    """Estimate the motor from plane pairs with the Ceres-based solver,
    using either difference or commutator residuals; `noisy` selects the
    noisy observations bn instead of the exact b."""
    planes, motor = dataset
    mes = MotorEstimationSolver(initial_motor)
    if noisy:
        for (a, _, bn) in planes:
            if commutator:
                mes.add_dual_plane_commutator_residual_block(a,bn)
            else:
                mes.add_dual_plane_difference_residual_block(a,bn)
    else:
        for (a, b, _) in planes:
            if commutator:
                mes.add_dual_plane_commutator_residual_block(a,b)
            else:
                mes.add_dual_plane_difference_residual_block(a,b)
    mes.set_parameterization('BIVECTOR_GENERATOR')
    mes.linear_solver_type = 'DENSE_QR'
    (estimated_motor, summary, _) = mes.solve()
    return (planes, motor, estimated_motor, summary)

def estimate_motors_planes_lars(dataset, noisy=False):
    """Closed-form linear estimate: build the 8x8 quadratic form Q from the
    sandwich constraint a*M - M*b = 0 and solve the reduced eigenproblem.
    NOTE(review): appears unused by run_planes; `print(wrong)` below raises
    NameError on purpose(?) when the motor is not unit, and the trailing
    `_` in the return is undefined in this scope -- confirm before reuse."""
    lines, motor = dataset
    def solve(L):
        # Schur-complement reduction onto the rotor part, then smallest
        # eigenvector gives r; back-substitute for q.
        Lrr = L[:4, :4]
        Lrq = L[:4, 4:]
        Lqr = L[4:, :4]
        Lqq = L[4:, 4:]
        Lp = Lrr - np.dot(Lrq, np.dot(np.linalg.pinv(Lqq), Lqr))
        w, v = np.linalg.eig(Lp)
        r = v[:, np.argmin(w)]
        q = np.dot(-np.dot(np.linalg.pinv(Lqq), Lqr), r)
        return vsr.Mot(*np.array([r, q]).ravel())
    Q = np.zeros((8,8))
    lines_a = []
    lines_b = []
    lines_bn = []
    for (a,b,bn) in lines:
        lines_a.append(vsr.CGA(a))
        lines_b.append(vsr.CGA(b))
        lines_bn.append(vsr.CGA(bn))
    if noisy:
        for b,a in zip(lines_a, lines_bn):
            D = np.zeros((32,8))
            for i in range(8):
                ei = vsr.Mot(0,0,0,0,0,0,0,0)
                ei[i] = 1.0
                ei = vsr.CGA(ei)
                D[:,i] = np.array(a * ei - ei * b)
            Q += np.dot(D.T,D)
    else:
        for b,a in zip(lines_a, lines_b):
            D = np.zeros((32,8))
            for i in range(8):
                ei = vsr.Mot(0,0,0,0,0,0,0,0)
                ei[i] = 1.0
                ei = vsr.CGA(ei)
                D[:,i] = np.array(a * ei - ei * b)
            Q += np.dot(D.T,D)
    estimated_motor = solve(Q)
    if np.abs((estimated_motor * estimated_motor.rev())[7]) > 0.0000000001:
        print(wrong)
    # Fix the global sign ambiguity of the motor.
    if np.sign(estimated_motor[0]) != np.sign(motor[0]):
        estimated_motor = estimated_motor * -1.0
    return (lines, motor, estimated_motor, _)

def estimate_motors_planes_vd(dataset, initial_motor = vsr.Mot(1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), noisy=False):
    """Estimate the motor with the VD solver from dual-plane observations;
    `noisy` selects bn instead of b. The sign ambiguity is resolved against
    the true motor."""
    vd = VDMotorEstimationSolver()
    planes, motor = dataset
    planes_a = []
    planes_b = []
    planes_bn = []
    for (a,b,bn) in planes:
        planes_a.append(a)
        planes_b.append(b)
        planes_bn.append(bn)
    if noisy:
        vd.add_dual_plane_observations(planes_a, planes_bn)
    else:
        vd.add_dual_plane_observations(planes_a, planes_b)
    estimated_motor = vd.solve()
    if np.sign(estimated_motor[0]) != np.sign(motor[0]):
        estimated_motor = estimated_motor * -1.0
    return (planes, motor, estimated_motor, [])
```

# TRS and ROT SIM

```
# Sweep translation and rotation noise levels together.
n_noise_levels = 10
noise_levels_rot = np.linspace(0.0001, 0.1, n_noise_levels)
noise_levels_trs = np.linspace(0.0001, 0.01, n_noise_levels)
rot_trs_noise_results = [run_planes(32,10,10, trs_noise_std=noise_level_trs, rot_noise_std=noise_level_rot, noisy=True)
                         for noise_level_rot, noise_level_trs in zip(noise_levels_rot, noise_levels_trs)]
rmss_rot_trs_noise = [compare_rms_motor(*result) for result in rot_trs_noise_results]
d = pd.concat(rmss_rot_trs_noise)
fig, ax = plt.subplots(2)
fig.subplots_adjust(hspace=.33)
ax0 = ax[0]
# ax.plot(noise_levels_rot, d['Ti2']['rot'], label='Ti commutator')
ax0.plot(noise_levels_rot, d['Ti']['rot'], marker='*', label='Ti chordal')
ax0.plot(noise_levels_rot, d['VD']['rot'], label='VD')
ax0.legend(frameon=False, loc=2)
ax0.set_ylabel('Error in rotation [rad]')
ax0.set_xlabel('Gaussian mean deviation of induced noise in rotation [rad]')
# ax0.set_ylabel('Mean geometric error')
# ax0.set_xlabel('Gaussian mean deviation of induced noise')
ax1 = ax[1]
ax1.plot(noise_levels_trs, d['Ti']['trs'], marker='*', label='Ti chordal')
ax1.legend(frameon=False, loc=2)
ax1.set_xlabel('Gaussian mean deviation of induced noise in translation [m]')
ax1.set_ylabel('Error in translation [m]')
# plt.savefig('/Users/lars/latex/latex-phd-thesis/Figures/planes-trs-rot-noise.pdf')
```

# ONLY ROT

```
# Sweep rotation noise only (translation noise held at 0.001).
n_noise_levels = 10
noise_levels_rot = np.linspace(0.0001, 0.1, n_noise_levels)
noise_levels_trs = np.linspace(0.0001, 0.01, n_noise_levels)
rot_noise_results = [run_planes(32,10,10, trs_noise_std=0.001, rot_noise_std=noise_level_rot, noisy=True)
                     for noise_level_rot, noise_level_trs in zip(noise_levels_rot, noise_levels_trs)]
rmss_rot_noise = [compare_rms_motor(*result) for result in rot_noise_results]
d = pd.concat(rmss_rot_noise)
fig, ax = plt.subplots(2)
fig.subplots_adjust(hspace=.33)
ax0 = ax[0]
# ax.plot(noise_levels_rot, d['Ti2']['rot'], label='Ti commutator')
ax0.plot(noise_levels_rot, d['Ti']['rot'], marker='*', label='Ti chordal')
ax0.plot(noise_levels_rot, d['VD']['rot'], label='VD')
ax0.legend(frameon=False, loc=2) ax0.set_ylabel('Error in rotation [rad]') ax0.set_xlabel('Gaussian mean deviation of induced noise in rotation [rad]') # ax0.set_ylabel('Mean geometric error') # ax0.set_xlabel('Gaussian mean deviation of induced noise') ax1 = ax[1] ax1.plot(noise_levels_trs, d['Ti']['trs'], marker='*', label='Ti chordal') ax1.legend(frameon=False, loc=2) ax1.set_xlabel('Gaussian mean deviation of induced noise in translation [m]') ax1.set_ylabel('Error in translation [m]') # plt.savefig('/Users/lars/latex/latex-phd-thesis/Figures/planes-rot-noise.pdf') ``` # ONLY TRS ``` n_noise_levels = 10 noise_levels_rot = np.linspace(0.0001, 0.1, n_noise_levels) noise_levels_trs = np.linspace(0.0001, 0.01, n_noise_levels) trs_noise_results = [run_planes(32,10,10, trs_noise_std=noise_level_trs, rot_noise_std=0.001, noisy=True) for noise_level_rot, noise_level_trs in zip(noise_levels_rot, noise_levels_trs) ] rmss_trs_noise = [compare_rms_motor(*result) for result in trs_noise_results] d = pd.concat(rmss_trs_noise) fig, ax = plt.subplots(2) fig.subplots_adjust(hspace=.33) ax0 = ax[0] # ax.plot(noise_levels_rot, d['Ti2']['rot'], label='Ti commutator') ax0.plot(noise_levels_rot, d['Ti']['rot'], marker='*', label='Ti chordal') ax0.plot(noise_levels_rot, d['VD']['rot'], label='VD') ax0.legend(frameon=False, loc=2) ax0.set_ylabel('Error in rotation [rad]') ax0.set_xlabel('Gaussian mean deviation of induced noise in rotation [rad]') # ax0.set_ylabel('Mean geometric error') # ax0.set_xlabel('Gaussian mean deviation of induced noise') ax1 = ax[1] ax1.plot(noise_levels_trs, d['Ti']['trs'], marker='*', label='Ti chordal') ax1.legend(frameon=False, loc=2) ax1.set_xlabel('Gaussian mean deviation of induced noise in translation [m]') ax1.set_ylabel('Error in translation [m]') # plt.savefig('/Users/lars/latex/latex-phd-thesis/Figures/planes-trs-noise.pdf') def analyze_iterations(results): summaries = [result[3] for result in results] iteration_numbers = [] for summary in 
summaries: iteration_numbers.append(len(pd.DataFrame(summary['iterations'])['cost'])) # median = np.median(iteration_numbers) # argmedian = np.where([iteration_numbers == median])[1][0] # median_costs = pd.DataFrame(summaries[argmedian]['iterations'])['cost'] # median_gradients = pd.DataFrame(summaries[argmedian]['iterations'])['gradient_max_norm'] argmax = np.argmax(iteration_numbers) max_ = np.max(iteration_numbers) max_costs = pd.DataFrame(summaries[argmax]['iterations'])['cost'] max_gradients = pd.DataFrame(summaries[argmax]['iterations'])['gradient_max_norm'] argmin = np.argmin(iteration_numbers) min_ = np.min(iteration_numbers) min_costs = pd.DataFrame(summaries[argmin]['iterations'])['cost'] min_gradients = pd.DataFrame(summaries[argmin]['iterations'])['gradient_max_norm'] return ((max_, argmax, max_costs, max_gradients), (min_, argmin, min_costs, min_gradients)) fig, ax = plt.subplots(1) max_, min_ = analyze_iterations(rot_trs_noise_results[0][0]) ax0 = ax ax0.semilogy(max_[2], label='max') ax0.semilogy(min_[2], label='min') ax0.legend(frameon=False, loc=1) ax0.set_ylabel('Cost') ax0.set_xlabel('Iteration $k$') ax0.xaxis.set_major_locator(MaxNLocator(integer=True)) fig.subplots_adjust(hspace=0.5) plt.savefig('/Users/lars/latex/latex-phd-thesis/Figures/planes-iterations-rot-trs-noise.pdf') ``` # Translation invariance ``` def transform_dataset(dataset, motor): lines, original_motor = dataset transformed_lines = [(a.spin(motor), b.spin(motor), bn.spin(motor)) for a,b,bn in lines] transformed_motor = vsr.CGA(original_motor).spin(vsr.CGA(motor)).mot() return (transformed_lines, transformed_motor) n_noise_levels = 10 noise_levels_rot = np.linspace(1e-5, 0.1, n_noise_levels) # noise_levels_rot = np.zeros(n_noise_levels) noise_levels_trs = np.linspace(1e-6, 0.01, n_noise_levels) def run_trs_invariance(func): rotrmsem1t = [] trsrmsem1t = [] rotrmsem1 = [] trsrmsem1 = [] rotrmsem2 = [] trsrmsem2 = [] def comp_rms(angle_errors, distance_errors): rot = 
np.sqrt(np.sum(angle_errors) / len(angle_errors)) if rot is np.NaN: rot = 0.0 trs = np.sqrt(np.sum(distance_errors) / len(distance_errors)) return rot, trs def motor_error(mot1, mot2, angle_errors, distance_errors): M_err = mot1 * mot2.rev() angle = 2 * np.arccos(np.abs(M_err[0])) angle = angle**2 distance = M_err.trs() distance = np.inner(distance, distance) if distance < 1.0: angle_errors.append(angle) distance_errors.append(distance) for rot_noise_level, trs_noise_level in zip(noise_levels_rot, noise_levels_trs): angle_errors_em1t = [] distance_errors_em1t = [] angle_errors_em1 = [] distance_errors_em1 = [] angle_errors_em2 = [] distance_errors_em2 = [] for i in range(100): n_motors = 1 d_lims = (0,2) th_lims = (0,pi/2) motors = create_motors(n_motors, d_lims=d_lims, th_lims=th_lims) # motors2 = create_motors(1, d_lims=d_lims, th_lims=th_lims) motor2 = (vsr.Vec(*np.random.uniform(-1,1,3)).unit() * np.random.uniform(1,2)).trs() * vsr.Rot(1,0,0,0) dataset = create_random_planes(motors[0], 32, rot_noise_std=rot_noise_level, trs_noise_std=trs_noise_level, ) noisy=True _, m1, em1 , _ = func(dataset, noisy=noisy) _, m2, em2 , _ = func(transform_dataset(dataset, motor2), noisy=noisy) em1_t = vsr.CGA(em1).spin(vsr.CGA(motor2)).mot() motor_error(em1_t, em2, angle_errors_em1t, distance_errors_em1t) motor_error(em1, m1, angle_errors_em1, distance_errors_em1) motor_error(em2, m2, angle_errors_em2, distance_errors_em2) rot_em1t, trs_em1t = comp_rms(angle_errors_em1t, distance_errors_em1t) rot_em1, trs_em1 = comp_rms(angle_errors_em1, distance_errors_em1) rot_em2, trs_em2 = comp_rms(angle_errors_em2, distance_errors_em2) rotrmsem1t.append(rot_em1t) trsrmsem1t.append(trs_em1t) rotrmsem1.append(rot_em1) trsrmsem1.append(trs_em1) rotrmsem2.append(rot_em2) trsrmsem2.append(trs_em2) return rotrmsem1t, trsrmsem1t, rotrmsem1, trsrmsem1, rotrmsem2, trsrmsem2 planes_rms = run_trs_invariance(estimate_motors_planes) np.save('planes_rot_rms_em1t', planes_rms[0]) 
np.save('planes_trs_rms_em1t', planes_rms[1]) np.save('planes_rot_rms_em1', planes_rms[2]) np.save('planes_trs_rms_em1', planes_rms[3]) np.save('planes_rot_rms_em2', planes_rms[4]) np.save('planes_trs_rms_em2', planes_rms[5]) ```
github_jupyter
# Visual and Data Analysis - FIFA 19 Players - **Created by Andrés Segura Tinoco** - **Created on May 1, 2019** # 4. Clustering Data ## Loading main libraries and data ``` # Load the Pandas libraries import pandas as pd from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler import numpy as np # Load scikit-learn library for K-Means from sklearn.cluster import KMeans # Load visualization libraries import matplotlib.pyplot as plt import seaborn as sns # Load Interact libraries from __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets # Read FIFA 19 players data only using the current skill values dataURL = "../data/fifa19_overall_data.csv" rawdata = pd.read_csv(dataURL) # Loading FIFA 19 positions dataset dataURL = "../data/fifa19_positions.csv" positions = pd.read_csv(dataURL) # Function that obtains the numerical data from the data frame def getNumericalData(data, quality): numData = pd.DataFrame() # Create DataFrame for col in data.columns: if str(data[col].dtype) != "object": numData[col] = data[col] # Remove columns that are not relevant for the analysis nonColumns = ["Order", "ID", "Overall", "Potential", "Value €", "Wage €", "Release Clause €"] numData = numData.drop(nonColumns, axis=1) # Data Quality process if quality and len(numData.columns) > 0: numData = numData.fillna(numData.mean()) print(numData.shape) return numData; # Set a Overall threshold var_filter = "Overall" threshold = rawdata[var_filter].median() threshold # Filter/Delete player with an overall below the threshold rawdata = rawdata.loc[rawdata[var_filter] >= threshold] len(rawdata) # Get only numeric columns/variables numData = getNumericalData(rawdata, True) numData.head() ``` ## Apply PCA with Standardization ``` # Function that apply Principal Component Analysis def applyPCA(data, std): # Standardize the Data if std == True: x = StandardScaler().fit_transform(data.values) else: x = 
data.values # Create a DataFrame from PCA pca = PCA(n_components = 2) pcaData = pca.fit_transform(x) pcaDF = pd.DataFrame(data = pcaData, columns = ["PC1", "PC2"]) # Show the total explained variance ratio of model print('Explained Variance Ratio:', sum(pca.explained_variance_ratio_) * 100) return pcaDF; # Function that replace the player position by the zone def replacePositionByZone(data): data["Zone"] = data["Position"] for ix in range(len(positions)): data["Zone"] = data["Zone"].replace(positions.Position[ix], positions.Zone[ix]) return data; # Apply the PCA algorithm pcaDF = applyPCA(numData, True) # Create the PCA data pcaDF = pd.concat([pcaDF, rawdata[["Position"]]], axis = 1) pcaDF = pcaDF[pcaDF["PC1"].notnull()] pcaDF = replacePositionByZone(pcaDF) pcaDF.head() ``` ## Clustering: K-Means ``` # Getting the values and plotting it x = pcaDF['PC1'].values y = pcaDF['PC2'].values train = np.array(list(zip(x, y))) # Calculating the Jambu Elbow Nc = range(1, 20) kmeans = [KMeans(n_clusters = i) for i in Nc] score = [kmeans[i].fit(train).score(train) for i in range(len(kmeans))] # Plot the results fig, ax0 = plt.subplots(figsize = (14, 6)) plt.plot(Nc, score, marker='o') plt.axvline(x = 4, color = "#8b0000", linestyle = "--") plt.xticks(np.arange(1, 20, 1)) plt.xlabel("Number of Clusters", fontsize = 12) plt.ylabel("Score", fontsize = 12) plt.title("Jambu Elbow Curve", fontsize = 20) plt.show() ``` ### Compare results with K-means grouping ``` # Calculates the K-Means for (x, y) dataset def runKMeans(k_clusters): kmeans = KMeans(n_clusters = k_clusters, algorithm = "elkan", random_state = 0) kmeans = kmeans.fit(train) # Getting the cluster labels clusters = kmeans.predict(train) # Centroid values centroids = kmeans.cluster_centers_ # Plotting K-Means result plotKMeansData(train, k_clusters, centroids, clusters) # Create scatter plot with K-Means data def plotKMeansData(data, k_clusters, centroids, clusters): fig, ax1 = plt.subplots(figsize = (14, 14)) # Plotting 
vars colors = ["#1f77b4", "#2ca02c", "#d62728", "#ff7f0e", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"] nData = len(data) # Create scatter plot for i in range(k_clusters): points = np.array([data[j] for j in range(nData) if clusters[j] == i]) sns.scatterplot(ax = ax1, x = points[:, 0], y = points[:, 1], size = 5, color = colors[i]) plt.scatter(centroids[:, 0], centroids[:, 1], s = 20, color = "black", marker = "D") # Plot setup ax1.set_xlabel("PC 1", fontsize = 12) ax1.set_ylabel("PC 2", fontsize = 12) ax1.set_title("Players by Zones", fontsize = 20) ax1.legend(["K-Means: Players with overall >= " + str(threshold)]) ax1.grid() # Create interactive control to control k value # interactive(runKMeans, k_clusters = widgets.IntSlider(min = 1, max = 7, step = 1, value = 4)) runKMeans(k_clusters = 4) ``` ### Compare results with natural grouping ``` # Palette by positions dictionary posPalette = dict() posPalette["GoalKeper"] = "#dc3912" posPalette["Defense"] = "#3366cc" posPalette["Midfield"] = "#ff9900" posPalette["Attack"] = "#109618" # Create scatter plot with players label fig, ax2 = plt.subplots(figsize = (14, 14)) # Create 2D scatter plot plot = sns.scatterplot(ax = ax2, data = pcaDF, x = "PC1", y = "PC2", hue = "Zone", palette = posPalette) # Plot setup ax2.set_xlabel("PC 1", fontsize = 12) ax2.set_ylabel("PC 2", fontsize = 12) ax2.set_title("Players by Zones", fontsize = 20) ax2.legend(["K-Means: Players with overall >= " + str(threshold)]) ax2.grid() ``` ## Insights - Players can be grouped into 4 groups. These groups are highly related to the zones in which players play on the field (goalkeepers, defenders, midfielders and strikers). - Using the Jambu Elbow technique, you can visually select a k = 4 (number of clusters between which the data will be grouped). --- <a href="https://ansegura7.github.io/DataScience_FIFA19Data/">&laquo; Home</a>
github_jupyter
``` # Copyright 2021 Google LLC # Use of this source code is governed by an MIT-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/MIT. # Author(s): Kevin P. Murphy (murphyk@gmail.com) and Mahmoud Soliman (mjs@aucegypt.edu) ``` <a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a> <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/figures/chapter9_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## Figure 9.1:<a name='9.1'></a> <a name='gda2d'></a> (a) Some 2d data from 3 different classes. (b) Fitting 2d Gaussians to each class. Figure(s) generated by [discrim_analysis_dboundaries_plot2.py](https://github.com/probml/pyprobml/blob/master/scripts/discrim_analysis_dboundaries_plot2.py) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_and_run("/pyprobml/scripts/discrim_analysis_dboundaries_plot2.py") ``` ## Figure 9.2:<a name='9.2'></a> <a name='LDA'></a> Gaussian discriminant analysis fit to data in \cref fig:gda2d . (a) Unconstrained covariances induce quadratic decision boundaries. (b) Tied covariances induce linear decision boundaries. 
Figure(s) generated by [discrim_analysis_dboundaries_plot2.py](https://github.com/probml/pyprobml/blob/master/scripts/discrim_analysis_dboundaries_plot2.py) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_and_run("/pyprobml/scripts/discrim_analysis_dboundaries_plot2.py") ``` ## Figure 9.3:<a name='9.3'></a> <a name='ldaGeom'></a> Geometry of LDA in the 2 class case where $\boldsymbol \Sigma _1=\boldsymbol \Sigma _2=\mathbf I $. ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/ldaGeom.png") ``` ## Figure 9.4:<a name='9.4'></a> <a name='fisher'></a> Example of Fisher's linear discriminant applied to data in 2d drawn from two classes. Dashed green line = first principal basis vector. Dotted red line = Fisher's linear discriminant vector. Solid black line joins the class-conditional means. 
Figure(s) generated by [fisherLDAdemo.m](https://github.com/probml/pmtk3/blob/master/demos/fisherLDAdemo.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/fisherLDAdemoData.png") ``` ## Figure 9.5:<a name='9.5'></a> <a name='fisherProj'></a> Example of Fisher's linear discriminant. (a) Projection of points onto Fisher's vector in \cref fig:fisher shows good class separation. (b) Projection of points onto PCA vector in \cref fig:fisher shows poor class separation. Figure(s) generated by [fisherLDAdemo.m](https://github.com/probml/pmtk3/blob/master/demos/fisherLDAdemo.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/fisherLDAdemoProjFisher.png") pmlt.show_image("/pyprobml/book1/figures/images/fisherLDAdemoProjPCA.png") ``` ## Figure 9.6:<a name='9.6'></a> <a name='fisherVowel'></a> (a) PCA projection of vowel data to 2d. (b) FLDA projection of vowel data to 2d. We see there is better class separation in the FLDA case. Adapted from Figure 4.11 of <a href='#HastieBook'>[HTF09]</a> . 
Figure(s) generated by [fisherDiscrimVowelDemo.m](https://github.com/probml/pmtk3/blob/master/demos/fisherDiscrimVowelDemo.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/fisherDiscrimVowelPCA.png") pmlt.show_image("/pyprobml/book1/figures/images/fisherDiscrimVowelLDA.png") ``` ## Figure 9.7:<a name='9.7'></a> <a name='NBclassCond'></a> Class conditional densities $p(x_d=1|y=c)$ for two classes, corresponding to ``X windows'' and ``MS windows'', derived from a bag-of-words representation of some email documents, using a vocabulary of 600 words. The big spike at index 107 corresponds to the word ``subject'', which occurs in both classes with probability 1. Figure(s) generated by [naiveBayesBowDemo.m](https://github.com/probml/pmtk3/blob/master/demos/naiveBayesBowDemo.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/naiveBayesBow1ClassCond.png") pmlt.show_image("/pyprobml/book1/figures/images/naiveBayesBow2ClassCond.png") ``` ## Figure 9.8:<a name='9.8'></a> <a name='genVsDiscrim'></a> The class-conditional densities $p(x|y=c)$ (left) may be more complex than the class posteriors $p(y=c|x)$ (right). Adapted from Figure 1.27 of <a href='#BishopBook'>[Bis06]</a> . 
Figure(s) generated by [generativeVsDiscrim.m](https://github.com/probml/pmtk3/blob/master/demos/generativeVsDiscrim.m) ``` #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /pyprobml/scripts pmlt.show_image("/pyprobml/book1/figures/images/genVsDiscrimClassCond.png") pmlt.show_image("/pyprobml/book1/figures/images/genVsDiscrimPost.png") ``` ## References: <a name='BishopBook'>[Bis06]</a> C. Bishop "Pattern recognition and machine learning". (2006). <a name='HastieBook'>[HTF09]</a> T. Hastie, R. Tibshirani and J. Friedman. "The Elements of Statistical Learning". (2009).
github_jupyter
# Differentiable Procedural Generation The idea behind differentiable procgen is to solve for procgen parameters rather than hand-tune them. Many popular roguelikes have hand-tuned procgen parameters. It is not clear how they were derived, what effect changing them has, or how they interact with each other. Here are some examples. ## Angband https://github.com/angband/angband/blob/master/lib/gamedata/dungeon_profile.txt ``` params:11:50:200:2 tunnel:10:30:15:25:50 streamer:5:2:3:90:2:40 cutoff:100 ``` ## Crawl https://github.com/crawl/crawl/blob/master/crawl-ref/source/dgn-layouts.cc ```c // Make some rooms: int doorlevel = random2(11); int roomsize = 4 + random2(5); roomsize += random2(6); // force a sequence point between random calls ``` ## NetHack https://github.com/NetHack/NetHack/blob/NetHack-3.6.2/src/mklev.c ```c if (!rn2(50)) ``` ## Pixel Dungeon For example Pixel Dungeon's level generator https://github.com/watabou/pixel-dungeon/blob/master/src/com/watabou/pixeldungeon/levels/RegularLevel.java ```java int nConnected = (int)(rooms.size() * Random.Float( 0.5f, 0.7f )); ``` # Differentiable Programming Differentiable programming is the idea that programs can be differentiated. Usually this means automatic differentiation. Given a function `f` which returns a scalar value, return a new function `grad_f` which returns the slope of `f`. ```python def tanh(x): # Define a function y = np.exp(-2.0 * x) return (1.0 - y) / (1.0 + y) grad_tanh = grad(tanh) ``` # How Does This Apply To Procgen? Given the gradient of a function, we can use gradient descent to find arguments for the function which minimize the output. Consider generation as a function of parameters, and random input. The parameters will be optimized, while the random_input will serve as a source of randomness for the generator. Think of the parameters as the collection of magic numbers demonstrated in the roguelikes above, and random input as all the calls to `rand()`.
Instead of calling `rand()` inside the generator, we'll call it outside and then pass random input in. ```python def gen(parameters, random_input): # Use parameters and random_input to make dungeon return dungeon ``` Also consider a loss function as a function of parameters, random input, and a target value where the target value is a scalar. ```python def loss(parameters, random_input, target): dungeon = gen(parameters, random_input) # score the dungeon and compare to target score = ??? return (score - target) ** 2 ``` If the loss function is differentiable (implying that the gen function is also differentiable) then the gradient of the score with respect to parameters can be calculated. Using gradient descent, the parameters can be adjusted to minimize loss. After optimum parameters are found, the generator function can be called with new random inputs to produce fresh output. `gen(optimized_parameters, np.random.rand(...))` # Tips on writing generators * Use 2d convolutions for map generators * Use a fixed kernel and find the optimum bias value or * Consider the kernel as a set of parameters and find the optimum kernel values * Use multiple 'passes' or loops to condition the output * Don't be afraid of having hundreds or more parameters. Gradient descent will probably still work. # Tips on writing loss functions * Start simple with one goal * Add multiple goals by using addition `+` * Train a small neural net to score generator output. Example: deriving a differentiable `num_connected_components` function is difficult, but training a small neural network to estimate the number of connected components is easier and differentiable. * You might be tempted to add magic numbers to your loss function. That's ok. You're entering the world of reparameterization. Instead of tweaking with weird magic numbers in the generator, you've shifted the problem around to have interpretable numbers in the loss function.
* Experiment with batch size, number of epochs, learning rate, starting parameters. Now you're entering hyperparameter land. # Dependencies `jax` and `jaxlib` for differentiable programming support `matplotlib` and `seaborn` for plotting graphs `tensorflow` for connected_components `joblib` for creating training data faster ``` !true || pip install --upgrade jax jaxlib matplotlib seaborn tensorflow joblib import sys sys.version import os from joblib import Parallel, delayed import multiprocessing import time import itertools import numpy as onp import matplotlib.pyplot as plt import jax import jax.numpy as np from jax import grad, jit, vmap, random from jax.scipy.special import logsumexp, expit from jax.experimental import optimizers import jax.experimental.stax as stax import jax.random as random import tensorflow as tf from tensorflow.contrib.image import connected_components num_cores = multiprocessing.cpu_count() ``` # Cave Generation Let's start with a cave generator. The most popular way of generating caves is by thinking of them like cellular automata. The process consists of starting with random noise and then repeating these steps. A rule of thumb is to use five repetitions. 1. For each cell in the map, consider a 3x3 neighborhood. 2. Count each wall as a 1 and each open space as a 0. 3. Add up all of the values in the neighborhood. 4. A cell remains a wall if 4 of its neighbors are walls, or becomes a wall if 5 or more of its neighbors are walls. 5. Otherwise, the cell is marked empty. Jax's `conv` function with a one's kernel $\begin{pmatrix} 1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1 \end{pmatrix}$ is a convenient way to calculate neighborhood sums. ``` cols = 21 rows = 21 WALL = 1. SPACE = 0.
key = random.PRNGKey(1)

# Kernel that counts the number of adjacent 1's (3x3 neighborhood sum).
kernel = jax.numpy.ones((1, 1, 3, 3))
print(kernel.shape)


def print_map(a, threshold=0.5):
    # Render a (1, 1, rows, cols) map as ASCII rows: ' ' above threshold, '#' otherwise.
    print(np.where(a[0, 0] > threshold, ord(' '), ord('#')).astype(np.uint8)._value.view(f'S{a.shape[2]}'))


def caves_discrete(p):
    # Classic cellular-automata cave generator: seed with noise of density p,
    # then smooth with four neighborhood-sum passes.
    a = np.where(random.uniform(key, shape=(rows, cols)) < p, SPACE, WALL)[None, None, :, :]
    for i in range(4):
        conv = jax.lax.conv(a, kernel, (1, 1), "SAME")
        a = np.where(np.logical_or((conv < 5.0), (a + conv < 4.0)), SPACE, WALL)
    return a


print_map(caves_discrete(0.50))
```

Let's create a continuous version of this generator - one that can be differentiated.

First, let's define the sigmoid function. The sigmoid function will map numbers from $[-\infty, +\infty]$ to $[0, 1]$. This will give the generator the liberty to use the full number line but at the end we can still fit the output between 0 and 1.

```
def sigmoid(x):
    return 1. / (1. + np.exp(-x))


plt.plot(range(-10, 10), [sigmoid(x) for x in range(-10, 10)])
```

The continuous version of the caves generator uses the same kernel summing method, but has a free parameter: `threshold`. At each step we'll re-center the neighborhood sum and map the results into the range $[0, 1]$ by using the sigmoid function. We'll find the best `threshold` in just a bit.

The loss function for the caves generator runs the generator and finds the mean value of the output. Remember that the output is not just 0 or 1, but numbers in the range $[0, 1]$ so a mean of 0.5 would mean that on average every cell is half filled in.
```
def caves_continuous(params, x):
    """Differentiable cave generator.

    x is a noise batch of shape (n, 1, w, h); params[0] is the threshold the
    neighborhood sum is re-centered on before the sigmoid at every pass.
    """
    threshold = params[0]
    # Pad the border with WALL (k is 0 for the 3x3 kernel used here).
    k = int((kernel.shape[2]-2)/2)
    a = jax.lax.lax.pad(x, WALL, [(0, 0, 0), (0, 0, 0), (k, k, 0), (k, k, 0)])
    for i in range(4):
        conv = jax.lax.conv_general_dilated(a, kernel, (1, 1), "SAME", dimension_numbers=("NCWH", 'OIWH', "NCWH"))
        # a is the sigmoid(sum of neighbors - threshold)
        a = sigmoid(conv - threshold)
    return a


def caves_loss_1(params, batch):
    """Squared error between each generated map's mean density and its target y."""
    x, y = batch
    out = caves_continuous(params, x)
    scores = np.mean(out, axis=(1, 2, 3))
    return np.mean((scores - y) ** 2.)
```

Let's use the caves loss function and see how the loss changes as the `threshold` parameter changes. We'll also differentiate the loss function so that we can also plot the slope.

```
x = random.uniform(key, shape=(rows, cols))[None, None, :, :]
plt.plot(np.linspace(-10, 10, 100), [(caves_loss_1([p],(x, np.array([0.5])))) for p in np.linspace(-10, 10, 100)])
g = jit(grad(caves_loss_1))
plt.plot(np.linspace(-10, 10, 100), [g([p], (x, np.array([0.5]))) for p in np.linspace(-10, 10, 100)])
locs, labels = plt.xticks()
print(locs)
plt.xticks(np.linspace(-10, 10, 11))
plt.legend(['Loss w.r.t. threshold', 'Derivative of loss w.r.t. threshold'])
```

It looks like the lowest loss is just above 4. Let's try it out and view the outputs as a spot check.

```
print_map(caves_continuous(np.array([4.3]), random.uniform(key, shape=(rows, cols))[None, None, :, :]))
plt.imshow(caves_continuous([4.3], random.uniform(key, shape=(rows, cols))[None, None, :, :])[0,0,:,:])
plt.colorbar()
```

The spot check looks great. Let's move on to using jax.optimize to narrow in on the `threshold` which results in the lowest loss.
``` def cave_opt(): init_rng = random.PRNGKey(0) step_size = 0.1 momentum_mass = 0.9 num_epochs = 32 batch_size = 32 train_X = np.squeeze(np.array([onp.random.rand(1, 1, rows, cols) for _ in range(num_epochs * batch_size)]), 1) train_y = np.array([.45 for _ in range(train_X.shape[0])]) num_train = train_X.shape[0] num_complete_batches, leftover = divmod(train_X.shape[0], batch_size) num_batches = num_complete_batches + bool(leftover) print("Input shape:", train_X.shape) print("Output shape", train_y.shape) print("Num Epochs:", num_epochs) print("Num Batches:", num_batches) def data_stream(): rng = onp.random.RandomState(0) while True: perm = rng.permutation(num_train) for i in range(num_batches): batch_idx = perm[i * batch_size:(i + 1) * batch_size] yield train_X[batch_idx], train_y[batch_idx] batches = data_stream() opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=momentum_mass) @jit def update(rng, i, opt_state, batch): params = get_params(opt_state) return opt_update(i, grad(caves_loss_1)(params, batch), opt_state) init_params = np.array([2.]) opt_state = opt_init(init_params) itercount = itertools.count() print("\nStarting training...") losses = [] test_accs = [] params = get_params(opt_state) #train_acc = accuracy(params, (train_images, train_cc), rng=init_rng) #losses.append(train_acc) #test_acc = test_accuracy(params) #test_accs.append(test_acc) print("Epoch init ") #print("Training set accuracy {}".format(train_acc)) #print("Test set accuracy {}".format(test_acc)) for epoch in range(num_epochs): rng = random.PRNGKey(epoch) start_time = time.time() epoch_batch = None for _ in range(num_batches): batch = next(batches) epoch_batch = batch opt_state = update(rng, next(itercount), opt_state, batch) params = get_params(opt_state) losses.append(caves_loss_1(params, epoch_batch)) test_accs.append(caves_loss_1(params, next(batches))) epoch_time = time.time() - start_time print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time)) print("Training 
set accuracy {}".format(losses[-1])) print("Test set accuracy {}".format(test_accs[-1])) plt.plot(losses) plt.plot(test_accs) return params cave_params = cave_opt() cave_params ``` The optimizer found `4.38171196` as the `threshold` parameter with the lowest loss. Let's plug that back into the generator and feed it random starting noise. ``` print(cave_params) print(caves_loss_1(cave_params, (onp.random.rand(1, 1, rows, cols), np.array([0.45])))) plt.subplots(figsize=(12, 12)) for i in range(1, 4): for j in range(1, 5): for _ in range(100): cs = np.where(caves_continuous(cave_params, onp.random.rand(1, 1, rows, cols)) > 0.5, 0., 1.) if np.sum(cs) < 250 : break plt.subplot(3, 4, (i - 1) * 4 + j) plt.imshow(onp.random.binomial(1, cs)[0, 0], vmin=0., vmax=1.) plt.tight_layout() ``` This caves generator is a simple example. The loss function is concerned only with optimizing for density. Notice how sometimes the generator produces disconnected caverns. How could you modify the generator to optimize for connectedness? # Graph Dungeon Generation Let's move on to a more complicated dungeon generator - rooms and corridors. We'll need to draw the output, so let's define a drawing function. ``` dungeon_cols = 12 dungeon_rows = 12 def make_dense_matrix(rows, cols): return np.zeros((1, 2, rows -1, cols - 1)) def draw_dense_dungeon(M): #M (batch, (0=horz/1=vert), row, col) rows = M.shape[2] cols = M.shape[3] #dungeon (row, col) dungeon = onp.ones((rows*4-1, cols*4-1)) for row in range(rows): for col in range(cols): # TODO: logic. room # conn = 1. when there is a corridor conn = np.array([ # above | row > 0 and M[0, 1, row - 1, col] == 1., # below | row < rows - 1 and M[0, 1, row, col] == 1., # left - col > 0 and M[0, 0, row, col - 1] == 1., # right - col < cols - 1 and M[0, 0, row, col] == 1.]).astype(np.float32) if np.sum(conn) == 1. \ or np.sum(conn) == 4. \ or np.sum(conn) > 1. 
\ and onp.random.binomial(1, 0.85) == 0.: dungeon[4 * row : 4 * row + 3, 4 * col : 4 * col + 3] = 0. # horizontal corridors - (left and right) if col < cols - 1: corridor = M[0, 0, row, col] if corridor > 0.: #dungeon[4 * row + 1, 4 * col + 3] = corridor # d(row, col) dungeon[4 * row + 1, (4 * col + 1):(4 * col + 6)] = 0. # vertical corridors | (above and below) if row < rows - 1: corridor = M[0, 1, row, col] if corridor > 0.: #dungeon[4 * row + 3, 4 * col + 1] = corridor dungeon[(4 * row + 1):(4 * row + 6), 4 * col + 1] = 0. return dungeon def plot_dense_dungeon(M): plt.imshow(draw_dense_dungeon(M), vmin=0., vmax=1.) plot_dense_dungeon(np.ones((1, 2, dungeon_rows, dungeon_cols))) ``` Let's write a little dungeon generation function. Instead of having a fixed kernel like the caves generator, we'll let the kernel values be the target of optimization. The kernel will have the shape `(2, 2, 3, 3)` meaning `(num_iterations, horizontal/vertical corridors, width, height)`. Finally, we can test the generator with a kernel of all `.1111`s. ``` def dungeon(params, x): for i in range(2): kernel_a = params[i:i+1, 0:1, :, :] kernel_b = params[i:i+1, 1:2, :, :] a = x[:, 0:1, :, :] b = x[:, 1:2, :, :] conv_a = sigmoid(jax.lax.conv(a, kernel_a, (1, 1), "SAME")) conv_b = sigmoid(jax.lax.conv(b, kernel_b, (1, 1), "SAME")) x = np.stack([conv_a, conv_b], axis=2)[0] return x dungeon_kernel = jax.numpy.ones((2, 2, 3, 3))/(3.*3.) print(dungeon_kernel) d = onp.random.binomial(1, dungeon(dungeon_kernel, onp.random.rand(1, 2, dungeon_rows, dungeon_cols))) print(np.sum(d)) plot_dense_dungeon(d) ``` # Dungeon Loss Let's start with optimizing the dungeon for low density. Graph density <img src="https://www.python-course.eu/images/graph_density_formula.png" /> is calculated using this formula where V is the number of verticies and E is the number of edges. 
We don't have an exact count of vertices and edges, so we'll say that every vertex is present in the graph (`width * height`) and then approximate the number of edges by summing the output, which is conceptually a bit like the probability that a corridor is present.
print("Epoch init ") #print("Training set accuracy {}".format(train_acc)) #print("Test set accuracy {}".format(test_acc)) for epoch in range(num_epochs): rng = random.PRNGKey(epoch) start_time = time.time() epoch_batch = None for _ in range(num_batches): batch = next(batches) epoch_batch = batch opt_state = update(rng, next(itercount), opt_state, batch) params = get_params(opt_state) losses.append(dungeon_loss(params, epoch_batch)) test_accs.append(dungeon_loss(params, next(batches))) epoch_time = time.time() - start_time print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time)) print("Training set accuracy {}".format(losses[-1])) print("Test set accuracy {}".format(test_accs[-1])) plt.plot(losses) plt.plot(test_accs) return params dungeon_params_1 = dungeon_opt_1() dungeon_params_1 ``` The dungeons aren't really that great, huh? For one we're not taking into account the connectedness of the dungeons. ``` d = onp.random.binomial(1, dungeon(dungeon_kernel, onp.random.rand(1, 2, dungeon_rows, dungeon_cols))) print(draw_dense_dungeon(d)) plot_dense_dungeon(d) ``` So how can we bring connectedness into the loss function? Tensorflow's `connected_components` function while useful, isn't differentiable. It is still an open question of whether a differentiable form even exists. ``` t = connected_components(draw_dense_dungeon(d) * -1. + 1.) sess = tf.Session(); with sess.as_default(): print(t.eval()) print(len(onp.unique(t.eval()))) plt.imshow(t.eval()) ``` ## Training a connected components model What if we generated some dungeons, calculated the number of connected components, and used that as a dataset to train a deep learning model? The deep learning model is differentiable and could be called from the loss function! Let's do this. First create the dataset (or load it from disk because it takes a long time). 
``` from tqdm import tqdm from os import path num_epochs = 60 batch_size = 128 def gen(i): return onp.random.binomial(1, dungeon(dungeon_kernel, onp.random.rand(1, 2, dungeon_rows, dungeon_cols))) def gen_target(ti): sess = tf.Session() return len(onp.unique(connected_components(draw_dense_dungeon(ti) * -1. + 1.).eval(session=sess))) if path.exists('cc_train.npy') and path.exists('cc_y.npy'): train_images = np.load('cc_train.npy') train_cc = np.load('cc_y.npy') else: print('generating inputs') train_images = Parallel(n_jobs=num_cores)(delayed(gen)(i) for i in tqdm(range(num_epochs * batch_size))) print('generated responses') train_cc = Parallel(n_jobs=num_cores)(delayed(gen_target)(ti) for ti in tqdm(train_images)) train_images = np.squeeze(np.stack(train_images), 1).astype(np.float32) train_cc = np.squeeze(np.stack(train_cc), 1).astype(np.float32) np.save('cc_train.npy', train_images) np.save('cc_y.npy', train_cc) print(train_images.shape, train_images.dtype) print(train_cc.shape, train_cc.dtype) ``` This is the distribution of the number of connected components in the dataset. ``` plt.hist(train_cc) ``` Let's setup a small convolutional neural network to estimate the number of connected components. It trains quickly at first and gives ok, but not great results. If you can do better, let me know! 
``` from jax.experimental.stax import (AvgPool, BatchNorm, Conv, Dense, Dropout, FanInConcat, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, SumPool, LogSoftmax) def sigmoid(x): return expit(x) def elu(x): return np.where(x > 0, x, np.expm1(x)) def leaky_relu(x): return np.where(x >= 0, x, 0.01 * x) Sigmoid = stax.elementwise(sigmoid) Elu = stax.elementwise(elu) LeakyRelu = stax.elementwise(leaky_relu) MinPool = stax._pooling_layer(jax.lax.max, -np.inf) init_random_params, predict = stax.serial( GeneralConv(('NCHW', 'HWIO', 'NHWC'), 1, (1, 1), (1, 1), "SAME"), FanOut(2), stax.parallel( stax.serial( MinPool((3, 3), strides=(3, 3)), Dropout(0.5), Sigmoid, FanOut(2), stax.parallel( stax.serial( MinPool((3, 3), strides=(3, 3)), Dropout(0.5), Sigmoid), stax.serial( MaxPool((3, 3), strides=(3, 3)), Dropout(0.5), Sigmoid)), FanInConcat(1)), stax.serial( MaxPool((3, 3), strides=(3, 3)), Dropout(0.5), Sigmoid, FanOut(2), stax.parallel( stax.serial( MinPool((3, 3), strides=(3, 3)), Dropout(0.5), Sigmoid), stax.serial( MaxPool((3, 3), strides=(3, 3)), Dropout(0.5), Sigmoid)), FanInConcat(1))), FanInConcat(1), GeneralConv(('NHWC', 'HWIO', 'NHWC'), 2, (3, 3), (1, 1), "SAME"), Sigmoid, GeneralConv(('NHWC', 'HWIO', 'NHWC'), 1, (1, 1), (1, 1), "SAME"), Dense(16), Sigmoid, Dropout(0.5), Dense(16), Sigmoid, Dropout(0.5), Dense(8), Sigmoid, Dense(1)) def loss(params, batch, **kwargs): inputs, targets = batch preds = predict(params, inputs, **kwargs) #print(inputs.shape, targets.shape, preds.shape) return np.mean((preds - targets) ** 2) def accuracy(params, batch, **kwargs): inputs, targets = batch preds = predict(params, inputs, **kwargs) #print(inputs.shape, targets.shape, preds.shape) return np.mean((preds - targets) ** 2) def test_accuracy(params): test_images = Parallel(n_jobs=num_cores)(delayed(gen)(i) for i in range(16)) test_cc = Parallel(n_jobs=num_cores)(delayed(gen_target)(ti) for ti in test_images) test_images = np.squeeze(np.stack(test_images), 
1).astype(np.float32) test_cc = np.squeeze(np.stack(test_cc), 1).astype(np.float32) test_acc = accuracy(params, (test_images, test_cc), rng=random.PRNGKey(0)) return test_acc def ccnet(): init_rng = random.PRNGKey(0) step_size = 0.01 momentum_mass = 0.9 #train_images, _, test_images, _ = datasets.mnist(permute_train=True) num_train = train_images.shape[0] num_complete_batches, leftover = divmod(train_images.shape[0], batch_size) num_batches = num_complete_batches + bool(leftover) nun_epochs = 60 def data_stream(): rng = onp.random.RandomState(0) while True: perm = rng.permutation(num_train) for i in range(num_batches): batch_idx = perm[i * batch_size:(i + 1) * batch_size] yield train_images[batch_idx], train_cc[batch_idx] batches = data_stream() opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=momentum_mass) @jit def update(rng, i, opt_state, batch): params = get_params(opt_state) return opt_update(i, grad(loss)(params, batch, rng=rng), opt_state) _, init_params = init_random_params(init_rng, (-1, 2, dungeon_rows, dungeon_cols)) opt_state = opt_init(init_params) itercount = itertools.count() print("\nStarting training...") losses = [] test_accs = [] params = get_params(opt_state) train_acc = accuracy(params, (train_images, train_cc), rng=init_rng) losses.append(train_acc) test_acc = test_accuracy(params) test_accs.append(test_acc) print("Epoch init ") print("Training set accuracy {}".format(train_acc)) print("Test set accuracy {}".format(test_acc)) for epoch in range(num_epochs): rng = random.PRNGKey(epoch) start_time = time.time() for _ in range(num_batches): opt_state = update(rng, next(itercount), opt_state, next(batches)) params = get_params(opt_state) train_acc = accuracy(params, (train_images, train_cc), rng=rng) losses.append(train_acc) test_acc = test_accuracy(params) test_accs.append(test_acc) epoch_time = time.time() - start_time print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time)) print("Training set accuracy 
{}".format(train_acc)) print("Test set accuracy {}".format(test_acc)) plt.plot(losses) plt.plot(test_accs) return params cc_params = ccnet() cc_params %%script false import graphviz as gv src = gv.Source(jax.api._make_graphviz(predict)(params, train_images, rng=random.PRNGKey(0))) #print(src.source) src.view() print("Test set accuracy {}".format(test_accuracy(cc_params))) ``` Alright, let's have a second go at a loss function. This one will try to both minimize density and the number of connected components. ``` @jit def dungeon_loss_2(params, batch): '''Simultaneously lower density and number of connected components''' x, y = batch out = dungeon(params, x) # map 0.0, 1.0 to -1.0, 1.0, convolve with kernel e = np.sum(out) v = np.prod(np.array(out.shape)) # d=density d = 2. * e / (v * (v - 1.)) cc = predict(cc_params, out, rng=random.PRNGKey(0)) return np.mean(((d + cc) - y) ** 2.) def dungeon_opt_2(): init_rng = random.PRNGKey(1) step_size = 0.05 momentum_mass = 0.9 num_epochs = 32 batch_size = 32 train_X = np.squeeze(np.array([onp.random.rand(1, 2, dungeon_rows, dungeon_cols) for _ in range(num_epochs * batch_size)]), 1) train_y = np.array([1. 
for _ in range(train_X.shape[0])]) num_train = train_X.shape[0] num_complete_batches, leftover = divmod(train_X.shape[0], batch_size) num_batches = num_complete_batches + bool(leftover) print("Input shape:", train_X.shape) print("Output shape", train_y.shape) print("Num Epochs:", num_epochs) print("Num Batches:", num_batches) def data_stream(): rng = onp.random.RandomState(0) while True: perm = rng.permutation(num_train) for i in range(num_batches): batch_idx = perm[i * batch_size:(i + 1) * batch_size] yield train_X[batch_idx], train_y[batch_idx] batches = data_stream() opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=momentum_mass) @jit def update(rng, i, opt_state, batch): params = get_params(opt_state) return opt_update(i, grad(dungeon_loss_2)(params, batch), opt_state) #init_params = onp.random.randn(2, 2, 5, 5) * onp.sqrt(2/(2 + 5 + 5)) init_params = jax.numpy.ones((2, 2, 3, 3))/(3.*3.) opt_state = opt_init(init_params) itercount = itertools.count() print("\nStarting training...") losses = [] test_accs = [] params = get_params(opt_state) #train_acc = accuracy(params, (train_images, train_cc), rng=init_rng) #losses.append(train_acc) #test_acc = test_accuracy(params) #test_accs.append(test_acc) print("Epoch init ") #print("Training set accuracy {}".format(train_acc)) #print("Test set accuracy {}".format(test_acc)) for epoch in range(num_epochs): rng = random.PRNGKey(epoch) start_time = time.time() epoch_batch = None for _ in range(num_batches): batch = next(batches) epoch_batch = batch opt_state = update(rng, next(itercount), opt_state, batch) params = get_params(opt_state) losses.append(dungeon_loss_2(params, epoch_batch)) test_accs.append(dungeon_loss_2(params, next(batches))) epoch_time = time.time() - start_time print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time)) print("Training set accuracy {}".format(losses[-1])) print("Test set accuracy {}".format(test_accs[-1])) plt.plot(losses) plt.plot(test_accs) return params 
dungeon_params_2 = dungeon_opt_2() dungeon_params_2 ``` And this is the result of generating a new dungeon. ``` d = onp.random.binomial(1, dungeon(dungeon_params_2, onp.random.rand(1, 2, dungeon_rows, dungeon_cols))) t = connected_components(draw_dense_dungeon(d) * -1. + 1.) sess = tf.Session(); with sess.as_default(): #print(t.eval()) te = t.eval() print(te) a = onp.array(onp.unique(te, return_counts=True)) sorted_cc = a[:, a[1,:].argsort()[::-1]] print(sorted_cc) plt.imshow(onp.where(te == sorted_cc[0, 1], 0., 1.), vmin=0., vmax=1.) ``` Here are man new dungeons. Let's note that the dungeons are mostly tree-shaped - lots of branches with few loops. That makes sense, loops are likely to get pruned when optimizing for low density. How would you modify the loss function to penalize dead-ends? ``` plt.subplots(figsize=(12, 12)) sess = tf.Session(); with sess.as_default(): for i in range(1, 4): for j in range(1, 5): d = onp.random.binomial(1, dungeon(dungeon_params_2, onp.random.rand(1, 2, dungeon_rows, dungeon_cols))) t = connected_components(draw_dense_dungeon(d) * -1. + 1.).eval() a = onp.array(onp.unique(t, return_counts=True)) sorted_cc = a[:, a[1,:].argsort()[::-1]] #print(4, 5, (i - 1) * 4 + j) plt.subplot(3, 4, (i - 1) * 4 + j) plt.imshow(onp.where(t == sorted_cc[0, 1], 0., 1.), vmin=0., vmax=1.) plt.tight_layout() ``` # Conclusion I hope this notebook can help you explore the world of differentiable procgen. There are many ways to approach procedural generation and maybe having another tool in your belt can help you solve the right problem when you encounter it in the future.
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. # 41. Export Run History as Tensorboard logs 1. Run some training and log some metrics into Run History 2. Export the run history to some directory as Tensorboard logs 3. Launch a local Tensorboard to view the run history ## Prerequisites Make sure you go through the [00. Installation and Configuration](00.configuration.ipynb) Notebook first if you haven't. ``` # Check core SDK version number import azureml.core print("SDK version:", azureml.core.VERSION) ``` ## Initialize Workspace Initialize a workspace object from persisted configuration. ``` from azureml.core import Workspace, Run, Experiment ws = Workspace.from_config() print('Workspace name: ' + ws.name, 'Azure region: ' + ws.location, 'Subscription id: ' + ws.subscription_id, 'Resource group: ' + ws.resource_group, sep = '\n') ``` ## Set experiment name and start the run ``` experiment_name = 'export-to-tensorboard' exp = Experiment(ws, experiment_name) root_run = exp.start_logging() # load diabetes dataset, a well-known built-in small dataset that comes with scikit-learn from sklearn.datasets import load_diabetes from sklearn.linear_model import Ridge from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split X, y = load_diabetes(return_X_y=True) columns = ['age', 'gender', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6'] x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) data = { "train":{"x":x_train, "y":y_train}, "test":{"x":x_test, "y":y_test} } # Example experiment from tqdm import tqdm alphas = [.1, .2, .3, .4, .5, .6 , .7] # try a bunch of alpha values in a Linear Regression (Ridge) model for alpha in tqdm(alphas): # create a bunch of child runs with root_run.child_run("alpha" + str(alpha)) as run: # More data science stuff reg = Ridge(alpha=alpha) reg.fit(data["train"]["x"], data["train"]["y"]) # TODO save model preds = 
reg.predict(data["test"]["x"]) mse = mean_squared_error(preds, data["test"]["y"]) # End train and eval # log alpha, mean_squared_error and feature names in run history root_run.log("alpha", alpha) root_run.log("mse", mse) ``` ## Export Run History to Tensorboard logs ``` # Export Run History to Tensorboard logs from azureml.contrib.tensorboard.export import export_to_tensorboard import os import tensorflow as tf logdir = 'exportedTBlogs' log_path = os.path.join(os.getcwd(), logdir) try: os.stat(log_path) except os.error: os.mkdir(log_path) print(logdir) # export run history for the project export_to_tensorboard(root_run, logdir) # or export a particular run # export_to_tensorboard(run, logdir) root_run.complete() ``` ## Start Tensorboard Or you can start the Tensorboard outside this notebook to view the result ``` from azureml.contrib.tensorboard import Tensorboard # The Tensorboard constructor takes an array of runs, so be sure and pass it in as a single-element array here tb = Tensorboard([], local_root=logdir, port=6006) # If successful, start() returns a string with the URI of the instance. tb.start() ``` ## Stop Tensorboard When you're done, make sure to call the `stop()` method of the Tensorboard object. ``` tb.stop() ```
github_jupyter