text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` import pandas as pd import plotly.express as px import plotly.graph_objects as go import numpy as np from plotly.subplots import make_subplots from pathlib import Path casos = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto3/TotalesPorRegion_std.csv') casos['Fecha'] = pd.to_datetime(casos['Fecha']) casos_sintomaticos = casos[casos['Categoria']=='Casos nuevos con sintomas'].pivot(index='Fecha', columns='Region', values='Total') casos_nuevos = casos[casos['Categoria']=='Casos nuevos totales'].pivot(index='Fecha', columns='Region', values='Total') casos_activos_conf = casos[casos['Categoria']=='Casos activos confirmados'].pivot(index='Fecha', columns='Region', values='Total') casos_activos_prob = casos[casos['Categoria']=='Casos activos probables'].pivot(index='Fecha', columns='Region', values='Total') casos_nuevos_prob = casos[casos['Categoria']=='Casos probables acumulados'].pivot(index='Fecha', columns='Region', values='Total').diff() casos_nuevos_antigeno = casos[casos['Categoria']=='Casos nuevos confirmados por antigeno'].pivot(index='Fecha', columns='Region', values='Total') casos_sintomaticos.rename(columns={'Total': 'Chile'}, inplace=True) casos_nuevos.rename(columns={'Total': 'Chile'}, inplace=True) casos_activos_conf.rename(columns={'Total': 'Chile'}, inplace=True) casos_activos_prob.rename(columns={'Total': 'Chile'}, inplace=True) casos_nuevos_prob.rename(columns={'Total': 'Chile'}, inplace=True) casos_nuevos_antigeno.rename(columns={'Total': 'Chile'}, inplace=True) casos_nuevos_prob_antigeno = casos_nuevos.add(casos_nuevos_prob, fill_value=0) casos_nuevos_prob_antigeno = casos_nuevos_prob_antigeno.add(casos_nuevos_antigeno, fill_value=0) casos_nuevos_prob_antigeno['Chile'].plot() fig = go.Figure() Wong = ['#000000', '#E69F00', '#56B4E9', '#009E73', '#F0E442', '#0072B2', '#D55E00', '#CC79A7'] fig.add_trace( go.Scatter(x=casos_nuevos.index, y=casos_nuevos['Chile'].rolling(11).sum(), mode='lines', 
name='Inferencia de activos (DP3)', line_color=Wong[0] ) ) fig.add_trace( go.Scatter(x=casos_nuevos.index, y=casos_nuevos_prob_antigeno['Chile'].rolling(11).sum(), mode='lines', name='Inferencia de activos (DP3)', line_color=Wong[1] ) ) fig.update_layout(hovermode='x') fig.update_layout(template='plotly_white', title='Casos Activos de COVID19 en Chile') fig.update_layout(yaxis_tickformat = ',') casos_uci = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto8/UCI_T.csv') casos_uci.rename(columns={'Region': 'Fecha'}, inplace=True) casos_uci = casos_uci.iloc[2:] casos_uci['Fecha'] = pd.to_datetime(casos_uci['Fecha']) casos_uci.set_index('Fecha', inplace=True) casos_uci['Chile'] = casos_uci[list(casos_uci.columns)].sum(axis=1) casos_uci ucilag = 14 propuci_toto = casos_uci['Chile'].shift(-ucilag)/casos_nuevos_prob_antigeno['Chile'].rolling(11).sum() propuci_toto.median() ```
github_jupyter
``` # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input/oct2017/OCT2017 /train")) # Any results you write to the current directory are saved as output. Training_Dir = '../input/oct2017/OCT2017 /train' Validation_Dir = '../input/oct2017/OCT2017 /val' Testing_Dir = '../input/oct2017/OCT2017 /test' import tensorflow as tf import keras_preprocessing from keras_preprocessing import image from keras_preprocessing.image import ImageDataGenerator training_datagen = ImageDataGenerator(rescale = 1./255) validation_datagen = ImageDataGenerator(rescale = 1./255) testing_datagen = ImageDataGenerator(rescale = 1./255) train_generator = training_datagen.flow_from_directory( Training_Dir, target_size=(150,150), shuffle=True, class_mode='categorical' ) validation_generator = validation_datagen.flow_from_directory( Validation_Dir, target_size=(150,150), shuffle=True, class_mode='categorical' ) testing_generator = testing_datagen.flow_from_directory( Testing_Dir, target_size=(150,150), shuffle=True, class_mode='categorical' ) import numpy as np np.unique(train_generator.classes) train_generator.class_indices, validation_generator.class_indices, testing_generator.class_indices np.unique(train_generator.classes, return_counts = True) np.unique(testing_generator.classes, return_counts = True) model = tf.keras.models.Sequential([ # Note the input shape is the desired size of the image 150x150 with 3 bytes color # This is the first convolution tf.keras.layers.Conv2D(64, (3,3), activation='relu', 
input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), # The second convolution tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # The third convolution tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # The fourth convolution tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), # Flatten the results to feed into a DNN tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.5), # 512 neuron hidden layer tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(4, activation='softmax') ]) model.summary() model.compile(loss = 'categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) history = model.fit_generator(train_generator, epochs=1, validation_data = testing_generator, verbose = 1) history = model.fit_generator(train_generator, epochs=2, validation_data = testing_generator, verbose = 1) y_train_pred = model.predict_generator(train_generator) y_train = train_generator.classes[train_generator.index_array] y_train_pred = np.argmax(y_train_pred, axis=-1) from sklearn.metrics import confusion_matrix import itertools cnf_matrix = confusion_matrix(y_train, y_train_pred) cnf_matrix from sklearn.metrics import accuracy_score accuracy_score(y_train, y_train_pred) from sklearn.metrics import classification_report target_names = ['CNV', 'DME', 'DRUSEN', 'NORMAL'] print(classification_report(y_train, y_train_pred, target_names=target_names)) ```
github_jupyter
``` import pandas as pd import numpy as np car=pd.read_csv('quikr_car.csv') car.head() car.shape car.info() ``` # Data Quality - year has many object values & these r in object - Price contains object values and seperacted by comas - kms_driven=Int with kms & are in int and seperated with commas & nan values - fuel type has nan values - name is inconsistant convering it in catrgorial by getting first three words # Data Cleaning ``` #backup the data backup=car.copy() # 1. only keeping those values that are years car=car[car['year'].str.isnumeric()] # 1.1 converting year from object to int car['year']=car['year'].astype(int) car.info() # 2.keeping only those who dont have 'ask for price' car=car[car['Price']!='Ask For Price'] # 2.2 removing commas car['Price']=car['Price'].str.replace(',','') # 2.3 Converting obj to int car['Price']=car['Price'].astype(int) car.info() # 3 removing kms from price with split car['kms_driven']=car['kms_driven'].str.split(' ').str.get(0) # 3.1 replacing commas with empty space car['kms_driven']=car['kms_driven'].str.replace(',','') # 3.3 Keeping only numeric values deleting petrol car=car[car['kms_driven'].str.isnumeric()] # 3.4 converting object to integer car['kms_driven']=car['kms_driven'].astype(int) car.info() # 4.4 removing nan values car['fuel_type'].isna().sum() # there is only one row with nan values # ~ is used for excluding - - car = car[~car['fuel_type'].isna()] # 5.1 keeping 1st three words of name column car['name'].str.split(' ') car['name']=car['name'].str.split(' ').str.slice(0,3) car['name']=car['name'].str.join(' ') car['name'] car #there is missing index for reseting index we need to call reset_index() car.reset_index(drop=True) # If we did only reset index()then previous index will also visible # For dropping replacing/dropping prev index we need to set parameter drop=True # Saving it again in car dataframe car=car.reset_index(drop=True) # The data after cleaning car.info() car.describe() # There is 85lack price of a 
car checking car whose price is more than 60lach car[car['Price']>6e6] # Keeping those prices which are under 60 lack car=car[car["Price"]<6e6].reset_index(drop=True) car # Storing this clean data in csv car.to_csv('Cleaned_car.csv') ``` # Way to preapare Model ``` x=car.drop(columns=['Price']) y=car['Price'] from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import r2_score from sklearn.compose import make_column_transformer from sklearn.pipeline import make_pipeline xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.2,random_state=42) ohe = OneHotEncoder() ohe.fit(x[['name','company','fuel_type']]) ohe.categories_ column_trans=make_column_transformer( (OneHotEncoder(categories=ohe.categories_),['name','company','fuel_type']), remainder='passthrough') lr=LinearRegression() pipe=make_pipeline(column_trans,lr) pipe.fit(xtrain,ytrain) y_pred=pipe.predict(xtest) y_pred r2_score(ytest,y_pred) # Checking at what random state the r2 score is biggest score=[] for i in range (1000): xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.2,random_state=i) lr=LinearRegression() pipe=make_pipeline(column_trans,lr) pipe.fit(xtrain,ytrain) ypred=pipe.predict(xtest) score.append(r2_score(ytest,ypred)) np.argmax(score) score[np.argmax(score)] xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.2,random_state=np.argmax(score)) lr=LinearRegression() pipe=make_pipeline(column_trans,lr) pipe.fit(xtrain,ytrain) ypred=pipe.predict(xtest) print(r2_score(ytest,ypred)) ``` # Dumping this pipeline ``` import pickle pickle.dump(pipe,open('Linear_Regression.pkl','wb')) ``` # Using demo ``` # Input should in forms of dataframe pipe.predict(pd.DataFrame([['Maruti Suzuki Ritz','Maruti',2011,50000,'Petrol']],columns=['name', 'company', 'year', 'kms_driven', 'fuel_type'])) x.columns ```
github_jupyter
# Part 1: Launch a Grid Node Locally In this tutorial, you'll learn how to deploy a grid node into a local machine and then interact with it using PySyft. _WARNING: Grid nodes publish datasets online and are for EXPERIMENTAL use only. Deploy nodes at your own risk. Do not use OpenGrid with any data/models you wish to keep private._ In order to run a node locally all you need to do is run an application and then start communicating with it through a Grid Worker. In this tutorial we'll use the websocket app available [here](https://github.com/OpenMined/PyGrid/tree/dev/app/websocket). ### Step 1: Download the repository ```bash git clone https://github.com/OpenMined/PyGrid/ ``` ### Step 2: Download dependencies You'll need to have the app dependencies installed. We recommend setting up an independent [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/concepts/environments.html) to avoid problems with library versions. You can install the dependencies by running: ```bash cd PyGrid/app/websocket/ pip install -r requirements.txt ``` ### Step 3: Make grid importable Install grid as a python package ```bash cd PyGrid python setup.py install (or python setup.py develop) ``` ### Step 3: Start App Then, to start the app, just run the `websocket_app` script. ```bash cd PyGrid/app/websocket python websocket_app.py --start_local_db --id=<worker_id> --port=<port number> ``` This will start the app with id equals to `<worker_id>` on address: `http://0.0.0.0/<port_number>`. The `--start_local_db` automatically starts a local database so you don't have to configure one yourself. To check what other arguments you can use when running this app, run: ```bash python websocket_app.py --help ``` Let's start a worker with id equals to `bob` on port `3000` ```bash python websocket_app.py --start_local_db --id=bob --port=3000 ``` Great. The script should continue running, if the app started successfully. 
### Step 4: Creating a Grid Worker and start communication Let's instantiate a WebsocketGridWorker to talk to this app. ``` # General dependencies import torch as th import syft as sy import grid as gr hook = sy.TorchHook(th) # WARNING: We should use the same id and port as the one used to start the app!!! worker = gr.WebsocketGridClient(hook, id="bob", address="http://localhost:3000") # If you don't connect to the worker you can't send messages to it worker.connect() ``` ### Step 5: Use PySyft Like Normal Now you can simply use the worker you created like you would any other normal PySyft worker. For more on how PySyft works, please see the PySyft tutorials: https://github.com/OpenMined/PySyft/tree/dev/examples/tutorials ``` x = th.tensor([1,2,3,4]).send(worker) x y = x + x y y.get() ```
github_jupyter
<a href="https://colab.research.google.com/github/DemonFlexCouncil/DDSP-48kHz-Stereo/blob/master/ddsp/colab/ddsp_48kHz_stereo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ##### Copyright 2020 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); ``` # Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` # Train & Timbre Transfer--DDSP Autoencoder on GPU--48kHz/Stereo Made by [Google Magenta](https://magenta.tensorflow.org/)--altered by [Demon Flex Council](https://soundcloud.com/demonflexcouncil) This notebook demonstrates how to install the DDSP library and train it for synthesis based on your own data using command-line scripts. If run inside of Colab, it will automatically use a free Google Cloud GPU. **A Little Background** A producer friend of mine turned me on to Magenta’s DDSP, and I’m glad he did. In my mind it represents the way forward for AI music. Finally we have a glimpse inside the black box, with access to musical parameters as well as neural net hyperparameters. And DDSP leverages decades of studio knowledge by utilizing traditional processors like synthesizers and effects. One can envision a time when DDSP-like elements will sit at the heart of production DAWs. DDSP will accept most audio sample rates and formats. 
However, native 48kHz/stereo datasets and primers will sound best. Output files are always 48kHz/stereo. You can upload datasets and primers via the browser or use Google Drive. The algorithm was designed to model single instruments played monophonically, but it can also produce interesting results with denser, polyphonic material and percussion. <img src="https://storage.googleapis.com/ddsp/additive_diagram/ddsp_autoencoder.png" alt="DDSP Autoencoder figure" width="700"> **Note that we prefix bash commands with a `!` inside of Colab, but you would leave them out if running directly in a terminal.** ### Install Dependencies First we install the required dependencies with `pip`. ``` %tensorflow_version 2.x # !pip install -qU ddsp[data_preparation] !pip install -qU git+https://github.com/DemonFlexCouncil/ddsp3@ddsp # Initialize global path for using google drive. DRIVE_DIR = '' # Helper Functions sample_rate = 48000 n_fft = 6144 ``` ### Setup Google Drive (Optional, Recommeded) This notebook requires uploading audio and saving checkpoints. While you can do this with direct uploads / downloads, it is recommended to connect to your google drive account. This will enable faster file transfer, and regular saving of checkpoints so that you do not lose your work if the colab kernel restarts (common for training more than 12 hours). #### Login and mount your drive This will require an authentication code. You should then be able to see your drive in the file browser on the left panel. ``` from google.colab import drive drive.mount('/content/drive') ``` #### Set your base directory * In drive, put all of the audio files with which you would like to train in a single folder. * Typically works well with 10-20 minutes of audio from a single monophonic source (also, one acoustic environment). * Use the file browser in the left panel to find a folder with your audio, right-click **"Copy Path", paste below**, and run the cell. ``` #@markdown (ex. 
`/content/drive/My Drive/...`) Leave blank to skip loading from Drive. DRIVE_DIR = '' #@param {type: "string"} import os assert os.path.exists(DRIVE_DIR) print('Drive Folder Exists:', DRIVE_DIR) ``` ### Make directories to save model and data ``` #@markdown Check the box below if you'd like to train with latent vectors. LATENT_VECTORS = True #@param{type:"boolean"} !git clone https://github.com/DemonFlexCouncil/gin.git if LATENT_VECTORS: GIN_FILE = 'gin/solo_instrument.gin' else: GIN_FILE = 'gin/solo_instrument_noz.gin' AUDIO_DIR_LEFT = 'data/audio-left' AUDIO_DIR_RIGHT = 'data/audio-right' MODEL_DIR_LEFT = 'data/model-left' MODEL_DIR_RIGHT = 'data/model-right' AUDIO_FILEPATTERN_LEFT = AUDIO_DIR_LEFT + '/*' AUDIO_FILEPATTERN_RIGHT = AUDIO_DIR_RIGHT + '/*' !mkdir -p $AUDIO_DIR_LEFT $AUDIO_DIR_RIGHT $MODEL_DIR_LEFT $MODEL_DIR_RIGHT if DRIVE_DIR: SAVE_DIR_LEFT = os.path.join(DRIVE_DIR, 'ddsp-solo-instrument-left') SAVE_DIR_RIGHT = os.path.join(DRIVE_DIR, 'ddsp-solo-instrument-right') INPUT_DIR = os.path.join(DRIVE_DIR, 'dataset-input') PRIMERS_DIR = os.path.join(DRIVE_DIR, 'primers') OUTPUT_DIR = os.path.join(DRIVE_DIR, 'resynthesis-output') !mkdir -p "$SAVE_DIR_LEFT" "$SAVE_DIR_RIGHT" "$INPUT_DIR" "$PRIMERS_DIR" "$OUTPUT_DIR" ``` ### Upload training audio Upload training audio to the "dataset-input" folder inside the DRIVE_DIR folder if using Drive (otherwise prompts local upload.) 
``` !pip install note_seq import glob import os from ddsp.colab import colab_utils from google.colab import files import librosa import numpy as np from scipy.io.wavfile import write as write_audio if DRIVE_DIR: wav_files = glob.glob(os.path.join(INPUT_DIR, '*.wav')) aiff_files = glob.glob(os.path.join(INPUT_DIR, '*.aiff')) aif_files = glob.glob(os.path.join(INPUT_DIR, '*.aif')) ogg_files = glob.glob(os.path.join(INPUT_DIR, '*.ogg')) flac_files = glob.glob(os.path.join(INPUT_DIR, '*.flac')) mp3_files = glob.glob(os.path.join(INPUT_DIR, '*.mp3')) audio_files = wav_files + aiff_files + aif_files + ogg_files + flac_files + mp3_files else: uploaded_files = files.upload() audio_files = list(uploaded_files.keys()) for fname in audio_files: # Convert to 48kHz. audio, unused_sample_rate = librosa.load(fname, sr=48000, mono=False) if (audio.ndim == 2): audio = np.swapaxes(audio, 0, 1) # Mono to stereo. if (audio.ndim == 1): print('Converting mono to stereo.') audio = np.stack((audio, audio), axis=-1) target_name_left = os.path.join(AUDIO_DIR_LEFT, os.path.basename(fname).replace(' ', '_').replace('aiff', 'wav').replace('aif', 'wav').replace('ogg', 'wav').replace('flac', 'wav').replace('mp3', 'wav')) target_name_right = os.path.join(AUDIO_DIR_RIGHT, os.path.basename(fname).replace(' ', '_').replace('aiff', 'wav').replace('aif', 'wav').replace('ogg', 'wav').replace('flac', 'wav').replace('mp3', 'wav')) # Split to dual mono. write_audio(target_name_left, sample_rate, audio[:, 0]) write_audio(target_name_right, sample_rate, audio[:, 1]) ``` ### Preprocess raw audio into TFRecord dataset We need to do some preprocessing on the raw audio you uploaded to get it into the correct format for training. This involves turning the full audio into short (4-second) examples, inferring the fundamental frequency (or "pitch") with [CREPE](http://github.com/marl/crepe), and computing the loudness. 
These features will then be stored in a sharded [TFRecord](https://www.tensorflow.org/tutorials/load_data/tfrecord) file for easier loading. Depending on the amount of input audio, this process usually takes a few minutes. * (Optional) Transfer dataset from drive. If you've already created a dataset, from a previous run, this cell will skip the dataset creation step and copy the dataset from `$DRIVE_DIR/data` ``` !pip install apache_beam import glob import os TRAIN_TFRECORD_LEFT = 'data/train-left.tfrecord' TRAIN_TFRECORD_RIGHT = 'data/train-right.tfrecord' TRAIN_TFRECORD_FILEPATTERN_LEFT = TRAIN_TFRECORD_LEFT + '*' TRAIN_TFRECORD_FILEPATTERN_RIGHT = TRAIN_TFRECORD_RIGHT + '*' # Copy dataset from drive if dataset has already been created. drive_data_dir = os.path.join(DRIVE_DIR, 'data') drive_dataset_files = glob.glob(drive_data_dir + '/*') if DRIVE_DIR and len(drive_dataset_files) > 0: !cp "$drive_data_dir"/* data/ else: # Make a new dataset. if (not glob.glob(AUDIO_FILEPATTERN_LEFT)) or (not glob.glob(AUDIO_FILEPATTERN_RIGHT)): raise ValueError('No audio files found. Please use the previous cell to ' 'upload.') !ddsp_prepare_tfrecord \ --input_audio_filepatterns=$AUDIO_FILEPATTERN_LEFT \ --output_tfrecord_path=$TRAIN_TFRECORD_LEFT \ --num_shards=10 \ --sample_rate=$sample_rate \ --alsologtostderr !ddsp_prepare_tfrecord \ --input_audio_filepatterns=$AUDIO_FILEPATTERN_RIGHT \ --output_tfrecord_path=$TRAIN_TFRECORD_RIGHT \ --num_shards=10 \ --sample_rate=$sample_rate \ --alsologtostderr # Copy dataset to drive for safe-keeping. if DRIVE_DIR: !mkdir "$drive_data_dir"/ print('Saving to {}'.format(drive_data_dir)) !cp $TRAIN_TFRECORD_FILEPATTERN_LEFT "$drive_data_dir"/ !cp $TRAIN_TFRECORD_FILEPATTERN_RIGHT "$drive_data_dir"/ ``` ### Save dataset statistics for timbre transfer Quantile normalization helps match loudness of timbre transfer inputs to the loudness of the dataset, so let's calculate it here and save in a pickle file. 
``` from ddsp.colab import colab_utils import ddsp.training data_provider_left = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_LEFT, sample_rate=sample_rate) data_provider_right = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_RIGHT, sample_rate=sample_rate) dataset_left = data_provider_left.get_dataset(shuffle=False) dataset_right = data_provider_right.get_dataset(shuffle=False) if DRIVE_DIR: PICKLE_FILE_PATH_LEFT = os.path.join(SAVE_DIR_LEFT, 'dataset_statistics_left.pkl') PICKLE_FILE_PATH_RIGHT = os.path.join(SAVE_DIR_RIGHT, 'dataset_statistics_right.pkl') else: PICKLE_FILE_PATH_LEFT = os.path.join(MODEL_DIR_LEFT, 'dataset_statistics_left.pkl') PICKLE_FILE_PATH_RIGHT = os.path.join(MODEL_DIR_RIGHT, 'dataset_statistics_right.pkl') colab_utils.save_dataset_statistics(data_provider_left, PICKLE_FILE_PATH_LEFT, batch_size=1) colab_utils.save_dataset_statistics(data_provider_right, PICKLE_FILE_PATH_RIGHT, batch_size=1) ``` Let's load the dataset in the `ddsp` library and have a look at one of the examples. ``` from ddsp.colab import colab_utils import ddsp.training from matplotlib import pyplot as plt import numpy as np data_provider_left = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_LEFT, sample_rate=sample_rate) dataset_left = data_provider_left.get_dataset(shuffle=False) data_provider_right = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_RIGHT, sample_rate=sample_rate) dataset_right = data_provider_right.get_dataset(shuffle=False) try: ex_left = next(iter(dataset_left)) except StopIteration: raise ValueError( 'TFRecord contains no examples. Please try re-running the pipeline with ' 'different audio file(s).') try: ex_right = next(iter(dataset_right)) except StopIteration: raise ValueError( 'TFRecord contains no examples. 
Please try re-running the pipeline with ' 'different audio file(s).') print('Top: Left, Bottom: Right') colab_utils.specplot(ex_left['audio']) colab_utils.specplot(ex_right['audio']) f, ax = plt.subplots(6, 1, figsize=(14, 12)) x = np.linspace(0, 4.0, 1000) ax[0].set_ylabel('loudness_db L') ax[0].plot(x, ex_left['loudness_db']) ax[1].set_ylabel('loudness_db R') ax[1].plot(x, ex_right['loudness_db']) ax[2].set_ylabel('F0_Hz L') ax[2].set_xlabel('seconds') ax[2].plot(x, ex_left['f0_hz']) ax[3].set_ylabel('F0_Hz R') ax[3].set_xlabel('seconds') ax[3].plot(x, ex_right['f0_hz']) ax[4].set_ylabel('F0_confidence L') ax[4].set_xlabel('seconds') ax[4].plot(x, ex_left['f0_confidence']) ax[5].set_ylabel('F0_confidence R') ax[5].set_xlabel('seconds') ax[5].plot(x, ex_right['f0_confidence']) ``` ### Train Model We will now train a "solo instrument" model. This means the model is conditioned only on the fundamental frequency (f0) and loudness with no instrument ID or latent timbre feature. If you uploaded audio of multiple instruemnts, the neural network you train will attempt to model all timbres, but will likely associate certain timbres with different f0 and loudness conditions. First, let's start up a [TensorBoard](https://www.tensorflow.org/tensorboard) to monitor our loss as training proceeds. Initially, TensorBoard will report `No dashboards are active for the current data set.`, but once training begins, the dashboards should appear. ``` %reload_ext tensorboard import tensorboard as tb if DRIVE_DIR: tb.notebook.start('--logdir "{}"'.format(SAVE_DIR_LEFT)) tb.notebook.start('--logdir "{}"'.format(SAVE_DIR_RIGHT)) else: tb.notebook.start('--logdir "{}"'.format(MODEL_DIR_LEFT)) tb.notebook.start('--logdir "{}"'.format(MODEL_DIR_RIGHT)) ``` ### We will now begin training. 
Note that we specify [gin configuration](https://github.com/google/gin-config) files for the both the model architecture ([solo_instrument.gin](TODO)) and the dataset ([tfrecord.gin](TODO)), which are both predefined in the library. You could also create your own. We then override some of the spefic params for `batch_size` (which is defined in in the model gin file) and the tfrecord path (which is defined in the dataset file). #### Training Notes: * Models typically perform well when the loss drops to the range of ~5.0-7.0. * Depending on the dataset this can take anywhere from 5k-40k training steps usually. * On the colab GPU, this can take from around 3-24 hours. * We **highly recommend** saving checkpoints directly to your drive account as colab will restart naturally after about 12 hours and you may lose all of your checkpoints. * By default, checkpoints will be saved every 250 steps with a maximum of 10 checkpoints (at ~60MB/checkpoint this is ~600MB). Feel free to adjust these numbers depending on the frequency of saves you would like and space on your drive. * If you're restarting a session and `DRIVE_DIR` points a directory that was previously used for training, training should resume at the last checkpoint. ``` #@markdown Enter number of steps to train. Restart runtime to interrupt training. 
NUM_STEPS = 1000 #@param {type:"slider", min: 1000, max:40000, step:1000} NUM_LOOPS = int(NUM_STEPS / 1000) if DRIVE_DIR: TRAIN_DIR_LEFT = SAVE_DIR_LEFT TRAIN_DIR_RIGHT = SAVE_DIR_RIGHT else: TRAIN_DIR_LEFT = MODEL_DIR_LEFT TRAIN_DIR_RIGHT = MODEL_DIR_RIGHT for i in range (0, NUM_LOOPS): !ddsp_run \ --mode=train \ --alsologtostderr \ --save_dir="$TRAIN_DIR_LEFT" \ --gin_file="$GIN_FILE" \ --gin_file=datasets/tfrecord.gin \ --gin_param="TFRecordProvider.file_pattern='$TRAIN_TFRECORD_FILEPATTERN_LEFT'" \ --gin_param="batch_size=6" \ --gin_param="train_util.train.num_steps=1000" \ --gin_param="train_util.train.steps_per_save=250" \ --gin_param="trainers.Trainer.checkpoints_to_keep=10" !ddsp_run \ --mode=train \ --alsologtostderr \ --save_dir="$TRAIN_DIR_RIGHT" \ --gin_file="$GIN_FILE" \ --gin_file=datasets/tfrecord.gin \ --gin_param="TFRecordProvider.file_pattern='$TRAIN_TFRECORD_FILEPATTERN_RIGHT'" \ --gin_param="batch_size=6" \ --gin_param="train_util.train.num_steps=1000" \ --gin_param="train_util.train.steps_per_save=250" \ --gin_param="trainers.Trainer.checkpoints_to_keep=10" # Remove extra gin files. 
if DRIVE_DIR: !cd "$SAVE_DIR_LEFT" && mv "operative_config-0.gin" "$DRIVE_DIR" !cd "$SAVE_DIR_LEFT" && rm operative_config* !cd "$DRIVE_DIR" && mv "operative_config-0.gin" "$SAVE_DIR_LEFT" !cd "$SAVE_DIR_RIGHT" && mv "operative_config-0.gin" "$DRIVE_DIR" !cd "$SAVE_DIR_RIGHT" && rm operative_config* !cd "$DRIVE_DIR" && mv "operative_config-0.gin" "$SAVE_DIR_RIGHT" else: !cd "$MODEL_DIR_LEFT" && mv "operative_config-0.gin" "$AUDIO_DIR_LEFT" !cd "$MODEL_DIR_LEFT" && rm operative_config* !cd "$AUDIO_DIR_LEFT" && mv "operative_config-0.gin" "$MODEL_DIR_LEFT" !cd "$MODEL_DIR_RIGHT" && mv "operative_config-0.gin" "$AUDIO_DIR_RIGHT" !cd "$MODEL_DIR_RIGHT" && rm operative_config* !cd "$AUDIO_DIR_RIGHT" && mv "operative_config-0.gin" "$MODEL_DIR_RIGHT" ``` ### Resynthesis Check how well the model reconstructs the training data ``` !pip install note_seq from ddsp.colab.colab_utils import play, specplot, download import ddsp.training import gin from matplotlib import pyplot as plt import numpy as np from scipy.io.wavfile import write as write_audio data_provider_left = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_LEFT, sample_rate=sample_rate) data_provider_right = ddsp.training.data.TFRecordProvider(TRAIN_TFRECORD_FILEPATTERN_RIGHT, sample_rate=sample_rate) dataset_left = data_provider_left.get_batch(batch_size=1, shuffle=False) dataset_right = data_provider_right.get_batch(batch_size=1, shuffle=False) try: batch_left = next(iter(dataset_left)) except OutOfRangeError: raise ValueError( 'TFRecord contains no examples. Please try re-running the pipeline with ' 'different audio file(s).') try: batch_right = next(iter(dataset_right)) except OutOfRangeError: raise ValueError( 'TFRecord contains no examples. Please try re-running the pipeline with ' 'different audio file(s).') # Parse the gin configs. 
if DRIVE_DIR: gin_file_left = os.path.join(SAVE_DIR_LEFT, 'operative_config-0.gin') gin_file_right = os.path.join(SAVE_DIR_RIGHT, 'operative_config-0.gin') else: gin_file_left = os.path.join(MODEL_DIR_LEFT, 'operative_config-0.gin') gin_file_right = os.path.join(MODEL_DIR_RIGHT, 'operative_config-0.gin') gin.parse_config_file(gin_file_left) gin.parse_config_file(gin_file_right) # Load models model_left = ddsp.training.models.Autoencoder() model_right = ddsp.training.models.Autoencoder() if DRIVE_DIR: model_left.restore(SAVE_DIR_LEFT) model_right.restore(SAVE_DIR_RIGHT) else: model_left.restore(MODEL_DIR_LEFT) model_right.restore(MODEL_DIR_RIGHT) # Resynthesize audio. audio_left = batch_left['audio'] audio_right = batch_right['audio'] outputs_left = model_left(batch_left, training=False) audio_gen_left = model_left.get_audio_from_outputs(outputs_left) outputs_right = model_right(batch_right, training=False) audio_gen_right = model_right.get_audio_from_outputs(outputs_right) # Merge to stereo. audio_left_stereo = np.expand_dims(np.squeeze(audio_left.numpy()), axis=1) audio_right_stereo = np.expand_dims(np.squeeze(audio_right.numpy()), axis=1) audio_stereo = np.concatenate((audio_left_stereo, audio_right_stereo), axis=1) audio_gen_left_stereo = np.expand_dims(np.squeeze(audio_gen_left.numpy()), axis=1) audio_gen_right_stereo = np.expand_dims(np.squeeze(audio_gen_right.numpy()), axis=1) audio_gen_stereo = np.concatenate((audio_gen_left_stereo, audio_gen_right_stereo), axis=1) # Play. print('Original Audio') play(audio_stereo, sample_rate=sample_rate) print('Resynthesis') play(audio_gen_stereo, sample_rate=sample_rate) # Plot. 
print('Spectrograms: Top two are Original Audio L/R, bottom two are Resynthesis L/R') specplot(audio_left) specplot(audio_right) specplot(audio_gen_left) specplot(audio_gen_right) WRITE_PATH = OUTPUT_DIR + "/resynthesis.wav" write_audio("resynthesis.wav", sample_rate, audio_gen_stereo) write_audio(WRITE_PATH, sample_rate, audio_gen_stereo) !ffmpeg-normalize resynthesis.wav -o resynthesis.wav -t -15 -ar 48000 -f download("resynthesis.wav") ``` ## Timbre Transfer ### Install & Import Install ddsp, define some helper functions, and download the model. This transfers a lot of data and should take a minute or two. ``` # Ignore a bunch of deprecation warnings import warnings warnings.filterwarnings("ignore") import copy import os import time import crepe import ddsp import ddsp.training from ddsp.colab import colab_utils from ddsp.colab.colab_utils import ( auto_tune, detect_notes, fit_quantile_transform, get_tuning_factor, download, play, record, specplot, upload, DEFAULT_SAMPLE_RATE) import gin from google.colab import files import librosa import matplotlib.pyplot as plt import numpy as np import pickle import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds # Helper Functions sample_rate = 48000 n_fft = 2048 print('Done!') ``` ### Primer Audio File ``` from google.colab import files from ddsp.colab.colab_utils import play import re #@markdown * Audio should be monophonic (single instrument / voice). #@markdown * Extracts fundmanetal frequency (f0) and loudness features. #@markdown * Choose an audio file on Drive or upload an audio file. #@markdown * If you are using Drive, place the audio file in the "primers" folder inside the DRIVE_DIR folder. Enter the file name below. PRIMER_FILE = "" #@param {type:"string"} DRIVE_OR_UPLOAD = "Drive" #@param ["Drive", "Upload (.wav)"] # Check for .wav extension. 
#@markdown * Extracts fundamental frequency (f0) and loudness features.
fig, ax = plt.subplots(nrows=6, ncols=1, sharex=True, figsize=(6, 16)) ax[0].plot(audio_features_left['loudness_db'][:TRIM]) ax[0].set_ylabel('loudness_db L') ax[1].plot(audio_features_right['loudness_db'][:TRIM]) ax[1].set_ylabel('loudness_db R') ax[2].plot(librosa.hz_to_midi(audio_features_left['f0_hz'][:TRIM])) ax[2].set_ylabel('f0 [midi] L') ax[3].plot(librosa.hz_to_midi(audio_features_right['f0_hz'][:TRIM])) ax[3].set_ylabel('f0 [midi] R') ax[4].plot(audio_features_left['f0_confidence'][:TRIM]) ax[4].set_ylabel('f0 confidence L') _ = ax[4].set_xlabel('Time step [frame] L') ax[5].plot(audio_features_right['f0_confidence'][:TRIM]) ax[5].set_ylabel('f0 confidence R') _ = ax[5].set_xlabel('Time step [frame] R') ``` ## Load the Model ``` def find_model_dir(dir_name): # Iterate through directories until model directory is found for root, dirs, filenames in os.walk(dir_name): for filename in filenames: if filename.endswith(".gin") and not filename.startswith("."): model_dir = root break return model_dir if DRIVE_DIR: model_dir_left = find_model_dir(SAVE_DIR_LEFT) model_dir_right = find_model_dir(SAVE_DIR_RIGHT) else: model_dir_left = find_model_dir(MODEL_DIR_LEFT) model_dir_right = find_model_dir(MODEL_DIR_RIGHT) gin_file_left = os.path.join(model_dir_left, 'operative_config-0.gin') gin_file_right = os.path.join(model_dir_right, 'operative_config-0.gin') # Load the dataset statistics. 
DATASET_STATS_LEFT = None DATASET_STATS_RIGHT = None dataset_stats_file_left = os.path.join(model_dir_left, 'dataset_statistics_left.pkl') dataset_stats_file_right = os.path.join(model_dir_right, 'dataset_statistics_right.pkl') print(f'Loading dataset statistics from {dataset_stats_file_left}') try: if tf.io.gfile.exists(dataset_stats_file_left): with tf.io.gfile.GFile(dataset_stats_file_left, 'rb') as f: DATASET_STATS_LEFT = pickle.load(f) except Exception as err: print('Loading dataset statistics from pickle failed: {}.'.format(err)) print(f'Loading dataset statistics from {dataset_stats_file_right}') try: if tf.io.gfile.exists(dataset_stats_file_right): with tf.io.gfile.GFile(dataset_stats_file_right, 'rb') as f: DATASET_STATS_RIGHT = pickle.load(f) except Exception as err: print('Loading dataset statistics from pickle failed: {}.'.format(err)) # Parse gin config, with gin.unlock_config(): gin.parse_config_file(gin_file_left, skip_unknown=True) # Assumes only one checkpoint in the folder, 'ckpt-[iter]`. 
if DRIVE_DIR: latest_checkpoint_fname_left = os.path.basename(tf.train.latest_checkpoint(SAVE_DIR_LEFT)) latest_checkpoint_fname_right = os.path.basename(tf.train.latest_checkpoint(SAVE_DIR_RIGHT)) else: latest_checkpoint_fname_left = os.path.basename(tf.train.latest_checkpoint(MODEL_DIR_LEFT)) latest_checkpoint_fname_right = os.path.basename(tf.train.latest_checkpoint(MODEL_DIR_RIGHT)) ckpt_left = os.path.join(model_dir_left, latest_checkpoint_fname_left) ckpt_right = os.path.join(model_dir_right, latest_checkpoint_fname_right) # Ensure dimensions and sampling rates are equal time_steps_train = gin.query_parameter('DefaultPreprocessor.time_steps') n_samples_train = gin.query_parameter('Additive.n_samples') hop_size = int(n_samples_train / time_steps_train) time_steps = int(audio_left.shape[1] / hop_size) n_samples = time_steps * hop_size # print("===Trained model===") # print("Time Steps", time_steps_train) # print("Samples", n_samples_train) # print("Hop Size", hop_size) # print("\n===Resynthesis===") # print("Time Steps", time_steps) # print("Samples", n_samples) # print('') gin_params = [ 'Additive.n_samples = {}'.format(n_samples), 'FilteredNoise.n_samples = {}'.format(n_samples), 'DefaultPreprocessor.time_steps = {}'.format(time_steps), 'oscillator_bank.use_angular_cumsum = True', # Avoids cumsum accumulation errors. 
] with gin.unlock_config(): gin.parse_config(gin_params) # Trim all input vectors to correct lengths for key in ['f0_hz', 'f0_confidence', 'loudness_db']: audio_features_left[key] = audio_features_left[key][:time_steps] audio_features_right[key] = audio_features_right[key][:time_steps] audio_features_left['audio'] = audio_features_left['audio'][:, :n_samples] audio_features_right['audio'] = audio_features_right['audio'][:, :n_samples] # Set up the model just to predict audio given new conditioning model_left = ddsp.training.models.Autoencoder() model_right = ddsp.training.models.Autoencoder() model_left.restore(ckpt_left) model_right.restore(ckpt_right) # Build model by running a batch through it. start_time = time.time() unused_left = model_left(audio_features_left, training=False) unused_right = model_right(audio_features_right, training=False) print('Restoring model took %.1f seconds' % (time.time() - start_time)) #@title Modify conditioning #@markdown These models were not explicitly trained to perform timbre transfer, so they may sound unnatural if the incoming loudness and frequencies are very different then the training data (which will always be somewhat true). 
#@markdown ## Note Detection #@markdown You can leave this at 1.0 for most cases threshold = 1 #@param {type:"slider", min: 0.0, max:2.0, step:0.01} #@markdown ## Automatic ADJUST = True #@param{type:"boolean"} #@markdown Quiet parts without notes detected (dB) quiet = 30 #@param {type:"slider", min: 0, max:60, step:1} #@markdown Force pitch to nearest note (amount) autotune = 0 #@param {type:"slider", min: 0.0, max:1.0, step:0.1} #@markdown ## Manual #@markdown Shift the pitch (octaves) pitch_shift = 0 #@param {type:"slider", min:-2, max:2, step:1} #@markdown Adjsut the overall loudness (dB) loudness_shift = 0 #@param {type:"slider", min:-20, max:20, step:1} audio_features_mod_left = {k: v.copy() for k, v in audio_features_left.items()} audio_features_mod_right = {k: v.copy() for k, v in audio_features_right.items()} ## Helper functions. def shift_ld(audio_features, ld_shift=0.0): """Shift loudness by a number of ocatves.""" audio_features['loudness_db'] += ld_shift return audio_features def shift_f0(audio_features, pitch_shift=0.0): """Shift f0 by a number of ocatves.""" audio_features['f0_hz'] *= 2.0 ** (pitch_shift) audio_features['f0_hz'] = np.clip(audio_features['f0_hz'], 0.0, librosa.midi_to_hz(110.0)) return audio_features mask_on_left = None mask_on_right = None if ADJUST and DATASET_STATS_LEFT and DATASET_STATS_RIGHT is not None: # Detect sections that are "on". mask_on_left, note_on_value_left = detect_notes(audio_features_left['loudness_db'], audio_features_left['f0_confidence'], threshold) mask_on_right, note_on_value_right = detect_notes(audio_features_right['loudness_db'], audio_features_right['f0_confidence'], threshold) if np.any(mask_on_left) or np.any(mask_on_right): # Shift the pitch register. 
target_mean_pitch_left = DATASET_STATS_LEFT['mean_pitch'] target_mean_pitch_right = DATASET_STATS_RIGHT['mean_pitch'] pitch_left = ddsp.core.hz_to_midi(audio_features_left['f0_hz']) pitch_right = ddsp.core.hz_to_midi(audio_features_right['f0_hz']) mean_pitch_left = np.mean(pitch_left[mask_on_left]) mean_pitch_right = np.mean(pitch_right[mask_on_right]) p_diff_left = target_mean_pitch_left - mean_pitch_left p_diff_right = target_mean_pitch_right - mean_pitch_right p_diff_octave_left = p_diff_left / 12.0 p_diff_octave_right = p_diff_right / 12.0 round_fn_left = np.floor if p_diff_octave_left > 1.5 else np.ceil round_fn_right = np.floor if p_diff_octave_right > 1.5 else np.ceil p_diff_octave_left = round_fn_left(p_diff_octave_left) p_diff_octave_right = round_fn_right(p_diff_octave_right) audio_features_mod_left = shift_f0(audio_features_mod_left, p_diff_octave_left) audio_features_mod_right = shift_f0(audio_features_mod_right, p_diff_octave_right) # Quantile shift the note_on parts. _, loudness_norm_left = colab_utils.fit_quantile_transform( audio_features_left['loudness_db'], mask_on_left, inv_quantile=DATASET_STATS_LEFT['quantile_transform']) _, loudness_norm_right = colab_utils.fit_quantile_transform( audio_features_right['loudness_db'], mask_on_right, inv_quantile=DATASET_STATS_RIGHT['quantile_transform']) # Turn down the note_off parts. mask_off_left = np.logical_not(mask_on_left) mask_off_right = np.logical_not(mask_on_right) loudness_norm_left[mask_off_left] -= quiet * (1.0 - note_on_value_left[mask_off_left][:, np.newaxis]) loudness_norm_right[mask_off_right] -= quiet * (1.0 - note_on_value_right[mask_off_right][:, np.newaxis]) loudness_norm_left = np.reshape(loudness_norm_left, audio_features_left['loudness_db'].shape) loudness_norm_right = np.reshape(loudness_norm_right, audio_features_right['loudness_db'].shape) audio_features_mod_left['loudness_db'] = loudness_norm_left audio_features_mod_right['loudness_db'] = loudness_norm_right # Auto-tune. 
if autotune: f0_midi_left = np.array(ddsp.core.hz_to_midi(audio_features_mod_left['f0_hz'])) f0_midi_right = np.array(ddsp.core.hz_to_midi(audio_features_mod_right['f0_hz'])) tuning_factor_left = get_tuning_factor(f0_midi_left, audio_features_mod_left['f0_confidence'], mask_on_left) tuning_factor_right = get_tuning_factor(f0_midi_right, audio_features_mod_right['f0_confidence'], mask_on_right) f0_midi_at_left = auto_tune(f0_midi_left, tuning_factor_left, mask_on_left, amount=autotune) f0_midi_at_right = auto_tune(f0_midi_right, tuning_factor_right, mask_on_right, amount=autotune) audio_features_mod_left['f0_hz'] = ddsp.core.midi_to_hz(f0_midi_at_left) audio_features_mod_right['f0_hz'] = ddsp.core.midi_to_hz(f0_midi_at_right) else: print('\nSkipping auto-adjust (no notes detected or ADJUST box empty).') else: print('\nSkipping auto-adujst (box not checked or no dataset statistics found).') # Manual Shifts. audio_features_mod_left = shift_ld(audio_features_mod_left, loudness_shift) audio_features_mod_right = shift_ld(audio_features_mod_right, loudness_shift) audio_features_mod_left = shift_f0(audio_features_mod_left, pitch_shift) audio_features_mod_right = shift_f0(audio_features_mod_right, pitch_shift) # Plot Features. 
has_mask_left = int(mask_on_left is not None) has_mask_right = int(mask_on_right is not None) n_plots = 4 + has_mask_left + has_mask_right fig, axes = plt.subplots(nrows=n_plots, ncols=1, sharex=True, figsize=(2*n_plots, 10)) if has_mask_left: ax = axes[0] ax.plot(np.ones_like(mask_on_left[:TRIM]) * threshold, 'k:') ax.plot(note_on_value_left[:TRIM]) ax.plot(mask_on_left[:TRIM]) ax.set_ylabel('Note-on Mask L') ax.set_xlabel('Time step [frame]') ax.legend(['Threshold', 'Likelihood','Mask']) if has_mask_right: ax = axes[0 + has_mask_left] ax.plot(np.ones_like(mask_on_right[:TRIM]) * threshold, 'k:') ax.plot(note_on_value_right[:TRIM]) ax.plot(mask_on_right[:TRIM]) ax.set_ylabel('Note-on Mask R') ax.set_xlabel('Time step [frame]') ax.legend(['Threshold', 'Likelihood','Mask']) ax = axes[0 + has_mask_left + has_mask_right] ax.plot(audio_features_left['loudness_db'][:TRIM]) ax.plot(audio_features_mod_left['loudness_db'][:TRIM]) ax.set_ylabel('loudness_db L') ax.legend(['Original','Adjusted']) ax = axes[1 + has_mask_left + has_mask_right] ax.plot(audio_features_right['loudness_db'][:TRIM]) ax.plot(audio_features_mod_right['loudness_db'][:TRIM]) ax.set_ylabel('loudness_db R') ax.legend(['Original','Adjusted']) ax = axes[2 + has_mask_left + has_mask_right] ax.plot(librosa.hz_to_midi(audio_features_left['f0_hz'][:TRIM])) ax.plot(librosa.hz_to_midi(audio_features_mod_left['f0_hz'][:TRIM])) ax.set_ylabel('f0 [midi] L') _ = ax.legend(['Original','Adjusted']) ax = axes[3 + has_mask_left + has_mask_right] ax.plot(librosa.hz_to_midi(audio_features_right['f0_hz'][:TRIM])) ax.plot(librosa.hz_to_midi(audio_features_mod_right['f0_hz'][:TRIM])) ax.set_ylabel('f0 [midi] R') _ = ax.legend(['Original','Adjusted']) !pip3 install ffmpeg-normalize from scipy.io.wavfile import write as write_audio #@title #Resynthesize Audio af_left = audio_features_left if audio_features_mod_left is None else audio_features_mod_left af_right = audio_features_right if audio_features_mod_right is None else 
audio_features_mod_right # Run a batch of predictions. start_time = time.time() outputs_left = model_left(af_left, training=False) audio_gen_left = model_left.get_audio_from_outputs(outputs_left) outputs_right = model_right(af_right, training=False) audio_gen_right = model_right.get_audio_from_outputs(outputs_right) print('Prediction took %.1f seconds' % (time.time() - start_time)) # Merge to stereo. audio_gen_left = np.expand_dims(np.squeeze(audio_gen_left.numpy()), axis=1) audio_gen_right = np.expand_dims(np.squeeze(audio_gen_right.numpy()), axis=1) audio_gen_stereo = np.concatenate((audio_gen_left, audio_gen_right), axis=1) # Play print('Resynthesis with primer') play(audio_gen_stereo, sample_rate=sample_rate) WRITE_PATH = OUTPUT_DIR + "/resynthesis_primer.wav" write_audio("resynthesis_primer.wav", sample_rate, audio_gen_stereo) write_audio(WRITE_PATH, sample_rate, audio_gen_stereo) !ffmpeg-normalize resynthesis_primer.wav -o resynthesis_primer.wav -t -15 -ar 48000 -f colab_utils.download("resynthesis_primer.wav") ```
github_jupyter
* **``theme``** (str): theme of the editor (default: 'chrome')
``` py_code = """import sys""" editor = pn.pane.Ace(py_code, sizing_mode='stretch_both', height=300) html_pane = pn.pane.HTML(sizing_mode='stretch_both', height=300) editor.link(html_pane,code="object") pn.Row(editor, html_pane) ``` we can add some code in it ``` editor.code += """import Math x = Math.cos(x)**2 + Math.cos(y)**2 """ ``` We can change language and theme of the editor ``` html_code = r"""<!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8" /> <title>`substitute(Filename('', 'Page Title'), '^.', '\u&', '')`</title> </head> <body> <h1>Title1</h1> <h2>Title2</h2> <p>Paragraph</p> </body> </html> """ editor.language = "html" editor.theme = "monokai" editor.code = html_code ``` We can add some annotations to the editor ``` editor.annotations= [dict(row=1, column=0, text='an error', type='error'), dict(row=2, column=0, text='a warning', type='warning')] ``` If we want just to display editor content but not edit it we can set the `readonly` property to `True` ``` #editor.readonly = True ```
github_jupyter
## Imports ``` import pandas as pd from glob import glob import os from shutil import copyfile from torch.utils.data import Dataset from PIL import Image import numpy as np from numpy.random import permutation import matplotlib.pyplot as plt from torchvision import transforms from torchvision.datasets import ImageFolder from torchvision.models import resnet18,resnet34,densenet121 from torchvision.models.inception import inception_v3 from torch.utils.data import DataLoader from torch.autograd import Variable import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import pickle %matplotlib inline is_cuda = torch.cuda.is_available() is_cuda ``` ## Utility functions ``` def imshow(inp,cmap=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp,cmap) class FeaturesDataset(Dataset): def __init__(self,featlst1,featlst2,featlst3,labellst): self.featlst1 = featlst1 self.featlst2 = featlst2 self.featlst3 = featlst3 self.labellst = labellst def __getitem__(self,index): return (self.featlst1[index],self.featlst2[index],self.featlst3[index],self.labellst[index]) def __len__(self): return len(self.labellst) def fit(epoch,model,data_loader,phase='training',volatile=False): if phase == 'training': model.train() if phase == 'validation': model.eval() volatile=True running_loss = 0.0 running_correct = 0 for batch_idx , (data1,data2,data3,target) in enumerate(data_loader): if is_cuda: data1,data2,data3,target = data1.cuda(),data2.cuda(),data3.cuda(),target.cuda() data1,data2,data3,target = Variable(data1,volatile),Variable(data2,volatile),Variable(data3,volatile),Variable(target) if phase == 'training': optimizer.zero_grad() output = model(data1,data2,data3) loss = F.cross_entropy(output,target) running_loss += F.cross_entropy(output,target,size_average=False).data[0] preds = 
output.data.max(dim=1,keepdim=True)[1] running_correct += preds.eq(target.data.view_as(preds)).cpu().sum() if phase == 'training': loss.backward() optimizer.step() loss = running_loss/len(data_loader.dataset) accuracy = 100. * running_correct/len(data_loader.dataset) print(f'{phase} loss is {loss:{5}.{2}} and {phase} accuracy is {running_correct}/{len(data_loader.dataset)}{accuracy:{10}.{4}}') return loss,accuracy class LayerActivations(): features=[] def __init__(self,model): self.features = [] self.hook = model.register_forward_hook(self.hook_fn) def hook_fn(self,module,input,output): #out = F.avg_pool2d(output, kernel_size=8) self.features.extend(output.view(output.size(0),-1).cpu().data) def remove(self): self.hook.remove() ``` ## Creating PyTorch datasets ``` data_transform = transforms.Compose([ transforms.Resize((299,299)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) # For Dogs & Cats dataset train_dset = ImageFolder('../../chapter5/dogsandcats/train/',transform=data_transform) val_dset = ImageFolder('../../chapter5/dogsandcats/valid/',transform=data_transform) classes=2 imshow(train_dset[150][0]) ``` ## Creating data loader for training and validation datasets ``` train_loader = DataLoader(train_dset,batch_size=32,shuffle=False,num_workers=3) val_loader = DataLoader(val_dset,batch_size=32,shuffle=False,num_workers=3) ``` ## Creating models ``` #Create ResNet model my_resnet = resnet34(pretrained=True) if is_cuda: my_resnet = my_resnet.cuda() my_resnet = nn.Sequential(*list(my_resnet.children())[:-1]) for p in my_resnet.parameters(): p.requires_grad = False #Create inception model my_inception = inception_v3(pretrained=True) my_inception.aux_logits = False if is_cuda: my_inception = my_inception.cuda() for p in my_inception.parameters(): p.requires_grad = False #Create densenet model my_densenet = densenet121(pretrained=True).features if is_cuda: my_densenet = my_densenet.cuda() for p in 
class EnsembleModel(nn.Module):
    """Late-fusion ensemble head over three pre-extracted feature streams.

    Each input vector is projected to 512 dims by its own linear layer
    (with dropout); the three projections are summed and mapped to
    `out_size` class logits.  Input widths (8192 / 131072 / 82944)
    presumably match the flattened ResNet / Inception / DenseNet feature
    sizes produced upstream -- TODO confirm against the extractors.
    """
    def __init__(self,out_size,training=True):
        super().__init__()
        # NOTE(review): the `training` parameter is never used --
        # nn.Module already provides self.training (toggled by
        # .train()/.eval()); kept only for caller compatibility.
        self.fc1 = nn.Linear(8192,512)
        self.fc2 = nn.Linear(131072,512)
        self.fc3 = nn.Linear(82944,512)
        self.fc4 = nn.Linear(512,out_size)
    def forward(self,inp1,inp2,inp3):
        # Dropout follows the module's own training flag, so it is
        # disabled automatically under .eval().
        out1 = self.fc1(F.dropout(inp1,training=self.training))
        out2 = self.fc2(F.dropout(inp2,training=self.training))
        out3 = self.fc3(F.dropout(inp3,training=self.training))
        # Element-wise sum fuses the three 512-dim projections.
        out = out1 + out2 + out3
        out = self.fc4(F.dropout(out,training=self.training))
        return out
github_jupyter
# Button network This notebook presents an agent-based model of randomly connecting buttons. It demonstrates how to use the [agentpy](https://agentpy.readthedocs.io) package to work with networks and visualize averaged time-series for discrete parameter samples. ``` # Model design import agentpy as ap import networkx as nx import random # Visualization import seaborn as sns ``` ## About the model This model is based on the [Agentbase Button model](http://agentbase.org/model.html?f4c4388138450bdf9732) by Wybo Wiersma and the following analogy from [Stuart Kauffman](http://www.pbs.org/lifebeyondearth/resources/intkauffmanpop.html): > "Suppose you take 10,000 buttons and spread them out on a hardwood floor. You have a large spool of red thread. Now, what you do is you pick up a random pair of buttons and you tie them together with a piece of red thread. Put them down and pick up another random pair of buttons and tie them together with a red thread, and you just keep doing this. Every now and then lift up a button and see how many buttons you've lifted with your first button. A connective cluster of buttons is called a cluster or a component. When you have 10,000 buttons and only a few threads that tie them together, most of the times you'd pick up a button you'll pick up a single button. > >As the ratio of threads to buttons increases, you're going to start to get larger clusters, three or four buttons tied together; then larger and larger clusters. At some point, you will have a number of intermediate clusters, and when you add a few more threads, you'll have linked up the intermediate-sized clusters into one giant cluster. > >So that if you plot on an axis, the ratio of threads to buttons: 10,000 buttons and no threads; 10,000 buttons and 5,000 threads; and so on, you'll get a curve that is flat, and then all of a sudden it shoots up when you get this giant cluster. This steep curve is in fact evidence of a phase transition. 
> >If there were an infinite number of threads and an infinite number of buttons and one just tuned the ratios, this would be a step function; it would come up in a sudden jump. So it's a phase transition like ice freezing. > >Now, the image you should take away from this is if you connect enough buttons all of a sudden they all go connected. To think about the origin of life, we have to think about the same thing." ## Model definition ``` # Define the model class ButtonModel(ap.Model): def setup(self): # Create a graph with n agents self.buttons = self.add_network() self.buttons.add_agents(self.p.n) self.threads = 0 def update(self): # Record size of the biggest cluster clusters = nx.connected_components(self.buttons.graph) max_cluster_size = max([len(g) for g in clusters]) / self.p.n self.record('max_cluster_size', max_cluster_size) # Record threads to button ratio self.record('threads_to_button', self.threads / self.p.n) def step(self): # Create random edges based on parameters for _ in range(int(self.p.n * self.p.speed)): self.buttons.graph.add_edge(*self.agents.random(2, replace=False)) self.threads += 1 ``` ## Multi-run experiment ``` # Define parameter ranges parameter_ranges = { 'steps': 30, # Number of simulation steps 'speed': 0.05, # Speed of connections per step 'n': (100, 1000, 10000) # Number of agents } # Create sample for different values of n sample = ap.sample_discrete(parameter_ranges) # Keep dynamic variables exp = ap.Experiment(ButtonModel, sample, iterations=25, record=True) # Perform 75 seperate simulations (3 parameter combinations * 25 repetitions) results = exp.run() # Plot averaged time-series for discrete parameter samples sns.set() data = results.arrange_variables() ax = sns.lineplot(data=data, x='threads_to_button', y='max_cluster_size', hue='n') ```
github_jupyter
def read2df(filename):
    """Parse a NMEA log file into fix-level and satellite-level DataFrames.

    Args:
        filename: path to a text file of NMEA sentences readable by
            ``pynmea2.NMEAStreamReader``.

    Returns:
        (iot_df, sat_df) where:
          * iot_df -- one row per GGA/ZDA timestamp, indexed by 'time',
            with position, fix quality, DOPs, speed, date, etc.
          * sat_df -- one row per satellite sighting from GSV sentences,
            indexed by ('time', 'PRN'), with elevation/azimuth/SNR.

    Fixes vs. the original:
      * file handle is closed via ``with`` (it leaked on the exception
        exit path, which is the normal way the parse loop terminates);
      * bare ``except:`` narrowed to ``except Exception`` (still broad on
        purpose: a parse error or end-of-stream ends the loop, as before);
      * ``sat_df.set_index('time','PRN')`` passed 'PRN' as the *drop*
        argument -- a ('time', 'PRN') MultiIndex was clearly intended;
      * the 12 copy-pasted ``sv_idNN`` blocks and 4 copy-pasted GSV
        slots are collapsed into loops via ``getattr``.
    """
    iot_data = {'time': [], 'latitude': [], 'latitude direction': [],
                'longitude': [], 'longitude direction': [], 'quality': [],
                'in use': [], 'PRN in use': [], 'antenna alt': [],   # GGA
                'PDOP': [], 'HDOP': [], 'VDOP': [],                  # GSA
                'speed kmh': [],                                     # VTG
                'date': []}                                          # ZDA
    sat_data = {'time': [],
                'PRN': [],
                'elevation': [], 'azimuth': [], 'SNR': []}           # GSV
    iot_df = pd.DataFrame(iot_data)
    sat_df = pd.DataFrame(sat_data)
    iot_df['latitude direction'] = iot_df['latitude direction'].astype('str')
    iot_df['longitude direction'] = iot_df['longitude direction'].astype('str')
    iot_df['date'] = iot_df['date'].astype('str')
    iot_df['PRN in use'] = iot_df['PRN in use'].astype(object)

    with open(filename) as f:
        reader = pynmea2.NMEAStreamReader(f)
        # `time` / `iot_time_idx` are set by the first GGA or ZDA sentence;
        # a GSA/VTG arriving before that raises NameError, which (as in the
        # original) falls into the except below and ends the loop.
        while True:
            try:
                for msg in reader.next():
                    msg_type = msg.sentence_type
                    if msg_type == 'GGA':
                        # New timestamp -> new fix row; then fill position.
                        time = msg.timestamp
                        if time not in iot_df['time'].values:
                            iot_df = iot_df.append({'time': time}, ignore_index=True)
                        iot_time_idx = iot_df[iot_df['time'] == time].index.values.astype(int)[0]
                        iot_df.at[iot_time_idx, 'latitude'] = msg.lat
                        iot_df.at[iot_time_idx, 'latitude direction'] = msg.lat_dir
                        iot_df.at[iot_time_idx, 'longitude'] = msg.lon
                        iot_df.at[iot_time_idx, 'longitude direction'] = msg.lon_dir
                        iot_df.at[iot_time_idx, 'quality'] = msg.gps_qual
                        iot_df.at[iot_time_idx, 'in use'] = msg.num_sats
                        iot_df.at[iot_time_idx, 'antenna alt'] = msg.altitude
                    elif msg_type == 'GSA':
                        # Dilution-of-precision values for the current fix.
                        iot_df.at[iot_time_idx, 'PDOP'] = msg.pdop
                        iot_df.at[iot_time_idx, 'HDOP'] = msg.hdop
                        iot_df.at[iot_time_idx, 'VDOP'] = msg.vdop
                        # Collect the up-to-12 satellite PRNs in use.
                        sat_list = []
                        for i in range(1, 13):
                            sv = getattr(msg, 'sv_id{:02d}'.format(i))
                            if sv:
                                sat_list.append(int(sv))
                        iot_df.at[iot_time_idx, 'PRN in use'] = sat_list
                    elif msg_type == 'GSV':
                        # Each GSV sentence carries up to 4 satellite slots.
                        for i in range(1, 5):
                            prn = getattr(msg, 'sv_prn_num_{}'.format(i))
                            if prn:
                                sat_df = sat_df.append(
                                    {'time': time,
                                     'PRN': prn,
                                     'elevation': getattr(msg, 'elevation_deg_{}'.format(i)),
                                     'azimuth': getattr(msg, 'azimuth_{}'.format(i)),
                                     'SNR': getattr(msg, 'snr_{}'.format(i))},
                                    ignore_index=True)
                    elif msg_type == 'VTG':
                        iot_df.at[iot_time_idx, 'speed kmh'] = msg.spd_over_grnd_kmph
                    elif msg_type == 'ZDA':
                        # ZDA also defines a timestamp row; store the date.
                        time = msg.timestamp
                        if time not in iot_df['time'].values:
                            iot_df = iot_df.append({'time': time}, ignore_index=True)
                        iot_time_idx = iot_df[iot_df['time'] == time].index.values.astype(int)[0]
                        iot_df.at[iot_time_idx, 'date'] = str(msg.day) + '/' + str(msg.month) + '/' + str(msg.year)
                    # GLL / RMC / TXT and any other sentence types are ignored.
            except Exception:
                # End of stream or unparseable sentence: stop reading.
                break

    iot_df = iot_df.set_index('time')
    sat_df = sat_df.set_index(['time', 'PRN'])
    return iot_df, sat_df
sat_df.set_index(['time', 'PRN'], inplace = True) sat_df.sort_index(inplace = True) sat_df.loc[sat_df.index.get_level_values('PRN') == '32'].tail() sats_view = [] for item in sat_df.index.get_level_values('PRN'): if item not in sats_view: sats_view.append(item) sats_view.sort() print(sats_view) num_plot = math.ceil(math.sqrt(len(sats_view))) x = list(i for i in range(num_plot) for _ in range(num_plot)) y = list(range(num_plot))*num_plot list(zip(x, y, sats_view)) fig, axs = plt.subplots(num_plot, num_plot, sharex = 'col', sharey = 'row', figsize=(15, 15)) for (i, j, sats) in list(zip(x, y, sats_view)): axs[i, j].plot(sat_df.loc[sat_df.index.get_level_values('PRN') == sats].index.get_level_values('time'), sat_df.loc[sat_df.index.get_level_values('PRN') == sats]['SNR']) axs[i, j].legend([''.join(list(sats))]) #axs[i, j].set_ylim(0, 30) plt.show() ```
github_jupyter
<a href="https://colab.research.google.com/github/zenml-io/zenml/blob/main/examples/quickstart/quickstart.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # ZenML Quickstart Guide Our goal here is to help you to get the first practical experience with our tool and give you a brief overview on some basic functionalities of ZenML. We'll create a training pipeline for the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset and then later the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset developed by Zalando. If you want to run this notebook in an interactive environment, feel free to run it in a [Google Colab](https://colab.research.google.com/github/zenml-io/zenml/blob/main/examples/quickstart/quickstart.ipynb) or view it on [GitHub](https://github.com/zenml-io/zenml/tree/main/examples/quickstart) directly. ## Purpose This quickstart guide is designed to provide a practical introduction to some of the main concepts and paradigms used by the ZenML framework. If you want more detail, our [full documentation](https://docs.zenml.io/) provides more on the concepts and how to implement them. ## Using Google Colab You will want to use a GPU for this example. If you are following this quickstart in Google's Colab, follow these steps: - Before running anything, you need to tell Colab that you want to use a GPU. You can do this by clicking on the ‘Runtime’ tab and selecting ‘Change runtime type’. A pop-up window will open up with a drop-down menu. - Select ‘GPU’ from the menu and click ‘Save’. - It may ask if you want to restart the runtime. If so, go ahead and do that. 
<!-- The code for the MNIST training borrows heavily from [this](https://www.tensorflow.org/datasets/keras_example) --> ## Relation to quickstart.py This notebook is a variant of [quickstart.py](https://github.com/zenml-io/zenml/blob/main/examples/quickstart/quickstart.py) which is shown off in the [ZenML Docs](https://docs.zenml.io). The core difference being it adds a modular aspect of the importer step and shows how to fetch pipelines, runs, and artifacts in the post-execution workflow. ## Install libraries ``` # Install the ZenML CLI tool and Tensorflow !pip install zenml !zenml integration install tensorflow ``` Once the installation is completed, you can go ahead and create your first ZenML repository for your project. As ZenML repositories are built on top of Git repositories, you can create yours in a desired empty directory through: ``` # Initialize a git repository !git init # Initialize ZenML's .zen file !zenml init ``` Now, the setup is completed. For the next steps, just make sure that you are executing the code within your ZenML repository. ## Import relevant packages We will use pipelines and steps to train our model. ``` import numpy as np import tensorflow as tf from zenml.pipelines import pipeline from zenml.steps import step from zenml.steps.base_step_config import BaseStepConfig from zenml.steps.step_output import Output ``` ## Define ZenML Steps In the code that follows, you can see that we are defining the various steps of our pipeline. Each step is decorated with `@step`, the main abstraction that is currently available for creating pipeline steps. The first step is an `import` step that downloads the MNIST dataset and returns four numpy arrays as its output. 
``` @step def importer() -> Output( X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray ): """Download the MNIST data store it as numpy arrays.""" (X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data() return X_train, y_train, X_test, y_test ``` We then add a `Trainer` step, that takes the normalized data and trains a Keras classifier on the data. Note that the model is not explicitly saved within the step. Under the hood ZenML uses Materializers to automatically persist the Artifacts that result from each step into the Artifact Store. ``` class TrainerConfig(BaseStepConfig): """Trainer params""" lr: float = 0.001 epochs: int = 1 @step def trainer( X_train: np.ndarray, y_train: np.ndarray, config: TrainerConfig, ) -> tf.keras.Model: """A simple Keras Model to train on the data.""" model = tf.keras.Sequential() model.add(tf.keras.layers.Flatten(input_shape=(28, 28))) model.add(tf.keras.layers.Dense(10)) model.compile( optimizer=tf.keras.optimizers.Adam(config.lr), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"], ) model.fit(X_train, y_train, epochs=config.epochs) return model ``` Finally, we add an `Evaluator` step that takes as input the test set and the trained model and evaluates some final metrics. ``` @step(enable_cache=False) def evaluator( X_test: np.ndarray, y_test: np.ndarray, model: tf.keras.Model, ) -> float: """Calculate the accuracy on the test set""" test_acc = model.evaluate(X_test, y_test, verbose=2) return test_acc ``` ## Define ZenML Pipeline A pipeline is defined with the `@pipeline` decorator. This defines the various steps of the pipeline and specifies the dependencies between the steps, thereby determining the order in which they will be run. 
``` @pipeline def mnist_pipeline( importer, trainer, evaluator, ): """Links all the steps together in a pipeline""" X_train, y_train, X_test, y_test = importer() model = trainer(X_train=X_train, y_train=y_train) evaluator(X_test=X_test, y_test=y_test, model=model) ``` ## Run the pipeline Running the pipeline is as simple as calling the `run()` method on an instance of the defined pipeline. Here we explicitly name our pipeline run to make it easier to access later on. Be aware that you can only run the pipeline once with this name. To rerun, rename the run, or remove the run name. ``` RUN_NAME_1 = "standard_mnist_training_run" # Initialize the pipeline first_pipeline = mnist_pipeline( importer=importer(), trainer=trainer(config=TrainerConfig(epochs=1)), evaluator=evaluator(), ) first_pipeline.run(run_name=RUN_NAME_1) # Make sure to change the name if you want to rerun ``` ## From MNIST to Fashion MNIST We got pretty good results on the MNIST model that we trained, but maybe we want to see how a similar training pipeline would work on a different dataset. You can see how easy it is to switch out one data import step for another in our pipeline. 
``` # Define a new modified import data step to download the Fashion MNIST model @step(enable_cache=False) def importer_fashion_mnist() -> Output( X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray ): """Download the MNIST data store it as an artifact""" (X_train, y_train), ( X_test, y_test, ) = tf.keras.datasets.fashion_mnist.load_data() return X_train, y_train, X_test, y_test RUN_NAME_2 = "fashion_mnist_training_run" # Initialize a new pipeline second_pipeline = mnist_pipeline( importer=importer_fashion_mnist(), trainer=trainer(config=TrainerConfig(epochs=1)), evaluator=evaluator(), ) # Run the new pipeline second_pipeline.run(run_name=RUN_NAME_2) # Make sure to change the name if you want to rerun ``` # Post execution workflow We did mention above that the Materializer takes care of persisting your artifacts for you. But how do you access your runs and their associated artifacts from code? Let's do that step by step. ## Get repo First off, we load your repository: this is where all your pipelines live. ``` from zenml.core.repo import Repository repo = Repository() ``` ## Pipelines This is how you get all of the pipelines within your repository. Above we reused the same pipeline two times with different importers. We should expect to only see one pipeline named `mnist_pipeline` here. ``` pipelines = repo.get_pipelines() print(pipelines) ``` ## Retrieve the pipeline We could now just take the pipeline from above by index using `pipelines[0]`. Alternatively we can get our pipelines by name from our repo. The name of the pipeline defaults to the function name, if not specified. ``` mnist_pipeline = repo.get_pipeline(pipeline_name="mnist_pipeline") ``` ## Get the runs All runs are saved chronologically within the corresponding pipeline. 
Here is how you access them: ``` runs = mnist_pipeline.runs # chronologically ordered print(runs) # Let's first extract out the first run on the standard mnist dataset mnist_run = mnist_pipeline.get_run("standard_mnist_training_run") # Now we can extract our second run trained on fashion mnist fashion_mnist_run = mnist_pipeline.get_run("fashion_mnist_training_run") ``` ## Get the steps ``` mnist_run.steps fashion_mnist_run.steps ``` ## Check the results of the evaluator and compare ``` mnist_eval_step = mnist_run.get_step(name='evaluator') fashion_mnist_eval_step = fashion_mnist_run.get_step(name='evaluator') # One output is simply called `output`, multiple is a dict called `outputs`. mnist_eval_step.output.read() fashion_mnist_eval_step.output.read() ``` # Congratulations! … and that's it for the quickstart. If you came here without a hiccup, you must have successfully installed ZenML, set up a ZenML repo, configured a training pipeline, executed it and evaluated the results. And, this is just the tip of the iceberg on the capabilities of ZenML. However, if you had a hiccup or you have some suggestions/questions regarding our framework, you can always check our [docs](https://docs.zenml.io/) or our [Github](https://github.com/zenml-io/zenml) or even better join us on our [Slack channel](https://zenml.io/slack-invite). Cheers! For more detailed information on all the components and steps that went into this short example, please continue reading [our more detailed documentation pages](https://docs.zenml.io/).
github_jupyter
# Dynamic Tropopause Calculation By: Kevin Goebbert This example uses MetPy calculation ability to determine the potential temperature on the dynamic tropopause (2 PVU surface), add the derived variables to the xarray dataset and plot using the MetPy declarative syntax. ``` from datetime import datetime, timedelta import metpy.calc as mpcalc from metpy.interpolate import interpolate_to_isosurface from metpy.plots.declarative import * from metpy.units import units import xarray as xr ``` ## Get GFS Data Obtain and subset GFS data to cover the CONUS region ``` date = datetime.utcnow() - timedelta(days=1) ds = xr.open_dataset('https://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/' f'Global_onedeg_ana/GFS_Global_onedeg_ana_{date:%Y%m%d}_1200.grib2').metpy.parse_cf() ds = ds.sel(lat=slice(80, -10), lon=slice(360-140, 360-40)) vtime = ds.time.values[0].astype('datetime64[ms]').astype('O') ``` ## Compute Potential Temperature at 2 PVU The following cell takes the necessary data from the GFS analysis, smooths and calculates needed variables to obtain the potential temperature on the 2 PVU surface (e.g., the dynamic tropopause), as well as interpolate the wind components to that level as well. 
``` pressure = ds.isobaric.values * units.Pa lats = ds.lat.values lons = ds.lon.values dx, dy = mpcalc.lat_lon_grid_deltas(lons, lats) uwind = mpcalc.smooth_n_point(ds['u-component_of_wind_isobaric'].squeeze(), 9, 2) vwind = mpcalc.smooth_n_point(ds['v-component_of_wind_isobaric'].squeeze(), 9, 2) LL_avor = mpcalc.smooth_n_point(ds.Absolute_vorticity_isobaric.metpy.sel(vertical=slice(850 * units.hPa, 925 * units.hPa)).squeeze(), 9, 2) avg_LL_avor = LL_avor.mean(axis=0) potemp = mpcalc.smooth_n_point(mpcalc.potential_temperature(pressure[:, None, None], ds.Temperature_isobaric.squeeze()), 9, 2) avg_LL_rvor = avg_LL_avor - mpcalc.coriolis_parameter(lats[:, None] * units.degree) pvor = mpcalc.potential_vorticity_baroclinic(potemp, pressure[:, None, None], uwind, vwind, dx[None, :, :], dy[None, :, :], lats[None, :, None] * units.degrees) DT_potemp = interpolate_to_isosurface(pvor.m*1e6, potemp.m, 2, bottom_up_search=False) DT_uwnd = (interpolate_to_isosurface(pvor.m*1e6, uwind.m, 2, bottom_up_search=False) * units('m/s')).to(units.knots) DT_vwnd = (interpolate_to_isosurface(pvor.m*1e6, vwind.m, 2, bottom_up_search=False) * units('m/s')).to(units.knots) ``` ## Add Variables to Dataset This next cell adds the variables calculated/derived above to the xarray dataset, which will make them available for plotting with the MetPy declarative syntax. 
``` ds = ds.assign(dynamic_trop=(tuple(('lat', 'lon')), DT_potemp, {'grid_mapping': ds['u-component_of_wind_isobaric'].grid_mapping, 'units': 'PVU'})) ds = ds.assign(uwnd_DT=(tuple(('lat', 'lon')), DT_uwnd.m, {'grid_mapping': ds['u-component_of_wind_isobaric'].grid_mapping, 'units': 'knots'})) ds = ds.assign(vwnd_DT=(tuple(('lat', 'lon')), DT_vwnd.m, {'grid_mapping': ds['u-component_of_wind_isobaric'].grid_mapping, 'units': 'knots'})) ds = ds.assign(avg_LL_rel_vort=(tuple(('lat', 'lon')), avg_LL_rvor*1e4, {'grid_mapping': ds['u-component_of_wind_isobaric'].grid_mapping, 'units': '1/s'})) ``` ## Create the Plot ``` cntr = ContourPlot() cntr.data = ds cntr.level = None cntr.field = 'avg_LL_rel_vort' cntr.clabels = True cntr.contours = [0.5, 1.5, 2.5, 3.5, 4.5] cntr2 = FilledContourPlot() cntr2.data = ds cntr2.level = None cntr2.field = 'dynamic_trop' cntr2.contours = list(range(250, 420, 1)) cntr2.colormap = 'coolwarm' cntr2.colorbar = 'horizontal' barbs = BarbPlot() barbs.data = ds barbs.field = ['uwnd_DT', 'vwnd_DT'] barbs.skip = (3, 3) panel = MapPanel() panel.projection = 'lcc' panel.area = 'us' panel.layers = ['states', 'borders', 'coastline'] panel.title = ('Dynamic Tropopause Potential Temperature (K), Wind Barbs (kts), and LL Rel. Vort. (s$^{-1}$) at ' f'{vtime}') panel.plots = [cntr2, cntr, barbs] pc = PanelContainer() pc.size = (18, 14) pc.panels = [panel] pc.show() ```
github_jupyter
# Introduction In this tutorial, you'll explore several techniques for **proximity analysis**. In particular, you'll learn how to do such things as: - measure the distance between points on a map, and - select all points within some radius of a feature. ``` #$HIDE_INPUT$ import folium from folium import Marker, GeoJson from folium.plugins import HeatMap import pandas as pd import geopandas as gpd # Function for displaying the map def embed_map(m, file_name): from IPython.display import IFrame m.save(file_name) return IFrame(file_name, width='100%', height='500px') ``` You'll work with a dataset from the US Environmental Protection Agency (EPA) that tracks releases of toxic chemicals in Philadelphia, Pennsylvania, USA. ``` releases = gpd.read_file("../input/geospatial-learn-course-data/toxic_release_pennsylvania/toxic_release_pennsylvania/toxic_release_pennsylvania.shp") releases.head() ``` You'll also work with a dataset that contains readings from air quality monitoring stations in the same city. ``` stations = gpd.read_file("../input/geospatial-learn-course-data/PhillyHealth_Air_Monitoring_Stations/PhillyHealth_Air_Monitoring_Stations/PhillyHealth_Air_Monitoring_Stations.shp") stations.head() ``` # Measuring distance To measure distances between points from two different GeoDataFrames, we first have to make sure that they use the same coordinate reference system (CRS). Thankfully, this is the case here, where both use EPSG 2272. ``` print(stations.crs) print(releases.crs) ``` We also check the CRS to see which units it uses (meters, feet, or something else). In this case, EPSG 2272 has units of feet. (_If you like, you can check this [here](https://epsg.io/2272)._) It's relatively straightforward to compute distances in GeoPandas. The code cell below calculates the distance (in feet) between a relatively recent release incident in `recent_release` and every station in the `stations` GeoDataFrame. 
``` # Select one release incident in particular recent_release = releases.iloc[360] # Measure distance from release to each station distances = stations.geometry.distance(recent_release.geometry) distances ``` Using the calculated distances, we can obtain statistics like the mean distance to each station. ``` print('Mean distance to monitoring stations: {} feet'.format(distances.mean())) ``` Or, we can get the closest monitoring station. ``` print('Closest monitoring station ({} feet):'.format(distances.min())) print(stations.iloc[distances.idxmin()][["ADDRESS", "LATITUDE", "LONGITUDE"]]) ``` # Creating a buffer If we want to understand all points on a map that are some radius away from a point, the simplest way is to create a buffer. The code cell below creates a GeoSeries `two_mile_buffer` containing 12 different Polygon objects. Each polygon is a buffer of 2 miles (or, 2\*5280 feet) around a different air monitoring station. ``` two_mile_buffer = stations.geometry.buffer(2*5280) two_mile_buffer.head() ``` We use `folium.GeoJson()` to plot each polygon on a map. Note that since folium requires coordinates in latitude and longitude, we have to convert the CRS to EPSG 4326 before plotting. ``` # Create map with release incidents and monitoring stations m = folium.Map(location=[39.9526,-75.1652], zoom_start=11) HeatMap(data=releases[['LATITUDE', 'LONGITUDE']], radius=15).add_to(m) for idx, row in stations.iterrows(): Marker([row['LATITUDE'], row['LONGITUDE']]).add_to(m) # Plot each polygon on the map GeoJson(two_mile_buffer.to_crs(epsg=4326)).add_to(m) # Show the map embed_map(m, 'm1.html') ``` Now, to test if a toxic release occurred within 2 miles of **any** monitoring station, we could run 12 different tests for each polygon (to check individually if it contains the point). But a more efficient way is to first collapse all of the polygons into a **MultiPolygon** object. We do this with the `unary_union` attribute. 
``` # Turn group of polygons into single multipolygon my_union = two_mile_buffer.geometry.unary_union print('Type:', type(my_union)) # Show the MultiPolygon object my_union ``` We use the `contains()` method to check if the multipolygon contains a point. We'll use the release incident from earlier in the tutorial, which we know is roughly 3781 feet to the closest monitoring station. ``` # The closest station is less than two miles away my_union.contains(releases.iloc[360].geometry) ``` But not all releases occured within two miles of an air monitoring station! ``` # The closest station is more than two miles away my_union.contains(releases.iloc[358].geometry) ``` # Your turn In the **[final exercise](#$NEXT_NOTEBOOK_URL$)**, you'll investigate hospital coverage in New York City.
github_jupyter
``` # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # E2E ML on GCP: MLOps stage 2 : experimentation: get started with Feature Store <table align="left"> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage2/get_started_vertex_feature_store.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> <td> <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage2/get_started_vertex_feature_store.ipynb"> Open in Google Cloud Notebooks </a> </td> </table> <br/><br/><br/> ## Overview This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation: get started with Feature Store. ### Dataset The dataset used for this tutorial is the Movie Recommendations. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket, in Avro format. The dataset predicts whether a persons will watch a movie. ### Objective In this tutorial, you learn how to use `Vertex AI Feature Store` for when training and prediction with `Vertex AI`. This tutorial uses the following Google Cloud ML services: - `Vertex AI Feature Store` The steps performed include: - Creating a Vertex AI `Featurestore` resource. 
- Creating `EntityType` resources for the `Featurestore` resource. - Creating `Feature` resources for each `EntityType` resource. - Import feature values (entity data items) into `Featurestore` resource. - Perform online serving from a `Featurestore` resource. - Perform batch serving from a `Featurestore` resource. ## Installations Install *one time* the packages for executing the MLOps notebooks. ``` ONCE_ONLY = False if ONCE_ONLY: ! pip3 install -U tensorflow==2.5 $USER_FLAG ! pip3 install -U tensorflow-data-validation==1.2 $USER_FLAG ! pip3 install -U tensorflow-transform==1.2 $USER_FLAG ! pip3 install -U tensorflow-io==0.18 $USER_FLAG ! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG ! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG ! pip3 install --upgrade google-cloud-bigquery $USER_FLAG ! pip3 install --upgrade google-cloud-logging $USER_FLAG ! pip3 install --upgrade apache-beam[gcp] $USER_FLAG ! pip3 install --upgrade pyarrow $USER_FLAG ! pip3 install --upgrade cloudml-hypertune $USER_FLAG ! pip3 install --upgrade kfp $USER_FLAG ``` ### Restart the kernel Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages. ``` import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` #### Set your project ID **If you don't know your project ID**, you may be able to get your project ID using `gcloud`. ``` PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! 
gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. - Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations). ``` REGION = "us-central1" # @param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial. ``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Set up variables Next, set up some variables used throughout the tutorial. ### Import libraries and define constants #### Import Vertex SDK Import the Vertex SDK into your Python environment. ``` import time from google.cloud.aiplatform import gapic as aip ``` #### Import BigQuery Import the BigQuery package into your Python environment. ``` from google.cloud import bigquery ``` ### Create BigQuery client Create the BigQuery client. ``` bqclient = bigquery.Client() ``` #### Vertex AI constants Setup up the following constants for Vertex AI: - `API_ENDPOINT`: The Vertex AI API service endpoint for `FeatureStore` services. ``` # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION ``` ## Set up clients The Vertex SDK works as a client/server model. 
On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex AI server. You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. - Feature Store Service for creating a feature store. - Feature Store Serving Service for serving from a feature store. ``` # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_feature_store_client(): client = aip.FeaturestoreServiceClient(client_options=client_options) return client def create_feature_store_serving_client(): client = aip.FeaturestoreOnlineServingServiceClient(client_options=client_options) return client clients = {} clients["feature_store"] = create_feature_store_client() clients["feature_store_serving"] = create_feature_store_serving_client() for client in clients.items(): print(client) ``` ## Introduction to Vertex AI Feature Store Let's assume you have a recommendation model that predicts a coupon to print on the back of a cash register receipt. Now, if that model was trained only on single transaction instances (what was bought and how much), then (in the past) you used an Apriori algorithm. But now we have historical data on the customer (say it's indexed by credit card number). Like total purchases to date, average purchase per transaction, frequency of purchase by product category, etc. We use this "enriched data" to train a recommender system. Now it's time to do a live prediction. You get a transaction from the cash register, but all it has is the credit card number and this transaction. It does not have the enriched data the model needs. During serving, the credit card number is used as an index to Feature Store to get the enriched data needed for the model. Next problem. Let's say the enriched data the model was trained on was timestamp June 1. This transaction is June 15. 
Assume that the user has made other transactions between June 1 and 15, and the enriched data has been continuously updated in Feature Store. But the model was trained on June 1st data. FeatureStore knows the version number and serves the June 1 version to the model (not the current June 15); otherwise, if you used June 15 data you have training-serving skew. Next problem, data drift. Things change, suddenly one day everybody is buying toilet paper! There is a significant change in the distribution of the current stored enriched data from the distribution that the deployed model was trained on. FeatureStore can detect changes/thresholds in distribution changes and trigger a notification for retraining the model. Learn more about [Vertex AI Feature Store API](https://cloud.google.com/vertex-ai/docs/featurestore) ## Vertex AI Feature Store data model Vertex AI Feature Store organizes data with the following 3 important hierarchical concepts: Featurestore -> EntityType -> Feature - `Featurestore`: the place to store your features - `EntityType`: under a `Featurestore`, an `EntityType` describes an object to be modeled, real one or virtual one. - `Feature`: under an `EntityType`, a `Feature` describes an attribute of the `EntityType` Learn more about [Vertex AI Feature Store data model](https://cloud.google.com/vertex-ai/docs/featurestore/concepts). In the movie prediction dataset, you create a `Featurestore` resource called movies. This `Featurestore` resource has 2 entity types: - `users`: The entity type has the `age`, `gender`, and `like genres` features. - `movies`: The entity type has the `genres` and `average rating` features. ## Create a `Featurestore` resource First, you create a `Featurestore` for the dataset using the `create_featurestore()` method, with the following parameters: - `parent`: The base portion of the fully qualified resource identifier (projects/<project>/location/<location>) - `featurestore_id`: The name of the feature store. 
- `featurestore`: Configuration settings for the feature store. - `online_serving_config`: Configuration settings for online serving from the feature store. Note, this is a long-running-operation (LRO), so you do a `response.result()` to block on the operation completing. ``` from google.cloud.aiplatform_v1.types import featurestore, featurestore_service # Represents featurestore resource path. FEATURESTORE_NAME = "movies" response = clients["feature_store"].create_featurestore( featurestore_service.CreateFeaturestoreRequest( parent=PARENT, featurestore_id=FEATURESTORE_NAME, featurestore=featurestore.Featurestore( online_serving_config=featurestore.Featurestore.OnlineServingConfig( fixed_node_count=1 ) ), ) ) response.result() ``` ### List your `Featurestore` resources You can get a list of all your `Featurestore` resources in your project using the `list_featurestores()` method, with the following parameters: - `parent`: The base portion of the fully qualified resource identifier (projects/<project>/location/<location>) ``` featurestores = clients["feature_store"].list_featurestores(parent=PARENT) for featurestore in featurestores: print(featurestore) ``` ### Get a `Featurestore` resource You can get a specifed `Featurestore` resource in your project using the `get_featurestore()` method, with the following parameters: - `name`: The fully qualified resource identifier for the `Featurestore` resource. ``` resource_name = clients["feature_store"].featurestore_path( PROJECT_ID, REGION, FEATURESTORE_NAME ) print(resource_name) featurestore = clients["feature_store"].get_featurestore(name=resource_name) print(featurestore) ``` ## Create entity types for your `Featurestore` resource Next, you create the `EntityType` resources for your `Featurestore` resource using the `create_entity_type()` method, with the following parameters: - `parent`: The fully qualified resource identifier for the `Featurestore` resource. - `entity_type_id`: The name of the `EntityType` resource. 
- `entity_type`: Configuration settings for the `EntityType` resource. ``` from google.cloud.aiplatform_v1.types import entity_type for name, description in [("users", "Users descrip"), ("movies", "Movies descrip")]: response = clients["feature_store"].create_entity_type( featurestore_service.CreateEntityTypeRequest( parent=resource_name, entity_type_id=name, entity_type=entity_type.EntityType( description=description, ), ) ) response.result() ``` ### Add `Feature` resources for your `EntityType` resources Next, you create the `Feature` resources for each of the `EntityType` resources in your `Featurestore` resource using the `create_feature()` method, with the following parameters: - `parent`: The fully qualified resource identifier for the `EntityType` resource. - `feature_id`: The name of the `Feature` resource. - `feature`: The configuration settings for the `Feature` resource. ``` from google.cloud.aiplatform_v1.types import feature def create_features(featurestore_name, entity_name, features): parent = clients["feature_store"].entity_type_path( PROJECT_ID, REGION, featurestore_name, entity_name ) for name, descrip, dtype in features: response = clients["feature_store"].create_feature( parent=parent, feature=feature.Feature(value_type=dtype, description=descrip), feature_id=name, ) response.result() create_features( FEATURESTORE_NAME, "users", [ ("age", "Age descrip", feature.Feature.ValueType.INT64), ("gender", "Gender descrip", feature.Feature.ValueType.STRING), ("liked_genres", "Genres descrip", feature.Feature.ValueType.STRING_ARRAY), ], ) create_features( FEATURESTORE_NAME, "movies", [ ("title", "Title descrip", feature.Feature.ValueType.STRING), ("genres", "Genres descrip", feature.Feature.ValueType.STRING), ("average_rating", "Ave descrip", feature.Feature.ValueType.DOUBLE), ], ) ``` ### Search all `Feature` resources in your `Featurestore` resources You can get a list of all `Feature` resources in your `Featurestore` resources using the method 
`search_features()`, with the following parameters: - `location`: The base portion of the fully qualified resource identifier (projects/<project>/location/<location>) ``` features = clients["feature_store"].search_features(location=PARENT) for feature in features: print(features) ``` ### Search `Feature` resources using a query filter You can narrow your search of `Feature` resources by specifying a `query` filter. ``` # Search by name features = clients["feature_store"].search_features( featurestore_service.SearchFeaturesRequest( location=PARENT, query="feature_id:title" ) ) print("By Name") for feature in features: print(features) # Search by data type features = clients["feature_store"].search_features( featurestore_service.SearchFeaturesRequest( location=PARENT, query="value_type=DOUBLE" ) ) print("By Data Type") for feature in features: print(feature) IMPORT_FILE = ( "gs://cloud-samples-data/vertex-ai/feature-store/datasets/movie_prediction.csv" ) FS_ENTITIES = { "users": "gs://cloud-samples-data/vertex-ai/feature-store/datasets/users.avro", "movies": "gs://cloud-samples-data-us-central1/vertex-ai/feature-store/datasets/movies.avro", } ``` ## Import the feature data into your `Featurestore` resource Next, you import the feature data for your `Featurestore` resource. Once imported, you can use these feature values for online and offline (batch) serving. ### Data layout Each imported `EntityType` resource data must have an ID; also, each `EntityType` resource data item can optionally have a timestamp, sepecifying when the feature values were generated. When importing, specify the following in your request: - Data source format: BigQuery Table/Avro/CSV - Data source URL - Destination: featurestore/entity types/features to be imported The feature values for the movies dataset are in Avro format. 
The Avro schemas are as follows: **Users entity**: ``` schema = { "type": "record", "name": "User", "fields": [ { "name":"user_id", "type":["null","string"] }, { "name":"age", "type":["null","long"] }, { "name":"gender", "type":["null","string"] }, { "name":"liked_genres", "type":{"type":"array","items":"string"} }, { "name":"update_time", "type":["null",{"type":"long","logicalType":"timestamp-micros"}] }, ] } ``` **Movies entity**: ``` schema = { "type": "record", "name": "Movie", "fields": [ { "name":"movie_id", "type":["null","string"] }, { "name":"average_rating", "type":["null","double"] }, { "name":"title", "type":["null","string"] }, { "name":"genres", "type":["null","string"] }, { "name":"update_time", "type":["null",{"type":"long","logicalType":"timestamp-micros"}] }, ] } ``` ### Importing the feature values You import the feature values for the `EntityType` resources using the `import_feature_values()` method, with the following parameters: - `entity_type`: The fully qualified resource identifier for the `EntityType` resource. - The location of the feature values, one of: `avro_source`: The Cloud Storage location of the feature values in Avro format. `csv_source`: The Cloud Storage location of the feature values in CSV format. `bigquery_source`: The BigQuery table for the feature values. - `entity_id_field`: The source column for the unique ID for each entity data item. - `feature_specs`: The source columns for the features to import into the `EntityType` resource. - `feature_time_field`: The source column for the timestamp of each entity data item. - `worker_count`: The number of parallel workers to read in and update the feature values in the `EntityType` resource. 
``` from google.cloud.aiplatform_v1.types import io as io request = featurestore_service.ImportFeatureValuesRequest( entity_type=clients["feature_store"].entity_type_path( PROJECT_ID, REGION, FEATURESTORE_NAME, "users" ), avro_source=io.AvroSource( # Source gcs_source=io.GcsSource(uris=[FS_ENTITIES["users"]]) ), entity_id_field="user_id", feature_specs=[ # Features featurestore_service.ImportFeatureValuesRequest.FeatureSpec(id="age"), featurestore_service.ImportFeatureValuesRequest.FeatureSpec(id="gender"), featurestore_service.ImportFeatureValuesRequest.FeatureSpec(id="liked_genres"), ], feature_time_field="update_time", worker_count=1, ) response = clients["feature_store"].import_feature_values(request) response.result() request = featurestore_service.ImportFeatureValuesRequest( entity_type=clients["feature_store"].entity_type_path( PROJECT_ID, REGION, FEATURESTORE_NAME, "movies" ), avro_source=io.AvroSource(gcs_source=io.GcsSource(uris=[FS_ENTITIES["movies"]])), entity_id_field="movie_id", feature_specs=[ featurestore_service.ImportFeatureValuesRequest.FeatureSpec(id="title"), featurestore_service.ImportFeatureValuesRequest.FeatureSpec(id="genres"), featurestore_service.ImportFeatureValuesRequest.FeatureSpec( id="average_rating" ), ], feature_time_field="update_time", worker_count=1, ) response = clients["feature_store"].import_feature_values(request) response.result() ``` ## Vertex AI Feature Store serving The Vertex AI Feature Store service provides the following two services for serving features from a `Featurestore` resource: - Online serving - low-latency serving of small batches of features (prediction). - Batch serving - high-throughput serving of large batches of features (training and prediction). 
``` from google.cloud.aiplatform_v1.types import (FeatureSelector, IdMatcher, featurestore_online_service) def serve_features(featurestore_name, entity_name, features, id): feature_selector = FeatureSelector(id_matcher=IdMatcher(ids=features)) request = clients["feature_store_serving"].read_feature_values( featurestore_online_service.ReadFeatureValuesRequest( # Fetch from the following feature store/entity type entity_type=clients["feature_store"].entity_type_path( PROJECT_ID, REGION, featurestore_name, entity_name ), entity_id=id, feature_selector=feature_selector, ) ) return request features = serve_features( FEATURESTORE_NAME, "users", ["age", "gender", "liked_genres"], "alice" ) print(features) ``` ### Multiple entity data items You serve features for multiple entity data items using the `streaming_read_feature_values()` method with the following parameters: - `entity_type`: The fully qualified resource identifier for the `EntityType` resource. - `feature_selector`: The features to serve from the corresponding `EntityType` resource. - `entity_ids`: The unique IDs of the data items to serve the corresponding features. 
``` from google.cloud.aiplatform_v1.types import (FeatureSelector, IdMatcher, featurestore_online_service) def serve_streaming_features(featurestore_name, entity_name, features, ids): feature_selector = FeatureSelector(id_matcher=IdMatcher(ids=features)) request = clients["feature_store_serving"].streaming_read_feature_values( featurestore_online_service.StreamingReadFeatureValuesRequest( # Fetch from the following feature store/entity type entity_type=clients["feature_store"].entity_type_path( PROJECT_ID, REGION, featurestore_name, entity_name ), entity_ids=ids, feature_selector=feature_selector, ) ) return request features = serve_streaming_features( FEATURESTORE_NAME, "users", ["age", "gender", "liked_genres"], ["alice", "bob"] ) for feature in features: print(feature) ``` ## Batch Serving The Vertex AI Feature Store batch serving service is optimized for serving large batches of features in real-time with high-throughput, typically for training a model or batch prediction. ### Output dataset For batch serving, you use a BigQuery table for the output. First, you must create this output destination table. ``` # Output dataset DESTINATION_DATASET = f"movies_predictions_{TIMESTAMP}" # Output table. DESTINATION_TABLE = "training_data" # @param {type:"string"} DESTINATION_TABLE_URI = f"bq://{PROJECT_ID}.{DESTINATION_DATASET}.{DESTINATION_TABLE}" dataset_id = f"{PROJECT_ID}.{DESTINATION_DATASET}" dataset = bigquery.Dataset(dataset_id) dataset = bqclient.create_dataset(dataset) print("Created dataset:", dataset_id) ``` ### Batch Read Feature Values Assemble the request which specify the following info: * Where is the label data, i.e., Table 1. * Which features are read, i.e., the column names in Table 2. The output is stored in a BigQuery table. ``` request = featurestore_service.BatchReadFeatureValuesRequest( # featurestore info featurestore=clients["feature_store"].featurestore_path( PROJECT_ID, REGION, FEATURESTORE_NAME ), # URL for the label data, i.e., Table 1. 
csv_read_instances=io.CsvSource(gcs_source=io.GcsSource(uris=[IMPORT_FILE])), destination=featurestore_service.FeatureValueDestination( bigquery_destination=io.BigQueryDestination( # Output to BigQuery table created earlier output_uri=DESTINATION_TABLE_URI ) ), entity_type_specs=[ featurestore_service.BatchReadFeatureValuesRequest.EntityTypeSpec( # Read the 'age', 'gender' and 'liked_genres' features from the 'users' entity entity_type_id="users", feature_selector=FeatureSelector( id_matcher=IdMatcher( ids=[ # features, use "*" if you want to select all features within this entity type "age", "gender", "liked_genres", ] ) ), ), featurestore_service.BatchReadFeatureValuesRequest.EntityTypeSpec( # Read the 'average_rating' and 'genres' feature values of the 'movies' entity entity_type_id="movies", feature_selector=FeatureSelector( id_matcher=IdMatcher(ids=["average_rating", "genres"]) ), ), ], ) response = clients["feature_store"].batch_read_feature_values(request) response.result() ``` ### Delete a BigQuery dataset Use the method `delete_dataset()` to delete a BigQuery dataset along with all its tables, by setting the parameter `delete_contents` to `True`. ``` bqclient.delete_dataset(dataset, delete_contents=True) ``` ### Delete a `Featurestore` resource You can get a delete a specified `Featurestore` resource using the `delete_featurestores()` method, with the following parameters: - `name`: The fully qualified resource identifier for the `Featurestore` resource. - `force`: Forces deletion of the `Featurestore` resource when non-empty. ``` clients["feature_store"].delete_featurestore(name=resource_name, force=True) ```
github_jupyter
``` # Compute the average colour histogram for each of the RGB Colour channels import cv2 import numpy as np import matplotlib.pyplot as plt import scipy from scipy.stats import norm from scipy.stats import multivariate_normal as mvn import glob %matplotlib inline path_to_image_buoy1 = 'C:\\Users\\shant\\Underwater_Color_Segmentation_GMM_EM_Algorithm\\data\\buoy1\\train\\buoy156.png' path_to_image_buoy2 = 'C:\\Users\\shant\\Underwater_Color_Segmentation_GMM_EM_Algorithm\\data\\buoy2\\train\\buoy108.png' path_to_image_buoy3 = 'C:\\Users\\shant\\Underwater_Color_Segmentation_GMM_EM_Algorithm\\data\\buoy3\\train\\buoy28.png' img1 = cv2.imread(path_to_image_buoy1) img2 = cv2.imread(path_to_image_buoy2) img3 = cv2.imread(path_to_image_buoy3) cv2.imshow('img1',img3) cv2.waitKey(0) cv2.destroyAllWindows() cv2.waitKey(1) # initialise step of em algorithm def initialise_step(n, d, k): """ Inputs: n - number of datapoints d - dimension of the gaussian k - number of the gaussians Outputs: weights_gaussian - weight of the gaussians, size (k) mean_gaussian - mean of the gaussians, size (k x d) covariance_matrix_gaussian - covariance of the gaussians, size (k x d x d) probability_values - probability of the datapoint being in the k-gaussians, size (n x k) """ # initialise weights weights_gaussian = np.zeros(k) for index in range(0, k): weights_gaussian[index] = (1.0 / k) # initialise mean mean_gaussian = np.zeros((k, d)) # initialise covariance covariance_matrix_gaussian = np.zeros((k, d, d)) # randomly initialise probability probability_values = np.zeros((n, k)) for index in range(0, n): probability_values[index][np.random.randint(0, k)] = 1 # return the arrays return (weights_gaussian, mean_gaussian, covariance_matrix_gaussian, probability_values) # Fit a gaussian using the datapoints obtained using the histogram def fit_gaussian(image): color = ('b' , 'g' , 'r') # The number of color channels for i, col in enumerate(color): # Calculate the histogram for the Red Green and Blue 
color channel # If input is grayscale the channels = [0] but for color image the channels can be [0], [1], [2] histr = cv2.calcHist([image],[i],None,[256],[0,256]) (mu , sigma) = norm.fit(histr) # Maximum likelihood estimate bins = np.linspace(0,255,256) print("mu:" + str(mu) + "sigma:" + str(sigma)) plt.subplot(1,2,1) plt.plot(bins, norm.pdf(bins,mu,(sigma)),color=col); plt.subplot(1,2,2) plt.plot(histr,color = col); plt.show() # gaussian estimation for n-points def gaussian_estimation_array(data_point, mean, covariance, dimension): """ Inputs: data_point - data point of the gaussian, size (n x d) mean - mean of the gaussian, size (1 x d) covariance - covariance of the gaussian, size (1 x d x d) dimension - dimension of the gaussian Outputs: value of the gaussian, size (n x d) """ determinant_covariance = np.linalg.det(covariance) determinant_covariance_root = np.sqrt(determinant_covariance) covariance_inverse = np.linalg.inv(covariance) gaussian_pi_coeff = 1.0 / np.power((2 * np.pi), (dimension / 2)) data_mean_diff = (data_point - mean) data_mean_diff_transpose = data_mean_diff.T val = (gaussian_pi_coeff) * (determinant_covariance_root) * np.exp(-0.5 * np.sum(np.multiply(data_mean_diff * covariance_inverse, data_mean_diff), axis=1)) return np.reshape(val, (data_point.shape[0], data_point.shape[1])) # gaussian estimation for expectation step def gaussian_estimation(data_point, mean, covariance, dimension): """ Inputs: data_point - data point of the gaussian, size (1 x d) mean - mean of the gaussian, size (1 x d) covariance - covariance of the gaussian, size (1 x d x d) dimension - dimension of the gaussian Outputs: value of the gaussian """ determinant_covariance = np.linalg.det(covariance) determinant_covariance_root = np.sqrt(determinant_covariance) covariance_inverse = np.linalg.inv(covariance) gaussian_pi_coeff = 1.0 / np.power((2 * np.pi), (dimension / 2)) data_mean_diff = (data_point - mean) data_mean_diff_transpose = data_mean_diff.T return 
(gaussian_pi_coeff) * (determinant_covariance_root) * np.exp(-0.5 * np.matmul(np.matmul(data_mean_diff, covariance_inverse), data_mean_diff_transpose)) # Perform the expectation step def expectation_step(n, d, k, data, weights_gaussian, mean_gaussian, covariance_matrix_gaussian): """ Inputs: n - the number of data-points d - dimension of gaussian k - number of gaussians data - data to be trained on of size (n x d) weights_gaussian - weight of gaussians of size (k) mean_gaussian - mean of gaussians of size (k x d) covariance_matrix_gaussian - covariance of gaussians of size (k x d x d) probability_values - probability of the datapoint being in a gaussian of size (n x k) Outputs: probabilities - probability array of size (n x k) """ # create empty array of list of probabilities probabilities = [] # iterate through each item for j in range(0, n): # calculate probability of a point being in the k-gaussians probability_x = 0.0 for i in range(0, k): probability_x = probability_x + gaussian_estimation(data[j], mean_gaussian[i], covariance_matrix_gaussian[i], d) * weights_gaussian[i] probability_x_temp = [] for i in range(0, k): val = (gaussian_estimation(data[j], mean_gaussian[i], covariance_matrix_gaussian[i], d) * weights_gaussian[i]) / probability_x probability_x_temp.append(val) # append probabilities of a point being in k-gaussians of size (1 x k) probabilities.append(probability_x_temp) return np.array(probabilities) # The maximization step will compute the mean, the mixture prior and the Covariance for each of the gaussian" def maximization_gaussian(data_probabilities, input_data, initial_mixture_coeff, initial_mean_gaussian, initial_covariance_gaussian): """ K: Number of components of the gaussian mixture input_data: (NxD) which is number of datapoints times the dimension of each data point phi_k: List of mixture coefficients of size K mean_k: List of mean for the kth component of the Gaussian Mixture of the size (Kxd) sigma_square_k: Variance of the kth component 
of the mixture model (Kxdxd) data_probabilities: Probability of the ith datapoint coming from gaussian k. The size of this matrix is (nxk) """ # Initializations for mixture coefficients, mean and covariance of the gaussian phi_k = initial_mixture_coeff mean_k = initial_mean_gaussian covariance = initial_covariance_gaussian # Number of Gaussians K = data_probabilities.shape[1] # Calculation for mixture model coefficients phi_k" phi_k = np.mean(data_probabilities, axis = 0) # Calculation of the mean for each k gaussian mean_k = np.matmul(data_probabilities.T, input_data) / np.sum(data_probabilities, axis = 0)[:,np.newaxis] #Calculation of the Variance for each kth Gaussian Distribution # Loop over each Gaussian for k in range(K): # Compute the difference of the ith data point with the kth gaussian mean x = input_data - mean_k[k,:] # Compute the transpose x_transpose = x.T # Convert the kth column of the probability matrix into a sparse diagonal matrix probability_diag = np.diag(data_probabilities[:,k]) covariance_numerator = np.matmul(np.matmul(probability_diag, x) , x_transpose) # The covariance numerator is again a diagonal matrix whose elements we need to sum # k = 0 signifies take the main diagonal covariance_numerator = np.sum(np.diag(covariance_numerator,k=0)) # Compute the covariance covariance[k,:,:] = covariance_numerator / np.sum(data_probabilities,axis=0)[:,np.newaxis][k] return phi_k , mean_k , covariance def compute_log_likelihood(input_data, mixture_coeff, mean_gaussian, covariance_gaussian, K): """ Inputs mixture_coeff: Mixture coefficient is the probability of the kth gaussian of size (k) mean_gaussian: the mean is the mean of the kth gaussian of size (kxd) covariance_gaussian: Covariance of the Kth gaussian of size (kxdxd) data - training data, size (n x d) K: Number of Gaussians """ # We need to compute the log likelihood and do a test for convergence n = input_data.shape[0] log_likelihood = 
np.log(np.sum(mixture_coeff[k]*mvn.pdf(input_data,mean_gaussian[k],covariance_gaussian[k]) for k in range(K))) return np.sum(log_likelihood) # run e-m algorithm def run_expectation_maximization_algorithm(n, d, k, iterations, data): """ Inputs: n - number of data-points d - dimension of gaussian k - number of gaussians iterations - number of iterations data - training data, size (n x d) Outputs: weights_gaussian - weight of the gaussians, size (k) mean_gaussian - mean of the gaussians, size (k x d) covariance_matrix_gaussian - covariance of the gaussians, size (k x d x d) """ # initialise step (mixture_coeffs, mean_gaussian, covariance_matrix_gaussian, probability_values) = initialise_step(n, d, k) # run for fixed iterations for i in range(0, iterations): # m-step (weights_gaussian, mean_gaussian, covariance_matrix_gaussian) = maximization_gaussian(probability_values,data,mixture_coeffs,mean_gaussian,covariance_matrix_gaussian) # e-step (probability_values) = expectation_step(n, d, k, data, mixture_coeffs, mean_gaussian, covariance_matrix_gaussian) # Loss Function( Test for convergence ) log_likelihood = compute_log_likelihood(data, weights_gaussian, mean_gaussian, covariance_matrix_gaussian,k) if i%10 == 0: print(f' for iteration: {i} weights: {weights_gaussian} mean_gaussian: {mean_gaussian} covariance: {covariance_matrix_gaussian} loss: {log_likelihood}') # return answer return (weights_gaussian, mean_gaussian, covariance_matrix_gaussian) data = [] for row in range(img3.shape[0]): for col in range(img3.shape[1]): val = [] val.append(img1[row, col, 2]) data.append(val) data = np.array(data) data.shape (weights_gaussian, mean_gaussian, covariance_matrix_gaussian) = run_expectation_maximization_algorithm(2500, 1, 2, 50, data) print(covariance_matrix_gaussian.shape) y = [] x = [] for i in range(0, 256): x.append(i) output = gaussian_estimation(i, mean_gaussian[0], covariance_matrix_gaussian[0], 1) * weights_gaussian[0] y.append(output) plt.plot(x, y) plt.show() # 
Learning and segmenting the buoys fit_gaussian(img3) #img2 = np.array([[[1,2,3],[4,5,6],[7,8,9]],[[10,11,12],[13,14,15],[16,17,18]]]) img2[:,:,np.newaxis][2].shape # Learning and segmenting the buoys path_to_image_buoy1 = 'C:\\Users\\shant\\Underwater_Color_Segmentation_GMM_EM_Algorithm\\data\\buoy1\\train\\buoy156.png' path_to_image_buoy2 = 'C:\\Users\\shant\\Underwater_Color_Segmentation_GMM_EM_Algorithm\\data\\buoy2\\train\\buoy108.png' path_to_image_buoy3 = 'C:\\Users\\shant\\Underwater_Color_Segmentation_GMM_EM_Algorithm\\data\\buoy3\\train\\buoy28.png' img1 = cv2.imread(path_to_image_buoy1) img2 = cv2.imread(path_to_image_buoy2) img3 = cv2.imread(path_to_image_buoy3) cv2.imshow('img1',img1) cv2.waitKey(0) cv2.destroyAllWindows() cv2.waitKey(1) # Segment the 3 buoys into 3 different color channels # get the training data def import_training_data(path, channel1, channel2, channel3): data = [] training_files = glob.glob(path + "/*") for file in training_files: training_image = cv2.imread(file) for row in range(training_image.shape[0]): for col in range(training_image.shape[1]): value = [] # If channel1 is true => 1 then append the data for the Blue color channel # Same is followed for hte green and red colour channels if (channel1): value.append(training_image[row,col,0]) if(channel2): value.append(training_image[row,col,1]) if(channel3): value.append(training_image[row,col,2]) data.append(value) return data train_data = import_training_data('C:\\Users\\shant\\Underwater_Color_Segmentation_GMM_EM_Algorithm\\data\\buoy2\\train\\', 0,1,0) train_data = np.array(train_data) train_data = train_data[:20000,:] # Run the Expectation maximization algorithm (mixture_coeff, mean_gaussian, covariance) = run_expectation_maximization_algorithm(train_data.shape[0],1,3,30,train_data) print(f'mixture_coeff: {mixture_coeff} mean_gaussian: {mean_gaussian} covariance: {covariance}') from scipy.stats import multivariate_normal as mvn y = [] x = [] for i in range(0, 256): x.append(i) 
output = mvn.pdf(i, mean_gaussian[0], covariance[0]) * mixture_coeff[0] + mvn.pdf(i, mean_gaussian[1], covariance[1]) * mixture_coeff[1] + mvn.pdf(i, mean_gaussian[2], covariance[2]) * mixture_coeff[2] y.append(output) plt.plot(x,y) plt.show() y = [] x = [] for i in range(0, 256): x.append(i) output = gaussian_estimation(i, mean_gaussian[0], covariance[0], 1) * mixture_coeff[0] + gaussian_estimation(i, mean_gaussian[1], covariance[1], 1) * mixture_coeff[1] + gaussian_estimation(i, mean_gaussian[2], covariance[2], 1) * mixture_coeff[2] y.append(output) plt.plot(x,y) plt.show() # Segmentation of the buoys cap = cv2.VideoCapture('data/detectbuoy.avi') # Defining the codec and the videowriter object fourcc = cv2.VideoWriter_fourcc(*'XVID') out = cv2.VideoWriter('orange_buoy.avi',fourcc,20.0,(640,480)) while(cap.isOpened()): ret, frame = cap.read() if ret == False: break # Reshape the image # Flatten it out image = np.reshape(frame[:,:,2], (frame.shape[0]*frame.shape[1],d)) # Maximization of the log likelihood function x = np.array([[[1,2],[4,5],[7,8]],[[1,2],[4,5],[7,8]],[[1,2],[4,5],[7,8]]]) x.shape[2] print(f'mixture_coeff: {mixture_coeff} mean_gaussian: {mean_gaussian} covariance: {covariance}') train_data = import_training_data('C:\\Users\\shant\\Underwater_Color_Segmentation_GMM_EM_Algorithm\\data\\buoy2\\train\\', 0,1,0) train_data = np.array(train_data) train_data = train_data[:30000,:] train_data.shape x = compute_log_likelihood(train_data,mixture_coeff,mean_gaussian,covariance) x ```
github_jupyter
### Classification |Data exploration using iris toy dataset --- **Dataset**: We are going to use the famous iris data set for our example. The dataset consists of four attributes: sepal-width, sepal-length, petal-width and petal-length. These are the attributes of specific types of iris plant. The task is to predict the class to which these plants belong. There are three classes in the dataset: Iris-setosa, Iris-versicolor and Iris-virginica. Further details of the dataset are available at [UCI](https://archive.ics.uci.edu/ml/datasets/iris). ``` from sklearn import datasets import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.style.use('ggplot') iris = datasets.load_iris() type(iris) ``` Bunch is similar to a dictionary in that it contains key-value pairs. ``` print(iris.keys()) ``` Printing the keys, we see that they are the feature names. DESCR, which provides a description of the dataset; the target names; the data, which contains the values features; and the target which is the target data. And both the features and target data are provided as NumPy arrays. ``` type(iris.data), type(iris.target) iris.data.shape ``` The `.shape` attribute of the array feature tell us that there are 150 rows and four columns. **Remember:** Samples are in rows, features are in columns. Thus we have 150 samples and the four features: petal length and width and sepal lenght and width. Moreover, note that the target variable is enconded as zero for "setosa", 1 for "versicolor" and 2 for "virginica". We see this by printing iris.target_names. ``` iris.target_names ``` In order to perform some initial exploratory data analysis (EDA). Will build a DataFrame of the feature data using pandas, and also passing columns names. ``` X = iris.data y = iris.target df = pd.DataFrame(X, columns=iris.feature_names) print(df.head()) ``` Viewing the head of the data frame show us the first five rows. Now, we'll do a bit visual EDA. 
We use the pandas function scatter matrix to visualize our dataset. We pass it the our DataFrame, along with our target variable as argument to the parameter **c**, which stands for color, ensuring that our data points in our figure will be colored by their species. We also pass a list to fig size of our figure, as well as a marker size and shape. ``` _ = pd.plotting.scatter_matrix(df, c = y, figsize = (10,10), marker = 'D') ``` The result is a matrix of figures, which on the diagonal are histograms of the features corresponding to the row and column. The off-diagonal figures are scatter plots of the column feature versus row feature colored by the target variable. There is a great deal of information in this scatter matrix. The petal hight and lenght are highly correlated, as you may expect, and that flowers are clustered according to species. ### Classification --- We have a set of labeled data and we want to build a classifier that takes unlabeled data as input and output a label. To construct this classifier: We first need choose a type of classifier and it needs to learn from the already labeled data. For this reason, we call the already labeled data the training data. I choose a simple algorithm called **K-Nearst Neighbors**. - Basic ideia of KNN: Predict the label of any data point by: - Looking at the 'k' closest labeled data points. - Taking a majority vote. **Scikit-learn fit and predict** All machine learning models in scikit-learn are implemented as python classes. These classes serve two purposes: - They implement the algotithms for learning and predicting. - Store the information learned from the data. Training a model on the data = 'fitting' a model to the data: `.fit()` method. To predict the labels of new data: `.predit()` method. **Using scikit-learn to fit a classifier** First we import KNN Classifier from sklearn. We then instatiante our KNeighborsClassifier, set the number of neighbors equal to 6, and assign it to the variable knn. 
Then we can fit this classifier to our training set, the labeled data. ``` from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors = 6) knn.fit(iris['data'], iris['target']) ``` The features and target, used as arguments to fit are NumPy arrays. The scikit-learn API requires firstly that you have the data as a NumPy array or pandas DataFrame. It also requires that the features take on continuous values, and It also requires that there are no missing values in the data. _Later, I will discuss how to deal with categorical features and missing data. In particular, the scikit-learn API requires that the features are in an array where each column is a feature and each row a different observation or data point._ Now that we have fit our classifier, lets use it to predict on some unlabeled data. **Predicting on unlabeled data** ``` # Here we have set of observations, x_new. # We use the predict method on the classifier and pass it the data. # This is a randomic array to set of observations. X_new = np.array([[5.6, 2.8, 3.9, 1.1], [5.7, 2.6, 3.8, 1.3], [4.7, 3.2, 1.3, 0.2]]) prediction = knn.predict(X_new) X_new.shape # That is, three observations and four features print('Prediction: {}'.format(prediction)) ``` It predicts one, which correspondes to 'versicolor' for the first two observations and 0, which correspons to 'setosa' for the third. ### Measuring model perfomance ---- Now that we know to fit a classifier and use it to predict the labels of previouly unseen data, we need to figure out how to measure its performance. We need a metric. - In classification problems, __accuracy__ is a commonly-used metric. - Accuracy = Fraction of correct predictions. - Which data should be used to compute accuracy? What we are really insterested in is: __how well our model will perform on new data?__ that is, samples that the algorithm has never seen before. You could compute accuracy on data used to fit classifier. 
However, as this data was used to train it, the classifier's performance will NOT be indicative of how well it can generalize to unseen data. For this reason, it is common practice to __split your data into two sets__, a training set and a test set. - Fit/train the classifier on the training set. - Make predictions on the labeled test set. - Compare these predictions with the known labels. - Compute the accuracy of your predictions. **Train/Test split** To do this, we first import train test split from sklearn model selection. We then use the train test split function to randomly split our data. The first argument will be the feature data, the second the target or labels. The test size keyword argument specifies what proportion of the original data is used for the test set. Lastly, the random state kwarg sets a seed for the random number generator that splits the data into training and test. Setting the seed with the same argument later will allow you to reproduce the exact split and your downstream results. ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state= 21, stratify=y) ``` The train test split returns four arrays: The training data, the test data, the training labels and the test labels. By default, the function splits the data into 75% training data and 25% test data, which is a good rule of thumb. We specify the size of the test set using the keyword argument `test_size`, which we do here to set it to 30%. It's also best practice to perform your split so that the split reflects the labels on your data. That is, you want the labels to be distributed in train and test sets as they are in the original dataset. To achieve this, we use the keyword argument `stratify = y`, where **y** is the list or array containing the labels. 
Then instantiate our KNN classifier, fit it to the training data using the **fit** method, make our predictions on the test data and store the results as `y_pred`. ``` knn = KNeighborsClassifier(n_neighbors = 8) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) print("Test set predictions:\n {}".format(y_pred)) ``` The check out the accuracy of our model, we use the score method and pass it `X_test` and `y_test`. ``` knn.score(X_test,y_test) from sklearn.metrics import classification_report, confusion_matrix print(confusion_matrix(y_test, y_pred)) print(classification_report(y_test, y_pred)) ``` **Model Complexity** - Larger k = smoother decision boundary = less complex model - Smaller k = more complex model = can lead to overfitting ``` # Setup arrays to store train and test accuracies neighbors = np.arange(1, 7) train_accuracy = np.empty(len(neighbors)) test_accuracy = np.empty(len(neighbors)) # Loop over different values of k for i, k in enumerate(neighbors): # Setup a k-NN Classifier with k neighbors: knn knn = KNeighborsClassifier(n_neighbors = k) # Fit the classifier to the training data knn.fit(X_train, y_train) #Compute accuracy on the training set train_accuracy[i] = knn.score(X_train, y_train) #Compute accuracy on the testing set test_accuracy[i] = knn.score(X_test, y_test) # Generate plot plt.title('k-NN: Varying Number of Neighbors') plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy') plt.plot(neighbors, train_accuracy, label = 'Training Accuracy') plt.legend() plt.xlabel('Number of Neighbors') plt.ylabel('Accuracy') plt.show() ```
github_jupyter
# To Do: done! * download actual pic for the website ``` import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt df = pd.read_csv('cosmos.v1.3.8_neshini.cat',delimiter='\s+',memory_map=True) st = pd.read_csv('Straatmen_2016_Table3.txt', delimiter=' ') agn = pd.read_csv('cosmos.v1.3.6.agn.v0.5_neshini.cat', delimiter='\s+', skiprows = 3, memory_map=True) sfr = pd.read_csv('cosmos.v1.3.6.sfr.v0.4_neshini.cat', delimiter='\s+', memory_map=True) df.query('id < 100') #ONE galaxy graphed + info plt.figure(figsize=(12,5)) gala = list(range( 1284,1285)) k = 1 for k in gala: if df.loc[k,'star'] == 0: print("The", 'C%s' %k, "points shows the galaxy with the id", df.id[k]) plt.text(1.1, 0.85, "Extra Information",fontsize=15, transform = plt.gca().transAxes) ab =['no data','positive detection'] ab[-1] = 'nil detection' y, z = 0.75, .1 plt.text(1.1, y, "Infrared AGN: %s"%ab[agn.ir_agn[k]], fontsize=13.5, transform = plt.gca().transAxes) plt.text(1.1, y-z, "Radio AGN: %s"%ab[agn.radio_agn[k]], fontsize=13.5, transform = plt.gca().transAxes) plt.text(1.1, y-z*2, "X-ray AGN: %s"%ab[agn.xray_agn[k]], fontsize=13.5, transform = plt.gca().transAxes) plt.text(1.1, y-z*3, "UV+IR measured star formation rate: %s"% sfr.SFR_UVIR[k], fontsize=13.5, transform = plt.gca().transAxes) i=0 plt.xlabel('Wavelength ($\mu$m)') plt.ylabel('Flux') while i< len(st): F = 'f_' + st.loc[i, 'Filter'] E = 'e_' + st.loc[i, 'Filter'] if df.loc[k, F] >= 0: if st.loc[i, 'Filter'] == 'F160W': signo = df.loc[k,F]/df.loc[k,E] print("Signal to Noise of", st.loc[i, 'Filter'], signo) #plt.scatter(st.loc[i,'Lambda_c'],df.loc[k, F], c = 'C%s' %k) if i == 0: plt.scatter(st.loc[i,'Lambda_c'],df.loc[k, F], c = 'C%s' %k, label= 'id %s' %df.id[k]) else: plt.scatter(st.loc[i,'Lambda_c'],df.loc[k, F], c = 'C%s' %k) i+=1 else: print ("This is a star with the id", k) print(' ') plt.legend() plt.tight_layout() plt.show() plt.close() #MULTIPLE galaxies graphed plt.figure() gala = 
list(range( 1216,1224)) k = 1 for k in gala: if df.loc[k,'star'] == 0: i=0 plt.xlabel('Wavelength ($\mu$m)') plt.ylabel('Flux') while i< len(st): F = 'f_' + st.loc[i, 'Filter'] E = 'e_' + st.loc[i, 'Filter'] if df.loc[k, F] >= 0: if i == 0: plt.scatter(st.loc[i,'Lambda_c'],df.loc[k, F], c = 'C%s' %k, label= 'id %s' %df.id[k]) else: plt.scatter(st.loc[i,'Lambda_c'],df.loc[k, F], c = 'C%s' %k) i+=1 else: print ("id ",k, "cannot be plotted because it is a star") print(' ') plt.legend() plt.tight_layout() plt.show() plt.close() #Special galaxies graphed + info # find non negative examples of UV+IR (1285) #find a high star formation rate and plot (1132) #find a source with a good z spec (not -99) (15472) plt.figure(figsize=(12,5)) gala = [1284, 1131, 11799] k = 1 for k in gala: if df.loc[k,'star'] == 0: print("The", 'C%s' %k, "points shows the galaxy with the id", df.id[k]) ab =['no data','positive detection'] ab[-1] = 'nil detection' y, z = 0.75, .1 print ("Infrared AGN: %s"%ab[agn.ir_agn[k]]) print("Radio AGN: %s"%ab[agn.radio_agn[k]]) print("X-ray AGN: %s"%ab[agn.xray_agn[k]]) print("UV+IR measured star formation rate: %s"% sfr.SFR_UVIR[k]) i=0 plt.xlabel('Wavelength ($\mu$m)') plt.ylabel('Flux') while i< len(st): F = 'f_' + st.loc[i, 'Filter'] E = 'e_' + st.loc[i, 'Filter'] if df.loc[k, F] >= 0: if st.loc[i, 'Filter'] == 'F160W': signo = df.loc[k,F]/df.loc[k,E] print("Signal to Noise of", st.loc[i, 'Filter'], signo) print(' ') #plt.scatter(st.loc[i,'Lambda_c'],df.loc[k, F], c = 'C%s' %k) if i == 0: plt.scatter(st.loc[i,'Lambda_c'],df.loc[k, F], c = 'C%s' %k, label= 'id %s' %df.id[k]) else: plt.scatter(st.loc[i,'Lambda_c'],df.loc[k, F], c = 'C%s' %k) i+=1 else: print ("This is a star with the id", k) print(' ') plt.legend() plt.tight_layout() plt.show() plt.close() ```
github_jupyter
``` # Importing essential libraries import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns #import the dataset df = pd.read_csv('../Datasets/diabetes.csv') df.head() # Renaming DiabetesPedigreeFunction as DPF df = df.rename(columns={'DiabetesPedigreeFunction':'DPF'}) # Replacing the 0 values from ['Glucose','BloodPressure','SkinThickness','Insulin','BMI'] by NaN df_copy = df.copy(deep=True) df_copy[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']] = df_copy[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']].replace(0,np.NaN) # Print No. of rows and columns print(f'Total Rows {df_copy.shape[0]}') print(f'Total Rows {df_copy.shape[1]}') ``` # Data Visualization ``` #Collect all columns having Missing values and at what percentage columns_with_na=[features for features in df_copy.columns if df_copy[features].isnull().sum()>0] for feature in columns_with_na: print(feature, np.round(df_copy[feature].isnull().mean(), 4), ' % missing values') for feature in columns_with_na: data = df_copy.copy() data[feature] = np.where(data[feature].isnull(), 1, 0) data.groupby(feature)['Outcome'].value_counts().plot.bar() plt.title(feature) plt.show() #Get all the columns having numerical values num_data = [features for features in df_copy.columns if df_copy[features].dtypes != 'O'] print(f'Number of numerical columns is {len(num_data)}') df_copy[num_data].head() # Check if any numerical columns are discrete discrete_columns = [feature for feature in df_copy.columns if len(df_copy[feature].unique()) < 20 and feature not in ['Outcome'] ] print(f'Number of discrete Columns is {len(discrete_columns)}') for i in discrete_columns: print(f'{i} has {len(df_copy[i].unique())} discrete values') df_copy[discrete_columns].head() #Check distribution of the discrete data for feature in discrete_columns: dt=df_copy.copy() 
dt.groupby(feature)['Outcome'].value_counts().plot.bar() plt.xlabel(feature) plt.ylabel('Outcome') plt.title(feature) plt.show() # Check if any numerical columns are continous continous_columns = [feature for feature in num_data if feature not in discrete_columns and feature not in ['Outcome'] ] print(f'Number of Continous Columns is {len(continous_columns)}') df_copy[continous_columns].head() #Check distribution of the Continous data for feature in continous_columns: dt = df_copy.copy() dt[feature].hist(bins=25) plt.xlabel(feature) plt.ylabel("Count") plt.title(feature) plt.show() ``` # Data Preprocessing ``` #Collect all columns having Missing values and at what percentage columns_with_na=[features for features in df_copy.columns if df_copy[features].isnull().sum()>0] for feature in columns_with_na: print(feature, np.round(df_copy[feature].isnull().mean(), 4), ' % missing values') #Replace Nan Values with median of that column and make new column as df_copy[feature_nan]. Give it value 1 is Nan is present else 0 for feature in columns_with_na: median_value = df_copy[feature].mean() df_copy[feature+'nan'] = np.where(df_copy[feature].isnull(), 1, 0) df_copy[feature].fillna(median_value, inplace = True) df_copy[columns_with_na].isnull().sum() df_copy.head() # Remove any duplicate entry final_df =df_copy.loc[:,~df_copy.columns.duplicated()] final_df.shape # Model Building from sklearn.model_selection import train_test_split X = df.drop(columns='Outcome') y = df['Outcome'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0) ``` # Naive Bayes ``` from sklearn.naive_bayes import GaussianNB classifier = GaussianNB() classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_pred from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test, y_pred) print(cm) print(accuracy_score(y_test, y_pred)) ``` # Random Forest ``` # Creating Random Forest Model from sklearn.ensemble import 
RandomForestClassifier from sklearn.model_selection import RandomizedSearchCV clf = RandomForestClassifier() n_estimators = [10, 20, 30, 50, 100] max_depth = [2, 3, 5, 7, 10] # Define the grid of hyperparameters to search hyperparameter_grid = { 'n_estimators': n_estimators, 'max_depth':max_depth, } random_cv = RandomizedSearchCV(estimator=clf,param_distributions=hyperparameter_grid, cv=5, n_iter=5,scoring = 'neg_mean_absolute_error',n_jobs = 4,verbose = 5, return_train_score = True,random_state=42) random_cv.fit(X_train,y_train) random_cv.best_estimator_ clf = RandomForestClassifier(max_depth=10, n_estimators=50) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) y_pred from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test, y_pred) print(cm) print(accuracy_score(y_test, y_pred)) ``` # Decision Tree ``` from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV classifier = DecisionTreeClassifier() from scipy.stats import randint from sklearn.model_selection import RandomizedSearchCV parameters = {"max_depth":[2,3, None], "max_features":randint(1,9), "min_samples_leaf":randint(1,8), "criterion": ["gini", "entropy"] } tree_cv = RandomizedSearchCV(classifier, parameters, cv = 5) tree_cv.fit(X_train, y_train) tree_cv.best_estimator_ classifier = DecisionTreeClassifier(max_depth=2, max_features=6, min_samples_leaf=7) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_pred from sklearn.metrics import confusion_matrix, accuracy_score cm = confusion_matrix(y_test, y_pred) print(cm) print(accuracy_score(y_test, y_pred)) # Creating a pickle file for the classifier import pickle filename = 'diabetes-model.pkl' pickle.dump(clf, open(filename, 'wb')) ```
github_jupyter
# Tutorial in Bayesian Optimization #### Javier Gonzalez (j.h.gonzalez@sheffield.ac.uk) University of Sheffield. ## The basics Bayesian optimization (BO) is a strategy for global optimization of black-box functions. For instance, consider a Lipschitz continuous function $f(x)$ defined on a domain $\mathcal{X}$. BO aims to obtain $$x^* = \arg \max_{\mathcal{X}} f(x)$$ There are two crucial bits in any Bayesian Optimization (BO) procedure. * Define a **prior probability measure** on $f$: this function will capture our prior beliefs on $f$. The prior will be updated to a 'posterior' using the available data. * Define an **acquisition function** $acqu(x)$: this is a criterion to decide where to sample next in order to gain the maximum information about the location of the global maximum of $f$. Given a prior over the function $f$ and an acquisition function, a BO procedure will converge to the optimum of $f$ under some conditions. ## Use of Bayesian Optimization in real applications BO has been applied to solve a wide range of problems such as: Interactive animation, Sensor networks, Automatic algorithm configuration, Automatic machine learning toolboxes, Reinforcement learning, Organization planning, Deep learning, Engineering and many more! ## 1D-Toy illustration We illustrate the idea behind BO using a one-dimensional example. We start by importing the required libraries for the analysis. Note that we use our library GPy for Gaussian Processes! The on-line documentation of GPy is available from the [SheffieldML github page](https://github.com/SheffieldML/GPy). ``` %matplotlib inline import GPy import pylab as pb import numpy as np import matplotlib.pyplot as plt # plots import scipy.stats from scipy.stats import norm ``` Let's start by considering the function $f(x) = -\cos(2\pi x) + \sin(4\pi x)$ defined on the interval $[0.08, 0.92]$. The maximum of this function is located at $x_{max}=0.6010$. Obviously, obtaining it in this example is trivial. 
But, what if $f$ is not explicitly available and we only have access to a small number of noisy evaluations? We see how the BO acts in this case for illustrative purposes but, of course, BO can be used in more complex scenarios. We first generate 3 noisy observations sampled from $f$ and we proceed. ``` ## Function f(x) X_star = np.linspace(0,1,1000)[:,None] Y_star = -np.cos(2*np.pi*X_star) + np.sin(4*np.pi*X_star) X_eval = X_star # Sampled values np.random.seed([1]) n = 3 X = np.array([[0.09],[0.2],[0.8]]) Y = -np.cos(2*np.pi*X) + np.sin(4*np.pi*X) + np.random.randn(n,1)*0.1 # Plot of f(x) and the generated sample plt.rcParams['figure.figsize'] = 8, 5 plt.figure(); plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.plot(X,Y,'kx',mew=1.5) plt.xlabel('x') plt.ylabel('f(x)') plt.savefig('data.pdf') ``` ### 3.1 Gaussian Process Prior Now we define a Gaussian Process (GP) prior on $f$. A GP is an extension of the multivariate Gaussian distribution to an infinite-dimensional stochastic process for which any finite combination of dimensions is a Gaussian distribution. Therefore a GP is a distribution over functions, which is totally specified by its mean function $m(x)$ and its covariance function $k(x,x')$: $$f(x) \sim \mathcal{GP}(m(x),k(x,x')) $$ For convenience, the mean is often fixed as $m(x)=0$. We use as covariance function the exponentiated quadratic kernel $$ k(x,x') = \sigma^2 \exp\left(-\frac{\|x-x'\|^2}{2l^2}\right) $$ where $\sigma^2$ (the variance) and $l$ (the lengthscale) are positive parameters. Next, we fit this model to our dataset. We start by creating a kernel object. ``` # Choose the kernel k = GPy.kern.RBF(input_dim=1, variance=1, lengthscale=0.1) ``` Now we create a Gaussian Process model using as covariance function the previous kernel and we optimize the parameters by maximizing the log-likelihood. In order to avoid local solutions we use 10 different initial points in the optimization process. 
``` # We create the GP model m = GPy.models.GPRegression(X, Y, k) m.optimize() m.optimize_restarts(num_restarts = 10) ``` Now, it is time to have a look at the fitted model. We show the parameters and the fitted function and a plot to see how it fits the data. ``` print m #m.plot() fest = m.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=1.5) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.title('GP model') plt.xlabel('x') plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel.pdf') ``` Given this model, where do you think the maximum of the function should be? Around 0.3 the posterior mean is maximum but around 0.45 the variance is large. If you could collect a new data point, where would you do it? This is the job of the second main element of any Bayesian Optimization procedure: the acquisition function. ### 3.2 Acquisition functions The next lines of code define the three acquisition functions we are going to use in our example. They are the functions that represent our beliefs over the maximum of $f(x)$. Denote by $\theta$ the parameters of the GP model and by $\{x_i,y_i\}$ the available sample. Three of the most common acquisition functions are: * **Maximum probability of improvement (MPI)**: $$acqu_{MPI}(x;\{x_n,y_n\},\theta) = \Phi(\gamma(x)), \mbox{where}\ \gamma(x)=\frac{\mu(x;\{x_n,y_n\},\theta)-f(x_{best})-\psi}{\sigma(x;\{x_n,y_n\},\theta)}.$$ * **Expected improvement (EI)**: $$acqu_{EI}(x;\{x_n,y_n\},\theta) = \sigma(x;\{x_n,y_n\},\theta) \left(\gamma(x) \Phi(\gamma(x)) + \phi(\gamma(x))\right),$$ where $\phi$ is the standard normal density. * **Upper confidence bound (UCB)**: $$acqu_{UCB}(x;\{x_n,y_n\},\theta) = \mu(x;\{x_n,y_n\},\theta)+\eta\sigma(x;\{x_n,y_n\},\theta).$$ Both $\psi$ and $\eta$ are tunable parameters that help to make the acquisition functions more flexible. 
Also, in the case of the UBC, the parameter $\eta$ is useful to define the balance between the importance we give to the mean and the variance of the model. This is know as the **exploration/exploitation trade off**. ``` def MPI_max(x,model,par = 0.01): fest = model.predict(x) acqu = norm.cdf((fest[0]-max(fest[0])-par) / fest[1]) return acqu def EI_max(x,model,par = 0.01): fest = model.predict(x) Z = (fest[0]-max(fest[0])-par) / fest[1] acqu = (fest[0]-max(fest[0])-par)*norm.cdf(Z)+fest[1]*norm.pdf(Z) return acqu def UBC_max(x,model,z_mui=1): fest = model.predict(x) acqu = fest[0]+z_mui*np.sqrt(fest[1]) return acqu ``` We evaluate the functions on our interval of interest. Here, the maximum is found using grid search but in higher dimensional problems and the maximum can be systematically obtained with a Conjugate Gradient method. ``` ## Evaluate and get the maximum of the acquisition function (grid seach for-plotting purposes) # MPI acqu_MPI1 = MPI_max(X_eval,m,0.01) acqu_MPI2 = MPI_max(X_eval,m,0.1) acqu_MPI3 = MPI_max(X_eval,m,0.5) max_MPI1 = X_eval[np.argmax(acqu_MPI1)] max_MPI2 = X_eval[np.argmax(acqu_MPI2)] max_MPI3 = X_eval[np.argmax(acqu_MPI3)] # EI acqu_EI1 = EI_max(X_eval,m,0.01) acqu_EI2 = EI_max(X_eval,m,0.1) acqu_EI3 = EI_max(X_eval,m,0.5) max_EI1 = X_eval[np.argmax(acqu_EI1)] max_EI2 = X_eval[np.argmax(acqu_EI2)] max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = max_EI3 # UBC acqu_UBC1 = UBC_max(X_eval,m,0.5) acqu_UBC2 = UBC_max(X_eval,m,1) acqu_UBC3 = UBC_max(X_eval,m,4) max_UBC1 = X_eval[np.argmax(acqu_UBC1)] max_UBC2 = X_eval[np.argmax(acqu_UBC2)] max_UBC3 = X_eval[np.argmax(acqu_UBC3)] res_max_UBC3 = max_UBC3 # Plot GP posterior, collected data and the acquisition function m.plot() plt.ylim(-2,3) plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.title('GP model') plt.savefig('datamodel.pdf') plt.figure(figsize=(12,4)) plt.subplot(1, 3, 1) plt.title('Acquisition functions for MPI') plt.xlim(0.08,0.92) p1, = plt.plot(X_eval, acqu_MPI1, 
'r-',lw=2.5) p2, = plt.plot(X_eval, acqu_MPI2, 'b-',lw=2.5) p3, = plt.plot(X_eval, acqu_MPI3, 'g-',lw=2.5) plt.title('Acquisition functions for MPI') plt.xlim(0.08,0.92) plt.xlabel('x') plt.ylabel('Acquisition value') plt.legend([p1, p2, p3], ["0.01", "0.1", "0.5"]) plt.axvline(x=max_MPI1,ls='-',c='red') plt.axvline(x=max_MPI2,ls='-',c='blue') plt.axvline(x=max_MPI3,ls='-',c='green') plt.subplot(1, 3, 2) plt.plot(X_eval, acqu_EI1, 'r-',lw=2.5) plt.plot(X_eval, acqu_EI2, 'b-',lw=2.5) plt.plot(X_eval, acqu_EI3, 'g-',lw=2.5) plt.title('Acquisition functions for EI') plt.xlim(0.08,0.92) plt.xlabel('x') plt.ylabel('Acquisition value') plt.legend([p1, p2, p3], ["0.01", "0.1", "0.5"]) plt.axvline(x=max_EI1,ls='-',c='red') plt.axvline(x=max_EI2,ls='-',c='blue') plt.axvline(x=max_EI3,ls='-',c='green') plt.subplot(1, 3, 3) p1, = plt.plot(X_eval, acqu_UBC1, 'r-',lw=2.5) p2, = plt.plot(X_eval, acqu_UBC2, 'b-',lw=2.5) p3, = plt.plot(X_eval, acqu_UBC3, 'g-',lw=2.5) plt.title('Acquisition functions for UBC') plt.xlim(0.08,0.92) plt.xlabel('x') plt.ylabel('Acquisition value') plt.legend([p1, p2, p3], ["0.5", "1", "4"]) plt.axvline(x=max_UBC1,ls='-',c='red') plt.axvline(x=max_UBC2,ls='-',c='blue') plt.axvline(x=max_UBC3,ls='-',c='green') ``` Next, we show the how the three functions represents our beliefs about the maximum of $f(x)$. Note that all of them use the **mean** and the **variance** of the Gaussian process we have fitted to the data. In this example we simply select some values of the parameters. You can see how the thee acquisition functions represent their beliefs about the maximum of $f(x)$ in a different way. It is up to the user to select the most appropriate depending on the problem. Typically, if we can collect new data we will do it in the maximum of the acquisition function. ### 3.3 Iterative sampling/sequential design Next, to see how BO works iteratively, we use the Expected improvement with $\psi=0.5$. 
In each iteration we use the same generative model we considered for our first three data points in the point where $acqu_{EI}(x)$ is maximum. See what happens by running several times the cell below!! ``` # 1.- Collect an new sample where the MPI indicates and attach to the previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) 
#plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) # 1.- Collect an new sample where the MPI indicates and attach to the previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) #plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) # 1.- Collect an new sample where the MPI indicates and attach to the 
previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) #plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) # 1.- Collect an new sample where the MPI indicates and attach to the previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = 
np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) #plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) # 1.- Collect an new sample where the MPI indicates and attach to the previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = 
GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) #plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) # 1.- Collect an new sample where the MPI indicates and attach to the previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, 
messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) #plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) # 1.- Collect an new sample where the MPI indicates and attach to the previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = 
np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) #plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) # 1.- Collect an new sample where the MPI indicates and attach to the previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # 
GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) #plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) # 1.- Collect an new sample where the MPI indicates and attach to the previous dataset x_new = max_EI3 y_new = -np.cos(2*np.pi*x_new) + np.sin(4*np.pi*x_new) + np.random.randn(1,1)*0.1 X = np.vstack([X,x_new]) Y = np.vstack([Y,y_new]) # 2.- Run and optimize the new GP model k = GPy.kern.RBF(input_dim=1, variance=.1, lengthscale=.1) m_augmented = GPy.models.GPRegression(X, Y, k) m_augmented.constrain_positive('') m_augmented.likelihood.fix(0.01) m_augmented.optimize_restarts(num_restarts = 10, messages=0) # 3.- Optimize aquisition function MPI acqu_EI3 = EI_max(X_eval,m_augmented,0.5) max_EI3 = X_eval[np.argmax(acqu_EI3)] res_max_EI3 = np.vstack([res_max_EI3,max_EI3]) x_res = np.linspace(1,res_max_EI3.shape[0],res_max_EI3.shape[0]) # GP plot plt.rcParams['figure.figsize'] = 10, 3 # GP plot fest = m_augmented.predict(X_star) plt.plot(X_star,fest[0],c='blue',lw=2,ls='-',mew=4) 
plt.plot(X_star,fest[0]+1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X_star,fest[0]-1.96*np.sqrt(fest[1]),c='blue',lw=1,ls='-',mew=1) plt.plot(X,Y,'kx',mew=2.5) plt.title('GP model') plt.xlabel('x') plt.plot(X_star,Y_star,c='grey',lw=2,ls='--',mew=1.5) plt.ylabel('f(x)') plt.xlim(0,1) plt.savefig('datamodel7.pdf') # EI plot plt.rcParams['figure.figsize'] = 10, 3 plt.figure(figsize=(10,3)) p1, = plt.plot(X_eval,(acqu_EI3-min(acqu_EI3))/(max(acqu_EI3-min(acqu_EI3)) or 1.), 'g-',lw=2.5) plt.title('Acquisition function') plt.xlim(0,1.01) plt.ylim(-0.1,1.1) plt.xlabel('x') plt.ylabel('Value') plt.legend([p1], ["Expected improvement"]) plt.savefig('aq7.pdf') #print m_augmented # Convergence plot #plt.subplot(1, 2, 2) #plt.plot(x_res,res_max_EI3,'kx',mew=4.5) #plt.title('Convergence to the maximum') #plt.xlabel('iteration') #plt.ylabel('Value') #plt.ylim(-0.25,1.5) #plt.plot(x_res,res_max_EI3,'g-',lw=2.5) #axhline(y=0.6010,ls='--',c='red'#) ``` As you can see, after the first iterations, the aquisition function explores the domain of $f$. In some cases it is either flat or it concentrates all the mass in a point, which normally conincides with the exploration of the limit of the function domain. After some iterations, however, it becomes more narrow and the convergence to the maximum of $f$, 0.6010, is achieved. We only needed 7 data points!!
github_jupyter
# Football Betting with Multinomial Logit * **Data Source:** [https://www.kaggle.com/hugomathien/soccer](https://www.kaggle.com/hugomathien/soccer) * **Author:** Anders Munk-Nielsen ``` import pandas as pd import numpy as np from scipy.optimize import minimize import statsmodels.formula.api as smf import seaborn as sns import matplotlib.pyplot as plt sns.set_theme() # Read dat = pd.read_csv('football_probs.csv') # Data types dat.date = pd.to_datetime(dat.date) cols_to_cat = ['league', 'season', 'team', 'country'] for c in cols_to_cat: dat[c] = dat[c].astype('category') ``` # Outcome The outcome, $y$, can take three values, $y \in \{ 0, 1, 2\}$, corresponding to Lose, Draw, and Win, respectively. ``` dat['y'] = 0 # loss dat.loc[dat.goal_diff == 0.0 , 'y'] = 1 # draw dat.loc[dat.goal_diff > 0.0 , 'y'] = 2 # win assert (dat.loc[dat.goal_diff < 0.0 , 'y'] == 0).all() ``` # Conditional Probabilities We assume that the probability that $y$ takes each of the values is given by $$ \Pr (y_i = j | \mathbf{X}_i, \theta) = \frac{\exp(\mathbf{x}_{ij} \theta_j)}{\sum_{k=0,1,2} \exp(\mathbf{x}_{ik} \theta_k)}. $$ In other words, we can think of $v_{ij} \equiv \mathbf{x}_{ij}\theta_j$ as the scalar index that determines the relative probabilities: the higher is $v_{i1}$ (for fixed $v_{i0},v_{i2}$), the more likely it is that the outcome is $y_i = 1$. Compared to other models we have seen, we now have $\mathbf{x}_{ij}$ varying both over $i$ and $j$. Previously, we have otherwise looked at the *Multinomial Logit* model ($\mathbf{x}$ only varies with $i$, e.g. years of schooling, or alcohol abuse), or the *Conditional Logit* model ($\mathbf{x}$ only varies with $j$, e.g. car attributes). In this dataset, we will allow for covariates that vary over $i$ but which also do not necessarily enter in the "utility" for all outcomes. E.g. 
we may assume that the Bet365 Probability that the Away team wins, `B365_PrA`, should only affect the probability of Away winning (even though we could allow it to enter in the other utilities). Other regressors, however, like the `home` dummy (for whether the match is played at the home stadium) affect all outcomes, but with different coefficients. ## Normalization, short version We have to normalize $\theta_j = \mathbf{0}_{K\times 1}$ for one $j$. We will choose to normalize $j=0$. * It does not matter which $j$ we choose as the normalizing alternative, except of course for the interpretation of the coefficients. ## Estimation This implies that our probabilities are $$ \Pr(y_i = j| \mathbf{x}_{i1}, \mathbf{x}_{i2}; \boldsymbol{\theta}_1, \boldsymbol{\theta}_2) = \begin{cases} \frac{1}{1+\exp(\mathbf{x}_{i1}\boldsymbol{\theta}_{1})+\exp(\mathbf{x}_{i2}\boldsymbol{\theta}_{2})} & \text{if }j=0\\ \frac{\exp(\mathbf{x}_{ij}\boldsymbol{\theta}_{j})}{1+\exp(\mathbf{x}_{i1}\boldsymbol{\theta}_{1})+\exp(\mathbf{x}_{i2}\boldsymbol{\theta}_{2})} & \text{otherwise.} \end{cases} $$ Our full vector of parameters thus consists of $(\boldsymbol{\theta}_1, \boldsymbol{\theta}_2)$, which is $2K$ long. (There are $J-1 = 2$ sets of coefficients, because we have normalized one of our $J=3$ alternatives to have zero coefficients.) ## Normalization, Longer version Since probabilities must sum to 100%, we cannot allow $v_{ij}$ to be changing freely. For instance, if we add the same scalar, $\lambda \in \mathbb{R}$, to all indices, then we get unchanged probabilities: $$ \frac{\exp(\lambda + v_{ij})}{\sum_{k=0,1,2} \exp(\lambda + v_{ik})} = \underset{=1}{\underbrace{\frac{\exp(\lambda)}{\exp(\lambda)}}} \frac{\exp(\mathbf{x}_{ij} \theta_j)}{\sum_{k=0,1,2} \exp(\mathbf{x}_{ik} \theta_k)}. 
$$ This means that we cannot distinguish the implied behavior by the indices $(v_{i0},v_{i1},v_{i2})$, and that implied by $(v_{i0}+\lambda,v_{i1}+\lambda,v_{i2}+\lambda)$: The choice probabilities are analytically identical. This means that we cannot include a common intercept in all $v_{ij}$, which in practice implies that we can have at most $J-1 = 2$ intercepts. Similar arguments will explain why we cannot have all regressors entering in all $v_{ij}$, which amounts to saying that we have to set $\theta_j = \mathbf{0}_K$ for one alternative $j$. ## Setting up regressors ``` # add an explanatory variable: home dummy, with separate effect on Pr(D) and Pr(W) dat['home_W'] = dat.home.copy() * 1.0 dat['home_D'] = dat.home.copy() * 1.0 dat['const_W'] = 1.0 dat['const_D'] = 1.0 SMALLSCALE = True if SMALLSCALE: # only Bet365, a home-dummy and a constant term cols = [f'{x}{o}' for x in ['B365_Pr', 'home_', 'const_'] for o in ['D','W']] else: # many variables, but not those that have too many missings firms_drop = ['BS', 'GB', 'PS', 'SJ'] # these are missing in too many years cols = [c for c in dat.columns if (c[-4:-1] == '_Pr') # probabilities of win lose draw & (c[-1] != 'L') # lose will be the normalized outcome & (c[:2] not in firms_drop) # do not include from these firms ] cols += ['home_D', 'home_W', 'const_D', 'const_W'] print(f'Chosen columns: {cols}') ``` Dropping observations (rows) with missing values for one or more variables. ``` I = dat[cols].notnull().all(1) print(f'Keeping only non-missings: {I.mean():5.2%} obs. (N = {I.sum():,d})') dat = dat.loc[I, :].copy() ``` ## Pandas to numpy We want to write our objective function in `numpy`, so we have to extract our variables. 
``` cols J = 3 # 3 discrete choices: Lose, Draw, Win K = round(len(cols) / (J-1)) # J-1 since we only have covariates for the two N = dat.shape[0] print(f'Chosen columns: {cols}') y = dat['y'].values.reshape((N,)) X = dat[cols].values.reshape((N,K,J-1)) assert not np.any(np.isnan(X)), 'NaNs in X' assert not np.any(np.isnan(y)), 'NaNs in y' ``` ## Visualizing `X` and working with 3D arrays ``` ii = [0,1,-10] # let's look at these rows x = X[ii] # and print them nicely from pandas using "iloc" dat.iloc[ii][cols + ['team', 'enemy_team']] theta = 0.1 * np.ones((K,J-1)) ``` $\theta$: one parameter for each $(k,j)$, but $\theta_{jk}=0 $ for the normalized alternative, $j=0$. ``` theta # labels? pd.DataFrame(np.reshape(cols, (K,J-1)), columns=['Draw', 'Win']) ``` $x_{ijk}$: for a given $i$, $K\times (J-1)$. ``` i = 2 x[i] ``` $x_{ijk} \beta_{jk}$ ``` xb = x[i] * theta xb ``` $v_{ij} \equiv \sum_k x_{ijk} \theta_{jk}$ (one for each $j=1,2$) ``` xbsum = xb.sum(axis=0) xbsum ``` $v_{i0} := 0$ (normalization) ``` u = np.hstack([0., xbsum]) u denom = np.sum(np.exp(u)) ccp = np.exp(u) / denom ccp ``` ### With three individuals ``` theta xb = x * theta xb ``` ### Scale up to the full dataset ``` theta.shape X.shape (X * theta).sum(axis=1).shape ``` $X$ is $N \times K \times J-1$, so we sum over the middle-index $k$, `axis=1`. ``` xb = (X * theta).sum(axis=1) # sum over J-dimension xb u = np.hstack([np.zeros((N,1)), xb]) u ``` To max rescale, compute maximum over rows, i.e. 
`axis=1` Choice probabilities ``` denom = np.sum(np.exp(u), axis=1).reshape(N,1) (np.exp(u) / denom) ``` _____ # Criterion function Our estimator is defined as $$ \hat{\theta} = \arg\min_\theta \frac{1}{N} \sum_{i=1}^N q_i(\theta),$$ where the criterion function is $$ q_i(\theta) \equiv - \log \Pr(y_i \vert x_i, \theta),$$ and choice probabilities are computed as $$ \Pr(y_i \vert x_i, \theta) = \frac{\exp(v_{iy_i})}{\sum_{j=1}^J \exp(v_{ij})}, $$ and utility indices are given by $$ v_{ij} \equiv \mathbf{x}_{ij} \boldsymbol{\beta}_j = \sum_{k=1}^K \beta_{jk} x_{ijk}.$$ We make the *normalization* that $\boldsymbol{\beta}_1 := \mathbf{0}_{K \times 1}$. Thus, we need $J-1$ regressors (with $K$ variables each) and we are estimating $(J-1)K$ parameters. In reality, the choice probabilities are computed using a "max-rescaling," $$ \Pr(y_i \vert x_i, \theta) = \frac{\exp(v_{iy_i} - K_i)}{\sum_{j=1}^J \exp(v_{ij} - K_i)},\quad K_i \equiv \max_{j \in \{1,...,J\}} v_{ij}.$$ ``` def util(x, thet) -> np.ndarray: '''util: Utility function. Explicitly implements the normalization for the first alternative (i.e. u[:, 0] = 0). Args. 
x: (N,J-1,K)-matrix of explanatory variables theta: (J-1,K)-matrix of coefficients Returns u: (N,J)-matrix of utilities (max-rescaled) ''' N,K,J_1 = x.shape J = J_1 + 1 assert thet.size == K*(J-1) theta = thet.reshape((K, J-1)) # minimizer may flatten this xb = (x * theta).sum(axis=1) # (N,K,J-1) * (K,J-1), sum over k-axis -> (N,J-1) oo = np.zeros((N,1)) # normalized alternative u = np.hstack([oo, xb]) # full N*J matrix of utilities u -= u.max(axis=1).reshape((N,1)) # max rescale return u def ccp(x, theta): N,K,J_1 = x.shape u = util(x, theta) # (N,J) denom = np.sum(np.exp(u), axis=1).reshape((N,1)) # (N,1) ccp = np.exp(u) / denom # (N,J) matrix return ccp def loglike(y, x, theta): N,K,J_1 = x.shape J = J_1 + 1 assert np.isin(y, np.arange(J)).all() u = util(x, theta) denom = np.sum(np.exp(u), axis=1).reshape((N,1)) u_i = np.take_along_axis(u, y.reshape((N,1)), axis=1) ll_i = u_i - np.log(denom) return ll_i def Q(theta): ll_i = loglike(y, X, theta) # reads y,x from global memory return -np.mean(ll_i) theta0 = np.zeros((K,J-1)) res = minimize(Q, theta0) print(f'Convergence success? {res.success} (after {res.nit} iterations).') pd.DataFrame(res.x, index=cols, columns=['beta']) theta = res.x.reshape((K,J-1)) pd.DataFrame(theta, columns=['Draw', 'Win']) ``` ## Inspect fit for individual rows This will print out the real outcomes, `y`, as well as the predicted probabilities of each outcome, where `0` denotes `L`, `1` denotes `D`, and `2` denotes `W` (Loss, Draw, Win, respectively). 
``` probs = ccp(X, res.x) prob_i = np.take_along_axis(probs, y.reshape(N,1), axis=1) # y is the actual outcome of the game, # the other three columns are our predicted probabilities tab = pd.DataFrame(np.hstack([y.reshape(-1,1), probs, dat[['team', 'home', 'B365_PrL', 'B365_PrD', 'B365_PrW']].values.reshape(-1,5) ] ), columns= ['y', 'Pr(0)', 'Pr(1)', 'Pr(2)', 'team', 'home', 'B365 P(0)', 'B365 P(1)', 'B365 P(2)']) tab.sample(10, random_state=1337).round(3) pr_i = np.take_along_axis(probs, y.reshape(-1,1), 1).flatten() probs_B365 = dat[['B365_PrL', 'B365_PrD', 'B365_PrW']].values pr_i_B365 = np.take_along_axis(probs_B365, y.reshape(-1,1), 1).flatten() ``` Evaluate the probability of the observed outcome from our model vs. B365. ``` tab = pd.DataFrame({'Us':pr_i, 'B365':pr_i_B365}) tab tab.mean() avg_prob_point_diff = (tab['Us'] - tab['B365']).mean() avg_times_closer = (tab['Us'] > tab['B365']).mean() print(f'On avg. our model as a diff. of {avg_prob_point_diff:5.2%}-points') print(f'Our model has a higher predicted probability of the observed outcome for {avg_times_closer:5.2%} of matches') if avg_prob_point_diff<0.0: print(f'*sigh* the model is not doing better than B365...') else: print(f'Wahoo, we''ve beat the market!') ``` How is it possible that our model performs *worse*? Well, it's maximizing the avg. *log* probability: ``` tab.mean().to_frame('Avg. Probability') np.log(tab).mean().to_frame('Avg. log Pr') ```
github_jupyter
# Lab 04 : Train vanilla neural network -- exercise # Training a one-layer net on FASHION-MNIST ``` # For Google Colaboratory import sys, os if 'google.colab' in sys.modules: # mount google drive from google.colab import drive drive.mount('/content/gdrive') # find automatically the path of the folder containing "file_name" : file_name = 'train_vanilla_nn_exercise.ipynb' import subprocess path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8") path_to_file = path_to_file.replace(file_name,"").replace('\n',"") # if previous search failed or too long, comment the previous line and simply write down manually the path below : #path_to_file = '/content/gdrive/My Drive/CS5242_2021_codes/codes/labs_lecture03/lab04_train_vanilla_nn' print(path_to_file) # change current path to the folder containing "file_name" os.chdir(path_to_file) !pwd import torch import torch.nn as nn import torch.optim as optim from random import randint import utils ``` ### Download the TRAINING SET (data+labels) ``` from utils import check_fashion_mnist_dataset_exists data_path=check_fashion_mnist_dataset_exists() train_data=torch.load(data_path+'fashion-mnist/train_data.pt') train_label=torch.load(data_path+'fashion-mnist/train_label.pt') print(train_data.size()) print(train_label.size()) ``` ### Download the TEST SET (data only) ``` test_data=torch.load(data_path+'fashion-mnist/test_data.pt') print(test_data.size()) ``` ### Make a one layer net class ``` class one_layer_net(nn.Module): def __init__(self, input_size, output_size): super(one_layer_net , self).__init__() # complete here def forward(self, x): x = # complete here p = # complete here return p ``` ### Build the net ``` net=one_layer_net(784,10) print(net) ``` ### Take the 4th image of the test set: ``` im= # complete here utils.show(im) ``` ### And feed it to the UNTRAINED network: ``` p = # complete here print(p) ``` ### Display visually the confidence scores ``` 
utils.show_prob_fashion_mnist(p) ``` ### Train the network (only 5000 iterations) on the train set ``` criterion = nn.NLLLoss() optimizer=torch.optim.SGD(net.parameters() , lr=0.01 ) for iter in range(1,5000): # choose a random integer between 0 and 59,999 # extract the corresponding picture and label # and reshape them to fit the network # complete here # complete here # complete here # feed the input to the net input.requires_grad_() # for backprobagation -- we will discuss it later # complete here # update the weights (all the magic happens here -- we will discuss it later) log_prob=torch.log(prob) loss = criterion(log_prob, label) optimizer.zero_grad() loss.backward() optimizer.step() ``` ### Take the 34th image of the test set: ``` im= # complete here utils.show(im) ``` ### Feed it to the TRAINED net: ``` p = # complete here print(p) ``` ### Display visually the confidence scores ``` utils.show_prob_fashion_mnist(prob) ``` ### Choose image at random from the test set and see how good/bad are the predictions ``` # choose a picture at random idx=randint(0, 10000-1) im=test_data[idx] # diplay the picture utils.show(im) # feed it to the net and display the confidence scores prob = net( im.view(1,784)) utils.show_prob_fashion_mnist(prob) ```
github_jupyter
# ImageNet classification This notebook shows an example of ImageNet classification The network that is used for inference is a variant of DoReFaNet, whose topology is illustrated in the following picture. The pink layers are executed in the Programmable Logic at reduced precision (1 bit for weights, 2 bit for activations) while the other layers are executed in python. This notebook shows how to classify an image choosen by the user, while [dorefanet-imagenet-samples](./dorefanet-imagenet-samples.ipynb) runs the classification on labeled images (extracted from the dataset) ![DoReFaNet topology](dorefanet-topology.svg) ``` import os, pickle, random from datetime import datetime from matplotlib import pyplot as plt from PIL import Image %matplotlib inline import numpy as np import cv2 import qnn from qnn import Dorefanet from qnn import utils ``` ## 1. Instantiate a Classifier Creating a classifier will automatically download the bitstream onto the device, allocate memory buffers and load the network hyperparameters and weights. The neural network to be implemented is specified in a json file (*dorefanet-layers.json* in this example) The weights for the non-offloaded layers are also loaded in a numpy dictionary to be used for execution in python. ``` classifier = Dorefanet() classifier.init_accelerator() net = classifier.load_network(json_layer="/usr/local/lib/python3.6/dist-packages/qnn/params/dorefanet-layers.json") conv0_weights = np.load('/usr/local/lib/python3.6/dist-packages/qnn/params/dorefanet-conv0.npy', encoding="latin1").item() fc_weights = np.load('/usr/local/lib/python3.6/dist-packages/qnn/params/dorefanet-fc-normalized.npy', encoding='latin1').item() ``` ## 2. 
Get ImageNet Classes information Load labels and *synsets* of the 1000 [ImageNet](www.image-net.org/) classes into dictionaries ``` with open("/home/xilinx/jupyter_notebooks/qnn/imagenet-classes.pkl", 'rb') as f: classes = pickle.load(f) names = dict((k, classes[k][1].split(',')[0]) for k in classes.keys()) synsets = dict((classes[k][0], classes[k][1].split(',')[0]) for k in classes.keys()) ``` ## 3. Open image to be classified The image to be run through object classification is loaded automatically from the path set in the *img_folder* variable. The original image is shown before processing and will be automatically selected as the last one downloaded (both variables *img_folder* and *image_name* can be modified to the desired path/image to be classified). ``` img_folder = "/home/xilinx/jupyter_notebooks/qnn/images/" img_file = os.path.join(img_folder, max(os.listdir(img_folder), key=lambda f: os.path.getctime(os.path.join(img_folder, f)))) img, img_class = classifier.load_image(img_file) im = Image.open(img_file) im ``` ## 4. Execute the first convolutional layer in Python The first layer of this neural network has not been quantized, thus will not be executed in the HW accelerator (which supports only quantized arithmetic). Python provides, with numpy, a backend to execute convolution and other matrix operations. For user convenience the most popular operations (convolutional layer, thresholding, relu and fully connected layer) are provided in the class. 
``` conv0_W = conv0_weights['conv0/W'] conv0_T = conv0_weights['conv0/T'] start = datetime.now() # 1st convolutional layer execution, having as input the image and the trained parameters (weights) conv0 = utils.conv_layer(img, conv0_W, stride=4) # The result in then quantized to 2 bits representation for the subsequent HW offload conv0 = utils.threshold(conv0, conv0_T) # Allocate accelerator output buffer end = datetime.now() micros = int((end - start).total_seconds() * 1000000) print("First layer SW implementation took {} microseconds".format(micros)) print(micros, file=open('timestamp.txt', 'w')) ``` ## 5. HW Offload of the quantized layers The core layers, which values have been quantized during training, are executed in the Programmable Logic. The hardware accelerator consists of a dataflow implementation of multiple layers (in this case, convolution+maxpool). The host code parses the network topology (specified in the json file) and manages the sequence of execution on the accelerator. ``` # Compute offloaded convolutional layers out_dim = net['merge4']['output_dim'] out_ch = net['merge4']['output_channels'] conv_output = classifier.get_accel_buffer(out_ch, out_dim); conv_input = classifier.prepare_buffer(conv0) start = datetime.now() classifier.inference(conv_input, conv_output) end = datetime.now() micros = int((end - start).total_seconds() * 1000000) print("HW implementation took {} microseconds".format(micros)) print(micros, file=open('timestamp.txt', 'a')) conv_output = classifier.postprocess_buffer(conv_output) ``` ## 6. 
Fully connected layers in python The fully connected layers, are executed in the python backend and the classification finalized ``` # Normalize results fc_input = conv_output / np.max(conv_output) start = datetime.now() # FC Layer 0 fc0_W = fc_weights['fc0/Wn'] fc0_b = fc_weights['fc0/bn'] fc0_out = utils.fully_connected(fc_input, fc0_W, fc0_b) fc0_out = utils.qrelu(fc0_out) fc0_out = utils.quantize(fc0_out, 2) # FC Layer 1 fc1_W = fc_weights['fc1/Wn'] fc1_b = fc_weights['fc1/bn'] fc1_out = utils.fully_connected(fc0_out, fc1_W, fc1_b) fc1_out = utils.qrelu(fc1_out) # FC Layer 2 fct_W = fc_weights['fct/W'] fct_b = np.zeros((fct_W.shape[1], )) fct_out = utils.fully_connected(fc1_out, fct_W, fct_b) end = datetime.now() micros = int((end - start).total_seconds() * 1000000) print("Fully-connected layers took {} microseconds".format(micros)) print(micros, file=open('timestamp.txt', 'a')) ``` ## 7. Classification Results The top-5 results of the inference are provided with the corresponding human readable labels. The final classification scores are computed by a SoftMax Operator, that gives the normalized probabilities for all the classes. ``` # Softmax out = utils.softmax(fct_out) # Top-5 results topn = utils.get_topn_indexes(out, 5) for k in topn: print("class:{0:>20}\tprobability:{1:>8.2%}".format(names[k].lower(), out[k])) x_pos = np.arange(len(topn)) plt.barh(x_pos, out[topn], height=0.4, color='g', zorder=3) plt.yticks(x_pos, [names[k] for k in topn]) plt.gca().invert_yaxis() plt.xlim([0,1]) plt.grid(zorder=0) plt.show() ``` ## 8. Performance evaluation This part show the performance of both software and hardware execution in terms of execution time, number of operations and number of operations over time. 
The software execution includes the first convolutional layer and the fully connected layers, while the hardware execution includes all the offloaded convolutional layers ``` array = np.loadtxt('timestamp.txt') array = list(map(lambda x: x/1000000, array)) MOPS = [238.176256, 1073.856969] TIME = [array[0] + array[2], array[1]] MOPSS = [m / t for (m, t) in zip(MOPS ,TIME)] LABELS = ['SW', 'HW'] f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, sharex='col', sharey='row', figsize=(15,2)) x_pos = np.arange(len(LABELS)) plt.yticks(x_pos, LABELS) ax1.barh(x_pos, TIME, height=0.6, color='r', zorder=3) ax1.invert_yaxis() ax1.set_xlabel("Execution Time [s]") ax1.set_ylabel("Platform") ax1.grid(zorder=0) ax2.barh(x_pos, MOPS, height=0.6, color='g', zorder=3) ax2.invert_yaxis() ax2.set_xlabel("# of Operations [MOPS]") ax2.grid(zorder=0) ax3.barh(x_pos, MOPSS, height=0.6, color='b', zorder=3) ax3.invert_yaxis() ax3.set_xlabel("Performances [MOPS/s]") ax3.grid(zorder=0) plt.show() ``` ## 9. SW execution of the quantized layers The core layers, which values have been quantized during training, are now executed in SW (by executing the HLS C++ source code). The host code parses the network topology (specified in the json file) and manages the sequence of execution on the accelerator. ``` classifier_sw = Dorefanet("python_sw") classifier_sw.init_accelerator() conv_output_sw = classifier_sw.get_accel_buffer(out_ch, out_dim); start = datetime.now() classifier_sw.inference(conv_input, conv_output_sw) end = datetime.now() micros = int((end - start).total_seconds() * 1000000) print("HW implementation took {} microseconds".format(micros)) ``` ## Reset the device ``` classifier.deinit_accelerator() from pynq import Xlnk xlnk = Xlnk(); xlnk.xlnk_reset() ```
github_jupyter
``` from __future__ import print_function from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn import datasets from skimage import exposure import numpy as np #import imutils import cv2 import matplotlib.pyplot as plt # load the MNIST digits dataset mnist = datasets.load_digits() # take the MNIST data and construct the training and testing split, using 75% of the # data for training and 25% for testing (trainData, testData, trainLabels, testLabels) = train_test_split(np.array(mnist.data), mnist.target, test_size=0.25, random_state=42) # now, let's take 10% of the training data and use that for validation (trainData, valData, trainLabels, valLabels) = train_test_split(trainData, trainLabels, test_size=0.1, random_state=84) # show the sizes of each data split print("training data points: {}".format(len(trainLabels))) print("validation data points: {}".format(len(valLabels))) print("testing data points: {}".format(len(testLabels))) # train the k-Nearest Neighbor classifier with the current value of `k` model = KNeighborsClassifier(n_neighbors=9) model.fit(trainData, trainLabels) # evaluate the model and update the accuracies list score = model.score(valData, valLabels) print("k=%d, accuracy=%.2f%%" % (9, score * 100)) # loop over a few random digits for i in np.random.randint(0, high=len(testLabels), size=(5,)): # grab the image and classify it image = testData[i] prediction = model.predict([image])[0] # convert the image for a 64-dim array to an 8 x 8 image compatible with OpenCV # show the prediction imgdata = np.array(image, dtype='float') pixels = imgdata.reshape((8,8)) plt.imshow(pixels,cmap='gray') plt.annotate(prediction,(3,3),bbox={'facecolor':'white'},fontsize=16) print("i think tha digit is : {}".format(prediction)) #cv2.imshow("image", image) plt.show() cv2.waitKey(0) import numpy as np import 
matplotlib import matplotlib.pyplot as plt import random # load the MNIST digits dataset mnist = datasets.load_digits() class Individual(object): def __init__(self, numbers, mutate_prob=0.01): if numbers is None: self.columns = [np.random.randint(8)] else: self.columns = numbers if mutate_prob > float(np.random.rand()): # Number of columns mutate_index = np.random.randint(8 - len(numbers)) # Make list of unique columns data = list(range(0,8)) data = [elem for elem in data if elem not in self.columns] random.shuffle(data) self.columns.extend(data[0:mutate_index]) def fitness(self): """ Returns fitness of individual Fitness is the accuracy form the classifier """ # take the MNIST data and construct the training and testing split, using 75% of the # data for training and 25% for testing (trainData, testData, trainLabels, testLabels) = train_test_split(np.array(mnist.data), mnist.target, test_size=0.25, random_state=42) for _, val in enumerate(self.columns): for j, _ in enumerate(testData): for k in range(val, 64, 8): testData[j][k] = 0 # evaluate the model and update the accuracies list score = model.score(testData, testLabels) #print("columns= ", self.columns, " accuracy= ", (score * 100)) #imgdata = np.array(testData[0], dtype='float') #pixels = imgdata.reshape((8,8)) #plt.imshow(pixels,cmap='gray') #plt.show() return 1/(score*len(self.columns)) class Population(object): def __init__(self, pop_size=10, mutate_prob=0.01, retain=0.2, random_retain=0.03): """ Args pop_size: size of population fitness_goal: goal that population will be graded against """ self.pop_size = pop_size self.mutate_prob = mutate_prob self.retain = retain self.random_retain = random_retain self.fitness_history = [] self.parents = [] self.done = False # Create individuals self.individuals = [] for x in range(pop_size): self.individuals.append(Individual(numbers=None, mutate_prob=self.mutate_prob)) def grade(self, generation=None): """ Grade the generation by getting the average fitness of its 
individuals """ fitness_sum = 0 for x in self.individuals: fitness_sum += x.fitness() pop_fitness = fitness_sum / self.pop_size self.fitness_history.append(pop_fitness) # Set Done flag if we hit target if int(round(pop_fitness)) == 0: self.done = True if generation is not None: print("Episode",generation,"Population fitness:", pop_fitness) def select_parents(self): """ Select the fittest individuals to be the parents of next generation (lower fitness it better in this case) Also select a some random non-fittest individuals to help get us out of local maximums """ # Sort individuals by fitness (we use reversed because in this case lower fintess is better) self.individuals = list(reversed(sorted(self.individuals, key=lambda x: x.fitness(), reverse=True))) # Keep the fittest as parents for next gen retain_length = self.retain * len(self.individuals) self.parents = self.individuals[:int(retain_length)] # Randomly select some from unfittest and add to parents array unfittest = self.individuals[int(retain_length):] for unfit in unfittest: if self.random_retain > np.random.rand(): self.parents.append(unfit) def breed(self): """ Crossover the parents to generate children and new generation of individuals """ target_children_size = self.pop_size - len(self.parents) children = [] if len(self.parents) > 0: while len(children) < target_children_size: father = random.choice(self.parents) mother = random.choice(self.parents) if father != mother: child_numbers = [ random.choice(pixel_pair) for pixel_pair in zip(father.columns, mother.columns)] child = Individual(child_numbers) children.append(child) self.individuals = self.parents + children def evolve(self): # 1. Select fittest self.select_parents() # 2. Create children and new generation self.breed() # 3. 
Reset parents and children self.parents = [] self.children = [] if __name__ == "__main__": pop_size = 100 mutate_prob = 0.01 retain = 0.1 random_retain = 0.03 pop = Population(pop_size=pop_size, mutate_prob=mutate_prob, retain=retain, random_retain=random_retain) SHOW_PLOT = True GENERATIONS = 5000 for x in range(GENERATIONS): pop.grade(generation=x) pop.evolve() if pop.done: print("Finished at generation:", x, ", Population fitness:", pop.fitness_history[-1]) print(pop.individuals[-1].columns) break # Plot fitness history if SHOW_PLOT: print("Showing fitness history graph") plt.plot(np.arange(len(pop.fitness_history)), pop.fitness_history) plt.ylabel('Fitness') plt.xlabel('Generations') plt.title('Fitness - pop_size {} mutate_prob {} retain {} random_retain {}'.format(pop_size, mutate_prob, retain, random_retain)) plt.show() data_item = ['map', 'compass', 'water', 'sandwich', 'glucose', 'tin', 'banana', 'apple', 'cheese', 'beer', 'suntan', 'camera', 'T', 'trousers', 'umbrella', 'w t', 'w o', 'note-case', 'sunglasses', 'towel', 'socks', 'book'] data_weight = [9, 13, 153, 50, 15, 68, 27, 39, 23, 52, 11, 32, 24, 48, 73, 42, 43, 22, 7, 18, 4, 30] data_value = [150, 35, 200, 160, 60, 45, 60, 40, 30, 10, 70, 30, 15, 10, 40, 70, 75, 80, 20, 12, 50, 10] data_sorted = sorted(zip(data_item, data_weight, data_value), key=lambda ivw: ivw[1]//ivw[2], reverse=True) max_weight = 400 class State(object): def __init__(self, level, benefit, weight, token): # token = list marking if a task is token. ex. [1, 0, 0] means # item0 token, item1 non-token, item2 non-token # available = list marking all tasks available, i.e. 
not explored yet self.level = level self.benefit = benefit self.weight = weight self.token = token self.ub = State.upperbound(self.token[:self.level]+[1]*(len(data_sorted)-level)) @staticmethod def upperbound(available): # define upperbound using fractional knaksack upperbound = 0 # initial upperbound # accumulated weight used to stop the upperbound summation remaining = max_weight for avail, (_, wei, val) in zip(available, data_sorted): wei2 = wei * avail # i could not find a better name if wei2 <= remaining: remaining -= wei2 upperbound += val * avail else: upperbound += val * remaining / wei2 break return upperbound def develop(self): level = self.level + 1 _, weight, value = data_sorted[self.level] left_weight = self.weight + weight if left_weight <= max_weight: # if not overweighted, give left child left_benefit = self.benefit + value left_token = self.token[:self.level]+[1]+self.token[level:] left_child = State(level, left_benefit, left_weight, left_token) else: left_child = None # anyway, give right child right_child = State(level, self.benefit, self.weight, self.token) return ([] if left_child is None else [left_child]) + [right_child] Root = State(0, 0, 0, [0] * len(data_sorted)) # start with nothing waiting_States = [] # list of States waiting to be explored current_state = Root while current_state.level < len(data_sorted): waiting_States.extend(current_state.develop()) # sort the waiting list based on their upperbound waiting_States.sort(key=lambda x: x.ub) # explore the one with largest upperbound current_state = waiting_States.pop() best_item = [item for tok, (item, _, _) in zip(current_state.token, data_sorted) if tok == 1] print("Total weight: ", current_state.weight) print("Total Value: ", current_state.benefit) print("Items:", best_item) def update_bound(columns): (trainData, testData, trainLabels, testLabels) = train_test_split(np.array(mnist.data), mnist.target, test_size=0.25, random_state=42) for _, val in enumerate(columns): for j, _ in 
enumerate(testData): for k in range(val, 64, 8): testData[j][k] = 0 score = model.score(testData, testLabels) score = score*len(columns) return score def rec(le, cols, cont, GLO_BOUND, explored): end = False while end is False: catch = 0 # Update the score for each node for i, val in enumerate(cols): cols[i][1] = update_bound(val[0]) # Check wrt to global bound if cols[i][1] >= GLO_BOUND: catch = 1 # save whether to continue branch cont[i] = 1 for i, val in enumerate(cont): if val == 1: explored.append(i) le -= 1 cols = [[[i, 0], 0], [[i, 1], 0], [[i, 2], 0], [[i, 3], 0], [[i, 4], 0], [[i, 5], 0], [[i, 6], 0], [[i, 7], 0]] cont = [0, 0, 0, 0, 0, 0, 0, 0] rec(le, cols, cont, GLO_BOUND, explored) # Make next set of columns if catch == 0: end = True level = 1 cols = [[[0], 0], [[1], 0], [[2], 0], [[3], 0], [[4], 0], [[5], 0], [[6], 0], [[7], 0]] cont = [0, 0, 0, 0, 0, 0, 0, 0] GLO_BOUND = 0.90 GLO_RCOLUMNS = [] explored = [] le = 8 rec(le, cols, cont, GLO_BOUND, explored) ```
github_jupyter
``` %matplotlib inline import gym import matplotlib import numpy as np import sys from collections import defaultdict if "../" not in sys.path: sys.path.append("../") from lib.envs.blackjack import BlackjackEnv from lib import plotting matplotlib.style.use('ggplot') env = BlackjackEnv() def make_epsilon_greedy_policy(Q, epsilon, nA): """ Creates an epsilon-greedy policy based on a given Q-function and epsilon. Args: Q: A dictionary that maps from state -> action-values. Each value is a numpy array of length nA (see below) epsilon: The probability to select a random action . float between 0 and 1. nA: Number of actions in the environment. Returns: A function that takes the observation as an argument and returns the probabilities for each action in the form of a numpy array of length nA. """ def policy_fn(observation): # Implement this! A = np.ones(nA, dtype=float)*epsilon/nA best_action = np.argmax(Q[observation]) A[best_action]+=1-epsilon return A return policy_fn def mc_control_epsilon_greedy(env, num_episodes, discount_factor=1.0, epsilon=0.1): """ Monte Carlo Control using Epsilon-Greedy policies. Finds an optimal epsilon-greedy policy. Args: env: OpenAI gym environment. num_episodes: Number of episodes to sample. discount_factor: Gamma discount factor. epsilon: Chance the sample a random action. Float betwen 0 and 1. Returns: A tuple (Q, policy). Q is a dictionary mapping state -> action values. policy is a function that takes an observation as an argument and returns action probabilities """ # Keeps track of sum and count of returns for each state # to calculate an average. We could use an array to save all # returns (like in the book) but that's memory inefficient. returns_sum = defaultdict(float) returns_count = defaultdict(float) # The final action-value function. # A nested dictionary that maps state -> (action -> action-value). 
Q = defaultdict(lambda: np.zeros(env.action_space.n)) # The policy we're following policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n) # Implement this! for i_episode in range(num_episodes): observation = env.reset() episodes = [] for t in range(100): probs = policy(observation) action = np.random.choice(np.arange(len(probs)), p=probs) next_observation, reward, done, _ = env.step(action) episodes.append([observation, action, reward]) if done: break observation = next_observation state_g = {} for i in range(len(episodes)): state = episodes[i][0] action = episodes[i][1] sa_pair = (state, action) g = sum(episodes[j][2]*discount_factor**(j-i-1) for j in range(i,len(episodes))) if state not in state_g: state_g[state] = g else: g = state_g[state] returns_count[sa_pair]+=1.0 returns_sum[sa_pair]+=g Q[state][action] = returns_sum[sa_pair]/returns_count[sa_pair] return Q, policy Q, policy = mc_control_epsilon_greedy(env, num_episodes=500000, epsilon=0.1) # For plotting: Create value function from action-value function # by picking the best action at each state V = defaultdict(float) for state, actions in Q.items(): action_value = np.max(actions) V[state] = action_value plotting.plot_value_function(V, title="Optimal Value Function") ```
github_jupyter
### Features: * T-SNE and Word2Vec: https://www.kaggle.com/jeffd23/quora-question-pairs/visualizing-word-vectors-with-t-sne * WMD and Word2Vec: https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/WMD_tutorial.ipynb ### Processing: * Replace abbreviations: https://www.kaggle.com/life2short/quora-question-pairs/data-processing-replace-abbreviation-of-word ``` import numpy as np import pandas as pd import gensim import re import nltk import datetime import operator from collections import Counter from nltk.corpus import stopwords from nltk import word_tokenize, ngrams import matplotlib.pyplot as plt %matplotlib inline from pylab import plot, show, subplot, specgram, imshow, savefig import csv from gensim import corpora, models, similarities import gensim import math #https://www.kaggle.com/life2short/data-processing-replace-abbreviation-of-word punctuation='["\'?,\.]' # I will replace all these punctuation with '' abbr_dict={ "what's":"what is", "what're":"what are", "who's":"who is", "who're":"who are", "where's":"where is", "where're":"where are", "when's":"when is", "when're":"when are", "how's":"how is", "how're":"how are", "i'm":"i am", "we're":"we are", "you're":"you are", "they're":"they are", "it's":"it is", "he's":"he is", "she's":"she is", "that's":"that is", "there's":"there is", "there're":"there are", "i've":"i have", "we've":"we have", "you've":"you have", "they've":"they have", "who've":"who have", "would've":"would have", "not've":"not have", "i'll":"i will", "we'll":"we will", "you'll":"you will", "he'll":"he will", "she'll":"she will", "it'll":"it will", "they'll":"they will", "isn't":"is not", "wasn't":"was not", "aren't":"are not", "weren't":"were not", "can't":"can not", "couldn't":"could not", "don't":"do not", "didn't":"did not", "shouldn't":"should not", "wouldn't":"would not", "doesn't":"does not", "haven't":"have not", "hasn't":"has not", "hadn't":"had not", "won't":"will not", punctuation:'', '\s+':' ', # replace multi 
space with one single space } def process_data(data): data.replace(abbr_dict,regex=True,inplace=True) return data def basic_cleaning2(string): string = str(string) string = string.lower() string = re.sub('[0-9\(\)\!\^\%\$\'\"\.;,-\?\{\}\[\]\\/]', ' ', string) string = ' '.join([i for i in string.split() if i not in ["a", "and", "of", "the", "to", "on", "in", "at", "is"]]) string = re.sub(' +', ' ', string) return string def basic_clean2df(df2): df = df2.copy() text_feats = df.select_dtypes(include=['object']).columns.values for i in text_feats: df[i] = df[i].apply(lambda x: basic_cleaning2(x).split()) df[i] = df[i].apply(lambda x: (' '.join(i for i in x))) return df df_train = pd.read_csv('df_train_stemmed.csv') df_test = pd.read_csv('df_test_stemmed.csv') df_train = process_data(df_train) df_test = process_data(df_test) df_train = basic_clean2df(df_train) df_test = basic_clean2df(df_test) # https://www.kaggle.com/philschmidt/quora-question-pairs/quora-eda-model-selection-roc-pr-plots def normalized_word_share(row): w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(" "))) w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(" "))) return 1.0 * len(w1 & w2)/(len(w1) + len(w2)) def modelselection_features(df2): df = df2.copy() df['q1len'] = df['question1'].str.len() df['q2len'] = df['question2'].str.len() df['q1_n_words'] = df['question1'].apply(lambda row: len(row.split(" "))) df['q2_n_words'] = df['question2'].apply(lambda row: len(row.split(" "))) df['word_share'] = df.apply(normalized_word_share, axis=1) return df eda_train = modelselection_features(df_train) eda_test = modelselection_features(df_test) eda_train.to_csv('train_eda_features.csv', index = False) eda_test.to_csv('test_eda_features.csv', index = False) full = pd.concat([df_train, df_test]) # https://www.kaggle.com/puneetsl/quora-question-pairs/unusual-meaning-map def basic_cleaning(string): string = str(string) try: string = string.decode('unicode-escape') except 
Exception: pass string = string.lower() string = re.sub(' +', ' ', string) return string def basic_cleaning2(string): string = str(string) string = string.lower() string = re.sub('[0-9\(\)\!\^\%\$\'\"\.;,-\?\{\}\[\]\\/]', ' ', string) string = ' '.join([i for i in string.split() if i not in ["a", "and", "of", "the", "to", "on", "in", "at", "is"]]) string = re.sub(' +', ' ', string) return string def idf(word): return 1 - math.sqrt(docf[word]/total_docs) def w2v_sim(w1, w2): try: return model.similarity(w1, w2)*idf(w1)*idf(w2) except Exception: return 0.0 def img_feature(row): s1 = row['question1'] s2 = row['question2'] t1 = list((basic_cleaning2(s1)).split()) t2 = list((basic_cleaning2(s2)).split()) Z = [[w2v_sim(x, y) for x in t1] for y in t2] a = np.array(Z, order='C') return [np.resize(a,(10,10)).flatten()] def get_img_features(df): s = df img = s.apply(img_feature, axis=1, raw=True) pix_col = [[] for y in range(100)] for k in img.iteritems(): for f in range(len(list(k[1][0]))): pix_col[f].append(k[1][0][f]) x_train = pd.DataFrame() for g in range(len(pix_col)): x_train['img'+str(g)] = pix_col[g] return x_train def to_sentences(df): questions = [] for i in range(len(df)): questions.append(df.loc[i, 'question1']) questions.append(df.loc[i, 'question2']) for i in questions: sentences.append(nltk.word_tokenize(questions[i])) return sentences full = pd.concat([df_train, df_test]) sentences = to_sentences(full) model = gensim.models.Word2Vec(sentences, size=100, window=5, min_count=5, workers=4) xtr = get_img_features(df_train) xte = get_img_features(df_test) xtr.to_csv('train_img_features.csv', index = False) xte.to_csv('test_img_features.csv', index = False) # https://www.kaggle.com/sudalairajkumar/quora-question-pairs/simple-exploration-notebook-quora-ques-pair def get_unigrams(que): return [word for word in word_tokenize(que.lower()) if word not in eng_stopwords] def get_common_unigrams(row): return len( 
set(row["unigrams_ques1"]).intersection(set(row["unigrams_ques2"])) ) def get_common_unigram_ratio(row): return float(row["unigrams_common_count"]) / max(len( set(row["unigrams_ques1"]).union(set(row["unigrams_ques2"])) ),1) def get_bigrams(que): return [i for i in ngrams(que, 2)] def get_common_bigrams(row): return len( set(row["bigrams_ques1"]).intersection(set(row["bigrams_ques2"])) ) def get_common_bigram_ratio(row): return float(row["bigrams_common_count"]) / max(len( set(row["bigrams_ques1"]).union(set(row["bigrams_ques2"])) ),1) def feature_extraction(row): que1 = str(row['question1']) que2 = str(row['question2']) out_list = [] # get unigram features # unigrams_que1 = [word for word in que1.lower().split() if word not in eng_stopwords] unigrams_que2 = [word for word in que2.lower().split() if word not in eng_stopwords] common_unigrams_len = len(set(unigrams_que1).intersection(set(unigrams_que2))) common_unigrams_ratio = float(common_unigrams_len) / max(len(set(unigrams_que1).union(set(unigrams_que2))),1) out_list.extend([common_unigrams_len, common_unigrams_ratio]) # get bigram features # bigrams_que1 = [i for i in ngrams(unigrams_que1, 2)] bigrams_que2 = [i for i in ngrams(unigrams_que2, 2)] common_bigrams_len = len(set(bigrams_que1).intersection(set(bigrams_que2))) common_bigrams_ratio = float(common_bigrams_len) / max(len(set(bigrams_que1).union(set(bigrams_que2))),1) out_list.extend([common_bigrams_len, common_bigrams_ratio]) # get trigram features # trigrams_que1 = [i for i in ngrams(unigrams_que1, 3)] trigrams_que2 = [i for i in ngrams(unigrams_que2, 3)] common_trigrams_len = len(set(trigrams_que1).intersection(set(trigrams_que2))) common_trigrams_ratio = float(common_trigrams_len) / max(len(set(trigrams_que1).union(set(trigrams_que2))),1) out_list.extend([common_trigrams_len, common_trigrams_ratio]) return out_list eng_stopwords = set(stopwords.words('english')) train_X = np.vstack( np.array(df_train.apply(lambda row: feature_extraction(row), axis=1)) 
) test_X = np.vstack( np.array(df_test.apply(lambda row: feature_extraction(row), axis=1)) ) train_X = pd.DataFrame(train_X) train_X.columns = ['common_unigrams_len', 'common_unigrams_ratio', 'common_bigrams_len', 'common_bigrams_ratio', 'common_trigrams_len', 'common_trigrams_ratio'] test_X = pd.DataFrame(test_X) test_X.columns = ['common_unigrams_len', 'common_unigrams_ratio', 'common_bigrams_len', 'common_bigrams_ratio', 'common_trigrams_len', 'common_trigrams_ratio'] train_X.to_csv('train_SRKgrams_features.csv', index = False) test_X.to_csv('test_SRKgrams_features.csv', index = False) # https://www.kaggle.com/dasolmar/xgb-with-whq-jaccard print("Original data: X_train: {}, X_test: {}".format(df_train.shape, df_test.shape)) print("Features processing, be patient...") # If a word appears only once, we ignore it completely (likely a typo) # Epsilon defines a smoothing constant, which makes the effect of extremely rare words smaller def get_weight(count, eps=10000, min_count=2): return 0 if count < min_count else 1 / (count + eps) train_qs = pd.Series(df_train['question1'].tolist() + df_train['question2'].tolist()).astype(str) words = (" ".join(train_qs)).lower().split() counts = Counter(words) weights = {word: get_weight(count) for word, count in counts.items()} stops = set(stopwords.words("english")) def word_shares(row): q1_list = str(row['question1']).lower().split() q1 = set(q1_list) q1words = q1.difference(stops) if len(q1words) == 0: return '0:0:0:0:0:0:0:0' q2_list = str(row['question2']).lower().split() q2 = set(q2_list) q2words = q2.difference(stops) if len(q2words) == 0: return '0:0:0:0:0:0:0:0' words_hamming = sum(1 for i in zip(q1_list, q2_list) if i[0]==i[1])/max(len(q1_list), len(q2_list)) q1stops = q1.intersection(stops) q2stops = q2.intersection(stops) q1_2gram = set([i for i in zip(q1_list, q1_list[1:])]) q2_2gram = set([i for i in zip(q2_list, q2_list[1:])]) shared_2gram = q1_2gram.intersection(q2_2gram) shared_words = 
q1words.intersection(q2words) shared_weights = [weights.get(w, 0) for w in shared_words] q1_weights = [weights.get(w, 0) for w in q1words] q2_weights = [weights.get(w, 0) for w in q2words] total_weights = q1_weights + q1_weights R1 = np.sum(shared_weights) / np.sum(total_weights) #tfidf share R2 = len(shared_words) / (len(q1words) + len(q2words) - len(shared_words)) #count share R31 = len(q1stops) / len(q1words) #stops in q1 R32 = len(q2stops) / len(q2words) #stops in q2 Rcosine_denominator = (np.sqrt(np.dot(q1_weights,q1_weights))*np.sqrt(np.dot(q2_weights,q2_weights))) Rcosine = np.dot(shared_weights, shared_weights)/Rcosine_denominator if len(q1_2gram) + len(q2_2gram) == 0: R2gram = 0 else: R2gram = len(shared_2gram) / (len(q1_2gram) + len(q2_2gram)) return '{}:{}:{}:{}:{}:{}:{}:{}'.format(R1, R2, len(shared_words), R31, R32, R2gram, Rcosine, words_hamming) df = pd.concat([df_train, df_test]) df['word_shares'] = df.apply(word_shares, axis=1, raw=True) x = pd.DataFrame() x['word_match'] = df['word_shares'].apply(lambda x: float(x.split(':')[0])) x['word_match_2root'] = np.sqrt(x['word_match']) x['tfidf_word_match'] = df['word_shares'].apply(lambda x: float(x.split(':')[1])) x['shared_count'] = df['word_shares'].apply(lambda x: float(x.split(':')[2])) x['stops1_ratio'] = df['word_shares'].apply(lambda x: float(x.split(':')[3])) x['stops2_ratio'] = df['word_shares'].apply(lambda x: float(x.split(':')[4])) x['shared_2gram'] = df['word_shares'].apply(lambda x: float(x.split(':')[5])) x['cosine'] = df['word_shares'].apply(lambda x: float(x.split(':')[6])) x['words_hamming'] = df['word_shares'].apply(lambda x: float(x.split(':')[7])) x['diff_stops_r'] = x['stops1_ratio'] - x['stops2_ratio'] x['len_q1'] = df['question1'].apply(lambda x: len(str(x))) x['len_q2'] = df['question2'].apply(lambda x: len(str(x))) x['diff_len'] = x['len_q1'] - x['len_q2'] x['caps_count_q1'] = df['question1'].apply(lambda x:sum(1 for i in str(x) if i.isupper())) x['caps_count_q2'] = 
df['question2'].apply(lambda x:sum(1 for i in str(x) if i.isupper())) x['diff_caps'] = x['caps_count_q1'] - x['caps_count_q2'] x['len_char_q1'] = df['question1'].apply(lambda x: len(str(x).replace(' ', ''))) x['len_char_q2'] = df['question2'].apply(lambda x: len(str(x).replace(' ', ''))) x['diff_len_char'] = x['len_char_q1'] - x['len_char_q2'] x['len_word_q1'] = df['question1'].apply(lambda x: len(str(x).split())) x['len_word_q2'] = df['question2'].apply(lambda x: len(str(x).split())) x['diff_len_word'] = x['len_word_q1'] - x['len_word_q2'] x['avg_world_len1'] = x['len_char_q1'] / x['len_word_q1'] x['avg_world_len2'] = x['len_char_q2'] / x['len_word_q2'] x['diff_avg_word'] = x['avg_world_len1'] - x['avg_world_len2'] x['exactly_same'] = (df['question1'] == df['question2']).astype(int) x['duplicated'] = df.duplicated(['question1','question2']).astype(int) add_word_count(x, df,'how') add_word_count(x, df,'what') add_word_count(x, df,'which') add_word_count(x, df,'who') add_word_count(x, df,'where') add_word_count(x, df,'when') add_word_count(x, df,'why') print(x.columns) print(x.describe()) feature_names = list(x.columns.values) create_feature_map(feature_names) print("Features: {}".format(feature_names)) x_train = x[:df_train.shape[0]] x_test = x[df_train.shape[0]:] y_train = df_train['is_duplicate'].values del x, df_train x_train.drop(['len_q1', 'len_q2', 'len_word_q1', 'len_word_q2', 'diff_len'], axis = 1, inplace = True) x_test.drop(['len_q1', 'len_q2', 'len_word_q1', 'len_word_q2', 'diff_len'], axis = 1, inplace = True) x_train.to_csv('train_whq_with_jaccard_feats.csv', index = False) x_test.to_csv('train_whq_with_jaccard_feats.csv', index = False) # https://www.kaggle.com/antriksh5235/doc2vec-starter import math from gensim.models.doc2vec import Doc2Vec from gensim.models import doc2vec def clean_sentence(sent): regex = re.compile('([^\s\w]|_)+') sentence = regex.sub('', sent).lower() sentence = sentence.split(" ") for word in list(sentence): if word in 
STOP_WORDS: sentence.remove(word) sentence = " ".join(sentence) return sentence def cosine(v1, v2): v1 = np.array(v1) v2 = np.array(v2) return np.dot(v1, v2) / (np.sqrt(np.sum(v1**2)) * np.sqrt(np.sum(v2**2))) def concatenate(data): X_set1 = data['question1'] X_set2 = data['question2'] X = X_set1.append(X_set2, ignore_index=True) return X class LabeledLineSentence(object): def __init__(self, doc_list, labels_list): self.labels_list = labels_list self.doc_list = doc_list def __iter__(self): for idx, doc in enumerate(self.doc_list): yield doc2vec.TaggedDocument(words=word_tokenize(doc), tags=[self.labels_list[idx]]) data = df_train.dropna(how="any") #data = df_test.dropna(how="any") for col in ['question1', 'question2']: data[col] = data[col].apply(clean_sentence) y = data['is_duplicate'] X_train, X_test, y_train, y_test = tts(data[['id','question1', 'question2']], y, test_size=0.3) import multiprocessing cores = multiprocessing.cpu_count() assert gensim.models.doc2vec.FAST_VERSION > -1 X = concatenate(X_train) labels = [] for label in X_train['id'].tolist(): labels.append('SENT_%s_1' % label) for label in X_train['id'].tolist(): labels.append('SENT_%s_2' % label) docs = LabeledLineSentence(X.tolist(), labels) it = docs.__iter__() model1 = Doc2Vec(it, size=12, window=8, min_count=5, workers=4) for epoch in range(10): model1.train(it) model1.alpha -= 0.0002 # decrease the learning rate model1.min_alpha = model1.alpha # fix the learning rate, no deca model1.train(it) X_test.index = np.arange(0, X_test['question1'].shape[0]) y_test.index = np.arange(0, X_test['question1'].shape[0]) count = 0 for i in range(X_test['question1'].shape[0]): doc1 = word_tokenize(X_test['question1'][i]) doc2 = word_tokenize(X_test['question2'][i]) docvec1 = model1.infer_vector(doc1) docvec2 = model1.infer_vector(doc2) print(cosine(docvec1, docvec2), y_test[i]) if count>100: break count+=1 import timeit src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/data/' 
train_orig = pd.read_csv(src + 'train.csv', header=0) test_orig = pd.read_csv(src + 'test.csv', header=0) tic0=timeit.default_timer() df1 = train_orig[['question1']].copy() df2 = train_orig[['question2']].copy() df1_test = test_orig[['question1']].copy() df2_test = test_orig[['question2']].copy() df2.rename(columns = {'question2':'question1'},inplace=True) df2_test.rename(columns = {'question2':'question1'},inplace=True) train_questions = df1.append(df2) train_questions = train_questions.append(df1_test) train_questions = train_questions.append(df2_test) #train_questions.drop_duplicates(subset = ['qid1'],inplace=True) train_questions.drop_duplicates(subset = ['question1'],inplace=True) train_questions.reset_index(inplace=True,drop=True) questions_dict = pd.Series(train_questions.index.values,index=train_questions.question1.values).to_dict() train_cp = train_orig.copy() test_cp = test_orig.copy() train_cp.drop(['qid1','qid2'],axis=1,inplace=True) test_cp['is_duplicate'] = -1 test_cp.rename(columns={'test_id':'id'},inplace=True) comb = pd.concat([train_cp,test_cp]) comb['q1_hash'] = comb['question1'].map(questions_dict) comb['q2_hash'] = comb['question2'].map(questions_dict) q1_vc = comb.q1_hash.value_counts().to_dict() q2_vc = comb.q2_hash.value_counts().to_dict() def try_apply_dict(x,dict_to_apply): try: return dict_to_apply[x] except KeyError: return 0 #map to frequency space comb['q1_freq'] = comb['q1_hash'].map(lambda x: try_apply_dict(x,q1_vc) + try_apply_dict(x,q2_vc)) comb['q2_freq'] = comb['q2_hash'].map(lambda x: try_apply_dict(x,q1_vc) + try_apply_dict(x,q2_vc)) train_comb = comb[comb['is_duplicate'] >= 0][['id','q1_hash','q2_hash','q1_freq','q2_freq','is_duplicate']] test_comb = comb[comb['is_duplicate'] < 0][['id','q1_hash','q2_hash','q1_freq','q2_freq']] # https://www.kaggle.com/jpmiller/which-topics-are-actually-hot def connected_tuples(pairs): # for every element, we keep a reference to the list it belongs to lists_by_element = {} def 
make_new_list_for(x, y): lists_by_element[x] = lists_by_element[y] = [x, y] def add_element_to_list(lst, el): lst.append(el) lists_by_element[el] = lst def merge_lists(lst1, lst2): merged_list = lst1 + lst2 for el in merged_list: lists_by_element[el] = merged_list for x, y in pairs: xList = lists_by_element.get(x) yList = lists_by_element.get(y) if not xList and not yList: make_new_list_for(x, y) if xList and not yList: add_element_to_list(xList, y) if yList and not xList: add_element_to_list(yList, x) if xList and yList and xList != yList: merge_lists(xList, yList) # return the unique lists present in the dictionary return set(tuple(l) for l in lists_by_element.values()) train = pd.read_csv('df_train_stemmed.csv') trainstay = train.loc[train['is_duplicate'] == 1, ['qid1', 'qid2']] stays = pd.Series(trainstay.values.ravel()).unique().tolist() allvals = list(range(1, 537934)) # one larger than our max qid solos = set(allvals) - set (stays) qid1 = trainstay['qid1'].tolist() qid2 = trainstay['qid2'].tolist() mypairs = list(zip(qid1, qid2)) cpairs = connected_tuples(mypairs) universe = cpairs.union(solos) uni2 = list(universe) ctlist = [] i = 0 while i < len(uni2): item = str(uni2[i]) ct = item.count(',') + 1 ctlist.append(ct) i += 1 print('Number of Questions in all Sets: {}'.format(sum(ctlist))) print('Lengths of Connected Sets') # put it in d dataframe qSets = pd.DataFrame( {'qid': uni2, 'set_length': ctlist} ) qSets.sort_values('set_length', axis=0, ascending=False, inplace=True) qSets.reset_index(inplace=True, drop=True) qSets['set_id'] = qSets.index + 1 qSetsS = qSets.loc[qSets['set_length'] == 1] qSetsL = qSets.loc[qSets['set_length'] > 1] # unnest rows = [] _ = qSetsL.apply(lambda row: [rows.append([row['set_id'], row['set_length'], nn]) for nn in row.qid], axis=1) qRef = pd.DataFrame(rows, columns = ['set_id', 'set_length', 'qid']) qRef = qRef.append(qSetsS) qRef.sort_values('qid', inplace=True) qRef.reset_index(inplace=True, drop=True) qRef.to_csv('qRef.csv', 
index=False) q1s = train.iloc[:, [1,3]] q2s = train.iloc[:, [2,4]] new_cols = ['qid', 'question'] q1s.columns = new_cols q2s.columns = new_cols lookup = pd.concat([q1s, q2s], ignore_index=True) lookup.drop_duplicates('qid', inplace=True) qTop = qRef.drop_duplicates('set_id', keep='first') j = qTop.merge(lookup, how='left', on='qid') j.sort_values('set_length', ascending=False).head(6) ```
github_jupyter
# Por que computação quântica? ## O que é um computador? Visto que você conseguiu acessar esta página, você já deve saber o que é um computador. Hoje, os computadores assumem muitas formas: de laptops e telefones aos sistemas que controlam os semáforos. Parece que os computadores podem fazer qualquer coisa! Esses sistemas podem ser muito complexos e especializados, mas todos eles têm uma coisa em comum: um computador executa um conjunto de instruções sobre algumas informações de entrada para nos fornecer novas informações (de saída). As instruções que damos aos computadores precisam ser muito específicas e inequívocas. Chamamos esses conjuntos de instruções de *algoritmos,* e grande parte da pesquisa em computadores é sobre o comportamento de diferentes algoritmos. Neste curso, consideraremos apenas os computadores em sua forma mais simples; sem teclados, mouses ou telas - apenas informações e algoritmos. ![Uma renderização artística de basicamente todos os computadores](images/why-qc/basically_all_computers.png) ## Classificando algoritmos de computador Para entender o papel dos computadores quânticos perante os computadores tradicionais modernos, primeiro precisamos aprender como medimos o desempenho de diferentes algoritmos. Na ciência da computação, classificamos os algoritmos de acordo com a forma como os [recursos](gloss:resources) que eles usam crescem com o tamanho da entrada. Chamamos isso de *complexidade* do algoritmo. Por exemplo, um algoritmo que decide se um número é par só precisa olhar para o último dígito desse número. Nesse caso, a 'entrada' é um número e a saída é 'Par' ou 'Ímpar'. Chamamos isso de algoritmo de *tempo constante* , porque o tempo que o algoritmo leva para ser concluído não depende do tamanho do número de entrada. Pode levar tempos diferentes para computadores diferentes para obter esse resultado, mas isso se deve a outros fatores e não ao comprimento da entrada. 
![Os passos de um algoritmo que determina se um número é par ou ímpar](images/why-qc/odd-even-algo.svg)

Vejamos um exemplo diferente. Desta vez, a entrada são dois números de igual comprimento, e o problema é somá-los. Nesse caso, a saída será um novo número. Ao adicionar dois números de vários dígitos, um algoritmo comum que você provavelmente aprendeu na escola começa com o dígito mais à direita de cada número e os soma. Em seguida, ele move um dígito para a esquerda (transportando um '1' se o resultado for maior que 9) e repete o processo. O computador repete isso até que não haja mais dígitos para adicionar e o algoritmo termina.

![Animação mostrando as etapas de um algoritmo de adição](images/why-qc/adding-algo.svg)

<!-- ::: q-block.exercise -->

### Quão complexa é a adição?

<!-- ::: q-quiz(goal="intro-why-qc-0") -->

<!-- ::: .question -->

O tempo que esse algoritmo de adição leva para ser concluído...

<!-- ::: -->

<!-- ::: .option(correct) -->

1. ...cresce linearmente (proporcionalmente) com o comprimento do número de entrada (tempo linear).

<!-- ::: -->

<!-- ::: .option -->

1. ...não é afetado pelo comprimento do número de entrada (tempo constante)

<!-- ::: -->

<!-- ::: .option -->

1. ...cresce com o quadrado do comprimento do número de entrada (tempo quadrático)

<!-- ::: -->

<!-- ::: -->

<!-- ::: -->

Novamente, computadores diferentes executarão esse algoritmo em velocidades diferentes; um laptop pode realizar adição milhões de vezes mais rápido do que um humano. Mas se você pode fazer um milhão de operações por segundo ou apenas uma, a taxa de crescimento será a mesma.

![gráfico de tempos de execução constantes e lineares versus tamanhos de entrada para diferentes tempos de execução](images/why-qc/graph-linear-constant.svg)

Aqui está um exemplo final que é particularmente interessante para nós. Digamos que eu tenha um número secreto (como um PIN) e o problema seja adivinhar esse número. Nesse caso, o tamanho do problema é o comprimento do número.
Digamos que a única maneira de verificarmos se nossa resposta está correta é digitando-a em um teclado. Como não temos informações sobre qual pode ser esse número, o melhor algoritmo para encontrar esse número secreto usa um método de 'força bruta', o que significa que não faz nada inteligente e simplesmente tenta todos os números possíveis. Quanto tempo isso levaria? Em teoria, poderíamos ter sorte e adivinhar a resposta de uma só vez, mas isso é muito improvável. Na média, teríamos que tentar cerca de metade das entradas possíveis, então o tempo de execução do nosso algoritmo é proporcional ao número de combinações possíveis. A questão agora é: como o número de combinações possíveis cresce com o tamanho do número secreto? ![Animação mostrando as etapas de um algoritmo de busca de força bruta](images/why-qc/search-algo.svg) Cada dígito que adicionamos ao nosso número secreto multiplica o número de combinações possíveis por 10. Por exemplo, um número secreto com 1 dígito tem 10 valores possíveis (0, 1, 2, 3, 4, 5, 6, 7, 8 &amp; 9), e um número secreto com 2 dígitos tem 100 valores possíveis. Assumindo que o tempo necessário para adivinhar cada dígito leva a mesma quantidade de tempo (independentemente do comprimento), podemos representar isso matematicamente assim: $$ \cssId{T}{T} \cssId{prop_to}{\propto} 10^\cssId{exp}{d}$$ Você notará que o número de dígitos (d) é o expoente nesta equação e, como tal, dizemos que este é um algoritmo de *tempo exponencial* e que o tempo de execução cresce exponencialmente com o comprimento da entrada. ![gráfico de tempos de execução constantes, lineares e exponenciais versus tamanhos de entrada para diferentes tempos de execução](images/why-qc/graph-all.svg) ## Por que medimos algoritmos dessa forma? Diferentes computadores têm diferentes pontos fortes; certas operações podem ser mais rápidas em um computador do que em outro. 
Ao estudar o crescimento versus o tamanho da entrada, podemos ignorar detalhes específicos do dispositivo e realmente medir o *algoritmo* , em vez da combinação específica de algoritmo e computador. É importante ressaltar que saber como um algoritmo é dimensionado com o tamanho da entrada também nos diz se o algoritmo crescerá de forma gerenciável ou não. Vamos pensar no algoritmo de adição de tempo linear que vimos acima. Se pudéssemos somar dois números de 10 dígitos em um segundo, devido à taxa linear de crescimento, poderíamos somar dois números de 20 dígitos em dois segundos. Cada 10 dígitos extras deve adicionar aproximadamente mais um segundo ao nosso tempo de computação. Para contrastar, imagine que você pode encontrar um PIN de 10 dígitos em 1 segundo usando o algoritmo de pesquisa de tempo exponencial acima. Isso significa que seu computador é rápido o suficiente para tentar ~ 5.000.000.000 combinações por segundo. Esperaríamos que este computador usando esse algoritmo levasse aproximadamente 5.000.000.000 segundos (~150 anos) para encontrar um PIN de 20 dígitos. Adicionar outros 10 dígitos aumenta isso para cerca de 150.000.000.000 anos (~ 120x a idade do universo). Algoritmos de tempo exponencial com uma entrada de tamanho modesto (neste caso ~ 30 dígitos) podem se tornar não apenas difíceis, mas literalmente impossíveis de realizar. Embora esse problema de adivinhação de PIN seja um exemplo artificial que pretendíamos ser o mais simples possível, existem muitos problemas reais em ciência da computação para os quais temos apenas algoritmos ineficientes. Apesar da impressionante velocidade dos computadores atuais, esses problemas [intratáveis](gloss:intractable) podem ser muito difíceis até mesmo para os maiores supercomputadores. Mas se pudermos encontrar algoritmos que cresçam com mais eficiência, esses problemas intratáveis podem se tornar subitamente gerenciáveis, mesmo com computadores relativamente lentos ou não confiáveis. 
É aí que entra a computação quântica. ## Como a computação quântica pode ajudar? Até agora, pensamos em algoritmos de uma maneira muito abstrata, mas os computadores que executam esses algoritmos devem existir no mundo real. Quer esses computadores sejam microchips de alta potência ou humanos com canetas e papel, todos os computadores são governados pelas leis da física, e as operações que eles podem realizar limitam os algoritmos que podemos criar. A física é uma tentativa de descobrir o conjunto de regras que tudo no universo segue. Por volta do início do século 20, por meio de experimentos delicados em laboratórios, os físicos viram comportamentos estranhos que sua física atual não conseguia explicar. Isso significava que as regras não eram muito precisas, então eles desenvolveram a física 'quântica' mais completa, que descreve muito bem esse comportamento. Os físicos criaram a física quântica para explicar o comportamento que nunca tinham visto antes, e os cientistas da computação descobriram que poderiam (em teoria) explorar esse comportamento recém-descoberto para criar algoritmos mais eficientes. Como resultado, existem certos problemas que acreditamos serem intratáveis para computadores convencionais, mas são gerenciáveis para um computador 'quântico' que pode explorar esse comportamento. Um desses problemas é a *fatoração de inteiros* . Digamos que temos um inteiro que chamaremos de '$x$'. Um algoritmo de fatoração encontra os inteiros $p$ e $q$ tais que $p×q = x$. Isso às vezes é fácil; você pode dizer de relance que $ 2.000 = 2 × 1.000 $, mas se $ x $ é o produto de dois números primos grandes, esse problema se torna muito difícil. Quando falamos sobre fatoração de inteiros, vamos assumir o cenário mais difícil (pior caso). 
Na célula de código abaixo, estamos atribuindo um número de 250 dígitos à variável <code>x</code> : ``` x = 2140324650240744961264423072839333563008614715144755017797754920881418023447140136643345519095804679610992851872470914587687396261921557363047454770520805119056493106687691590019759405693457452230589325976697471681738069364894699871578494975937497937 ``` Em 2020, os pesquisadores fatoraram esse número usando um supercomputador clássico e cerca de 2.700 [core-years](gloss:coreyears) de poder de processamento. Este foi um grande esforço que quebrou recordes no momento da escrita. Podemos verificar seus resultados na célula de código abaixo (felizmente, temos algoritmos eficientes para multiplicação!): ``` p = 64135289477071580278790190170577389084825014742943447208116859632024532344630238623598752668347708737661925585694639798853367 q = 33372027594978156556226010605355114227940760344767554666784520987023841729210037080257448673296881877565718986258036932062711 p*q == x # Evaluates to 'True' ``` A saída mostrada é o valor da última linha da célula. Nesse caso, podemos ver que <code>p*q == x</code> é avaliado como <code>True</code> . Embora não comprovado matematicamente, temos certeza de que não há algoritmo eficiente para fatorar esses números em computadores tradicionais. Na verdade, grande parte da criptografia da Internet se baseia na suposição de que esse problema é intratável e que fatorar um número [RSA](gloss:RSA) de 617 dígitos é impossível. Em contraste, conhecemos algoritmos de fatoração eficientes para computadores quânticos que, uma vez que tivermos computadores quânticos grandes o suficiente, estimamos que poderiam fatorar esses números em menos de um dia. ## Onde estamos agora? Agora sabemos que os computadores quânticos podem executar algoritmos mais eficientes, mas os computadores quânticos que temos hoje são muito pequenos e instáveis para apresentar vantagens sobre os computadores tradicionais. 
Em um nível muito simples, existem dois fatores que limitam o tamanho dos problemas que nossos computadores quânticos podem resolver. A primeira é a quantidade de dados que eles podem armazenar e trabalhar, que geralmente medimos em [*qubits*](gloss:qubits) . Se não tivermos qubits suficientes, simplesmente não podemos armazenar e operar problemas acima de um determinado tamanho. A segunda é a taxa de erro do nosso computador quântico; como só vemos o comportamento quântico em experimentos de laboratório delicados, criar computadores quânticos é um processo delicado. Os computadores quânticos que temos agora são ruidosos, o que significa que muitas vezes erram e introduzem ' [ruído](gloss:noise) ' em nossos resultados. Muito ruído e nossos resultados serão absurdos! No momento, os computadores quânticos que temos são experimentais. Eles são limitados por contagens de qubits e taxas de erro, portanto, os maiores problemas que podemos resolver atualmente ainda são facilmente gerenciáveis para computadores convencionais. Em algum momento no futuro, isso vai mudar. Alcançaremos a 'vantagem quântica', na qual fará sentido econômico resolver um problema usando um computador quântico ao invés de um computador convencional. Como nós sabemos? *Porque medimos algoritmos por sua taxa de crescimento!* Sabemos que, enquanto os computadores quânticos continuarem se desenvolvendo de forma progressiva, eles acabarão ultrapassando os computadores clássicos. ![comparação de (projetadas) habilidades de fatoração clássica vs quântica ao longo do tempo](images/why-qc/q-vs-c.svg) A estimativa para fatorar um número RSA de 617 dígitos em menos de um dia assumiu ~ 20 milhões de qubits ruidosos. No momento da redação deste artigo, a IBM possui atualmente um computador quântico de 65 qubits e pretende criar um sistema com mais de 1.000 qubits até 2023. 
Existem outros algoritmos que acreditamos que nos darão uma vantagem quântica muito antes desse marco, mas ainda pode parecer que estamos muito longe. Devemos nos lembrar de onde vieram os computadores convencionais. Abaixo está uma foto do primeiro [transistor](gloss:transistor) , criado em 1947. Os transistores são os blocos de construção dos processadores de computador modernos. ![comparação de (projetadas) habilidades de fatoração clássica vs quântica ao longo do tempo](images/why-qc/first-transistor.jpg) Crédito da imagem: Funcionário federal <a href="https://clintonwhitehouse4.archives.gov/Initiatives/Millennium/capsule/mayo.html">Link</a>, <a href="https://commons.wikimedia.org/w/index.php?curid=554340">Domínio Público</a> . 70 anos depois, nossos chips de computador modernos podem conter bilhões de transistores. No restante deste curso, exploraremos os efeitos quânticos que nos permitem criar algoritmos mais eficientes. Ao final deste curso, você será capaz de usar o pacote de software, [Qiskit](gloss:qiskit) , para programar um computador quântico para executar um desses algoritmos. <!-- ::: q-block.exercise --> ### Questionário rápido <!-- ::: q-quiz(goal="intro-why-qc-1") --> <!-- ::: .question --> Computadores quânticos eventualmente... <!-- ::: --> <!-- ::: .option(correct) --> 1. ...fazer cálculos que são muito difíceis para computadores convencionais. <!-- ::: --> <!-- ::: .option --> 1. ...substituir computadores convencionais. <!-- ::: --> <!-- ::: .option --> 1. ...aumentar a velocidade dos computadores convencionais. <!-- ::: --> <!-- ::: --> <!-- ::: -->
github_jupyter
# eICU Collaborative Research Database # Notebook 2: Exploring the patient table In this notebook we introduce the patient table, a key table in the [eICU Collaborative Research Database](http://eicu-crd.mit.edu/). The patient table contains patient demographics and admission and discharge details for hospital and ICU stays. For more detail, see: http://eicu-crd.mit.edu/eicutables/patient/ ## Load libraries and connect to the data Run the following cells to import some libraries and then connect to the database. ``` # Import libraries import numpy as np import os import pandas as pd import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib.path as path # Make pandas dataframes prettier from IPython.display import display, HTML # Access data using Google BigQuery. from google.colab import auth from google.cloud import bigquery ``` As before, you need to first authenticate yourself by running the following cell. If you are running it for the first time, it will ask you to follow a link to log in using your Gmail account, and accept the data access requests to your profile. Once this is done, it will generate a string of verification code, which you should paste back to the cell below and press enter. ``` auth.authenticate_user() ``` We'll also set the project details. ``` project_id='philips-eicu-meeting-2019' os.environ["GOOGLE_CLOUD_PROJECT"]=project_id ``` # Load data from the `patient` table Now we can start exploring the data. We'll begin by running a simple query on the database to load all columns of the `patient` table to a Pandas DataFrame. The query is written in SQL, a common language for extracting data from databases. The structure of an SQL query is: ```sql SELECT <columns> FROM <table> WHERE <criteria, optional> ``` `*` is a wildcard that indicates all columns ``` # Helper function to read data from BigQuery into a DataFrame. 
def run_query(query): return pd.io.gbq.read_gbq(query, project_id=project_id, dialect="standard") query = """ SELECT * FROM `physionet-data.eicu_crd_demo.patient` """ patient = run_query(query) ``` We have now assigned the output to our query to a variable called `patient`. Let's use the `head` method to view the first few rows of our data. ``` # view the top few rows of the patient data patient.head() ``` ## Questions - What does `patientunitstayid` represent? (hint, see: http://eicu-crd.mit.edu/eicutables/patient/) - What does `patienthealthsystemstayid` represent? - What does `uniquepid` represent? ``` # select a limited number of columns to view columns = ['uniquepid', 'patientunitstayid','gender','age','unitdischargestatus'] patient[columns].head() ``` - Try running the following query, which lists unique values in the age column. What do you notice? ``` # what are the unique values for age? age_col = 'age' patient[age_col].sort_values().unique() ``` - Try plotting a histogram of ages using the command in the cell below. What happens? Why? ``` # try plotting a histogram of ages patient[age_col].plot(kind='hist', bins=15) ``` Let's create a new column named `age_num`, then try again. ``` # create a column containing numerical ages # If ‘coerce’, then invalid parsing will be set as NaN agenum_col = 'age_num' patient[agenum_col] = pd.to_numeric(patient[age_col], errors='coerce') patient[agenum_col].sort_values().unique() patient[agenum_col].plot(kind='hist', bins=15) ``` ## Questions - Use the `mean()` method to find the average age. Why do we expect this to be lower than the true mean? - In the same way that you use `mean()`, you can use `describe()`, `max()`, and `min()`. Look at the admission heights (`admissionheight`) of patients in cm. What issue do you see? How can you deal with this issue? ``` adheight_col = 'admissionheight' patient[adheight_col].describe() # set threshold adheight_col = 'admissionheight' patient[patient[adheight_col] < 10] = None ```
github_jupyter
# Linear models for classification problems ![Linear (logistic) classification](images/linear_logistic.png) Given a training set of $N$ samples, $D = \{(\boldsymbol{x_1} , y_1 ), \ldots , (\boldsymbol{x_N} , y_N )\}$ , where $\boldsymbol{x_i}$ is a multidimensional input vector with dimension $P$ and class label (target or response). Multiclass Classification problems can be seen as several binary classification problems $y_i \in \{0, 1\}$ where the classifier aims to discriminate the sample of the current class (label 1) versus the samples of other classes (label 0). Therfore, for each class the classifier seek for a vector of parameters $\boldsymbol{w}$ that performs a linear combination of the input variables, $\boldsymbol{x}^T \boldsymbol{w}$. This step performs a **projection** or a **rotation** of input sample into a good discriminative one-dimensional sub-space, that best discriminate sample of current class vs sample of other classes. This score (a.k.a decision function) is tranformed, using the nonlinear activation funtion $f(.)$, to a "posterior probabilities" of class 1: $p(y=1|\boldsymbol{x}) = f(\boldsymbol{x}^T \boldsymbol{w})$, where, $p(y=1|\boldsymbol{x}) = 1 - p(y=0|\boldsymbol{x})$. The decision surfaces (orthogonal hyperplan to $\boldsymbol{w}$) correspond to $f(x)=\text{constant}$, so that $\boldsymbol{x}^T \boldsymbol{w}=\text{constant}$ and hence the decision surfaces are linear functions of $\boldsymbol{x}$, even if the function $f(.)$ is nonlinear. A thresholding of the activation (shifted by the bias or intercept) provides the predicted class label. The vector of parameters, that defines the discriminative axis, minimizes an **objective function** $J(\boldsymbol{w})$ that is a sum of of **loss function** $L(\boldsymbol{w})$ and some penalties on the weights vector $\Omega(\boldsymbol{w})$. 
$$ \min_{\boldsymbol{w}}~J = \sum_i L(y_i, f(\boldsymbol{x_i}^T\boldsymbol{w})) + \Omega(\boldsymbol{w}), $$ ## Fisher's linear discriminant with equal class covariance This geometric method does not make any probabilistic assumptions, instead it relies on distances. It looks for the **linear projection** of the data points onto a vector, $\boldsymbol{w}$, that maximizes the between/within variance ratio, denoted $F(\boldsymbol{w})$. Under a few assumptions, it will provide the same results as linear discriminant analysis (LDA), explained below. Suppose two classes of observations, $C_0$ and $C_1$, have means $\boldsymbol{\mu_0}$ and $\boldsymbol{\mu_1}$ and the same total within-class scatter ("covariance") matrix, \begin{align} \boldsymbol{S_W} &= \sum_{i\in C_0} (\boldsymbol{x_i} - \boldsymbol{\mu_0})(\boldsymbol{x_i} - \boldsymbol{\mu_0})^T + \sum_{j\in C_1} (\boldsymbol{x_j} - \boldsymbol{\mu_1})(\boldsymbol{x_j} -\boldsymbol{\mu_1})^T\\ &= \boldsymbol{X_c}^T \boldsymbol{X_c}, \end{align} where $\boldsymbol{X_c}$ is the $(N \times P)$ matrix of data centered on their respective means: $$ \boldsymbol{X_c} = \begin{bmatrix} \boldsymbol{X_0} - \boldsymbol{\mu_0} \\ \boldsymbol{X_1} - \boldsymbol{\mu_1} \end{bmatrix}, $$ where $\boldsymbol{X_0}$ and $\boldsymbol{X_1}$ are the $(N_0 \times P)$ and $(N_1 \times P)$ matrices of samples of classes $C_0$ and $C_1$. Let $\boldsymbol{S_B}$ being the scatter "between-class" matrix, given by $$ \boldsymbol{S_B} = (\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )(\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )^T. $$ The linear combination of features $\boldsymbol{w}^T x$ have means $\boldsymbol{w}^T \mu_i$ for $i=0,1$, and variance $\boldsymbol{w}^T \boldsymbol{X^T_c} \boldsymbol{X_c} \boldsymbol{w}$. 
Fisher defined the separation between these two distributions to be the ratio of the variance between the classes to the variance within the classes: \begin{align} F_{\text{Fisher}}(\boldsymbol{w}) &= \frac{\sigma_{\text{between}}^2}{\sigma_{\text{within}}^2}\\ &= \frac{(\boldsymbol{w}^T \boldsymbol{\mu_1} - \boldsymbol{w}^T \boldsymbol{\mu_0})^2}{\boldsymbol{w}^T X^T_c \boldsymbol{X_c} \boldsymbol{w}}\\ &= \frac{(\boldsymbol{w}^T (\boldsymbol{\mu_1} - \boldsymbol{\mu_0}))^2}{\boldsymbol{w}^T X^T_c \boldsymbol{X_c} \boldsymbol{w}}\\ &= \frac{\boldsymbol{w}^T (\boldsymbol{\mu_1} - \boldsymbol{\mu_0}) (\boldsymbol{\mu_1} - \boldsymbol{\mu_0})^T w}{\boldsymbol{w}^T X^T_c \boldsymbol{X_c} \boldsymbol{w}}\\ &= \frac{\boldsymbol{w}^T \boldsymbol{S_B} w}{\boldsymbol{w}^T \boldsymbol{S_W} \boldsymbol{w}}. \end{align} ### The Fisher most discriminant projection In the two-class case, the maximum separation occurs by a projection on the $(\boldsymbol{\mu_1} - \boldsymbol{\mu_0})$ using the Mahalanobis metric $\boldsymbol{S_W}^{-1}$, so that $$ \boldsymbol{w} \propto \boldsymbol{S_W}^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0}). 
$$ #### Demonstration Differentiating $F_{\text{Fisher}}(w)$ with respect to $w$ gives \begin{align*} \nabla_{\boldsymbol{w}}F_{\text{Fisher}}(\boldsymbol{w}) &= 0\\ \nabla_{\boldsymbol{w}}\left(\frac{\boldsymbol{w}^T \boldsymbol{S_B} w}{\boldsymbol{w}^T \boldsymbol{S_W} \boldsymbol{w}}\right) &= 0\\ (\boldsymbol{w}^T \boldsymbol{S_W} \boldsymbol{w})(2 \boldsymbol{S_B} \boldsymbol{w}) - (\boldsymbol{w}^T \boldsymbol{S_B} \boldsymbol{w})(2 \boldsymbol{S_W} \boldsymbol{w}) &= 0\\ (\boldsymbol{w}^T \boldsymbol{S_W} \boldsymbol{w})(\boldsymbol{S_B} \boldsymbol{w}) &= (\boldsymbol{w}^T \boldsymbol{S_B} \boldsymbol{w})(\boldsymbol{S_W} \boldsymbol{w})\\ \boldsymbol{S_B} \boldsymbol{w} &= \frac{\boldsymbol{w}^T \boldsymbol{S_B} \boldsymbol{w}}{\boldsymbol{w}^T \boldsymbol{S_W} \boldsymbol{w}}(\boldsymbol{S_W} \boldsymbol{w})\\ \boldsymbol{S_B} \boldsymbol{w} &= \lambda (\boldsymbol{S_W} \boldsymbol{w})\\ \boldsymbol{S_W}^{-1}{\boldsymbol{S_B}} \boldsymbol{w} &= \lambda \boldsymbol{w}. \end{align*} Since we do not care about the magnitude of $\boldsymbol{w}$, only its direction, we replaced the scalar factor $(\boldsymbol{w}^T \boldsymbol{S_B} \boldsymbol{w}) / (\boldsymbol{w}^T \boldsymbol{S_W} \boldsymbol{w})$ by $\lambda$. In the multiple-class case, the solutions $w$ are determined by the eigenvectors of $\boldsymbol{S_W}^{-1}{\boldsymbol{S_B}}$ that correspond to the $K-1$ largest eigenvalues. 
However, in the two-class case (in which $\boldsymbol{S_B} = (\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )(\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )^T$) it is easy to show that $\boldsymbol{w} = \boldsymbol{S_W}^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0})$ is the unique eigenvector of $\boldsymbol{S_W}^{-1}{\boldsymbol{S_B}}$: \begin{align*} \boldsymbol{S_W}^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )(\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )^T \boldsymbol{w} &= \lambda \boldsymbol{w}\\ \boldsymbol{S_W}^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )(\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )^T \boldsymbol{S_W}^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0}) &= \lambda \boldsymbol{S_W}^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0}), \end{align*} where here $\lambda = (\boldsymbol{\mu_1} - \boldsymbol{\mu_0} )^T \boldsymbol{S_W}^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0})$. Which leads to the result $$ \boldsymbol{w} \propto \boldsymbol{S_W}^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0}). $$ ### The separating hyperplane The separating hyperplane is a $P-1$-dimensional hyper surface, orthogonal to the projection vector, $w$. There is no single best way to find the origin of the plane along $w$, or equivalently the classification threshold that determines whether a point should be classified as belonging to $C_0$ or to $C_1$. However, if the projected points have roughly the same distribution, then the threshold can be chosen as the hyperplane exactly between the projections of the two means, i.e. as $$ T = \boldsymbol{w} \cdot \frac{1}{2}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0}). 
$$ ![The Fisher most discriminant projection](images/fisher_linear_disc.png) ``` %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn import datasets import sklearn.linear_model as lm import sklearn.metrics as metrics np.set_printoptions(precision=2) pd.set_option('precision', 2) ``` ## Linear discriminant analysis (LDA) Linear discriminant analysis (LDA) is a probabilistic generalization of Fisher's linear discriminant. It uses Bayes' rule to fix the threshold based on prior probabilities of classes. 1. First compute the class-**conditional distributions** of $\boldsymbol{x}$ given class $C_k$: $p(x|C_k) = \mathcal{N}(\boldsymbol{x}|\boldsymbol{\mu_k}, \boldsymbol{S_W})$. Where $\mathcal{N}(\boldsymbol{x}|\boldsymbol{\mu_k}, \boldsymbol{S_W})$ is the multivariate Gaussian distribution defined over a P-dimensional vector $x$ of continuous variables, which is given by $$ \mathcal{N}(\boldsymbol{x}|\boldsymbol{\mu_k}, \boldsymbol{S_W}) = \frac{1}{(2\pi)^{P/2}|\boldsymbol{S_W}|^{1/2}}\exp\{-\frac{1}{2} (\boldsymbol{x} - \boldsymbol{\mu_k})^T \boldsymbol{S_W}^{-1}(x - \boldsymbol{\mu_k})\} $$ 2. Estimate the **prior probabilities** of class $k$, $p(C_k) = N_k/N$. 3. Compute **posterior probabilities** (ie. the probability of a each class given a sample) combining conditional with priors using Bayes' rule: $$ p(C_k|\boldsymbol{x}) = \frac{p(C_k) p(\boldsymbol{x}|C_k)}{p(\boldsymbol{x})} $$ Where $p(x)$ is the marginal distribution obtained by suming of classes: As usual, the denominator in Bayes’ theorem can be found in terms of the quantities appearing in the numerator, because $$ p(x) = \sum_k p(\boldsymbol{x}|C_k)p(C_k) $$ 4. Classify $\boldsymbol{x}$ using the Maximum-a-Posteriori probability: $C_k= \arg \max_{C_k} p(C_k|\boldsymbol{x})$ LDA is a **generative model** since the class-conditional distributions cal be used to generate samples of each classes. 
LDA is useful to deal with imbalanced group sizes (eg.: $N_1 \gg N_0$) since priors probabilities can be used to explicitly re-balance the classification by setting $p(C_0) = p(C_1) = 1/2$ or whatever seems relevant. LDA can be generalised to the multiclass case with $K>2$. With $N_1 = N_0$, LDA lead to the same solution than Fisher's linear discriminant. ### Exercise How many parameters are required to estimate to perform a LDA ? ``` from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA # Dataset 2 two multivariate normal n_samples, n_features = 100, 2 mean0, mean1 = np.array([0, 0]), np.array([0, 2]) Cov = np.array([[1, .8],[.8, 1]]) np.random.seed(42) X0 = np.random.multivariate_normal(mean0, Cov, n_samples) X1 = np.random.multivariate_normal(mean1, Cov, n_samples) X = np.vstack([X0, X1]) y = np.array([0] * X0.shape[0] + [1] * X1.shape[0]) # LDA with scikit-learn lda = LDA() proj = lda.fit(X, y).transform(X) y_pred_lda = lda.predict(X) errors = y_pred_lda != y print("Nb errors=%i, error rate=%.2f" % (errors.sum(), errors.sum() / len(y_pred_lda))) ``` ## Logistic regression Logistic regression is called a generalized linear models. ie.: it is a linear model with a link function that maps the output of linear multiple regression to the posterior probability of class $1$ $p(1|x)$ using the logistic sigmoid function: $$ p(1|\boldsymbol{w, x_i}) = \frac{1}{1 + \exp(-\boldsymbol{w} \cdot \boldsymbol{x_i})} $$ ``` def logistic(x): return 1 / (1 + np.exp(-x)) x = np.linspace(-6, 6, 100) plt.plot(x, logistic(x)) plt.grid(True) plt.title('Logistic (sigmoid)') ``` Logistic regression is a **discriminative model** since it focuses only on the posterior probability of each class $p(C_k|x)$. It only requires to estimate the $P$ weights of the $\boldsymbol{w}$ vector. Thus it should be favoured over LDA with many input features. In small dimension and balanced situations it would provide similar predictions than LDA. 
However imbalanced group sizes cannot be explicitly controlled. It can be managed using a reweighting of the input samples. ``` logreg = lm.LogisticRegression(penalty='none').fit(X, y) # This class implements regularized logistic regression. # C is the Inverse of regularization strength. # Large value => no regularization. logreg.fit(X, y) y_pred_logreg = logreg.predict(X) errors = y_pred_logreg != y print("Nb errors=%i, error rate=%.2f" % (errors.sum(), errors.sum() / len(y_pred_logreg))) print(logreg.coef_) ``` ### Exercise Explore the ``Logistic Regression`` parameters and proposes a solution in cases of highly imbalanced training dataset $N_1 \gg N_0$ when we know that in reality both classes have the same probability $p(C_1) = p(C_0)$. ## Losses ### Negative log likelihood or cross-entropy The **Loss function** for sample $i$ is the negative log of the probability: $$ L(\boldsymbol{w, x_i}, y_i) = \begin{cases} -\log(p(1|w, \boldsymbol{x_i})) & \text{if } y_i = 1 \\ -\log(1 - p(1|w, \boldsymbol{x_i}) & \text{if } y_i = 0 \end{cases} $$ For the whole dataset $\boldsymbol{X}, \boldsymbol{y} = \{\boldsymbol{x_i}, y_i\}$ the loss function to minimize $L(\boldsymbol{w, X, y})$ is the negative negative log likelihood (nll) that can be simplied using a 0/1 coding of the label in the case of binary classification: \begin{align} L(\boldsymbol{w, X, y}) &= -\log \mathcal{L}(\boldsymbol{w, X, y}) \\ &= -\log \Pi_i\{p(1|\boldsymbol{w}, \boldsymbol{x_i})^{y_i} (1 - p(1|\boldsymbol{w}, \boldsymbol{x_i})^{(1 - y_i)}\}\\ &= \sum_i\{y_i \log p(1|\boldsymbol{w}, \boldsymbol{x_i}) + (1 - y_i) \log(1 - p(1|\boldsymbol{w}, \boldsymbol{x_i}))\}, \end{align} This is known as the **cross-entropy** between the true label $y$ and the predicted probability $p$. 
For the logistic regression case, we have: $$ L(\boldsymbol{w, X, y}) = \sum_i\{y_i \boldsymbol{w \cdot x_i} - \log(1 + \exp(\boldsymbol{w \cdot x_i}))\} $$ This is solved by numerical method using the gradient of the loss: $$ \partial\frac{L(\boldsymbol{w, X, y})}{\partial\boldsymbol{w}} = \sum_i \boldsymbol{x_i} (y_i - p(1|\boldsymbol{w}, \boldsymbol{x_i})) $$ See also [Scikit learn doc](https://scikit-learn.org/stable/modules/sgd.html#mathematical-formulation) ### Hinge loss or $\ell_1$ loss TODO ## Overfitting VC dimension (for Vapnik–Chervonenkis dimension) is a measure of the **capacity** (complexity, expressive power, richness, or flexibility) of a statistical classification algorithm, defined as the cardinality of the largest set of points that the algorithm can shatter. Theorem: Linear classifier in $R^P$ have VC dimension of $P+1$. Hence in dimension two ($P=2$) any random partition of 3 points can be learned. ![In 2D we can shatter any three non-collinear points](images/vc_dimension_linear_2d.png) ## Regularization using penalization of coefficients The penalties use in regression are also used in classification. The only difference is the loss function generally the negative log likelihood (cross-entropy) or the hinge loss. We will explore: - Ridge (also called $\ell_2$) penalty: $\|\mathbf{w}\|_2^2$. It shrinks coefficients toward 0. - Lasso (also called $\ell_1$) penalty: $\|\mathbf{w}\|_1$. It performs feature selection by setting some coefficients to 0. - ElasticNet (also called $\ell_1\ell_2$) penalty: $\alpha \left(\rho~\|\mathbf{w}\|_1 + (1-\rho)~\|\mathbf{w}\|_2^2 \right)$. It performs selection of group of correlated features by setting some coefficients to 0. 
``` # Dataset with some correlation X, y = datasets.make_classification(n_samples=100, n_features=10, n_informative=5, n_redundant=3, n_classes=2, random_state=3, shuffle=False) lr = lm.LogisticRegression(penalty='none').fit(X, y) l2 = lm.LogisticRegression(penalty='l2', C=.1).fit(X, y) # lambda = 1 / C! # use solver 'saga' to handle L1 penalty l1 = lm.LogisticRegression(penalty='l1', C=.1, solver='saga').fit(X, y) # lambda = 1 / C! l1l2 = lm.LogisticRegression(penalty='elasticnet', C=.1, l1_ratio=0.5, solver='saga').fit(X, y) # lambda = 1 / C! pd.DataFrame(np.vstack((lr.coef_, l2.coef_, l1.coef_, l1l2.coef_)), index=['lr', 'l2', 'l1', 'l1l2']) ``` ## Ridge Fisher's linear classification ($\ell_2$-regularization) When the matrix $\boldsymbol{S_W}$ is not full rank or $P \gg N$, the The Fisher most discriminant projection estimate of the is not unique. This can be solved using a biased version of $\boldsymbol{S_W}$: $$ \boldsymbol{S_W}^{Ridge} = \boldsymbol{S_W} + \lambda \boldsymbol{I} $$ where $I$ is the $P \times P$ identity matrix. This leads to the regularized (ridge) estimator of the Fisher's linear discriminant analysis: $$ \boldsymbol{w}^{Ridge} \propto (\boldsymbol{S_W} + \lambda \boldsymbol{I})^{-1}(\boldsymbol{\mu_1} - \boldsymbol{\mu_0}) $$ ![The Ridge Fisher most discriminant projection](images/ridge_fisher_linear_disc.png) Increasing $\lambda$ will: - Shrinks the coefficients toward zero. - The covariance will converge toward the diagonal matrix, reducing the contribution of the pairwise covariances. ## Ridge logistic regression ($\ell_2$-regularization) The **objective function** to be minimized is now the combination of the logistic loss (negative log likelyhood) $-\log \mathcal{L}(\boldsymbol{w})$ with a penalty of the L2 norm of the weights vector. 
In the two-class case, using the 0/1 coding we obtain: $$ \min_{\boldsymbol{w}}~\text{Logistic ridge}(\boldsymbol{w}) = -\log \mathcal{L}(\boldsymbol{w, X, y}) + \lambda~\|\boldsymbol{w}\|^2 $$ ``` from sklearn import linear_model lrl2 = linear_model.LogisticRegression(penalty='l2', C=.1) # This class implements regularized logistic regression. C is the Inverse of regularization strength. # Large value => no regularization. lrl2.fit(X, y) y_pred_l2 = lrl2.predict(X) prob_pred_l2 = lrl2.predict_proba(X) print("Probas of 5 first samples for class 0 and class 1:") print(prob_pred_l2[:5, :]) print("Coef vector:") print(lrl2.coef_) # Retrieve proba from coef vector probas = 1 / (1 + np.exp(- (np.dot(X, lrl2.coef_.T) + lrl2.intercept_))).ravel() print("Diff", np.max(np.abs(prob_pred_l2[:, 1] - probas))) errors = y_pred_l2 != y print("Nb errors=%i, error rate=%.2f" % (errors.sum(), errors.sum() / len(y))) ``` ## Lasso logistic regression ($\ell_1$-regularization) The **objective function** to be minimized is now the combination of the logistic loss $-\log \mathcal{L}(\boldsymbol{w})$ with a penalty of the L1 norm of the weights vector. In the two-class case, using the 0/1 coding we obtain: $$ \min_{\boldsymbol{w}}~\text{Logistic Lasso}(w) = -\log \mathcal{L}(\boldsymbol{w, X, y}) + \lambda~\|\boldsymbol{w}\|_1 $$ ``` from sklearn import linear_model lrl1 = lm.LogisticRegression(penalty='l1', C=.1, solver='saga') # lambda = 1 / C! # This class implements regularized logistic regression. C is the Inverse of regularization strength. # Large value => no regularization. lrl1.fit(X, y) y_pred_lrl1 = lrl1.predict(X) errors = y_pred_lrl1 != y print("Nb errors=%i, error rate=%.2f" % (errors.sum(), errors.sum() / len(y_pred_lrl1))) print("Coef vector:") print(lrl1.coef_) ``` ## Ridge linear Support Vector Machine ($\ell_2$-regularization) Support Vector Machine seek for separating hyperplane with maximum margin to enforce robustness against noise. 
Like logistic regression it is a **discriminative method** that only focuses of predictions. Here we present the non separable case of Maximum Margin Classifiers with $\pm 1$ coding (ie.: $y_i \ \{-1, +1\}$). In the next figure the legend aply to samples of "dot" class. ![Linear lar margin classifiers](images/svm.png) Linear SVM for classification (also called SVM-C or SVC) minimizes: $$ \begin{array}{lll} \text{min} & \text{Linear SVM}(\boldsymbol{w}) &= \text{penalty}(w) + C~\text{Hinge loss}(w)\\ & & = \|w\|_2^2 + C~\sum_i^N\xi_i\\ \text{with} & \forall i & y_i (w \cdot \boldsymbol{x_i}) \geq 1 - \xi_i \end{array} $$ Here we introduced the slack variables: $\xi_i$, with $\xi_i = 0$ for points that are on or inside the correct margin boundary and $\xi_i = |y_i - (w \ cdot \cdot \boldsymbol{x_i})|$ for other points. Thus: 1. If $y_i (w \cdot \boldsymbol{x_i}) \geq 1$ then the point lies outside the margin but on the correct side of the decision boundary. In this case $\xi_i=0$. The constraint is thus not active for this point. It does not contribute to the prediction. 2. If $1 > y_i (w \cdot \boldsymbol{x_i}) \geq 0$ then the point lies inside the margin and on the correct side of the decision boundary. In this case $0<\xi_i \leq 1$. The constraint is active for this point. It does contribute to the prediction as a support vector. 3. If $0 < y_i (w \cdot \boldsymbol{x_i})$) then the point is on the wrong side of the decision boundary (missclassification). In this case $0<\xi_i > 1$. The constraint is active for this point. It does contribute to the prediction as a support vector. This loss is called the hinge loss, defined as: $$ \max(0, 1 - y_i~ (w \cdot \boldsymbol{x_i})) $$ So linear SVM is closed to Ridge logistic regression, using the hinge loss instead of the logistic loss. Both will provide very similar predictions. 
``` from sklearn import svm svmlin = svm.LinearSVC(C=.1) # Remark: by default LinearSVC uses squared_hinge as loss svmlin.fit(X, y) y_pred_svmlin = svmlin.predict(X) errors = y_pred_svmlin != y print("Nb errors=%i, error rate=%.2f" % (errors.sum(), errors.sum() / len(y_pred_svmlin))) print("Coef vector:") print(svmlin.coef_) ``` ## Lasso linear Support Vector Machine ($\ell_1$-regularization) Linear SVM for classification (also called SVM-C or SVC) with l1-regularization $$ \begin{array}{lll} \text{min} & F_{\text{Lasso linear SVM}}(w) &= ||w||_1 + C~\sum_i^N\xi_i\\ \text{with} & \forall i & y_i (w \cdot \boldsymbol{x_i}) \geq 1 - \xi_i \end{array} $$ ``` from sklearn import svm svmlinl1 = svm.LinearSVC(penalty='l1', dual=False) # Remark: by default LinearSVC uses squared_hinge as loss svmlinl1.fit(X, y) y_pred_svmlinl1 = svmlinl1.predict(X) errors = y_pred_svmlinl1 != y print("Nb errors=%i, error rate=%.2f" % (errors.sum(), errors.sum() / len(y_pred_svmlinl1))) print("Coef vector:") print(svmlinl1.coef_) ``` ## Exercise Compare predictions of Logistic regression (LR) and their SVM counterparts, ie.: L2 LR vs L2 SVM and L1 LR vs L1 SVM - Compute the correlation between pairs of weight vectors. - Compare the predictions of two classifiers using their decision function: * Give the equation of the decision function for a linear classifier, assuming that there is no intercept. * Compute the correlation between the decision functions. * Plot the pairwise decision functions of the classifiers. - Conclude on the differences between Linear SVM and logistic regression. ## Elastic-net classification ($\ell_1\ell_2$-regularization) The **objective function** to be minimized is now the combination of the logistic loss $-\log \mathcal{L}(\boldsymbol{w})$ or the hinge loss with a combination of L1 and L2 penalties. 
In the two-class case, using the 0/1 coding we obtain: \begin{align} \min~\text{Logistic enet}(\boldsymbol{w}) &= -\log \mathcal{L}(\boldsymbol{w, X, y}) + \alpha~\left(\rho~\|\boldsymbol{w}\|_1 + (1-\rho)~\|\boldsymbol{w}\|_2^2 \right)\\ \min~\text{Hinge enet}(\boldsymbol{w}) &= \text{Hinge loss}(\boldsymbol{w}) + \alpha~\left(\rho~\|\boldsymbol{w}\|_1 + (1-\rho)~\|\boldsymbol{w}\|_2^2 \right) \end{align} ``` # Use SGD solver enetlog = lm.SGDClassifier(loss="log", penalty="elasticnet", alpha=0.1, l1_ratio=0.5, random_state=42) enetlog.fit(X, y) # Or saga solver: # enetloglike = lm.LogisticRegression(penalty='elasticnet', # C=.1, l1_ratio=0.5, solver='saga') enethinge = lm.SGDClassifier(loss="hinge", penalty="elasticnet", alpha=0.1, l1_ratio=0.5, random_state=42) enethinge.fit(X, y) print("Hinge loss and logistic loss provide almost the same predictions.") print("Confusion matrix") metrics.confusion_matrix(enetlog.predict(X), enethinge.predict(X)) print("Decision_function log x hinge losses:") _ = plt.plot(enetlog.decision_function(X), enethinge.decision_function(X), "o") ``` ## Classification performance evaluation metrics source: https://en.wikipedia.org/wiki/Sensitivity_and_specificity Imagine a study evaluating a new test that screens people for a disease. Each person taking the test either has or does not have the disease. The test outcome can be positive (classifying the person as having the disease) or negative (classifying the person as not having the disease). The test results for each subject may or may not match the subject's actual status. 
In that setting: - True positive (TP): Sick people correctly identified as sick - False positive (FP): Healthy people incorrectly identified as sick - True negative (TN): Healthy people correctly identified as healthy - False negative (FN): Sick people incorrectly identified as healthy - **Accuracy** (ACC): ACC = (TP + TN) / (TP + FP + FN + TN) - **Sensitivity** (SEN) or **recall** of the positive class or true positive rate (TPR) or hit rate: SEN = TP / P = TP / (TP+FN) - **Specificity** (SPC) or **recall** of the negative class or true negative rate: SPC = TN / N = TN / (TN+FP) - **Precision** or positive predictive value (PPV): PPV = TP / (TP + FP) - **Balanced accuracy** (bACC): a useful performance measure that avoids inflated performance estimates on imbalanced datasets (Brodersen, et al. (2010). "The balanced accuracy and its posterior distribution"). It is defined as the arithmetic mean of sensitivity and specificity, or the average accuracy obtained on either class: bACC = 1/2 * (SEN + SPC) - **F1 Score** (or F-score), a weighted average of precision and recall, is useful to deal with imbalanced datasets The four outcomes can be formulated in a 2×2 contingency table or confusion matrix https://en.wikipedia.org/wiki/Sensitivity_and_specificity For more precision see: http://scikit-learn.org/stable/modules/model_evaluation.html ``` from sklearn import metrics y_pred = [0, 1, 0, 0] y_true = [0, 1, 0, 1] metrics.accuracy_score(y_true, y_pred) # The overall precision an recall metrics.precision_score(y_true, y_pred) metrics.recall_score(y_true, y_pred) # Recalls on individual classes: SEN & SPC recalls = metrics.recall_score(y_true, y_pred, average=None) recalls[0] # is the recall of class 0: specificity recalls[1] # is the recall of class 1: sensitivity # Balanced accuracy b_acc = recalls.mean() # The overall precision an recall on each individual class p, r, f, s = metrics.precision_recall_fscore_support(y_true, y_pred) ``` 
### Significance of classification rate P-value associated with the classification rate. Compare the number of correct classifications (=accuracy $\times N$) to the null hypothesis of Binomial distribution of parameters $p$ (typically 50% of chance level) and $N$ (Number of observations). Is 65% of accuracy a significant prediction rate among 70 observations? Since this is an exact, **two-sided** test of the null hypothesis, the p-value can be divided by 2 since we test that the accuracy is superior to the chance level. ``` import scipy.stats acc, N = 0.65, 70 pval = scipy.stats.binom_test(x=int(acc * N), n=N, p=0.5) / 2 print(pval) ``` ### Area Under Curve (AUC) of Receiver operating characteristic (ROC) A classifier may have found a good discriminative projection $w$. However if the threshold to decide the final predicted class is poorly adjusted, the performances will highlight a high specificity and a low sensitivity or the contrary. In this case it is recommended to use the AUC of a ROC analysis which basically provides a measure of overlap of the two classes when points are projected on the discriminative axis. For more detail on ROC and AUC see: https://en.wikipedia.org/wiki/Receiver_operating_characteristic. ``` score_pred = np.array([.1 ,.2, .3, .4, .5, .6, .7, .8]) y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1]) thres = .9 y_pred = (score_pred > thres).astype(int) print("With a threshold of %.2f, the rule always predict 0. Predictions:" % thres) print(y_pred) metrics.accuracy_score(y_true, y_pred) # The overall precision an recall on each individual class r = metrics.recall_score(y_true, y_pred, average=None) print("Recalls on individual classes are:", r, "ie, 100% of specificity, 0% of sensitivity") # However AUC=1 indicating a perfect separation of the two classes auc = metrics.roc_auc_score(y_true, score_pred) print("But the AUC of %.2f demonstrate a good classes separation." 
% auc) ``` ## Imbalanced classes Learning with discriminative (logistic regression, SVM) methods is generally based on minimizing the misclassification of training samples, which may be unsuitable for imbalanced datasets where the recognition might be biased in favor of the most numerous class. This problem can be addressed with a generative approach, which typically requires more parameters to be determined leading to reduced performances in high dimension. Dealing with imbalanced class may be addressed by three main ways (see Japkowicz and Stephen (2002) for a review), resampling, reweighting and one class learning. In **sampling strategies**, either the minority class is oversampled or majority class is undersampled or some combination of the two is deployed. Undersampling (Zhang and Mani, 2003) the majority class would lead to a poor usage of the left-out samples. Sometime one cannot afford such strategy since we are also facing a small sample size problem even for the majority class. Informed oversampling, which goes beyond a trivial duplication of minority class samples, requires the estimation of class conditional distributions in order to generate synthetic samples. Here generative models are required. An alternative, proposed in (Chawla et al., 2002) generate samples along the line segments joining any/all of the k minority class nearest neighbors. Such procedure blindly generalizes the minority area without regard to the majority class, which may be particularly problematic with high-dimensional and potentially skewed class distribution. **Reweighting**, also called cost-sensitive learning, works at an algorithmic level by adjusting the costs of the various classes to counter the class imbalance. Such reweighting can be implemented within SVM (Chang and Lin, 2001) or logistic regression (Friedman et al., 2010) classifiers. Most classifiers of Scikit learn offer such reweighting possibilities. 
The ``class_weight`` parameter can be positioned into the ``"balanced"`` mode which uses the values of $y$ to automatically adjust weights inversely proportional to class frequencies in the input data as $N / (2 N_k)$. ``` # dataset X, y = datasets.make_classification(n_samples=500, n_features=5, n_informative=2, n_redundant=0, n_repeated=0, n_classes=2, random_state=1, shuffle=False) print(*["#samples of class %i = %i;" % (lev, np.sum(y == lev)) for lev in np.unique(y)]) print('# No Reweighting balanced dataset') lr_inter = linear_model.LogisticRegression(C=1) lr_inter.fit(X, y) p, r, f, s = metrics.precision_recall_fscore_support(y, lr_inter.predict(X)) print("SPC: %.3f; SEN: %.3f" % tuple(r)) print('# => The predictions are balanced in sensitivity and specificity\n') # Create imbalanced dataset, by subsampling sample of class 0: keep only 10% of # class 0's samples and all class 1's samples. n0 = int(np.rint(np.sum(y == 0) / 20)) subsample_idx = np.concatenate((np.where(y == 0)[0][:n0], np.where(y == 1)[0])) Ximb = X[subsample_idx, :] yimb = y[subsample_idx] print(*["#samples of class %i = %i;" % (lev, np.sum(yimb == lev)) for lev in np.unique(yimb)]) print('# No Reweighting on imbalanced dataset') lr_inter = linear_model.LogisticRegression(C=1) lr_inter.fit(Ximb, yimb) p, r, f, s = metrics.precision_recall_fscore_support(yimb, lr_inter.predict(Ximb)) print("SPC: %.3f; SEN: %.3f" % tuple(r)) print('# => Sensitivity >> specificity\n') print('# Reweighting on imbalanced dataset') lr_inter_reweight = linear_model.LogisticRegression(C=1, class_weight="balanced") lr_inter_reweight.fit(Ximb, yimb) p, r, f, s = metrics.precision_recall_fscore_support(yimb, lr_inter_reweight.predict(Ximb)) print("SPC: %.3f; SEN: %.3f" % tuple(r)) print('# => The predictions are balanced in sensitivity and specificity\n') ``` ## Confidence interval cross-validation Confidence interval CI classification accuracy measured by cross-validation: ![CI 
classification](images/classif_accuracy_95ci_sizes.png) ## Exercise ### Fisher linear discriminant rule Write a class ``FisherLinearDiscriminant`` that implements the Fisher's linear discriminant analysis. This class must be compliant with the scikit-learn API by providing two methods: - ``fit(X, y)`` which fits the model and returns the object itself; - ``predict(X)`` which returns a vector of the predicted values. Apply the object on the dataset presented for the LDA.
github_jupyter
# Scene Classification ## 5. Predict - Import pkg - Load sample data, only first 1000 objects Reference: - https://challenger.ai/competitions - https://github.com/jupyter/notebook/issues/2287 **Tensorboard** 1. Input at command: **tensorboard --logdir=./log** 2. Input at browser: **http://127.0.0.1:6006** ### Import pkg ``` import numpy as np import pandas as pd # import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.image as mpimg import seaborn as sns %matplotlib inline from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from keras.utils.np_utils import to_categorical # convert to one-hot-encoding from keras.models import Sequential, load_model from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler, TensorBoard # import zipfile import os import zipfile import math import time from IPython.display import display import pdb import json from PIL import Image import glob import pickle ``` ### Load sample data, only first 1000 objects ``` input_path = './input' datasetName = 'train' date = '20170904' zip_path = input_path + '/ai_challenger_scene_{0}_{1}.zip'.format(datasetName, date) extract_path = input_path + '/ai_challenger_scene_{0}_{1}'.format(datasetName, date) image_path = extract_path + '/scene_{0}_images_{1}'.format(datasetName, date) scene_classes_path = extract_path + '/scene_classes.csv' scene_annotations_path = extract_path + '/scene_{0}_annotations_{1}.json'.format(datasetName, date) print(input_path) print(zip_path) print(extract_path) print(image_path) print(scene_classes_path) print(scene_annotations_path) scene_classes = pd.read_csv(scene_classes_path, header=None) display(scene_classes.head()) def get_scene_name(lable_number, scene_classes_path): scene_classes = pd.read_csv(scene_classes_path, header=None) return 
scene_classes.loc[lable_number, 2] print(get_scene_name(0, scene_classes_path)) pickleFolder = 'pickle_{0}'.format(datasetName) pickle_path = input_path + '/' + pickleFolder x_val_path = pickle_path + '/x_data0.p' y_val_path = pickle_path + '/y_data0.p' x_val0 = pickle.load(open(x_val_path, mode='rb')) y_val0 = pickle.load(open(y_val_path, mode='rb')) # x_val = x_val0[0:400] # y_val = y_val0[0:400] x_val = x_val0 y_val = y_val0 print(x_val.shape) print(y_val.shape) # x_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size=0.1) # y_train = to_categorical(y_train) y_val = to_categorical(y_val) # print(y_train.shape) print(y_val.shape) x_data_path = pickle_path + '/x_data1.p' y_data_path = pickle_path + '/y_data1.p' x_data = pickle.load(open(x_data_path, mode='rb')) y_data = pickle.load(open(y_data_path, mode='rb')) print(x_data.shape) print(y_data.shape) fig, ax = plt.subplots(1, 2, figsize=(12, 6)) ax[0].imshow(x_data[0]) ax[0].set_title(get_scene_name(y_data[0], scene_classes_path)) ax[1].imshow(x_data[1]) ax[1].set_title(get_scene_name(y_data[1], scene_classes_path)) ``` ### Load model ``` from keras.preprocessing import image from keras.models import Model from keras.layers import Dense, GlobalAveragePooling2D from keras import backend as K # from keras.applications.resnet50 import ResNet50 # from keras.applications.resnet50 import preprocess_input, decode_predictions from keras.applications.inception_v3 import InceptionV3 model = load_model('./model/TopLayer2017-09-23_12-53-59.h5') final_loss, final_acc = model.evaluate(x_val, y_val, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc)) ``` **Train top 2 inception** ``` %%time input_path = './input' datasetName = 'validation' pickle_path = input_path + '/pickle_{0}'.format(datasetName) for i in range(0,10): x_train_path = pickle_path + '/x_data{0}.p'.format(i) y_train_path = pickle_path + '/y_data{0}.p'.format(i) print(x_train_path) print(y_train_path) 
if not os.path.exists(x_train_path): print(x_train_path + ' do not exist!') continue if not os.path.exists(y_train_path): print(y_train_path + ' do not exist!') continue x_train = pickle.load(open(x_train_path, mode='rb')) y_train = pickle.load(open(y_train_path, mode='rb')) y_train = to_categorical(y_train) print(x_data.shape) print(y_data.shape) hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=64), steps_per_epoch=100, epochs=5, #Increase this when not on Kaggle kernel verbose=2, #1 for ETA, 0 for silent validation_data=(x_val, y_val), #For speed callbacks=[annealer, tensorBoard]) final_loss, final_acc = model.evaluate(x_val, y_val, verbose=0) print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc)) print('Done!') ```
github_jupyter
# Data Augmentation ### Goals In this notebook you're going to build a generator that can be used to help create data to train a classifier. There are many cases where this might be useful. If you are interested in any of these topics, you are welcome to explore the linked papers and articles! - With smaller datasets, GANs can provide useful data augmentation that substantially [improve classifier performance](https://arxiv.org/abs/1711.04340). - You have one type of data already labeled and would like to make predictions on [another related dataset for which you have no labels](https://www.nature.com/articles/s41598-019-52737-x). (You'll learn about the techniques for this use case in future notebooks!) - You want to protect the privacy of the people who provided their information so you can provide access to a [generator instead of real data](https://www.ahajournals.org/doi/full/10.1161/CIRCOUTCOMES.118.005122). - You have [input data with many missing values](https://arxiv.org/abs/1806.02920), where the input dimensions are correlated and you would like to train a model on complete inputs. - You would like to be able to identify a real-world abnormal feature in an image [for the purpose of diagnosis](https://link.springer.com/chapter/10.1007/978-3-030-00946-5_11), but have limited access to real examples of the condition. In this assignment, you're going to be acting as a bug enthusiast — more on that later. ### Learning Objectives 1. Understand some use cases for data augmentation and why GANs suit this task. 2. Implement a classifier that takes a mixed dataset of reals/fakes and analyze its accuracy. ## Getting Started ### Data Augmentation Before you implement GAN-based data augmentation, you should know a bit about data augmentation in general, specifically for image datasets. It is [very common practice](https://arxiv.org/abs/1712.04621) to augment image-based datasets in ways that are appropriate for a given dataset. 
This may include having your dataloader randomly flipping images across their vertical axis, randomly cropping your image to a particular size, randomly adding a bit of noise or color to an image in ways that are true-to-life. In general, data augmentation helps to stop your model from overfitting to the data, and allows you to make small datasets many times larger. However, a sufficiently powerful classifier often still overfits to the original examples which is why GANs are particularly useful here. They can generate new images instead of simply modifying existing ones. ### CIFAR The [CIFAR-10 and CIFAR-100](https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf) datasets are extremely widely used within machine learning -- they contain many thousands of “tiny” 32x32 color images of different classes representing relatively common real-world objects like airplanes and dogs, with 10 classes in CIFAR-10 and 100 classes in CIFAR-100. In CIFAR-100, there are 20 “superclasses” which each contain five classes. For example, the “fish” superclass contains “aquarium fish, flatfish, ray, shark, trout”. For the purposes of this assignment, you’ll be looking at a small subset of these images to simulate a small data regime, with only 40 images of each class for training. ![alt text](CIFAR.png) ### Initializations You will begin by importing some useful libraries and packages and defining a visualization function that has been provided. You will also be re-using your conditional generator and functions code from earlier assignments. This will let you control what class of images to augment for your classifier. ``` import torch import torch.nn.functional as F import matplotlib.pyplot as plt from torch import nn from tqdm.auto import tqdm from torchvision import transforms from torchvision.utils import make_grid from torch.utils.data import DataLoader torch.manual_seed(0) # Set for our testing purposes, please do not change! 
def show_tensor_images(image_tensor, num_images=25, size=(3, 32, 32), nrow=5, show=True): ''' Function for visualizing images: Given a tensor of images, number of images, and size per image, plots and prints the images in an uniform grid. ''' image_tensor = (image_tensor + 1) / 2 image_unflat = image_tensor.detach().cpu() image_grid = make_grid(image_unflat[:num_images], nrow=nrow) plt.imshow(image_grid.permute(1, 2, 0).squeeze()) if show: plt.show() ``` #### Generator ``` class Generator(nn.Module): ''' Generator Class Values: input_dim: the dimension of the input vector, a scalar im_chan: the number of channels of the output image, a scalar (CIFAR100 is in color (red, green, blue), so 3 is your default) hidden_dim: the inner dimension, a scalar ''' def __init__(self, input_dim=10, im_chan=3, hidden_dim=64): super(Generator, self).__init__() self.input_dim = input_dim # Build the neural network self.gen = nn.Sequential( self.make_gen_block(input_dim, hidden_dim * 4, kernel_size=4), self.make_gen_block(hidden_dim * 4, hidden_dim * 2, kernel_size=4, stride=1), self.make_gen_block(hidden_dim * 2, hidden_dim, kernel_size=4), self.make_gen_block(hidden_dim, im_chan, kernel_size=2, final_layer=True), ) def make_gen_block(self, input_channels, output_channels, kernel_size=3, stride=2, final_layer=False): ''' Function to return a sequence of operations corresponding to a generator block of DCGAN; a transposed convolution, a batchnorm (except in the final layer), and an activation. 
Parameters: input_channels: how many channels the input feature representation has output_channels: how many channels the output feature representation should have kernel_size: the size of each convolutional filter, equivalent to (kernel_size, kernel_size) stride: the stride of the convolution final_layer: a boolean, true if it is the final layer and false otherwise (affects activation and batchnorm) ''' if not final_layer: return nn.Sequential( nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True), ) else: return nn.Sequential( nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride), nn.Tanh(), ) def forward(self, noise): ''' Function for completing a forward pass of the generator: Given a noise tensor, returns generated images. Parameters: noise: a noise tensor with dimensions (n_samples, input_dim) ''' x = noise.view(len(noise), self.input_dim, 1, 1) return self.gen(x) def get_noise(n_samples, input_dim, device='cpu'): ''' Function for creating noise vectors: Given the dimensions (n_samples, input_dim) creates a tensor of that shape filled with random numbers from the normal distribution. Parameters: n_samples: the number of samples to generate, a scalar input_dim: the dimension of the input vector, a scalar device: the device type ''' return torch.randn(n_samples, input_dim, device=device) def combine_vectors(x, y): ''' Function for combining two vectors with shapes (n_samples, ?) and (n_samples, ?) Parameters: x: (n_samples, ?) the first vector. In this assignment, this will be the noise vector of shape (n_samples, z_dim), but you shouldn't need to know the second dimension's size. y: (n_samples, ?) the second vector. Once again, in this assignment this will be the one-hot class vector with the shape (n_samples, n_classes), but you shouldn't assume this in your code. 
''' return torch.cat([x, y], 1) def get_one_hot_labels(labels, n_classes): ''' Function for combining two vectors with shapes (n_samples, ?) and (n_samples, ?) Parameters: labels: (n_samples, 1) n_classes: a single integer corresponding to the total number of classes in the dataset ''' return F.one_hot(labels, n_classes) ``` ## Training Now you can begin training your models. First, you will define some new parameters: * cifar100_shape: the number of pixels in each CIFAR image, which has dimensions 32 x 32 and three channels (for red, green, and blue) so 3 x 32 x 32 * n_classes: the number of classes in CIFAR100 (e.g. aquarium fish, flatfish, ray, shark, trout — the airplane/automobile/bird/etc. classes belong to CIFAR-10) ``` cifar100_shape = (3, 32, 32) n_classes = 100 ``` And you also include the same parameters from previous assignments: * criterion: the loss function * n_epochs: the number of times you iterate through the entire dataset when training * z_dim: the dimension of the noise vector * display_step: how often to display/visualize the images * batch_size: the number of images per forward/backward pass * lr: the learning rate * device: the device type ``` n_epochs = 10000 z_dim = 64 display_step = 500 batch_size = 64 lr = 0.0002 device = 'cuda' ``` Then, you want to set your generator's input dimension. Recall that for conditional GANs, the generator's input is the noise vector concatenated with the class vector. ``` generator_input_dim = z_dim + n_classes ``` #### Classifier For the classifier, you will use the same code that you wrote in an earlier assignment (the same as previous code for the discriminator as well since the discriminator is a real/fake classifier). 
``` class Classifier(nn.Module): ''' Classifier Class Values: im_chan: the number of channels of the output image, a scalar n_classes: the total number of classes in the dataset, an integer scalar hidden_dim: the inner dimension, a scalar ''' def __init__(self, im_chan, n_classes, hidden_dim=32): super(Classifier, self).__init__() self.disc = nn.Sequential( self.make_classifier_block(im_chan, hidden_dim), self.make_classifier_block(hidden_dim, hidden_dim * 2), self.make_classifier_block(hidden_dim * 2, hidden_dim * 4), self.make_classifier_block(hidden_dim * 4, n_classes, final_layer=True), ) def make_classifier_block(self, input_channels, output_channels, kernel_size=3, stride=2, final_layer=False): ''' Function to return a sequence of operations corresponding to a classifier block; a convolution, a batchnorm (except in the final layer), and an activation (except in the final Parameters: input_channels: how many channels the input feature representation has output_channels: how many channels the output feature representation should have kernel_size: the size of each convolutional filter, equivalent to (kernel_size, kernel_size) stride: the stride of the convolution final_layer: a boolean, true if it is the final layer and false otherwise (affects activation and batchnorm) ''' if not final_layer: return nn.Sequential( nn.Conv2d(input_channels, output_channels, kernel_size, stride), nn.BatchNorm2d(output_channels), nn.LeakyReLU(0.2, inplace=True), ) else: return nn.Sequential( nn.Conv2d(input_channels, output_channels, kernel_size, stride), ) def forward(self, image): ''' Function for completing a forward pass of the classifier: Given an image tensor, returns an n_classes-dimension tensor representing fake/real. 
Parameters: image: a flattened image tensor with im_chan channels ''' class_pred = self.disc(image) return class_pred.view(len(class_pred), -1) ``` #### Pre-training (Optional) You are provided the code to pre-train the models (GAN and classifier) given to you in this assignment. However, this is intended only for your personal curiosity -- for the assignment to run as intended, you should not use any checkpoints besides the ones given to you. ``` # This code is here for you to train your own generator or classifier # outside the assignment on the full dataset if you'd like -- for the purposes # of this assignment, please use the provided checkpoints class Discriminator(nn.Module): ''' Discriminator Class Values: im_chan: the number of channels of the output image, a scalar (MNIST is black-and-white, so 1 channel is your default) hidden_dim: the inner dimension, a scalar ''' def __init__(self, im_chan=3, hidden_dim=64): super(Discriminator, self).__init__() self.disc = nn.Sequential( self.make_disc_block(im_chan, hidden_dim, stride=1), self.make_disc_block(hidden_dim, hidden_dim * 2), self.make_disc_block(hidden_dim * 2, hidden_dim * 4), self.make_disc_block(hidden_dim * 4, 1, final_layer=True), ) def make_disc_block(self, input_channels, output_channels, kernel_size=4, stride=2, final_layer=False): ''' Function to return a sequence of operations corresponding to a discriminator block of the DCGAN; a convolution, a batchnorm (except in the final layer), and an activation (except in the final layer). 
Parameters: input_channels: how many channels the input feature representation has output_channels: how many channels the output feature representation should have kernel_size: the size of each convolutional filter, equivalent to (kernel_size, kernel_size) stride: the stride of the convolution final_layer: a boolean, true if it is the final layer and false otherwise (affects activation and batchnorm) ''' if not final_layer: return nn.Sequential( nn.Conv2d(input_channels, output_channels, kernel_size, stride), nn.BatchNorm2d(output_channels), nn.LeakyReLU(0.2, inplace=True), ) else: return nn.Sequential( nn.Conv2d(input_channels, output_channels, kernel_size, stride), ) def forward(self, image): ''' Function for completing a forward pass of the discriminator: Given an image tensor, returns a 1-dimension tensor representing fake/real. Parameters: image: a flattened image tensor with dimension (im_chan) ''' disc_pred = self.disc(image) return disc_pred.view(len(disc_pred), -1) def train_generator(): gen = Generator(generator_input_dim).to(device) gen_opt = torch.optim.Adam(gen.parameters(), lr=lr) discriminator_input_dim = cifar100_shape[0] + n_classes disc = Discriminator(discriminator_input_dim).to(device) disc_opt = torch.optim.Adam(disc.parameters(), lr=lr) def weights_init(m): if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): torch.nn.init.normal_(m.weight, 0.0, 0.02) if isinstance(m, nn.BatchNorm2d): torch.nn.init.normal_(m.weight, 0.0, 0.02) torch.nn.init.constant_(m.bias, 0) gen = gen.apply(weights_init) disc = disc.apply(weights_init) criterion = nn.BCEWithLogitsLoss() cur_step = 0 mean_generator_loss = 0 mean_discriminator_loss = 0 for epoch in range(n_epochs): # Dataloader returns the batches and the labels for real, labels in dataloader: cur_batch_size = len(real) # Flatten the batch of real images from the dataset real = real.to(device) # Convert the labels from the dataloader into one-hot versions of those labels one_hot_labels = 
get_one_hot_labels(labels.to(device), n_classes).float() image_one_hot_labels = one_hot_labels[:, :, None, None] image_one_hot_labels = image_one_hot_labels.repeat(1, 1, cifar100_shape[1], cifar100_shape[2]) ### Update discriminator ### # Zero out the discriminator gradients disc_opt.zero_grad() # Get noise corresponding to the current batch_size fake_noise = get_noise(cur_batch_size, z_dim, device=device) # Combine the vectors of the noise and the one-hot labels for the generator noise_and_labels = combine_vectors(fake_noise, one_hot_labels) fake = gen(noise_and_labels) # Combine the vectors of the images and the one-hot labels for the discriminator fake_image_and_labels = combine_vectors(fake.detach(), image_one_hot_labels) real_image_and_labels = combine_vectors(real, image_one_hot_labels) disc_fake_pred = disc(fake_image_and_labels) disc_real_pred = disc(real_image_and_labels) disc_fake_loss = criterion(disc_fake_pred, torch.zeros_like(disc_fake_pred)) disc_real_loss = criterion(disc_real_pred, torch.ones_like(disc_real_pred)) disc_loss = (disc_fake_loss + disc_real_loss) / 2 disc_loss.backward(retain_graph=True) disc_opt.step() # Keep track of the average discriminator loss mean_discriminator_loss += disc_loss.item() / display_step ### Update generator ### # Zero out the generator gradients gen_opt.zero_grad() # Pass the discriminator the combination of the fake images and the one-hot labels fake_image_and_labels = combine_vectors(fake, image_one_hot_labels) disc_fake_pred = disc(fake_image_and_labels) gen_loss = criterion(disc_fake_pred, torch.ones_like(disc_fake_pred)) gen_loss.backward() gen_opt.step() # Keep track of the average generator loss mean_generator_loss += gen_loss.item() / display_step if cur_step % display_step == 0 and cur_step > 0: print(f"Step {cur_step}: Generator loss: {mean_generator_loss}, discriminator loss: {mean_discriminator_loss}") show_tensor_images(fake) show_tensor_images(real) mean_generator_loss = 0 mean_discriminator_loss = 0 
cur_step += 1 def train_classifier(): criterion = nn.CrossEntropyLoss() n_epochs = 10 validation_dataloader = DataLoader( CIFAR100(".", train=False, download=True, transform=transform), batch_size=batch_size) display_step = 10 batch_size = 512 lr = 0.0002 device = 'cuda' classifier = Classifier(cifar100_shape[0], n_classes).to(device) classifier_opt = torch.optim.Adam(classifier.parameters(), lr=lr) cur_step = 0 for epoch in range(n_epochs): for real, labels in tqdm(dataloader): cur_batch_size = len(real) real = real.to(device) labels = labels.to(device) ### Update classifier ### # Get noise corresponding to the current batch_size classifier_opt.zero_grad() labels_hat = classifier(real.detach()) classifier_loss = criterion(labels_hat, labels) classifier_loss.backward() classifier_opt.step() if cur_step % display_step == 0: classifier_val_loss = 0 classifier_correct = 0 num_validation = 0 for val_example, val_label in validation_dataloader: cur_batch_size = len(val_example) num_validation += cur_batch_size val_example = val_example.to(device) val_label = val_label.to(device) labels_hat = classifier(val_example) classifier_val_loss += criterion(labels_hat, val_label) * cur_batch_size classifier_correct += (labels_hat.argmax(1) == val_label).float().sum() print(f"Step {cur_step}: " f"Classifier loss: {classifier_val_loss.item() / num_validation}, " f"classifier accuracy: {classifier_correct.item() / num_validation}") cur_step += 1 ``` ## Tuning the Classifier After two courses, you've probably had some fun debugging your GANs and have started to consider yourself a bug master. For this assignment, your mastery will be put to the test on some interesting bugs... well, bugs as in insects. As a bug master, you want a classifier capable of classifying different species of bugs: bees, beetles, butterflies, caterpillar, and more. Luckily, you found a great dataset with a lot of animal species and objects, and you trained your classifier on that. 
But the bug classes don't do as well as you would like. Now your plan is to train a GAN on the same data so it can generate new bugs to make your classifier better at distinguishing between all of your favorite bugs! You will fine-tune your model by augmenting the original real data with fake data and during that process, observe how to increase the accuracy of your classifier with these fake, GAN-generated bugs. After this, you will prove your worth as a bug master. #### Sampling Ratio Suppose that you've decided that although you have this pre-trained general generator and this general classifier, capable of identifying 100 classes with some accuracy (~17%), what you'd really like is a model that can classify the five different kinds of bugs in the dataset. You'll fine-tune your model by augmenting your data with the generated images. Keep in mind that both the generator and the classifier were trained on the same images: the 40 images per class you painstakingly found so your generator may not be great. This is the caveat with data augmentation, ultimately you are still bound by the real data that you have but you want to try and create more. To make your models even better, you would need to take some more bug photos, label them, and add them to your training set and/or use higher quality photos. To start, you'll first need to write some code to sample a combination of real and generated images. Given a probability, `p_real`, you'll need to generate a combined tensor where roughly `p_real` of the returned images are sampled from the real images. Note that you should not interpolate the images here: you should choose each image from the real or fake set with a given probability. 
For example, if your real images are a tensor of `[[1, 2, 3, 4, 5]]` and your fake images are a tensor of `[[-1, -2, -3, -4, -5]]`, and `p_real = 0.2`, two potential return values are `[[1, -2, 3, -4, -5]]` or `[[-1, 2, -3, -4, -5]]` In addition, we will expect the images to remain in the same order to maintain their alignment with their labels (this applies to the fake images too!). <details> <summary> <font size="3" color="green"> <b>Optional hints for <code><font size="4">combine_sample</font></code></b> </font> </summary> 1. This code probably shouldn't be much longer than 3 lines 2. You can index using a set of booleans which have the same length as your tensor 3. You want to generate an unbiased sample, which you can do (for example) with `torch.rand(length_reals) > p`. 4. There are many approaches here that will give a correct answer here. You may find [`torch.rand`](https://pytorch.org/docs/stable/generated/torch.rand.html) or [`torch.bernoulli`](https://pytorch.org/docs/master/generated/torch.bernoulli.html) useful. 5. You don't want to edit an argument in place, so you may find [`cur_tensor.clone()`](https://pytorch.org/docs/stable/tensors.html) useful too, which makes a copy of `cur_tensor`. 
</details> ``` # UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: combine_sample def combine_sample(real, fake, p_real): ''' Function to take a set of real and fake images of the same length (x) and produce a combined tensor with length (x) and sampled at the target probability Parameters: real: a tensor of real images, length (x) fake: a tensor of fake images, length (x) p_real: the probability the images are sampled from the real set ''' #### START CODE HERE #### probs = torch.rand(real.shape[0]) > p_real target_images = real.clone() target_images[probs] = fake[probs] #### END CODE HERE #### return target_images n_test_samples = 9999 test_combination = combine_sample( torch.ones(n_test_samples, 1), torch.zeros(n_test_samples, 1), 0.3 ) # Check that the shape is right assert tuple(test_combination.shape) == (n_test_samples, 1) # Check that the ratio is right assert torch.abs(test_combination.mean() - 0.3) < 0.05 # Make sure that no mixing happened assert test_combination.median() < 1e-5 test_combination = combine_sample( torch.ones(n_test_samples, 10, 10), torch.zeros(n_test_samples, 10, 10), 0.8 ) # Check that the shape is right assert tuple(test_combination.shape) == (n_test_samples, 10, 10) # Make sure that no mixing happened assert torch.abs((test_combination.sum([1, 2]).median()) - 100) < 1e-5 test_reals = torch.arange(n_test_samples)[:, None].float() test_fakes = torch.zeros(n_test_samples, 1) test_saved = (test_reals.clone(), test_fakes.clone()) test_combination = combine_sample(test_reals, test_fakes, 0.3) # Make sure that the sample isn't biased assert torch.abs((test_combination.mean() - 1500)) < 100 # Make sure no inputs were changed assert torch.abs(test_saved[0] - test_reals).sum() < 1e-3 assert torch.abs(test_saved[1] - test_fakes).sum() < 1e-3 test_fakes = torch.arange(n_test_samples)[:, None].float() test_combination = combine_sample(test_reals, test_fakes, 0.3) # Make sure that the order is maintained assert 
torch.abs(test_combination - test_reals).sum() < 1e-4 if torch.cuda.is_available(): # Check that the solution matches the input device assert str(combine_sample( torch.ones(n_test_samples, 10, 10).cuda(), torch.zeros(n_test_samples, 10, 10).cuda(), 0.8 ).device).startswith("cuda") print("Success!") ``` Now you have a challenge: find a `p_real` and a generator image such that your classifier gets an average of a 51% accuracy or higher on the insects, when evaluated with the `eval_augmentation` function. **You'll need to fill in `find_optimal` to find these parameters to solve this part!** Note that if your answer takes a very long time to run, you may need to hard-code the solution it finds. When you're training a generator, you will often have to look at different checkpoints and choose one that does the best (either empirically or using some evaluation method). Here, you are given four generator checkpoints: `gen_1.pt`, `gen_2.pt`, `gen_3.pt`, `gen_4.pt`. You'll also have some scratch area to write whatever code you'd like to solve this problem, but you must return a `p_real` and an image name of your selected generator checkpoint. You can hard-code/brute-force these numbers if you would like, but you are encouraged to try to solve this problem in a more general way. In practice, you would also want a test set (since it is possible to overfit on a validation set), but for simplicity you can just focus on the validation set. ``` # UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: find_optimal def find_optimal(): # In the following section, you can write the code to choose your optimal answer # You can even use the eval_augmentation function in your code if you'd like! 
gen_names = [ "gen_1.pt", "gen_2.pt", "gen_3.pt", "gen_4.pt" ] #### START CODE HERE #### best_p_real, best_gen_name = 0.65, "gen_4.pt" # best_score = -1000000000 # for gen in tqdm(gen_names): # probable_ps = torch.linspace(0, 1, 21) # for ps in probable_ps: # curr_score = eval_augmentation(ps, gen) # if curr_score > best_score: # best_score = curr_score # best_p_real = ps # best_gen_name = gen #### END CODE HERE #### # print(best_p_real, best_gen_name) return best_p_real, best_gen_name def augmented_train(p_real, gen_name): gen = Generator(generator_input_dim).to(device) gen.load_state_dict(torch.load(gen_name)) classifier = Classifier(cifar100_shape[0], n_classes).to(device) classifier.load_state_dict(torch.load("class.pt")) criterion = nn.CrossEntropyLoss() batch_size = 256 train_set = torch.load("insect_train.pt") val_set = torch.load("insect_val.pt") dataloader = DataLoader( torch.utils.data.TensorDataset(train_set["images"], train_set["labels"]), batch_size=batch_size, shuffle=True ) validation_dataloader = DataLoader( torch.utils.data.TensorDataset(val_set["images"], val_set["labels"]), batch_size=batch_size ) display_step = 1 lr = 0.0002 n_epochs = 20 classifier_opt = torch.optim.Adam(classifier.parameters(), lr=lr) cur_step = 0 best_score = 0 for epoch in range(n_epochs): for real, labels in dataloader: real = real.to(device) # Flatten the image labels = labels.to(device) one_hot_labels = get_one_hot_labels(labels.to(device), n_classes).float() ### Update classifier ### # Get noise corresponding to the current batch_size classifier_opt.zero_grad() cur_batch_size = len(labels) fake_noise = get_noise(cur_batch_size, z_dim, device=device) noise_and_labels = combine_vectors(fake_noise, one_hot_labels) fake = gen(noise_and_labels) target_images = combine_sample(real.clone(), fake.clone(), p_real) labels_hat = classifier(target_images.detach()) classifier_loss = criterion(labels_hat, labels) classifier_loss.backward() classifier_opt.step() # Calculate the 
accuracy on the validation set if cur_step % display_step == 0 and cur_step > 0: classifier_val_loss = 0 classifier_correct = 0 num_validation = 0 with torch.no_grad(): for val_example, val_label in validation_dataloader: cur_batch_size = len(val_example) num_validation += cur_batch_size val_example = val_example.to(device) val_label = val_label.to(device) labels_hat = classifier(val_example) classifier_val_loss += criterion(labels_hat, val_label) * cur_batch_size classifier_correct += (labels_hat.argmax(1) == val_label).float().sum() accuracy = classifier_correct.item() / num_validation if accuracy > best_score: best_score = accuracy cur_step += 1 return best_score def eval_augmentation(p_real, gen_name, n_test=20): total = 0 for i in range(n_test): total += augmented_train(p_real, gen_name) return total / n_test best_p_real, best_gen_name = find_optimal() performance = eval_augmentation(best_p_real, best_gen_name) print(f"Your model had an accuracy of {performance:0.1%}") assert performance > 0.512 print("Success!") ``` You'll likely find that the worst performance is when the generator is performing alone: this corresponds to the case where you might be trying to hide the underlying examples from the classifier. Perhaps you don't want other people to know about your specific bugs! ``` accuracies = [] p_real_all = torch.linspace(0, 1, 21) for p_real_vis in tqdm(p_real_all): accuracies += [eval_augmentation(p_real_vis, best_gen_name, n_test=4)] plt.plot(p_real_all.tolist(), accuracies) plt.ylabel("Accuracy") _ = plt.xlabel("Percent Real Images") ``` Here's a visualization of what the generator is actually generating, with real examples of each class above the corresponding generated image. 
``` examples = [4, 41, 80, 122, 160] train_images = torch.load("insect_train.pt")["images"][examples] train_labels = torch.load("insect_train.pt")["labels"][examples] one_hot_labels = get_one_hot_labels(train_labels.to(device), n_classes).float() fake_noise = get_noise(len(train_images), z_dim, device=device) noise_and_labels = combine_vectors(fake_noise, one_hot_labels) gen = Generator(generator_input_dim).to(device) gen.load_state_dict(torch.load(best_gen_name)) fake = gen(noise_and_labels) show_tensor_images(torch.cat([train_images.cpu(), fake.cpu()])) ```
github_jupyter
# Federated learning: aggregation operators In this notebook, we provide an explanation of the implementation of the different federated aggregation operators provided in the framework. Before discussing the different aggregation operators, we must establish the federated configuration (for more information see notebook [Federated learning basic concepts](./federated_learning_basic_concepts.ipynb)). ``` import matplotlib.pyplot as plt import shfl import tensorflow as tf import numpy as np database = shfl.data_base.Emnist() train_data, train_labels, test_data, test_labels = database.load_data() iid_distribution = shfl.data_distribution.IidDataDistribution(database) federated_data, test_data, test_labels = iid_distribution.get_federated_data(num_nodes=5, percent=10) def model_builder(): model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1, input_shape=(28, 28, 1))) model.add(tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Dropout(0.4)) model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1)) model.add(tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Dropout(0.3)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax')) criterion = tf.keras.losses.CategoricalCrossentropy() optimizer = tf.keras.optimizers.RMSprop() metrics = [tf.keras.metrics.categorical_accuracy] return shfl.model.DeepLearningModel(model=model, criterion=criterion, optimizer=optimizer, metrics=metrics) class Reshape(shfl.private.FederatedTransformation): def apply(self, labeled_data): labeled_data.data = np.reshape(labeled_data.data, (labeled_data.data.shape[0], 
labeled_data.data.shape[1], labeled_data.data.shape[2],1)) shfl.private.federated_operation.apply_federated_transformation(federated_data, Reshape()) class Normalize(shfl.private.FederatedTransformation): def __init__(self, mean, std): self.__mean = mean self.__std = std def apply(self, labeled_data): labeled_data.data = (labeled_data.data - self.__mean)/self.__std mean = np.mean(train_data.data) std = np.std(train_data.data) shfl.private.federated_operation.apply_federated_transformation(federated_data, Normalize(mean, std)) test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], test_data.shape[2],1)) ``` Once we have loaded and federated the data and established the learning model, the only step that remains is to establish the aggregation operator. At the moment, the framework has FedAvg and WeightedFedAvg implemented. The implementation of the federated aggregation operators are as follows. ## Federated averaging operator In this section, we detail the implementation of `FedAvg` (see [FedAvg](https://github.com/sherpaai/Sherpa.ai-Federated-Learning-Framework/blob/master/shfl/federated_aggregator/fedavg_aggregator.py)) proposed by Google in this [paper](https://arxiv.org/abs/1602.05629). It is based on the arithmetic mean of the local weights $W_i$ trained in each of the local clients $C_i$. That is, the weights $W$ of the global model after each round of training are $$W = \frac{1}{n_{\rm{C}}} \sum_{i=1}^{n_{\rm{C}}} W_i$$ For its implementation, we create a class that implements the [FederatedAggregator](https://github.com/sherpaai/Sherpa.ai-Federated-Learning-Framework/blob/master/shfl/federated_aggregator/federated_aggregator.py) interface. The method aggregate_weights is overwritten by calculating the mean of the local weights of each client. 
``` import numpy as np from shfl.federated_aggregator.federated_aggregator import FederatedAggregator class FedAvgAggregator(FederatedAggregator): """ Implementation of Federated Averaging Aggregator. It only uses a simple average of the parameters of all the models """ def aggregate_weights(self, clients_params): clients_params_array = np.array(clients_params) num_clients = clients_params_array.shape[0] num_layers = clients_params_array.shape[1] aggregated_weights = np.array([np.mean(clients_params_array[:, layer], axis=0) for layer in range(num_layers)]) return aggregated_weights fedavg_aggregator = FedAvgAggregator() ``` ## Weighted federated averaging operator In this section, we detail the implementation of `WeightedFedAvg` (see [WeightedFedAvg](https://github.com/sherpaai/Sherpa.ai-Federated-Learning-Framework/blob/master/shfl/federated_aggregator/weighted_fedavg_aggregator.py)). It is the weighted version of `FedAvg`. The weight of each client $C_i$ is determined by the amount of client data $n_i$ with respect to total training data $n$. That is, the parameters $W$ of the global model after each round of training are: $$W = \sum_{i=1}^n \frac{n_i}{n} W_i$$ When all clients have the same amount of data, it is equivalent to FedAvg. To implement it, we create a class that implements the `FederatedAggregator` interface. The method `aggregate_weights` is overwritten by calculating the weighted mean of the local parameters of each client. For this purpose, we first weigh the local parameters by percentage and then sum the weighted parameters. ``` import numpy as np from shfl.federated_aggregator.federated_aggregator import FederatedAggregator class WeightedFedAvgAggregator(FederatedAggregator): """ Implementation of Weighted Federated Averaging Aggregator. The aggregation of the parameters is based in the number of data \ in every node. 
""" def aggregate_weights(self, clients_params): clients_params_array = np.array(clients_params) num_clients = clients_params_array.shape[0] num_layers = clients_params_array.shape[1] ponderated_weights = np.array([self._percentage[client] * clients_params_array[client, :] for client in range(num_clients)]) aggregated_weights = np.array([np.sum(ponderated_weights[:, layer], axis=0) for layer in range(num_layers)]) return aggregated_weights weighted_fedavg_aggregator = WeightedFedAvgAggregator() ``` Finally, we are ready to establish the federated government with any of the implemented aggregation operators and start the federated learning process. ``` federated_government = shfl.federated_government.FederatedGovernment(model_builder, federated_data, fedavg_aggregator) federated_government.run_rounds(1, test_data, test_labels) ``` ## Cluster federated averaging operator In this section, we detail the implementation of `ClusterFedAvg` (see [ClusterFedAvg](https://github.com/sherpaai/Sherpa.ai-Federated-Learning-Framework/blob/master/shfl/federated_aggregator/cluster_fedavg_aggregator.py)). Cluster Federated Averaging is based on the aggregation operator used for k-means clustering. When aggregating the centroids of a federated K-means clustering, we are faced with the problem of grouping the clusters for subsequent aggregation. Based on the hypothesis that the closest centroids will belong to the same cluster, we apply K-means over the centroids, in order to group the centroids that belong to the same cluster and to obtain the representation (aggregation) of each group. We choose the new centroids obtained as the aggregation. To implement it, we create a class that implements the `FederatedAggregator` interface. The method `aggregate_weights` is overwritten by applying K-means to the clients centroids. 
``` from shfl.federated_aggregator.federated_aggregator import FederatedAggregator import numpy as np from sklearn.cluster import KMeans class ClusterFedAvgAggregator(FederatedAggregator): """ Implementation of Cluster Average Federated Aggregator. It adds another k-means to find the minimum distance of cluster centroids coming from each node. """ def aggregate_weights(self, clients_params): clients_params_array = np.concatenate((clients_params)) n_clusters = clients_params[0].shape[0] model_aggregator = KMeans(n_clusters=n_clusters, init='k-means++') model_aggregator.fit(clients_params_array) aggregated_weights = np.array(model_aggregator.cluster_centers_) return aggregated_weights ``` We create a federated government of clustering, in order to apply this aggregation operator. ``` c_database = shfl.data_base.Iris() c_train_data, c_train_labels, c_test_data, c_test_labels = c_database.load_data() c_iid_distribution = shfl.data_distribution.IidDataDistribution(c_database) c_federated_data, c_test_data, c_test_labels = c_iid_distribution.get_federated_data(num_nodes=3, percent=50) n_clusters = 3 # Set number of clusters n_features = train_data.shape[1] def clustering_model_builder(): model = shfl.model.KMeansModel(n_clusters=n_clusters, n_features = n_features) return model clustering_aggregator = ClusterFedAvgAggregator() clustering_federated_government = shfl.federated_government.FederatedGovernment(clustering_model_builder, c_federated_data, clustering_aggregator) clustering_federated_government.run_rounds(1, c_test_data, c_test_labels) ```
github_jupyter
# Automated Machine Learning
#### Forecasting away from training data

## Contents
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Data](#Data)
4. [Prepare remote compute and data.](#prepare_remote)
5. [Create the configuration and train a forecaster](#train)
6. [Forecasting from the trained model](#forecasting)
7. [Forecasting away from training data](#forecasting_away)

## Introduction

This notebook demonstrates the full interface of the `forecast()` function.

The best known and most frequent usage of `forecast` enables forecasting on test sets that immediately follow training data.

However, in many use cases it is necessary to continue using the model for some time before retraining it. This happens especially in **high frequency forecasting** when forecasts need to be made more frequently than the model can be retrained. Examples are in Internet of Things and predictive cloud resource scaling.

Here we show how to use the `forecast()` function when a time gap exists between training data and prediction period.

Terminology:
* forecast origin: the last period when the target value is known
* forecast period(s): the period(s) for which the value of the target is desired.
* lookback: how many past periods (before forecast origin) the model function depends on. The larger of number of lags and length of rolling window.
* prediction context: `lookback` periods immediately preceding the forecast origin

![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl-forecasting-function.png)

## Setup

Please make sure you have followed the `configuration.ipynb` notebook so that your ML workspace information is saved in the config file.
``` import os import pandas as pd import numpy as np import logging import warnings import azureml.core from azureml.core.dataset import Dataset from pandas.tseries.frequencies import to_offset from azureml.core.compute import AmlCompute from azureml.core.compute import ComputeTarget from azureml.core.runconfig import RunConfiguration from azureml.core.conda_dependencies import CondaDependencies # Squash warning messages for cleaner output in the notebook warnings.showwarning = lambda *args, **kwargs: None np.set_printoptions(precision=4, suppress=True, linewidth=120) ``` This notebook is compatible with Azure ML SDK version 1.35.0 or later. ``` print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK") from azureml.core.workspace import Workspace from azureml.core.experiment import Experiment from azureml.train.automl import AutoMLConfig ws = Workspace.from_config() # choose a name for the run history container in the workspace experiment_name = "automl-forecast-function-demo" experiment = Experiment(ws, experiment_name) output = {} output["Subscription ID"] = ws.subscription_id output["Workspace"] = ws.name output["SKU"] = ws.sku output["Resource Group"] = ws.resource_group output["Location"] = ws.location output["Run History Name"] = experiment_name pd.set_option("display.max_colwidth", -1) outputDf = pd.DataFrame(data=output, index=[""]) outputDf.T ``` ## Data For the demonstration purposes we will generate the data artificially and use them for the forecasting. ``` TIME_COLUMN_NAME = "date" TIME_SERIES_ID_COLUMN_NAME = "time_series_id" TARGET_COLUMN_NAME = "y" def get_timeseries( train_len: int, test_len: int, time_column_name: str, target_column_name: str, time_series_id_column_name: str, time_series_number: int = 1, freq: str = "H", ): """ Return the time series of designed length. :param train_len: The length of training data (one series). :type train_len: int :param test_len: The length of testing data (one series). 
:type test_len: int :param time_column_name: The desired name of a time column. :type time_column_name: str :param time_series_number: The number of time series in the data set. :type time_series_number: int :param freq: The frequency string representing pandas offset. see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html :type freq: str :returns: the tuple of train and test data sets. :rtype: tuple """ data_train = [] # type: List[pd.DataFrame] data_test = [] # type: List[pd.DataFrame] data_length = train_len + test_len for i in range(time_series_number): X = pd.DataFrame( { time_column_name: pd.date_range( start="2000-01-01", periods=data_length, freq=freq ), target_column_name: np.arange(data_length).astype(float) + np.random.rand(data_length) + i * 5, "ext_predictor": np.asarray(range(42, 42 + data_length)), time_series_id_column_name: np.repeat("ts{}".format(i), data_length), } ) data_train.append(X[:train_len]) data_test.append(X[train_len:]) X_train = pd.concat(data_train) y_train = X_train.pop(target_column_name).values X_test = pd.concat(data_test) y_test = X_test.pop(target_column_name).values return X_train, y_train, X_test, y_test n_test_periods = 6 n_train_periods = 30 X_train, y_train, X_test, y_test = get_timeseries( train_len=n_train_periods, test_len=n_test_periods, time_column_name=TIME_COLUMN_NAME, target_column_name=TARGET_COLUMN_NAME, time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME, time_series_number=2, ) ``` Let's see what the training data looks like. ``` X_train.tail() # plot the example time series import matplotlib.pyplot as plt whole_data = X_train.copy() target_label = "y" whole_data[target_label] = y_train for g in whole_data.groupby("time_series_id"): plt.plot(g[1]["date"].values, g[1]["y"].values, label=g[0]) plt.legend() plt.show() ``` ### Prepare remote compute and data. 
<a id="prepare_remote"></a> The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation. ``` # We need to save thw artificial data and then upload them to default workspace datastore. DATA_PATH = "fc_fn_data" DATA_PATH_X = "{}/data_train.csv".format(DATA_PATH) if not os.path.isdir("data"): os.mkdir("data") pd.DataFrame(whole_data).to_csv("data/data_train.csv", index=False) # Upload saved data to the default data store. ds = ws.get_default_datastore() ds.upload(src_dir="./data", target_path=DATA_PATH, overwrite=True, show_progress=True) train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X)) ``` You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource. > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist. 
``` from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # Choose a name for your CPU cluster amlcompute_cluster_name = "fcfn-cluster" # Verify that cluster does not exist already try: compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name) print("Found existing cluster, use it.") except ComputeTargetException: compute_config = AmlCompute.provisioning_configuration( vm_size="STANDARD_DS12_V2", max_nodes=6 ) compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) ``` ## Create the configuration and train a forecaster <a id="train"></a> First generate the configuration, in which we: * Set metadata columns: target, time column and time-series id column names. * Validate our data using cross validation with rolling window method. * Set normalized root mean squared error as a metric to select the best model. * Set early termination to True, so the iterations through the models will stop when no improvements in accuracy score will be made. * Set limitations on the length of experiment run to 15 minutes. * Finally, we set the task to be forecasting. * We apply the lag lead operator to the target value i.e. we use the previous values as a predictor for the future ones. * [Optional] Forecast frequency parameter (freq) represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information. 
``` from azureml.automl.core.forecasting_parameters import ForecastingParameters lags = [1, 2, 3] forecast_horizon = n_test_periods forecasting_parameters = ForecastingParameters( time_column_name=TIME_COLUMN_NAME, forecast_horizon=forecast_horizon, time_series_id_column_names=[TIME_SERIES_ID_COLUMN_NAME], target_lags=lags, freq="H", # Set the forecast frequency to be hourly ) ``` Run the model selection and training process. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous. ``` from azureml.core.workspace import Workspace from azureml.core.experiment import Experiment from azureml.train.automl import AutoMLConfig automl_config = AutoMLConfig( task="forecasting", debug_log="automl_forecasting_function.log", primary_metric="normalized_root_mean_squared_error", experiment_timeout_hours=0.25, enable_early_stopping=True, training_data=train_data, compute_target=compute_target, n_cross_validations=3, verbosity=logging.INFO, max_concurrent_iterations=4, max_cores_per_iteration=-1, label_column_name=target_label, forecasting_parameters=forecasting_parameters, ) remote_run = experiment.submit(automl_config, show_output=False) remote_run.wait_for_completion() # Retrieve the best model to use it further. _, fitted_model = remote_run.get_output() ``` ## Forecasting from the trained model <a id="forecasting"></a> In this section we will review the `forecast` interface for two main scenarios: forecasting right after the training data, and the more complex interface for forecasting when there is a gap (in the time sense) between training and testing data. ### X_train is directly followed by the X_test Let's first consider the case when the prediction period immediately follows the training data. This is typical in scenarios where we have the time to retrain the model every time we wish to forecast. Forecasts that are made on daily and slower cadence typically fall into this category. 
Retraining the model every time benefits the accuracy because the most recent data is often the most informative. ![Forecasting after training](forecast_function_at_train.png) We use `X_test` as a **forecast request** to generate the predictions. #### Typical path: X_test is known, forecast all upcoming periods ``` # The data set contains hourly data, the training set ends at 01/02/2000 at 05:00 # These are predictions we are asking the model to make (does not contain the target column y), # for 6 periods beginning with 2000-01-02 06:00, which immediately follows the training data X_test y_pred_no_gap, xy_nogap = fitted_model.forecast(X_test) # xy_nogap contains the predictions in the _automl_target_col column. # Those same numbers are output in y_pred_no_gap xy_nogap ``` #### Confidence intervals The forecasting model may be used for the prediction of forecasting intervals by running ```forecast_quantiles()```. This method accepts the same parameters as forecast(). ``` quantiles = fitted_model.forecast_quantiles(X_test) quantiles ``` #### Distribution forecasts Often the figure of interest is not just the point prediction, but the prediction at some quantile of the distribution. This arises when the forecast is used to control some kind of inventory, for example of grocery items or virtual machines for a cloud service. In such case, the control point is usually something like "we want the item to be in stock and not run out 99% of the time". This is called a "service level". Here is how you get quantile forecasts. ``` # specify which quantiles you would like fitted_model.quantiles = [0.01, 0.5, 0.95] # use forecast_quantiles function, not the forecast() one y_pred_quantiles = fitted_model.forecast_quantiles(X_test) # quantile forecasts returned in a DataFrame along with the time and time series id columns y_pred_quantiles ``` #### Destination-date forecast: "just do something" In some scenarios, the X_test is not known. 
The forecast is likely to be weak, because it is missing contemporaneous predictors, which we will need to impute. If you still wish to predict forward under the assumption that the last known values will be carried forward, you can forecast out to "destination date". The destination date still needs to fit within the forecast horizon from training. ``` # We will take the destination date as a last date in the test set. dest = max(X_test[TIME_COLUMN_NAME]) y_pred_dest, xy_dest = fitted_model.forecast(forecast_destination=dest) # This form also shows how we imputed the predictors which were not given. (Not so well! Use with caution!) xy_dest ``` ## Forecasting away from training data <a id="forecasting_away"></a> Suppose we trained a model, some time passed, and now we want to apply the model without re-training. If the model "looks back" -- uses previous values of the target -- then we somehow need to provide those values to the model. ![Forecasting after training](forecast_function_away_from_train.png) The notion of forecast origin comes into play: the forecast origin is **the last period for which we have seen the target value**. This applies per time-series, so each time-series can have a different forecast origin. The part of data before the forecast origin is the **prediction context**. To provide the context values the model needs when it looks back, we pass definite values in `y_test` (aligned with corresponding times in `X_test`). 
``` # generate the same kind of test data we trained on, # but now make the train set much longer, so that the test set will be in the future X_context, y_context, X_away, y_away = get_timeseries( train_len=42, # train data was 30 steps long test_len=4, time_column_name=TIME_COLUMN_NAME, target_column_name=TARGET_COLUMN_NAME, time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME, time_series_number=2, ) # end of the data we trained on print(X_train.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max()) # start of the data we want to predict on print(X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min()) ``` There is a gap of 12 hours between end of training and beginning of `X_away`. (It looks like 13 because all timestamps point to the start of the one hour periods.) Using only `X_away` will fail without adding context data for the model to consume. ``` try: y_pred_away, xy_away = fitted_model.forecast(X_away) xy_away except Exception as e: print(e) ``` How should we read that error message? The forecast origin is at the last time the model saw an actual value of `y` (the target). That was at the end of the training data! The model is attempting to forecast from the end of training data. But the requested forecast periods are past the forecast horizon. We need to provide a definite `y` value to establish the forecast origin. We will use this helper function to take the required amount of context from the data preceding the testing data. Its definition is intentionally simplified to keep the idea in the clear. ``` def make_forecasting_query( fulldata, time_column_name, target_column_name, forecast_origin, horizon, lookback ): """ This function will take the full dataset, and create the query to predict all values of the time series from the `forecast_origin` forward for the next `horizon` horizons. Context from previous `lookback` periods will be included. fulldata: pandas.DataFrame a time series dataset. Needs to contain X and y. 
time_column_name: string which column (must be in fulldata) is the time axis target_column_name: string which column (must be in fulldata) is to be forecast forecast_origin: datetime type the last time we (pretend to) have target values horizon: timedelta how far forward, in time units (not periods) lookback: timedelta how far back does the model look Example: ``` forecast_origin = pd.to_datetime("2012-09-01") + pd.DateOffset(days=5) # forecast 5 days after end of training print(forecast_origin) X_query, y_query = make_forecasting_query(data, forecast_origin = forecast_origin, horizon = pd.DateOffset(days=7), # 7 days into the future lookback = pd.DateOffset(days=1), # model has lag 1 period (day) ) ``` """ X_past = fulldata[ (fulldata[time_column_name] > forecast_origin - lookback) & (fulldata[time_column_name] <= forecast_origin) ] X_future = fulldata[ (fulldata[time_column_name] > forecast_origin) & (fulldata[time_column_name] <= forecast_origin + horizon) ] y_past = X_past.pop(target_column_name).values.astype(float) y_future = X_future.pop(target_column_name).values.astype(float) # Now take y_future and turn it into question marks y_query = y_future.copy().astype( float ) # because sometimes life hands you an int y_query.fill(np.nan) print("X_past is " + str(X_past.shape) + " - shaped") print("X_future is " + str(X_future.shape) + " - shaped") print("y_past is " + str(y_past.shape) + " - shaped") print("y_query is " + str(y_query.shape) + " - shaped") X_pred = pd.concat([X_past, X_future]) y_pred = np.concatenate([y_past, y_query]) return X_pred, y_pred ``` Let's see where the context data ends - it ends, by construction, just before the testing data starts. 
``` print( X_context.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg( ["min", "max", "count"] ) ) print( X_away.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].agg( ["min", "max", "count"] ) ) X_context.tail(5) # Since the length of the lookback is 3, # we need to add 3 periods from the context to the request # so that the model has the data it needs # Put the X and y back together for a while. # They like each other and it makes them happy. X_context[TARGET_COLUMN_NAME] = y_context X_away[TARGET_COLUMN_NAME] = y_away fulldata = pd.concat([X_context, X_away]) # forecast origin is the last point of data, which is one 1-hr period before test forecast_origin = X_away[TIME_COLUMN_NAME].min() - pd.DateOffset(hours=1) # it is indeed the last point of the context assert forecast_origin == X_context[TIME_COLUMN_NAME].max() print("Forecast origin: " + str(forecast_origin)) # the model uses lags and rolling windows to look back in time n_lookback_periods = max(lags) lookback = pd.DateOffset(hours=n_lookback_periods) horizon = pd.DateOffset(hours=forecast_horizon) # now make the forecast query from context (refer to figure) X_pred, y_pred = make_forecasting_query( fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback ) # show the forecast request aligned X_show = X_pred.copy() X_show[TARGET_COLUMN_NAME] = y_pred X_show ``` Note that the forecast origin is at 17:00 for both time-series, and periods from 18:00 are to be forecast. 
``` # Now everything works y_pred_away, xy_away = fitted_model.forecast(X_pred, y_pred) # show the forecast aligned X_show = xy_away.reset_index() # without the generated features X_show[["date", "time_series_id", "ext_predictor", "_automl_target_col"]] # prediction is in _automl_target_col ``` ## Forecasting farther than the forecast horizon <a id="recursive forecasting"></a> When the forecast destination, or the latest date in the prediction data frame, is farther into the future than the specified forecast horizon, the `forecast()` function will still make point predictions out to the later date using a recursive operation mode. Internally, the method recursively applies the regular forecaster to generate context so that we can forecast further into the future. To illustrate the use-case and operation of recursive forecasting, we'll consider an example with a single time-series where the forecasting period directly follows the training period and is twice as long as the forecasting horizon given at training time. ![Recursive_forecast_overview](recursive_forecast_overview_small.png) Internally, we apply the forecaster in an iterative manner and finish the forecast task in two iterations. In the first iteration, we apply the forecaster and get the prediction for the first forecast-horizon periods (y_pred1). In the second iteration, y_pred1 is used as the context to produce the prediction for the next forecast-horizon periods (y_pred2). The combination of (y_pred1 and y_pred2) gives the results for the total forecast periods. A caveat: forecast accuracy will likely be worse the farther we predict into the future since errors are compounded with recursive application of the forecaster. ![Recursive_forecast_iter1](recursive_forecast_iter1.png) ![Recursive_forecast_iter2](recursive_forecast_iter2.png) ``` # generate the same kind of test data we trained on, but with a single time-series and test period twice as long # as the forecast_horizon. 
_, _, X_test_long, y_test_long = get_timeseries( train_len=n_train_periods, test_len=forecast_horizon * 2, time_column_name=TIME_COLUMN_NAME, target_column_name=TARGET_COLUMN_NAME, time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAME, time_series_number=1, ) print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].min()) print(X_test_long.groupby(TIME_SERIES_ID_COLUMN_NAME)[TIME_COLUMN_NAME].max()) # forecast() function will invoke the recursive forecast method internally. y_pred_long, X_trans_long = fitted_model.forecast(X_test_long) y_pred_long # What forecast() function does in this case is equivalent to iterating it twice over the test set as the following. y_pred1, _ = fitted_model.forecast(X_test_long[:forecast_horizon]) y_pred_all, _ = fitted_model.forecast( X_test_long, np.concatenate((y_pred1, np.full(forecast_horizon, np.nan))) ) np.array_equal(y_pred_all, y_pred_long) ``` #### Confidence interval and distributional forecasts AutoML cannot currently estimate forecast errors beyond the forecast horizon set during training, so the `forecast_quantiles()` function will return missing values for quantiles not equal to 0.5 beyond the forecast horizon. ``` fitted_model.forecast_quantiles(X_test_long) ``` Similarly to the simple scenarios illustrated above, forecasting farther than the forecast horizon in other scenarios like 'multiple time-series', 'Destination-date forecast', and 'forecast away from the training data' is also automatically handled by the `forecast()` function.
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Goal" data-toc-modified-id="Goal-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Goal</a></span><ul class="toc-item"><li><span><a href="#Parsed-nt-database" data-toc-modified-id="Parsed-nt-database-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Parsed nt database</a></span><ul class="toc-item"><li><span><a href="#Parsing-nt" data-toc-modified-id="Parsing-nt-1.1.1"><span class="toc-item-num">1.1.1&nbsp;&nbsp;</span>Parsing nt</a></span></li><li><span><a href="#Filter-out-very-low-complexity-seqs" data-toc-modified-id="Filter-out-very-low-complexity-seqs-1.1.2"><span class="toc-item-num">1.1.2&nbsp;&nbsp;</span>Filter out very low complexity seqs</a></span></li><li><span><a href="#Creating-the-kraken2-database" data-toc-modified-id="Creating-the-kraken2-database-1.1.3"><span class="toc-item-num">1.1.3&nbsp;&nbsp;</span>Creating the kraken2 database</a></span><ul class="toc-item"><li><span><a href="#taxonomy" data-toc-modified-id="taxonomy-1.1.3.1"><span class="toc-item-num">1.1.3.1&nbsp;&nbsp;</span>taxonomy</a></span></li><li><span><a href="#adding-to-library-(with-masking)" data-toc-modified-id="adding-to-library-(with-masking)-1.1.3.2"><span class="toc-item-num">1.1.3.2&nbsp;&nbsp;</span>adding to library (with masking)</a></span></li><li><span><a href="#db-build" data-toc-modified-id="db-build-1.1.3.3"><span class="toc-item-num">1.1.3.3&nbsp;&nbsp;</span>db build</a></span></li></ul></li></ul></li><li><span><a href="#Database-of-host-genomes" data-toc-modified-id="Database-of-host-genomes-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Database of host genomes</a></span><ul class="toc-item"><li><span><a href="#Non-mammals:-Getting-genus-level-reps" data-toc-modified-id="Non-mammals:-Getting-genus-level-reps-1.2.1"><span class="toc-item-num">1.2.1&nbsp;&nbsp;</span>Non-mammals: Getting genus-level reps</a></span></li><li><span><a 
href="#Non-mammals:-Getting-genus-level-reps" data-toc-modified-id="Non-mammals:-Getting-genus-level-reps-1.2.2"><span class="toc-item-num">1.2.2&nbsp;&nbsp;</span>Non-mammals: Getting genus-level reps</a></span></li><li><span><a href="#Downloading-genomes" data-toc-modified-id="Downloading-genomes-1.2.3"><span class="toc-item-num">1.2.3&nbsp;&nbsp;</span>Downloading genomes</a></span></li><li><span><a href="#Renaming-sequences" data-toc-modified-id="Renaming-sequences-1.2.4"><span class="toc-item-num">1.2.4&nbsp;&nbsp;</span>Renaming sequences</a></span></li><li><span><a href="#Filtering-out-short-contigs" data-toc-modified-id="Filtering-out-short-contigs-1.2.5"><span class="toc-item-num">1.2.5&nbsp;&nbsp;</span>Filtering out short contigs</a></span></li><li><span><a href="#Database-init" data-toc-modified-id="Database-init-1.2.6"><span class="toc-item-num">1.2.6&nbsp;&nbsp;</span>Database init</a></span><ul class="toc-item"><li><span><a href="#adding-to-library-(with-masking)" data-toc-modified-id="adding-to-library-(with-masking)-1.2.6.1"><span class="toc-item-num">1.2.6.1&nbsp;&nbsp;</span>adding to library (with masking)</a></span></li><li><span><a href="#db-build" data-toc-modified-id="db-build-1.2.6.2"><span class="toc-item-num">1.2.6.2&nbsp;&nbsp;</span>db build</a></span></li><li><span><a href="#move-dbs" data-toc-modified-id="move-dbs-1.2.6.3"><span class="toc-item-num">1.2.6.3&nbsp;&nbsp;</span>move dbs</a></span></li></ul></li></ul></li><li><span><a href="#Database-of-all-vertebrata" data-toc-modified-id="Database-of-all-vertebrata-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Database of all vertebrata</a></span></li><li><span><a href="#Parsed-nt-database" data-toc-modified-id="Parsed-nt-database-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Parsed nt database</a></span><ul class="toc-item"><li><span><a href="#Parsing-nt" data-toc-modified-id="Parsing-nt-1.4.1"><span class="toc-item-num">1.4.1&nbsp;&nbsp;</span>Parsing 
nt</a></span></li><li><span><a href="#Filter-out-very-low-complexity-seqs" data-toc-modified-id="Filter-out-very-low-complexity-seqs-1.4.2"><span class="toc-item-num">1.4.2&nbsp;&nbsp;</span>Filter out very low complexity seqs</a></span></li><li><span><a href="#Creating-the-kraken2-database" data-toc-modified-id="Creating-the-kraken2-database-1.4.3"><span class="toc-item-num">1.4.3&nbsp;&nbsp;</span>Creating the kraken2 database</a></span><ul class="toc-item"><li><span><a href="#taxonomy" data-toc-modified-id="taxonomy-1.4.3.1"><span class="toc-item-num">1.4.3.1&nbsp;&nbsp;</span>taxonomy</a></span></li><li><span><a href="#adding-to-library-(with-masking)" data-toc-modified-id="adding-to-library-(with-masking)-1.4.3.2"><span class="toc-item-num">1.4.3.2&nbsp;&nbsp;</span>adding to library (with masking)</a></span></li><li><span><a href="#db-build" data-toc-modified-id="db-build-1.4.3.3"><span class="toc-item-num">1.4.3.3&nbsp;&nbsp;</span>db build</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#Notes" data-toc-modified-id="Notes-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Notes</a></span></li><li><span><a href="#sessionInfo" data-toc-modified-id="sessionInfo-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>sessionInfo</a></span></li></ul></div> # Goal * Creating custom kraken2 databases for hosts in the dataset: * NCBI-nt sequence with a host taxonomic classification * all existing genomes (or similar relatives) * Using a high `--confidence` score and filtering out all reads with a classification ## Parsed nt database Database of host sequences parsed from nt ### Parsing nt ```{bash} # (py3_genome) @ rick:/ebio/abt3_projects/databases_no-backup/humann2/Georg_animal # get all taxIDs for animal hosts (all taxonomic levels) cut -f 7 /ebio/abt3_projects/Georg_animal_feces/data/mapping/unified_metadata_complete_190529.tsv | \ tail -n +2 | sort -u > unified_metadata_complete_190529_taxIDs.txt # Added 1246547 taxID for Proechimys_semispinosus 
(previously NA) ## number of taxIDs: 212 # getting child nodes taxonkit list --indent "" --ids 10034,10044,100819,100823,100826,10090,101278,10129,101364,101705,10185,102178,103695,103942,111810,111811,111821,117199,1233216,127946,13125,1320375,137523,155067,156569,171269,176039,181096,181117,184237,184711,184712,196523,201439,210855,211598,218793,218836,225396,230659,27610,27675,283035,283924,29065,29067,29077,302418,30338,30390,30422,30520,30532,323367,34869,34880,34924,36229,36239,36245,36305,37601,37610,38527,38626,40830,41261,419612,447135,45802,465717,46689,47308,47729,48139,48150,48155,48156,48668,48745,51300,51331,52619,52631,53277,54133,54292,54496,54556,55149,56213,57570,57571,57865,58317,58319,59474,59529,596110,60710,61852,62902,64668,65484,662561,670354,68724,69300,69811,71011,722674,72542,73334,75140,75839,77149,7823,7962,8010,8022,8032,8038,80427,8168,8284,8324,8384,8407,8501,867383,8790,88116,8839,8843,8847,8869,8895,8919,89252,8928,8964,8969,9031,9054,9103,9157,9187,9209,9231,9261,9268,9272,9315,9316,9317,9337,9351,9355,9358,9365,9394,9463,94993,9586,9595,9606,9611,9612,9615,9627,9644,9654,9657,9659,9662,9668,9685,9689,9691,9711,9720,9742,9768,9785,9794,9796,9798,9807,9809,9823,9825,9833,9838,9858,9860,9861,9874,98928,9894,9897,9913,9925,9927,9938,9940,9970,99804,9983,9986,9993,9996,1246547 | \ sort -u | perl -ne 'print if ! 
/^ *$/' > unified_metadata_complete_190529_taxIDs-all.txt ## number of taxIDs: 740 # parsing nt seqs based on all host taxIDs (and child taxIDs) blastdbcmd -db nt -entry all -outfmt "%T %a %s" | egrep -f <(perl -pe 's/(.+)/^$1 /' unified_metadata_complete_190529_taxIDs-all.txt) | perl -pe 's/(.+) (.+) (.+)/>$2|kraken:taxid|$1\n$3/' > unified_metadata_complete_190529_nt-parsed.fna ## file size: 24G ## number of seqs: 4163523 ``` ### Filter out very low complexity seqs ``` # (rna-tools) @ rick:/ebio/abt3_projects/databases_no-backup/humann2/Georg_animal bbduk.sh -Xmx100g threads=24 entropy=0.1 entropywindow=50 entropyk=5 overwrite=t in=unified_metadata_complete_190529_nt-parsed.fna out=/tmp/global/nyoungblut/vert_genomes/metadata-190529_nt-parse_entropyGt0p1.fna ## file size: 24G ## number of seqs: 4163243 ``` ### Creating the kraken2 database #### taxonomy ``` mkdir -p /tmp/global/nyoungblut/vert_genomes/vert_host_nt/ /tmp/global/nyoungblut/vert_genomes/kraken2/scripts/kraken2-build --db /tmp/global/nyoungblut/vert_genomes/vert_host_nt/ --download-taxonomy --use-ftp ``` > NOTE: I had to use updated master branch to have `--use-ftp` option #### adding to library (with masking) ``` (Bracken2) @ rick:/tmp/global/nyoungblut/vert_genome screen -L -S Krak2B kraken2-build --threads 24 \ --db /tmp/global/nyoungblut/vert_genomes/vert_host_nt/ \ --add-to-library /tmp/global/nyoungblut/vert_genomes/metadata-190529_nt-parse_entropyGt0p1.fna ``` #### db build ``` # (Bracken2) @ rick:/tmp/global/nyoungblut/vert_genomes/vert_host_nt/ screen -L -S Krak2B kraken2-build --build --threads 24 --db /tmp/global/nyoungblut/vert_genomes/vert_host_nt/ ``` ## Database of host genomes Using all genomes from genus-level representatives ### Non-mammals: Getting genus-level reps ``` taxIDs in tmp.txt file: 
10034,10044,100819,100823,100826,10090,101278,10129,101364,101705,10185,102178,103695,103942,111810,111811,111821,117199,1233216,127946,13125,1320375,137523,155067,156569,171269,176039,181096,181117,184237,184711,184712,196523,201439,210855,211598,218793,218836,225396,230659,27610,27675,283035,283924,29065,29067,29077,302418,30338,30390,30422,30520,30532,323367,34869,34880,34924,36229,36239,36245,36305,37601,37610,38527,38626,40830,41261,419612,447135,45802,465717,46689,47308,47729,48139,48150,48155,48156,48668,48745,51300,51331,52619,52631,53277,54133,54292,54496,54556,55149,56213,57570,57571,57865,58317,58319,59474,59529,596110,60710,61852,62902,64668,65484,662561,670354,68724,69300,69811,71011,722674,72542,73334,75140,75839,77149,7823,7962,8010,8022,8032,8038,80427,8168,8284,8324,8384,8407,8501,867383,8790,88116,8839,8843,8847,8869,8895,8919,89252,8928,8964,8969,9031,9054,9103,9157,9187,9209,9231,9261,9268,9272,9315,9316,9317,9337,9351,9355,9358,9365,9394,9463,94993,9586,9595,9606,9611,9612,9615,9627,9644,9654,9657,9659,9662,9668,9685,9689,9691,9711,9720,9742,9768,9785,9794,9796,9798,9807,9809,9823,9825,9833,9838,9858,9860,9861,9874,98928,9894,9897,9913,9925,9927,9938,9940,9970,99804,9983,9986,9993,9996,1246547 (py3_genome) @ rick:/tmp/global/nyoungblut/vert_genomes taxonkit lineage -t tmp.txt | taxonkit reformat -t -f "{g};{s};{S}" | cut -f 5 | perl -pe 's/;/\n/g' | perl -ne 'print if /^.+$/' | sort -u | perl -pe 's/\n/,/g' ncbi-genome-download -o vert_other -H -r 3 -F fasta -s genbank -t 
10001,10033,10034,10043,10044,100819,100823,100826,10088,10090,101278,10128,10129,101364,10162,101675,101705,10184,10185,101898,102177,102178,103695,103942,103951,108820,111810,111811,111821,117199,1233215,1233216,1246547,127946,13124,13125,1320375,137523,155067,156568,156569,1574408,161681,171269,175132,176038,176039,181096,181117,184237,184711,184712,1897773,1960649,196523,201437,201439,210855,211598,218792,218793,218835,218836,225396,230659,233201,27609,27610,27671,27675,283033,283035,283924,29065,29066,29067,29077,302418,30338,30389,30390,30409,30420,30422,30520,30532,30610,323367,325165,339868,34868,34869,34879,34880,34923,34924,36229,36239,36244,36245,36257,36283,36292,36304,36305,36801,37601,37610,38526,38527,38625,38626,39193,392815,39620,40829,40830,41260,41261,419612,42163,43162,44233,447134,447135,45802,465717,46689,47307,47308,47727,47729,48138,48139,48150,48155,48156,48284,48667,48668,48745,50391,51299,51300,51331,51357,52201,52618,52619,52631,53276,53277,54132,54133,54136,54292,54360,54491,54496,54555,54556,55149,56213,56266,56298,57569,57570,57571,57865,58316,58317,58318,58319,59474,59529,60710,61852,62902,64668,651673,65484,662560,662561,670354,68724,69300,69509,69811,71011,720582,722674,72542,73334,75140,75839,77141,77149,7822,7823,7956,7961,7962,8009,8010,8016,8022,8028,8032,8033,8038,80427,8166,8168,8283,8284,8324,8383,8384,8399,8407,8500,8501,8523,8556,8583,867383,86974,86976,8703,8789,8790,88115,88116,8835,8839,8842,8843,8847,8852,8867,8869,8894,8895,8918,8919,89252,8927,8928,8952,8956,8963,8964,8968,8969,9030,9031,9053,9054,9102,9103,9154,9157,9181,9186,9187,9207,9209,9260,9261,9266,9268,9271,9272,9312,9315,9316,9317,9336,9337,9350,9351,9353,9355,9357,9358,9364,9365,9379,9394,9463,9469,94992,94993,9554,9586,9592,9593,9595,9605,9606,9611,9612,9615,9625,9627,9639,9644,9653,9654,9656,9657,9658,9659,9661,9662,9665,9668,9682,9685,9688,9689,9691,9710,9711,9717,9720,9741,9742,9757,9766,9768,9784,9785,9789,9790,9794,9796,9798,9806,9807,9808,9809,9822,9
823,9825,9832,9833,9836,9838,9857,9858,9859,9860,9861,9867,9871,9874,98927,98928,9893,9894,9896,9897,9903,9913,9922,9925,9926,9927,9933,9935,9938,9940,9969,9970,9973,9980,99804,9983,9984,9986,9992,9993,9996 --dry-run vertebrate_other | sort # Selected one genome per species ## If multiple genomes for a species, prioritized refseq # Created a table of accession --> taxID: vert_host_other_asmbl-taxID.tsv ``` ### Non-mammals: Getting genus-level reps ``` taxIDs in tmp.txt file: 10034,10044,100819,100823,100826,10090,101278,10129,101364,101705,10185,102178,103695,103942,111810,111811,111821,117199,1233216,127946,13125,1320375,137523,155067,156569,171269,176039,181096,181117,184237,184711,184712,196523,201439,210855,211598,218793,218836,225396,230659,27610,27675,283035,283924,29065,29067,29077,302418,30338,30390,30422,30520,30532,323367,34869,34880,34924,36229,36239,36245,36305,37601,37610,38527,38626,40830,41261,419612,447135,45802,465717,46689,47308,47729,48139,48150,48155,48156,48668,48745,51300,51331,52619,52631,53277,54133,54292,54496,54556,55149,56213,57570,57571,57865,58317,58319,59474,59529,596110,60710,61852,62902,64668,65484,662561,670354,68724,69300,69811,71011,722674,72542,73334,75140,75839,77149,7823,7962,8010,8022,8032,8038,80427,8168,8284,8324,8384,8407,8501,867383,8790,88116,8839,8843,8847,8869,8895,8919,89252,8928,8964,8969,9031,9054,9103,9157,9187,9209,9231,9261,9268,9272,9315,9316,9317,9337,9351,9355,9358,9365,9394,9463,94993,9586,9595,9606,9611,9612,9615,9627,9644,9654,9657,9659,9662,9668,9685,9689,9691,9711,9720,9742,9768,9785,9794,9796,9798,9807,9809,9823,9825,9833,9838,9858,9860,9861,9874,98928,9894,9897,9913,9925,9927,9938,9940,9970,99804,9983,9986,9993,9996,1246547 taxonkit lineage -t tmp.txt | taxonkit reformat -t -f "{g};{s};{S}" | cut -f 5 | perl -pe 's/;/\n/g' | perl -ne 'print if /^.+$/' | sort -u | perl -pe 's/\n/,/g' ncbi-genome-download -o vert_mam -H -r 3 -F fasta -s genbank -t 
10001,10043,10044,100819,100823,100826,10088,10090,101278,10128,10129,101364,10162,101675,101705,10184,10185,101898,102177,102178,103695,103942,103951,108820,111810,111811,111821,117199,1233215,1233216,1246547,127946,13124,13125,1320375,137523,155067,156568,156569,1574408,161681,171269,175132,176038,176039,181096,181117,184237,184711,184712,1897773,1960649,196523,201437,201439,210855,211598,218792,218793,218835,218836,225396,230659,233201,27609,27610,27671,27675,283033,283035,283924,29065,29066,29067,29077,302418,30338,30389,30390,30409,30420,30422,30520,30532,30610,323367,325165,339868,34868,34869,34879,34880,34923,34924,36229,36239,36244,36245,36257,36283,36292,36304,36305,36801,37601,37610,38526,38527,38625,38626,39193,392815,39620,40829,40830,41260,41261,419612,42163,43162,44233,447134,447135,45802,465717,46689,47307,47308,47727,47729,48138,48139,48150,48155,48156,48284,48667,48668,48745,50391,51299,51300,51331,51357,52201,52618,52619,52631,53276,53277,54132,54133,54136,54292,54360,54491,54496,54555,54556,55149,56213,56266,56298,57569,57570,57571,57865,58316,58317,58318,58319,59474,59529,60710,61852,62902,64668,651673,65484,662560,662561,670354,68724,69300,69509,69811,71011,720582,722674,72542,73334,75140,75839,77141,77149,7822,7823,7956,7961,7962,8009,8010,8016,8022,8028,8032,8033,8038,80427,8166,8168,8283,8284,8324,8383,8384,8399,8407,8500,8501,8523,8556,8583,867383,86974,86976,8703,8789,8790,88115,88116,8835,8839,8842,8843,8847,8852,8867,8869,8894,8895,8918,8919,89252,8927,8928,8952,8956,8963,8964,8968,8969,9030,9031,9053,9054,9102,9103,9154,9157,9181,9186,9187,9207,9209,9260,9261,9266,9268,9271,9272,9312,9315,9316,9317,9336,9337,9350,9351,9353,9355,9357,9358,9364,9365,9379,9394,9463,9469,94992,94993,9554,9586,9592,9593,9595,9605,9606,9611,9612,9615,9625,9627,9639,9644,9653,9654,9656,9657,9658,9659,9661,9662,9665,9668,9682,9685,9688,9689,9691,9710,9711,9717,9720,9741,9742,9757,9766,9768,9784,9785,9789,9790,9794,9796,9798,9806,9807,9808,9809,9822,9823,9825,983
2,9833,9836,9838,9857,9858,9859,9860,9861,9867,9871,9874,98927,98928,9893,9894,9896,9897,9903,9913,9922,9925,9926,9927,9933,9935,9938,9940,9969,9970,9973,9980,99804,9983,9984,9986,9992,9993,9996 --dry-run vertebrate_mammalian | sort ``` ### Downloading genomes ``` (py3_genome) @ rick:/tmp/global/nyoungblut/vert_genomes # non-mammals ncbi-genome-download -o vert_other -H -p 24 -r 3 -F fasta -s refseq -A GCA_004143745.1,GCA_004320145.1,GCA_900302645.1,GCA_901001165.1,GCF_000002315.6,GCF_000146605.2,GCF_000691405.1,GCF_000708925.1,GCF_000951615.1,GCF_001522545.3,GCF_002163495.1,GCF_003342905.1,GCF_003850225.1,GCF_004634155.1,GCF_900067755.1 vertebrate_other ncbi-genome-download -o vert_other -H -p 24 -r 3 -F fasta -s genbank -A GCA_004143745.1,GCA_004320145.1,GCA_900302645.1,GCA_901001165.1,GCF_000002315.6,GCF_000146605.2,GCF_000691405.1,GCF_000708925.1,GCF_000951615.1,GCF_001522545.3,GCF_002163495.1,GCF_003342905.1,GCF_003850225.1,GCF_004634155.1,GCF_900067755.1 vertebrate_other # mammals ncbi-genome-download -o vert_mam -H -p 24 -r 3 -F fasta -s refseq -A GCA_001305905.1,GCF_002263795.1,GCA_004027775.1,GCF_000767585.1,GCF_000311805.1,GCF_000002285.3,GCF_001704415.1,GCA_000751575.1,GCA_004027855.1,GCA_000164785.2,GCF_002863925.1,GCF_000696695.1,GCF_000296755.1,GCF_000181335.3,GCA_004027185.1,GCF_000151905.2,GCA_002995585.1,GCF_000001405.38,GCF_000001905.1,GCA_004027085.1,GCF_000001635.26,GCA_001305785.1,GCA_000004035.1,GCA_000191605.1,GCF_000003625.3,GCF_002742125.1,GCF_001857705.1,GCF_002099425.1,GCA_004348235.1,GCA_003071005.1,GCA_001707965.1,GCA_004026625.1,GCA_002215935.1,GCA_004024825.1,GCF_000003025.6,GCA_000612945.1,GCF_003160815.1 vertebrate_mammalian ncbi-genome-download -o vert_mam -H -p 24 -r 3 -F fasta -s genbank -A 
GCA_001305905.1,GCF_002263795.1,GCA_004027775.1,GCF_000767585.1,GCF_000311805.1,GCF_000002285.3,GCF_001704415.1,GCA_000751575.1,GCA_004027855.1,GCA_000164785.2,GCF_002863925.1,GCF_000696695.1,GCF_000296755.1,GCF_000181335.3,GCA_004027185.1,GCF_000151905.2,GCA_002995585.1,GCF_000001405.38,GCF_000001905.1,GCA_004027085.1,GCF_000001635.26,GCA_001305785.1,GCA_000004035.1,GCA_000191605.1,GCF_000003625.3,GCF_002742125.1,GCF_001857705.1,GCF_002099425.1,GCA_004348235.1,GCA_003071005.1,GCA_001707965.1,GCA_004026625.1,GCA_002215935.1,GCA_004024825.1,GCF_000003025.6,GCA_000612945.1,GCF_003160815.1 vertebrate_mammalian ``` ### Renaming sequences Using `>SEQID|kraken:taxid|TAXID` Created tables for mapping genome accession to taxID: * non-mammals: vert_host_other_asmbl-taxID.tsv * mammals: vert_host_mam_asmbl-taxID.tsv ``` (py3_genome) @ rick:/tmp/global/nyoungblut/vert_genomes/ /ebio/abt3_projects/Georg_animal_feces/code/rename_genome.py vert_host_other_asmbl-taxID.tsv vert_other.fna /ebio/abt3_projects/Georg_animal_feces/code/rename_genome.py vert_host_mam_asmbl-taxID.tsv vert_mam.fna ``` ### Filtering out short contigs ``` (py3_genome) @ rick:/tmp/global/nyoungblut/vert_genomes/ bioawk -cfastx 'length($seq)>=1000{print ">"$name"\n"$seq}' vert_other.fna > vert_other_filt.fna bioawk -cfastx 'length($seq)>=1000{print ">"$name"\n"$seq}' vert_mam.fna > vert_mam_filt.fna ``` Summary of trimming: ``` vert_other.fna: 735842 vert_other_filt.fna: 344581 vert_mam.fna: 15392143 vert_mam_filt.fna: 3190285 ``` ### Database init ``` # taxonomy (Bracken2) @ rick:/tmp/global/nyoungblut/vert_genomes/ DBDIR=/tmp/global/nyoungblut/vert_genomes/vert_host_genome/ mkdir -p $DBDIR /tmp/global/nyoungblut/vert_genomes/kraken2/scripts/kraken2-build --db $DBDIR --download-taxonomy --use-ftp ``` NOTE: I had to use updated master branch to have --use-ftp option #### adding to library (with masking) ``` (Bracken2) @ rick:/tmp/global/nyoungblut/vert_genomes/ screen -L -S Krak2B-other kraken2-build 
--threads 24 --db $DBDIR \ --add-to-library /tmp/global/nyoungblut/vert_genomes/vert_other_filt.fna screen -L -S Krak2B-mam kraken2-build --threads 24 --db $DBDIR \ --add-to-library /tmp/global/nyoungblut/vert_genomes/vert_mam_filt.fna ``` -- TO HERE for /tmp/global2/ -- #### db build ``` # (Bracken2) @ rick:/tmp/global/nyoungblut/vert_genomes/ screen -L -S Krak2B kraken2-build --build --threads 24 --db $DBDIR ``` #### move dbs ``` (base) @ rick:/ebio/abt3_projects/databases_no-backup/kraken2 mv /tmp/global/nyoungblut/vert_genomes/vert_host_nt/ vertebrata_nt_db mv /tmp/global/nyoungblut/vert_genomes/vert_host_genome/ vertebrata_genome_db ``` ## Database of all vertebrata ## Parsed nt database Database of host sequences parsed from nt ### Parsing nt ```{bash} # (py3_genome) @ rick:/tmp/global2/nyoungblut/vert_genomes/Kraken2_filt/vertebrata_nt # get all lineage taxIDs for all vertebrata taxonkit list --indent "" --ids 7742 | sort -u | perl -ne 'print if ! /^ *$/' > vert_taxIDs.txt # number of taxIDs = 87878 # parsing nt seqs based on all host taxIDs (and child taxIDs) blastdbcmd -db nt -entry all -outfmt "%T %a %s" | \ egrep -f <(perl -pe 's/(.+)/^$1 /' vert_taxIDs.txt) | \ perl -pe 's/(.+) (.+) (.+)/>$2|kraken:taxid|$1\n$3/' > vert_nt-parsed.fna ## file size: 73G ## number of seqs: 18061099 ``` ### Filter out very low complexity seqs ``` # (rna-tools) @ rick:/tmp/global2/nyoungblut/vert_genomes/Kraken2_filt/vertebrata_nt bbduk.sh -Xmx100g threads=24 entropy=0.1 entropywindow=50 entropyk=5 overwrite=t in=vert_nt-parsed.fna out=vert_nt-parsed_entropyGt0p1.fna ## file size: 74G ## number of seqs: 18060312 ``` ### Creating the kraken2 database #### taxonomy ``` (Bracken2) @ rick:/tmp/global2/nyoungblut/vert_genomes/Kraken2_filt/vertebrata_nt mkdir -p vertebrata_nt /tmp/global/nyoungblut/vert_genomes/kraken2/scripts/kraken2-build --db vertebrata_nt --download-taxonomy --use-ftp ``` > NOTE: I had to use updated master branch to have `--use-ftp` option #### adding to 
library (with masking) ``` (Bracken2) @ rick:/tmp/global2/nyoungblut/vert_genomes/Kraken2_filt/vertebrata_nt screen -L -S Krak2B kraken2-build --threads 24 \ --db vertebrata_nt --add-to-library vert_nt-parsed_entropyGt0p1.fna ``` #### db build ``` # (Bracken2) @ rick:/tmp/global2/nyoungblut/vert_genomes/Kraken2_filt/vertebrata_nt screen -L -S Krak2B kraken2-build --build --threads 24 --db vertebrata_nt ``` *** # Notes * final location of databases: `/ebio/abt3_projects/databases_no-backup/kraken2/` * databases can be used in `ikraken2` rule in LLMGQC version >= 0.8.4 # sessionInfo ``` sessionInfo() ```
github_jupyter
``` import numpy as np from scipy.interpolate import interp1d # -- astropy -- import astropy.time import astropy.coordinates import astropy.units as u from astropy.io import fits # -- feasibgs -- from feasibgs import util as UT from feasibgs import skymodel as Sky # -- others -- import speclite import specsim.simulator import matplotlib as mpl import matplotlib.pyplot as plt mpl.rcParams['text.usetex'] = True mpl.rcParams['font.family'] = 'serif' mpl.rcParams['axes.linewidth'] = 1.5 mpl.rcParams['axes.xmargin'] = 1 mpl.rcParams['xtick.labelsize'] = 'x-large' mpl.rcParams['xtick.major.size'] = 5 mpl.rcParams['xtick.major.width'] = 1.5 mpl.rcParams['ytick.labelsize'] = 'x-large' mpl.rcParams['ytick.major.size'] = 5 mpl.rcParams['ytick.major.width'] = 1.5 mpl.rcParams['legend.frameon'] = False %matplotlib inline fdecam = fits.open(''.join([UT.dat_dir(), 'decalsobs-zpt-dr3-allv2.fits'])) decam = fdecam[1].data keep = (decam['AIRMASS'] != 0.0) print("%i of %i have airmass != 0." % (np.sum(keep), decam['AIRMASS'].shape[1])) time = astropy.time.Time(decam['DATE'], format='jd') location = astropy.coordinates.EarthLocation.from_geodetic( lat='-30d10m10.78s', lon='-70d48m23.49s', height=2241.4*u.m) moon_position = astropy.coordinates.get_moon(time, location) moon_ra = moon_position.ra.value moon_dec = moon_position.dec.value moon_position_altaz = moon_position.transform_to(astropy.coordinates.AltAz(obstime=time, location=location)) moon_alt = moon_position_altaz.alt.value moon_az = moon_position_altaz.az.value def separation(ra1, dec1, ra2, dec2): pi2 = np.radians(90) alpha = np.cos(np.radians(ra1)-np.radians(ra2)) first = np.cos(pi2-np.radians(dec1))*np.cos(pi2-np.radians(dec2)) second = np.sin(pi2-np.radians(dec1))*np.sin(pi2-np.radians(dec2))*alpha return np.arccos(first+second)*180/np.pi separation_angle = separation(decam['RA'], decam['DEC'], ra2=moon_ra, dec2=moon_dec) fig = plt.figure(figsize=(15,5)) sub = fig.add_subplot(131) sub.scatter(moon_alt, decam['MOONPHASE'], 
c='k', s=1) #sub.scatter(boss_blue['MOON_ALT'], boss_blue['MOON_ILL'], c='k', s=1) sub.set_xlabel('Moon Altitude', fontsize=20) sub.set_xlim([0., 90.]) sub.set_ylabel('Moon Illumination', fontsize=20) sub.set_ylim([0., 1.]) sub = fig.add_subplot(132) sub.scatter(decam['MOONSEP'], decam['MOONPHASE'], c='k', s=1) #sub.scatter(boss_blue['MOON_SEP'], boss_blue['MOON_ILL'], c='k', s=1) sub.set_xlabel('Moon Separation', fontsize=20) sub.set_xlim([0., 180.]) sub.set_ylim([0., 1.]) sub = fig.add_subplot(133) sub.scatter(decam['AIRMASS'], decam['MOONPHASE'], c='k', s=1) #sub.scatter(boss_blue['AIRMASS'], boss_blue['MOON_ILL'], c='k', s=1) sub.set_xlabel('Airmass', fontsize=20) sub.set_xlim([1., 2.]) sub.set_ylim([0., 1.]) import pandas as pd f = ''.join([UT.code_dir(), 'dat/sky/MoonResults.csv']) coeffs = pd.DataFrame.from_csv(f) coeffs.columns = [ 'wl', 'model', 'data_var', 'unexplained_var',' X2', 'rX2', 'c0', 'c_am', 'tau', 'tau2', 'c_zodi', 'c_isl', 'sol', 'I', 't0', 't1', 't2', 't3', 't4', 'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec', 'c2', 'c3', 'c4', 'c5', 'c6'] # keep moon models twi_coeffs = coeffs[coeffs['model'] == 'twilight'] coeffs = coeffs[coeffs['model'] == 'moon'] # order based on wavelengths for convenience wave_sort = np.argsort(np.array(coeffs['wl'])) for k in coeffs.keys(): coeffs[k] = np.array(coeffs[k])[wave_sort] for k in twi_coeffs.keys(): twi_coeffs[k] = np.array(twi_coeffs[k])[wave_sort] def cI_twi(alpha, delta, airmass): twi = ( twi_coeffs['t0'] * np.abs(alpha) + # CT2 twi_coeffs['t1'] * np.abs(alpha)**2 + # CT1 twi_coeffs['t2'] * np.abs(delta)**2 + # CT3 twi_coeffs['t3'] * np.abs(delta) # CT4 ) * np.exp(-twi_coeffs['t4'] * airmass) + twi_coeffs['c0'] return np.array(twi) specsim_sky = Sky.specsim_initialize('desi') specsim_wave = specsim_sky._wavelength # Ang cr_def = specsim_sky.moon.KS_CR cm0_def = specsim_sky.moon.KS_CM0 cm1_def = specsim_sky.moon.KS_CM1 def 
KSsky(airmass, moonill, moonalt, moonsep): specsim_sky.airmass = airmass specsim_sky.moon.moon_phase = np.arccos(2.*moonill - 1)/np.pi specsim_sky.moon.moon_zenith = (90. - moonalt) * u.deg specsim_sky.moon.separation_angle = moonsep * u.deg specsim_sky.moon.KS_CR = cr_def specsim_sky.moon.KS_CM0 = cm0_def specsim_sky.moon.KS_CM1 = cm1_def return specsim_sky.surface_brightness.value def KSrescaled_twi_sky(airmass, moonill, moonalt, moonsep, sun_alt, sun_sep): specsim_sky.airmass = airmass specsim_sky.moon.moon_phase = np.arccos(2.*moonill - 1)/np.pi specsim_sky.moon.moon_zenith = (90. - moonalt) * u.deg specsim_sky.moon.separation_angle = moonsep * u.deg specsim_sky.moon.KS_CR = 458173.535128 specsim_sky.moon.KS_CM0 = 5.540103 specsim_sky.moon.KS_CM1 = 178.141045 I_ks_rescale = specsim_sky.surface_brightness if sun_alt > -20.: I_twi = cI_twi(sun_alt, sun_sep, airmass)/np.pi I_twi_interp = interp1d(10.*np.array(coeffs['wl']), I_twi, fill_value='extrapolate') return I_ks_rescale.value + I_twi_interp(specsim_wave.value) else: return I_ks_rescale.value cut_g = (decam['FILTER'] == 'g') & (moon_alt > 10.) & (decam['TRANSP'] > .75) & (decam['TRANSP'] < 1.3) & keep cut_r = (decam['FILTER'] == 'r') & (moon_alt > 10.) & (decam['TRANSP'] > .75) & (decam['TRANSP'] < 1.3) & keep cut_z = (decam['FILTER'] == 'z') & (moon_alt > 10.) 
& (decam['TRANSP'] > .75) & (decam['TRANSP'] < 1.3) & keep def get_KSsky_mag(band): filter_response = speclite.filters.load_filter('decam2014-{}'.format(band)) if band == 'g': cut = cut_g elif band == 'r': cut = cut_r elif band == 'z': cut = cut_z nrows = np.sum(cut) sky_brightness = np.empty(nrows, dtype=float) for i in range(nrows): scattered_moon = KSsky( decam['AIRMASS'][cut][i], decam['MOONPHASE'][cut][i], moon_alt[cut][i], separation_angle[cut][i]) scattered_moon = scattered_moon * 1e-17 * u.erg / (u.Angstrom * u.cm ** 2 * u.s) moon_flux, sky_wlen = filter_response.pad_spectrum(scattered_moon, specsim_wave) sky_brightness[i] = filter_response.get_ab_maggies(moon_flux, sky_wlen) return flux_to_mag(sky_brightness) def get_newSky_band(band): filter_response = speclite.filters.load_filter('decam2014-{}'.format(band)) if band == 'g': cut = cut_g elif band == 'r': cut = cut_r elif band == 'z': cut = cut_z nrows = np.sum(cut) sky_brightness = np.empty(nrows, dtype=float) for i in range(nrows): scattered_moon = KSrescaled_twi_sky( decam['AIRMASS'][cut][i], decam['MOONPHASE'][cut][i], moon_alt[cut][i], separation_angle[cut][i], decam['SUNALT'][cut][i], decam['SUNSEP'][cut][i]) scattered_moon = scattered_moon * 1e-17 * u.erg / (u.Angstrom * u.cm ** 2 * u.s) moon_flux, sky_wlen = filter_response.pad_spectrum(scattered_moon, specsim_wave) sky_brightness[i] = filter_response.get_ab_maggies(moon_flux, sky_wlen) return flux_to_mag(sky_brightness) def flux_to_mag(flux): return 22.5 - 2.5*np.log10(flux*10**9) newsky_g = get_newSky_band('g') newsky_r = get_newSky_band('r') newsky_z = get_newSky_band('z') ks_g = get_KSsky_mag('g') ks_r = get_KSsky_mag('r') ks_z = get_KSsky_mag('z') fig = plt.figure(figsize=(17,5)) sub = fig.add_subplot(131) sub.scatter(decam['SKYBR'][cut_g], newsky_g, s=10, lw=1) sub.scatter(decam['SKYBR'][cut_g], ks_g, s=10, lw=1) sub.plot([16, 22], [16, 22], c='k', ls='--') sub.set_xlabel('DECam $g$ band', fontsize=20) sub.set_xlim([20., 22]) 
sub.set_ylabel('Sky Models $g$ band', fontsize=20) sub.set_ylim([20., 22]) sub = fig.add_subplot(132) sub.scatter(decam['SKYBR'][cut_r], newsky_r, s=10, lw=1) sub.scatter(decam['SKYBR'][cut_r], ks_r, s=10, lw=1) sub.plot([16, 22], [16, 22], c='k', ls='--') sub.set_xlabel('DECam $r$ band', fontsize=20) sub.set_xlim([19, 22]) sub.set_ylabel('Sky Models $r$ band', fontsize=20) sub.set_ylim([19, 22]) sub = fig.add_subplot(133) sub.scatter(decam['SKYBR'][cut_z], newsky_z, s=10, lw=1, label='rescaled KS + twi') sub.scatter(decam['SKYBR'][cut_z], ks_z, s=10, lw=1, label='original KS') sub.plot([16, 22], [16, 22], c='k', ls='--') sub.legend(loc='upper left', markerscale=10, handletextpad=0, fontsize=20) sub.set_xlabel('DECam $z$ band', fontsize=20) sub.set_xlim([16, 22]) sub.set_ylabel('Sky Models $z$ band', fontsize=20) sub.set_ylim([16, 22]) ```
github_jupyter
# Klein's curve Demonstration of the functionality of CyclePainter. For more information see 3.3 of the thesis. ``` # Initialize jupyter notebook. Calling twice allegedly avoids a bug %matplotlib notebook %matplotlib notebook from cyclepainter import * # Defining polynomial of Klein's curve k.<rho>=CyclotomicField(3) A2.<z,w>=AffineSpace(k,2) klein = w^7-(z-1)*(z-rho)^2*(z-rho^2)^4 # Hand-picked cohomology basis diffs = [ 1/w^5, 1/w^6, 1/w^3 ] # Automorphism of Klein's curve s = lambda z, w: (rho^2*z, rho^2*(z-1)*(z-rho)*(z - rho^2)^2/w^3) ``` ``` # Define the CyclePainter object. # kappa is the radius of avoidance of branch points used in abelfunctions. cp = CyclePainter(klein, kappa=2./5.) # Start the interactive plot. # # Workflow: # - to begin drawing a new path, click on "New path". # - By clicking on the canvas, the path can be now defined. It is recommended to avoid the kappa # circles around branch points. # - Once you are satisfied with the path, click on "Finish path". The last point will be automatically #. connected to the monodromy point (denoted by star). Every path must be starting and ending in the #. monodromy point. # - The radio-buttons on the right indicate the starting sheet of the path. This may be changed at any point. # Go ahead now and define your own path by the above steps. Once "Finish path" button is clicked, # you may execute the next cell. cp.start() # To save a path for processing (possible only AFTER clicking "Finish path"), you need to call p = cp.save_path('my_little_path') # A unique name needs to be given. 
# If you lose reference the path variable, it is now accessibly by its name _ = cp.get_path('my_little_path') # If you want to clean the canvas, you can run cp.clear_canvas() # And if you want to plot a saved path again cp.plot_path('my_little_path') # You may take a look at path's defining projection points and starting sheet _, _ = p.starting_sheet, p.projection_points # Or you may need to access the paths x/y values at some time t \in [0, 1] p.get_x(0.42) p.get_y(0.42) # (these are ordered) # You can integrate an one-form over the path p.integrate(diffs[0]) # Or find the intersection numbers with other paths p.intersection_number(p) # Note: in extreme cases, I have encountered errors in the intersections numbers. Take with a grain of salt. # We can also go back to cyclepainter object to find intersection matrix of multiple paths cp.intersection_matrix(['my_little_path', 'my_little_path']) # You can also apply an automorphism on the path (scroll up back to the plot to see it) p.apply_automorphism(s) # NOTE: # - the monodromy point will be sent to a different place, the ordering of the sheets is hence unreliable # - this function is meant for investigation only, to use the path, one should manually redraw it. # To save all of the paths into a file for future use cp.pickle_paths('pickled_paths_file.pickle') # This created a new file in the working directory. To load paths from such a file cp.load_paths('pickled_paths_file.pickle') ``` ``` # Load pre-pickled paths for the Klein's curve cp.load_paths('klein_curve_paths.pickle') # These paths are now available cp.saved_paths() # I'll go ahead and open a new plot for visibility. 
Please go ahead and close the first one (right-top corner button) cp.start() # this is, by the way, how one of the monodromy paths looks like cp.branch_points[0].show_permutation_path() # and this is the first of the a-cycles we just loaded cp.plot_path('a1') # Verify the intersection matrix cp.intersection_matrix(['a1', 'a2', 'a3', 'b1', 'b2', 'b3']) # Good to go. Calculate the period matrix with this homology basis a_cycles = ['a1', 'a2', 'a3'] b_cycles = ['b1', 'b2', 'b3'] # cp.period_matrix(a_cycles, b_cycles, diffs) # And finally, the highly-symmetric Riemann matrix rm = cp.riemann_matrix(a_cycles, b_cycles, diffs) np.round(rm, 9) ```
github_jupyter
# Linked List ``` from enum import Enum class Position(Enum): START = 0 END = 2 class Node: def __init__(self, data): self.data = data self.next = None def __repr__(self): return f'{self.__class__.__name__}({self.data})' ``` # Insertions 1. Start: **O(1)** 1. Insert After: **O(n)** 1. End: **O(1)** ``` class LinkedList: def __init__(self, ls = []): self.head = None self.tail = None self.count = len(ls) if self.count > 0: for item in ls: self._insertEnd(item) def _insertStart(self, data): self.count += 1 tmp = Node(data) tmp.next = self.head self.head = tmp if self.tail is None: self.tail = self.head return tmp def _insertAfter(self, data, after): self.count += 1 q = self.head while q is not None: if q.data == after: tmp = Node(data) tmp.next = q.next q.next = tmp if tmp.next is None: self.tail = tmp return tmp q = q.next def _insertEnd(self, data): self.count += 1 if self.head is None: tmp = Node(data) self.head = self.tail = tmp return tmp tmp = Node(data) self.tail.next = tmp self.tail = self.tail.next return tmp ``` # Deletions 1. Start: **O(1)** 1. Delete Element: **O(n)** 1. 
End: **O(n)** ``` def _deleteStart(self): if self.count == 0: return self.count -= 1 if self.head == self.tail: self.head = self.tail = None return tmp = self.head self.head = self.head.next return tmp def _deleteEle(self, ele): if self.count == 0 or (self.count == 1 and self.head.data != ele): return if self.head.data == ele: return self._deleteStart() q = self.head while q.next is not None: if q.next.data == ele: tmp = q.next q.next = tmp.next if q.next is None: self.tail = q self.count -= 1 return tmp q = q.next def _deleteEnd(self): if self.count == 0: return self.count -= 1 if self.head is self.tail: self.head = self.tail = None return q = self.head while q.next is not self.tail: q = q.next tmp = self.tail self.tail = q self.tail.next = None return tmp LinkedList._deleteEle = _deleteEle LinkedList._deleteStart = _deleteStart LinkedList._deleteEnd = _deleteEnd ``` # Public Interface ``` def push(self, data, ele = Position.END): method = { Position.START: self._insertStart, Position.END: self._insertEnd }.get(ele, self._insertAfter) if method == self._insertAfter: return self._insertAfter(data, ele) return method(data) def pop(self, ele = Position.END): method = { Position.START: self._deleteStart, Position.END: self._deleteEnd }.get(ele, self._deleteEle) if method == self._deleteEle: return method(ele) return method() LinkedList.push = push LinkedList.pop = pop ``` # Dunders To make life easier ``` def __repr__(self): # For Debug purpose: it returns the string which upon executing, results in exact same object ls = [] q = self.head while q is not None: ls.append(q.data) q = q.next return f'{self.__class__.__name__}({ls})' def __str__(self): # For User, pretty print of object q = self.head s = '' while q and q.next is not None: s += f'{q.data} -> ' q = q.next s += f'{str(q.data)}' if q is not None else None return f'[{s}]' def __len__(self): return self.count LinkedList.__repr__ = __repr__ LinkedList.__str__ = __str__ LinkedList.__len__ = __len__ ``` # Tests ``` 
ls = LinkedList() ls.push(3, Position.END) ls.push(1, Position.START) ls.push(4, ele = 3) ls.push(2, ele = 1) ls.push(0, Position.START) ls.push(5, Position.END) ls.push(6) print(f"Linked List ({len(ls)}) : {ls}") ls print(LinkedList([0, 1, 2, 3, 4, 5, 6])) # Yield same result as all the above statements ls.pop(Position.START) ls.pop(3) ls.pop(Position.END) print(f"Linked List ({len(ls)}) : {ls}") ls ```
github_jupyter
# 1. LGDE.com 일별 지표생성 실습 2일차 (정답) #### 주피터 노트북 단축키 (Windows 환경) | 단축키 | 설명 | 기타 | | --- | --- | --- | | Alt+Enter | 현재 셀 실행 + 다음 셀 추가 | 초기 개발 시에 주로 사용 | | Shift+Enter | 현재 셀 실행 + 다음 셀 이동 | 전체 테스트 시에 주로 사용 | | Ctrl+Enter | 현재 셀 실행 + 이동 안함 | 하나씩 점검 혹은 디버깅 시에 사용 | | Ctrl+/ | 주석 적용 및 해제 | Shift 키로 여러 줄을 선택하고 주석 및 해제 사용 | | Ctrl+s | 전체 저장 | - | ### 주피터 노트북 유의사항 * 모든 셀은 Code, Markdown, Raw 3가지 유형이 존재하며, **파이썬 코드 실행**은 반드시 ***Code*** 모드에서 수행되어야 합니다 * 현재 셀의 실행이 무한 루프에 빠지거나 너무 오래 걸리는 경우 상단 Menu 에서 ***Kernel - Interrupt Kernel*** 메뉴를 통해 현재 셀의 작업만 중지할 수 있습니다 * 메모리 혹은 다양한 이슈로 인해 제대로 동작하지 않는 경우에는 상단 Menu 에서 ***Kernel - Restart Kernel..*** 메뉴를 통해 재시작할 수 있습니다 ## 5. 수집된 데이터 탐색 ### 5-1. 스파크 세션 생성 ``` from pyspark.sql import * from pyspark.sql.functions import * from pyspark.sql.types import * from IPython.display import display, display_pretty, clear_output, JSON spark = ( SparkSession .builder .config("spark.sql.session.timeZone", "Asia/Seoul") .getOrCreate() ) # 노트북에서 테이블 형태로 데이터 프레임 출력을 위한 설정을 합니다 spark.conf.set("spark.sql.repl.eagerEval.enabled", True) # display enabled spark.conf.set("spark.sql.repl.eagerEval.truncate", 100) # display output columns size # 공통 데이터 위치 home_jovyan = "/home/jovyan" work_data = f"{home_jovyan}/work/data" work_dir=!pwd work_dir = work_dir[0] # 로컬 환경 최적화 spark.conf.set("spark.sql.shuffle.partitions", 5) # the number of partitions to use when shuffling data for joins or aggregations. spark.conf.set("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true") spark user26 = spark.read.parquet("user/20201026") user26.printSchema() user26.show(truncate=False) display(user26) purchase26 = spark.read.parquet("purchase/20201026") purchase26.printSchema() purchase26.show(truncate=False) display(purchase26) access26 = spark.read.option("inferSchema", "true").json("access/20201026") access26.printSchema() access26.show(truncate=False) display(access26) ``` ### 5-2. 
수집된 고객, 매출 및 접속 임시 테이블 생성 ``` user26.createOrReplaceTempView("user26") purchase26.createOrReplaceTempView("purchase26") access26.createOrReplaceTempView("access26") spark.sql("show tables '*26'") ``` ### 5-3. SparkSQL을 이용하여 테이블 별 데이터프레임 생성하기 ``` u_signup_condition = "u_signup >= '20201026' and u_signup < '20201027'" user = spark.sql("select u_id, u_name, u_gender from user26").where(u_signup_condition) user.createOrReplaceTempView("user") p_time_condition = "p_time >= '2020-10-26 00:00:00' and p_time < '2020-10-27 00:00:00'" purchase = spark.sql("select from_unixtime(p_time) as p_time, p_uid, p_id, p_name, p_amount from purchase26").where(p_time_condition) purchase.createOrReplaceTempView("purchase") access = spark.sql("select a_id, a_tag, a_timestamp, a_uid from access26") access.createOrReplaceTempView("access") spark.sql("show tables") ``` ### 5-4. 생성된 테이블을 SQL 문을 이용하여 탐색하기 ``` spark.sql("describe user") groupByCount = "select u_gender, count(1) from user group by u_gender" spark.sql(groupByCount) spark.sql("describe purchase") selectClause = "select min(p_amount), max(p_amount) from purchase" spark.sql(selectClause) spark.sql("describe access") countTop="select a_uid, count(1) as a_count from access where a_id = 'login' group by a_uid order by a_count desc" spark.sql(countTop) ``` ## 6. 기본 지표 생성 ### 6-1. DAU (Daily Activer User) 지표를 생성하세요 ``` display(access) distinctAccessUser = "select count(distinct a_uid) as DAU from access" dau = spark.sql(distinctAccessUser) display(dau) ``` ### 6-2. DPU (Daily Paying User) 지표를 생성하세요 ``` display(purchase) distinctPayingUser = "select count(distinct p_uid) as PU from purchase" pu = spark.sql(distinctPayingUser) display(pu) ``` ### 6-3. DR (Daily Revenue) 지표를 생성하세요 ``` display(purchase) sumOfDailyRevenue = "select sum(p_amount) as DR from purchase" dr = spark.sql(sumOfDailyRevenue) display(dr) ``` ### 6-4. 
ARPU (Average Revenue Per User) 지표를 생성하세요 ``` v_dau = dau.collect()[0]["DAU"] v_pu = pu.collect()[0]["PU"] v_dr = dr.collect()[0]["DR"] print("ARPU : {}".format(v_dr / v_dau)) ``` ### 6-5. ARPPU (Average Revenue Per Paying User) 지표를 생성하세요 ``` print("ARPPU : {}".format(v_dr / v_pu)) ``` ## 7. 고급 지표 생성 > 어제 디멘젼을 가져와서 정상 조회 되는 지 확인합니다 ``` yesterday_dimension="dimension/dt=20201025" yesterday = spark.read.parquet(yesterday_dimension) yesterday.printSchema() display(yesterday) ``` ### 7-1. 전체 고객의 ID 추출 ``` yesterday_uids = yesterday.select("d_uid") today_uids = access.select("a_uid") joinCondition = "<어제 uid 와 오늘 uid 값이 일치하는 조건>" joinCondition = yesterday_uids.d_uid == today_uids.a_uid joinHow = "<어제와 오늘 고객정보를 추가하는 방식>" joinHow = "full_outer" all_uids = ( yesterday_uids.join(today_uids, joinCondition, joinHow) .withColumn("uid", when(yesterday.d_uid.isNull(), access.a_uid).otherwise(yesterday.d_uid)) .select("uid").distinct() ) all_uids.printSchema() display(all_uids.orderBy("uid")) uid1 = yesterday.select("d_uid").withColumnRenamed("d_uid", "a_uid") uid2 = access.select("a_uid") all_uids = uid1.union(uid2).distinct() all_uids.printSchema() display(all_uids) ``` ### 7-2. 어제 디멘젼 정보와 결합 ``` joinCondition = "uid 와 yesterday 의 uid 가 일치하는 조건" joinCondition = all_uids.a_uid == yesterday.d_uid joinHow = "left_outer" # uid 에서 생성된 id 값이 모든 user_id 이므로, drop 함수를 통해 기존의 d_uid 는 제거하고, withColumnRenamed 함수를 통해, uid 를 d_uid 로 변경 합니다 uids = ( all_uids.join(yesterday, joinCondition, joinHow) .drop("d_uid") .withColumnRenamed("a_uid", "d_uid") .sort(asc("d_uid")) ) uids.printSchema() display(uids) ``` ### 7-3. 
이름과 성별을 결합 ``` user.show() uids.show() # 실습25) d_name 이 null 이면 u_name 을 사용하고 그렇지 않으면 d_name 을 사용합니다 exprName = expr("case when d_name is null then u_name else d_name end") # 실습26) d_gender 이 null 이면 u_gender 을 사용하고 그렇지 않으면 d_gender 을 사용합니다 exprGender = expr("case when d_gender is null then u_gender else d_gender end") dim1 = ( uids.join(user, uids.d_uid == user.u_id, "left_outer") .withColumn("name", exprName) .withColumn("gender", exprGender) .drop("d_name", "d_gender", "u_id", "u_name", "u_gender") .withColumnRenamed("name", "d_name") .withColumnRenamed("gender", "d_gender") ).orderBy(asc("d_uid")) display(dim1) ``` ### 7-4. 숫자 필드에 널값은 0으로 기본값을 넣어줍니다 ``` # 실습27) d_acount, d_pamount, d_pcount 필드의 기본값을 0으로 넣도록 작성합니다 fillDefaultValue = { "d_acount":0, "d_pamount":0, "d_pcount":0 } dim2 = dim1.na.fill(fillDefaultValue) display(dim2) ``` ### 7-5. 접속횟수를 결합 ``` access_sum = spark.sql("select a_uid, count(a_uid) as a_count from access where a_id = 'login' group by a_uid") access_sum.printSchema() access_sum.show() dim2.printSchema() # 실습28) 오늘 접속 수치(a_count)가 null 이면 디멘젼의 d_acount 값을 사용하고, 그렇지 않으면 d_acount + a_count 를 사용합니다 sumOfAccess = expr("a_count") sumOfAccess = expr("case when a_count is null then d_acount else d_acount + a_count end") dim3 = ( dim2.join(access_sum, dim2.d_uid == access_sum.a_uid, "left_outer") .withColumn("sum_of_access", sumOfAccess) .drop("a_uid", "a_count", "d_acount") .withColumnRenamed("sum_of_access", "d_acount") ).orderBy(asc("d_uid")) dim3.printSchema() display(dim3) ``` ### 7-6. 
매출횟수 및 매출을 결합 ``` display(dim3) dim3.printSchema() purchase_sum = spark.sql("select p_uid, sum(p_amount) as pamount, count(p_uid) as pcount from purchase group by p_uid") display(purchase_sum) purchase_sum.printSchema() # 실습29) 매출 금액의 합(d_pamount + pamount)을 구합니다 sumOfAmount = expr("case when pamount is null then d_pamount else d_pamount + pamount end") # 실습30) 매출 빈도의 합(d_pcount + pcount)을 구합니다 sumOfCount = expr("case when pcount is null then d_pcount else d_pcount + pcount end") dim4 = ( dim3.join(purchase_sum, dim3.d_uid == purchase_sum.p_uid, "left") .withColumn("sum_of_amount", sumOfAmount) .withColumn("sum_of_count", sumOfCount) .drop("p_uid", "d_pamount", "d_pcount", "pamount", "pcount") .withColumnRenamed("sum_of_amount", "d_pamount") .withColumnRenamed("sum_of_count", "d_pcount") ).orderBy(asc("d_uid")) dim4.printSchema() display(dim4) ``` ### 7-7. 최초 구매 일자를 결합 ``` # 실습31) 하루에 여러번 구매가 있을 수 있으므로 group by p_uid 집계를 통해 가장 먼저 구매한 정보 즉, min(p_time)함수를 통해 일시를 선택합니다 selectFirstPurchaseTime = "select p_uid, min(p_time) as p_time from purchase group by p_uid" first_purchase = spark.sql(selectFirstPurchaseTime) first_purchase.printSchema() first_purchase.show() # 실습32) 디멘젼의 최초구매일(d_first_purchase)이 null 이라면 p_time 을 사용하고 그렇지 않으면 d_first_purchase 를 사용합니다 exprFirstPurchase = expr("case when d_first_purchase is null then p_time else d_first_purchase end") dimension = ( dim4.join(first_purchase, dim4.d_uid == first_purchase.p_uid, "left") .withColumn("first_purchase", exprFirstPurchase) .drop("p_uid", "p_time", "d_first_purchase") .withColumnRenamed("first_purchase", "d_first_purchase") ).orderBy("d_uid") dimension.printSchema() display(dimension) ``` ### 7-8. 신규유저를 계산 ``` # 어제와 오늘의 디멘젼 테이블을 이용해 오늘 처음 접속한 신규 유저를 구합니다 today_uids = dimension.select("d_uid") yesterday_uids = yesterday.select("d_uid") nu = today_uids.subtract(yesterday_uids) nu.printSchema() nu.show() v_nu = nu.count() print("NU: {}".format(v_nu)) ``` ### 7-9. 
생성된 디멘젼을 저장소에 저장합니다 ``` dimension.printSchema() target_dir="dimension/dt=20201026" dimension.write.mode("overwrite").parquet(target_dir) ``` ### 7-10. 생성된 디멘젼을 다시 읽어서 출력합니다 ``` newDimension = spark.read.parquet(target_dir) newDimension.printSchema() display(newDimension) ``` ### 7-11. 오늘 생성된 지표를 MySQL 테이블로 저장합니다 > 오늘 이전의 데이터는 유지하고, 신규로 생성된 데이터를 Append 해야 합니다. ``` print("DT:{}, DAU:{}, PU:{}, DR:{}".format("2020-10-26", v_dau, v_pu, v_dr)) today = "2020-10-26" lgde_origin = spark.read.jdbc("jdbc:mysql://mysql:3306/testdb", "testdb.lgde", properties={"user": "sqoop", "password": "sqoop"}).where(col("dt") < lit(today)) lgde_today = spark.createDataFrame([(today, v_dau, v_pu, v_dr)], ["DT", "DAU", "PU", "DR"]) lgde_union = lgde_origin.union(lgde_today) lgde_local = lgde_union.collect() lgde = spark.createDataFrame(lgde_local) lgde.write.mode("overwrite").jdbc("jdbc:mysql://mysql:3306/testdb", "testdb.lgde", properties={"user": "sqoop", "password": "sqoop"}) ```
github_jupyter
# Skin Cancer Training using MONAI ## Overview HAM10000 ("Human Against Machine with 10000 training images") is a popular data set of dermatoscopic images hosted by [Harvard Dataverse](https://dataverse.harvard.edu/) from different populations. It consists of 10015 images consisting of several diagnositic categories including: Actinic keratoses and intraepithelial carcinoma / Bowen's disease (akiec), basal cell carcinoma (bcc), benign keratosis-like lesions (solar lentigines / seborrheic keratoses and lichen-planus like keratoses, bkl), dermatofibroma (df), melanoma (mel), melanocytic nevi (nv) and vascular lesions (angiomas, angiokeratomas, pyogenic granulomas and hemorrhage, vasc). In this example we will demonstrate how to integrate the [MONAI](http://monai.io) framework into Amazon SageMaker using Pytorch and give example code of MONAI pre-processing transforms that can assist with imbalanced datasets and image transformations. We will also show the code to invoke MONAI neural network architectures such as Densenet for image classification and explore structure of Pytorch code to train and serve the model within SageMaker. Additionally, we will cover the SageMaker API calls to launch and manage the compute infrastructure for both model training and hosting for inference using the HAM10000 data set. For more information about the PyTorch in SageMaker, please visit [sagemaker-pytorch-containers](https://github.com/aws/sagemaker-pytorch-containers) and [sagemaker-python-sdk](https://github.com/aws/sagemaker-python-sdk) github repositories. --- ## Setup This notebook was created and tested on an ml.t2.medium notebook instance with 100 GB of EBS and conda_pytorch_p36 kernel. Let's get started by creating a S3 bucket and uploading the HAM10000 dataset to the bucket. <ol> <li>Create an S3 bucket in the same account as the Sagemaker notebook instance. 
<li>Download the skin cancer dataset at <a href="https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DBW86T">HAM10000</a>. <li>Select "Access Dataset" in top right, and select "Original Format Zip". <li>Upload the dataset to the S3 bucket created in step 1. <li>Update the set.env file located in the current directory with the S3 location of the dataverse_files.zip. </ol> The code below will install MONAI framework and dependent packages and setup environment variables. ``` # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 !pip install -r source/requirements.txt import os from pathlib import Path from dotenv import load_dotenv env_path = Path('.') / 'set.env' load_dotenv(dotenv_path=env_path) skin_cancer_bucket=os.environ.get('SKIN_CANCER_BUCKET') skin_cancer_bucket_path=os.environ.get('SKIN_CANCER_BUCKET_PATH') skin_cancer_files=os.environ.get('SKIN_CANCER_FILES') skin_cancer_files_ext=os.environ.get('SKIN_CANCER_FILES_EXT') base_dir = os.environ.get('BASE_DIR') print('Skin Cancer Bucket: '+skin_cancer_bucket) print('Skin Cancer Bucket Prefix: '+skin_cancer_bucket_path) print('Skin Cancer Files: '+skin_cancer_files) print('Skin Cancer Files Ext: '+skin_cancer_files_ext) print('Base Dir: '+base_dir) ``` ## HAM10000 Data Transformation The transform_data.ipynb will download the dataverse_files.zip and perform transformations to build directories by class for training and validation sets from the meta-data. It will also augment the data to create a more balanced data set across the classes for training. The script will upload the transformed dataset HAM10000.tar.gz to the same S3 bucket identifed in set.env for model training. 
``` %run source/transform_data.ipynb ``` ## Data ### Create Sagemaker session and S3 location for transformed HAM10000 dataset ``` import sagemaker sagemaker_session = sagemaker.Session() role = sagemaker.get_execution_role() inputs = sagemaker_session.upload_data(path=base_dir+'HAM10000.tar.gz', bucket=skin_cancer_bucket, key_prefix=skin_cancer_bucket_path) print('input spec (in this case, just an S3 path): {}'.format(inputs)) ``` ## Train Model ### Training The ```monai_skin_cancer.py``` script provides all the code we need for training and hosting a SageMaker model (model_fn function to load a model). The training script is very similar to a training script you might run outside of SageMaker, but you can access useful properties about the training environment through various environment variables, such as: * SM_MODEL_DIR: A string representing the path to the directory to write model artifacts to. These artifacts are uploaded to S3 for model hosting. * SM_NUM_GPUS: The number of gpus available in the current container. * SM_CURRENT_HOST: The name of the current container on the container network. * SM_HOSTS: JSON encoded list containing all the hosts . Supposing one input channel, 'training', was used in the call to the PyTorch estimator's fit() method, the following will be set, following the format SM_CHANNEL_[channel_name]: * SM_CHANNEL_TRAINING: A string representing the path to the directory containing data in the 'training' channel. For more information about training environment variables, please visit [SageMaker Containers](https://github.com/aws/sagemaker-containers). A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to model_dir so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an argparse.ArgumentParser instance. 
Because the SageMaker imports the training script, you should put your training code in a main guard (''if __name__=='__main__':'') if you are using the same script to host your model as we do in this example, so that SageMaker does not inadvertently run your training code at the wrong point in execution. MONAI includes deep neural networks such as UNet, DenseNet, GAN and others and provides sliding window inferences for large medical image volumes. In the skin cancer image classification model, we train the MONAI DenseNet model on the skin cancer images for thirty epochs while measuring loss. ``` !pygmentize source/monai_skin_cancer.py ``` ## Run training in SageMaker The `PyTorch` class allows us to run our training function as a training job on SageMaker infrastructure. We need to configure it with our training script, an IAM role, the number of training instances, the training instance type, and hyperparameters. In this case we are going to run our training job on ```ml.p3.8xlarge``` instance. But this example can be ran on one or multiple, cpu or gpu instances ([full list of available instances](https://aws.amazon.com/sagemaker/pricing/instance-types/)). The hyperparameters parameter is a dict of values that will be passed to your training script -- you can see how to access these values in the ```monai_skin_cancer.py``` script above. ``` from sagemaker.pytorch import PyTorch estimator = PyTorch(entry_point='monai_skin_cancer.py', source_dir='source', role=role, framework_version='1.5.0', py_version='py3', instance_count=1, instance_type='ml.p3.8xlarge', hyperparameters={ 'backend': 'gloo', 'epochs': 30 }) ``` After we've constructed our PyTorch object, we can fit it using the HAM10000 dataset we uploaded to S3. SageMaker will download the data to the local filesystem, so our training script can simply read the data from disk. 
``` estimator.fit({'train': inputs}) ``` ## HOST Model ### Create real-time endpoint After training, we use the ``PyTorch`` estimator object to build and deploy a PyTorchPredictor. This creates a Sagemaker Endpoint -- a hosted prediction service that we can use to perform inference. As mentioned above we have an implementation of `model_fn` in the monai_skin_cancer.py script that is required. We are going to use default implementations of `input_fn`, `predict_fn`, `output_fn` and `transform_fn` defined in [sagemaker-pytorch-containers](https://github.com/aws/sagemaker-pytorch-containers). The arguments to the deploy function allow us to set the number and type of instances that will be used for the Endpoint. These do not need to be the same as the values we used for the training job. For example, you can train a model on a set of GPU-based instances, and then deploy the Endpoint to a fleet of CPU-based instances, but you need to make sure that you return or save your model as a cpu model similar to what we did in monai_skin_cancer.py. Here we will deploy the model to a single ```ml.m5.xlarge``` instance.
``` predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.m5.xlarge') ``` ### Load Validation Images for Inference ``` from PIL import Image print('Load Test Images for Inference') val_dir = os.path.join(base_dir, 'HAM10000/val_dir') class_names = sorted([x for x in os.listdir(val_dir) if os.path.isdir(os.path.join(val_dir, x))]) num_class = len(class_names) image_files = [[os.path.join(val_dir, class_name, x) for x in os.listdir(os.path.join(val_dir, class_name))[:1]] for class_name in class_names] image_file_list = [] image_label_list = [] for i, class_name in enumerate(class_names): image_file_list.extend(image_files[i]) image_label_list.extend([i] * len(image_files[i])) num_total = len(image_label_list) image_width, image_height = Image.open(image_file_list[0]).size ``` ### MONAI Transform Image using Compose and Skin Cancer Dataset MONAI has transforms that support both Dictionary and Array format and are specialized for the high-dimensionality of medical images. The transforms include several categories such as Crop & Pad, Intensity, IO, Post-processing, Spatial, and Utilities. In the following excerpt, the Compose class chains a series of image transforms together and returns a single tensor of the image. ``` import torch from torch.utils.data import DataLoader from source.skin_cancer_dataset import SkinCancerDataset from monai.transforms import Compose, LoadPNG, Resize, AsChannelFirst, ScaleIntensity, ToTensor val_transforms = Compose([ LoadPNG(image_only=True), AsChannelFirst(channel_dim=2), ScaleIntensity(), Resize(spatial_size=(64,64)), ToTensor() ]) val_ds = SkinCancerDataset(image_file_list, image_label_list, val_transforms) val_loader = DataLoader(val_ds, batch_size=1, num_workers=1) ``` ### Evaluate We can now use the predictor to perform a real-time inference to classify skin cancer images. 
``` print('Sample Inference Results By Class:') for i, val_data in enumerate(val_loader): response = predictor.predict(val_data[0]) actual_label = val_data[1] pred = torch.nn.functional.softmax(torch.tensor(response), dim=1) top_p, top_class = torch.topk(pred, 1) print('actual class: '+class_names[actual_label.numpy()[0]]) print('predicted class: '+class_names[top_class]) print('predicted class probablity: '+str(round(top_p.item(),2))) print() ``` ### Remove endpoint (Optional) Delete the prediction endpoint to release the instance(s) hosting the model once finished with example. ``` predictor.delete_endpoint() ```
github_jupyter
# Dictionaries We've been learning about *sequences* in Python but now we're going to switch gears and learn about *mappings* in Python. If you're familiar with other languages you can think of these Dictionaries as hash tables. This section will serve as a brief introduction to dictionaries and consists of: 1.) Constructing a Dictionary 2.) Accessing objects from a dictionary 3.) Nesting Dictionaries 4.) Basic Dictionary Methods So what are mappings? Mappings are a collection of objects that are stored by a *key*, unlike a sequence that stores objects by their relative position. This is an important distinction, since mappings won't retain order since they have objects defined by a key. A Python dictionary consists of a key and then an associated value. That value can be almost any Python object. ## Constructing a Dictionary Let's see how we can construct dictionaries to get a better understanding of how they work! ``` # Make a dictionary with {} and : to signify a key and a value my_dict = {'key1':'value1','key2':'value2'} # Call values by their key my_dict['key2'] ``` It's important to note that dictionaries are very flexible in the data types they can hold. For example: ``` my_dict = {'key1':123,'key2':[12,23,33],'key3':['item0','item1','item2']} # Let's call items from the dictionary my_dict['key3'] # Can call an index on that value my_dict['key3'][0] # Can then even call methods on that value my_dict['key3'][0].upper() ``` We can affect the values of a key as well. For instance: ``` my_dict['key1'] # Subtract 123 from the value my_dict['key1'] = my_dict['key1'] - 123 #Check my_dict['key1'] ``` A quick note, Python has a built-in method of doing a self subtraction or addition (or multiplication or division). We could have also used += or -= for the above statement. For example: ``` # Set the object equal to itself minus 123 my_dict['key1'] -= 123 my_dict['key1'] ``` We can also create keys by assignment.
For instance if we started off with an empty dictionary, we could continually add to it: ``` # Create a new dictionary d = {} # Create a new key through assignment d['animal'] = 'Dog' # Can do this with any object d['answer'] = 42 #Show d ``` ## Nesting with Dictionaries Hopefully you're starting to see how powerful Python is with its flexibility of nesting objects and calling methods on them. Let's see a dictionary nested inside a dictionary: ``` # Dictionary nested inside a dictionary nested inside a dictionary d = {'key1':{'nestkey':{'subnestkey':'value'}}} ``` Wow! That's a quite the inception of dictionaries! Let's see how we can grab that value: ``` # Keep calling the keys d['key1']['nestkey']['subnestkey'] ``` ## A few Dictionary Methods There are a few methods we can call on a dictionary. Let's get a quick introduction to a few of them: ``` # Create a typical dictionary d = {'key1':1,'key2':2,'key3':3} # Method to return a list of all keys d.keys() # Method to grab all values d.values() # Method to return tuples of all items (we'll learn about tuples soon) d.items() ``` Hopefully you now have a good basic understanding how to construct dictionaries. There's a lot more to go into here, but we will revisit dictionaries at later time. After this section all you need to know is how to create a dictionary and how to retrieve values from it.
github_jupyter
# Lab 9.1: About Adam with MNIST Classifier **Jonathan Choi 2021** **[Deep Learning By Torch] End to End study scripts of Deep Learning by implementing code practice with Pytorch.** If you have an any issue, please PR below. [[Deep Learning By Torch] - Github @JonyChoi](https://github.com/jonychoi/Deep-Learning-By-Torch) In this lab(9.0 and 9.1), we are going to learn about various optimizers, including SGD(Stochastic Gradient Descent) as we used always, besides about Adam, Adagrad, Momentum, GD, Adadelta, RMSProp etc. We are going to create the neural network using optimizer Adam at the End. Please read script "09.0 About optimizers" to get more understand. ![optimizers](https://cdn-images-1.medium.com/max/2000/1*3mbLR7aSgbg_UoueBymw5g.png) Reference from https://medium.com/octavian-ai/which-optimizer-and-learning-rate-should-i-use-for-deep-learning-5acb418f9b2 ## Imports ``` import torch import torchvision.datasets as datasets import torchvision.transforms as transforms import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import random device = 'cuda' if torch.cuda.is_available() else 'cpu' random.seed(1) torch.manual_seed(1) if device == 'cuda': torch.cuda.manual_seed_all(1) ``` ## Set Hyper Parameters ``` learning_rate = 0.001 training_epochs = 15 batch_size = 100 ``` ## Load MNIST Data ``` mnist_train = datasets.MNIST(root='MNIST_data/', train=True, transform=transforms.ToTensor(), download=True) mnist_test = datasets.MNIST(root='MNIST_data/', train=False, transform=transforms.ToTensor(), download=True) data_loader = torch.utils.data.DataLoader(dataset=mnist_train, shuffle=True, drop_last=True, batch_size = batch_size) ``` ## Model Define ``` class LinearMNISTClassifier(nn.Module): def __init__(self): super().__init__() self.linear = nn.Linear(784, 10) nn.init.normal_(self.linear.weight) def forward(self, x): return self.linear(x) ``` ## Train with SGD and ADAM ``` def train(data_loader, model, optimizer): #set optimizer if 
optimizer == "sgd": optimizer = optim.SGD(model.parameters(), lr=learning_rate) else: optimizer = optim.Adam(model.parameters(), lr=learning_rate) total_batch = len(data_loader) for epoch in range(training_epochs): avg_cost = 0 for X, Y in data_loader: #reshape input image into (batchsize x 784) #label is not one-hot encoded X = X.view(-1, 28 * 28).to(device)# Before X.shape = torch.Size([100, 1, 28, 28]) After torch.Size([100, 784]) Y = Y.to(device) #prediction pred = model(X) #cost cost = F.cross_entropy(pred, Y) #Reduce the cost optimizer.zero_grad() cost.backward() optimizer.step() avg_cost += cost / total_batch print('Epoch: {:d}/15, Cost: {:.6f}'.format(epoch+1, avg_cost)) print('Learning Finished') ``` ## Train with SGD ``` model = LinearMNISTClassifier().to(device) train(data_loader, model, 'sgd') #Test the model using test sets with torch.no_grad(): X_test = mnist_test.test_data.view(-1 ,28 * 28).float().to(device) Y_test = mnist_test.test_labels.to(device) pred = model(X_test) correct_prediction = torch.argmax(pred, 1) == Y_test accuracy = correct_prediction.float().mean() print('Accuracy: ', accuracy.item()) #Get one and predict r = random.randint(0, len(mnist_test) - 1) #below X_single_data.shape => torch.size([1, 784]) #X_single_data = mnist_test.test_data[r:r + 1].view(-1, 28 * 28).float().to(device) #Y_single_data = mnist_test.test_labels[r:r + 1].to(device) #X_test[r].shape => torch.size([784]) #X_test[r:r+1].shape => torch.size([1, 784]) #if torch.argmax(single_prediction, 1) => Since just torch.size([784]) makes IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) #else torch.argmax(single_prediction, 0) => no error with size([784]) X_single_data = X_test[r] Y_single_data = Y_test[r] print('Label: ', Y_single_data.item()) single_prediction = model(X_single_data) print('Prediction: ', torch.argmax(single_prediction, 0).item()) ``` ## Train with ADAM ``` model = LinearMNISTClassifier().to(device) train(data_loader, 
model, 'adam') #Test the model using test sets with torch.no_grad(): X_test = mnist_test.data.view(-1 ,28 * 28).float().to(device) Y_test = mnist_test.targets.to(device) pred = model(X_test) correct_prediction = torch.argmax(pred, 1) == Y_test accuracy = correct_prediction.float().mean() print('Accuracy: ', accuracy.item()) #Get one and predict r = random.randint(0, len(mnist_test) - 1) print(mnist_test.data[r: r+1].view(-1, 28 * 28).shape, X_test[r: r+1].shape) #below X_single_data.shape => torch.size([1, 784]) #X_single_data = mnist_test.test_data[r:r + 1].view(-1, 28 * 28).float().to(device) #Y_single_data = mnist_test.test_labels[r:r + 1].to(device) #X_test[r].shape => torch.size([784]) #X_test[r:r+1].shape => torch.size([1, 784]) #if torch.argmax(single_prediction, 1) => Since just torch.size([784]) makes IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) #else torch.argmax(single_prediction, 0) => no error with size([784]) X_single_data = X_test[r] Y_single_data = Y_test[r] print('Label: ', Y_single_data.item()) single_prediction = model(X_single_data) print('Prediction: ', torch.argmax(single_prediction, 0).item()) ```
github_jupyter
# Using Spark from Livy end point The sample demonstrates using the spark service via the livy end point. The sample uses the python requests library to access the livy interface. Livy API details are available at https://livy.incubator.apache.org/docs/latest/rest-api.html ** Note : The image here may not be visible due to a markdown bug. Please change the path here to a full path to view the image. <img src = "./spark_from_livy.jpg" style="float: center;" alt="drawing" width="900"> ## Using python requests library to access restful APIs ``` import json, pprint, requests, textwrap from requests.auth import HTTPBasicAuth print("Get username and email ") print("---------------------- ") #Basic test for request import requests r = requests.get('https://jsonplaceholder.typicode.com/users') r.encoding = 'utf-8' ret_json = r.json() for user in ret_json: print(user['username'] , user['email']) #print(r.json()) ``` ## Livy APIs to fetch running sessions and session state ``` import json, pprint, requests, textwrap from requests.auth import HTTPBasicAuth #Disabling exception to avoid the https verification warning import requests.packages.urllib3 as urllib3 import urllib3.exceptions as urllib3_exceptions urllib3.disable_warnings(urllib3_exceptions.InsecureRequestWarning) #Change host per your configuration host = "https://<ip>:<port>/gateway/default/livy/v1" #Livy interface as per https://livy.incubator.apache.org/docs/latest/rest-api.html#session # Construct Request - Get a list of current spark sessions. sessions_url = host + "/sessions" # Common headers for all requests.
# Auth header auth = HTTPBasicAuth("***", "***") # Content Type headers = {'Content-Type': 'application/json'} data = { 'from': 0, 'size': 10 } r = requests.get(sessions_url, data=json.dumps(data), headers=headers, auth=auth, verify=False) response_body = r.json() print("Sessions fetched starting ", response_body['from']) print("Number of session fetched", response_body['total']) session_list = response_body['sessions'] for session in session_list: print("The session {0} has a state {1} ".format(session['id'],session['state'])) ``` ## Create a new Spark session and query state ``` #Create a new Spark session data = { 'kind':'pyspark' } r = requests.post(sessions_url, data=json.dumps(data), headers=headers, auth=auth, verify=False) response_body = r.json() session_id = response_body['id'] session_state = response_body['state'] print("Spark session {0} created. Current state is {1}".format(session_id,session_state)) created_session_url = r.headers['location'] #Query information about the session we just created this_session_url = host + created_session_url print("this_session_url", this_session_url) r = requests.get(this_session_url, headers=headers,auth=auth, verify=False) pprint.pprint(r.json()) ``` ## Execute code interactively Spark session and check results ``` #Execute code interactively in this session using session/<ID>/statements interface statements_url = this_session_url + "/statements" pprint.pprint(statements_url) data = { 'code': "11+11" } r = requests.post(statements_url, data=json.dumps(data), headers=headers, auth=auth, verify=False) print("Respone status code" ,r.status_code ) print("Response received is ",r.json()) print("Poll URI is ",r.headers['location']) #response_body = r.json() # Wait a while before executing this cell. Spark session start up takes time to run your code. 
specific_statement = host + r.headers['location'] print("monitoring url is ", specific_statement) r = requests.get(specific_statement, headers=headers, auth=auth, verify=False) print("Response status is ",r.status_code) print("Response received is ", pprint.pprint(r.json())) ``` ## Execute code in batch ``` #Execute code in batch #The following uses pi.py from spark source. Please get that file and transer to HDFS /jar folder. batch_url = host + "/batches" print("batch_url", batch_url) data = { 'file' : '/jar/pi.py' } r = requests.post(batch_url, data=json.dumps(data), headers=headers, auth=auth, verify=False) returned_batch_url = r.headers['location'] print("Respone status code" , r.status_code) print("Poll URI is ",returned_batch_url ) print("Response is ", pprint.pprint(r.json())) #Check results of executed code specific_batch = host + returned_batch_url print("specific batch request ",specific_batch) r = requests.get(specific_batch,headers=headers, auth=auth, verify = False) print("Response status is ",r.status_code) print("Response received is ", pprint.pprint(r.json())) ```
github_jupyter
<div> <span style="float: left; width: 33%; text-align: left;"><a href="2.1.System-Topology.ipynb">Previous Notebook</a></span> <span style="float: left; width: 33%; text-align: center;"> <a href="../Start_Here.ipynb">Home Page</a> </span> </div> # Getting started with DLProf **Contents of this notebook:** - [Introduction to DLProf](#Introduction-to-DLProf) - [Profiling using DLProf](#Profiling-using-DLProf) - [Visualising profiles using DLProfviewer](#Visualising-profiles-using-DLProfviewer) - [Improving throughput using DLProf Expert system](#Improving-throughput-using-DLProf-Expert-system) **By the End of this Notebook you will:** - Learn the basics of `dlProf` - Learn how to profile using `dlprof` - Learn to visualise profile output using `dlprofviewer` ## Introduction to DLProf NVIDIA Deep Learning Profiler is a tool built to help data scientists understand and improve the performance of their models. It is built on top of the NVIDIA NSight Systems tools to collect the profiling data and aggregate them into the format that can be best consumable by Deep Leaning researchers, data scientists and engineers. <center><img src="images/DlProf_layers.png" width="400"/></center> DLProf was created with the following in mind : - Deep learning profiling for correlating to model layer and iterations. - Framework support built it. - A method to quickly visualise the profile in an intuitive manner to understand different aspects of the deep learning system. - Providing high-level information regarding different aspects of the DL workload. Let us now begin profiling using DLProf. ## Profiling using DLProf Let us now begin by running the `dlprof` command , let us now profile the application we used in Introduction to Distributed Deep learning notebook. 
**Note** : For the first run , we need to run `dlprof` for a shorter duration so that we can define iterations called `key node` which we will later pass to `dlprof` as a parameter to define iterations in the training process for a complete run. ``` !TF_CPP_MIN_LOG_LEVEL=3 dlprof --mode=tensorflow2 --reports=detail --delay=5 --duration=30 --output_path="Profile/Prof1" horovodrun -np 1 python3 ../source_code/N1/cnn_fmnist.py --batch-size=2048 ``` Let us understand the parameters from above : - `--mode=tensorflow2` : This sets the target framework to generate detailed metrics and reports specific to the framework. - `--reports=detail` : This selects the aggregated report(s) to generate. We use `detail` so that we can identify the operation to define an iteration. - `--delay=5` : Collection start delay in seconds , we set this to a positive value considering dataset download and processing time. - `--duration=30` : Collection duration in seconds. - `--output_path="Profile/Prof1"` : Setting an output path to store the profile output. With that let us now visualise the results of the profile. ## Visualising profiles using DLProfviewer Launch a Terminal session by clicking on `File` $\rightarrow$ `New` $\rightarrow$ `Terminal` <center><img src="images/open_terminal.png" /></center> and run the following command to launch the `dlprofviewer` server with the port `8000` . Kindly change it to a port that you will be using. ```bash dlprofviewer -b 0.0.0.0 -p 8000 Profile/Prof1/dlprof_dldb.sqlite ``` You should now have a `dlprofviewer` server running on the port specified. Open a new tab in your browser and navigate to `127.0.0.1:8000` to access the `dlprofviewer` application. You need to change the port number here to the one you specified while launching the server. You should be seeing the following page , this is called the DLProf Dashboard.
The Dashboard view provides a high level summary of the performance results in a panelized view. This view serves as a starting point in analyzing the results and provides several key metrics. **Note** : If you are not able to access the DLProf dashboard , kindly verify if you have access to port and verify if the port number forwarded matches the port dlprofviewer server is running on. ![dlprofviewer](images/dlprofviewer.png) Let us now focus on the Dashboard and understand what the differnet panels in the Dashboard are for. - **GPU Utilization Chart**: Shows the percentage of the wall clock time that the GPU is active. For multi-gpu, it is an average utilization across all GPUs - **Op GPU Time Chart**: Splits all operations into 3 categories: Operations that used tensor cores, operations that were eligible to use tensor cores but didn't, and operations that were ineligible to use tensor cores - **Kernel GPU Time Chart**: Breaks down all kernel time into 3 categories: Kernels that used tensor cores, memory kernels, and all other kernels - **Tensor Core Kernel Efficiency Chart**: Gives a single number that measures what percentage of GPU time inside of TC-eligible ops are using tensor cores. - **Performance summary**: A straightforward panel that shows all of the key metrics from the run in one place - **Iteration Summary**: A bar chart that shows the amount of time each iteration took during the run. The colored bars are the ones that were used to generate all of the statistics, while the gray bars are iterations that were outside the selected range. Each colored bar shows the breakdown of iteration time into GPU using TC, GPU not using TC, and all other non-GPU time. - **Top 10 GPU Ops**: Shows the top 10 operations in the run sorted by the amount of GPU time they took. This is a great starting point for trying to find potential for improvements - **System Config**: Shows the system configuration for the run. 
- **Expert Systems Recommendations**: Shows any potential problems that DLProf found and recommendations for how to fix them. - **Guidance Panel**: Provides some helpful links to learn more about GPU utilization and performance improvements Let us now look at some more details provided by the DLProf Viewer **Op Type Summary** : This page contains tables that aggregates metrics over all op types and enables users to see the performance of all the ops in terms of its types, such as Convolutions, Matrix Multiplications, etc. ![Op-type-summary](images/dlprof_ops_n.png) In the above image we can notice the tabular data is sorted by the time taken by the GPU for every operation. This allows us to understand the number of times an operation is called and the time taken by them , this will be used in the System Topology notebook to differentiate between the different types of GPU-GPU connectivity. **Ops and Kernels** : This view enables users to view, search, sort all ops and their corresponding kernels in the entire network. ![Ops_kernels](images/dlprof_ops_ker.png) We will look into the remaining tabs in the following section. Let us now profile again with `key_node` parameter , remember the `key_node` parameters is used to define a iteration , so we need to look for an operation in the **Ops and Kernels Summary** tab that occurs at every iteration. Here , let us choose the loss function operation name as `key_node` as we are aware this is calculated at the end of every iteration. ![Keynode](images/dlprof_keynode.png) Let us now add this parameter to profile our deep learning workload. 
```bash --key_node=sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits ``` ``` !TF_CPP_MIN_LOG_LEVEL=3 dlprof --mode=tensorflow2 --key_node=sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits --output_path="Profile/Prof2" horovodrun -np 1 python3 ../source_code/N1/cnn_fmnist.py --batch-size=2048 ``` Close the already running `dlprofviewer` server and run it again with the latest profile. ```bash dlprofviewer -b 0.0.0.0 -p 8000 Profile/Prof2/dlprof_dldb.sqlite ``` We again come across the Dashboard , but this time we will be having a different Dashboard compared the the previous one as we have added the `key_node` parameter thus defining an iteration. This allows us to compare multiple parameters between different iterations. Here's a short brief on the remaining tabs that utilise the `key_node` parameter to display information tagged with iterations : - **Kernels by Iteration** : The Kernels by Iterations view shows operations and their execution time for every iteration. At a glance, you can compare iterations with respect to time as well as Kernels executed. - **Kernels by Op** : The Kernels by Op view is a variation of the Kernels by Iterations view. It has the capability to filter the list of kernels by iterations and op. - **Iterations** : This view displays iterations visually. Users can quickly see how many iterations are in the model, the iterations that were aggregated/profiled, and the accumulated durations of tensor core kernels in each iteration. Here is an example of the iterations tab where we have access to information specific to each iteration of training : <center><img src="images/dlprof_iters.png"/></center> The final tab give us the summary of GPU Utilisation : - **GPUs** : This view shows the utilization of all GPUs during the profile run. 
<center><img src="images/dlprof_gpus.png"/></center> Now that we understand the types of information that DLProf provides us with , let us now take a look at how to improve our throughput using DLProf. ## Improving throughput using DLProf Expert system Until now we understand the amount of information made available to us via DLProf , but for a user trying to optimize their model and make use of new techniques, this information would not be straightforward , in that case the Expert Systems Recommendations is very helpful to find potential problems and recommendations for how to fix them. Let us take a closer look from the above profile. <center><img src="images/dlprof_expert.png"/></center> Now that we have learnt the basics of DLProf and how to improve throughput using the DLProf expert systems, let us now go back to the System topology notebook to use DLProf to understand the difference in communication times taken in different cases. *** ## Licensing This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0). <div> <span style="float: left; width: 33%; text-align: left;"><a href="2.1.System-Topology.ipynb">Previous Notebook</a></span> <span style="float: left; width: 33%; text-align: center;"> <a href="../Start_Here.ipynb">Home Page</a> </span> </div>
github_jupyter
### A simple chatbot. In this notebook we are going to create a simple chatbot sith the dataset that comes from [Cornell_Movie-Dialogs_Corpus](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html) in pytorch. I will be loading this dataset that I've downloaded from my google drive since i'm working on google colab. ### Mounting the drive ``` from google.colab import drive drive.mount('/content/drive') ``` ### Imports ``` from __future__ import absolute_import, division, print_function, unicode_literals import torch from torch import nn from torch.nn import functional as F from torch.jit import script, trace import csv, random, re, os, codecs, math, time, itertools, unicodedata torch.__version__ ``` ### Device ``` device = torch.device("cuda" if torch.cuda.is_available() else 'cpu') device ``` ### Loading and processing data. The [Cornell Movie-Dialogs Corpus](https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html) is a rich dataset of movie character dialog: * 220,579 conversational exchanges between 10,292 pairs of movie characters * 9,035 characters from 617 movies * 304,713 total utterances ### extracting the dataset ``` path_to_zip_file = '/content/drive/My Drive/NLP Data/cornell_movie_dialogs_corpus/cornell_movie_dialogs_corpus.zip' os.path.exists(path_to_zip_file) path_to_data = '/content/drive/My Drive/NLP Data/cornell_movie_dialogs_corpus/data' # import zipfile # with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref: # zip_ref.extractall(path_to_data) ``` First, we’ll take a look at some lines of our datafile to see the original format. ``` corpus_name = "cornell movie-dialogs corpus" corpus = os.path.join(path_to_data, corpus_name) def print_lines(file, n=10): with open(file, 'rb') as datafile: lines = datafile.readlines() for line in lines[:n]: print(line) print_lines(os.path.join(corpus, "movie_lines.txt")) ``` ### Creating a formated data file. 
We are goig to create a formatted data file where each sentence and response pair will be tab-separated. ``` def load_lines(filename, fields): """ splits each line of the file into a dictionary of fields (lineID, characterID, movieID, character, text) """ lines ={} with open(filename, "r", encoding="iso-8859-1") as f: for line in f: values = line.split(" +++$+++ ") obj = {} for i, field in enumerate(fields): obj[field] = values[i] lines[obj['lineID']] = obj return lines def load_conversations(filename, lines, fields): """ groups fields of lines from loadLines into conversations based on movie_conversations.txt """ conversations =[] with open(filename, 'r', encoding='iso-8859-1') as f: for line in f: values = line.split(" +++$+++ ") # Extract fields convObj = {} for i, field in enumerate(fields): convObj[field] = values[i] # Convert string to list (convObj["utteranceIDs"] == "['L598485', 'L598486', ...]") utterance_id_pattern = re.compile('L[0-9]+') lineIds = utterance_id_pattern.findall(convObj["utteranceIDs"]) # Reassemble lines convObj["lines"] = [] for lineId in lineIds: convObj["lines"].append(lines[lineId]) conversations.append(convObj) return conversations # Extracts pairs of sentences from conversations def extract_sentence_pairs(conversations): qa_pairs = [] for conversation in conversations: # Iterate over all the lines of the conversation for i in range(len(conversation["lines"]) - 1): # We ignore the last line (no answer for it) input_line = conversation["lines"][i]["text"].strip() target_line = conversation["lines"][i+1]["text"].strip() # Filter wrong samples (if one of the lists is empty) if input_line and target_line: qa_pairs.append([input_line, target_line]) return qa_pairs ``` > Next we are going to save the formated movies line pairs delimited by a tab as `formatted_movie_lines.txt` ``` datafile = os.path.join(corpus, "formatted_movie_lines.txt") lines = {} conversations = [] MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", 
"text"] MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"] # Load lines and process conversations print("\nProcessing corpus...") lines = load_lines(os.path.join(corpus, "movie_lines.txt"), MOVIE_LINES_FIELDS) print("\nLoading conversations...") conversations = load_conversations(os.path.join(corpus, "movie_conversations.txt"), lines, MOVIE_CONVERSATIONS_FIELDS) conversations[0] ``` writting to a `csv` file. ``` # Write new csv file delimiter = '\t' # Unescape the delimiter delimiter = str(codecs.decode(delimiter, "unicode_escape")) print("\nWriting newly formatted file...") with open(datafile, 'w', encoding='utf-8') as outputfile: writer = csv.writer(outputfile, delimiter=delimiter, lineterminator='\n') for pair in extract_sentence_pairs(conversations): writer.writerow(pair) print("\nSample lines from file:") print_lines(datafile) ``` ### Loading and trimming data. We aare going to create the `Voc` class which keeps a mapping from words to indexes, a reverse mapping of indices to words, a count of each word and a total word count. The class provides methods for adding a word to the vocabulary (`add_word`), adding all words in a sentence ``(add_sentence)`` and trimming infrequently seen words (`trim`). More on trimming later. 
```
# Default word tokens
PAD_token = 0  # Used for padding short sentences
SOS_token = 1  # Start-of-sentence token
EOS_token = 2  # End-of-sentence token


class Voc:
    """Vocabulary wrapper: word<->index mappings plus per-word counts.

    Provides add_word / add_sentence for building the vocabulary and
    trim() for dropping infrequently used words.
    """

    def __init__(self, name):
        self.name = name
        self.trimmed = False  # guard so trim() is only applied once
        self.word2index = {}
        self.word2count = {}
        # Use the same token strings as trim()'s re-initialisation below
        # (previously "<pad>"/"<sos>"/"<eos>" here vs "PAD"/"SOS"/"EOS" there),
        # so index2word is consistent whether or not the voc has been trimmed
        # and the 'EOS'/'PAD' filtering at evaluation time always matches.
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count SOS, EOS, PAD

    def add_sentence(self, sentence):
        # Register every whitespace-separated token of the sentence.
        for word in sentence.split(" "):
            self.add_word(word)

    def add_word(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.num_words
            self.word2count[word] = 1
            self.index2word[self.num_words] = word
            self.num_words += 1
        else:
            self.word2count[word] += 1

    def trim(self, min_count):
        """Drop every word seen fewer than min_count times and rebuild the maps."""
        if self.trimmed:
            return
        self.trimmed = True

        keep_words = []
        for k, v in self.word2count.items():
            if v >= min_count:
                keep_words.append(k)

        print('keep_words {} / {} = {:.4f}'.format(
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))

        # Reinitialize dictionaries and re-add only the surviving words.
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count default tokens

        for word in keep_words:
            self.add_word(word)
```

Before we can use this dataset, we need to perform some preprocessing on it.
``` MAX_LENGTH = 10 # Maximum sentence length to consider def unicode_to_ascii(s): # convert unicodes to ascii characters return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' ) def normalize_string(s): s = unicode_to_ascii(s.lower().strip()) s = re.sub(r"([.!?])", r" \1", s) s = re.sub(r"[^a-zA-Z.!?]+", r" ", s) s = re.sub(r"\s+", r" ", s).strip() return s def read_vocs(datafile, corpus_name): # Read query/response pairs and return a voc object lines = open(datafile, encoding='utf-8').read().strip().split("\n") pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines] voc = Voc(corpus_name) return voc, pairs def filter_pair(p): return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH def filter_pairs(pairs): return [pair for pair in pairs if filter_pair(pair)] def load_prepare_data(corpus, corpus_name, datafile, save_dir): print("Start preparing training data ...") voc, pairs = read_vocs(datafile, corpus_name) print("Read {!s} sentence pairs".format(len(pairs))) pairs = filter_pairs(pairs) print("Trimmed to {!s} sentence pairs".format(len(pairs))) print("Counting words...") for pair in pairs: voc.add_sentence(pair[0]) voc.add_sentence(pair[1]) print("Counted words:", voc.num_words) return voc, pairs # Load/Assemble voc and pairs save_dir = os.path.join(path_to_data, "save") voc, pairs = load_prepare_data(corpus, corpus_name, datafile, save_dir) # Print some pairs to validate print("\npairs:") for pair in pairs[:10]: print(pair) ``` Another tactic that is beneficial to achieving faster convergence during training is trimming rarely used words out of our vocabulary. Decreasing the feature space will also soften the difficulty of the function that the model must learn to approximate. We will do this as a two-step process: 1. Trim words used under MIN_COUNT threshold using the voc.trim function. 2. Filter out pairs with trimmed words. 
``` MIN_COUNT = 3 # Minimum word count threshold for trimming def trim_rare_words(voc, pairs, MIN_COUNT): # Trim words used under the MIN_COUNT from the voc voc.trim(MIN_COUNT) # Filter out pairs with trimmed words keep_pairs = [] for pair in pairs: input_sentence = pair[0] output_sentence = pair[1] keep_input = True keep_output = True # Check input sentence for word in input_sentence.split(' '): if word not in voc.word2index: keep_input = False break # Check output sentence for word in output_sentence.split(' '): if word not in voc.word2index: keep_output = False break # Only keep pairs that do not contain trimmed word(s) in their input or output sentence if keep_input and keep_output: keep_pairs.append(pair) print("Trimmed from {} pairs to {}, {:.4f} of total".format(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs))) return keep_pairs # Trim voc and pairs pairs = trim_rare_words(voc, pairs, MIN_COUNT) pairs[:3] ``` ### Data preparation for the model. We are then going to convert or sentences int tensors so that the network can be able to train on them since neural networks only understand numbers. The `input_var` function handles the process of converting sentences to tensor, ultimately creating a correctly shaped zero-padded tensor. It also returns a tensor of `lengths` for each of the sequences in the batch which will be passed to our decoder later. The `output_var` function performs a similar function to `input_var`, but instead of returning a lengths tensor, it returns a binary mask tensor and a maximum target sentence length. The binary mask tensor has the same shape as the output target tensor, but every element that is a `PAD_token` is 0 and all others are 1. `batch_2_train_data `simply takes a bunch of pairs and returns the input and target tensors using the aforementioned functions. 
``` def indexes_from_sentence(voc, sentence): return [voc.word2index[word] for word in sentence.split(" ")] + [EOS_token] def zero_padding(l, fillvalue=PAD_token): return list(itertools.zip_longest(*l, fillvalue=fillvalue)) def binary_matrix(l, value=PAD_token): m = [] for i, seq in enumerate(l): m.append([]) for token in seq: if token == PAD_token: m[i].append(0) else: m[i].append(1) return m def input_var(l, voc): # Returns padded input sequence tensor and lengths indexes_batch = [indexes_from_sentence(voc, sentence) for sentence in l] lengths = torch.tensor([len(indexes) for indexes in indexes_batch]) pad_list = zero_padding(indexes_batch) pad_var = torch.LongTensor(pad_list) return pad_var, lengths def output_var(l, voc): # Returns padded target sequence tensor, padding mask, and max target length indexes_batch = [indexes_from_sentence(voc, sentence) for sentence in l] max_target_len = max([len(indexes) for indexes in indexes_batch]) pad_list = zero_padding(indexes_batch) mask = binary_matrix(pad_list) mask = torch.BoolTensor(mask) pad_var = torch.LongTensor(pad_list) return pad_var, mask, max_target_len def batch_2_train_data(voc, pair_batch): # Returns all items for a given batch of pairs pair_batch.sort(key=lambda x: len(x[0].split(" ")), reverse=True) input_batch, output_batch = [], [] for pair in pair_batch: input_batch.append(pair[0]) output_batch.append(pair[1]) inp, lengths = input_var(input_batch, voc) output, mask, max_target_len = output_var(output_batch, voc) return inp, lengths, output, mask, max_target_len # Example for validation small_batch_size = 5 batches = batch_2_train_data(voc, [random.choice(pairs) for _ in range(small_batch_size)]) input_variable, lengths, target_variable, mask, max_target_len = batches print("input_variable:", input_variable) print("lengths:", lengths) print("target_variable:", target_variable) print("mask:", mask) print("max_target_len:", max_target_len) ``` ### Defining models ### Seq2Seq Model The brains of our 
chatbot is a sequence-to-sequence (seq2seq) model. The goal of a seq2seq model is to take a variable-length sequence as an input, and return a variable-length sequence as an output using a fixed-sized model. ![img](https://pytorch.org/tutorials/_images/seq2seq_ts.png) ### Encoder > The encoder RNN iterates through the input sentence one token (e.g. word) at a time, at each time step outputting an “output” vector and a “hidden state” vector. The hidden state vector is then passed to the next time step, while the output vector is recorded. The encoder transforms the context it saw at each point in the sequence into a set of points in a high-dimensional space, which the decoder will use to generate a meaningful output for the given task. At the heart of our encoder is a multi-layered Gated Recurrent Unit. We will use a bidirectional variant of the GRU, meaning that there are essentially two independent RNNs: one that is fed the input sequence in normal sequential order, and one that is fed the input sequence in reverse order. The outputs of each network are summed at each time step. Using a bidirectional GRU will give us the advantage of encoding both past and future contexts. Bidirectional RNN. ![img](https://pytorch.org/tutorials/_images/RNN-bidirectional.png) **Note** that an embedding layer is used to encode our word indices in an arbitrarily sized feature space. For our models, this layer will map each word to a feature space of size `hidden_size`. When trained, these values should encode semantic similarity between similar meaning words. Finally, if passing a padded batch of sequences to an RNN module, we must pack and unpack padding around the RNN pass using `nn.utils.rnn.pack_padded_sequence` and `nn.utils.rnn.pad_packed_sequence` respectively. ### Computation Graph: * Convert word indexes to embeddings. * Pack padded batch of sequences for RNN module. * Forward pass through GRU. * Unpack padding. * Sum bidirectional GRU outputs. 
* Return output and final hidden state.

### Inputs
* `input_seq`: batch of input sentences; shape=`(max_length, batch_size)`
* `input_lengths`: list of sentence lengths corresponding to each sentence in the batch; shape=`(batch_size)`
* `hidden`: hidden state; shape=`(n_layers x num_directions, batch_size, hidden_size)`

### Outputs
* `outputs`: output features from the last hidden layer of the GRU (sum of bidirectional outputs); shape=`(max_length, batch_size, hidden_size)`
* `hidden`: updated hidden state from GRU; shape=`(n_layers x num_directions, batch_size, hidden_size)`

```
class Encoder(nn.Module):
    """Multi-layer GRU encoder, bidirectional by default.

    The embedding layer is passed in so it can be shared with the decoder.
    """

    def __init__(self, hidden_size, embedding, n_layers=1, dropout=.5, bidirectional=True):
        super(Encoder, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.embedding = embedding
        # Remember the direction setting so forward() can decide whether the
        # GRU output must be summed over the two directions.
        self.bidirectional = bidirectional
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers,
                          dropout=(0 if n_layers == 1 else dropout),
                          bidirectional=bidirectional)

    def forward(self, input_seq, input_lengths, hidden=None):
        # Convert word indexes to embeddings.
        embedded = self.embedding(input_seq)
        # Pack the padded batch of sequences for the RNN module.
        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
        # Forward pass through GRU.
        outputs, hidden = self.gru(packed, hidden)
        # Unpack padding.
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
        if self.bidirectional:
            # Sum the forward and backward GRU outputs.  Previously this sum
            # was applied unconditionally, which produced a shape mismatch
            # whenever bidirectional=False was passed.
            outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
        # Return output and final hidden state.
        return outputs, hidden
```

### Decoder

The decoder RNN generates the response sentence in a token-by-token fashion. It uses the encoder’s context vectors, and internal hidden states to generate the next word in the sequence. It continues generating words until it outputs an EOS_token, representing the end of the sentence. A common problem with a vanilla seq2seq decoder is that if we rely solely on the context vector to encode the entire input sequence’s meaning, it is likely that we will have information loss. This is especially the case when dealing with long input sequences, greatly limiting the capability of our decoder. To combat this, Bahdanau et al.
created an “attention mechanism” that allows the decoder to pay attention to certain parts of the input sequence, rather than using the entire fixed context at every step. At a high level, attention is calculated using the decoder’s current hidden state and the encoder’s outputs. The output attention weights have the same shape as the input sequence, allowing us to multiply them by the encoder outputs, giving us a weighted sum which indicates the parts of encoder output to pay attention to. Sean Robertson’s figure describes this very well: ![img](https://pytorch.org/tutorials/_images/attn2.png) Luong et al. improved upon Bahdanau et al.’s groundwork by creating “Global attention”. The key difference is that with “Global attention”, we consider all of the encoder’s hidden states, as opposed to Bahdanau et al.’s “Local attention”, which only considers the encoder’s hidden state from the current time step. Another difference is that with “Global attention”, we calculate attention weights, or energies, using the hidden state of the decoder from the current time step only. Bahdanau et al.’s attention calculation requires knowledge of the decoder’s state from the previous time step. Also, Luong et al. provides various methods to calculate the attention energies between the encoder output and decoder output which are called “score functions”: ![img](https://pytorch.org/tutorials/_images/scores.png) where $h_t$ = current target decoder state and $\bar{h}_s$ = all encoder states. Overall, the Global attention mechanism can be summarized by the following figure. Note that we will implement the “Attention Layer” as a separate nn.Module called Attention. The output of this module is a `softmax` normalized weights tensor of shape (`batch_size, 1, max_length`). 
![img](https://pytorch.org/tutorials/_images/global_attn.png) ``` class Attention(nn.Module): def __init__(self, method, hidden_size): super(Attention, self).__init__() self.method = method if self.method not in ['dot', 'general', 'concat']: raise ValueError(self.method, "is not an appropriate attention method.") self.hidden_size = hidden_size if self.method == 'general': self.attn = nn.Linear(self.hidden_size, hidden_size) elif self.method == 'concat': self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.FloatTensor(hidden_size)) def dot_score(self, hidden, encoder_output): return torch.sum(hidden * encoder_output, dim=2) def general_score(self, hidden, encoder_output): energy = self.attn(encoder_output) return torch.sum(hidden * energy, dim=2) def concat_score(self, hidden, encoder_output): energy = self.attn(torch.cat((hidden.expand(encoder_output.size(0), -1, -1), encoder_output), 2)).tanh() return torch.sum(self.v * energy, dim=2) def forward(self, hidden, encoder_outputs): # Calculate the attention weights (energies) based on the given method if self.method == 'general': attn_energies = self.general_score(hidden, encoder_outputs) elif self.method == 'concat': attn_energies = self.concat_score(hidden, encoder_outputs) elif self.method == 'dot': attn_energies = self.dot_score(hidden, encoder_outputs) # Transpose max_length and batch_size dimensions attn_energies = attn_energies.t() # Return the softmax normalized probability scores (with added dimension) return F.softmax(attn_energies, dim=1).unsqueeze(1) ``` Now that we have defined our attention submodule, we can implement the actual decoder model. For the decoder, we will manually feed our batch one time step at a time. This means that our embedded word tensor and GRU output will both have shape `(1, batch_size, hidden_size)`. ### Computational Graph 1. Get embedding of current input word. 2. Forward through unidirectional GRU. 3. 
Calculate attention weights from the current GRU output from (2). 4. Multiply attention weights to encoder outputs to get new “weighted sum” context vector. 4. Concatenate weighted context vector and GRU output using Luong eq. 5. 6. Predict next word using Luong eq. 6 (without softmax). 7. Return output and final hidden state. ### Inputs: * input_step: one time step (one word) of input sequence batch; shape=(`1, batch_size`) * last_hidden: final hidden layer of GRU; shape=(`n_layers x num_directions, batch_size, hidden_size`) * encoder_outputs: encoder model’s output; shape=(`max_length, batch_size, hidden_size`) ### Outputs: * output: softmax normalized tensor giving probabilities of each word being the correct next word in the decoded sequence; shape=(`batch_size, voc.num_words`) * hidden: final hidden state of GRU; shape=(`n_layers x num_directions, batch_size, hidden_size`) ``` class Decoder(nn.Module): def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1): super(Decoder, self).__init__() # Keep for reference self.attn_model = attn_model self.hidden_size = hidden_size self.output_size = output_size self.n_layers = n_layers self.dropout = dropout # Define layers self.embedding = embedding self.embedding_dropout = nn.Dropout(dropout) self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout)) self.concat = nn.Linear(hidden_size * 2, hidden_size) self.out = nn.Linear(hidden_size, output_size) self.attn = Attention(attn_model, hidden_size) def forward(self, input_step, last_hidden, encoder_outputs): # Note: we run this one step (word) at a time # Get embedding of current input word embedded = self.embedding(input_step) embedded = self.embedding_dropout(embedded) # Forward through unidirectional GRU rnn_output, hidden = self.gru(embedded, last_hidden) # Calculate attention weights from the current GRU output attn_weights = self.attn(rnn_output, encoder_outputs) # Multiply attention weights 
to encoder outputs to get new "weighted sum" context vector context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # Concatenate weighted context vector and GRU output using Luong eq. 5 rnn_output = rnn_output.squeeze(0) context = context.squeeze(1) concat_input = torch.cat((rnn_output, context), 1) concat_output = torch.tanh(self.concat(concat_input)) # Predict next word using Luong eq. 6 output = self.out(concat_output) output = F.softmax(output, dim=1) # Return output and final hidden state return output, hidden ``` ### Define training Procedure 1. masked loss Since we are dealing with batches of padded sequences, we cannot simply consider all elements of the tensor when calculating loss. We define `maskNLLLoss` to calculate our loss based on our decoder’s output tensor, the target tensor, and a binary mask tensor describing the padding of the target tensor. This loss function calculates the average negative log likelihood of the elements that correspond to a 1 in the mask tensor. ``` def maskNLLLoss(inp, target, mask): nTotal = mask.sum() crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1)) loss = crossEntropy.masked_select(mask).mean() loss = loss.to(device) return loss, nTotal.item() ``` ### Single training iteration The train function contains the algorithm for a single training iteration (a single batch of inputs). We will use a couple of clever tricks to aid in convergence: The first trick is using **teacher forcing**. This means that at some probability, set by `teacher_forcing_ratio`, we use the current target word as the decoder’s next input rather than using the decoder’s current guess. This technique acts as training wheels for the decoder, aiding in more efficient training. However, teacher forcing can lead to model instability during inference, as the decoder may not have a sufficient chance to truly craft its own output sequences during training. 
Thus, we must be mindful of how we are setting the `teacher_forcing_ratio`, and not be fooled by fast convergence. * The second trick that we implement is **gradient clipping**. This is a commonly used technique for countering the “exploding gradient” problem. In essence, by clipping or thresholding gradients to a maximum value, we prevent the gradients from growing exponentially and either overflow (NaN), or overshoot steep cliffs in the cost function. ![img](https://pytorch.org/tutorials/_images/grad_clip.png) ### Sequence operations 1. Forward pass entire input batch through encoder. 2. Initialize decoder inputs as SOS_token, and hidden state as the encoder’s final hidden state. 3. Forward input batch sequence through decoder one time step at a time. 4. If teacher forcing: set next decoder input as the current target; else: set next decoder input as current decoder output. 5. Calculate and accumulate loss. 6. Perform backpropagation. 7. Clip gradients. 8. Update encoder and decoder model parameters. 
```
def train(input_variable, lengths, target_variable, mask, max_target_len,
          encoder, decoder, embedding, encoder_optimizer, decoder_optimizer,
          batch_size, clip, max_length=MAX_LENGTH):
    """Run one training iteration (a single batch) and return the average loss."""
    # Zero gradients
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    # Set device options
    input_variable = input_variable.to(device)
    target_variable = target_variable.to(device)
    mask = mask.to(device)
    # Lengths for RNN packing must live on the CPU
    lengths = lengths.to("cpu")

    # Initialize loss accumulators
    loss = 0
    print_losses = []
    n_totals = 0

    # Forward pass through encoder
    encoder_outputs, encoder_hidden = encoder(input_variable, lengths)

    # Create the initial decoder input (start every sequence of the batch with
    # SOS_token).  These two initialisations were missing, so the first use of
    # decoder_input / decoder_hidden below raised a NameError.
    decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])
    decoder_input = decoder_input.to(device)
    # Set initial decoder hidden state to the encoder's final hidden state
    decoder_hidden = encoder_hidden[:decoder.n_layers]

    # Determine if we are using teacher forcing this iteration
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    # Forward batch of sequences through decoder one time step at a time
    if use_teacher_forcing:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            # Teacher forcing: next input is current target
            decoder_input = target_variable[t].view(1, -1)
            # Calculate and accumulate loss
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    else:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            # No teacher forcing: next input is decoder's own current output
            _, topi = decoder_output.topk(1)
            decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
            decoder_input = decoder_input.to(device)
            # Calculate and accumulate loss
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal

    # Perform backpropagation
    loss.backward()

    # Clip gradients: gradients are modified in place
    _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)

    # Adjust model weights
    encoder_optimizer.step()
    decoder_optimizer.step()

    return sum(print_losses) / n_totals
```

### Training iterations

It is finally time to tie the full training procedure together with the data.
The `trainIters` function is responsible for running `n_iterations` of training given the passed models, optimizers, data, etc. This function is quite self explanatory, as we have done the heavy lifting with the train function. One thing to note is that when we save our model, we save a tarball containing the encoder and decoder `state_dicts` (parameters), the optimizers’ state_dicts, the loss, the iteration, etc. Saving the model in this way will give us the ultimate flexibility with the checkpoint. After loading a checkpoint, we will be able to use the model parameters to run inference, or we can continue training right where we left off ``` def trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer, embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size, print_every, save_every, clip, corpus_name, loadFilename): # Load batches for each iteration training_batches = [batch_2_train_data(voc, [random.choice(pairs) for _ in range(batch_size)]) for _ in range(n_iteration)] # Initializations print('Initializing ...') start_iteration = 1 print_loss = 0 if loadFilename: start_iteration = checkpoint['iteration'] + 1 # Training loop print("Training...") for iteration in range(start_iteration, n_iteration + 1): training_batch = training_batches[iteration - 1] # Extract fields from batch input_variable, lengths, target_variable, mask, max_target_len = training_batch # Run a training iteration with batch loss = train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder, embedding, encoder_optimizer, decoder_optimizer, batch_size, clip) print_loss += loss # Print progress if iteration % print_every == 0: print_loss_avg = print_loss / print_every print("Iteration: {}; Percent complete: {:.1f}%; Average loss: {:.4f}".format(iteration, iteration / n_iteration * 100, print_loss_avg)) print_loss = 0 # Save checkpoint if (iteration % save_every == 0): directory = os.path.join(save_dir, 
model_name, corpus_name, '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size)) if not os.path.exists(directory): os.makedirs(directory) torch.save({ 'iteration': iteration, 'en': encoder.state_dict(), 'de': decoder.state_dict(), 'en_opt': encoder_optimizer.state_dict(), 'de_opt': decoder_optimizer.state_dict(), 'loss': loss, 'voc_dict': voc.__dict__, 'embedding': embedding.state_dict() }, os.path.join(directory, '{}_{}.tar'.format(iteration, 'checkpoint'))) ``` ### Define Evaluation After training a model, we want to be able to talk to the bot ourselves. First, we must define how we want the model to decode the encoded input. ### Greedy decoding Greedy decoding is the decoding method that we use during training when we are **NOT** using teacher forcing. In other words, for each time step, we simply choose the word from `decoder_output` with the highest softmax value. This decoding method is optimal on a single time-step level. To facilitate the greedy decoding operation, we define a `GreedySearchDecoder` class. When run, an object of this class takes an input sequence `(input_seq)` of shape `(input_seq length, 1)`, a scalar input length (input_length) tensor, and a `max_length` to bound the response sentence length. The input sentence is evaluated using the following computational graph: ### Computation Graph: 1. Forward input through encoder model. 2. Prepare encoder’s final hidden layer to be first hidden input to the decoder. 3. Initialize decoder’s first input as SOS_token. 4. Initialize tensors to append decoded words to. 5. Iteratively decode one word token at a time: 1. Forward pass through decoder. 2. Obtain most likely word token and its softmax score. 3. Record token and score. 4. Prepare current token to be next decoder input. 6. Return collections of word tokens and scores. 
```
class GreedySearchDecoder(nn.Module):
    """Greedy decoding: at every time step emit the highest-softmax-score word."""

    def __init__(self, encoder, decoder):
        super(GreedySearchDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, input_seq, input_length, max_length):
        # Forward input through encoder model
        encoder_outputs, encoder_hidden = self.encoder(input_seq, input_length)
        # Prepare encoder's final hidden layer to be first hidden input to the
        # decoder.  Use self.decoder here — the previous code read the *global*
        # name `decoder`, which only worked by accident in notebook scope.
        decoder_hidden = encoder_hidden[:self.decoder.n_layers]
        # Initialize decoder input with SOS_token
        decoder_input = torch.ones(1, 1, device=device, dtype=torch.long) * SOS_token
        # Initialize tensors to append decoded words to
        all_tokens = torch.zeros([0], device=device, dtype=torch.long)
        all_scores = torch.zeros([0], device=device)
        # Iteratively decode one word token at a time
        for _ in range(max_length):
            # Forward pass through decoder
            decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
            # Obtain most likely word token and its softmax score
            decoder_scores, decoder_input = torch.max(decoder_output, dim=1)
            # Record token and score
            all_tokens = torch.cat((all_tokens, decoder_input), dim=0)
            all_scores = torch.cat((all_scores, decoder_scores), dim=0)
            # Prepare current token to be next decoder input (add a dimension)
            decoder_input = torch.unsqueeze(decoder_input, 0)
        # Return collections of word tokens and scores
        return all_tokens, all_scores
```

### Evaluate my text

Now that we have our decoding method defined, we can write functions for evaluating a string input sentence. The `evaluate` function manages the low-level process of handling the input sentence. We first format the sentence as an input batch of word indexes with `batch_size==1`. We do this by converting the words of the sentence to their corresponding indexes, and transposing the dimensions to prepare the tensor for our models. We also create a `lengths` tensor which contains the length of our input sentence.
In this case, lengths is scalar because we are only evaluating one sentence at a time `(batch_size==1)`. Next, we obtain the decoded response sentence tensor using our `GreedySearchDecoder` object `(searcher)`. Finally, we convert the response’s indexes to words and return the list of decoded words. `evaluateInput` acts as the user interface for our chatbot. When called, an input text field will spawn in which we can enter our query sentence. After typing our input sentence and pressing Enter, our text is normalized in the same way as our training data, and is ultimately fed to the `evaluate` function to obtain a decoded output sentence. We loop this process, so we can keep chatting with our bot until we enter either “q” or “quit”. Finally, if a sentence is entered that contains a word that is not in the vocabulary, we handle this gracefully by printing an error message and prompting the user to enter another sentence.

```
def evaluate(encoder, decoder, searcher, voc, sentence, max_length=MAX_LENGTH):
    """Decode one input sentence and return the list of response words."""
    ### Format input sentence as a batch
    # words -> indexes.  Use indexes_from_sentence, the helper defined earlier
    # in this notebook; the camelCase name `indexesFromSentence` used before
    # does not exist here and raised a NameError.
    indexes_batch = [indexes_from_sentence(voc, sentence)]
    # Create lengths tensor
    lengths = torch.tensor([len(indexes) for indexes in indexes_batch])
    # Transpose dimensions of batch to match models' expectations
    input_batch = torch.LongTensor(indexes_batch).transpose(0, 1)
    # Use appropriate device
    input_batch = input_batch.to(device)
    lengths = lengths.to("cpu")
    # Decode sentence with searcher
    tokens, scores = searcher(input_batch, lengths, max_length)
    # indexes -> words
    decoded_words = [voc.index2word[token.item()] for token in tokens]
    return decoded_words


def evaluateInput(encoder, decoder, searcher, voc):
    """Interactive chat loop; type 'q' or 'quit' to stop."""
    input_sentence = ''
    while True:
        try:
            # Get input sentence
            input_sentence = input('> ')
            # Check if it is quit case
            if input_sentence == 'q' or input_sentence == 'quit':
                break
            # Normalize sentence with the same routine applied to the training
            # data (normalize_string; the camelCase `normalizeString` was
            # undefined in this notebook).
            input_sentence = normalize_string(input_sentence)
            # Evaluate sentence
            output_words = evaluate(encoder, decoder, searcher, voc, input_sentence)
            # Format and print response sentence
            output_words[:] = [x for x in output_words if not (x == 'EOS' or x == 'PAD')]
            print('Bot:', ' '.join(output_words))

        except KeyError:
            print("Error: Encountered unknown word.")
```

### Run Model

Finally, it is time to run our model! Regardless of whether we want to train or test the chatbot model, we must initialize the individual encoder and decoder models. In the following block, we set our desired configurations, choose to start from scratch or set a checkpoint to load from, and build and initialize the models. Feel free to play with different model configurations to optimize performance.

```
# Configure models
model_name = 'cb_model'
attn_model = 'dot'
#attn_model = 'general'
#attn_model = 'concat'
hidden_size = 500
encoder_n_layers = 2
decoder_n_layers = 2
dropout = 0.1
batch_size = 64

# Set checkpoint to load from; set to None if starting from scratch
loadFilename = None
checkpoint_iter = 4000
#loadFilename = os.path.join(save_dir, model_name, corpus_name,
#                            '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
#                            '{}_checkpoint.tar'.format(checkpoint_iter))

# Load model if a loadFilename is provided
if loadFilename:
    # If loading on same machine the model was trained on
    checkpoint = torch.load(loadFilename)
    # If loading a model trained on GPU to CPU
    #checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
    encoder_sd = checkpoint['en']
    decoder_sd = checkpoint['de']
    encoder_optimizer_sd = checkpoint['en_opt']
    decoder_optimizer_sd = checkpoint['de_opt']
    embedding_sd = checkpoint['embedding']
    voc.__dict__ = checkpoint['voc_dict']

print('Building encoder and decoder ...')
# Initialize word embeddings
embedding = nn.Embedding(voc.num_words, hidden_size)
if loadFilename:
    embedding.load_state_dict(embedding_sd)
# Initialize encoder & decoder models
encoder = Encoder(hidden_size, embedding, encoder_n_layers, dropout)
decoder = Decoder(attn_model, embedding, hidden_size, voc.num_words,
decoder_n_layers, dropout) if loadFilename: encoder.load_state_dict(encoder_sd) decoder.load_state_dict(decoder_sd) # Use appropriate device encoder = encoder.to(device) decoder = decoder.to(device) print('Models built and ready to go!') ``` ### Run training ``` # Configure training/optimization clip = 50.0 teacher_forcing_ratio = 1.0 learning_rate = 0.0001 decoder_learning_ratio = 5.0 n_iteration = 4000 print_every = 1 save_every = 500 # Ensure dropout layers are in train mode encoder.train() decoder.train() # Initialize optimizers print('Building optimizers ...') encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=learning_rate) decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio) if loadFilename: encoder_optimizer.load_state_dict(encoder_optimizer_sd) decoder_optimizer.load_state_dict(decoder_optimizer_sd) # If you have cuda, configure cuda to call for state in encoder_optimizer.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.cuda() for state in decoder_optimizer.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.cuda() # Run training iterations print("Starting Training!") trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer, embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size, print_every, save_every, clip, corpus_name, loadFilename) ``` ### Inference ``` # Set dropout layers to eval mode encoder.eval() decoder.eval() # Initialize search module searcher = GreedySearchDecoder(encoder, decoder) # Begin chatting (uncomment and run the following line to begin) evaluateInput(encoder, decoder, searcher, voc) ``` ### Ref * [chatbot_tutorial](https://pytorch.org/tutorials/beginner/chatbot_tutorial.html?highlight=machine%20translation) ``` ```
github_jupyter
``` %matplotlib inline ``` Creating extensions using numpy and scipy ========================================= **Author**: `Adam Paszke <https://github.com/apaszke>`_ In this tutorial, we shall go through two tasks: 1. Create a neural network layer with no parameters. - This calls into **numpy** as part of it’s implementation 2. Create a neural network layer that has learnable weights - This calls into **SciPy** as part of it’s implementation ``` import torch from torch.autograd import Function from torch.autograd import Variable ``` Parameter-less example ---------------------- This layer doesn’t particularly do anything useful or mathematically correct. It is aptly named BadFFTFunction **Layer Implementation** ``` from numpy.fft import rfft2, irfft2 class BadFFTFunction(Function): def forward(self, input): numpy_input = input.numpy() result = abs(rfft2(numpy_input)) return torch.FloatTensor(result) def backward(self, grad_output): numpy_go = grad_output.numpy() result = irfft2(numpy_go) return torch.FloatTensor(result) # since this layer does not have any parameters, we can # simply declare this as a function, rather than as an nn.Module class def incorrect_fft(input): return BadFFTFunction()(input) ``` **Example usage of the created layer:** ``` input = Variable(torch.randn(8, 8), requires_grad=True) result = incorrect_fft(input) print(result.data) result.backward(torch.randn(result.size())) print(input.grad) ``` Parametrized example -------------------- This implements a layer with learnable weights. It implements the Cross-correlation with a learnable kernel. In deep learning literature, it’s confusingly referred to as Convolution. The backward computes the gradients wrt the input and gradients wrt the filter. 
**Implementation:** *Please Note that the implementation serves as an illustration, and we did not verify it’s correctness* ``` from scipy.signal import convolve2d, correlate2d from torch.nn.modules.module import Module from torch.nn.parameter import Parameter class ScipyConv2dFunction(Function): def forward(self, input, filter): result = correlate2d(input.numpy(), filter.numpy(), mode='valid') self.save_for_backward(input, filter) return torch.FloatTensor(result) def backward(self, grad_output): input, filter = self.saved_tensors grad_input = convolve2d(grad_output.numpy(), filter.t().numpy(), mode='full') grad_filter = convolve2d(input.numpy(), grad_output.numpy(), mode='valid') return torch.FloatTensor(grad_input), torch.FloatTensor(grad_filter) class ScipyConv2d(Module): def __init__(self, kh, kw): super(ScipyConv2d, self).__init__() self.filter = Parameter(torch.randn(kh, kw)) def forward(self, input): return ScipyConv2dFunction()(input, self.filter) ``` **Example usage:** ``` module = ScipyConv2d(3, 3) print(list(module.parameters())) input = Variable(torch.randn(10, 10), requires_grad=True) output = module(input) print(output) output.backward(torch.randn(8, 8)) print(input.grad) ```
github_jupyter
# Web scraping practice: Pittsburgh lobbyists

We're going to scrape [a database of people registered to lobby the city of Pittsburgh](http://www.openbookpittsburgh.com/SearchLobbyists.aspx).

First, let's import the packages we'll need: `csv`, `requests`, `bs4`, `pandas`.

```
# import csv, requests, BeautifulSoup from bs4, pandas
```

### Noodle around

Navigate to the page we're going to scrape. We want everyone -- what happens when you hit "search" without entering any criteria into the form?

It works! (This won't always be the case for databases like this.) As of July 28, 2018, the search showed 83 lobbyists.

Notice, too, that the URL has changed from this:

`http://www.openbookpittsburgh.com/SearchLobbyists.aspx`

To this:

`http://www.openbookpittsburgh.com/SearchLobbyists.aspx?&page=0&cat=LobbyistName&sort=ASC&num=10&click=1`

After the `?` are the URL _parameters_, separated by `&`:

- `page=0`
- `cat=LobbyistName`
- `sort=ASC`
- `num=10`
- `click=1`

These are the instructions that get passed to the database after we click search: Return results under the category "Lobbyist Name," show 10 lobbyists at a time, sort ascending, starting with page zero (the first page).

(What happens if you _do_ put something in the search field? A new parameter is added to the URL: `lobbyist=`. But we want everything, so we can ignore this.)

What happens if we tweak the URL and instruct the database to show us _100_ results at a time? Try it:

[`http://www.openbookpittsburgh.com/SearchLobbyists.aspx?&page=0&cat=LobbyistName&sort=ASC&num=100&click=1`](http://www.openbookpittsburgh.com/SearchLobbyists.aspx?&page=0&cat=LobbyistName&sort=ASC&num=100&click=1)

Now we have everyone in the database.

### Save web page locally

So we've got our target URL -- if we request that page, we get back some HTML containing all the data we'd like to scrape. When possible, it's good practice to save local copies of the pages that you're scraping.
That way you don't have to rely on a stable internet connection as you work on your scraper, and you can avoid sending unneccessary traffic to the target's server. Let's do that now. First, set up a couple of variables: - The base URL - A [dictionary](../reference/Python%20data%20types%20and%20basic%20syntax.ipynb#Dictionaries) of URL parameters (see [the requests documentation here](http://docs.python-requests.org/en/master/user/quickstart/#passing-parameters-in-urls)) - The name of the `.html` file we'll save to locally - The name of the `.csv` file we'll write out our results to ``` BASE_URL = 'http://www.openbookpittsburgh.com/SearchLobbyists.aspx' URL_PARAMS = { 'page': 0, 'cat': 'LobbyistName', 'sort': 'ASC', 'num': 1000, 'click': 1 } HTML_FILE = 'pittsburgh-lobbyists.html' CSV_FILE = 'pittsburgh-lobbyists.csv' ``` Now actually fetch the page, specifying our headers and `params=URL_PARAMS`. ``` # request the page # specify URL to get, custom headers and `params` ``` Write the `text` attribute -- the code underpinning the requested page -- to the file under the name we just specified. ``` # open html file # and write the page text into it ``` Great! Now we have a copy of the webpage in this directory. Let's open it up and turn the contents into a `BeautifulSoup` object. ``` # open the html file we just made # read in the contents and turn them into soup ``` We're ready to start looking for patterns and isolating the HTML elements we want to target. I like to examine the source code in the browser (In Chrome, it's `Ctrl+U` on PCs and `Ctrl+option+U` on a Mac). It looks like all of the lobbyist HTML is enclosed in a `div` with the class `items-container`. Let's use the BeautifulSoup method `find` to isolate that first. ``` # find the container ``` Within that container, it looks like each individual entry is a `div` with the class `item`. Let's use `find_all` to return a list of matching elements within the container. 
Then we can use the built-in [`len()`](https://docs.python.org/3/library/functions.html#len) function to see how many we've got. ``` # find the items # check the length of the list of items with len() ``` Looking good! Let's grab _one_ of those items as a test and parse out the information. We'll then use what we learned to scrape the info out of each entry, one at a time. Lobbyists have multiple clients, so in our database, one record will be one lobbying relationship -- each line is, essentially, a client and the lobbyist representing them. ``` # grab the first item and call it `test` # the person's name is in an h2 headline # their position is in a span element with the class `position` # their status is in two span elements that have the class `position` # the first is currency (expired or current) # the second is, are they a lobbyist for the city? # find_all() returns a list # grab text of "currency" status tag # set a default value -- they're assumed to not be a city lobbyist # unless the word "yes" appears in the (lowercased) city lobbyist span text # in which case, flip that variable to true # the company is in a div with the class `type` # the company address is in a div with the class `title` # lobbyists can have one or more clients, and these are list items in an unordered list # use find_all() to get all of the list items # loop over the list of clients # the company is in a span with the class `company` # we'll also strip off the colon at the end and kill any external whitespace # https://www.tutorialspoint.com/python/string_rstrip.htm # the company address is in a span with the class `address` # use a trick to strip out internal whitespace # https://stackoverflow.com/a/3739939 # print the results ``` Solid. Now we can basically copy-paste that code into a [for loop](../reference/Python%20data%20types%20and%20basic%20syntax.ipynb#for-loops) and apply it to each item we found. 
``` # for every item in our list # name is in the h2 tag -- text.strip() # position is span with class position # status is split across two span tags with a class called 'status' # use find_all() to get them in a list # first one [0] is status # default value -- they're not a lobbyist for the city # if 'yes' is in the lowercase text of the second 'status' span # then they _are_ a city lobbyist # company is a div with class `type` # company address is a div with class `title` # clients are a bunch of list items -- use find_all() to get them # loop over client list # client company is span with class `company` # rstrip the colon and strip whitespace # client address is span with class address # remove internal whitespace # print the results ``` Looking good! Now let's write everything out to a CSV. ``` # open the CSV_FILE in write mode, newline='' # define headers headers = ['name', 'position', 'status', 'city_lobbyist', 'company', 'company_address', 'client_company', 'client_address'] # create a DictWriter object # write the headers # loop over the items in our list # name is h2 # position is span with class position # statuses arae in spans with class status -- use find_all() # first thing in that list is status # assume not a lobbyist for the city # but if 'yes' in text of second status tag # flip to True # company info is div with class `type` # company address is div with class `title` # find_all() to get list of `li` tags of clients # loop over client list # client company in span with class `company` # rstrip() colon and strip() whitespace # client address is span with class `address` # remove external whitespace # write out to file ``` ### _Extra credit_ We're repeating ourselves a lot here. If I were going to publish this scraper, I'd probably clean this up into a series of functions that each do one thing. 
Some homework, if you're interested: Break down the processing we've done into major tasks (fetch the page, save to file, parse the contents) and write [functions](../reference/Functions.ipynb) for each task.

(Eventually, as you progress in your coding journey, [this handy guide to refactoring](https://refactoring-101.readthedocs.io/en/latest/) will become very useful!)

### Load data into pandas for analysis

Congrats! You've scraped a web page into a clean CSV. Here's where you could load it up into pandas and take a look.

```
# read in our csv

# use head() to check it out

# what else?
```
github_jupyter
# YOLO で転移学習

## インストール

```
%%bash
pip install yolov4
```

## インポート

```
# os は後続セルの os.environ 設定で必要(元のセルでは import が抜けていた)
import os
from pathlib import Path

import certifi
import numpy as np
from PIL import Image
from yolov4.tf import YOLOv4, SaveWeightsCallback
import tensorflow as tf
```

## 転移学習で使用する学習済みモデルのダウンロード

```
# ダウンロードに失敗しないようにするためのおまじない
os.environ["SSL_CERT_FILE"] = certifi.where()

yolov4_tiny_weights_path = tf.keras.utils.get_file(
    fname="yolov4-tiny.conv.29",
    origin="https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.conv.29",
    cache_subdir="models/yolov4",
)
yolov4_tiny_weights_path
```

## 学習データを作成

labelImg でアノテーションを行い、下記のような構成で保存しておきます。
アノテーションの保存書式は YOLO を使用します。

```
dataset
├── classes.txt
├── test
│   ├── 0001.jpg
│   ├── 0001.txt
│   ├── 0002.jpg
│   ├── 0002.txt
│   ├── 0003.jpg
│   ├── 0003.txt
│   ├── 0004.jpg
│   ├── 0004.txt
│   ├── 0005.jpg
│   └── image_path.txt
└── train
    ├── 0001.jpg
    ├── 0001.txt
    ├── 0002.jpg
    ├── 0002.txt
    ├── 0003.jpg
    ├── 0003.txt
    ├── 0004.jpg
    ├── 0004.txt
    ├── 0005.jpg
    └── image_path.txt
```

**classes.txt**

アノテーションに対するラベル名を行ごとに定義したファイルです。

**サンプル:**

```
person
bicycle
car
motorbike
aeroplane
...
``` **0001.txt, 0002.txt,...** YOLO 形式で保存されたバウンディングボックスの座標です。 labelImg でアノテーションを行い、YOLO 形式で保存すれば作成できます。 .jpg のファイル名に対応したアノテーションは同名の .txt に保存します。 各列の意味は `<label> <x_center> <y_center> <width> <height>` です。 - `<label>`: `classes.txt` の何行目のラベル名に対応するかを 0 始まりで指定 - `<x_center>`: バウンディングボックスの中心 x 座標を画像の幅で割った値 - `<y_center>`: バウンディングボックスの中心 y 座標を画像の高さで割った値 - `<width>`: バウンディングボックスの幅を画像の幅で割った値 - `<height>`: バウンディングボックスの高さを画像の高さで割った値 **サンプル:** ``` 0 0.651231 0.532031 0.132474 0.201563 ``` **image_path.txt** 画像データのパスを記述したファイルです。 **サンプル:** ``` 0001.jpg 0002.jpg 0003.jpg 0004.jpg 0005.jpg ``` ## 学習済みモデルをロード ``` yolo = YOLOv4(tiny=True) yolo.classes = "dataset/classes.txt" yolo.input_size = 608 yolo.batch_size = 5 yolo.make_model() yolo.load_weights(yolov4_tiny_weights_path, weights_type="yolo") ``` ## 学習データと検証データをロード `image_path.txt` 内の画像データのパスは `image_path_prefix` からの相対パスとみなされます。 ``` train_data = yolo.load_dataset( "dataset/train/image_path.txt", dataset_type="yolo", image_path_prefix="dataset/train", label_smoothing=0.05, training=True, ) validation_data = yolo.load_dataset( "dataset/test/image_path.txt", dataset_type="yolo", image_path_prefix="dataset/test", training=False, ) ``` ## ハイパーパラメータを定義 ``` epochs = 100 learning_rate = 1e-4 optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) yolo.compile(optimizer=optimizer, loss_iou_type="ciou") def create_learning_rate_scheduler(epochs): def scheduler(epoch, lr): if epoch < int(epochs * 0.5): return lr elif epoch < int(epochs * 0.8): return lr * 0.5 elif epoch < int(epochs * 0.9): return lr * 0.1 else: return lr * 0.01 return scheduler ``` ## 学習を開始 ``` yolo.fit( train_data, epochs=epochs, validation_data=validation_data, validation_steps=5, validation_freq=10, steps_per_epoch=5, callbacks=[ tf.keras.callbacks.LearningRateScheduler(create_learning_rate_scheduler(epochs)), tf.keras.callbacks.TerminateOnNaN(), tf.keras.callbacks.TensorBoard(log_dir="logs"), SaveWeightsCallback( yolo=yolo, dir_path="weights", 
weights_type="yolo", epoch_per_save=10, ), ], ) ``` ## 学習したモデルを使用 ``` yolo.load_weights("weights/yolov4-tiny-final.weights", weights_type="yolo") with open("dataset/test/image_path.txt") as f: output_images = [] for image_path in map(lambda x: f"dataset/test/{x.strip()}", f): print(image_path) image = Image.open(image_path) image = np.array(image) boxes = yolo.predict(image) output_image = yolo.draw_bboxes(image, boxes) output_image = Image.fromarray(output_image) output_images.append(output_image) output_images[0].save("out.gif", save_all=True, append_images=output_images[1:], loop=0) ```
github_jupyter
``` import sys sys.path.append("../codes/") from Readfiles import getFnames from DCdata import readReservoirDC %pylab inline from SimPEG.EM.Static import DC from SimPEG import EM from SimPEG import Mesh from SimPEG.Survey import Data def removeRxsfromDC(survey, inds, DClow=-np.inf, DChigh=np.inf, surveyType="2D"): srcList = survey.srcList srcListNew = [] dobs = survey.dobs dobs[inds] = np.nan data = Data(survey, survey.dobs) rxData = [] for iSrc, src in enumerate(srcList): rx = src.rxList[0] data_temp = data[src, rx] rxinds = np.isnan(data_temp) | (np.logical_or(DClow>data_temp, DChigh<data_temp)) nrxact_temp = rxinds.sum() nrx_temp = len(rxinds) rxlocM = rx.locs[0] rxlocN = rx.locs[1] srcloc = src.loc rxData.append(data_temp[~rxinds]) # All Rxs are active if nrxact_temp == 0: if surveyType == "2D": rxNew = DC.Rx.Dipole_ky(rxlocM, rxlocN) else: rxNew = DC.Rx.Dipole(rxlocM, rxlocN) srcNew = DC.Src.Dipole([rxNew], srcloc[0], srcloc[1]) srcListNew.append(srcNew) # All Rxs are nan then remove src elif nrx_temp == nrxact_temp: print ("Remove %i-th Src") % (iSrc) # Some Rxs are not active else: if surveyType == "2D": rxNew = DC.Rx.Dipole_ky(rxlocM[~rxinds,:], rxlocN[~rxinds,:]) else: rxNew = DC.Rx.Dipole(rxlocM[~rxinds,:], rxlocN[~rxinds,:]) srcNew = DC.Src.Dipole([rxNew], srcloc[0], srcloc[1]) srcListNew.append(srcNew) if surveyType == "2D": surveyNew = DC.Survey_ky(srcListNew) else: surveyNew = DC.Survey(srcListNew) surveyNew.dobs = np.hstack(rxData) return surveyNew #EM.Static.Utils.StaticUtils.plot_pseudoSection? 
fname1 = "../data/ChungCheonDC/20150101000000.apr" survey1 = readReservoirDC(fname1) dobsAppres1 = survey1.dobs fig, ax = plt.subplots(1,1, figsize = (10, 2)) dat = EM.Static.Utils.StaticUtils.plot_pseudoSection(survey1, ax) cb = dat[2] cb.set_label("Apprent resistivity (ohm-m)") geom = np.hstack(dat[3]) dobsDC = dobsAppres1 * geom fname2 = "../data/ChungCheonDC/20151218000000.apr" survey2 = readReservoirDC(fname2) dobsAppres2 = survey2.dobs fig, ax2 = plt.subplots(1,1, figsize = (10, 2)) dat = EM.Static.Utils.StaticUtils.plot_pseudoSection(survey2, ax2) cb = dat[2] #cb.set_label("Apparent resistivity (ohm-m)") #geom = np.hstack(dat[3]) #dobsDC = dobsAppres2 * geom plt.plot(abs(dobsAppres1)) survey1 = readReservoirDC(fname1) survey2 = readReservoirDC(fname2) survey12 = readReservoirDC(fname2) survey12.dobs = (abs(survey2.dobs-survey1.dobs) / abs(survey1.dobs))*100. survey2.dobs.shape survey1.dobs.shape survey1.dobs.shape # print dobsAppres[6:] #survey2 = readReservoirDC(fname2) #dobsAppres2 = survey.dobs fig, ax = plt.subplots(1,1, figsize = (10, 2)) dat = EM.Static.Utils.StaticUtils.plot_pseudoSection(survey12, ax) cb = dat[2] cb.set_label("Apparent resistivity (ohm-m)") geom = np.hstack(dat[3]) #dobsDC = dobsAppres12 * geom # surveyNew = removeRxsfromDC(survey, [346], DClow=40, DChigh=145, surveyType="2D") surveyNew = removeRxsfromDC(survey12, [330], surveyType="2D") surveyNew = removeRxsfromDC(survey12, [338], surveyType="2D") surveyNew = removeRxsfromDC(survey12, [346], surveyType="2D") surveyNew = removeRxsfromDC(survey12, [351], surveyType="2D") surveyNew = removeRxsfromDC(survey12, [358], surveyType="2D") fig, ax = plt.subplots(1,1, figsize = (10, 2)) dat = EM.Static.Utils.StaticUtils.plot_pseudoSection(surveyNew, ax, dtype='volt', sameratio=False,clim=(-20,20)) cb = dat[2] cb.set_label(" Relative Ratio") geom = np.hstack(dat[3]) dobsDC = surveyNew.dobs * geom surveyNew.dobs = dobsDC fig, ax = plt.subplots(1,1, figsize = (10, 2)) dat = 
EM.Static.Utils.StaticUtils.plot_pseudoSection(surveyNew, ax, dtype='appr', sameratio=False) plt.plot(abs(dobsDC)) # problem = DC.Problem2D_CC(mesh) cs = 2.5 npad = 6 hx = [(cs,npad, -1.3),(cs,160),(cs,npad, 1.3)] hy = [(cs,npad, -1.3),(cs,20)] mesh = Mesh.TensorMesh([hx, hy]) mesh = Mesh.TensorMesh([hx, hy],x0=[-mesh.hx[:6].sum()-0.25, -mesh.hy.sum()]) def from3Dto2Dsurvey(survey): srcLists2D = [] nSrc = len(survey.srcList) for iSrc in range (nSrc): src = survey.srcList[iSrc] locsM = np.c_[src.rxList[0].locs[0][:,0], np.ones_like(src.rxList[0].locs[0][:,0])*-0.75] locsN = np.c_[src.rxList[0].locs[1][:,0], np.ones_like(src.rxList[0].locs[1][:,0])*-0.75] rx = DC.Rx.Dipole_ky(locsM, locsN) locA = np.r_[src.loc[0][0], -0.75] locB = np.r_[src.loc[1][0], -0.75] src = DC.Src.Dipole([rx], locA, locB) srcLists2D.append(src) survey2D = DC.Survey_ky(srcLists2D) return survey2D from SimPEG import (Mesh, Maps, Utils, DataMisfit, Regularization, Optimization, Inversion, InvProblem, Directives) # from pymatsolver import MumpsSolver mapping = Maps.ExpMap(mesh) survey2D = from3Dto2Dsurvey(surveyNew) problem = DC.Problem2D_N(mesh, mapping=mapping) problem.pair(survey2D) # problem.Solver = MumpsSolver m0 = np.ones(mesh.nC)*np.log(1e-2) from ipywidgets import interact nSrc = len(survey2D.srcList) def foo(isrc): figsize(10, 5) mesh.plotImage(np.ones(mesh.nC)*np.nan, gridOpts={"color":"k", "alpha":0.5}, grid=True) # isrc=0 src = survey2D.srcList[isrc] plt.plot(src.loc[0][0], src.loc[0][1], 'bo') plt.plot(src.loc[1][0], src.loc[1][1], 'ro') locsM = src.rxList[0].locs[0] locsN = src.rxList[0].locs[1] plt.plot(locsM[:,0], locsM[:,1], 'ko') plt.plot(locsN[:,0], locsN[:,1], 'go') plt.gca().set_aspect('equal', adjustable='box') interact(foo, isrc=(0, nSrc-1, 1)) pred = survey2D.dpred(m0) # data_anal = [] # nSrc = len(survey.srcList) # for isrc in range(nSrc): # src = survey.srcList[isrc] # locA = src.loc[0] # locB = src.loc[1] # locsM = src.rxList[0].locs[0] # locsN = src.rxList[0].locs[1] # 
rxloc=[locsM, locsN] # a = EM.Analytics.DCAnalyticHalf(locA, rxloc, 1e-3, earth_type="halfspace") # b = EM.Analytics.DCAnalyticHalf(locB, rxloc, 1e-3, earth_type="halfspace") # data_anal.append(a-b) # data_anal = np.hstack(data_anal) survey.dobs = pred fig, ax = plt.subplots(1,1, figsize = (10, 2)) dat = EM.Static.Utils.StaticUtils.plot_pseudoSection(surveyNew, ax, dtype='appr', sameratio=False, scale="linear", clim=(0, 200)) out = hist(np.log10(abs(dobsDC)), bins = 100) weight = 1./abs(mesh.gridCC[:,1])**1.5 mesh.plotImage(np.log10(weight)) survey2D.dobs = dobsDC survey2D.eps = 10**(-2.3) survey2D.std = 0.02 dmisfit = DataMisfit.l2_DataMisfit(survey2D) regmap = Maps.IdentityMap(nP=int(mesh.nC)) reg = Regularization.Simple(mesh,mapping=regmap,cell_weights=weight) opt = Optimization.ProjectedGNCG(maxIter=10) opt.upper = np.log(1e0) opt.lower = np.log(1./300) invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt) # Create an inversion object beta = Directives.BetaSchedule(coolingFactor=5, coolingRate=2) betaest = Directives.BetaEstimate_ByEig(beta0_ratio=1e0) target = Directives.TargetMisfit() inv = Inversion.BaseInversion(invProb, directiveList=[beta, betaest, target]) problem.counter = opt.counter = Utils.Counter() opt.LSshorten = 0.5 opt.remember('xc') mopt = inv.run(m0) xc = opt.recall("xc") fig, ax = plt.subplots(1,1, figsize = (10, 1.2)) iteration = 4 sigma = mapping*xc[iteration] dat = mesh.plotImage(1./sigma, grid=False, ax=ax, pcolorOpts={"cmap":"jet"}, clim=(0, 200)) ax.set_ylim(-30, 0) ax.set_xlim(-10, 290) plt.colorbar(dat[0]) print np.log10(sigma).min(), np.log10(sigma).max() 1./sigma.max() 1./sigma.min() surveyNew.dobs = invProb.dpred fig, ax = plt.subplots(1,1, figsize = (10, 2)) dat = EM.Static.Utils.StaticUtils.plot_pseudoSection(surveyNew, ax, dtype='appr', sameratio=False, clim=(40, 170)) surveyNew.dobs = dobsDC fig, ax = plt.subplots(1,1, figsize = (10, 2)) dat = EM.Static.Utils.StaticUtils.plot_pseudoSection(surveyNew, ax, dtype='appr', 
sameratio=False, clim=(40, 170)) surveyNew.dobs = abs(dmisfit.Wd*(dobsDC-invProb.dpred)) fig, ax = plt.subplots(1,1, figsize = (10, 2)) dat = EM.Static.Utils.StaticUtils.plot_pseudoSection(surveyNew, ax, dtype='volt', sameratio=False, clim=(0, 2)) # sigma = np.ones(mesh.nC) modelname = "sigma1218NBound.npy" np.save(modelname, sigma) ```
github_jupyter
```
from __future__ import print_function, division
```

# Images in Astronomy

In this lesson we are going to look at aspects of processing and viewing images specific to Astronomy and Solar Astronomy. By the end of this lesson you should understand:

* Projected Coordinate Systems in Images
* World Coordinate Systems
* Using WCS to calculate coordinates in images
* Plotting images with WCS in images
* Using SunPy Map

## Projected Coordinate Systems

When taking images of the sky, we are projecting the spherical celestial coordinate system onto a 2-dimensional plane, which means that there is no simple linear relation between pixel coordinates and celestial coordinates.

There are multiple coordinate systems used to describe the locations in 2D and 3D space for both Astronomy and Solar Physics. We shall use a couple of these systems here as examples, but if you want to know more about them there are many resources available.

### World Coordinate System

The FITS files have a standard for describing the physical coordinate system associated with imaging data; this is called the world coordinate system or WCS, and sometimes the specific FITS version of this is referred to as FITS-WCS.

There are multiple papers describing the FITS-WCS standard for various types of data, there is a list here: http://fits.gsfc.nasa.gov/fits_wcs.html

As you learned in the previous lesson we can load FITS files with Astropy. To demonstrate a simple example of a FITS file with FITS-WCS information in the header we shall use an image from SunPy:

```
from sunpy.data.sample import AIA_171_IMAGE
from astropy.io import fits

hdulist = fits.open(AIA_171_IMAGE)
hdulist.verify('silentfix')
hdulist[0].header
```

As you can see there are lots of keys in this and most other real world FITS headers.
The ones we need to understand for FITS-WCS are: Reference Pixel and Coordinate: ``` header = hdulist[0].header print(header['CRVAL1'], header['CRVAL2']) print(header['CRPIX1'], header['CRPIX2']) ``` Pixel resolution (at the reference pixel): ``` print(header['CDELT1'], header['CDELT2']) ``` Rotation angle, in degress (at the reference pixel): ``` print(header['CROTA2']) ``` Coordinate System and Projection: ``` print(header['CTYPE1'], header['CTYPE2']) ``` <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Keyword Extraction </h2> </div> Extract and print out the `TELESCOP` value from the header. Next, extract the `WAVELNTH` and `WAVEUNIT` values, use these to construct an astropy Quantity object for the wavelength of this image. ``` header['TELESCOP'] import astropy.units as u u.Quantity(header['WAVELNTH'], unit=header['WAVEUNIT']) ``` We could now sit down and work out how to convert from a pixel coordinate to a physical coordinate described by this header (Helioprojective). However, we can cheat and just use Astropy. ``` from astropy.wcs import WCS wcs = WCS(header) ``` We can convert from pixel to world coordinate: ``` wcs.wcs_pix2world(((100, 100),), 0) ``` Or back again: ``` wcs.wcs_world2pix([[ 3.59725669e+02, -2.74328093e-01]], 0) ``` The last parameter to the two above examples is the 'origin' parameter. It is a flag that tells WCS if you indexes should be 0-based (like numpy) or 1-based (like FITS). Here we are using 0 as we want to convert to and from numpy indexes of the array. <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> How large is the image? 
</h2> </div> <br/> To get a little practise using Astropy's WCS calculate the world coordinates of the following pixels: <code> [-500, 0] [500, 500] [0, 0] </code> <br/> </section> ``` print(wcs.wcs_pix2world(((-500, 0),), 1)) print(wcs.wcs_pix2world(((500, 500),), 1)) print(wcs.wcs_pix2world(((0, 0),), 1)) ``` ## Plotting with wcsaxes In this section we are going to use the wcsaxes package to make WCS aware image plots. ``` import wcsaxes ``` For this example we are going to use a Hubble image. ``` from astropy.io import fits hdulist = fits.open('./h_n4571_f555_mosaic.fits.gz') hdulist wcs = WCS(hdulist[0].header) %matplotlib inline import matplotlib.pyplot as plt ax = plt.subplot(111, projection=wcs) ax.imshow(hdulist[0].data, cmap='gray', vmax=1000, interpolation=None, origin='lower') ``` This image now has physcial labels in the native coordinate system of the image. We can see what the coordinate system and projection of this image is using the 'CTYPE' header entries we saw earlier. ``` print(hdulist[0].header['CTYPE1'], hdulist[0].header['CTYPE2']) ``` We can tell that this is in the FK5 coordinate system by the presence of a 'equinox' entry in the header: ``` hdulist[0].header['equinox'] ``` There is also a quick way to generate an Astropy coordinate frame from a WCS object, which confirms this diagnosis. ``` from astropy.wcs.utils import wcs_to_celestial_frame wcs_to_celestial_frame(wcs) ``` for more information on the very useful `astropy.coordinates` module see http://docs.astropy.org/en/stable/coordinates/ <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Add some labels </h2> </div> <br/> Now we have a nice plot with physically meaningful ticks, we should label our axes. 
<br/> Add labels to the axes saying "Right Ascension [degrees]" and "Declination [degrees]" <br/> Also overlay a coordinate grid using: <code>ax.coords.grid()</code> Look up the documentation for this method to see what parameters you can specify. <br/> </section> ``` ax = plt.subplot(111, projection=wcs) ax.imshow(hdulist[0].data, cmap='gray', vmax=1000, interpolation=None, origin='lower') ax.set_xlabel("Right Ascension [degrees]") ax.set_ylabel("Declination [degrees]") ax.coords.grid(color='white', alpha=0.5, linestyle='solid') ``` Now we have a nice plot, we can do a couple of things to plot. ### Overplotting in Pixel Coordinates ``` ax = plt.subplot(111, projection=wcs) ax.imshow(hdulist[0].data, cmap='gray', vmax=1000, interpolation=None, origin='lower') ax.set_xlabel("Right Ascension [degrees]") ax.set_ylabel("Declination [degrees]") ax.coords.grid(color='white', alpha=0.5, linestyle='solid') ax.plot(3000, 3000, 'o') ``` ### Overplotting in World Coordinates ``` ax = plt.subplot(111, projection=wcs) ax.imshow(hdulist[0].data, cmap='gray', vmax=1000, interpolation=None, origin='lower') ax.set_xlabel("Right Ascension [degrees]") ax.set_ylabel("Declination [degrees]") ax.coords.grid(color='white', alpha=0.5, linestyle='solid') ax.set_autoscale_on(False) ax.plot(3000, 3000, 'o') # Overplot in FK5 in Degrees ax.plot(189.25, 14.23, 'o', transform=ax.get_transform('world')) ``` <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Add some labels </h2> </div> Now overplot two lines on the image, one where you specified the line in pixel coordinates, and one where you specified the line in FK5 world coordinates. 
</section> ``` ax = plt.subplot(111, projection=wcs) ax.imshow(hdulist[0].data, cmap='gray', vmax=1000, interpolation=None, origin='lower') ax.set_xlabel("Right Ascension [degrees]") ax.set_ylabel("Declination [degrees]") ax.coords.grid(color='white', alpha=0.5, linestyle='solid') ax.set_autoscale_on(False) ax.plot([2500, 3500], [2500,3500], 'cyan') # Overplot in FK5 in Degrees ax.plot([189.232, 189.25], [14.218, 14.23], 'yellow', transform=ax.get_transform('world')) ``` ### Overplotting Another Coordinate System ``` ax = plt.subplot(111, projection=wcs) ax.imshow(hdulist[0].data, cmap='gray', vmax=1000, interpolation=None, origin='lower') ax.set_xlabel("Right Ascension [degrees]") ax.set_ylabel("Declination [degrees]") ax.coords.grid(color='white', alpha=0.5, linestyle='solid') overlay = ax.get_coords_overlay('galactic') overlay.grid(color='orange', alpha=1, linestyle='solid') overlay['l'].set_axislabel("Galactic Longitude [degrees]") overlay['b'].set_axislabel("Galactic Latitude [degrees]") ``` ## SunPy Map The SunPy Map class is a wrapper for solar images which makes some of the above opertations easier. ``` import sunpy.map from sunpy.data.sample import AIA_171_ROLL_IMAGE amap = sunpy.map.Map(AIA_171_ROLL_IMAGE) amap.peek() ``` This has done a quick plot of the test image `AIA_171_ROLL_IMAGE` using wcsaxes. If we want to customise the plot we use the `plot()` method. ``` import astropy.units as u amap = sunpy.map.Map(AIA_171_ROLL_IMAGE) im = amap.plot() ax = plt.gca() ax.set_autoscale_on(False) x = 500*u.arcsec y = -300*u.arcsec ax.plot(x.to(u.deg), y.to(u.deg), 'o', transform=ax.get_transform('world')) amap.pixel_to_data(100*u.pix, 200*u.pix) amap.data_to_pixel(0*u.arcsec, 0*u.arcsec) ``` <section class="objectives panel panel-success"> <div class="panel-heading"> <h2><span class="fa fa-pencil"></span> Rotate your Owl </h2> </div> Why is the Sun wonky? 
Use the [`rotate()`](http://docs.sunpy.org/en/stable/code_ref/map.html#sunpy.map.mapbase.GenericMap.rotate) method of SunPy Map to align the coordinate grid to the pixel grid in this sample image. Once you have run rotate, plot the resulting image, and compare with the one above. </section> ``` mr = amap.rotate() mr.peek() ```
github_jupyter
# QEI (Q-Noisy Expected Improvement) Demo for Blog ``` from qmcpy import * import numpy as np from scipy.linalg import solve_triangular, cho_solve, cho_factor from scipy.stats import norm import matplotlib.pyplot as pyplot %matplotlib inline lw = 3 ms = 8 ``` ## Problem setup Here is the current data ($x$ and $y$ values with noise) from which we want to build a GP and run a Bayesian optimization. ``` def yf(x): return np.cos(10 * x) * np.exp(.2 * x) + np.exp(-5 * (x - .4) ** 2) xplt = np.linspace(0, 1, 300) yplt = yf(xplt) x = np.array([.1, .2, .4, .7, .9]) y = yf(x) v = np.array([.001, .05, .01, .1, .4]) pyplot.plot(xplt, yplt, linewidth=lw) pyplot.plot(x, y, 'o', markersize=ms, color='orange') pyplot.errorbar(x, y, yerr=2 * np.sqrt(v), marker='', linestyle='', color='orange', linewidth=lw) pyplot.title('Sample data with noise'); ``` ## Computation of the qEI quantity using `qmcpy` One quantity which can appear often during BO is a computation involving $q$ "next points" to sample in a BO process; in the standard formulation this quantity might involve just $q=1$, but $q>1$ is also of interest for batched evaluation in parallel. This quantity is defined as $$ \mathrm{EI}_q(x_1, \ldots, x_q;\mathbf{y}, \mathcal{X}, \boldsymbol{\epsilon}) = \int_{\mathbb{R}^q} \max_{1\leq i\leq q}\left[{(y_i - y^*)_+}\right]\, p_{Y_{x_1,\ldots, x_q}|\mathbf{y}, \mathcal{X}, \boldsymbol{\epsilon}}(y_1, \ldots, y_q)\; \text{d}y_1\cdots\text{d}y_q $$ The example I am considering here is with $q=5$ but this quantity could be made larger. Each of these QEI computations (done in a vectorized fashion in production) would be needed in an optimization loop (likely powered by CMAES or some other high dimensional nonconvex optimization tool). This optimization problem would take place in a $qd$ dimensional space, which is one aspect which usually prevents $q$ from being too large. Note that some of this will look much more confusing in $d>1$, but it is written here in a simplified version. 
## GP model definition (kernel information) and qEI definition ``` shape_parameter = 4.1 process_variance = .9 fudge_factor = 1e-10 def gaussian_kernel(x, z): return process_variance * np.exp(-shape_parameter ** 2 * (x[:, None] - z[None, :]) ** 2) def gp_posterior_params(x_to_draw): n = len(x_to_draw) kernel_prior_data = gaussian_kernel(x, x) kernel_cross_matrix = gaussian_kernel(x_to_draw, x) kernel_prior_plot = gaussian_kernel(x_to_draw, x_to_draw) prior_cholesky = np.linalg.cholesky(kernel_prior_data + np.diag(v)) partial_cardinal_functions = solve_triangular(prior_cholesky, kernel_cross_matrix.T, lower=True) posterior_covariance = kernel_prior_plot - np.dot(partial_cardinal_functions.T, partial_cardinal_functions) + fudge_factor * np.eye(n) full_cardinal_functions = solve_triangular(prior_cholesky.T, partial_cardinal_functions, lower=False) posterior_mean = np.dot(full_cardinal_functions.T, y) return posterior_mean,posterior_covariance def gp_posterior_draws(x_to_draw, mc_strat, num_posterior_draws, posterior_mean, posterior_covariance): q = len(x_to_draw) if mc_strat == 'iid': dd = IIDStdUniform(q) elif mc_strat == 'lattice': dd = Lattice(q) elif mc_strat == 'sobol': dd = Sobol(q) g = Gaussian(dd,posterior_mean,posterior_covariance) posterior_draws = g.gen_samples(num_posterior_draws) return posterior_draws def compute_qei(posterior_draws): y_gp = np.fmax(np.max(posterior_draws.T - max(y), axis=0), 0) return y_gp ``` ## Demonstrate the concept of qEI on 2 points ``` num_posterior_draws = 2 ** 7 Np = (25, 24) X, Y = np.meshgrid(np.linspace(0, 1, Np[1]), np.linspace(0, 1, Np[0])) xp = np.array([X.reshape(-1), Y.reshape(-1)]).T mu_post,sigma_cov = gp_posterior_params(xplt) y_draws = gp_posterior_draws(xplt, 'lattice', num_posterior_draws,mu_post,sigma_cov).T qei_vals = np.empty(len(xp)) for k, next_x in enumerate(xp): mu_post,sigma_cov = gp_posterior_params(next_x) gp_draws = gp_posterior_draws(next_x, 'sobol', num_posterior_draws,mu_post,sigma_cov) qei_vals[k] = 
compute_qei(gp_draws).mean() Z = qei_vals.reshape(Np) fig, axes = pyplot.subplots(1, 3, figsize=(14, 4)) ax = axes[0] ax.plot(xplt, yplt, linewidth=lw) ax.plot(x, y, 'o', markersize=ms, color='orange') ax.errorbar(x, y, yerr=2 * np.sqrt(v), marker='', linestyle='', color='orange', linewidth=lw) ax.set_title('Sample data with noise') ax.set_ylim((-2.3, 2.6)) ax = axes[1] ax.plot(xplt, y_draws, linewidth=lw, color='b', alpha=.05) ax.plot(x, y, 'o', markersize=ms, color='orange') ax.errorbar(x, y, yerr=2 * np.sqrt(v), marker='', linestyle='', color='orange', linewidth=lw) ax.set_title(f'{num_posterior_draws} GP posterior draws') ax.set_ylim((-2.3, 2.6)) ax = axes[2] h = ax.contourf(X, Y, Z) ax.set_xlabel('First next point') ax.set_ylabel('Second next point') ax.set_title('qEI for q=2 next points') cax = fig.colorbar(h, ax=ax) cax.set_label('qEI') fig.tight_layout() ``` ## Choose some set of next points against which to test the computation Here, we consider $q=5$, which is much more costly to compute than the $q=2$ demonstration above. **Note** This will take some time to run. Use fewere `num_repeats` to reduce the cost. 
``` # paramters next_x = np.array([0.158, 0.416, 0.718, 0.935, 0.465]) num_posterior_draws_to_test = 2 ** np.arange(4, 20) d = len(next_x) mu_post,sigma_cov = gp_posterior_params(next_x) # get reference answer with qmcpy dd = Sobol(d) tm = Gaussian(dd,mu_post,sigma_cov) integrand = CustomFun(tm,compute_qei) stopping_criterion = CubQMCSobolG(integrand, abs_tol=5e-7) reference_answer,data = stopping_criterion.integrate() print(data) # generate data num_posterior_draws_to_test = 2 ** np.arange(4, 20) vals = {} num_repeats = 50 mc_strats = ('iid', 'lattice', 'sobol') for mc_strat in mc_strats: vals[mc_strat] = [] for num_posterior_draws in num_posterior_draws_to_test: all_estimates = [] for _ in range(num_repeats): y_draws = gp_posterior_draws(next_x, mc_strat, num_posterior_draws,mu_post,sigma_cov) all_estimates.append(compute_qei(y_draws).mean()) vals[mc_strat].append(all_estimates) vals[mc_strat] = np.array(vals[mc_strat]) fig, ax = pyplot.subplots(1, 1, figsize=(6, 4)) colors = ('#F5811F', '#A23D97', '#00B253') alpha = .3 for (name, results), color in zip(vals.items(), colors): bot = np.percentile(abs(results - reference_answer), 25, axis=1) med = np.percentile(abs(results - reference_answer), 50, axis=1) top = np.percentile(abs(results - reference_answer), 75, axis=1) ax.loglog(num_posterior_draws_to_test, med, label=name, color=color) ax.fill_between(num_posterior_draws_to_test, bot, top, color=color, alpha=alpha) ax.loglog(num_posterior_draws_to_test, .1 * num_posterior_draws_to_test ** -.5, '--k', label='$O(N^{-1/2})$') ax.loglog(num_posterior_draws_to_test, .25 * num_posterior_draws_to_test ** -1.0, '-.k', label='$O(N^{-1})$') ax.set_xlabel('N - number of points') ax.set_ylabel('Accuracy') ax.legend(loc='lower left') ax.set_title(f'Statistics from {num_repeats} runs'); # plt.savefig('qei_convergence.png'); # parameters names = ['IID','Lattice','Sobol'] epsilons = [ [2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 1e-2], # iid nodes [5e-6, 1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 5e-4, 
1e-3, 2e-3, 5e-3, 1e-2], # lattice [5e-6, 1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 1e-2]] # sobol trials = 25 # initialize time data times = {names[j]:np.zeros((len(epsilons[j]),trials),dtype=float) for j in range(len(names))} n_needed = {names[j]:np.zeros((len(epsilons[j]),trials),dtype=float) for j in range(len(names))} # run tests for t in range(trials): print(f'{t}') for j in range(len(names)): for i in range(len(epsilons[j])): if j == 0: sc = CubMCG(CustomFun(Gaussian(IIDStdUniform(d),mu_post,sigma_cov),compute_qei),abs_tol=epsilons[j][i],rel_tol=0) elif j == 1: sc = CubQMCLatticeG(CustomFun(Gaussian(Lattice(d),mu_post,sigma_cov),compute_qei),abs_tol=epsilons[j][i],rel_tol=0) else: sc = CubQMCSobolG(CustomFun(Gaussian(Sobol(d),mu_post,sigma_cov),compute_qei),abs_tol=epsilons[j][i],rel_tol=0) solution,data = sc.integrate() times[names[j]][i,t] = data.time_integrate n_needed[names[j]][i,t] = data.n_total fig,axs = pyplot.subplots(1, 3, figsize=(22, 6)) colors = ('#245EAB', '#A23D97', '#00B253') light_colors = ('#A3DDFF', '#FFBCFF', '#4DFFA0') alpha = .3 def plot_fills(eps,data,name,color,light_color): bot = np.percentile(data, 5, axis=1) med = np.percentile(data, 50, axis=1) top = np.percentile(data, 95, axis=1) ax.loglog(eps, med, label=name, color=color) ax.fill_between(eps, bot, top, color=light_color) return med for i,(nt_data,label) in enumerate(zip([times,n_needed],['time','n'])): ax = axs[i+1] # iid plot eps_iid = np.array(epsilons[0]) data = nt_data['IID'] med_iid = plot_fills(eps_iid,data,'IID',colors[0],light_colors[0]) # lattice plot eps = np.array(epsilons[1]) data = nt_data['Lattice'] med_lattice = plot_fills(eps,data,'Lattice',colors[1],light_colors[1]) # sobol plot eps = np.array(epsilons[2]) data = nt_data['Sobol'] med_sobol = plot_fills(eps,data,'Sobol',colors[2],light_colors[2]) # iid bigO ax.loglog(eps_iid, (med_iid[0]*eps_iid[0]**2)/(eps_iid**2), '--k', label=r'$\mathcal{O}(1/\epsilon^2)$') # ld bigO ax.loglog(eps, 
((med_lattice[0]*med_sobol[0])**.5 *eps[0]) / eps , '-.k', label=r'$\mathcal{O}(1/\epsilon)$') # metas ax.set_xlabel(r'$\epsilon$') ax.set_ylabel(label) ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.legend(loc='lower left',frameon=False) ax.set_title(f'Statistics from {trials} runs') # plot sample data ax = axs[0] ax.plot(xplt, yplt, linewidth=lw) ax.plot(x, y, 'o', markersize=ms, color='orange') ax.errorbar(x, y, yerr=2 * np.sqrt(v), marker='', linestyle='', color='orange', linewidth=lw) ax.set_title('Sample data with noise') ax.set_xlim([0,1]) ax.set_xticks([0,1]) ax.set_ylim([-3, 3]) ax.set_yticks([-3,3]); ```
github_jupyter
# KNeighborsClassifier with MaxAbsScaler & PowerTransformer **This Code template is for the Classification task using KNeighborsClassifier with MaxAbsScaler feature scaling technique and PowerTransformer as Feature Transformation Technique in a pipeline.** ### Required Packages ``` !pip install imblearn import warnings as wr import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder from sklearn.pipeline import make_pipeline from sklearn.preprocessing import MaxAbsScaler,PowerTransformer from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.metrics import accuracy_score from imblearn.over_sampling import RandomOverSampler wr.filterwarnings('ignore') ``` ### Initialization Filepath of CSV file ``` #filepath file_path= "" ``` List of features which are required for model training . ``` #x_values features=[] ``` Target feature for prediction. ``` #y_value target='' ``` ### Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ``` df=pd.read_csv(file_path) #reading file df.head()#displaying initial entries print('Number of rows are :',df.shape[0], ',and number of columns are :',df.shape[1]) df.columns.tolist() ``` ### Data Preprocessing Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. 
``` def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) def EncodeY(df): if len(df.unique())<=2: return df else: un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort') df=LabelEncoder().fit_transform(df) EncodedT=[xi for xi in range(len(un_EncodedT))] print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT)) return df ``` #### Correlation Map In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ``` plt.figure(figsize = (20, 12)) corr = df.corr() mask = np.triu(np.ones_like(corr, dtype = bool)) sns.heatmap(corr, mask = mask, linewidths = 1, annot = True, fmt = ".2f") plt.show() ``` ### Feature Selections It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X and target/outcome to Y. ``` #spliting data into X(features) and Y(Target) X=df[features] Y=df[target] x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=EncodeY(NullClearner(Y)) X.head() ``` #### Distribution Of Target Variable ``` plt.figure(figsize = (10,6)) sns.countplot(Y,palette='pastel') ``` ### Data Splitting The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. 
``` #we can choose randomstate and test_size as over requerment X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123) #performing datasplitting ``` ### Handling Target Imbalance The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important. One approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class.We will perform overspampling using imblearn library. ``` X_train,y_train = RandomOverSampler(random_state=123).fit_resample(X_train, y_train) ``` ### Feature Transformation **PowerTransformer** PowerTransformer applies a power transform featurewise to make data more Gaussian-like. Power transforms are a family of parametric, monotonic transformations that are applied to make data more Gaussian-like. This is useful for modeling issues related to heteroscedasticity (non-constant variance), or other situations where normality is desired. ### Feature Rescaling **MaxAbsScaler** Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. Reference URL to MaxAbsScaler API : https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html ## Model **KNeighborsClassifier :** KNN is one of the easiest Machine Learning algorithms based on Supervised Machine Learning technique. The algorithm stores all the available data and classifies a new data point based on the similarity. 
It assumes similarity between the new data and the available data, and puts the new case into the category that is most similar to the available categories. The KNN algorithm, at the training phase, just stores the dataset; when it gets new data, it classifies that data into the category that is most similar to the stored data. Model Tuning Parameters: * n_neighbors -> Number of neighbors to use by default for kneighbors queries. * weights -> weight function used in prediction. {uniform,distance} * algorithm-> Algorithm used to compute the nearest neighbors. {‘auto’, ‘ball_tree’, ‘kd_tree’, ‘brute’} * p -> Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. * leaf_size -> Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. ``` # Build Model here model=make_pipeline(MaxAbsScaler(),PowerTransformer(),KNeighborsClassifier()) model.fit(X_train,y_train) ``` #### Model Accuracy The score() method returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted. ``` print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100)) #prediction on testing set prediction=model.predict(X_test) ``` #### Confusion Matrix A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known. 
``` #ploting_confusion_matrix(model,X_test,y_test,cmap=plt.cm.Blues) cf_matrix=confusion_matrix(y_test,prediction) plt.figure(figsize=(7,6)) sns.heatmap(cf_matrix,annot=True,fmt="d") ``` #### Classification Report A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False. * **where**: - Precision:- Accuracy of positive predictions. - Recall:- Fraction of positives that were correctly identified. - f1-score:- percent of positive predictions were correct - support:- Support is the number of actual occurrences of the class in the specified dataset. ``` print(classification_report(y_test,model.predict(X_test))) ``` #### Creator: Vipin Kumar , Github: [Profile](https://github.com/devVipin01)
github_jupyter
# Supplemental Information Section B - Analysis of mutation frequency (c) 2017 the authors. This work is licensed under a [Creative Commons Attribution License CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). All code contained herein is licensed under an [MIT license](https://opensource.org/licenses/MIT). ``` import scipy as sp import scipy.ndimage # Our numerical workhorses import numpy as np import pandas as pd # Import the project utils import sys sys.path.insert(0, '../') import NB_sortseq_utils as utils # Import matplotlib stuff for plotting import matplotlib.pyplot as plt import matplotlib.cm as cm from IPython.core.pylabtools import figsize # Seaborn, useful for graphics import seaborn as sns sns.set_palette("deep", color_codes=True) utils.set_plotting_style1() %matplotlib inline # load in library sequences mut_region_start = 0 mut_region_length = 70 data_fn = '../../data/sortseq_pymc_dump/20150513_marR_MG1655_LB_na_mut1_4bins_seq.csv' #======================================================# # load sequences and convert to matrix representation #======================================================# f = open(data_fn) roi_list = [(line.split(',')[0][mut_region_start:mut_region_start+mut_region_length], line.split(',')[1].strip()) for line in f if line.strip()] f.close() # number of sequences N = len(roi_list) # Convert each sequence into matrix representation seq_mat = sp.zeros((4,mut_region_length,N),dtype=int) for i, line in enumerate(roi_list): seq_mat[:,:,i] = utils.seq2mat(line[0]) #======================================================# # We also need to know what the wild-type sequence was # to calculate number of mutations. Lets load those in. 
#======================================================# data_fn_wt = 'input_data/seq_WT_marmut1.txt' seq_mat_wt = sp.zeros((4,mut_region_length,1),dtype=int) seq_mat_wt_inv = sp.zeros((4,mut_region_length,1),dtype=int) f = open(data_fn_wt) for i, line in enumerate(f): if line.strip(): sb = line.split(',') seq_mat_wt[:,:,i] = utils.seq2mat(sb[0][0:mut_region_length]) f.close() # To determine whether a matrix has a mutation, I will # multiply (inner product) it by an inverted wild-type # sequence (positions associated with wild-type sequence # have value 0, and 1 otherwise). The sum of this matrix # product will give the number of mutations. # Let's generate this inverted wild-type matrix. for i in range(0,mut_region_length): for j in range(0,4): if seq_mat_wt[j,i] ==1: seq_mat_wt_inv[j,i] = 0 else: seq_mat_wt_inv[j,i] = 1 #======================================================# # calculate 'mutation' matrices #======================================================# seq_mat_mut = np.zeros((4,mut_region_length,N),dtype=int) for i in range(0,N): seq_mat_mut[:,:,i] = seq_mat[:,:,i] * seq_mat_wt_inv[:,:,0] #======================================================# # Calculate number of sequences that have 1-30 # mutations in their sequence. # This will take a while. I bet there is a better way # to calculate this. #======================================================# N_mut = np.zeros(25,dtype=int) for i in range(0,25): seq_mat_mut_temp = seq_mat_mut.copy() # A matrix will sum to 4 if it has 4 mutations. 
# Use this to check number of sequences with i mutations if i == 0: count = 0 for j in range(0,N): if seq_mat_mut_temp[:,:,j].sum() !=i: seq_mat_mut_temp[:,:,j] = seq_mat_mut_temp[:,:,j]*0.0 else: count += 1 N_mut[i] = count continue for j in range(0,N): if seq_mat_mut_temp[:,:,j].sum() !=i: seq_mat_mut_temp[:,:,j] = seq_mat_mut_temp[:,:,j]*0.0 # divide by number of mutations (sum of each good matrix) N_mut[i] = seq_mat_mut_temp.sum()/i print('Considering sequences with ',i, ' mutations.') #======================================================# # plot the result #======================================================# ind = np.arange(0,25) ind = np.arange(25)/70 fig = plt.figure(1, figsize(5, 4)) ax = plt.subplot(111) plt.bar(ind,100*(N_mut/N), width=0.015) # ax.set_xlim(0,0.35) ax.set_ylabel('percent of sequences') ax.set_xlabel('mutations / base pair') ax.grid(b=False) plt.tight_layout() figname_out = 'output_figs/figS1_mutation_histogram.pdf' fig.savefig(figname_out, format='pdf') ```
github_jupyter
``` import pandas as pd import numpy as np import torch from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn.metrics import confusion_matrix import mlprepare as mlp import deep_tabular_augmentation as dta from torch.utils.data import Dataset, DataLoader from sklearn.ensemble import RandomForestClassifier device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') DATA_PATH = 'data/creditcard.csv' df = pd.read_csv(DATA_PATH) df.head() ``` We want a balanced dataset, hence we want to create as many fake data as the difference between the two classes. ``` difference_in_class_occurences = df['Class'].value_counts()[0]-df['Class'].value_counts()[1] difference_in_class_occurences cols = df.columns train_df, test_df = train_test_split(df[df['Class']==1], test_size=0.3, random_state=42) X_train_fraud, X_test_fraud, y_train, y_test, scaler_fraud_data = mlp.cont_standardize(train_df.iloc[:,:30], test_df.iloc[:,:30], train_df.iloc[:,30], test_df.iloc[:,30], cat_type=None, transform_y=False, path='scaler_objects/fraud_scaler', standardizer='StandardScaler') class DataBuilder(Dataset): def __init__(self, X_train, X_test, train=True): self.X_train, self.X_test = X_train, X_test if train: self.X_train['class']=1 self.x = torch.from_numpy(self.X_train.values).type(torch.FloatTensor) self.len=self.x.shape[0] else: self.X_test['class']=1 self.x = torch.from_numpy(self.X_test.values).type(torch.FloatTensor) self.len=self.x.shape[0] del self.X_train del self.X_test def __getitem__(self,index): return self.x[index] def __len__(self): return self.len traindata_set=DataBuilder(X_train_fraud, X_test_fraud, train=True) testdata_set=DataBuilder(X_train_fraud, X_test_fraud, train=False) trainloader=DataLoader(dataset=traindata_set,batch_size=1024) testloader=DataLoader(dataset=testdata_set,batch_size=1024) trainloader.dataset.x[0] D_in = traindata_set.x.shape[1] H = 50 H2 = 12 autoenc_model = dta.AutoencoderModel(trainloader, 
testloader, device, D_in, H, H2, latent_dim=5) autoenc_model_fit = autoenc_model.fit(epochs=1000) ``` Let's see how the created data looks like: ``` cols_fake = cols.to_list() cols_fake.remove('Class') df_fake = autoenc_model_fit.predict_df(no_samples=difference_in_class_occurences, cols=cols, scaler=scaler_fraud_data, cont_vars=cols_fake) df_fake['Class']=1 df_fake.head() ``` Now the data with random noise: ``` df_fake_with_noise = autoenc_model_fit.predict_with_noise_df(no_samples=difference_in_class_occurences, cols=cols, mu=0, sigma=0.05, scaler=scaler_fraud_data, cont_vars=cols_fake) df_fake_with_noise['Class']=1 df_fake_with_noise.head() df[df['Class']==1].groupby('Class').describe().loc[:,(slice(None),['mean'])] df_fake.describe().loc[['mean']] df_fake_with_noise.describe().loc[['mean']] ``` ## Train Random Forest ### Create three dataframes: the original, the original appended with fake_data, the original appended with fake data with noise ``` train_df, test_df = train_test_split(df, test_size=0.3, random_state=42) train_df_fake = train_df.append(df_fake) train_df_fake_with_noise = train_df.append(df_fake_with_noise) ``` ### Train model on original data ``` clf_original=RandomForestClassifier(n_estimators=100, max_features=0.5, min_samples_leaf=5, n_jobs=-1) #Train the model using the training sets y_pred=clf.predict(X_test) clf_original.fit(train_df.iloc[:,:30].values,train_df.iloc[:,30].values) y_pred_original=clf_original.predict(test_df.iloc[:,:30].values) confusion_matrix(test_df.iloc[:,30], y_pred_original) clf_fake=RandomForestClassifier(n_estimators=100, max_features=0.5, min_samples_leaf=5, n_jobs=-1) #Train the model using the training sets y_pred=clf.predict(X_test) clf_fake.fit(train_df_fake.iloc[:,:30].values,train_df_fake.iloc[:,30].values) y_pred_fake=clf_fake.predict(test_df.iloc[:,:30].values) confusion_matrix(test_df.iloc[:,30], y_pred_fake) clf_fake_with_noise=RandomForestClassifier(n_estimators=100, max_features=0.5, min_samples_leaf=5, 
n_jobs=-1) #Train the model using the training sets y_pred=clf.predict(X_test) clf_fake_with_noise.fit(train_df_fake_with_noise.iloc[:,:30].values,train_df_fake_with_noise.iloc[:,30].values) y_pred_fake_with_noise=clf_fake.predict(test_df.iloc[:,:30].values) confusion_matrix(test_df.iloc[:,30], y_pred_fake_with_noise) ```
github_jupyter
I'm going to try and download the Kages and LEGACY data from Vizier using ADQL, and combine them into a single array. ``` import warnings warnings.catch_warnings() warnings.simplefilter("ignore") import seaborn as sns import pylab as plt from astropy.table import Table import numpy as np import pandas as pd from tap import TAPVizieR as tv from tap import QueryStr ``` - Get all important information from both Legacy Tables - Get the same information from Kages tables # LEGACY Stars (66) ## We will use the BASTA model parameters ### Table 3, Paper 2: Global Seismic & Spectroscopic Properties We will take all our global properties from this catalogue ``` adql = QueryStr(""" select obs.KIC, obs.numax, obs."E_numax" as enumax, obs."<dnu>" as dnu, obs."E_<dnu>" as ednu, obs.Teff, obs."e_Teff" as eTeff, obs."[Fe/H]" as feh, obs."e_[Fe/H]" as efeh from "J/ApJ/835/173/table3" as obs """) vizier = tv() legacyobs = vizier.query(adql).to_pandas() ``` ## Table 4, Paper 2: BASTA model output We will take preliminary Masses, Radii, Logg and Ages from the BASTA fits in this catalogue ``` adql = QueryStr(""" select modl.KIC, modl.Mass as modmass, modl."E_Mass" as upmodmass, modl."e_Mass" as lomodmass, modl.Rad as modrad, modl."E_Rad" as upmodrad, modl."e_Rad" as lomodrad, modl."log(g)" as modlogg, modl."E_log(g)" as upmodlogg, modl."e_log(g)" as lomodlogg, modl.Age as age, modl."E_Age" as upage, modl."e_Age" as loage from "J/ApJ/835/173/table4" as modl where modl.Pipe ='BASTA' """) vizier = tv() legacymodels = vizier.query(adql).to_pandas() ``` ### Auxiliary table: Table 1, Paper 1: Parameters for studied stars From this catalogue we will take the Kepler magnitude, Category, vsini, and sources on atmospheric parameters. Note that the $\nu_{\rm max}$ and $\Delta\nu$ values in this catalogue differ slightly to those in Paper 2. We will use the values from Paper 2. 
``` adql = QueryStr(""" select aux.KIC, aux.Name, aux.Kpmag, aux.Cat, aux.r_Teff as Teffsource, aux."r_[Fe/H]" as fehsource, aux.vsini, aux.e_vsini as evsini from "J/ApJ/835/172/table1" as aux """) vizier = tv() aux = vizier.query(adql).to_pandas() aux.KIC = pd.to_numeric(aux['KIC'].str.decode("utf-8")) aux.Name = aux.Name.str.decode("utf-8") aux.Cat = aux.Cat.str.decode("utf-8") legacy = legacyobs.merge(legacymodels, on='KIC', how='left') legacy = legacy.merge(aux[['KIC','vsini','evsini']], on='KIC',how='left') legacy.head(5) ``` # Kages stars (33) ## We will use the BASTA model parameters ### Table 3, Paper 1 We will take the model parameters as well as the atmospheric parameters from here. ``` adql = QueryStr(""" select obs.KIC, obs.Teff, obs."e_Teff" as eTeff, obs."[Fe/H]" as feh, obs."e_[Fe/H]" as efeh, obs.Mass as modmass, obs."E_Mass" as upmodmass, obs."e_Mass" as lomodmass, obs.Radius as modrad, obs."E_Radius" as upmodrad, obs."e_Radius" as lomodrad, obs.logg as modlogg, obs."E_logg" as upmodlogg, obs."e_logg" as lomodlogg, obs.Age as age, obs."E_Age" as upage, obs."e_Age" as loage from "J/MNRAS/452/2127/table3" as obs """) vizier = tv() kagesmodels = vizier.query(adql).to_pandas() ``` ### Table 1, Paper 2 This table is not on Vizier, so we shall read it in. The $vsin(i)$ values are not reported in the publication of Davies+2016, but have been received via private communication. ``` kagesseis = pd.read_csv('kagesIIdata.dat', usecols=['KIC','num','snum','dnu','sdnu','vsini','svsini'], sep='\s+').rename(columns={'num':'numax', 'snum':'enumax', 'sdnu':'ednu', 'svsini':'evsini'}) kages = kagesmodels.merge(kagesseis, on='KIC',sort='left') kages.head(5) ``` # Now lets combine them! ## We'll add a Legacy/Kages (L/K) source label also ``` legacy['source'] = ['L'] * len(legacy) kages['source'] = ['K'] * len(kages) malatium = legacy.merge(kages, on='KIC', how='outer') ``` Note that there are 4 duplicates across these catalogues. 
These stars are: * 3632418 * 9414417 * 9955598 * 10963065 In these cases we preserve the values reported in LEGACY and not in Kages. ``` malatium = legacy.append(kages).drop_duplicates('KIC', keep='first') ``` # Finally, lets add some more info ## We will use the gaia-kepler.fun catalogue for this. This can be downloaded here: https://gaia-kepler.fun/. Its too large to store on Github. We will use the Gaia magnitude and BP-RP value. ``` gkf = Table.read('kepler_dr2_1arcsec.fits', format='fits').to_pandas().rename(columns={ 'kepid':'KIC', 'phot_g_mean_mag':'G', 'bp_rp':'bprp'})[['KIC','G','bprp']] malatium = malatium.merge(gkf, on='KIC', how='left') ``` # Lets save our final sample ``` malatium.to_csv('malatium.csv') malatium['KIC'].to_csv('targetlist.txt',index=False) print('Our final sample contains: {} stars'.format(len(malatium))) malatium.head(5) ``` # Lets make some fun plots! ``` sns.set_context('notebook',rc={"font.size":20,"axes.titlesize":20,"axes.labelsize":20}) sns.jointplot('Teff', 'modlogg', data=malatium) sns.jointplot('numax','dnu',data=malatium) sns.jointplot('modmass','age',data=malatium) sns.jointplot('Teff','age',data=malatium) sns.jointplot('feh','age',data=malatium) ```
github_jupyter
<img src="NotebookAddons/blackboard-banner.png" width="100%" /> <font face="Calibri"> <br> <font size="5"> <b>An Introduction to Simple SAR Change Detection Methods</b></font> <br> <font size="4"> <b> Franz J Meyer; University of Alaska Fairbanks </b> <br> <img src="NotebookAddons/UAFLogo_A_647.png" style="padding:5px;" width="170" align="right" /></font> <font size="3">This notebook introduces you to a some popular change detection methods that can be applied on SAR time series data. SAR is an excellent tool for change detection. As SAR sensors are weather and illumination independent, and as SAR's carry their own illumination source (active sensor), differences between repeated images are a direct indication of changes on the surface. This fact is exploited by the change detection methods introduced below. The exercise is done in the framework of *Jupyter Notebooks*. The Jupyter Notebook environment is easy to launch in any web browser for interactive data exploration with provided or new training data. Notebooks are comprised of text written in a combination of executable python code and markdown formatting including latex style mathematical equations. Another advantage of Jupyter Notebooks is that they can easily be expanded, changed, and shared with new data sets or newly available time series steps. Therefore, they provide an excellent basis for collaborative and repeatable data analysis. <br> <b>This notebook covers the following data analysis concepts:</b> - Time series metrics 95$^{th}$ and 5$^{th}$ percentile difference thresholding - Time series coefficient of variation thresholding - Log Ratio-based change detection from image pairs </font> </font> <hr> <font face="Calibri" size="5" color='rgba(200,0,0,0.2)'> <b>Important Note about JupyterHub</b> </font> <br><br> <font face="Calibri" size="3"> <b>Your JupyterHub server will automatically shutdown when left idle for more than 1 hour. 
Your notebooks will not be lost but you will have to restart their kernels and re-run them from the beginning. You will not be able to seamlessly continue running a partially run notebook.</b> </font> ``` %%javascript var kernel = Jupyter.notebook.kernel; var command = ["notebookUrl = ", "'", window.location, "'" ].join('') kernel.execute(command) from IPython.display import Markdown from IPython.display import display user = !echo $JUPYTERHUB_USER env = !echo $CONDA_PREFIX if env[0] == '': env[0] = 'Python 3 (base)' if env[0] != '/home/jovyan/.local/envs/rtc_analysis': display(Markdown(f'<text style=color:red><strong>WARNING:</strong></text>')) display(Markdown(f'<text style=color:red>This notebook should be run using the "rtc_analysis" conda environment.</text>')) display(Markdown(f'<text style=color:red>It is currently using the "{env[0].split("/")[-1]}" environment.</text>')) display(Markdown(f'<text style=color:red>Select the "rtc_analysis" from the "Change Kernel" submenu of the "Kernel" menu.</text>')) display(Markdown(f'<text style=color:red>If the "rtc_analysis" environment is not present, use <a href="{notebookUrl.split("/user")[0]}/user/{user[0]}/notebooks/conda_environments/Create_OSL_Conda_Environments.ipynb"> Create_OSL_Conda_Environments.ipynb </a> to create it.</text>')) display(Markdown(f'<text style=color:red>Note that you must restart your server after creating a new environment before it is usable by notebooks.</text>')) ``` <hr> <font face="Calibri"> <font size="5"> <b> 0. Importing Relevant Python Packages </b> </font> <font size="3">In this notebook we will use the following scientific libraries: <ol type="1"> <li> <b><a href="https://pandas.pydata.org/" target="_blank">Pandas</a></b> is a Python library that provides high-level data structures and a vast variety of tools for analysis. The great feature of this package is the ability to translate rather complex operations with data into one or two commands. 
Pandas contains many built-in methods for filtering and combining data, as well as the time-series functionality. </li> <li> <b><a href="https://www.gdal.org/" target="_blank">GDAL</a></b> is a software library for reading and writing raster and vector geospatial data formats. It includes a collection of programs tailored for geospatial data processing. Most modern GIS systems (such as ArcGIS or QGIS) use GDAL in the background.</li> <li> <b><a href="http://www.numpy.org/" target="_blank">NumPy</a></b> is one of the principal packages for scientific applications of Python. It is intended for processing large multidimensional arrays and matrices, and an extensive collection of high-level mathematical functions and implemented methods makes it possible to perform various operations with these objects. </li> <li> <b><a href="https://matplotlib.org/index.html" target="_blank">Matplotlib</a></b> is a low-level library for creating two-dimensional diagrams and graphs. With its help, you can build diverse charts, from histograms and scatterplots to non-Cartesian coordinates graphs. Moreover, many popular plotting libraries are designed to work in conjunction with matplotlib. </li> <li> The <b><a href="https://www.pydoc.io/pypi/asf-hyp3-1.1.1/index.html" target="_blank">asf-hyp3 API</a></b> provides useful functions and scripts for accessing and processing SAR data via the Alaska Satellite Facility's Hybrid Pluggable Processing Pipeline, or HyP3 (pronounced "hype"). </li> <li><b><a href="https://www.scipy.org/about.html" target="_blank">SciPY</a></b> is a library that provides functions for numerical integration, interpolation, optimization, linear algebra and statistics. 
</li> </font> <font face="Calibri" size="3"> Our first step is to <b>import them:</b> </font> ``` %%capture import os # for chdir, getcwd, path.exists import datetime # for date from glob import glob import re import pandas as pd # for DatetimeIndex from osgeo import gdal # for Info %matplotlib inline import matplotlib.pyplot as plt from matplotlib import animation from matplotlib import rc import numpy as np from IPython.display import HTML import asf_notebook as asfn asfn.jupytertheme_matplotlib_format() ``` <hr> <font face="Calibri"> <font size="5"> <b> 1. Load Data Stack</b> </font> <font size="3"> In the Exercise4A-SARChangeDetectionMethods.ipynb notebook will used a dense 12-day repeat Sentinel-1 C-band SAR data stack. It contained imagery acquired during the year 2018 in Guatemala. The data was already prepared for you. Now, you will later learn how to download and pre-process Sentinel-1 images using the services of the <a href="https://www.asf.alaska.edu/" target="_blank">Alaska Satellite Facility</a>. 
<br><br> <b>Begin by writing a function to retrieve the absolute paths to each of our tiffs:</b> </font> ``` def get_tiff_paths(paths): tiff_paths = !ls $paths | sort -t_ -k5,5 return tiff_paths ``` <font face="Calibri" size="3"><b>Enter the path to the directory holding your tiffs:</b> </font> ``` while True: print("Enter the absolute path to the directory holding your tiffs.") print("NOTE: the tiffs should all be in the same polarization.") tiff_dir = input() paths = f"{tiff_dir}/*.tif" if os.path.exists(tiff_dir): tiff_paths = get_tiff_paths(paths) if len(tiff_paths) < 1: print(f"{tiff_dir} exists but contains no tifs.") print("You will not be able to proceed until tifs are prepared.") break else: print(f"\n{tiff_dir} does not exist.") continue ``` <font face="Calibri" size="3"><b>Determine the path to the analysis directory containing the tiff directory:</b> </font> ``` analysis_dir = os.path.dirname(tiff_dir) print(analysis_dir) ``` <font face="Calibri" size="3"><b>Determine the path to the analysis directory containing the tiff directory:</b> </font> ``` polarity = asfn.select_parameter(['VV', 'VH'], 'Polarity:') polarity pol = polarity.value print(f'Polarity: {pol}') ``` <font face="Calibri" size="3"><b>Create a wildcard path to the tiffs:</b> </font> ``` if pol == 'VV': wildcard_path = f"{tiff_dir}/*VV.tif*" else: wildcard_path = f"{tiff_dir}/*VH.tif*" print(f'wildcard_path: \n{wildcard_path}') ``` <font face="Calibri" size="3"><b>Write a function to extract the tiff dates from a wildcard path.</b> </font> ``` def get_dates(paths): dates = [] pths = glob(paths) for p in pths: regex = "[0-9]{8}" date = re.search(regex, p) if date: dates.append(date.group(0)) dates.sort() return dates ``` <font face="Calibri" size="3"><b>Call get_dates() to collect the product acquisition dates:</b></font> ``` dates = get_dates(wildcard_path) print(f'dates: \n{dates}') ``` <br> <font face="Calibri" size="5"> <b> 2. 
Create the VRTs </b> </font> <font face="Calibri" size="3"><b>Create the virtual raster table for the GeoTiffs:</b></font> ``` raster_path = f"{analysis_dir}/raster_stack_{pol}.vrt" !gdalbuildvrt -separate $raster_path $wildcard_path ``` <br> <font face="Calibri" size="5"> <b> 3. Define Some Python Helper Functions for this Notebook </b> </font> <br><br> <font face="Calibri" size="3">We are defining two helper functions for this notebook: - **CreateGeoTiff()** to write out images - **timeseries_metrics()** to compute various metrics from a time series data stack</font> ``` def create_geotiff(name, array, data_type, ndv, bandnames=None, ref_image=None, geo_t=None, projection=None): # If it's a 2D image we fake a third dimension: if len(array.shape) == 2: array = np.array([array]) if ref_image == None and (geo_t == None or projection == None): raise RuntimeWarning('ref_image or settings required.') if bandnames != None: if len(bandnames) != array.shape[0]: raise RuntimeError(f'Need {Array.shape[0]} bandnames. 
{len(bandnames)} given') else: bandnames = [f'Band {i+1}' for i in range(array.shape[0])] if ref_image != None: refimg = gdal.Open(ref_image) geo_t = refimg.GetGeoTransform() Projection = refimg.GetProjection() driver = gdal.GetDriverByName('GTIFF') array[np.isnan(array)] = ndv dataset = driver.Create(name, array.shape[2], array.shape[1], array.shape[0], data_type) dataset.SetGeoTransform(geo_t) dataset.SetProjection(projection) for i, image in enumerate(array, 1): dataset.GetRasterBand(i).WriteArray(image) dataset.GetRasterBand(i).SetNoDataValue(ndv) dataset.SetDescription(bandnames[i-1]) dataset.FlushCache() return name def timeseries_metrics(raster, ndv=np.nan): # Make us of numpy nan functions # Check if type is a float array if not raster.dtype.name.find('float')>-1: raster = raster.astype(np.float64) # Set ndv to nan if not np.isnan(ndv): raster[np.equal(raster,ndv)] = np.nan # Build dictionary of the metrics tsmetrics={} rperc = np.nanpercentile(raster,[5,50,95], axis=0) tsmetrics['mean'] = np.nanmean(raster, axis=0) tsmetrics['max'] = np.nanmax(raster, axis=0) tsmetrics['min'] = np.nanmin(raster, axis=0) tsmetrics['range'] = tsmetrics['max'] - tsmetrics['min'] tsmetrics['median'] = rperc[1] tsmetrics['p5'] = rperc[0] tsmetrics['p95'] = rperc[2] tsmetrics['prange'] = rperc[2]-rperc[0] tsmetrics['var'] = np.nanvar(raster, axis=0) tsmetrics['std'] = np.sqrt(tsmetrics['var']) tsmetrics['CV'] = np.abs(tsmetrics['var'] / tsmetrics['mean']) return tsmetrics ``` <br> <font face="Calibri" size="5"> <b> 4. 
Create a Pandas Time Index and Display the VRT Band Dates</b> </font> <br><br> <font face="Calibri" size="3"><b>Create an index of timedelta64 data with Pandas:</b></font> ``` tindex = pd.DatetimeIndex(dates) ``` <font face="Calibri" size="3"><b>Print the bands and dates for all images in the virtual raster table (VRT):</b></font> ``` j = 1 print(f"Bands and dates for {raster_path}") for i in tindex: print("{:4d} {}".format(j, i.date()), end=' ') j += 1 if j%5 == 1: print() ``` <hr> <br> <font face="Calibri" size="5"> <b> 4. Create a Time Series Animation to get an Idea of the Dynamics at the Site </b> </font> <font face="Calibri" size="4"> <b> 4.1 Load Time Series Stack </b> </font> <font face="Calibri" size="3">Now we are ready to create a time series animation from the calibrated SAR data. <br><br> <b>First, create a raster from band 0 and a raster stack from all the images:</b> </font> ``` img = gdal.Open(raster_path) band = img.GetRasterBand(1) raster0 = band.ReadAsArray() band_number = 0 # Needed for updates rasterstack = img.ReadAsArray() ``` <font face="Calibri" size="3"><b>Print the bands, pixels, and lines:</b></font> ``` print(f"Number of bands: {img.RasterCount}") print(f"Number of pixels: {img.RasterXSize}") print(f"Number of lines: {img.RasterYSize}") ``` <br> <font face="Calibri" size="4"> <b> 4.2 Data Conversion between dB and Power Scales </b> </font> The data at hand are radiometrically terrain corrected images, which are often expressed as terrain flattened $\gamma^0$ backscattering coefficients. For forest and land cover monitoring applications $\gamma^0$ is the preferred metric. 
<br><br> To use a logarithmic scale instead of the natural power scale, <b>you can set the following variable to True</b>: ``` todB = True labeldB = 'dB' if todB else 'linear' def convert(raster, todB=todB): if todB: return 10 * np.ma.log10(raster) else: return raster ``` <br> <font face="Calibri" size="4"> <b> 4.3 Create Time Series Animation </b> </font> <font face="Calibri" size="3"><b>Create and move into a directory in which to store our plots and animations:</b></font> ``` product_path = 'plots_and_animations' asfn.new_directory(product_path) ``` <font face="Calibri" size="3">Now we can <b>create the information needed to animate our data:</b></font> ``` %%capture fig = plt.figure(figsize=(10, 5)) ax = fig.subplots() ax.axis('off') vmin = np.nanpercentile(convert(rasterstack), 1) vmax = np.nanpercentile(convert(rasterstack), 99) im = ax.imshow(convert(raster0), cmap='inferno', vmin=vmin, vmax=vmax) cbar = fig.colorbar(im) cbar.set_label(labeldB) ax.set_title("{}".format(tindex[0].date())) plt.rcParams.update({'font.size': 14}) def animate(i): ax.set_title("{}".format(tindex[i].date())) im.set_data(convert(rasterstack[i])) # Interval is given in milliseconds ani = animation.FuncAnimation(fig, animate, frames=rasterstack.shape[0], interval=300) ``` <font face="Calibri" size="3"><b>Configure matplotlib's RC settings for the animation:</b></font> ``` rc('animation', embed_limit=40971520.0) # We need to increase the limit maybe to show the entire animation ``` <font face="Calibri" size="3"><b>Create a javascript animation of the time-series running inline in the notebook:</b></font> ``` HTML(ani.to_jshtml()) ``` <font face="Calibri" size="3"><b>Delete the dummy png</b> that was saved to the current working directory while generating the javascript animation in the last code cell.</font> ``` try: os.remove('None0000000.png') except FileNotFoundError: pass ``` <font face="Calibri" size="3"><b>Save the animation (animation.gif):</b> </font> ``` 
ani.save(os.path.join(product_path, f'animation_{labeldB}.gif'), writer='pillow', fps=2) ``` <br> <div class="alert alert-success"> <font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>EXERCISE #1</u>: </font> Can You See the Impact of the Eruption?</b> </font> <font face="Calibri" size="3"> Can you see the impact of the ongoing eruption in the SAR images? Can you identify the lahar flows? When did they occur and how far did they reach? </font> </div> <hr> <br> <font face="Calibri" size="5"> <b> 5. Computation and Visualization of Time Series Metrics</b> </font> <font face="Calibri" size="3">Once a time-series was constructed, we can compute <b>a set of metrics</b> for each pixel in the stack: - Mean - Median - Maximum - Minimum - Range (Maximum - Minimum) - 5th Percentile - 95th Percentile - PRange (95th - 5th Percentile) - Variance - Coefficient of Variation (Variance/Mean) <hr> First, we <b>mask out pixels</b> that are zero (e.g. beyond the edge of the swath). Then we <b>calculate the time series metrics</b>: </font> ``` mask = (rasterstack == 0) raster = np.ma.array(convert(rasterstack), mask=mask, dtype=np.float64) %%capture metrics = timeseries_metrics(raster.filled(np.nan), ndv=np.nan) metrics.keys() ``` <font face="Calibri" size="3">Let's look at the histograms for the time series variance and coeficient of variation to aid displaying those images:</font> ``` fig, ax = plt.subplots(1,2,figsize=(16,4)) ax[0].hist(metrics['var'].flatten(), bins=100, range=np.nanpercentile(metrics['var'], [1,99])) ax[1].hist(metrics['CV'].flatten(), bins=100, range=np.nanpercentile(metrics['CV'], [1,99])) _ = ax[0].set_title('Variance') _ = ax[1].set_title('Coefficient of Variation') # List the metrics keys you want to plot metric_keys=['mean', 'median', 'max', 'min', 'p95', 'p5', 'prange', 'var', 'std', 'CV'] fig= plt.figure(figsize=(16,40)) idx=1 for i in metric_keys: ax = fig.add_subplot(5,2,idx) vmin, vmax = np.nanpercentile(metrics[i], [1, 99]) 
ax.imshow(metrics[i],vmin=vmin,vmax=vmax,cmap='inferno') ax.set_title(i.upper()) ax.axis('off') idx+=1 ``` <hr> <br> <div class="alert alert-success"> <font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>EXERCISE #2:</u> </font></b> <font face="Calibri" size="3"> Can you group the metrics in terms of what they are most sensitive to? How would you expect the sensitivity of each metric to change if you switched from dB to linearly scaled images? </font> </div> <br> <hr> <font face="Calibri" size="3">You might have noticed white patches in the images above. These do not contain any data. The reason is that they are in the radar shadow of terrain that is closer to the satellite. <hr> <br> <font face="Calibri" size="5"> <b> 6. Some Popular SAR Change Detection Methods</b> </font> <font face="Calibri" size="3">This section will introduce you to the following popular and simple change detection methods: - Time series metrics 95$^{th}$ and 5$^{th}$ percentile difference and standard deviation thresholding - Time series coefficient of variation thresholding </font> <hr> <font face="Calibri" size="4"> <b> 6.1 Change Detection with the Percentile Difference and the Variance Threshold Method</b> </font> <font face="Calibri" size="3">In this method we find thresholds on the <b>95$^{th}$ and 5$^{th}$ percentile difference</b> or the <b>temporal pixel-by-pixel gray value variance</b>. Let's start with the 95$^{th}$ and 5$^{th}$ percentile difference. The advantage of looking at percentiles versus maximum minus minimum is that it is more robust to outliers.
First, let us define a <b>function for plotting histograms</b> </font> ``` def plot_histogram_cdf(metric='std'): plt.rcParams.update({'font.size': 12}) fig = plt.figure(figsize=(14, 4)) # Initialize figure with a size ax1 = fig.add_subplot(121) # 121 determines: 2 rows, 2 plots, first plot ax2 = fig.add_subplot(122) h = ax1.hist( metrics[metric].flatten(), bins=200, range=np.nanpercentile(metrics[metric], [1, 99])) ax1.xaxis.set_label_text(f'{metric} {labeldB}') ax1.set_title('Histogram') n, bins, patches = ax2.hist( metrics[metric].flatten(), bins=200, range=np.nanpercentile(metrics[metric], [1, 99]), cumulative='True', density='True', histtype='step', label='Empirical') ax2.xaxis.set_label_text(f'{metric} {labeldB}') ax2.set_title('CDF') outind = np.where(n > 0.95) threshind = np.min(outind) thresh = bins[threshind] ax1.axvline(thresh,color='red') _ = ax2.axvline(thresh,color='red') plt.savefig(os.path.join(product_path, f'{metric}_{labeldB}_histogram.png'), dpi=200, transparent='true') ``` <font face="Calibri" size="3"><b>Now let's look at the 95th - 5th percentile range</b> ``` plot_histogram_cdf(metric='prange') ``` <br> <font face="Calibri" size="3">Let's visualize the 5% of all pixels with the largest (95th - 5th percentile) difference in the time series. 
We will refer to the pixels (x,y) that exceed this threshold $t$ as likely <b>change pixels (cp)</b>: ${cp}_{x,y} = P_{x,y}^{95th} - P_{x,y}^{5th} > t$ If we define $t$ to correspond to the 5% of pixels with highest (95th - 5th percentile) difference, the image looks like:</font> ``` def plot_threshold_classifier(metric='prange', percentage_cutoff=5): plt.figure(figsize=(8,8)) thresh = np.nanpercentile(metrics[metric], 100 - percentage_cutoff) mask = metrics[metric] < thresh # For display we prepare the inverse mask plt.imshow(mask, cmap='gray') _=plt.title(f'Threshold Classifier on {metric} > %1.3f' % thresh) plt.savefig(os.path.join(product_path, f'changes_{metric}_{labeldB}.png'), dpi=200, transparent='true') return np.logical_not(mask) metric = 'prange' masks = {metric: plot_threshold_classifier(metric=metric)} ``` <hr> <br> <div class="alert alert-success"> <font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>EXERCISE #3:</u> </font></b> <font face="Calibri" size="3"> Discuss what you see in this figure. What are the black areas? What are the white areas? What kind of changes may be included in this map? </font> </div> <br> <hr> <br> <font face="Calibri" size="3">Instead of applying a threshold on the 95th - 5th percentile difference data, we can also attempt to threshold other metrics. The <b>standard deviation</b> (or variance) variable seems a useful indicator for change as it identifies pixels for which radar brightness has changed strongly within the time series. 
Hence, in the following we use this metric for change identification according to: ${cp}_{x,y} = \sigma > t$ with $t=CDF_{\sigma} > 0.95$ (5% pixels with highest standard deviation):</font> ``` plot_histogram_cdf(metric='std') metric = 'std' masks[metric] = plot_threshold_classifier(metric=metric) ``` <br> <font face="Calibri" size="4"> <b> 6.2 Change Detection with the Coefficient of Variation Method </b> </font> <font face="Calibri" size="3">We can also set a threshold $t$ for the <b>coefficient of variation image</b> to classify change in the time series: ${CV}_{x,y} = \frac{\sigma_{x,y}}{\overline{X}_{x,y}} > t$ Let's look at the histogram and the Cumulative Distribution Function (CDF) of the coefficient of variation:</font> ``` plot_histogram_cdf(metric='CV') ``` <br> <font face="Calibri" size="3">With a threshold of $t=CDF_{CV} > 0.95$ (5% pixels with highest variance) the change pixels would look like the following image:</font> ``` metric = 'CV' masks[metric] = plot_threshold_classifier(metric=metric) ``` <br> <font face="Calibri" size="4"> <b> 6.3 Pair-wise change detection </b> </font> <font face="Calibri" size="3">To analyze temporal changes between two images, it is useful to compute metrics that are sensitive to discrepancies between the two images. In radar remote sensing, the standard way is to look at ratios (in the linearly scaled power domain) or, equivalently, at differences in the logarithmic dB domain. 
``` dates = ('2018-05-27', '2018-06-08') # around first eruption # convert to datetime objects dates_ = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates] # get the indices in one line dates_ind = [np.argmin(np.abs(date - tindex)) for date in dates_] print(f'Comparing image {dates_ind[0]} from {tindex[dates_ind[0]].date()} with {dates_ind[1]} from {tindex[dates_ind[1]].date()}') ``` <br> <font face="Calibri" size="3"><b>Compute the log ratio in dB</b>, corresponding to the difference in dB</font> ``` ratiodB = 10 * np.ma.log10(np.ma.divide(rasterstack[dates_ind[1], ...], rasterstack[dates_ind[0], ...])) ``` <br> <font face="Calibri" size="3"><b>Let us manually choose a threshold this time</b> <br><br> thresh is the threshold, e.g. -2 dB<br> thresh_type determines whether we mask everything below that (lower) or above (upper)</font> ``` thresh = -2 thresh_type = 'lower' #'lower': mask everything below thresh, 'upper': mask everything above dynamic_range = np.nanpercentile(np.abs(ratiodB), 99) fig, axs = plt.subplots(ncols=3, nrows=1) fig.set_size_inches(20, 4) plt.subplots_adjust(hspace=0.4, right=0.85) h = axs[0].hist( ratiodB.flatten(), bins=200, range=np.nanpercentile(ratiodB, [0.1, 99.9])) axs[0].xaxis.set_label_text(f'difference [dB]') axs[0].set_title('Histogram') im0 = axs[1].imshow(ratiodB, cmap='RdBu', vmin=-dynamic_range, vmax=dynamic_range) cbar = fig.colorbar(im0, orientation='vertical', ax=axs.ravel().tolist(), shrink=0.7) cbar.set_label('[dB]') axs[1].set_title('Image') mask = (ratiodB > thresh if thresh_type == 'lower' else ratiodB < thresh).astype(np.int8) axs[2].imshow(mask, cmap='gray') axs[2].set_title('Mask') fig.suptitle(f'{tindex[dates_ind[0]].date()} {tindex[dates_ind[0]].date()}') logratiolabel = f'logratio_{tindex[dates_ind[0]].date()}_{tindex[dates_ind[0]].date()}' plt.savefig(os.path.join(product_path, f'{logratiolabel}.png'), dpi=200, transparent='true') masks[logratiolabel] = np.logical_not(mask) ``` <br> <font 
face="Calibri" size="3"><b>Do you think the threshold is appropriate? </b></font> <br> <div class="alert alert-success"> <font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>EXERCISE #4:</u> </font></b> <font face="Calibri" size="3"> Discuss what you see in this figure: - What kind of changes are detected? - How does this change map compare to the previous one? Note that you are only looking at one specific pair of images here. - Feel free to look at other image pairs and analyze identified changes! </font> </div> <br> <hr> <br> <font face="Calibri" size="5"> <b> 7. Write Our Change Detection Results and Metrics Images to GeoTIFF files</b> </font> <font face="Calibri" size="4"> <b> 7.1 Determine Output Geometry </b> </font> <font face="Calibri" size="3">First, we need to <b>set the correct geotransformation and projection information</b>. We retrieve the values from the input images: </font> ``` proj=img.GetProjection() geotrans=list(img.GetGeoTransform()) geotrans ``` <br> <font face="Calibri" size="4"> <b> 7.2 Output Time Series Metrics Images </b> </font> <font face="Calibri" size="3">We use the root of the time series data stack name and append a _ts_metrics_&lt;metric&gt;.tif ending as filenames: </font> ``` # Time Series Metrics as image: # We make a new subdirectory where we will store the images dirname = f"{analysis_dir}/{analysis_dir.split('/')[-1]}_tsmetrics_{pol}" os.makedirs(dirname, exist_ok=True) print(dirname) ``` <br> <font face="Calibri" size="3">Now we can <b>output the individual metrics as GeoTIFF images</b>: </font> ``` names=[] # List to keep track of all the names for metric in metrics: name_ = os.path.join(dirname, f'{metric}_{labeldB}.tif') create_geotiff(name_, metrics[metric], gdal.GDT_Float32, np.nan, [metric], geo_t=geotrans, projection=proj) names.append(name_) ``` <br> <font face="Calibri" size="4"> <b> 7.3 Build a Virtual Raster Table on the Metrics GeoTIFF images </b> </font> <font face="Calibri" size="3">To tie 
the images into one new raster stack of time series metrics we build a virtual raster table with all the metrics. Trick: Use ' '.join(names) to build one long string of names separated by a space as input to *gdalbuildvrt*: </font> ``` cmd='gdalbuildvrt -separate -overwrite -vrtnodata nan '+\ dirname+f'_{labeldB}.vrt '+' '.join(names) # print(cmd) _ = os.system(cmd) ``` <br> <font face="Calibri" size="4"> <b> 7.4 Create GeoTIFFs for the Change Images from our Four Change Detection Attempts </b> </font> <font face="Calibri" size="3">We are going to write GeoTIFF output files that store the results from the classifiers: </font> ``` for metric in masks: fnmetric = os.path.join(dirname, f"{analysis_dir.split('/')[-1]}_{labeldB}_{metric}_thresholds.tif") create_geotiff(fnmetric, masks[metric], gdal.GDT_Byte, np.nan, geo_t=geotrans, projection=proj) ``` <hr> <br> <div class="alert alert-success"> <font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>EXERCISE #5:</u> </font></b> <font face="Calibri" size="3"> Explore the data stack a bit more. Answer the following questions for yourself: <ul> <li> Which metrics are more appropriate for logarithmic (dB) or linearly (power) transformed images?</li> <li> Change the thresholds and dates in this notebook to see the effects on detected changes.</li> <li> Load created change masks into QGIS and compare the detected areas with your time series plots and image data.</li> </ul> </font> </div> <br> <hr> <div class="alert alert-success"> <font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>EXERCISE #6:</u> </font></b> <font face="Calibri" size="4"><b>2016-2017 flooding</b></font> <font face="Calibri" size="3"><br>Explore the flooding data stack from Lab 3 at VV and VH polarization. What can you detect? Compare the minimum method for flood detection from Lab 3 with the results obtained with this notebook.
<br><br> To this end, we advise you to restart the notebook (or duplicate it first) by going to 'Kernel' and 'Restart' </font> </div> <br> <hr> <font face="Calibri" size="2"> <i>SARChangeDetectionMethods_From_Prepared_Data_Stack - Version 1.3.0 - April 2021 </i> <br> <b>Version Changes</b> <ul> <li>from osgeo import gdal</li> <li>namespace asf_notebook</li> </ul> </font>
github_jupyter
``` # Standard imports import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import os import sys import re import glob import suftware as su from scipy.stats import norm from scipy.stats import poisson #Load mavenn and check path import mavenn print(mavenn.__path__) e = np.exp(1) pi = np.pi def pseudo_log(x,base=e): return np.log(x+.5)/np.log(base) # Get in-file data_dir = '/Users/jkinney/Dropbox/20_mavenn/20.08.16_mpsa_raw_data/' in_file = data_dir + 'results.brca2_9nt_lib1_rep1.txt' # Create dict to record statistics stats_dict = {} # Load file in_df = pd.read_csv(in_file, delimiter='\t', index_col=0) print(f'Processing {in_file}') # Remove extraneous columns del in_df['mis_ct'] del in_df['lib_ct'] in_df.head() # Marginalize by splice site data_df = in_df.groupby('ss').sum() data_df.reset_index(inplace=True) # Look at tot_ct distribution sns.histplot(np.log10(data_df['tot_ct']+.1)) # Rename columns data_df.rename(columns={'ss':'x'}, inplace=True) # Make sequences RNA data_df['x'] = [ss.replace('T','U') for ss in data_df['x']] # Remove ss with minimum tot_ct min_ct = 10 ix = data_df['tot_ct'] >= min_ct data_df = data_df[ix] print(f'{(~ix).sum()} ss removed for having tot_ct < {min_ct}') # Remove ss with invalid sequences ix = np.array([((x[3]=='G') and (x[4] in {'C','U'})) for x in data_df['x']]) data_df = data_df[ix] print(f'{(~ix).sum()} ss with invalid sequences removed') # Get consensus i_cons and o_cons cons_seq = 'CAGGUAAGU' tmp_df = data_df.set_index('x') i_cons = tmp_df.loc['CAGGUAAGU','tot_ct'] o_cons = tmp_df.loc['CAGGUAAGU','ex_ct'] # Compute y i_n = data_df['tot_ct'] o_n = data_df['ex_ct'] y_n = np.log10(100*((o_n+1)/(i_n+1))/((o_cons+1)/(i_cons+1))) data_df['y'] = y_n # Assign to trianing and test sets N = len(data_df) training_frac=.8 np.random.seed(0) r = np.random.rand(N) test_frac = .2 val_frac = .2 ix_train = (test_frac + val_frac <= r) ix_val = (test_frac <= r) & (r < test_frac + 
val_frac) ix_test = (r < test_frac) data_df['set'] = '' data_df.loc[ix_train, 'set'] = 'training' data_df.loc[ix_val, 'set'] = 'validation' data_df.loc[ix_test, 'set'] = 'test' assert all([len(x)>0 for x in data_df['set']]) # Shuffle data for extra safety data_df = data_df.sample(frac=1).reset_index(drop=True) # Order columns data_df = data_df[['set', 'tot_ct', 'ex_ct', 'y', 'x']] # Preview dataframe print(f'N: {N:,}') data_df.head(20) # Check normalization; should have y=2.0 data_df[data_df['x']==cons_seq] # Show size of compressed dataset file file_name = 'mpsa_data.csv.gz' data_df.to_csv(file_name, compression='gzip', index=False) print('df (zipped):') !du -mh $file_name !mv $file_name ../. # Test loading loaded_df = mavenn.load_example_dataset('mpsa') loaded_df.head() ```
github_jupyter
<small><small><i> All the IPython Notebooks in **Python Introduction** lecture series by Dr. Milaan Parmar are available @ **[GitHub](https://github.com/milaan9/01_Python_Introduction)** </i></small></small> # Python Operators Python can be used like a calculator. Simply type in expressions to get them evaluated. **What are operators in python?** Operators are special **symbols** in Python that carry out **arithmetic** or **logical computation**. The value that the operator operates on is called the **operand**. For example: ```python >>>6+3 9 ``` Here, **`+`** is the operator that performs addition. **`2`** and **`3`** are the operands and **`5`** is the output of the **operation**. ``` 6+3 ``` ## 1. Arithmatic Operators Arithmetic operators are used to perform **mathematical operations** like **addition**, **subtraction**, **multiplication** etc. | Symbol | Task Performed | Meaning | Example | |:------:|:---------------| :------: |:--------:| | **`+`** | Addition | add two operands or unary plus | **x + y** or **+2** | | **`-`** | Subtraction | substract right operand from the left or unary minus | **x - y** or **-2** | | **`*`** | Multiplication | Multiply two operands | **x \* y** | | **`/`** | Division | Divide left operand by the right one (always results into float) | **x / y** | | **`%`** | Modulus (remainder) | remainder of the division of left operand by the right | **x % y** (remainder of **x/y**) | | **`//`** | Integer/Floor division | division that results into whole number adjusted to the left in the number line | **x // y** | | <b>`**`</b> | Exponentiation (power) | left operand raised to the power of right | **x \*\* y** (**x** to the power **y**) | As expected these operations generally promote to the most general type of any of the numbers involved i.e. int -> float -> complex. 
#### Example : Arithmetic operators in Python ``` print('Addition: ', 1 + 2) print('Subtraction: ', 2 - 1) print('Multiplication: ', 2 * 3) print ('Division: ', 4 / 2) # Division in python gives floating number print('Division: ', 6 / 2) print('Division: ', 7 / 2) print('Division without the remainder: ', 7 // 2) # gives without the floating number or without the remaining print('Modulus: ', 3 % 2) # Gives the remainder print ('Division without the remainder: ',7 // 3) print('Exponential: ', 3 ** 2) # it means 3 * 3 x = 16 y = 3 print('x + y =',x+y) # 19 print('x - y =',x-y) # 13 print('x * y =',x*y) # 48 print('x / y =',x/y) # 5.333 print('x // y =',x//y) # 5 1+2+3 7-1 6 * (3+0j) * 1.0 5/6 ``` In many languages (and older versions of python) $\frac{1}{2}=0$ (truncated division). In Python 3 this behaviour is captured by a separate operator that rounds down: (i.e., **a // b$=\lfloor \frac{a}{b}\rfloor$**) ``` 5//6.0 15%10 3 ** 2 # it means 3 * 3 ``` Python natively allows (nearly) infinite length integers while floating point numbers are double precision numbers: ``` 22**600 22.0**600 ``` ## 2. Comparison/Relational operators Comparison operators are used to **compare values**. It either returns **True** or **False** according to the **condition**. 
| Symbol | Task Performed | Meaning | Example | |:----:| :--- |:--- |:---: | | **`>`** | greater than | True if left operand is greater than the right | **x > y** | | **`<`** | less than | True if left operand is less than the right | **x < y** | | **`==`** | equal to | True if both operands are equal | **x == y** | | **`!=`** | not equal to | True if both operands are not equal | **x != y** | | **`>=`** | greater than or equal to | True if left operand is greater than or equal to the right | **x >= y** | | **`<=`** | less than or equal to | True if left operand is less than or equal to the right | **x <= y** | Note the difference between **`==`** (equality test) and **`=`** (assignment) #### Example : Comparison operators in Python ``` print(6 > 3) # True, because 6 is greater than 3 print(6 >= 3) # True, because 6 is greater than or equal to 3 print(6 < 3) # False, because 6 is not less than 3 print(3 < 6) # True, because 3 is less than 6 print(3 <= 6) # True, because 3 is less than or equal to 6 print(6 == 3) # False, because 6 is not equal to 3 print(6 != 3) # True, because 6 is not equal to 3 print(len("apple") == len("avocado")) # False print(len("apple") != len("avocado")) # True print(len("apple") < len("avocado")) # True print(len("banana") != len("orange")) # False print(len("banana") == len("orange")) # True print(len("tomato") == len("potato")) # True print(len("python") > len("coding")) # False x = 30 y = 22 print('x > y is',x>y) # True print('x < y is',x<y) # False print('x >= y is',x>=y) # True print('x <= y is',x<=y) # False z = 3 # 3 is assigned to variable z z == 3 # 3 is equal to z z > 3 ``` Comparisons can also be chained in the mathematically obvious way. The following will work as expected in Python (but not in other languages like C/C++): ``` 0.5 < z <= 1 # z == 3 ``` ## 3. Logical/Boolean operators Logical operators are the **`and`**, **`or`**, **`not`** operators. 
| Symbol | Meaning | Example | |:----:| :---: |:---:| | **`and`** | True if both the operands are true | **x and y** | | **`or`** | True if either of the operand is true | **x or y** | | **`not`** | True if operand are false (complements the operand) | **not x** | #### Example : Logical operators in Python ``` print('True == True: ', True == True) print('True == False: ', True == False) print('False == False:', False == False) print('True and True: ', True and True) print('True or False:', True or False) # Another way comparison print('1 is 1', 1 is 1) # True - because the data values are the same print('1 is not 2', 1 is not 2) # True - because 1 is not 2 print('A in Milaan', 'A' in 'Milaan') # True - A found in the string print('B in Milaan', 'B' in 'Milaan') # False - there is no uppercase B print('python' in 'python is fun') # True - because coding for all has the word coding print('a in an:', 'a' in 'an') # True print('27 is 3 ** 3:', 27 is 3**3) # True print(6 > 3 and 5 > 3) # True - because both statements are true print(6 > 3 and 5 < 3) # False - because the second statement is false print(6 < 3 and 5 < 3) # False - because both statements are false print(6 > 3 or 5 > 3) # True - because both statements are true print(6 > 3 or 5 < 3) # True - because one of the statement is true print(6 < 3 or 5 < 3) # False - because both statements are false print(not 6 > 3) # False - because 6 > 3 is true, then not True gives False print(not True) # False - Negation, the not operator turns true to false print(not False) # True print(not not True) # True print(not not False) # False x = True y = False print('x and y is',x and y) # False print('x or y is',x or y) # True print('not x is',not x) # False True and (not(not False)) or (True and (not True)) # What will be output? 
# True and (not(True)) or (True and (False)) # True and False or (False) # False or False # False ``` Here is the **[truth table (@ and, or, not)](https://github.com/milaan9/01_Python_Introduction/blob/main/Python_Keywords_List.ipynb)** for these operators. ## 4. Bitwise operators Bitwise operators act on operands as if they were string of binary digits. It operates **bit by bit**, hence the name. **For example:** 2 is **`10`** in binary and 7 is **`111`**. **In the table below:** Let **`x`** = 10 (**`0000 1010`** in binary) and **`y`** = 4 (**`0000 0100`** in binary) | Operator | Meaning | Symbol | Task Performed | Example | |:---:|:---:| :---:|:---:|:---:| |**`and`** | Logical and | **`&`** | Bitwise And | **x & y** = 0 (**`0000 0000`**) | |**`or`** | Logical or | **$\mid$** | Bitwise OR | **x \| y** = 14 (**`0000 1110`**) | |**`not`** | Not | **`~`** | Bitwise NOT | **~x** = -11 (**`1111 0101`**) | | &nbsp; |&nbsp; | **`^`** | Bitwise XOR | **x ^ y** = 14 (**`0000 1110`**) | | &nbsp; |&nbsp; | **`>>`** | Bitwise right shift | **x >> 2** = 2 (**`0000 0010`**) | | &nbsp; |&nbsp; | **`<<`** | Bitwise left shift | **x << 2** = 40 (**`0010 1000`**) | ``` a = 2 #binary: 0010 b = 3 #binary: 0011 print('a & b =',a & b,"=",bin(a&b)) 5 >> 1 # 0 ➡ 0000 0101 # 0000 0010 # 0010 is 2 in decimal ``` **Explanation**: 0000 0101 -> 5 (5 is 0101 in binary) Shifting the digits by 1 to the right and zero padding that will be: 0 ➡ 0000 0101 = 0000 0010 0000 0010 -> 2 ``` 5 << 1 # 0000 0101 ⬅ 0 # 0000 1010 # 1010 is 10 in decimal ``` **Explanation**: 0000 0101 -> 5 Shifting the digits by 1 to the left and zero padding will be: 0000 0101 ⬅ 0 = 0000 1010 0000 1010 -> 10 ``` 6 >> 2 # What will be output??? print(not (True and False), "==", not True or not False) # TRUE == True print (False and (not False) or (False and True), "==",not (True and (not False) or (not True))) ``` ## 5. Assignment operators Assignment operators are used in Python to **assign values** to **variables**. 
**`a = 5`** is a simple assignment operator that assigns the value 5 on the right to the variable **`a`** on the left. There are various compound operators in Python like a **`+= 5`** that adds to the variable and later assigns the same. It is equivalent to **`a = a + 5`**. | Symbol | Example | Equivalent to | |:---:|:---:|:---:| | **`=`** | **x = 5** | **x = 5** | | **`+=`** | **x += 5** | **x = x + 5** | | **`-=`** | **x -= 5** | **x = x - 5** | | **`*=`** | **x \*= 5** | **x = x \* 5** | | **`/=`** | **x /= 5** | **x = x / 5** | | **`%=`** | **x %= 5** | **x = x % 5** | | **`//=`** | **x //= 5** | **x = x // 5** | | <b>`**=`</b> | **x \*\*= 5** | **x = x \*\* 5** | | **`&=`** | **x &= 5** | **x = x & 5** | | **`\|=`** | **x \|= 5** | **x = x \| 5** | | **`^=`** | **x ^= 5** | **x = x ^ 5** | | **`>>=`** | **x >>= 5** | **x = x >> 5** | | **`<<=`** | **x <<= 5** | **x = x << 5** | The binary operators can be combined with assignment to modify a variable value. For example: ``` x = 1 x += 2 # add 2 to x print("x is",x) x <<= 2 # left shift by 2 (equivalent to x *= 4) print('x is',x) x **= 2 # x := x^2 print('x is',x) ``` ## 6. Special operators Python language offers some special types of operators like the identity operator or the membership operator. They are described below with examples. ### 1. Identity operators **`is`** and **`is not`** are the identity operators in Python. They are used to check if two values (or variables) are located on the same part of the **memory**. Two variables that are equal does not imply that they are **identical**. 
| Symbol | Meaning | Example | |:---:| :---: |:---:| | **`is`** | True if the operands are identical (refer to the same object) | **x is True** | | **`is not`** | True if the operands are not identical (do not refer to the same object) | **x is not True** | #### Example : Identity operators in Python ``` x1 = 6 y1 = 6 x2 = 'Hello' y2 = 'Hello' x3 = [1,2,3] # list y3 = [1,2,3] # list # Output: False print(x1 is not y1) # Output: True print(x2 is y2) # Output: False because two equal lists are still two separate objects in memory (equal, but not identical) print(x3 is y3) ``` **Explanation:** Here, we see that **`x1`** and **`y1`** are integers of same values, so they are equal as well as identical. Same is the case with **`x2`** and **`y2`** (strings). But **`x3`** and **`y3`** are lists. They are equal but not identical. It is because the interpreter locates them **separately in memory** although they are equal. ### 2. Membership operators **`in`** and **`not in`** are the membership operators in Python. They are used to test whether a value or variable is found in a **sequence** (**[string](https://github.com/milaan9/02_Python_Datatypes/blob/main/002_Python_String.ipynb)**, **[list](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List.ipynb)**, **[tuple](https://github.com/milaan9/02_Python_Datatypes/blob/main/004_Python_Tuple.ipynb)**, **[set](https://github.com/milaan9/02_Python_Datatypes/blob/main/006_Python_Sets.ipynb)** and **[dictionary](https://github.com/milaan9/02_Python_Datatypes/blob/main/005_Python_Dictionary.ipynb)**). In a dictionary we can only test for presence of **key, not the value**. | Symbol | Meaning | Example | |:---:| :---: |:---:| | **`in`** | True if value/variable is found in sequence | **5 in x** | | **`not in`** | True if value/variable is not found in sequence | **5 not in x** | #### Example : Membership operators in Python ``` x = 'Hello world' y = {1:'a',2:'b'} # dictionary 1 is key and 'a' is element. So we access element without its key. 
# Output: True print('H' in x) # Do we have 'H' in 'Hello World' ? # Output: True print('hello' not in x) # Do we have 'hello' in 'Hello World' ? # Output: True print(1 in y) # Output: False because we cannot identify 'a' without its key hence it is Flase. print('a' in y) ``` **Explanation:** Here, **'`H`'** is in **`x`** but **'`hello`'** is not present in **`x`** (remember, Python is case sensitive). Similary, **`1`** is key and **'`a`'** is the value in dictionary y. Hence, **`'a'in y`** returns **`False`**. ## 💻 Exercises ➞ <span class='label label-default'>Operators</span> 1. Declare your age as integer variable 2. Declare your height as a float variable 3. Declare a variable that store a complex number 4. Write a code that prompts the user to enter base and height of the triangle and calculate an area of this triangle (area = 0.5 x b x h). ```py Enter base: 20 Enter height: 10 The area of the triangle is 100 ``` 5. Write a code that prompts the user to enter side a, side b, and side c of the triangle. Calculate the perimeter of the triangle (perimeter = a + b + c). ```py Enter side a: 5 Enter side b: 4 Enter side c: 3 The perimeter of the triangle is 12 ``` 6. Get length and width of a rectangle using prompt. Calculate its area (**area = length x width**) and perimeter (**perimeter = 2 x (length + width)**) 7. Get radius of a circle using prompt. Calculate the area (**area = pi x r x r**) and circumference (**c = 2 x pi x r**) where pi = 3.14. 8. Calculate the slope, x-intercept and y-intercept of $y = 2x -2$ 9. Slope is ($m = (y2-y1)/(x2-x1)$). Find the slope and **[Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance#:~:text=In%20mathematics%2C%20the%20Euclidean%20distance,being%20called%20the%20Pythagorean%20distance.)** between point (2, 2) and point (6,10) 10. Compare the slopes in tasks 8 and 9. 11. Calculate the value of y ($y = x^2 + 6x + 9$). Try to use different x values and figure out at what x value y is going to be 0. 12. 
Find the length of **`'python'`** and **`'datascience'`** and make a falsy comparison statement. 13. Use **`and`** operator to check if **`on`** is found in both **`python`** and **`cannon`** 14. **`I hope this course is not full of jargon`**. Use **`in`** operator to check if **`jargon`** is in the sentence. 15. There is no **`on`** in both **`python`** and **`cannon`** 16. Find the length of the text **`python`** and convert the value to float and convert it to string 17. Even numbers are divisible by 2 and the remainder is zero. How do you check if a number is even or not using python? 18. Check if the floor division of 7 by 3 is equal to the int converted value of 2.7. 19. Check if type of **"10"** is equal to type of 10 20. Check if int(**"9.6"**) is equal to 10 21. Write a code that prompts the user to enter hours and rate per hour. Calculate pay of the person? ```py Enter hours: 40 Enter rate per hour: 30 Your weekly earning is 1200 ``` 22. Write a script that prompts the user to enter number of years. Calculate the number of seconds a person can live. Assume a person can live hundred years ```py Enter number of years you have lived: 100 You have lived for 3153600000 seconds. ``` 23. Write a Python code that displays the following table ```py 1 2 3 4 5 2 4 6 8 10 3 6 9 12 15 4 8 12 16 20 5 10 15 20 25 ```
github_jupyter
``` # Visualization of the KO+ChIP Gold Standard from: # Miraldi et al. (2018) "Leveraging chromatin accessibility for transcriptional regulatory network inference in Th17 Cells" # TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load # NOTE: Default limits networks to TF-TF edges in top 1 TF / gene model (.93 quantile), to see the full # network hit "restore" (in the drop-down menu in cell below) and set threshold to 0 and hit "threshold" # You can search for gene names in the search box below the network (hit "Match"), and find regulators ("targeted by") # Change "canvas" to "SVG" (drop-down menu in cell below) to enable drag interactions with nodes & labels # Change "SVG" to "canvas" to speed up layout operations # More info about jp_gene_viz and user interface instructions are available on Github: # https://github.com/simonsfoundation/jp_gene_viz/blob/master/doc/dNetwork%20widget%20overview.ipynb # directory containing gene expression data and network folder directory = "." # folder containing networks netPath = 'Networks' # network file name networkFile = 'ENCODE_bias50_TFmRNA_sp.tsv' # title for network figure netTitle = 'ENCODE DHS, bias = 50_TFmRNA, TFA = TF mRNA' # name of gene expression file expressionFile = 'Th0_Th17_48hTh.txt' # column of gene expression file to color network nodes rnaSampleOfInt = 'Th17(48h)' # edge cutoff -- for Inferelator TRNs, corresponds to signed quantile (rank of edges in 15 TFs / gene models), # increase from 0 --> 1 to get more significant edges (e.g., .33 would correspond to edges only in 10 TFs / gene # models) edgeCutoff = .93 import sys if ".." 
not in sys.path: sys.path.append("..") from jp_gene_viz import dNetwork dNetwork.load_javascript_support() # from jp_gene_viz import multiple_network from jp_gene_viz import LExpression LExpression.load_javascript_support() # Load network linked to gene expression data L = LExpression.LinkedExpressionNetwork() L.show() # Load Network and Heatmap L.load_network(directory + '/' + netPath + '/' + networkFile) L.load_heatmap(directory + '/' + expressionFile) N = L.network N.set_title(netTitle) N.threshhold_slider.value = edgeCutoff N.apply_click(None) N.draw() # Add labels to nodes N.labels_button.value=True # Limit to TFs only, remove unconnected TFs, choose and set network layout N.restore_click() N.tf_only_click() N.connected_only_click() N.layout_dropdown.value = 'fruchterman_reingold' N.layout_click() # Interact with Heatmap # Limit genes in heatmap to network genes L.gene_click(None) # Z-score heatmap values L.expression.transform_dropdown.value = 'Z score' L.expression.apply_transform() # Choose a column in the heatmap (e.g., 48h Th17) to color nodes L.expression.col = rnaSampleOfInt L.condition_click(None) # Switch SVG layout to get line colors, then switch back to faster canvas mode N.force_svg(None) ```
github_jupyter
# In-Class Exercises The goal of this Jupyter Notebook and the in-class exercises is to give you practical experience applying basic signal processing and signal comparison approaches, which are widely used in ubiquitous computing (and physics, electrical engineering, astronomy, and beyond!) and important to completing assignment 2 (A2: Gesture Recognizer) # FFT A Fast Fourier Transform (FFT) samples a signal and extracts the frequency components. It is used to transform a signal from the time-domain to the frequency-domain. ![Time vs. Frequency view of signal](https://upload.wikimedia.org/wikipedia/commons/6/61/FFT-Time-Frequency-View.png "View of a signal in the time and frequency domain") For example, the signal below consists of a sum of cosine waves at 10, 20, 30, 40, and 50 Hz. ![FFT of an example signal](https://upload.wikimedia.org/wikipedia/commons/thumb/6/64/FFT_of_Cosine_Summation_Function.png/800px-FFT_of_Cosine_Summation_Function.png) ``` # Let's play with the FFT analysis in numpy and scipy and frequency-domain plotting in matplotlib! # First, let's make a simple signal to play with. import matplotlib.pyplot as plt # matplot lib is the premiere plotting lib for Python: https://matplotlib.org/ import numpy as np # numpy is the premiere signal handling library for Python: http://www.numpy.org/ totalTimeOfSignalInSecs = 5 freq = 3 # in Hz samplingRate = 512 # Create an array from 0 to totalTimeOfSignalInSecs * samplingRate # But we need to divide each value in this array by our samplingRate so that our time step (dt) is correct # # We use arange here rather than just range because arange is numpy's version of Python's built-in range... # so we create a numpy array rather than a regular Python array time = np.arange(totalTimeOfSignalInSecs * samplingRate) / samplingRate signal = np.sin(2 * np.pi * freq * time) # TODO: now plot the signal. How would you do this? 
fig, axes = plt.subplots() axes.plot(time, signal) axes.set(xlabel="time", title="My frequency plot ({} Hz)".format(freq)) # Now, let's analyze the frequencies in our wave form using spectral analysis freq = 53.5 # Hz time = np.arange(totalTimeOfSignalInSecs * samplingRate) / samplingRate signal = np.sin(2 * np.pi * freq * time) # TODO: Try playing around with different signals, what happens to the spectral analysis # signal = np.sin(2 * np.pi * freq * time) + 0.05 * np.sin(2 * np.pi * 2 * freq * time) + 0.005 * np.sin(2 * np.pi * 3 * freq * time) fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(15,5)) fig.subplots_adjust(hspace=0.5) ax1.plot(time[0:samplingRate], signal[0:samplingRate]) ax1.set(xlabel='time', title='Sine Wave with Freq={} Hz'.format(freq)) ax1.grid() # By reducing the NFFT size, we get less precision in our signal analysis. # The y-axis is the amount of intensity (or power) at a given frequency # Side note: db Scale is common when dealing with intensities because intensity, # power, and energy are proportional to the square of the signal amplitude # However, the primary point is using an fft to determine what frequencies exist in a signal powerSpectrum, powerFreq = ax2.psd(signal, NFFT=1024, Fs=samplingRate) # But what if frequency changes during the signal. Let's add three different frequencies that occur at discrete # time points. timeOfEachSignalInSecs = 2 # arbitrarily chose 43 Hz, 88 Hz, 156 Hz. 
Don't change these for now (or if you do change them, change them back :) freqs = [43, 88, 156] time = np.arange(timeOfEachSignalInSecs * samplingRate) / samplingRate signal1 = np.sin(2 * np.pi * freqs[0] * time) # + np.sin(2 * np.pi * 320 * time) signal2 = np.sin(2 * np.pi * freqs[1] * time) signal3 = np.sin(2 * np.pi * freqs[2] * time) # TODO: You can use numpy's concatenate to concatenate the signals together # https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html signalConcatenated = np.concatenate([signal1, signal2, signal3]) fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(15,5)) fig.subplots_adjust(hspace=0.5) ax1.plot(signalConcatenated) ax1.set(xlabel='Samples', title='Sine Wave with Freqs={} Hz'.format(freqs)) ax1.grid() powerSpectrum, powerFreq = ax2.psd(signalConcatenated, NFFT=512, Fs=samplingRate) ax2.set(title="Spectral Analysis Across Entire Signal (3 Spikes Corresponding to Each Freq={})".format(freqs)) print() # But the above doesn't show us how the frequencies change over time. For this, we have to use a spectrogram numSignals = len(freqs) time = np.arange(timeOfEachSignalInSecs * samplingRate * numSignals) / samplingRate fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(15,7)) fig.subplots_adjust(hspace=0.5) ax1.plot(time, signalConcatenated) ax1.set(xlabel='Time', title='Sine Wave with Freqs={} Hz'.format(freqs)) ax1.set_xlim(xmin=0,xmax=timeOfEachSignalInSecs*numSignals) ax1.grid() # NFFT = # of points from sample used to calculate the FFT. 
We'll look at that next # noverlap = # of overlapping points between NFFT windows # See: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.specgram.html ax2.specgram(signalConcatenated, NFFT=512, noverlap=0, Fs=samplingRate) ax2.set(xlabel='Time', ylabel="Frequency (Spectral Energy)", title='FFT Of Composite Signal') powerSpectrum, powerFreq = ax3.psd(signalConcatenated, NFFT=512, Fs=samplingRate) ax3.set(title="Spectral Analysis Across Entire Signal (3 Spikes Corresponding to Each Freq={})".format(freqs)) # Playing around with FFT resolution # By increasing the FFT window size, you increase frequency resolution # However, the drawback is that you might miss spurious signals that pop-up momentarily but # not have enough energy compared to other signals to show up in your frequency analysis numSignals = len(freqs) time = np.arange(timeOfEachSignalInSecs * samplingRate * numSignals) / samplingRate fig, axes = plt.subplots(4, 1, figsize=(15,12)) fig.subplots_adjust(hspace=0.5) axes[0].plot(time, signalConcatenated) axes[0].set(xlabel='Time', title='Sine Wave with Freqs={} Hz'.format(freqs)) axes[0].set_xlim(xmin=0,xmax=timeOfEachSignalInSecs*numSignals) axes[0].grid() # You can calculate your frequency resolution (that is the size of each frequency bin) # by FreqResolution = SamplingRate / FFTWindowSize fftWindowSize = 8 axes[1].specgram(signalConcatenated, NFFT=fftWindowSize, Fs=samplingRate, noverlap=0) axes[1].set(xlabel='Time', ylabel="Frequency (Spectral Energy)", title='FFT Of Composite Signal (NFFT={} FreqBinSize={} Hz)'.format(fftWindowSize, samplingRate/fftWindowSize)) # TODO MAKE TWO MORE PLOTS (e.g., for axes[2] and axes[3]) WITH DIFFERENT FFT WINDOW SIZES print() # Recall from last lecture about additive synthesis. What if instead of having the three signals occur at different times, # they all occurred simultaneously! 
totalTimeOfSignalInSecs = timeOfEachSignalInSecs * len(freqs) time = np.arange(totalTimeOfSignalInSecs * samplingRate) / samplingRate signal1 = np.sin(2 * np.pi * freqs[0] * time) signal2 = np.sin(2 * np.pi * freqs[1] * time) signal3 = np.sin(2 * np.pi * freqs[2] * time) signalAdditive = #TODO HOW DO WE PERFORM ADDITIVE SYNTHESIS? fig, axes = plt.subplots(4, 1, figsize=(15,12)) fig.subplots_adjust(hspace=0.5) # The first plot is just to show a zoomed in sample zoomedInSamplingWindow = int(samplingRate / 1) axes[0].plot(time[0:zoomedInSamplingWindow], signalAdditive[0:zoomedInSamplingWindow]) axes[0].set(xlabel='Time', title='{} Samples of Sine Wave with Freqs={} Hz'.format(zoomedInSamplingWindow, freqs)) axes[0].set_xlim(xmin=0,xmax=zoomedInSamplingWindow/samplingRate) axes[0].grid() # Now show the raw plot axes[1].plot(time, signalAdditive) axes[1].set(xlabel='Time', title='Sine Wave with Freqs={} Hz'.format(freqs)) axes[1].set_xlim(xmin=0,xmax=totalTimeOfSignalInSecs) axes[1].grid() # And show a spectrogram and spectral analysis fftWindowSize = 512 axes[2].specgram(signalAdditive, NFFT=fftWindowSize, Fs=samplingRate, noverlap=0) axes[2].set(xlabel='Time', ylabel="Frequency (Spectral Energy)", title='FFT Of Additive Signal (NFFT={} FreqBinSize={} Hz)'.format(fftWindowSize, samplingRate/fftWindowSize)) powerSpectrum, powerFreq = axes[3].psd(signalAdditive, NFFT=512, Fs=samplingRate) axes[3].set(title="Spectral Analysis Across Entire Signal") ``` # Filtering Filtering involves convolving one signal (the filter signal) with another (your source signal). Perhaps the most common digital filter for motion analysis is the Butterworth filter, which Mladenov et al., 2009 used in their paper to filter out high frequency noise in their step tracker. Using a digital filter is a two-step process: 1. You first create the digital filter with desired characteristics (cutoff frequency, phase shift, etc.) 2. 
You then apply this filter through convolution to your source signal Expert Note (feel free to ignore): while you might not conceptualize a moving average filter like this, it can actually be thought of as a type of convolution. See http://matlabtricks.com/post-11/moving-average-by-convolution. Indeed, to apply a moving average filter with numpy, you simply do the following: ''' windowSize = 10 netMagWithMeanFilter5 = np.convolve(sourceSignal, np.ones((windowSize,))/windowSize, mode='valid') ''' ``` # What happens if we try a low-pass filter on this signal? Low-pass filters filter out high frequencies from our data and # keep the lower frequencies. # Resources: # - http://scipy.github.io/old-wiki/pages/Cookbook/ButterworthBandpass # - https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html from scipy.signal import butter, lfilter # Set the desired cutoff frequency (here we select 115 Hz) desiredCutOffFreq = 115 print("Frequencies that are in our signal={}. We are trying to create a low-pass filter that filters out all freqs greater than {}" .format(freqs, desiredCutOffFreq)) # The butterworth filter expects the cutoff frequency to be normalized between [0, Nyquist] nyquistFreq = samplingRate / 2.0 desiredCutOffFreqNormalized = desiredCutOffFreq / nyquistFreq # Ideally, with an infinite "filter order", you would get a perfect filter # that cuts off all frequencies within your desired band. In practice, you # are dealing with finite data and so a super high filter order will distort your signal # Generally, filter orders can be relatively small (on the order of 10s or less like 6 or 9) but play around! 
# http://scipy.github.io/old-wiki/pages/Cookbook/ButterworthBandpass filterOrder = 40 b, a = butter(filterOrder, desiredCutOffFreqNormalized, btype='lowpass') # Use lfilter to apply the filter: https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.lfilter.html signalButterLowPass = lfilter(b, a, signalAdditive) # TODO fig, axes = plt.subplots(3, 2, figsize=(15,12)) fig.subplots_adjust(hspace=0.5) axes[0][0].plot(time, signalAdditive) axes[0][0].set(xlabel='Time', title='Original Sine Wave with Freqs={} Hz'.format(freqs)) axes[0][0].set_xlim(xmin=0,xmax=totalTimeOfSignalInSecs) axes[0][0].grid() axes[0][1].plot(time, signalButterLowPass) axes[0][1].set(xlabel='Time', title='Low-Pass Filter Version (Cutoff={} Hz)'.format(desiredCutOffFreq)) axes[0][1].set_xlim(xmin=0,xmax=totalTimeOfSignalInSecs) axes[0][1].grid() fftWindowSize = 512 axes[1][0].specgram(signalAdditive, NFFT=fftWindowSize, Fs=samplingRate, noverlap=0) axes[1][0].set(xlabel='Time', ylabel="Frequency (Spectral Energy)", title='FFT of Original Additive Signal (NFFT={} FreqBinSize={} Hz)'.format(fftWindowSize, samplingRate/fftWindowSize)) axes[1][1].specgram(signalButterLowPass, NFFT=fftWindowSize, Fs=samplingRate, noverlap=0) axes[1][1].set(xlabel='Time', ylabel="Frequency (Spectral Energy)", title='FFT of Low-Pass Filtered Signal (NFFT={} FreqBinSize={} Hz)'.format(fftWindowSize, samplingRate/fftWindowSize)) powerSpectrum, powerFreq = axes[2][0].psd(signalAdditive, NFFT=fftWindowSize, Fs=samplingRate) axes[2][0].set(title="Spectral Analysis Across Original Signal") powerSpectrum, powerFreq = axes[2][1].psd(signalButterLowPass, NFFT=fftWindowSize, Fs=samplingRate) axes[2][1].set(title="Spectral Analysis Across Filtered Signal") print() # Now let's create a high-pass filter at 125Hz (so filter out the lower two frequencies) desiredCutOffFreq = 125 print("Frequencies that are in our signal={}. 
We are trying to create a high-pass filter that filters out all freqs less than {}" .format(freqs, desiredCutOffFreq)) # The butterworth filter expects the cutoff frequency to be normalized between [0, Nyquist] nyquistFreq = samplingRate / 2.0 desiredCutOffFreqNormalized = desiredCutOffFreq / nyquistFreq # TODO: Implement the rest! Much can be copy/pasted from the above cell # Recall that another pre-processing step in addition to filtering is de-meaning or de-trending our data # Let's play around with that from scipy import signal a = np.random.rand(50) * 50 b = np.array(range(0,50)) c = a + b # scipy has a nice detrending function # see: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.detrend.html c_detrended = #TODO FINISH THIS # TODO: After you've finished and plotted the detrended signal # try to create more noise and see how well the scipy detrend algorithm performs... fig, axes = plt.subplots() axes.plot(c, label="c (avg={:0.1f})".format(np.mean(c))) axes.plot(c_detrended, label="c_detrended (avg={:0.1f})".format(np.mean(c_detrended))) axes.legend() ``` # Signal comparisons One of the most basic signal comparison approaches is just to take the Euclidean distance between two signals. A slightly better approach is to first attempt to align those signals to minimize the distance. ``` # Use Euclidean distance to compare two signals from scipy import signal from scipy.spatial import distance a = [8, 9, 10, 11, 8, 7, 5, 4, 0, 0, 1, 2, 3, 4, 3, 5, 7] b = [0, 0, 1, 2, 3, 4, 3, 5, 7, 8, 9, 10, 9, 8, 7, 5, 4] # Use scipy's euclidean distance function to compare a to b # the similarity between a and b signals # See: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.euclidean.html euclidDistanceAToB = #TODO COMPLETE THIS LINE # TODO Graph signals a and b. Set the title of your graph such that the euclidDistanceAToB is displayed # Now let's use cross-correlation to align the signals before comparison. 
This is a very powerful technique. # The cross-correlation is a measure of similarity of two series as a function of the displacement of # one signal relative to the other (also known as a sliding dot product) # The formula essentially slides one signal a along the x-axis of fixed signal b, # calculating the integral of their product at each position. The best positional match # between signals is when this dot product is maximized # TODO: use https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.correlate.html correlateAToB = # TODO calculate the cross-correlation array between AToB fig, axes = plt.subplots(2, figsize=(6, 8)) fig.subplots_adjust(hspace=0.3) axes[0].plot(a, alpha=0.7, label="a") axes[0].plot(b, alpha=0.7, label="b") axes[0].legend() axes[0].set_title("Raw Graphs | Euclidean Distance From A to B = {}".format(euclidDistanceAToB)) axes[1].plot(correlateAToB, alpha=0.7) axes[1].set_title("Correlation Between A and B") print() # We are going to use the best match point (found via cross-correlation) to better align the signals # The best correlation point is at the highest value in this array bestCorrelationPoint = np.argmax(correlateAToB) indexShift = #TODO how much should we shift our signal? # We will use numpy's roll function to shift our signal. 
We could also use np.pad to pad with zero a_shifted = np.roll(a, indexShift) euclidDistanceAShiftedToB = #TODO calculate the distance between a_shifted and b print("Best correlation point={}, euclidean distance a to b={}, euclidean distance a_shifted to b={}" .format(bestCorrelationPoint, euclidDistanceAToB, euclidDistanceAShiftedToB)) fig, axes = plt.subplots(3, figsize=(6, 13)) fig.subplots_adjust(hspace=0.3) axes[0].plot(a, alpha=0.7, label="a") axes[0].plot(b, alpha=0.7, label="b") axes[0].legend() axes[0].set_title("Raw Graphs | Euclidean Distance From A to B = {}".format(euclidDistanceAToB)) # Note that the correlation signal is the length of a axes[1].plot(correlateAToB, alpha=0.7) axes[1].set_title("Correlation Between A and B (Best Correlation Index = {})".format(bestCorrelationPoint)) axes[2].plot(a_shifted, alpha=0.7, label="a_shifted") axes[2].plot(b, alpha=0.7, label="b") axes[2].legend() axes[2].set_title("Shifted Graph | Euclidean Distance From A_Shifted to B = {}".format(euclidDistanceAShiftedToB)) print() ``` # Closing note If you want the plots to be interactive, type the following in a cell: ``` %matplotlib notebook ``` See: https://stackoverflow.com/a/41125787
github_jupyter
# Manipulação de dados - III ## Agregação e agrupamento ### Agregando informações de linhas ou colunas Para agregar informações (p.ex. somar, tomar médias etc.) de linhas ou colunas podemos utilizar alguns métodos específicos já existentes em *DataFrames* e *Series*, tais como `sum`, `mean`, `cumsum` e `aggregate` (ou equivalentemente `agg`): ``` import pandas as pd import numpy as np dados_covid_PB = pd.read_csv('https://superset.plataformatarget.com.br/superset/explore_json/?form_data=%7B%22slice_id%22%3A1550%7D&csv=true', sep=',', index_col=0) dados_covid_PB.agg(lambda vetor: np.sum(vetor))[['casosNovos','obitosNovos']].astype('int') ``` Podemos conferir esta agregação resultante com o número de casos acumulados e óbitos acumulados ``` dados_covid_PB.head() ``` Isto também pode ser obtido utilizando o método `sum` de *DataFrames* e *Series*: ``` dados_covid_PB[['casosNovos','obitosNovos']].sum() ``` Podemos recriar a coluna `'obitosAcumulados'` com o método `cumsum` (soma cumulativa): ``` dados_covid_PB.obitosNovos.sort_index().cumsum() ``` ### Selecionando entradas distintas Para selecionar entradas distintas utilizamos o método `drop_duplicate`. Aqui, para exemplificar, vamos utilizar o banco de dados oficial sobre COVID no Brasil: ``` # pode levar um tempo para ler... covid_BR = pd.read_excel('../database/HIST_PAINEL_COVIDBR_18jul2020.xlsx') covid_BR.tail(3) # resumo da tabela covid_BR.info() # todos os estados únicos covid_BR.estado.drop_duplicates().array # ordena alfabeticamente covid_BR.estado.drop_duplicates().dropna().sort_values().array ``` ### Agrupando dados por valores em colunas e agregando os resultados Vamos determinar uma coluna para agrupar. Consideraremos o *DataFrame* `covid_BR`e selecionaremos os estados *PB*, *PE*, *RJ* e *SP* para realizar análises agrupando os resultados por estados. 
``` covid_BR.query('estado in ["PB", "PE", "RJ", "SP"]') ``` Inspecionando o conjunto de dados, observamos que os dados para estado são apresentados com o valor `NaN` para `codmun` e quando `codmun` possui um valor diferente de `NaN`, o resultado é apenas para o município do código em questão. Como estamos interessados nos valores por estado, vamos selecionar apenas os dados com `codmun` contendo `NaN`. ``` covid_estados = covid_BR.query('estado in ["PB", "PE", "RJ", "SP"]') covid_apenas_estados = covid_estados.loc[covid_estados['codmun'].isna()] ``` Vamos agora selecionar apenas as colunas de interesse. Para tanto, vejamos os nomes das colunas: ``` covid_apenas_estados.columns covid_apenas_estados = covid_apenas_estados[['estado', 'data', 'casosNovos', 'obitosNovos']] ``` A data parece ser o *index* natural, já que o *index* atual não representa nada. Observe que teremos *index* repetidos, pois teremos as mesmas datas em estados diferentes. ``` covid_apenas_estados covid_apenas_estados = covid_apenas_estados.set_index('data') covid_apenas_estados ``` ### Agrupando com o método *groupby* Podemos escolher uma (ou mais colunas, incluindo o índice) para agrupar os dados. Ao agruparmos os dados, receberemos um objeto do tipo `DataFrameGroupBy`. Para vermos os resultados, devemos agregar os valores: ``` covid_estados_agrupado = covid_apenas_estados.groupby('estado') covid_estados_agrupado.sum().rename({'casosNovos':'Casos Totais', 'obitosNovos':'Obitos Totais'},axis=1) ``` Podemos agrupar por mais de uma coluna. Vamos fazer dois grupos. *grupo_1* formado por PB e PE e *grupo_2* formado por RJ e SP. 
Em seguida, vamos agrupar por grupo e por data: ``` covid_estados_grupos = covid_apenas_estados.copy() col_grupos = covid_estados_grupos.estado.map(lambda estado: 'grupo_1' if estado in ['PB','PE'] else 'grupo_2') covid_estados_grupos['grupo'] = col_grupos covid_estados_grupos ``` Agora vamos agrupar e agregar: ``` covid_grupo_agrupado = covid_estados_grupos.groupby(['grupo','data']) covid_grupo_agrupado.sum() ``` ### Mesclando *DataFrames* Vamos agora ver algumas formas de juntar dois ou mais *DataFrames* com *index* ou colunas em comum para formar um novo *DataFrame*. #### Mesclando *DataFrames* através de concatenações Concatenar nada mais é do que "colar" dois ou mais *DataFrames*. Podemos concatenar por linhas ou por colunas. A função que realiza a concatenação é `concat`. Os dois argumentos mais utilizados são a lista de *DataFrames* a serem concatenados e `axis`, onde `axis = 0` indica concatenação por linha (um *DataFrame* "embaixo" do outro) e `axis=1` indica concatenação por coluna (um *DataFrame* ao lado do outro). 
Relembre do *DataFrame* `df_dict_series`: ``` df_dict_series = pd.read_csv('../database/df_dict_series.csv') ``` Vamos criar um novo, com novas pessoas: ``` serie_Idade_nova = pd.Series({'Augusto':13, 'André': 17, 'Alexandre': 45}, name="Idade") serie_Peso_novo = pd.Series({'Augusto':95, 'André': 65, 'Alexandre': 83}, name="Peso") serie_Altura_nova = pd.Series({'Augusto':192, 'André': 175, 'Alexandre': 177}, name="Altura") serie_sobrenome = pd.Series({'Augusto':'Castro', 'André':'Castro', 'Alexandre':'Castro'}, name='Sobrenome') dicionario_novo = {'Sobrenome':serie_sobrenome, 'Peso': serie_Peso_novo, 'Idade': serie_Idade_nova, 'Altura': serie_Altura_nova} df_novo = pd.DataFrame(dicionario_novo) df_novo = df_novo.assign(IMC=round(df_novo.eval('Peso/(Altura/100)**2'),2)) df_novo ``` Agora vamos concatená-los: ``` pd.concat([df_dict_series,df_novo]) ``` ### Concatenando por coluna Para exemplificar vamos considerar os dados de COVID da Paraíba, selecionando casos novos e óbitos novos, e vamos obter dos dados do Brasil apenas os casos e óbitos diários do país, e vamos concatená-los por coluna. ``` covid_PB_casos_obitos = dados_covid_PB[['casosNovos','obitosNovos']] ``` Vamos tratar os dados do Brasil: ``` covid_BR_casos_obitos = covid_BR.query('regiao=="Brasil"') covid_BR_casos_obitos = covid_BR_casos_obitos.set_index('data') covid_BR_casos_obitos = covid_BR_casos_obitos[['casosNovos','obitosNovos']].rename({ 'casosNovos':'casosBR', 'obitosNovos':'obitosBR' }, axis=1) covid_PB_casos_obitos covid_BR_casos_obitos ``` Vamos agora concatená-los por coluna: ``` pd.concat([covid_PB_casos_obitos, covid_BR_casos_obitos], axis=1) ``` Para um polimento final, vamos substituir os valores `NaN` que ocorreram antes do dia 13 de julho por 0. 
Para tanto, a forma ideal é utilizando o método `map`: ``` dados_PB_BR = pd.concat([covid_PB_casos_obitos, covid_BR_casos_obitos], axis=1) dados_PB_BR['casosNovos'] = dados_PB_BR.casosNovos.map(lambda caso: 0 if np.isnan(caso) else caso).astype('int') dados_PB_BR['obitosNovos'] = dados_PB_BR.obitosNovos.map(lambda obito: 0 if np.isnan(obito) else obito).astype('int') dados_PB_BR ``` ### Mesclando *DataFrames* através de *joins* Para realizar *joins* iremos utilizar a função `merge` do *pandas*. *joins* tomam duas tabelas, uma tabela à esquerda e uma à direita e retornam uma terceira tabela contendo a união das colunas das duas tabelas. Existem 4 tipos de *joins*: * *left join*: Apenas irão aparecer os *index* (da linha) que existem na tabela à esquerda; * *right join*: Apenas irão aparecer os *index* (da linha) que existem na tabela à direita; * *inner join*: Apenas irão aparecer os *index* que existem nas duas tabelas; * *full join* ou *outer join*: irão aparecer todos os *index* das duas tabelas. Para exemplificar vamos considerar dois *DataFrames* (aqui teremos menos linhas, com nomes e dados fictícios). O primeiro *DataFrame* consistirá de nomes de alunos, CPF e matrícula da UFPB (*nome_cpf_mat*). O segundo *DataFrame* consistirá de nome, CPF e e-mail (*nome_cpf_email*). Nosso objetivo é criar um novo *DataFrame* contendo Nome, CPF, matrícula e e-mail. Temos ainda as seguintes situações: - No *DataFrame* *nome_cpf_mat* existem alunos que não estão presentes no *nome_cpf_email*, pois não enviaram esta informação. - No *DataFrame* *nome_cpf_email* existem alunos que não estão presentes no *nome_cpf_mat* pois estes não são alunos da UFPB. ``` nome_cpf_mat = pd.read_csv('../database/nome_cpf_mat.csv') nome_cpf_email = pd.read_csv('../database/nome_cpf_email.csv') ``` Vamos agora examinar os *DataFrames*. Como são bem simples, basta realizar *prints* deles. ``` nome_cpf_mat nome_cpf_email ``` Tipicamente é bom possuir *index* únicos. 
Neste sentido, vamos definir o CPF como *index*: ``` nome_cpf_mat = nome_cpf_mat.set_index('CPF') nome_cpf_email = nome_cpf_email.set_index('CPF') ``` Vamos agora realizar um **left** join com o *DataFrame* **nome_cpf_mat** ficando à esquerda (neste caso, apenas alunos com matrícula irão aparecer): ``` pd.merge(nome_cpf_mat, nome_cpf_email, how = 'left', on = ['Nome','CPF']) ``` - Na opção *how* dizemos qual o tipo de *join* que queremos realizar. - Na opção *on* dizemos quais as colunas que existem em comum nos *DataFrames*. Veja o que aconteceria se informássemos apenas que o *CPF* está presente nos dois *DataFrames*: ``` pd.merge(nome_cpf_mat, nome_cpf_email, how = 'left', on = 'CPF') ``` Observe que os nomes dos alunos que estão na segunda tabela ficam indeterminados na coluna *Nome_y*. Vamos agora realizar um **right** join com o *DataFrame* **nome_cpf_mat** ficando à esquerda (neste caso, apenas alunos **com e-mail** irão aparecer): ``` pd.merge(nome_cpf_mat, nome_cpf_email, how = 'right', on = ['Nome','CPF']) ``` Vamos agora realizar um **inner** join com o *DataFrame* **nome_cpf_mat** ficando à esquerda (neste caso, apenas alunos **com matrícula e com e-mail** irão aparecer): ``` pd.merge(nome_cpf_mat, nome_cpf_email, how = 'inner', on = ['Nome','CPF']) ``` Por fim, vamos agora realizar um **outer** ou **full** join com o *DataFrame* **nome_cpf_mat** ficando à esquerda (neste caso, **todos** os alunos irão aparecer): ``` pd.merge(nome_cpf_mat, nome_cpf_email, how = 'outer', on = ['Nome','CPF']) ``` ### Os métodos *apply*, *map* e *applymap* A ideia é relativamente simples. Os três métodos são vetorizados e aplicam uma função ou uma substituição via dicionário de tal forma que: * *apply* é realizado via linha ou coluna em um *DataFrame*; * *map* é aplicado a cada elemento de uma *Series*; * *applymap* é aplicado a cada elemento de um *DataFrame*. Já vimos diversos exemplos de uso de `map`. Vejamos exemplos de `applymap` e `apply`. 
* Neste exemplo vamos retomar a concatenação entre os dados da Paraíba e do Brasil, porém iremos substituir *todos* os valores de `NaN` por zero, usando o métodp `applymap`. ``` dados_PB_BR = pd.concat([covid_PB_casos_obitos, covid_BR_casos_obitos], axis=1) dados_PB_BR.applymap(lambda valor: 0 if np.isnan(valor) else valor) ``` Vamos utilizar `apply` para realizar a soma de casos e óbitos através de mais de uma forma ``` dados_PB_BR.apply(lambda x: np.sum(x)).astype('int') ``` Se quisermos realizar a operação por linhas, basta utilizar o argumento `axis=1`: ``` dados_PB_BR.apply(lambda x: (x>0).all(), axis=1) ```
github_jupyter
## 3. Analyze Data [Data Science Playlist on YouTube](https://www.youtube.com/watch?v=5yv_ID4YNTI&list=PLLBUgWXdTBDg1Qgmwt4jKtVn9BWh5-zgy) [![Python Data Science](https://apmonitor.com/che263/uploads/Begin_Python/DataScience03.png)](https://www.youtube.com/watch?v=5yv_ID4YNTI&list=PLLBUgWXdTBDg1Qgmwt4jKtVn9BWh5-zgy "Python Data Science") Once data is read into Python, a first step is to analyze the data with summary statistics. This is especially true if the data set is large. Summary statistics include the count, mean, standard deviation, maximum, minimum, and quartile information for the data columns. ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png) ### Generate Data Run the next cell to: - Generate `n` linearly spaced values betweeen `0` and `n-1` with `np.linspace(start,end,count)` - Draw random samples from a uniform distribution between 0 and 1 with `np.random.rand(count)` - Draw random samples from a normal (Gaussian) distribution with `np.random.normal(mean,std,count)` - Combine `time`, `x`, and `y` with a vertical stack `np.vstack` and transpose `.T` for column oriented data. - Save CSV text file `03-data.csv` with header `time,x,y`. ``` import numpy as np np.random.seed(0) n = 1000 time = np.linspace(0,n-1,n) x = np.random.rand(n) y = np.random.normal(1,1,n) data = np.vstack((time,x,y)).T np.savetxt('03-data.csv',data,header='time,x,y',delimiter=',',comments='') ``` ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png) ### Display Data Distributions The histogram is a preview of how to create graphics so that data can be evaluated visually. [04. Visualize](https://github.com/APMonitor/data_science/blob/master/04.%20Visualize.ipynb) shows how to create plots to analyze data. 
``` import matplotlib.pyplot as plt %matplotlib inline plt.hist(x,10,label='x') plt.hist(y,60,label='y',alpha=0.7) plt.ylabel('Count'); plt.legend() plt.show() ``` ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png) ### Data Analysis with `numpy` The `np.loadtxt` function reads the CSV data file `03-data.csv`. Numpy calculates `size` (dimensions), `mean` (average), `std` (standard deviation), and `median` as summary statistics. If you don't specify the `axis` then `numpy` gives a statistic across both the rows (`axis=0`) and columns (`axis=1`). ``` import numpy as np data = np.loadtxt('03-data.csv',delimiter=',',skiprows=1) print('Dimension (rows,columns):') print(np.size(data,0),np.size(data,1)) print('Average:') print(np.mean(data,axis=0)) print('Standard Deviation:') print(np.std(data,0)) print('Median:') print(np.median(data,0)) ``` ![expert](https://apmonitor.com/che263/uploads/Begin_Python/expert.png) ### Analyze data 1. Calculate the mean, standard deviation, and median of `x*y` 2. Calculate the `skew` of `x*y` with the `scipy.stats` [skew function](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.stats.skew.html). ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png) ### Data Analysis with `pandas` Pandas simplifies the data analysis with the `.describe()` function that is a method of `DataFrame` that is created with `pd.read_csv()`. Note that the data file can either be a local file name or a web-address such as ```python url='http://apmonitor.com/pdc/uploads/Main/tclab_data2.txt' data = pd.read_csv(url) data.describe() ``` ``` import pandas as pd data = pd.read_csv('03-data.csv') data.describe() ``` ![idea](https://apmonitor.com/che263/uploads/Begin_Python/idea.png) ### Data Analysis with `pandas-profiling` Pandas Profiling is a data analysis tool for a more in-depth summary of the data than the `descibe()` function. 
[Install the package](https://pandas-profiling.github.io/pandas-profiling/docs/master/rtd/pages/installation.html) with: ```python pip install --user pandas-profiling[notebook] jupyter nbextension enable --py widgetsnbextension ``` You need to restart the Kernel before proceeding. The install only needs to run once. ``` try: import pandas as pd from pandas_profiling import ProfileReport import os except: !pip install --user pandas-profiling !jupyter nbextension enable --py widgetsnbextension print('Restart the Kernel before proceeding') # import data url='http://apmonitor.com/pdc/uploads/Main/tclab_data2.txt' data = pd.read_csv(url) ``` After you install `pandas-profiling` and enable the widget extension, you can now import and analysis data. Some of the functions take a long time with a large data set. Two methods for dealing with large data sets are to: 1. Sub-sample the data sets such as with `data = data[::10]` to take every 10th row. 2. Use the `minimal` option to avoid the correlation and other analysis that is slow with large data sets. ``` profile = ProfileReport(data, explorative=True, minimal=False) ``` The profile report can be saved as an interactive web-page. The web-page is saved to the current working directory that is displayed with `os.getcwd()`. ``` profile.to_file('report.html') print('File report.html saved to '+os.getcwd()) ``` The profile report can also be viewed in the Jupyter Notebook. ``` profile.to_widgets() ``` ![expert](https://apmonitor.com/che263/uploads/Begin_Python/expert.png) ### TCLab Activity ![connections](https://apmonitor.com/che263/uploads/Begin_Python/connections.png) ### Generate Data Set 1 Generate a file from the TCLab data with seconds (`t`), heater levels (`Q1` and `Q2`), and temperatures (`lab.T1` and `lab.T2`). Record data every second for 120 seconds and change the heater levels every 30 seconds to a random number between 0 and 80 with `np.random.randint()`. 
There is no need to change this program, only run it for 2 minutes to collect the data. If you do not have a TCLab device, read a data file 1 from [an online link](https://apmonitor.com/do/uploads/Main/tclab_dyn_data2.txt). ``` import tclab, time, csv import pandas as pd import numpy as np try: # connect to TCLab if available n = 120 with open('03-tclab1.csv',mode='w',newline='') as f: cw = csv.writer(f) cw.writerow(['Time','Q1','Q2','T1','T2']) with tclab.TCLab() as lab: print('t Q1 Q2 T1 T2') for t in range(n): if t%30==0: Q1 = np.random.randint(0,81) Q2 = np.random.randint(0,81) lab.Q1(Q1); lab.Q2(Q2) cw.writerow([t,Q1,Q2,lab.T1,lab.T2]) if t%5==0: print(t,Q1,Q2,lab.T1,lab.T2) time.sleep(1) file = '03-tclab1.csv' data1=pd.read_csv(file) except: print('No TCLab device found, reading online file') url = 'http://apmonitor.com/do/uploads/Main/tclab_dyn_data2.txt' data1=pd.read_csv(url) ``` ### Read Data Set 2 Use `requests` to download a sample TCLab data file for the analysis. It is saved as `03-tclab2.csv`. ``` import requests import os url = 'http://apmonitor.com/pdc/uploads/Main/tclab_data2.txt' r = requests.get(url) with open('03-tclab2.csv', 'wb') as f: f.write(r.content) print('File 03-tclab2.csv retrieved to current working directory: ') print(os.getcwd()) ``` ### Data Analysis Read the files `03-tclab1.csv` and `03-tclab2.csv` and display summary statistics for each with `data.describe()`. Use the summary statistics to compare the number of samples and differences in average and standard deviation value for `T1` and `T2`. Use the `pandas-profiling` package to generate a data analysis report. View the distribution and correlation of the variables `Q1` and `T1`.
github_jupyter
<a href="https://colab.research.google.com/github/ProfessorPatrickSlatraigh/CST2312/blob/main/CST2312_Midterm_Review_WK09CL13_at_22_Oct_2021.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # **CST2312 Midterm Review** **Week 09, Class 13** * Literals * Strings * Lists * Tuples * Files * Functions * Loops * Conditionals # **Readings** --- **from the textbook: ( [https://www.py4e.com/lessons/](https://www.py4e.com/lessons/))** * [Variables](https://)... (Chapter 3) * [Strings](https://www.py4e.com/lessons/strings) (Chapter 7) * [Lists](https://www.py4e.com/lessons/lists) (Chapter 9) * [Tuples](https://www.py4e.com/lessons/tuples) (Chapter 11) * [Functions](www.py4e.com/lessons/functions) (Chapter 5) * [Loops](https://www.py4e.com/lessons/loops) (Chapter 6) * [Conditionals](https://www.py4ecom/lessons/logic) (Chapter 4) --- # **Literals** A literal is a specific written value in your program. Literals get their name because they *literally* mean what they are. Numbers are literals in Pyhon and so are words and phrases written in a human language. In this lesson you will learn how to communicate numbers and words to your program. You will also learn about other important literals in the Python language. ## Numbers You can write a number directly into your program. For example, if you want Python to print the number `10` you can execute this instruction: ```python print(10) ``` Copy the print instruction into the cell below and run it: ``` ``` The `print` function is given an `expression` enclosed in parentheses `(` and `)` as input and displays the expression in a readable format. We will use the `print` fucntion a lot in this class so it's a good idea to start becoming familiar with the `print` function. The most important thing to notice right now is that what you want printed is placed in between the parentheses. 
Python understands integers, numbers with no decimal place and *floating point* numbers, which contain a decimal. Copy these statements into the cell below and run them: ```python print(100) print(3.14) ``` ``` ``` Python also understands scientific notation. That's where the nubmer is expressed as a decimal that is multiplied by a power of 10. For example 1 billion is: $1,000,000,000 = 1 x 10^9$ You can print one billion in like this: ```python print(1e9) ``` You can use the notation to represent fractions too. ```python print("A nanosecond is", 1e-9, "seconds") ``` Copy the two print statements into the cell below and run them. Make sure there is no error and that you see printed the correct numbers. ``` ``` --- ## Strings Strings are a group of letters. There can be any number of letters in a string (including zero). Python detects the beginning and end of a string by looking for a *quote* character. There are different kinds of quotes. They are discussed in [Quotes](../Lesson03/quotes.ipynb). The most essential quotes are double quotes and single quotes and they are just two ways of doing the same thing. Copy these two examples into the cell below: ```python print("Double Quote") print('Single Quote') ``` ``` ``` **Do you see any difference in the way Python prints the words?** There isn't! Double and single quotes do the same thing. Why have both? The double and single quote are very common in English so you sometimes need to pick quotes based on what you have to say. Enter these examples into the cell below: ```python print("I can't do that, Dave.") print('"Brevity is the soul of wit." --William Shakespeare') ``` ``` ``` *What would happen if you changed double quotes to single quotes in the first print example?* ## True or False? Python has special literals for the values `True` and `False`. The use of true and false will become important later when we talk about conditions and loops. 
For now think of them as a special way to get the `print` function to print the words `True` and `False`. Enter this example into the cell below: ```python print('Roses are', False, 'violets are', True) ``` ``` ``` ## Literally Nothing! All of the other literals you've learned so far exist in every other programming language. The `None` literal is unique to Python. It's means "no value." Printing `None` is exactly like telling Python to "print nothing." See what happens when you print None in the cell below: ```python print(None) ``` ``` ``` ## Literals and Jupyter When you execute a cell in Jupyter the notebook automatically prints that last literal it sees as a convenience. In a lot of cases this means you don't need to have a `print` statement to see the value of something in the notebook. Try putting the code below into the next cell: ```python "Roses are", False ``` ``` ``` Now try this: ```python print("Roses are", False) ``` ``` ``` **Notice the difference?** --- # **Strings** String variables are used to store textual data, characters and sequences of characters. Can be specified by surrounding some text with single ' or double " quotes. ``` str_1 = 'Hello World!' print(str_1) str_2 = "Hello World!" print(str_2) str_1 == str_2 ``` *Multiline strings, or docstrings* If we have a piece of text with multiple lines, then triple quotes (either ' ' ' or " " ") can be used to surround the text. By encapsulating text in triple quotes we are able to define a string with more than one line -- essentially a string which may include newline (/n) characters. Later, we will see how the first docstring in a function definition is used by the help() function to provide information on a function's operation. A docstring may be encapsulated with either triple single-quotes or triple double-quotes so long as the beginning and ending triple quotes are the same type (single or double). 
``` str_2 = """ Now, as other Europeans mull how to restart their economies while still protecting human life, the Spanish have become an early bellwether for how a second wave might happen, how hard it might hit and how it could be contained. "Perhaps Spain is the canary in the coal mine," said Prof. Antoni Trilla, an epidemiologist at the Barcelona Institute for Global Health, a research group. "Many countries may follow us — but hopefully not at the same speed or with the same number of cases that we are facing." The median age of sufferers has dropped to around 37 from 60. Asymptomatic cases account for more than 50 percent of positive results, which is partly because of a fourfold rise in testing. And the health institutions feel much better prepared. """ print(str_2) str_3 = """ If we want to have multiple lines in the string then we can use triple quotes: This is a multiline string! """ print(str_3) ``` **Operations on Strings** ``` # concatenation # note that + concatenates strings str1 = "hello" str2 = "world" message = str1 + " " + str2 + "!" print(message) # Notice the difference of the + operation when operating on text vs. numeric variables: x = 41 x = x + 1 print(x) print (type(x)) x = 41.0 x = x + 1 print(x) print (type(x)) x = "41" x = x + "1" print(x) print(type(x)) # string length word = "Python is the word. And on and on and on...." print(len(word)) print(word) print("The length of the text above is ", len(word), "characters") ``` **Special characters** When we use strings, you will notice that we often want to use some "special characters". These special characters consist of the backslash character (\\) followed by another character. **Tab character** \t: For example, if we want to create an output of multiple columns, with each columns being separated with a tab from each other, we can use the tab character, which is represented as \t. 
``` # Example: List the first name, last name, and email of a person, in columns # Separate the columns using a tab character str_a ="First Name\tLast Name\tEmail" str_b ="Aaron\tRodgers\taaron@nfl.com" str_c ="Giannis\tAntetokounmpo\tgiannis@nba.com" print(str_a) print(str_b) print(str_c) ``` **New line character** \n: This is a special character that we use to represent a new line. ``` # Notice that we end the strings below with the \n character, # which is the "new line" special character str_d = "Hello World!\nHello World Twice!" print(str_d) ``` **Backslash character** \\: In general, backslash (\\) is used to introduce special characters. If we want to type the backslash character itself, we do it by typing backslash twice in a row: \\. ``` print("I want to print backslash: \\") str3 = 'This is a string within single quotes that can contain "double quotes" as part of the string' print(str3) str4 = 'If we want to have \'single quotes\' in single quoted string we should escape them' print(str4) str5 = "Similarly, if we want to have \"double quotes\" in double quoted string we should escape them\n" print(str5) ``` **Splitting Strings: split and join** Since we talked about special characters, let’s talk now about splitting strings, using the split() function. * longstring.split(separator): split the first string (longstring) at every occurrence of the second string (separator) Outputs a list (see below). * connector.join(list): join is the "reverse" of split, and joins all the elements of the list, using the connector string in front. ``` print("practical data science".split(" ")) ``` By default, the split() method will split on spaces (if no split character is specified). ``` print("practical data science".split()) ``` The split() method will use another character as a delimiter when the string of that character is passed as an argument. 
``` print("billgates@microsoft.com".split("@")) ``` *ProTip: split()* Their is a second, optional argument to the split() method which is an integer of the maximum items to return from the split. ``` print("practical data science is your friend".split(" ", 1)) ``` Notice that after the maximum number of split items is reached, the remainder of the string is included as the final item in the list. ``` print("practical data science is your friend".split(" ", 2)) str_a ="First Name\tLast Name\tEmail" str_b ="Aaron\tRodgers\taaron@nfl.com" str_c ="Giannis\tAntetokounmpo\tgiannis@nba.com" print(str_a.split('\t')) print(str_b.split('\t')) print(str_c.split('\t')) #Notice that when we split a string and the delimeter #character does not appear, then we get back the string #itself, but converted into a list with a single element. print("hello".split(" ")) ``` **Exercise** Consider the string billgates@microsoft.com. Write code that finds the username of the email address and the domain of the email address, using the split() command. ``` # here is a solution ``` **Indexing and Slicing: Accessing parts of the string** *Indexing* Strings can be indexed (subscripted), with the first character having index 0. ``` word = "Python" print(word) word[0] # character in position 0 print(word[0]) word[1] print(word[1]) word[5] # character in position 5 print(word[5]) ``` **Negative indexing** Indices may also be negative numbers, to start counting from the right: ``` word[-1] # last character print(word[-1]) word[-2] # second-last character print(word[-2]) word[-6] print(word[-6]) ``` **Slicing** In addition to indexing, slicing is also supported. 
While indexing is used to obtain individual characters, slicing allows you to obtain a substring: ``` word[0:2] # characters from position 0 (included) to 2 (excluded) print(word) print(word[0:2]) word[2:5] # characters from position 2 (included) to 5 (excluded) print(word) print(word[2:5]) word[2:] # characters from position 2 (included) to the end print(word[2:]) word[:3] # characters from beginning (position 0) to position 3 (excluded) print(word[:3]) word[-3:] # last three characters print(word[-3:]) word[-3:-1] # penultimate two characters print(word[-3:-1]) word[1:-1] print(word[1:-1]) ``` **Exercise** Assign the string 'Information and Data Management' to a Python variable. Print the word 'Information' by using the indexing/slicing approach. Print the word 'Data' by using the negative indexing/slicing approach. ``` # here is my solution ``` --- # **Lists** **Complex Data Structures: Lists (and Tuples)** Python’s primitive data types: numeric types, strings, and booleans. We are now going to examine how these basic types can be composed together, in more complex structures. We will start by reviewing lists. **Basics of Lists** *Creating Lists* A list, sometimes called and array, or a vector, is an ordered collection of values. In Python, lists are specified by square brackets, [ ], containing zero or more values, separated by commas. For example: ``` number_list = [1, 2, 3, 0, 5, 10, 11] print(number_list) name_list = ["Elena", "Candido", "Ashwin", "Hong"] print(name_list) mixed_list = ["Elena", 44, "Candido", 53] print(mixed_list) empty_list = [] print(empty_list) empty_list1 = list() print(empty_list1) empty_list == empty_list1 ``` Iterating over lists: **for** loops. Given the department names and the numbers of the introdution level courses, create a list of all the introduction courses for all the departments. 
``` dept = ["CST", "CET", "ENG"] intro_courses = ["1101", "1201"] # iterating over the list elements for i in dept: for j in intro_courses: print (i+j) ``` *Note: We did not spend time on the range() function in class.* ``` # iterating through the lists using indecies for i in range(len(dept)): for j in range(len(intro_courses)): print (dept[i]+dept[j]) ``` Lists are a very common data structure, and are often generated as a result of other functions, for instance, the split(" ") command for a string, will split a string on space, and then return a list of the smaller substrings. Strings are special types of lists. Strings are unmutable lists. ``` my_string = "Wow these data structures make for exciting dinner conversation" list_of_words = my_string.split(" ") print(list_of_words) ``` **Accessing parts of a list: Indexing and Slicing revisited** We can retrieve the value of a particular element of a list by writing in square brackets the location of the element. In python, like most programming languages, list indices start at 0, that is, to get the first element in a list, request the element at index 0. ``` another_list = ["a", "b", "c", "d", "e", "f"] print(another_list[1]) ``` Negative indices can be used to traverse the list from the right. ``` print(another_list[-2]) ``` If you remember the case with strings and accessing the individual characters, the concept is exactly the same. In fact, strings are treated in Python as lists of characters. This also means that the slicing operator works for accessing parts of the list as well: ``` # Retrieves the elements at positions 2, 3, and 4. # Remember these are the 3rd, 4th, and 5th elements of the list print(another_list[2:5]) # Retrieved the last 3 elements of the list print(another_list[-3:]) ``` **Exercise** You are given a list of names as one big, multiline string. Each line contains one name. 
``` # Governers of the US States: Alabama through Hawaii names_string = """Kay Ivey Mike Dunleavy Doug Ducey Asa Hutchinson Gavin Newsom Jared Polis Ned Lamont John Carney Ron DeSantis Brian Kemp David Ige""" list_n = names_string.split("\n") len(list_n) ``` * Use the split command, appropriately configured, to separate names into a list of names. * Extract the 3rd name from the list * Extract the second from the last name, using negative indexing * Retrieve the 6th to 8th names from the list (inclusive, we want a list of 3 names, 6th, 7th, and 8th) * Retrieve the last 3 names. ``` # here is my solution ``` **More on Lists and Strings** *Equality comparison* ``` str1 = "hello" print(str1 == "hello") print(str1 == "Hello") ``` Notice that **capitalization matters** when comparsing strings in Python. If we want to make the comparison case-insensitive we typically first convert both sides of the equality to the same case: ``` print(str1.lower() == "Hello".lower()) ``` The opposite operator for equality is the inequality operator: !=. For example: ``` email1 = "efilatova@citytech.cuny.edu" email2 = " billgates@microsoft.com" print("Are the emails different?", email1 != email2) ``` **Ordering Strings** String also allow for inequality comparisons. When we compare strings, the string that is "smaller" is the one that is coming first in the dictionary. Let's see an example: ``` name1 = "Abraham" name2 = "Bill" # Abraham is lexicographically before Bill print(name1 < name2) name1 = "Giannis" name2 = "Bill" # Giannis is lexicographically after Bill print(name1 < name2) # Space, followed by numbers, followed by uppercase, followed by lowercase sorted(["Bill", " ZZ TOP!!! 
", "HAHA", "lol", "LOL!", "ZZZZZ", "zzzzz", "123", "345"]) ``` **Finding text within string variables** *in* operator * The in operator, needle in haystack: reports if the string needle appears in the string haystack For example, string "New York" appears within "City University of New York", so the following operator returns True: ``` "New York" in "City University of New York" ``` But, unlike reality, "City University of New York" is not in "New York" :-) ``` "City University of New York" in "New York" ``` **find function** * The find function, haystack.find(needle): searches haystack for needle, prints the position of the first occurrence, indexed from 0; returns -1 if not found. For example: ``` word = "Python is the word. And on and on and on and on..." position = word.find("on") # The 'on' appears at the end of 'Python' print(position) print("The first time that we see the string \"on\" is at position", word.find("on")) ``` **count function** str_1.count(str_2): counts the number of occurrences of one string in another. ``` word = "Python is the word. And on and on and on and on..." lookfor = "on" count = word.count(lookfor) print("We see the string '", lookfor, "' that many times: ", count) word = "Python is the word. And on and on and on and on..." lookfor = "Python" count = word.count(lookfor) print("We see the string '", lookfor, "' that many times: ", count) word = "Python is the word. And on and on and on and on..." lookfor = "PYTHON" count = word.count(lookfor) print("We see the string '", lookfor, "' that many times: ", count) ``` **Exercise** Consider the news article from [CBSnews] [link text](https://www.cbsnews.com/news/biden-covid-19-schools-open-trump-administration/), which is given below, and stored in the string variable article. * Count how many times the word Biden appears in the article. * Count how many times the word Trump appears in the article. 
* Now sum up the occurrences of Biden and Trump and display the percentage of coverage for each of the two strings.
He also said he would pressure governors and local officials to impose a mask mandate, saying there's a "question under the Constitution" about whether a president can institute a national mandate. Biden has previously said he supports making wearing a mask a national requirement for three months. "Why do you wear a mask? To protect your neighbor? To keep someone else from getting sick and maybe dying. I call that patriotic. This is the United States of America. Every generation has made sacrifices to help others in moments of crisis," Biden said. Biden also took questions from reporters, his first time doing so since he formally accepted the Democratic nomination for president last month. Biden's campaign also announced on Wednesday that he would be traveling to Kenosha, Wisconsin on Thursday. On Tuesday, Mr. Trump visited the city, which has been roiled by protests since the police shooting of a Black man and the deaths of two people by a suspected vigilante shooter at a subsequent demonstration. Biden said that local leaders and members of Congress had asked him to come to Kenosha. "There have been overwhelming requests that I do come," he told reporters. "What we want to do is we've got to heal, we've got to put things together, bring people together. My purpose in going is to do just that, to be a positive influence." He also said he would be meeting with local community and business leaders and law enforcement. The Democratic presidential nominee drew a contrast between himself and Mr. Trump, whom he has accused of fanning the flames of dissension. Earlier this week, the president declined to denounce suspected vigilante shooter Kyle Rittenhouse, the 17-year-old who has been charged with killing two people during protests in Wisconsin. "I wouldn't incite violence. I would condemn it when it occurred," Biden said, adding that he supported law enforcement and believes that the majority of law enforcement officers are good people. 
"What I'd be doing is I'd be bringing people together in the White House right now," he said. Mr. Trump did not meet with the family of Jacob Blake, the seven times in the back by a police officer. His family has been vocal in denouncing violent protests since Jacob Blake was shot on August 23. The 29-year-old father of six remains hospitalized and is paralyzed from his injuries, his family lawyers have said. It is unclear whether Biden will meet with the Blake family while in Kenosha. """ # here is my solution ``` **Finding items in lists** Common functions * x in list: checks if x appears in the list * list.index(x): looks through the list to find the specified element, returning it's position if it's found, else throws an error * list.count(x): counts the number of occurrences of the input element ``` names = ["Jane", "John", "Chris", "Josh", "Mary", "Anna", "John"] "Chris" in names "Peter" in names # Index name = "Chris" print("Location of", name, "in the list:", names.index(name)) # Count print(names) print("# of John in the list", names.count("John")) print("# of Jane in the list", names.count("Jane")) print("# of Peter in the list", names.count("Peter")) ``` **Exercise** * Find the name "Josh" in the list. In what position does the name appear? * Try to find a name that does not exist in the list. What do you get? Now let's practice the if-else command: * Define a variable search with the name that you want to search for. * Check if the name appears in the list ** If yes, then return the index number for its first apperance, and the number of times that you see the name in the list ** If not, print that the name does not appear in the list ``` names = ["Jane", "John", "Chris", "Josh", "Mary", "Anna", "John"] # here is my solution ``` Besides lists, Python has two additional data structures that can store multiple objects. These data structures are *dictionaries* and *tuples*. 
--- # **Tuples** Tuples are defined in Python by enclosing elements in parenthesis ( ). A tuple is similar to a list and consists of a number of values separated by commas. For instance: ``` t = (12345, 54321, 54321, "hello!") print(t) ``` The usual slicing and indexing operators still apply: ``` print(t[3]) ``` And similarly, we can use the count and index functions. ``` t.index("hello!") list_v = [0, 1, 2, 3, 4] list_v[0] =100 print(list_v) str_v = "01234" str_v[0] = "9" print(str_v) ``` However, a tuple but is *immutable*. This means that we cannot modify its contents. So the other operators that modify a list do not apply to a tuple. Elements of a list can be modified, but elements in a tuple can only be accessed, not modified. The name tuple does not mean that only two values can be stored in this data structure. ``` l_var = [3,4,5] # a list print ("Initial list:", l_var) l_var[0]= 8 print("New list:", l_var) t_var = (3,4,5) # a tuple print ("Initial tuple:", t_var) t_var[0]= 8 print ("New tuple:", t_var) new_t = ("John", 44) print(new_t) ``` *ProTip: Single Value Tuple* To create a tuple that just contains one numerical value, the number must be followed by a comma. Without a comma, the variable is defined as a number. ``` num = (5) type(num) ``` When a comma is included after the number, the variable is defined as a tuple. ``` t_var = (5,) type(t_var) ``` **Exercise** ``` # Given multiline strings: names_gov with the names of governers # of the US States: Alabama through Hawaii; # and names_state, create a list that consists out of # the tuples with two elements: state's name and state's governor's name # # Assume that the order of the governers corresponds to the order # of the states and the number of governers is the same as the number of states. 
# names_gov = """Kay Ivey Mike Dunleavy Doug Ducey Asa Hutchinson Gavin Newsom Jared Polis Ned Lamont John Carney Ron DeSantis Brian Kemp David Ige""" names_state = """Alabama Alaska Arizona Arkansas California Colorado Connecticut Delaware Florida Georgia Hawaii""" # Let us confirm that the lists have the # same number of elements list_govs = names_gov.split("\n") list_states = names_state.split("\n") print (len(list_govs),len(list_states)) print (list_states) for i in list_states: print(i) ``` *Note: we have not used the range() function extensively in class. Please see the section of this notebook on Loops for more about the range() function. Remember that the range() function gives us an easy way to generate a sequence of integers of a desired length and, with additional arguments, it may start and step to our specification.* ``` for i in range(len(list_states)): print(list_states[i]) # What governer - state pairs do we have? for i in range(len(list_states)): print (list_govs[i], "--", list_states[i]) # Create a list of tuples governer_name_list = [] # Populate the list of tuples for i in range(len(list_states)): governer_name_list.append ( (list_govs[i],list_states[i]) ) # Output the list of tuples elements # What is the difference between the two for loops # in this cell? for i in governer_name_list: print ("Election Year", ":", i) governer_name_list # Let us define the variables once again # and try to re-run the above code names_gov = """Kay Ivey Mike Dunleavy Doug Ducey Asa Hutchinson Gavin Newsom Jared Polis Ned Lamont John Carney Ron DeSantis Brian Kemp David Ige""" names_state = """Alabama Alaska Arizona Arkansas California Colorado Connecticut Delaware Florida Georgia Hawaii """ # Let us confirm that the lists have the # same number of elements list_govs = names_gov.split("\n") list_states = names_state.split("\n") print (len(list_govs),len(list_states)) # What governer - state pairs do we have? 
for i in range(len(list_states)): print (list_govs[i], "--", list_states[i]) for j in range(5): print (j) ``` --- # **Files** *Note: In our notebook CST2312_Class07 the subject of reading files from Google Drive into the Colab content area is addressed. See the notebook CST2312_Class07 for step-by-step examples of reading Colab Content and Google Drive files.* **Reading and Writing Files** Computers have two kinds of memory, *volatile* memory that is maintained as long as there is electricity, and *non-volatile* memory which is maintained when the power is off. Variables are a form of volatile memory, they only last as long as the program is running while to computer is on. Files are a form of non-volatile memory, they are kept when the computer is off. Files can be small or very, very large, much larger than the amount of RAM memory in a computer. Becuase files can be so large they are accessed differently than memory. Like a book, files are read (and written) one line at a time. When you want to access a file you create a special kind of variable called a *file handle*. **File Handles** A *file handle* is a variable that gives us access to a file. A file handle is created with the `open()` function. The `open()` function takes at least one argument, the name of the file to open. The file handle is returned so you have to assign it to a variable. For example: ```python file_handle = open('files/example.txt') ``` Enter the code in the next cell. What *type* is `file_handle`? ``` ``` The variable `file_handle` can be used just like any other Python variable but it has a type you haven't seen before: ```python print('The type of file_handle is:', type(file_handle)) ``` ``` ``` The file handle gives your program a way to access the contents of the file, it does not contain the file's data. A variable that contains a file handle has functions that are needed to read and write the file. 
In the next sections you'll see how to use functions of a file handle to access files. ## Reading Files There are two ways to read a file in Python, one line at a time and the whole file at once. Try this, run the cell below to open the file `example.txt`: ``` file_handle = open('files/example.txt') ``` Now execute the code in this cell over and over: ``` file_handle.readline() ``` What do you notice? The file is read one line at a time, each time you execute `readline()` the next line is returned. When the file reaches the end empty lines are returned. Try doing it again but re-running the `open()` function. Now see what happens when you use the `read()` function: ``` file_handle.read() ``` What happens when you mix `read()` and `readline()`? **Do you understand how calling `readline` changes the place in the file?** In a practical program you want to *do something* with the data you get in the file. The exmple below opens the file and reads the first four lines into four different variables. ```python file_handle = open('files/example.txt') line1 = file_handle.readline() line2 = file_handle.readline() line3 = file_handle.readline() line4 = file_handle.readline() print("Lines:", line1, line2, line3, line4) ``` Enter the program into the next cell and run it: ``` ``` **Confused? Use the debugger!** ## Writing Files When you open a file you have to decide whether to read or write the file (you *can* do both, more on that later). If you only give one argument to `open()` the file is opened for reading. If you want to *write* to a file here's how you start: ```python file_handle = open('output.txt', 'w') ``` <div class="alert alert-danger"> <strong>Watch Out!</strong> Opening a file for writing <b>erases the contents of the file</b>. </div> Type the open command into the next cell: ``` file_handle = open('output.txt', 'w') ``` When the second argument to `open()` is `'w'` the file is open for writing. To write a file use the `write` function. 
Execute the code to write to the file. Once the file is open you can write to it using the `write` function. For example: ```python file_handle.write('Hello file world!\n') ``` Add the write command to the next cell and run it: ``` file_handle.write('Hello file world!\n') file_handle.close() ``` **Now find the file. Does it contain what you expect?** Notice that the newline character is in the string. Unlike `print()` the `write()` function does not add a newline to the end of the line. The write function also does not take multiple arguments like the `print()` function. If you want to mix variables and words in the write function use a `f-string`. Update the code cell with this code and execute it: ```python name = 'Your Name Here' file_handle.write(f'Hello, my name is {name}\n') ``` Notice that `write` returns a number? The number is the total number of bytes written to the file. That number can be useful because (as in the example above) when you write variables to a file you might not know in advance how much data is in the variable. Here's a complete program that writes a file: ```python name = "Your name here" file_handle = open('greeting.txt', 'w') wrote = file_handle.write(f"Hello, my name is {name}\n") print(f"I wrote {wrote} bytes to the file") file_handle.close() ``` Enter the complete program into the next cell: ``` ``` **Run the program and look inside of greeting.txt** ## Closing the File Handle If you open the file that we've just written you will notice the contents aren't there yet. When you write to a file the information you write is temporary held in memory to improve the performance of your program. When your program is done reading or writing a file you have to close the file handle using the close function: ```python file_handle.close() ``` It's essential that you always remember to close the files you've opened. 
A program can only hold a fixed number of files open, if your code "leaks" file handles by forgetting about them it's possible that you will exhaust that number and the `open()` function will fail. ## Seeking in a File When you read or write a file it's like reading and writing a book, as you read (or write) you advance the place in the file. The `seek()` function is like turning back or forward to a particular page. The argument to the `seek()` function takes you to a particular byte number in the file. Try the following: ```python file_handle = open('files/example.txt') print("The first line is:", file_handle.readline()) file_handle.seek(0) print("(again) The first line is:", file_handle.readline()) file_handle.close() ``` ``` ``` **Notice that the first line is repeated. Do you understand why?** **What happens when you change the `0` in `seek` to a different number?** ### Seeking and Lines The `seek` function seeks to a *byte number*, not a line. Seeking lines is not as easy as it seems because a "line" can be any number of bytes and the only way to know where lines are is to read the file. Try updating the code in the prvious cell to seek to the second line. It takes some trial and error. If you want to read a particular line from a file it takes some syntax we haven't covered yet. In case you're interested here's an example: ```python file_name = "files/example.txt" line_number = 3 with open(file_name) as fh: for _ in range(line_number): line = fh.readline() print("The line is:", line) ``` **Notice I didn't use `close`? Keep reading to find out why.** ``` ``` ## Appending a File Appending a file means writing to the end of the file. Recall that opening a file for writing erases the contents of the file if it already exists. That's not always a good idea. The `open` function has a mode that lets you append to the file without erasing its contents. 
Here's how to do that using `open()`: ```python file_handle = open('output.txt', 'a') file_handle.write('Put this at the end\n') file_handle.close() ``` Enter the program in to the next cell: ``` ``` Now take a look at `output.txt`. There's a new line at the end of the file and the original contents are still in place. *ProTip: Context Managers* ## Python 3's Context Managers Python 3 has a feature called a *context manager*. I'll say more about context managers in a future lecture, but you can use them to help you remember to close a file. If you use `open()` as a context manager the code that is indented inside of `open` has access to the file. Once execution leaves the indented code the file handle is **automatically** closed. Here's code that reads the first line of a file: ```python with open('example.txt') as file_handle: first_line = file_handle.readline() # The file is automatically closed! print(first_line) ``` ``` ``` Here's code that reads the entire contents of a file without a context manager: ```python file_handle = open('example.txt') file_contents = file_handle.read() file_handle.close() # Never forget! print(file_contents) ``` Here's the same code using the context manager: ```python with open('example.txt') as file_handle: file_contents = file_handle.read() print(file_contents) ``` Try typing the context manager version into the next cell. ``` ``` **Notice the indentation?** The indentation tells Python that the indented statements are *inside* the `open` block. This is an important concept that we will spend a lot more time on later in the course. For now, try running the code with and without the indentation. Does it work both ways? ## The Extra Newline (using `strip`) Notice that when you use `readline` there's an extra newline (`\n`) character? 
Re-run this example from earlier in the notebook: ```python file_handle = open('files/example.txt') line1 = file_handle.readline() line2 = file_handle.readline() line3 = file_handle.readline() line4 = file_handle.readline() print("Lines:", line1, line2, line3, line4) ``` Notice that the output doesn't line up? It can sometimes be annoying to have the extra newline when you read input from a file. The `strip` function in Python removes excess space from the beginning and ending of a string. A newline is considered whitespace. The `strip` function operates on a string. Update the code example to this: ```python file_handle = open('files/example.txt') line1 = file_handle.readline().strip() line2 = file_handle.readline().strip() line3 = file_handle.readline().strip() line4 = file_handle.readline().strip() print("Lines:", line1, line2, line3, line4) ``` ``` ``` The `strip` function works on a string and returns a new string. If you want to strip an existing string and store the stripped version back into the same variable do it like this: ```python foo = " This is foo " print(f'foo is "{foo}"') foo = foo.strip() print(f'foo is "{foo}"') ``` Try entering the example into the next cell: ``` ``` You can also stip at the same time you print a string. For example: ```python foo = " This is foo " print(f'foo is "{foo.strip()}"') ``` ``` ``` Remember the `strip` function. You'll need it when you want to make your program's output perfect. --- # **Functions** A *function* groups together instructions and gives them a name. They are the most important way to reuse code. Python functions are named after functions in math and, just like their mathematical counterparts, have inputs and outputs. Functions begin with the `def` keyword. Here's an example of a function called `triangle`. 
```python def triangle(): """Draw a triangle""" from p4e.drawing import Turtle tu = Turtle() display(tu) tu.draw(70) tu.turn(120) tu.draw(70) tu.turn(120) tu.draw(70) tu.turn(120) ``` The `triangle` function is just a collection of statements that you're already familiar with. The statements are indented inside the `def` statement to signify that they belong with the definition of `triangle`. There's a string at the top called a *docstring*. The docstring describes what the function does. Try typing the `triangle` function into the next cell: ``` ``` Run the code in the cell. What happened? The code above *defines* the function. Defining a function makes the name of the function available but does not execute the instructions *inside* the function. They are executed when the function is *called* or invoked. This code calls `triangle`: ```python triangle() ``` Call `triangle` in the next cell: ``` ``` A fucntion definition is a *compound statement*. ## Compound Statements and Whitespace A compound statement is a is a statement that contains other code. The code inside of a compound statement begins after a colon and is indented to the right of the beginning of the statement. Here is an example of a compound statement: ``` def my_function(): """Say Hello!""" a = 'Hello' b = 'World' print ('{}, {}!'.format(a,b)) print('This is my program.') my_function() ``` **Did the print statements come out in the oder you expected?** Set a breakpoint on line 1 and step through the program. If you're new to programming it might take you a while to wrap your mind around how execution jumps around now that you use functions. Practice using the debugger! ### Python and Whitespace The use of whitespace is controversial in Python. The statements inside of a function must be ligned up to the right of the function and even with each other. You can use either tabs or spaces but you can't mix them. 
Use of whitespace in Python is designed to help you easily see the instructions that are inside and outside of a function.
This is the `triangle` function from before with an argument that controls the size of the triangle: ```python def triangle(size): """This triangle function takes an input: size""" tu = Turtle() display(tu) tu.draw(size) tu.turn(120) tu.draw(size) tu.turn(120) tu.draw(size) ``` Try updating the `triangle` function to take the `size` argument: ``` ``` When you call a function that takes an argument you must specify a value for the argument. Call your function and change the size around: ``` ``` It's an error to call a function with the wrong number of arguments. Try it and see the error message that results. Functions can have any number of argumens. This function takes two arguments, `name` and `color`. The function uses HTML to print out the contents of `name` in the `color` of your coice: ```python from IPython.core.display import HTML def html_nametag(name, color): """Print a nametag using HTML""" display(HTML(f'''<h1 style="color: {color}">Hello I'm {name}</h1>''')) ``` When you call the function you must supply a value for both `name` and `color`: ```python html_nametag("Mike", "red") ``` Try entering the html_nametag function and calling it. ``` ``` ## Return Values The *return value* is the output of a function. The return value is passed back to the place where the function is called. Inside of a function the `return` keyword causes a function to exit (or *return*) the flow of the program to where the function was called. Here's an example of a function that returns a value: ```python def random_number(): """I chose 4 completely at random.""" return 4 ``` When you call the function you use an equal sign to *catch* and store the returned value in an assignment to a variable. In the code below the return value from `random_number` is stored in the variable `num`. 
```python num = random_number() print('num is:', num) ``` Enter the example code into the cell below and run it: ``` ``` **Can you see how the number 4 is returned to the place where the call is made?** The `return` statement stops the execution of a function, no code after the `return` statement is executed. Here's an example of a function with a problem. The last `print` statement in the `early_return` won't be executed: ```python def early_return(): print("You see this.") return print("Not this.") early_return() ``` ``` ``` **Use the debugger to trace the function execution.** A function can return any number of values. The values in the return are separated by commas. Here's a function that returns three values: ```python def multiple_values(): return "One", "Two", "Three" ``` When a function returns mutiple values all of the values must be received by variables on the left of the equal sign. Here's how to catch all of the return vaules from the `multiple_values` function: ```python one, two, three = multiple_values() ``` Try entering and calling the `multiple_values` function: ``` ``` Remember the syntax! ## Mixing Arguments and Return Values Most functions both take arguments and return values. Here's an example of a function that adds two numbers and returns their sum. ```python def add_two_numbers(a, b) : c = a + b return c ``` The caller of the function supplies input and stores the returned output of the function: ```python s = add_two_numbers(10, 100) print(s) ``` Here's another example of a function that performs math. This does three operations and returns the results: ```python def do_math(a, b) : su = a + b pr = a * b ra = a / b return su, pr, ra ``` You can store multiple return values with multiple variables: ```python ret1, ret2, ret3 = do_math(12, 45) print(f'The sum: {ret1}, product: {ret2} ratio: {ret3}') ``` Try entering the example functions. **Do you see that values are passed between different variables?** Use the debugger to explore. 
## An Example Problem Writing functions to specification is a very common task in the life of a programmer. A function specification gives you the four important things you need to know when you write a function: 1. The name of the function 1. The function arguments 1. The return value 1. A description of what the function does. Here's an example specification: <div class="alert alert-info"> <strong>Example Problem</strong> Write a function called `power_of` that takes two arguments, `sig` and `power`. The function should return the value: \begin{equation*} sig^{power} \end{equation*} * Name: `power_of` * Arguments: * `sig` (`float`) - The significand * `power` (`float`) - The exponent * Returns: The `sig` to the `power` power. </div> When you get a function description, start by setting up the problem. Create a function with a matching name and arguments. If there's a return value add a return statement (even if it is not returning the right thing at first). Always add a docstring. Here's the start of the solution: ```python def power_of(sig, power): """The power_of function's docstring""" return 0 # Fix me later ``` Once set up, write code to call your function that you use to test. It's essential to test your code as you go along. Write a line or two and then test again. Here's some code to test the `power_of` function: ```python got = power_of(2, 5) print(f"I got {got} and it should be 32") ``` Now that you have a start and a way to test your code you can begin to problem solve and finish the question. Don't know how to do it? Try Google. Try searching for "[python power operator](https://www.google.com/search?q=python+power+operator&oq=python+power+o&aqs=chrome.0.0l3j69i57j0l4.4884j1j7&sourceid=chrome&ie=UTF-8)". ``` ``` ## Designing with Functions Functions give you a way to name snippets of code. The name of the function should succinctly describe what the function does.
Here's an example of a function that reads the first line of a file: ```python def read_first_line(filename): """Read the first line of a file.""" file_handle = open(filename, 'r') line = file_handle.readline() file_handle.close() return line ``` Calling the function returns the first line of the file: ```python print(read_first_line('files/example.txt')) ``` ``` ``` ### The Importance of Docstrings Functions have docstrings to help readers understand what they do. For complicated functions docstrings describe in detail the function arguments and return values. Here's a docstring describing the `read_first_line` function. ```python def read_first_line(filename) : """ Read the first line of a file. Arguments: filename - The name of the file to read. Returns: A str containing the first line of the file. """ file = open(filename, 'r') line = file.readline() file.close() return line ``` ``` ``` The `help` function reads the docstring. Type in one version of the `read_first_line` function and use the `help` function to see the docstring. ```python help(read_first_line) ``` ``` ``` When it's not obvious what the function arguments are you should be more descriptive in your docstring. It's also important sometimes to describe what the return value is (or might be). ``` def read_a_line(filename, offset) : ''' Reads a line in the file after seeking to a particular place. Arguments: filename - The name of the file to read. offset - The place in the file to start reading. Returns: A string containing the line that was read. ''' file_handle = open(filename) file_handle.seek(offset) line = file_handle.readline() file_handle.close() return line ``` Now the `help` function shows a really useful message: ``` help(read_a_line) ``` There are official Python guidelines for how to use docstrings in functions: * https://www.python.org/dev/peps/pep-0257/ It will be difficult at first to know what code you should put in a function. Don't worry, it's hard for everyone at first! 
Practice, practice, practice! ## Function Variables and Scope Variables created inside of a function, including the function arguments, are *private* to the function. That means they only exist while the funciton is executing and disappear after the function returns. For beginners this can be confusing, but it's an essential feature of Python and other programming languages. The validity of a variable is called *scope*. ### Global Variables Variables that are defined all the way to the left (also called top-level or file scope) are *global variables*. Global variables are available everywhere in a program, including inside of functions. The code below demonstrates that the variable `a` can be used inside of the function: ``` # a is a global variable. a = 10 def func(b): # b is private to the function and can # only be used inside of it. print(f'a is {a} and b is {b}') # call func and pass in a+1 func(a+1) ``` Global variables sometimes lead to confusion. When a function uses a variable with the same name as a global variable the one inside of the function is used. Here's an example where a name conflict has confusing results: ``` a = 10 print(a) def func(a): a = a + 1 print(a) func(a) print(a) ``` **Can you explain what happens when you run this code?** Remember there are *two* variables named `a`. ### The Global Keyword When you use a global variable inside of a function Python has to make a guess. Should I use the global variable or should I create a local variable? Python will always choose to make a local variable. The `global` keyword tells Python to use the global variable rather than create a new local one. 
Look at the difference between the two functions below: ``` # a is global a = "global" def local_a(): a = "inside of local_a()" def global_a(): global a # Tell Python to use global "a" a = "inside of global_a()" print(a) local_a() print(a) global_a() print(a) ``` ### Avoid Global Variables Sometimes global variables are necessary but unless there's no other way to do something it's a good idea to avoid global variables. Global variables make programs harder to read and can lead to confusing problems. For example, this program has an error, but it almost looks right: ``` def add_two_numbers() : global a, b c = a + b a = 10 b = 20 c = 0 add_two_numbers() print(c) ``` **Before you use a global ask yourself, "Can I do this another way?" If answer is yes then you probably should.** ## The main() Function In the C programming language every program has a `main` function. The `main` function is called to start the program and when the `main` function exits the program ends. Python works differently, it just starts executing instructions in your file, starting at the top and ending at the bottom. This is great for small programs but as programs get more complex it's a problem because all of the variables in your program are global variables. **Best practice Python programs should not use global variables and do not have code outside of a function.** Such programs should use a template similar to the one below: ``` """ CIS-15 Program with Functions Mike Matera """ # It's always okay to have import lines at top-level import sys def main(): """All code goes inside of a function.""" print(f'Hello, my name is {sys.argv[0]}') # These two lines call the main() function when your program # is run from the command line. More on this later! if __name__ == '__main__': main() ``` <div class="alert alert-info"><strong>WARNING: Functions in Functions</strong> In Python it's posible to define a function *inside* of another function. 
**DO NOT DEFINE A FUNCTION INSIDE OF ANOTHER FUNCTION** unless you know what you're doing. All of your functions should begin all the way to the left of the page. </div> --- # **Loops** **For Loops** The `for` Loop In this lesson I introduce simple syntax with powerful applications. Earlier, you learned how to create and manipulate lists. What if you wanted to run some code *for each* item in a list? To do that you need a `for` loop. Here's an example: ```python animals = ['Lions', 'Tigers', 'Bears'] for item in animals: print(item) print('Oh my!') ``` Notice that there's a list `animals`. The `print` statement inside of the `for` loop is executed once for each element in the `animals` list. Enter it into the next cell: ``` ``` Again, notice: * The `print()` statement *inside* the for loop is run once for each item in the list * The items in the list are assigned to the variable `item` one by one The most powerful thing about Python's `for` loops is the idea that a sequence goes on the right of the `in` keyword. ## Sequences A sequence is anything that can be stepped through one element at a time. In Python many data types can be treated like sequences. In the last lesson we learned about lists which are naturally sequences but that's not the first sequence type you have learned. Here are some examples: Strings are a sequence of letters: ```python sentence = "Mary had a little lamb." for letter in sentence: print(letter) ``` File handles are a sequence of lines: ```python file_handle = open('files/example.txt') for line in file_handle: print(line.strip()) file_handle.close() ``` Try the example for loops in the next cell: ``` ``` ### Numerical Sequences with range() If you've programmed in another language you've probably seen the C-style `for` loop. If not, no big deal! In many other languages the `for` loop is for counting numbers and the syntax enables you to make numerical sequences (e.g. 0, 1, 2, ...) but not "step through" things easily.
Counting numbers is important sometimes and Python gives you a way to do that. For example: ```python for number in range(10): print(number) ``` Try it in the next cell: ``` for number in range(10): print(number) ``` **What numbers does the list produce?** The `range` function can produce more complicated sequences. If you give `range` two numbers it starts at the first and ends one before the second. For example: ```python for number in range(5, 10): print(number) ``` Try changing the previous cell to the two-argument version of `range`. ## Loop Variables Here's a pop quiz! What is the difference between the output of these three `for` loops: ```python for item in animals: print(item) for animal in animals: print(animal) for duck in animals: print(duck) ``` Not sure? Try them out: ``` ``` *The following is a short test of that concept with a list of animals* animals = ["frog", "cow", "horse", "goat", "professor"] ``` animals = ["frog", "cow", "horse", "goat", "professor"] print(type(animals)) for animal in animals: print(animal) ``` **Answer: NOTHING!** The difference in the `for` loops is the name of the loop variable. There is nothing special about the name of the loop variable, it's simply a variable that the `for` loop creates for you to hold individual items. It's up to you to choose a name that suits your program. It's best to chose a name that will help you understand what's in the variable. Here's a better question: What is the most appropriate variable name from the loops above? 1. `item` 1. `animal` 1. `duck` ## Algorithms With `for` The `for` loop has simple syntax with powerful implications. If you're learning to program for the first time it's important for you to be able to step through the `for` loop one *iteration* at a time as a way to understand what it does. The debugger is really great for this! 
Here's an example with each iteration of the loop spelled out in a table: This `for` loops calculates the sum of every number in a list: ```python numbers = [1, 23, 46, 92, 9, 12, 5] total = 0 for number in numbers: total = total + number print(total) ``` Let's look at the value of `number` and `total` step-by-step: | Step # | `number` | `total` | | --- | --- | --- | | 0 | - | 0 | | 1 | 1 | 1 | | 2 | 23 | 24 | | 3 | 46 | 70 | | 4 | 92 | 162 | | 5 | 9 | 171 | | 6 | 12 | 183 | | 7 | 5 | 188 | Now enter the code and confirm those steps in the debugger: ``` ``` ### Modify the List The loop variable is a *copy* of the list element so changes in the loop variable do not affect the contents of the list. Here's an example `for` loop the changes the list by doubling each element in the list: ```python numbers = [1, 23, 46, 92, 9, 12, 5] for index in range(len(numbers)): numbers[index] = numbers[index] * 2 print(numbers) ``` Notice the following: 1. The loop uses `range` to get numbers. 1. `range` is given `len(numbers)`, the length of the list as an argument. 1. Inside the `for` loop I use the `index` variable to get elements of the list. Simulating the loop in a table we get: | Step # | `index` | `numbers[index]` before -> after | | --- | --- | --- | | 1 | 0 | 1 -> 2 | | 2 | 1 | 23 -> 46 | | 3 | 23 | 46 -> 92 | | 4 | 46 | 92 -> 184 | | 5 | 92 | 9 -> 18 | | 6 | 9 | 12 -> 24 | | 7 | 12 | 5 -> 10 | Enter the `for` loop example and confirm these steps in the debugger: ``` ``` ### Filtering Any legal Python statement can be inside of a `for` loop. Importantly you can make a decision inside of a `for` loop that filters elements. Here's an example of an algorithm that prints some elements in the list but not others: ```python numbers = [1, 23, 46, 92, 9, 12, 5] for number in numbers: if number < 20: print(number) ``` Notice that the `if` is inside the `for`. 
Simulating the loop in a table gives us: | Step # | `number` | `number < 20` | Action | | --- | --- | --- | --- | | 1 | 1 | `True` | `print` | | 2 | 23 | `False` | - | | 3 | 46 | `False` | - | | 4 | 92 | `False` | - | | 5 | 9 | `True` | `print` | | 6 | 12 | `True` | `print` | | 7 | 5 | `True` | `print` | Now enter the example and use the debugger to confirm the table: ``` ``` ### Searching the List Sometimes you want to find something in the list. Of course, this is conveniently done with the `in` operator in Python but there are more complicated cases that require a `for` loop. An algorithm that searches a sequence shold stop when it has found what it's looking for. The `break` statement stops a for loop from executing, leaving any remaining items untouched. Here's an algorithm that searches for a number in the list: ```python numbers = [1, 23, 46, 92, 9, 12, 5] for number in numbers: if number == 92: print("I found it!") break ``` Running `break` stops the loop early. Here's a simulation: | Step # | `number` | `number == 92` | Action | | --- | --- | --- | --- | | 1 | 1 | `False` | - | | 2 | 23 | `False` | - | | 3 | 46 | `False` | - | | 4 | 92 | `True` | Print and break | Enter the example and confirm the simulation in the debugger: ``` ``` <div class="alert alert-info"><strong>Note: Programming Style</strong> <p>The `break` statement is controversial. Some people discourage its use because they believe that it makes code *less readable*, or harder to simulate in your head. As you gain experience you'll be able to judge for yourself how readable and clear code is. Every statement has a time and a place. </div> **What about a function that does the same thing as `in`?** Let's move our filter code into a function called `find_in`. The function returns `True` if the item is in the list and `False` otherwise. * Name: `find_in` * Arguments: * number (integer) - The number to search for. * items (list of integer) - The list to search in. 
* Returns: (bool) `True` if `number` is found in `items` otherwise `False`. Here's the function: ```python def find_in(number, items): for item in items: if item == number: return True return False ``` Here's how to call the function on our list: ```python numbers = [1, 23, 46, 92, 9, 12, 5] if find_in(92, numbers): print("I found it!") ``` Enter the function and corresponding call into the next cell: ``` ``` **While Loops** The `while` Loop The `while` loop is known as the *indefinite* loop. You use it when you *can't know* how many times you need to go through the loop. The `while` loop continues looping while the *loop condidtion* is `True` and exits the loop when the condition is `False`. The `while` loop is less common than the `for` loop but still essential for most programs. Let's revisit our guessing game. In this version of the game we want the user to keep guessing until they get the right answer. There's no way to know how many guesses it will take them. ```python guess = 0 while guess != 34: guess = int(input("Pick a number between 1 and 100: ")) print("You got it!") ``` Try entering the code into the next cell: ``` ``` ## Input Validation The loop in the previous cell asks for input until the user provides the correct value. This is a useful behavior when you want to *validate* the input a user gives you. Here's an example, here's a program that asks for a number between 1 and 100 and forces the user to type in something correct. ``` def input_number(): number = -1 while not 1 <= number <= 100: number = int(input("Enter an integer between 1 and 100: ")) return number n = input_number() print("Thank you for typing:", n) ``` ## Infinite Loops `while` loops are a bit more hazardous than `for` loops because a programming mistake can make them *infinite* loops. Getting stuck in an infinite loop is one of the ways a program can crash and become unresponsive. So more care is needed when you program a `while` loop. 
Sometimes `while` loops are written as infinite loops with a `break` or `return` statement that terminates the loop. Here's the input validation function again: ``` def input_number(): while True: number = int(input("Enter an integer between 1 and 100: ")) if 1 <= number <= 100: return number n = input_number() print("Thank you for typing:", n) ``` **Does the difference between the two `input_number` functions seem like a big deal?** Probably not. Here's a practice problem. Try writing it both ways: Write a function called `number_between` that takes two arguments, a `minimum` and a `maximum` number. The function asks the user for input with the `input` function and returns a number between `minimum` and `maximum`. If the user types an invalid number the function keeps asking for a valid one. - Function: `number_between` - Arguments: - `minimum`: (int) The minimum number (inclusive) - `maximum`: (int) The maximum number (inclusive) - Returns: (int) an integer between `minimum` and `maximum` ``` ``` --- # **Conditionals** **Logic and Decisions** Conditional logic and evaluation of Boolean expressions are how programs make decisions and how you can write programs that respond to input by running some statements and not others. ## Making Decisions with `if` What if you wanted your code to do different things based on user input? For example, suppose you want to play a game that prints different things based on a choice. The `if` statement allows you to conditionally run Python statements. The `if` statement conditionally runs instructions inside the body of the if statement. The code is only run if the condition evaluates to `True`. In other words, the `print` function in the picture may or may not run depending on the value of `number`.
Here's a practical example of a guessing game: ```python number = int(input("Pick a number between 1 and 100: ")) if number == 34: print("That's right!") print("Game Over") ``` Enter the example code and make sure the indentation matches perfectly! ``` ``` **Notice that you only see "That's right!" when you guess 34.** Try using the debugger and watch execution "hop over" the first `print` statement. The *condition* in the if statement is `number == 34`. When the condition is `True` the statements in the body of the `if` are executed; when the condition is `False` the statements are skipped. You can have as many statements as you like inside an `if` statement, even other `if` statements. ### The `if` Condition The statement that follows the `if` keyword will be evaluated to `True` or `False`. While you're coding remember that the statement you write must be asking a question of some form. In the example above the code is asking "if `number` is equal to `34`?" There are many forms a question can take but they all have one thing in common: They will result in a `True` or `False` value. Here are a few examples of questions: > If `number` is less than or equal to `34` ```python if number <= 34: ``` > If `number` modulus `2` is equal to `0` (in other words if `number` is even)? ```python if (number % 2) == 0: ``` > If `course` is equal to the string `"Python Programming for Everyone"` ```python if course == "Python Programming for Everyone": ``` > If `course` has the word `Python` anywhere in it ```python if "Python" in course: ``` While you're working on a program remember that you can print the conditional statement that you put in the `if` to see if what you get is `True` or `False`. For example: ```python print(number <= 34) ``` If what you get is not a boolean it probably means you've made a mistake somewhere. ### Nesting `if` Statements A question I get a lot is, "Can I put this inside of an `if` statement?"
The answer is **YES!** Absolutely any legal Python statement can go inside of an `if` statement. That's a lot of power. At first it might be a bit confusing. Here's an example of the guessing game that gives you a hint: ```python number = int(input("Pick a number between 1 and 100: ")) if number != 34: if number < 34: print("Too low!") if number > 34: print("Too high!") if number == 34: print("That's right!") print("Game Over") ``` Take a minute to type that version of the game into the next cell: ``` ``` **Try different values and use the debugger.** ## `if` and `else` The `else` statement lets you run code when something is *not* true. An `else` can only be used in combination with an `if`. Together they let you run one alternative no matter what the condition is. When you have and if/else one of the alternatives will **always** be run depending on the condition. Here's an example of the guessing game using an `else`: ```python number = int(input("Pick a number between 1 and 100: ")) if number == 34: print("That's right!") else: print("Guess again.") ``` Notice that no condition follows the `else`. Whatever is in the `else` body is only run when the `if` condition is `False` and so it doesn't need any more information. Enter the example in the next cell: ``` ``` Here's the game again that gives a hint: ```python number = int(input("Pick a number between 1 and 100: ")) if number == 34: print("That's right!") else: if number < 34: print("Too low!") else: print("Too high!") ``` Enter the code into the next cell: ``` ``` **Notice that you don't need to test for `number > 34`. Why?** ## Asking Multiple Questions with `elif` In our guessing game there are really three conditions that we care about: 1. The guess is correct 1. The guess is too low 1. The guess is too high Using `if` and `else` we've been able to simplify the code a bit but there's more we can do. 
When a program has to respond to a question with more than two alternatives the `elif` statement allows us to ask another question. The `elif` is short for "else if" and must follow an initial `if` statement and will only be run if the initial condition is `False`. Here's the best version of the guessing game so far: ```python number = int(input("Pick a number between 1 and 100: ")) if number == 34: print("That's right!") elif number < 34: print("Too low!") else: print("Too high!") ``` Here are the rules to remember when you construct `if`, `elif`, and `else` statements: 1. They must start with `if` 1. `elif` is optional and you can have as many as you like. 1. `else` is optional and must be at the end. 1. In an `if`, `elif`, `else` structure **only one** alternative will execute. Let's finish off the game program by testing if the user entered a number between 1 and 100 like we asked: ```python number = int(input("Pick a number between 1 and 100: ")) if number == 34: print("That's right!") elif number < 1 or number > 100: print("Bad guess.") elif number < 34: print("Too low!") else: print("Too high!") ``` Enter the example into the next cell. ``` ``` **Order matters!** Try reversing the two `elif` conditions in your code. What happens? ## Logic Computers are machines that are built on their ability to perform logic. Logic functions are the basis of all mathematical computations and is built on three fundamental operations: * `and` * `or` * `not` Logic operations use `True` and `False` as input, rather than numbers. Logic operations are defined by *truth tables*. Truth tables show the output of a logic operation given all combinations of input. The next sections will define the three fundamental logic operations. ### The `and` Operator The `and` operator returns `True` when both inputs are `True`, otherwise it returns `False`. 
The truth table for `and` is: | Input A | Input B | Output | | - | - | - | | `False` | `False` | `False` | | `False` | `True` | `False` | | `True` | `False` | `False` | | `True` | `True` | `True` | Use the cell below to test out the `and` operator: ```python False and False ``` ``` ``` ### The `or` Operator The `or` operator returns `True` if either input is `True` and `False` when both inputs are `False`. The truth table for `or` is: | Input A | Input B | Output | | - | - | - | | `False` | `False` | `False` | | `False` | `True` | `True` | | `True` | `False` | `True` | | `True` | `True` | `True` | Use the cell below to test out the `or` operator: ```python False or False ``` ``` ``` ### The `not` Operator The `not` operator returns the opposite of its input. Unlike `and` and `or`, `not` only takes a single argument. The truth table for `not` is: | Input | Output | | - | - | | `False` | `True` | | `True` | `False` | Use the cell below to test the `not` operator: ```python not True ``` ``` ``` ### Using Logic Conditional logic operators return `True` or `False` when evaluated in a Boolean expression. Those operations are: * `==` Equals * `<` Less than * `<=` Less than or equal to * `>` Greater than * `>=` Greater than or equal to * `!=` Not equal to The logical operators make it possible to combine operations to make compound questions. For example: ```python order = input("Would you like soup or salad?") if order == 'soup' or order == 'salad': print("Okay!") else: print("Huh?") ``` Enter this program that takes your order: ``` ``` **Do you follow the logic?** ``` ``` --- # **Footnotes:** *Content from this review notebook includes material from Python for Everybody by Charles Severance, and from the course Python for Everyone by Mike Matera (on Github).* ---
github_jupyter
# Toy Example over longer timescales

We have a general, simple pipeline as follows:

```
config.py      download_CDS.py      download_MODIS.py
    ↓                 ↓                     ↓
    ↳→→→→→→→→→→ clean_all_data.py ←←←←←←←←←↲
                      ↓
      [dataframe: 'cleaned_data.pkl']
                      ↓
                    ML.py
                      ↓
[ model: trained_model.joblib ]   [ dataframe: predictions.pkl ]
```

---

* Global settings are specified in `config.py`
* The X data is handled well by `climetlab` and the CDS API.
* The Y data is obtained through a `wget` call.
* The X and Y data are then brought together month-by-month (`clean_all_data.py`) into a single df via an inner join, after some relabelling of columns, indexing, long1/3 conversions etc. This single df is saved as a pickle and will be the basis of all ML training/predictions.
* ML training, hyperparameter optimisation and prediction is performed in `ML.py`

---

```
import pandas as pd
# NOTE(review): numpy and xarray were previously first used *before* being
# imported (numpy was never imported at all in this notebook), which raises
# NameError when the cells are run in order. Import them up front.
import numpy as np
import xarray as xr

data_root = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/'
f = 'predictions.pkl'

# Load the df of predictions and true values
df = pd.read_pickle(data_root + f)

# Also create an xr dataset object
xr_dataset = xr.Dataset.from_dataframe(df)

import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import sys


def plot_to_subplot(f, ax, y, fixed_scaling):
    """Plot variable `y` of dataset `f` on the map axes `ax`.

    If `fixed_scaling` is True a fixed colour scale is applied so that
    the truth and prediction panels are directly comparable.
    """
    quadmesh = f[y].plot(ax=ax,
                         cmap='jet',
                         transform=ccrs.PlateCarree(),
                         cbar_kwargs={'orientation': 'vertical',
                                      'shrink': 0.6,
                                      'aspect': 40})
    ax.coastlines()
    if fixed_scaling:
        quadmesh.set_clim(vmin=10000, vmax=17000)


def plot_month_map(f, i):
    """Plot the predictions, the truth and the relative error for the
    ith month as a map. i=1 is the first month, etc."""

    # Time slice
    fmonth = f.isel(time=i - 1)

    # Set up figure and axes.
    # NOTE(review): matplotlib figsize is (width, height); the original
    # named these (h, w) = (24, 12) but passed them as (h, w), so the
    # value labelled "h" was actually the width. Renamed to match usage.
    fig_width, fig_height = 24, 12
    fig = plt.figure(figsize=(fig_width, fig_height))
    ax1 = plt.subplot2grid((3, 2), (0, 0),
                           projection=ccrs.PlateCarree(central_longitude=0))
    ax2 = plt.subplot2grid((3, 2), (0, 1), sharey=ax1,
                           projection=ccrs.PlateCarree(central_longitude=0))
    ax3 = plt.subplot2grid((3, 2), (1, 0), colspan=2, rowspan=2,
                           projection=ccrs.PlateCarree(central_longitude=0))

    # Plot it: truth (top left), prediction (top right), relative error (bottom)
    plot_to_subplot(fmonth, ax1, 'LST_Day_CMG', True)
    plot_to_subplot(fmonth, ax2, 'predictions', True)
    plot_to_subplot(fmonth, ax3, 'relative_error', False)

    plt.subplots_adjust(wspace=0.1, hspace=0.1)


def get_errors(df):
    """Return (max, min, mean) of the absolute relative error over `df`."""
    max_error = np.max(abs(df['relative_error']))
    min_error = np.min(abs(df['relative_error']))
    mean_error = np.mean(abs(df['relative_error']))
    return max_error, min_error, mean_error


def evaluate_month(df, i):
    """Print (time, min, max, mean) of the absolute relative error for the
    ith month. i=1 is the first month, etc."""
    times_index = np.unique(df.index.get_level_values('time').values)
    df_month = df.loc[(slice(None), slice(None), times_index[i - 1]), :]
    max_error, min_error, mean_error = get_errors(df_month)
    print(times_index[i - 1], min_error, max_error, mean_error)
```

We can grab how good the predictions were for a single month:

```
i = 1
plot_month_map(xr_dataset, i)
evaluate_month(df, i)
```

And can grab the overall error over the entire prediction set (max/min/mean):

```
get_errors(df)
```

# Questions

* What features do we have access to when actually running our models?
* What about warm vs control climates? Are there values in warm climates that are not observed in control climates? i.e. does our training data cover the whole parameter space?
* How accurate do we need to be? Mean error vs worst case error?
github_jupyter
```
import os
import json

import numpy as np

from pymedphys.level1.msqconnect import mosaiq_connect
from pymedphys.level1.configutilities import get_index, get_data_directory
from pymedphys.level2.msqdelivery import multi_fetch_and_verify_mosaiq
from pymedphys.level4.comparebygantry import (
    group_consecutive_logfiles, assert_array_agreement,
    get_gantry_tolerance, get_logfile_delivery_data_bygantry,
    get_logfile_mu_density_bygantry, get_mosaiq_delivery_data_bygantry,
    get_mosaiq_mu_density_bygantry, get_comparison_results,
    get_mappings, get_comparisons_byfield
)

with open('../config.json') as config_file:
    config = json.load(config_file)

data_directory = get_data_directory(config)
cache_filepath = os.path.join(data_directory, 'cache', 'dmlc_comparison.json')
cache_scratch_filepath = os.path.join(
    data_directory, 'cache', 'dmlc_comparison_scratch.json')

with open(cache_filepath, 'r') as cache_file:
    cache = json.load(cache_file)

index = get_index(config)
file_hashes = np.array(list(index.keys()))

# Keep only non-QA DMLC deliveries
field_types = np.array([
    index[file_hash]['delivery_details']['field_type']
    for file_hash in file_hashes
])
file_hashes = file_hashes[field_types == 'DMLC']

is_qa = np.array([
    index[file_hash]['delivery_details']['qa_mode']
    for file_hash in file_hashes
])
file_hashes = file_hashes[np.invert(is_qa)]

machine = np.array([
    index[file_hash]['logfile_header']['machine']
    for file_hash in file_hashes
])

# limit to RCCC for now
file_hashes = file_hashes[(machine == '2619') | (machine == '2694')]

np.random.shuffle(file_hashes)

# might need course information
index[file_hashes[0]]

patient_grouped_fields, field_id_grouped_hashes = get_mappings(index, file_hashes)

patient_ids = list(patient_grouped_fields.keys())
patient_id = patient_ids[0]
field_ids = patient_grouped_fields[patient_id]
patient_id


# NOTE(review): this local definition deliberately shadows the
# get_comparisons_byfield imported from comparebygantry above — the imported
# name is never used once this cell runs.
def get_comparisons_byfield(index, config, field_ids, field_id_grouped_hashes):
    """For each field, compare Mosaiq MU density against the logfile MU
    density per gantry angle and return {field_id: comparison_results}."""
    # Fetch all Mosaiq delivery data in one connection before comparing
    mosaiq_delivery_data_byfield = dict()
    with mosaiq_connect('msqsql') as cursor:
        for field_id in field_ids:
            mosaiq_delivery_data_byfield[field_id] = multi_fetch_and_verify_mosaiq(
                cursor, field_id)

    comparisons_byfield = dict()

    for field_id in field_ids:
        keys = np.array(field_id_grouped_hashes[field_id])
        logfile_groups = group_consecutive_logfiles(keys, index)
        logfile_groups = [
            tuple(group)
            for group in logfile_groups
        ]

        mosaiq_delivery_data = mosaiq_delivery_data_byfield[field_id]
        mosaiq_gantry_angles = np.unique(mosaiq_delivery_data.gantry)

        logfile_delivery_data_bygantry = get_logfile_delivery_data_bygantry(
            index, config, logfile_groups, mosaiq_gantry_angles)
        logfile_mu_density_bygantry = get_logfile_mu_density_bygantry(
            logfile_groups, mosaiq_gantry_angles, logfile_delivery_data_bygantry)
        mosaiq_delivery_data_bygantry = get_mosaiq_delivery_data_bygantry(
            mosaiq_delivery_data)
        mosaiq_mu_density_bygantry = get_mosaiq_mu_density_bygantry(
            mosaiq_delivery_data_bygantry)

        comparison_results = get_comparison_results(
            mosaiq_mu_density_bygantry, logfile_mu_density_bygantry)

        comparisons_byfield[field_id] = comparison_results

    return comparisons_byfield


comparisons_byfield = get_comparisons_byfield(
    index, config, field_ids, field_id_grouped_hashes)

comparisons_byfield
```
github_jupyter
<a href="https://colab.research.google.com/github/phenix-project/Colabs/blob/main/alphafold2/AlphaFoldWithDensityMap.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ### <center> <b> <font color='black'> AlphaFold with a density map </font></b> </center> <font color='green'>This notebook integrates Phenix model rebuilding with AlphaFold to improve AlphaFold modeling. You upload a sequence and a density map (ccp4/mrc format) and it carries out cycles of AlphaFold modeling, rebuilding with the density map, and AlphaFold modeling with the rebuilt model as a template. In each cycle you get a new AlphaFold model and a rebuilt model. To run this notebook you will need a Google account. Most of the demos can be run with a free account; to run with long protein chains or to run faster you can use a Colab Pro or Pro+ account. To understand how this all works see the Phenix tutorial video ["AlphaFold changes everything"](https://youtu.be/9IExeA_A8Xs) and the [BioRxiv preprint](https://www.biorxiv.org/content/10.1101/2022.01.07.475350v2) on using AlphaFold with a density map. You can run a demo of any one of 25 structures by selecting one in the second cell. You then only need to select the demo and enter the Phenix download password. This notebook is derived from [ColabFold](https://colab.research.google.com/github/sokrypton/ColabFold/blob/main/AlphaFold2.ipynb) and the DeepMind [AlphaFold2 Colab](https://colab.research.google.com/github/deepmind/alphafold/blob/main/notebooks/AlphaFold.ipynb). ----------------- <b> <font color='black'> <center>Instructions for a simple run: </b><p><i>Note: If this is the first time you’re running this notebook, be sure to check out the helpful hints at the bottom of the page!</center> </p></i></font> 1. Run the first cell to install condacolab and reboot the virtual machine. You need to do this <b><i>before</i></b> using <b><i>Run all</i></b> in step 3. 2. 
While the virtual machine is rebooting, select the "Basic Inputs" cell, type in a Phenix download password, and either (A) select a demo or (B) paste in a sequence, resolution and jobname. You can also edit the Options in the next cell if you want. 3. If you are not running a demo, open Google Drive in a new browser window, make a new folder called <b><i>ColabInputs</i></b>, and upload your map file (CCP4/MRC) there. Be sure the name of your map file starts with the jobname. You can alternatively upload your map file directly by making the input_directory field blank in the Options cell. 4. Start your run by going up to the <b><i>Runtime</i></b> pulldown menu and selecting <b><i>Run all</i></b>. 5. Scroll down the page and follow what is going on. If necessary, upload your map file when the Upload button appears below the "Setting up input files" form. If you use Google drive for your input and output files you will be asked for permission. ----------------- <b> <font color='black'> <center>Please cite the ColabFold and AlphaFold2 papers if you use this notebook:</center> </font></b> - <font color='green'>[Mirdita, M., Ovchinnikov, S., Steinegger, M.(2021). ColabFold - Making protein folding accessible to all *bioRxiv*, 2021.08.15.456425](https://www.biorxiv.org/content/10.1101/2021.08.15.456425v2)</font> - <font color='green'> [Jumper, J., Evans, R., Pritzel, A. et al. Highly accurate protein structure prediction with AlphaFold. Nature 596, 583–589 (2021)](https://www.nature.com/articles/s41586-021-03819-2) </font> ----------------- ``` #@title 1. Hit the triangle <b>Run</b> button to the left to install condacolab and reboot the virtual machine. #@markdown You can edit the forms below while it is rebooting. #@markdown You may get questions about this notebook not being authored by Google and about needing a high-RAM environment. Just click OK to go on. 
#@markdown In 30 sec you get 3 black pop-up messages in the lower left corner of window about a crash (because of the reboot). #@markdown Close the last one and you are ready to go with <b><i>Runtime</i></b> / <b><i>Run all</i></b> once you have entered all your options. #@markdown <i>Normally leave box below at Standard</i> custom_update = 'Standard' #@param {type:'string'} ['None','Standard','Latest'] if custom_update == 'None': custom_update = None # Get the helper python files import os os.chdir("/content/") file_name = 'phenix_colab_utils.py' if os.path.isfile(file_name): os.remove(file_name) os.environ['file_name'] = file_name result = os.system("wget -qnc https://raw.githubusercontent.com/phenix-project/Colabs/main/alphafold2/$file_name") print("About to install condacolab...") import phenix_colab_utils as cu cu.get_helper_files(custom_update = custom_update) if custom_update: os.system("touch NEED_UPDATES") elif os.path.isfile("NEED_UPDATES"): os.remove("NEED_UPDATES") # get all the other helper files cu.clear_python_caches() cu.install_condacolab() !touch STEP_1 print("Ready with condacolab installed...close the last of 3 crash messages in lower left corner when it comes up") #@title 2. Basic inputs (Required) import os if not os.path.isfile("STEP_1"): raise AssertionError("Please run step 1 first") os.chdir("/content") from phenix_colab_utils import exit, get_map_name, get_demo_info, set_up_demo, make_four_char_name if not os.path.isfile("STEP_1"): exit("Please run step 1 first...") phenix_download_password='' #@param {type:"string"} #@markdown <b><i>Demo run</i></b>: Select the structure to predict. <i>(Any text entered in the <b>Normal run</b> section will be ignored if you select a demo</i>). 
demo_to_run = 'None (Demos with boxed maps, approx timings are for Colab; Pro is 2x faster, Pro+ 3x)' #@param ['None (Demos with boxed maps, approx timings are for Colab; Pro is 2x faster, Pro+ 3x)', '7mjs (EMDB 23883, 3.03 A, 132 residues, 2 hours, RMSD (A) Start: 3.2 End: 1.0)', '7lx5 (EMDB 23566, 3.44 A, 196 residues, 4 hours, RMSD (A) Start: 2.8 End: 2.2)', '7c2k (EMDB 30275, 2.93 A, 927 residues, 10 hours, RMSD (A) Start: 4.1 End: 1.8 NOTE: requires Colab Pro)', '7ev9 (EMDB 31325, 2.6 A, 382 residues, 5 hours, RMSD (A) Start: 2.0 End: 0.6)', '7kzz (EMDB 23093, 3.42 A, 281 residues, 3 hours, RMSD (A) Start: 2.7 End: 1.6)', '7mlz (EMDB 23914, 3.71 A, 196 residues, 4 hours, RMSD (A) Start: 3.1 End: 2.4)', '7l6u (EMDB 23208, 3.3 A, 311 residues, 10 hours, RMSD (A) Start: 2.5 End: 1.9)', '7mby (EMDB 23750, 2.44 A, 339 residues, 15 hours, RMSD (A) Start: 1.7 End: 0.6)', '7me0 (EMDB 23786, 2.48 A, 347 residues, 3 hours, RMSD (A) Start: 1.3 End: 0.3)', '7ls5 (EMDB 23502, 2.74 A, 243 residues, 10 hours, RMSD (A) Start: 1.0 End: 0.7)', '7n8i (EMDB 24237, 3 A, 106 residues, 1 hour, RMSD (A) Start: 0.4 End: 0.2)', '7lc6 (EMDB 23269, 3.7 A, 557 residues, 2 hours, RMSD (A) Start: 0.8 End: 0.6)', '7brm (EMDB 30160, 3.6 A, 257 residues, 5 hours, RMSD (A) Start: 4.2 End: 3.6)', '7lci (EMDB 23274, 2.9 A, 393 residues, 9 hours, RMSD (A) Start: 4.6 End: 3.0)', '7eda (EMDB 31062, 2.78 A, 334 residues, 4 hours, RMSD (A) Start: 4.0 End: 3.6)', '7l1k (EMDB 23110, 3.16 A, 149 residues, 2 hours, RMSD (A) Start: 0.6 End: 0.5)', '7m9c (EMDB 23723, 4.2 A, 257 residues, 3 hours, RMSD (A) Start: 1.4 End: 1.2)', '7lvr (EMDB 23541, 2.9 A, 441 residues, 12 hours, RMSD (A) Start: 1.1 End: 0.9)', '7rb9 (EMDB 24400, 3.76 A, 372 residues, 2 hours, RMSD (A) Start: 1.5 End: 1.4)', '7m7b (EMDB 23709, 2.95 A, 209 residues, 6 hours, RMSD (A) Start: 6.5 End: 6.2)', '7lsx (EMDB 23508, 3.61 A, 245 residues, 10 hours, RMSD (A) Start: 1.4 End: 1.5)', '7bxt (EMDB 30237, 4.2 A, 103 residues, 3 hours, RMSD (A) 
Start: 1.4 End: 2.2)', '7ku7 (EMDB 23035, 3.4 A, 269 residues, 6 hours, RMSD (A) Start: 18.5 End: 18.4)', '7lv9 (EMDB 23530, 4.5 A, 97 residues, RMSD (A) Start: 15.5 End: 15.5)', '7msw (EMDB 23970, 3.76 A, 635 residues, RMSD (A) Start: 27.0 End: 27.0)'] {type:"string"} if demo_to_run.split()[0] != "None": jobname, sequence, resolution = set_up_demo(demo_to_run) is_demo = True else: is_demo = False #@markdown <b><i>Normal run</i></b>: enter sequence of chain to predict (at least 20 residues), #@markdown resolution, name of this job. Upload your map file to a folder named #@markdown <i>ColabInputs</i> in your Google Drive (or get it ready to upload when a button #@markdown appears in cell 4). if (not is_demo): sequence = '' #@param {type:"string"} resolution = '' #@param {type:"string"} jobname = '' #@param {type:"string"} if resolution: try: resolution = float(resolution) except Exception as e: exit("Please supply a number for resolution") else: resolution = None query_sequence = sequence password = phenix_download_password # Check for required inputs if not password: exit("Please supply a Phenix download password") if not query_sequence and (not is_demo): exit("Please supply a demo or a query sequence, resolution and jobname") if not resolution and (not is_demo): exit("Please supply a demo or a query sequence, resolution and jobname") if not jobname and (not is_demo): exit("Please supply a demo or a query sequence, resolution and jobname") # Make jobname have the expected format orig_jobname = jobname jobname = make_four_char_name(jobname) if orig_jobname != jobname: print("Changing jobname to '%s' to match required format" %(jobname)) if (is_demo): print("\nRunning demo for %s" %(jobname)) print("\nJOBNAME:", jobname) print("RESOLUTION:",resolution) print("SEQUENCE:",query_sequence) # Save all parameters in a dictionary params = {} for p in ['resolution','jobname', 'password', 'query_sequence']: params[p] = locals().get(p,None) ! touch STEP_2 ! 
rm -f STEP_3 #@title 3. Options import os if not os.path.isfile("STEP_1"): raise AssertionError("Please run step 1 first") from phenix_colab_utils import exit if not os.path.isfile("STEP_2") or \ not 'is_demo' in locals().keys(): exit("Please run step 2 first...") #@markdown <b> A. Commonly-used options </b> #@markdown Include templates from the PDB (a good idea if there are similar proteins in the PDB) include_templates_from_pdb = False #@param {type:"boolean" } #@markdown Input directory containing your map and any models (usually <b>ColabInputs</b> on your Google drive). Leave blank to upload directly. Skip parts of the file name like /content/ or MyDrive/). Must be set to <b>ColabInputs</b> if demo is run. input_directory = "ColabInputs" #@param {type:"string"} if is_demo and input_directory != "ColabInputs": exit("For a demo the input_directory must be ColabInputs") #@markdown Maximum number of AlphaFold models to create on first cycle (best model chosen by plDDT, stops generating models if all have similar plDDT) maximum_number_of_models = 50#@param {type:"integer"} random_seed_iterations = maximum_number_of_models #@markdown Save outputs to the directory <b>ColabOutputs</b> on Google drive save_outputs_in_google_drive = True #@param {type:"boolean" } #@markdown Carry on from where you left off (requires saving outputs in Google drive with checkbox above on initial run) carry_on = False #@param {type: "boolean"} #@markdown <b> B. 
Advanced options</b> #@markdown Maximum cycles to run (fewer may be run if little change in models between cycles) maximum_cycles = 10#@param {type:"integer"} #@markdown Maximum number of templates from PDB to include (takes 1 minute per template): maximum_templates_from_pdb = 20#@param {type:"integer"} #@markdown Upload additional templates directly (read from input_directory if specified) upload_manual_templates = False #@param {type:"boolean" } #@markdown Upload MSA file (a3m format) directly (read from input_directory if specified) upload_msa_file = False #@param {type:"boolean" } #@markdown Specify if uploaded templates are already placed in the map and to be only used as suggestions for rebuilding and not as AlphaFold templates</font></i></b> uploaded_templates_are_fragment_suggestions = False #@param {type:"boolean" } uploaded_templates_are_map_to_model = uploaded_templates_are_fragment_suggestions #@markdown Specify how to use multiple sequence alignment information (you might use it only on first cycle to force AlphaFold to follow rebuilt models) msa_use = 'Use MSA throughout' #@param ["Use MSA throughout", "Use MSA in first cycle","Skip all MSA"] # Set actual parameters #@markdown Version of Phenix to use phenix_version ='dev-4536' #@param {type:"string"} version = phenix_version # rename variable #@markdown Random seed random_seed = 581867 #@param {type:"integer"} #@markdown Specify if you want to run a series of jobs by uploading a file with one jobname, resolution and sequence per line</i></b> upload_file_with_jobname_resolution_sequence_lines = False #@param {type:"boolean"} if is_demo and upload_file_with_jobname_resolution_sequence_lines: exit("For a demo upload_file_with_jobname_resolution_sequence_lines must be False") #@markdown Turn on debugging</i></b> debug = False #@param {type:"boolean"} # We are going to get these from uploaded file... 
if upload_file_with_jobname_resolution_sequence_lines: params['jobname'] = None params['resolution'] = None params['sequence'] = None if msa_use == "Use MSA throughout": skip_all_msa = False skip_all_msa_after_first_cycle = False elif msa_use == "Use MSA in first cycle": skip_all_msa = False skip_all_msa_after_first_cycle = True else: skip_all_msa = True skip_all_msa_after_first_cycle = True upload_maps = True # This version expects a map use_msa = (not skip_all_msa) minimum_random_seed_iterations = int(max(1,random_seed_iterations//20)) data_dir = "/content" content_dir = "/content" # Save parameters for p in ['content_dir','data_dir','save_outputs_in_google_drive', 'input_directory','working_directory', 'include_templates_from_pdb','maximum_templates_from_pdb', 'upload_msa_file', 'upload_manual_templates','uploaded_templates_are_map_to_model', 'maximum_cycles','version', 'upload_file_with_jobname_resolution_sequence_lines', 'use_msa','skip_all_msa_after_first_cycle', 'upload_maps','debug','carry_on','random_seed', 'random_seed_iterations','minimum_random_seed_iterations']: params[p] = locals().get(p,None) !touch STEP_3 #@title 4. Setting up input files... #@markdown You will be asked for permission to use your Google drive if needed. #@markdown The upload button will appear below this cell if needed import os if not os.path.isfile("STEP_1"): raise AssertionError("Please run step 1 first") if not os.path.isfile("STEP_3"): from phenix_colab_utils import exit exit("Please run steps 2-3 again before rerunning step 4...") # Set up the inputs using the helper python files from phenix_alphafold_utils import set_up_input_files params = set_up_input_files(params, convert_to_params = False) ! touch STEP_4 ! rm -f STEP_2 STEP_3 #@title 5. Installing Phenix, Alphafold and utilities... 
#@markdown This step takes 8 minutes import os if not os.path.isfile("STEP_1"): raise AssertionError("Please run step 1 first") from phenix_colab_utils import exit if not os.path.isfile("STEP_4"): exit("Please run steps 1-4 first...") import phenix_colab_utils as cu # Get tensorflow import before installation if not locals().get('tf'): tf = cu.import_tensorflow() # Install selected software cu.install_software( bioconda = True, phenix = True, phenix_version = params.get('version'), phenix_password = params.get('password'), alphafold = True, pdb_to_cif = True ) if os.path.isdir("updates") and os.path.isfile("NEED_UPDATES"): from install_updates import install_updates print("Installing updates") install_updates(skip_download = True) !touch STEP_5 #@title 6. Creating AlphaFold models import os if not os.path.isfile("STEP_1"): raise AssertionError("Please run step 1 first") from phenix_colab_utils import exit if not os.path.isfile("STEP_4"): exit("Please run steps 2-4 again before rerunning this step...") if not os.path.isfile("STEP_5"): exit("Please run step 5 first...") ! rm -f STEP_2 STEP_3 STEP_4 # Convert params from dict to alphafold_with_density_map params from phenix_alphafold_utils import get_alphafold_with_density_map_params params = get_alphafold_with_density_map_params(params) from run_alphafold_with_density_map import run_jobs # Working directory os.chdir(params.content_dir) results = run_jobs(params) #@title Utilities (skipped unless checked) # Put whatever utilities you want here. 
They will be run if checked clear_caches = False #@param {type:"boolean" } if clear_caches: from phenix_colab_utils import clear_python_caches clear_python_caches(modules = ['run_alphafold_with_density_map3','run_job','rebuild_model','install_phenix','run_fix_paths','runsh','mk_mock_template','mk_template','hh_process_seq','run_job','get_template_hit_list','run_alphafold_with_density_map','get_template_hit_list','get_cif_file_list','alphafold_utils','get_msa','get_templates_from_drive','phenix_alphafold_utils','phenix_colab_utils','clear_python_caches']) from phenix_colab_utils import clear_python_caches clear_python_caches() crash_deliberately_and_restart = False #@param {type:"boolean" } if crash_deliberately_and_restart: print("Crashing by using all memory. Results in restart, losing everything") [1]*10**10 upload_helper_files = False #@param {type:"boolean" } def get_helper_files(): import os for file_name in ['phenix_colab_utils.py', 'alphafold_utils.py','run_alphafold_with_density_map.py','phenix_alphafold_utils.py']: if os.path.isfile(file_name): os.remove(file_name) os.environ['file_name'] = file_name result = os.system("wget -qnc https://raw.githubusercontent.com/phenix-project/Colabs/main/alphafold2/$file_name") if upload_helper_files: get_helper_files() remove_everything_and_restart = False #@param {type:"boolean" } if remove_everything_and_restart: !kill -9 -1 auto_reload = False #@param {type:"boolean" } if auto_reload: %load_ext autoreload %autoreload 2 ``` **Helpful hints** **What this Colab notebook is good for** * The purpose of this notebook is to generate an AlphaFold model of a single protein chain that is compatible with a density map showing that chain. * The chain can be of any length between 20 and about 1000 residues. Longer chains could work but may fail due to the GPU memory required. * The density map should be at a resolution of about 4.5 A or better. 
The density map can be your complete map (perhaps showing many chains), or it can be a map boxed to contain just the chain of interest. If you can box the map that will always make it faster, and it could prevent failure in cases where docking of the predicted model into the map is difficult. **When this notebook will not work** * This notebook will fail if the AlphaFold prediction is of very low confidence (i.e., if few of the residues in the structure have plDDT values over 70). * The notebook will fail if it cannot find the location of the predicted model in the density map, or if the density map differs so much from the model that it cannot find a way to rebuild the model to agree with the map. **Password** * Your Phenix download password is the password you get from <a href = "https://phenix-online.org/download" target="_blank"> phenix_online.org/download </a> and that you (or someone from your institution) used to download Phenix. It is updated weekly so you may need to request a new one rather frequently. **Saving your results** * The best way to save your results is to leave the <b>save_outputs_in_google_drive</b> box in <b>Commonly-used options</b> checked. This way your results are saved in a <b>ColabOutputs</b> folder on your Google drive as they are generated and you can also <b>Carry on</b> if your notebook crashes or times out. * When your job is done a zip file with your results is normally downloaded automatically. Sometimes this doesn't work and you need to download it manually using the folder icon on the left side of the screen. All your files are in a folder named based on your job name. The zip file is in that folder and also in the main folder. All your important files are also on your Google drive in the <b>ColabOutputs</b> folder. **Running a demo** * You can run any one of 25 demo structures by selecting it in the second cell. 
If you run a demo you only need to select the demo and supply your Phenix download password (no other inputs are necessary and the sequence, resolution and jobname fields are ignored). * These demos are not selected to give good results...they are the same structures used in the paper describing this procedure. Some of them start out with very good AlphaFold predictions and basically nothing happens. A few start out with really poor AlphaFold predictions and again nothing happens. Most of the others start out with a moderately good AlphaFold prediction (average plDDT of 70-80) and then the procedure works pretty well and you get an improved plDDT and an improved model. The ones at the top of the list are likely to work best. You can see the expected results for any of the demos [here](https://phenix-online.org/phenix_data/terwilliger/alphafold_with_density_2022/Demos). * The approximate length of time (on Colab) required to run the demos is listed. These are estimates for the standard Colab. The run time is shorter on Colab Pro and on Colab Pro+. * Colab can crash even on the demos...this usually happens if your session does not have enough memory but there may be bugs in our code that cause crashes as well. If you contact us (use the Phenix GUI to do that) we will try to fix any bugs that you find. * You might want to leave the "save_outputs_in_google_drive" button on in the third cell so that the results are saved there as you go and so you can use the "carry_on" button to run again if it times out or crashes (see next section) * After you run the demo you can download a .zip file with the results from a directory that starts with the name of your demo (for example the 7c2k demo would be in a directory called 7c2k_30275). You can get to the directory with the folder button on the left side of the notebook. Then if you navigate to your .zip file you can hover your cursor over the file name and click on the three dots and select download. 
* The map file used in the demo will be in the directory "ColabInputs" with a file name starting with the name of your demo. **Carrying on after a timeout or crash** * If you save your results in your Google drive folder <b>ColabOutputs</b> by specifying a Google drive input_directory, you can continue on after a crash. You set up the inputs just as you did on the initial run, but check the <b>carry_on</b> box. You then (usually) go through the whole process again (reboot the virtual machine, then <b>Run all</b>). The notebook will look in your <b>ColabOutputs</b> directory for the files that it is going to create...if it finds them there it will use them instead of creating them again. If you are lucky you may be able to restart without rebooting...you can try by just selecting <b>Run all</b> again and if it runs you are ok. **Sequence format** * Your sequence should contain only the 1-letter code of one protein chain. It can contain spaces if you want. **File names and jobname must match** * Your AlphaFold predictions will be named yyyy_ALPHAFOLD_x.pdb and your rebuilt models yyyy_REBUILT_x.pdb, where yyyy is your jobname and x is the cycle number. * All model file names must start with 4 characters, optionally followed by "_" and more characters, and must end in ".pdb" or ".cif", Valid file names are abcd.pdb, abcd.cif, abcd_other.pdb. Non-valid names are abc.pdb, abcde.cif. * Your jobname must match the beginnings of your map file names and model file names. If your jobname is joba then your map file name must look like: joba_xxx.mrc or joba_yyy.ccp4. Your model file name must look like: joba_mymodel.pdb or joba.cif. This correspondence is used to match map and model files with jobnames. **Options for uploading your map file** * (A) Upload when the Upload button appears at the bottom of the cell after you hit Runtime / Run all in step 3 * (B) Upload in advance to a unique folder in your Google Drive and specify this directory in the entry form. 
* (C) as in B but upload to a unique new folder in /content/. Note that C requires using the command-line tool at the bottom left of the page to create a new directory like MyFiles, uploading with the upload button near the top left of the page, and moving the uploaded file from /content/my_file.mrc to /content/MyFiles/my_file.mrc. **Uploading a file with all your file information** * To upload a file with a jobname, resolution, and sequence on each line, check ***upload_file_with_jobname_resolution_sequence_lines*** and hit the ***Run*** button to the left of the first cell. * If you upload a file with multiple sequences, each line of the file should have exactly one job name, a space, resolution, and a sequence, like this: 7n8i_24237 2.3 VIWMTQSPSSLSASVGDRVTITCQASQDIRFYLNWYQQKPGKAPKLLISDASNMETGVPSRFSGS 7lvr_23541 3 MRECISIHVGQAGVQIGNACWELYCLEHGIQPDGQMPSDKTIGGGDDSFNTFFSETG **Randomized tries on first cycle** * You can specify how many AlphaFold models to try and build at the start (50 may be a good number unless you have a big structure). Models are scored by plDDT and the highest-scoring one is kept. If all the models have similar plDDT as they are being created the randomization step is discontinued and the best one found is used. **Try turning off MSA's after first cycle** * You can encourage AlphaFold to use your rebuilt templates by specifying skip_all_msa_after_first_cycle. This will just use your template information and intrinsic structural information in AlphaFold for all cycles except the first. **Try including templates from the PDB** * The default is to not include templates from the PDB, but you can often improve your modeling a lot if you do include them. If you know that your structure is similar to some structures in the PDB it is a good idea to include templates from the PDB. You can also choose specific chains from the PDB and upload them yourself and check the "upload_manual_templates" box. 
**Reproducibility** * The tensorflow and AlphaFold2 code will give different results depending on the GPU that is used and the random seed that you choose. You can see what GPU you have by opening a cell with the '+Code' button and typing: ! nvidia-smi and then running that cell. The GPU type will be listed (like Tesla V100-SXM2). You get a much higher-quality GPU with Colab Pro or Pro+ than with the free version. **Running cells in this Colab notebook** * You can step through this notebook one part at a time by hitting the ***Run*** buttons to the left one at a time. * The cell that is active is indicated by a ***Run*** button that has turned into a black circle with a moving black arc * When execution is done, the ***Run*** button will go back to its original white triangle inside a black circle * You can stop execution of the active cell by hitting its ***Run*** button. It will turn red to indicate it has stopped. * You can rerun any cell any time that nothing is running. That means you can go all the way through, then go back to the first cell and enter another sequence and redo the procedure. * If something goes wrong, the Colab Notebook will print out an error message. Usually this will be something telling you how to change your inputs. You enter your new inputs and hit the ***Run*** button again to carry on. **Possible problems** * The automatic download may not always work. Normally the file download starts when the .zip files are created, but the actual download happens when all the AlphaFold models are completed. You can click on the folder icon to the left of the window and download your jobname.zip file manually. Open and close the file browser to show recently-added files. * Your Colab connection may time out if you go away and leave it, or if you run for a long time (more than an hour). If your connection times out you lose everything that is not yet downloaded. So you might want to download as you go or specify a Google drive input directory. 
* Google Colab assigns different types of GPUs with varying amount of memory. Some might not have enough memory to predict the structure for a long sequence. **Result zip file contents** 1. Alphafold prediction for each cycle 2. Rebuilt model for each cycle 3. PAE matrix (.jsn) for each cycle 4. PAE and plDDT figures (.png) for each cycle **Colab limitations** * While Colab is free, it is designed for interactive work and not-unlimited memory and GPU usage. It will time-out after a few hours and it may check that you are not a robot at random times. On a time-out you may lose your work. You can increase your allowed time with Colab+ * AlphaFold can crash if it requires too much memory. On a crash you may lose all your work that is not yet downloaded. You can have more memory accessible if you have Colab+. If you are familiar with Colab scripts you can try this [hack](https://towardsdatascience.com/double-your-google-colab-ram-in-10-seconds-using-these-10-characters-efa636e646ff ) with the <b>crash_deliberately_and_restart</b> check-off in the Utilities section to increase your memory allowance. **Description of the plots** * **Number of sequences per position** - Look for at least 30 sequences per position, for best performance, ideally 100 sequences. * **Predicted lDDT per position** - model confidence (out of 100) at each position. The higher the better. * **Predicted Alignment Error** - For homooligomers, this could be a useful metric to assess how confident the model is about the interface. The lower the better. **Updates** - <b> <font color='green'>2022-01-25 Includes integrated rebuilding and AlphaFold2 modeling - <b> <font color='green'>2022-02-18 Includes demos of 25 chains from August 2022 PDB entries and corresponding boxed maps. - <b> <font color='green'>2022-03-01 Allows any number of starting AlphaFold models. 
**Acknowledgments** - <b> <font color='green'>This notebook is based on the very nice notebook from ColabFold ([Mirdita et al., *bioRxiv*, 2021](https://www.biorxiv.org/content/10.1101/2021.08.15.456425v1), https://github.com/sokrypton/ColabFold)</font></b> - <b><font color='green'>ColabFold is based on AlphaFold2 [(Jumper et al. 2021)](https://www.nature.com/articles/s41586-021-03819-2) </font></b>
github_jupyter
## Classes for callback implementors ``` from fastai.gen_doc.nbdoc import * from fastai.callback import * from fastai.basics import * ``` fastai provides a powerful *callback* system, which is documented on the [`callbacks`](/callbacks.html#callbacks) page; look on that page if you're just looking for how to use existing callbacks. If you want to create your own, you'll need to use the classes discussed below. A key motivation for the callback system is that additional functionality can be entirely implemented in a single callback, so that it's easily read. By using this trick, we will have different methods categorized in different callbacks where we will find clearly stated all the interventions the method makes in training. For instance in the [`LRFinder`](/callbacks.lr_finder.html#LRFinder) callback, on top of running the fit function with exponentially growing LRs, it needs to handle some preparation and clean-up, and all this code can be in the same callback so we know exactly what it is doing and where to look if we need to change something. In addition, it allows our [`fit`](/basic_train.html#fit) function to be very clean and simple, yet still easily extended. So far in implementing a number of recent papers, we haven't yet come across any situation where we had to modify our training loop source code - we've been able to use callbacks every time. ``` show_doc(Callback) ``` To create a new type of callback, you'll need to inherit from this class, and implement one or more methods as required for your purposes. Perhaps the easiest way to get started is to look at the source code for some of the pre-defined fastai callbacks. You might be surprised at how simple they are! 
For instance, here is the **entire** source code for [`GradientClipping`](/train.html#GradientClipping): ```python @dataclass class GradientClipping(LearnerCallback): clip:float def on_backward_end(self, **kwargs): if self.clip: nn.utils.clip_grad_norm_(self.learn.model.parameters(), self.clip) ``` You generally want your custom callback constructor to take a [`Learner`](/basic_train.html#Learner) parameter, e.g.: ```python @dataclass class MyCallback(Callback): learn:Learner ``` Note that this allows the callback user to just pass your callback name to `callback_fns` when constructing their [`Learner`](/basic_train.html#Learner), since that always passes `self` when constructing callbacks from `callback_fns`. In addition, by passing the learner, this callback will have access to everything: e.g all the inputs/outputs as they are calculated, the losses, and also the data loaders, the optimizer, etc. At any time: - Changing self.learn.data.train_dl or self.data.valid_dl will change them inside the fit function (we just need to pass the [`DataBunch`](/basic_data.html#DataBunch) object to the fit function and not data.train_dl/data.valid_dl) - Changing self.learn.opt.opt (We have an [`OptimWrapper`](/callback.html#OptimWrapper) on top of the actual optimizer) will change it inside the fit function. - Changing self.learn.data or self.learn.opt directly WILL NOT change the data or the optimizer inside the fit function. 
In any of the callbacks you can unpack in the kwargs: - `n_epochs`, contains the number of epochs the training will take in total - `epoch`, contains the number of the current epoch - `iteration`, contains the number of iterations done since the beginning of training - `num_batch`, contains the number of the batch we're at in the dataloader - `last_input`, contains the last input that got through the model (eventually updated by a callback) - `last_target`, contains the last target that got through the model (eventually updated by a callback) - `last_output`, contains the last output emitted by the model (eventually updated by a callback) - `last_loss`, contains the last loss computed (eventually updated by a callback) - `smooth_loss`, contains the smoothed version of the loss - `last_metrics`, contains the last validation loss and metrics computed - `pbar`, the progress bar - [`train`](/train.html#train), flag to know if we're in training mode or not - `stop_training`, that will stop the training at the end of the current epoch if True - `stop_epoch`, that will break the current epoch loop - `skip_step`, that will skip the next optimizer step - `skip_zero`, that will skip the next zero grad When returning a dictionary with those key names, the state of the [`CallbackHandler`](/callback.html#CallbackHandler) will be updated with any of those changes, so in any [`Callback`](/callback.html#Callback), you can change those values. ### Methods your subclass can implement All of these methods are optional; your subclass can handle as many or as few as you require. ``` show_doc(Callback.on_train_begin) ``` Here we can initialize anything we need. The optimizer has now been initialized. 
We can change any hyper-parameters by typing, for instance: ``` self.opt.lr = new_lr self.opt.mom = new_mom self.opt.wd = new_wd self.opt.beta = new_beta ``` ``` show_doc(Callback.on_epoch_begin) ``` This is not technically required since we have `on_train_begin` for epoch 0 and `on_epoch_end` for all the other epochs, yet it makes writing code that needs to be done at the beginning of every epoch easy and more readable. ``` show_doc(Callback.on_batch_begin) ``` Here is the perfect place to prepare everything before the model is called. Example: change the values of the hyperparameters (if we don't do it on_batch_end instead) At the end of that event `xb`,`yb` will be set to `last_input`, `last_target` of the state of the [`CallbackHandler`](/callback.html#CallbackHandler). ``` show_doc(Callback.on_loss_begin) ``` Here is the place to run some code that needs to be executed after the output has been computed but before the loss computation. Example: putting the output back in FP32 when training in mixed precision. At the end of that event the output will be set to `last_output` of the state of the [`CallbackHandler`](/callback.html#CallbackHandler). ``` show_doc(Callback.on_backward_begin) ``` Here is the place to run some code that needs to be executed after the loss has been computed but before the gradient computation. Example: `reg_fn` in RNNs. At the end of that event the output will be set to `last_loss` of the state of the [`CallbackHandler`](/callback.html#CallbackHandler). ``` show_doc(Callback.on_backward_end) ``` Here is the place to run some code that needs to be executed after the gradients have been computed but before the optimizer is called. If `skip_step` is `True` at the end of this event, the optimizer step is skipped. ``` show_doc(Callback.on_step_end) ``` Here is the place to run some code that needs to be executed after the optimizer step but before the gradients are zeroed. 
If `skip_zero` is `True` at the end of this event, the gradients are not zeroed. ``` show_doc(Callback.on_batch_end) ``` Here is the place to run some code that needs to be executed after a batch is fully done. Example: change the values of the hyperparameters (if we don't do it on_batch_begin instead) If `end_epoch` is `True` at the end of this event, the current epoch is interrupted (example: lr_finder stops the training when the loss explodes). ``` show_doc(Callback.on_epoch_end) ``` Here is the place to run some code that needs to be executed at the end of an epoch. Example: Save the model if we have a new best validation loss/metric. If `end_training` is `True` at the end of this event, the training stops (example: early stopping). ``` show_doc(Callback.on_train_end) ``` Here is the place to tidy everything. It's always executed even if there was an error during the training loop, and has an extra kwarg named exception to check if there was an exception or not. Examples: save log_files, load best model found during training ``` show_doc(Callback.get_state) ``` This is used internally when trying to export a [`Learner`](/basic_train.html#Learner). You won't need to subclass this function but you can add attribute names to the lists `exclude` or `not_min`of the [`Callback`](/callback.html#Callback) you are designing. Attributes in `exclude` are never saved, attributes in `not_min` only if `minimal=False`. ## Annealing functions The following functions provide different annealing schedules. You probably won't need to call them directly, but would instead use them as part of a callback. 
Here's what each one looks like: ``` annealings = "NO LINEAR COS EXP POLY".split() fns = [annealing_no, annealing_linear, annealing_cos, annealing_exp, annealing_poly(0.8)] for fn, t in zip(fns, annealings): plt.plot(np.arange(0, 100), [fn(2, 1e-2, o) for o in np.linspace(0.01,1,100)], label=t) plt.legend(); show_doc(annealing_cos) show_doc(annealing_exp) show_doc(annealing_linear) show_doc(annealing_no) show_doc(annealing_poly) show_doc(CallbackHandler) ``` You probably won't need to use this class yourself. It's used by fastai to combine all the callbacks together and call any relevant callback functions for each training stage. The methods below simply call the equivalent method in each callback function in [`self.callbacks`](/callbacks.html#callbacks). ``` show_doc(CallbackHandler.on_backward_begin) show_doc(CallbackHandler.on_backward_end) show_doc(CallbackHandler.on_batch_begin) show_doc(CallbackHandler.on_batch_end) show_doc(CallbackHandler.on_epoch_begin) show_doc(CallbackHandler.on_epoch_end) show_doc(CallbackHandler.on_loss_begin) show_doc(CallbackHandler.on_step_end) show_doc(CallbackHandler.on_train_begin) show_doc(CallbackHandler.on_train_end) show_doc(CallbackHandler.set_dl) show_doc(OptimWrapper) ``` This is a convenience class that provides a consistent API for getting and setting optimizer hyperparameters. For instance, for [`optim.Adam`](https://pytorch.org/docs/stable/optim.html#torch.optim.Adam) the momentum parameter is actually `betas[0]`, whereas for [`optim.SGD`](https://pytorch.org/docs/stable/optim.html#torch.optim.SGD) it's simply `momentum`. As another example, the details of handling weight decay depend on whether you are using `true_wd` or the traditional L2 regularization approach. This class also handles setting different WD and LR for each layer group, for discriminative layer training. 
``` show_doc(OptimWrapper.clear) show_doc(OptimWrapper.create) show_doc(OptimWrapper.new) show_doc(OptimWrapper.read_defaults) show_doc(OptimWrapper.read_val) show_doc(OptimWrapper.set_val) show_doc(OptimWrapper.step) show_doc(OptimWrapper.zero_grad) show_doc(SmoothenValue) ``` Used for smoothing loss in [`Recorder`](/basic_train.html#Recorder). ``` show_doc(SmoothenValue.add_value) show_doc(Scheduler) ``` Used for creating annealing schedules, mainly for [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler). ``` show_doc(Scheduler.step) show_doc(AverageMetric) ``` See the documentation on [`metrics`](/metrics.html#metrics) for more information. ### Callback methods You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality. ``` show_doc(AverageMetric.on_epoch_begin) show_doc(AverageMetric.on_batch_end) show_doc(AverageMetric.on_epoch_end) ``` ## Undocumented Methods - Methods moved below this line will intentionally be hidden ## New Methods - Please document or move to the undocumented section
github_jupyter
# Credit Card Fraud Classification with various ML Models ``` import pandas as pd import numpy as np from sklearn.preprocessing import MinMaxScaler, StandardScaler dataset = pd.read_csv('creditcard.csv') dataset dataset.describe() dataset[dataset['Class'] == 1] zeros = 284315 ones = 492 ones / len(dataset) * 100 ``` # Data Visualisation ``` import matplotlib.pyplot as plt from pylab import * import seaborn as sns %matplotlib inline # To check if there is a null value in the data sns.heatmap(dataset.isnull(), yticklabels=False, cmap='viridis') # to see the count of class sns.countplot(x='Class', data=dataset) plt.figure(figsize=(18, 12)) sns.heatmap(dataset.corr(), vmin=-1, cmap='YlGnBu') plt.show() fig, axs = plt.subplots(6, 5, squeeze=False, figsize=(18,12)) #plt.subplot(figsize=(28, 28)) for i, ax in enumerate(axs.flatten()): ax.set_facecolor('xkcd:grey') ax.set_title(dataset.columns[i]) sns.distplot(dataset.iloc[:, i], ax=ax, color="#DC143C", fit_kws={"color": "#4e8ef5"}) ax.set_xlabel('') fig.tight_layout(h_pad=-1.5, w_pad=-1.5) plt.show() targets = dataset['Class'] dataset.drop(['Time', 'Class', 'Amount'], axis = 1, inplace=True) dataset.head() cols = dataset.columns.difference(['Class']) cols mm_scale = MinMaxScaler() dataset = mm_scale.fit_transform(dataset) dataset dataset = pd.DataFrame(data=dataset, columns=cols) dataset = pd.concat([dataset, targets], axis=1) dataset X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values X from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 10) len(y_test[y_test == 1]) ``` ## Logistic Regression ``` from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, log_loss, matthews_corrcoef logistic_regressor = LogisticRegression(max_iter=1500, class_weight={1: 3.2}, verbose=3, random_state=10, solver='lbfgs') #class weight gives better results 
logistic_regressor.fit(X_train, y_train) ``` ### Results ### Train data ``` predictions_train = logistic_regressor.predict(X_train) print(confusion_matrix(y_train, predictions_train)) print('\n') print(classification_report(y_train, predictions_train)) print('\n') print(accuracy_score(y_train, predictions_train)) print(log_loss(y_train, predictions_train)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_train, predictions_train) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` ### Test data ``` predictions_test = logistic_regressor.predict(X_test) print(confusion_matrix(y_test, predictions_test).ravel()) # tn, fp, fn, tp print(confusion_matrix(y_test, predictions_test)) print('\n') print(classification_report(y_test, predictions_test)) print('\n') print(accuracy_score(y_test, predictions_test)) print(log_loss(y_test, predictions_test)) # # Reshape the prediction values to 0 for valid, 1 for fraud. 
# predictions[predictions == 1] = 0 # predictions[predictions == -1] = 1 # n_errors = (predictions != y_test).sum() #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_test, predictions_test) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` # SVM ``` from sklearn.svm import SVC model = SVC(gamma=0.5, kernel='linear', random_state=10) model.fit(X_train, y_train) ``` ## Train ``` svm_predictions_train = model.predict(X_train) print(confusion_matrix(y_train, svm_predictions_train)) print('\n') print(classification_report(y_train, svm_predictions_train)) print('\n') print(accuracy_score(y_train, svm_predictions_train)) print(log_loss(y_train, svm_predictions_train)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_train, svm_predictions_train) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` ## Test ``` svm_predictions_test = model.predict(X_test) print(confusion_matrix(y_test, svm_predictions_test)) print('\n') print(classification_report(y_test, svm_predictions_test)) print('\n') print(accuracy_score(y_test, svm_predictions_test)) print(log_loss(y_test, svm_predictions_test)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_test, svm_predictions_test) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` # Decision Tree ``` from sklearn.tree import DecisionTreeClassifier from sklearn import tree decision_classifier = DecisionTreeClassifier(criterion='gini', 
random_state=10) decision_classifier.fit(X_train, y_train) ``` ## Train data predictions ``` predictions_dc_train = decision_classifier.predict(X_train) print(confusion_matrix(y_train, predictions_dc_train)) print('\n') print(classification_report(y_train, predictions_dc_train)) print('\n') print(accuracy_score(y_train, predictions_dc_train)) print(log_loss(y_train, predictions_dc_train)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_train, predictions_dc_train) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` ## Test data ``` predictions_dc_test = decision_classifier.predict(X_test) print(confusion_matrix(y_test, predictions_dc_test)) print('\n') print(classification_report(y_test, predictions_dc_test)) print('\n') print(accuracy_score(y_test, predictions_dc_test)) print(log_loss(y_test, predictions_dc_test)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_test, predictions_dc_test) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` # Random Forest ``` from sklearn.ensemble import RandomForestClassifier forest_classifier = RandomForestClassifier(n_estimators=5, class_weight={1: 4}, random_state=10) forest_classifier.fit(X_train, y_train) ``` ## Train data ``` predictions_rf_train = forest_classifier.predict(X_train) print(confusion_matrix(y_train, predictions_rf_train)) print('\n') print(classification_report(y_train, predictions_rf_train)) print('\n') print(accuracy_score(y_train, predictions_rf_train)) print(log_loss(y_train, predictions_rf_train)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_train, 
predictions_rf_train) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` ## test data ``` predictions_rf_test = forest_classifier.predict(X_test) print(confusion_matrix(y_test, predictions_rf_test)) print('\n') print(classification_report(y_test, predictions_rf_test)) print('\n') print(accuracy_score(y_test, predictions_rf_test)) print(log_loss(y_test, predictions_rf_test)) print('\n') print(matthews_corrcoef(y_test, predictions_rf_test)) print('\n') print(confusion_matrix(y_test, predictions_rf_test).ravel()) # tn, fp , fn, tp #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_test, predictions_rf_test) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` ### Random forest worked better without data normalisation or i can say immune to imbalanced data # XGBoost ``` from xgboost import XGBClassifier, XGBRFClassifier xgbc = XGBClassifier(random_state=10, gamma=0.5) xgbcrf = XGBRFClassifier(random_state=10, gamma=0.5) xgbc.fit(X_train, y_train) xgbcrf.fit(X_train, y_train) ``` ## Train ``` predictions_xg_train = xgbcrf.predict(X_train) print(confusion_matrix(y_train, predictions_xg_train)) print('\n') print(classification_report(y_train, predictions_xg_train)) print('\n') print(accuracy_score(y_train, predictions_xg_train)) print(log_loss(y_train, predictions_xg_train)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_train, predictions_xg_train) plt.figure(figsize=(8,6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` ## Test 
``` predictions_xg_test = xgbcrf.predict(X_test) print(confusion_matrix(y_test, predictions_xg_test)) print('\n') print(classification_report(y_test, predictions_xg_test)) print('\n') print(accuracy_score(y_test, predictions_xg_test)) print(log_loss(y_test, predictions_xg_test)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_test, predictions_xg_test) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` # Scikit-Learn MLP ``` from sklearn.neural_network import MLPClassifier mlp = MLPClassifier(max_iter=50, random_state=10) mlp.fit(X_train, y_train) ``` # Train data ``` predictions_mlp_train = mlp.predict(X_train) print(confusion_matrix(y_train, predictions_mlp_train)) print('\n') print(classification_report(y_train, predictions_mlp_train)) print('\n') print(accuracy_score(y_train, predictions_mlp_train)) print(log_loss(y_train, predictions_mlp_train)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_train, predictions_mlp_train) plt.figure(figsize=(8,6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') plt.xlabel('Predicted class') plt.show() ``` ## Test data ``` predictions_mlp_test = mlp.predict(X_test) print(confusion_matrix(y_test, predictions_mlp_test)) print('\n') print(classification_report(y_test, predictions_mlp_test)) print('\n') print(accuracy_score(y_test, predictions_mlp_test)) print(log_loss(y_test, predictions_mlp_test)) #printing the confusion matrix LABELS = ['Normal' , 'Fraud'] conf_matrix = confusion_matrix(y_test, predictions_mlp_test) plt.figure(figsize=(8, 6)) sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt='d'); plt.title('Confusion matrix') plt.ylabel('True class') 
plt.xlabel('Predicted class') plt.show() ``` ### With data normalization MPL converge faster than using unprocessed data
github_jupyter
``` from fastai2.vision.all import * from siren import Siren, apply_siren_init, siren_init ``` # Loading data ``` path = untar_data(URLs.IMAGENETTE_160) lbl_dict = dict( n01440764='tench', n02102040='English springer', n02979186='cassette player', n03000684='chain saw', n03028079='church', n03394916='French horn', n03417042='garbage truck', n03425413='gas pump', n03445777='golf ball', n03888257='parachute' ) def label_func(fname): return lbl_dict[parent_label(fname)] block = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items = get_image_files, get_y = label_func, splitter = GrandparentSplitter(valid_name='val'), item_tfms = Resize(128), batch_tfms= Normalize.from_stats(*imagenet_stats)) data = block.dataloaders(path) data.show_batch() ``` # Experiment with normal xresnet model (from fastai) ``` learn = Learner(data, xresnet18(n_out=10), metrics=accuracy, opt_func=ranger, cbs=ShowGraphCallback) # learn.lr_find() learn.fit_flat_cos(20, 3e-3) ``` Trying to close the gap between validation loss and training loss using data augmentation ``` data_with_aug = block.new(item_tfms = Resize(128), batch_tfms=[*aug_transforms(size=128), Normalize.from_stats(*imagenet_stats)] ).dataloaders(path) learn = Learner(data_with_aug, xresnet18(n_out=10), metrics=accuracy, opt_func=ranger, cbs=ShowGraphCallback) # learn.lr_find() learn.fit_flat_cos(20, 3e-3) ``` # Now trying siren activation instead of ReLu https://forums.fast.ai/t/normalizing-images-with-a-lambda-instead-of-stats-latest-efficientnet-requiires-it/62441/8 ``` class NormalizeEf(Transform): "AdvProp Normalization for `TensorImage`" order=99 def encodes(self, x:TensorImage): return (x * 2.) - 1. def decodes(self, x:TensorImage): return (x + 1.) / 2. 
data_siren = block.new(item_tfms = Resize(128), batch_tfms=NormalizeEf).dataloaders(path) def init_cnn_siren(m): if isinstance(m, (nn.Conv2d,nn.Linear)): apply_siren_init(m) for l in m.children(): init_cnn_siren(l) def get_siren_resnet(): mdl = xresnet18(n_out=10, act_cls=Siren) init_cnn_siren(mdl) return mdl mdl = get_siren_resnet() siren_learn = Learner(data_siren, mdl, metrics=accuracy, opt_func=ranger, cbs=ShowGraphCallback) # siren_learn.lr_find() siren_learn.fit_flat_cos(20, 3e-2) ``` The model is overfitting, so I tried to apply regularization in the form of data augmentation ``` dls = block.new(item_tfms = Resize(128), batch_tfms=[*aug_transforms(size=128), NormalizeEf] ).dataloaders(path) mdl = get_siren_resnet() siren_learn = Learner(dls, mdl, metrics=accuracy, opt_func=ranger, cbs=ShowGraphCallback) # siren_learn.lr_find() siren_learn.fit_flat_cos(20, 3e-2) ``` More regularization, this time weight decay is added ``` dls = block.new(item_tfms = Resize(128), batch_tfms=[*aug_transforms(size=128), NormalizeEf] ).dataloaders(path) mdl = get_siren_resnet() siren_learn = Learner(dls, mdl, wd=1e-3, metrics=accuracy, opt_func=ranger, cbs=ShowGraphCallback) # siren_learn.lr_find() siren_learn.fit_flat_cos(20, 3e-2) ``` Still not as good as the ReLu xresnet (87.4% accuracy vs 83.8%), training for the same number of epochs
github_jupyter
# Overview : ## 1. Intro to the course - Data Science course - General - Python for Data Science ## 2. Why to use a notebook - Easy to use ## 3. How to use a notebook - Create one - Set name - Create a cell - Text / Code - Run - A cell / All cells - Move -Up_down / Delete - Save / Download Prohibited Python variable names ``` a = 2 b = 3 a/b a = 4 b = 3 ``` # Session 1: Python for Data Science Let's get started - Data types / Assign a value # Simple Data types in Python - int - float - String ``` ## Assign a value a = 3 ## integer b = 3.3 ## float a a a = 20 a a = 3.7 a a = 'a' b = 'I am a Pythoneer' c = 'Python' ## String b b[-1] c[0]='p' ## Strings are immutable c = 'Java' c d = c*3 + b d b b = 'Data Science' ``` # Inputs / Outputs Inputs ``` num = input('Enter a number between 0 and 100 : ') num 3 'I am a programmer' num = int(input("Enter number :")) num num = float(input("Enter number :")) num x, y = input("Enter two values: ").split() x = int(x) x y x = int(input("Enter a number :")) y = int(input("Enter a number :")) x ``` - Output ``` print('The course is helpful') print(x,'x',y,'Alexandria is beautiful') print('Cairo is a beautiful City,','so is Alexandria','3','6',x) ``` # Operators in Python Operators : + - * / // ** % ``` 11%8 x,y,z,l,m = 3,2,3,5,1 l x + y x - y x * y x / y x // y x ** y x%y ``` # Control Flow - Loops - For loops For loops, in general, are used for sequential traversal. It falls under the category of definite iteration. Definite iteration means the number of repetitions is specified explicitly in advance. Note: In Python, for loops only implement the collection-based iteration. ``` print('1 2 3') print('1 2 3 4 5 6 7 8 9 10') for counter in range(1,51): print(counter) for i in 'Python code': print(i) ``` - While loops In Python, While Loops is used to execute a block of statements repeatedly until a given condition is satisfied. And when the condition becomes false, the line immediately after the loop in the program is executed. 
While loop falls under the category of indefinite iteration. Indefinite iteration means that the number of times the loop is executed isn’t specified explicitly in advance. ``` count = 0 while (count < 3): count = count + 1 print("Hello Pythoneer/Pythonista") count = 2 while (count < 3): count = count + 1 print('Hello') ``` - Conditions ``` i = 10 if (i < 15): print(i,"is less than 15") print("1") print("Do not play") i = 15 if (i < 15): print("i is less than 15") elif (i == 15): print('i equals to 15') elif (i > 15): print('i bigger than 15') i = 100 if (i == 15): print("i is bigger than 15") else: print('i equals to 15') ``` # Data Structures - Lists Lists in Python are ordered and have a definite count. The elements in a list are indexed according to a definite sequence and the indexing of a list is done with 0 being the first index. Each element in the list has its definite place in the list, which allows duplicating of elements in the list, with each element having its own distinct place and credibility. ``` ## Can contain numbers, characters or strings... ## How to define it L = [1,2.2, "a" , "string" , 1+2,'I am a Python programmer',1,'a'] L ## Order of elements: L[-1] L[-1] L[1] L[1] = 2 ## Lists are mutable, so item assignment works L L.append('Pythonista') L L.append(1) L L.remove(1) L S = 'I am a future data scientist 55' L = S.split(' ') L ``` - Tuples Tuple is a collection of Python objects much like a list. The sequence of values stored in a tuple can be of any type, and they are indexed by integers. Values of a tuple are syntactically separated by ‘commas’. Although it is not necessary, it is more common to define a tuple by closing the sequence of values in parentheses. This helps in understanding the Python tuples more easily. ``` ## Can contain numbers, characters or strings... 
## How to define it tup = (1,2,3,'a','Programmer') tup[-1] ## tuples are immutable ``` - Sets In Python, Set is an unordered collection of data type that is iterable, mutable and has no duplicate elements. The order of elements in a set is undefined though it may consist of various elements. The major advantage of using a set, as opposed to a list, is that it has a highly optimized method for checking whether a specific element is contained in the set. ``` set1 = set("Py3 Python") set1 A = ('a','3','3','b') set2 = set(A) set2 set2.add('to be') set2 set2.remove('3') set2 ``` - Dictionary Dictionary in Python is an unordered collection of data values, used to store data values like a map, which unlike other Data Types that hold only single value as an element, Dictionary holds key:value pair. Key value is provided in the dictionary to make it more optimized. ``` dict1 = {'Je':'I', 'Tu':'You', 'Il':'He'} dict1['Je'] dict1['Avoir'] = 'To have' dict1['Avoir'] dict1 del dict1['Tu'] dict1 dict2 = {'Ahmed' : 20, 'Seham' : 50, } dict2['Ahmed'] ``` - Numpy Numpy is a general-purpose array-processing package. It provides a high-performance multidimensional array object, and tools for working with these arrays. It is the fundamental package for scientific computing with Python. Besides its obvious scientific uses, Numpy can also be used as an efficient multi-dimensional container of generic data - Arrays Array in Numpy is a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers. In Numpy, number of dimensions of the array is called rank of the array. A tuple of integers giving the size of the array along each dimension is known as shape of the array. An array class in Numpy is called as ndarray. Elements in Numpy arrays are accessed by using square brackets and can be initialized by using nested Python Lists. 
``` import numpy as np # 1D-array arr = np.array( [1,2,23,5,6,3] ) print(arr) arr[4] # 2D-array arr = np.array( [[-1, 2, 0, 4], [4, -0.5, 6, 0], [2.6, 0, 7, 8], [3, -7, 4, 2.0]] ) print(arr) arr[3,2] arr[1,0] Trans_arr = arr.T Trans_arr arr1= np.array([1,2,23]) arr2= np.array([[1], [2], [3]]) X = np.matmul(arr1,arr2) X ``` - Iteration and slicing ``` LIST = [1,2,3,'Saturday',5,6,8] for element in LIST: print(element) arr1 = np.array([1,2,6]) for i in arr1: print(i) for elem in set2: print(elem) LIST = [1,2,3,'Saturday',5,6,8] LIST[2:] arr = np.array([[-1, 2, 0, 4], [4, -0.5, 6, 0], [2.6, 0, 7, 8], [3, -7, 4, 2.0]]) arr[2:,:] ```
github_jupyter
# 1A.1 - Dictionnaires, fonctions, code de Vigenère (correction) Le notebook ne fait que crypter et décrypter un message sachant le code connu. Casser le code requiert quelques astuces décrites dnas ce notebook : [casser le code de Vigenère](http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/notebooks/expose_vigenere.html). ``` from jyquickhelper import add_notebook_menu add_notebook_menu() ``` ### Exercice 1 ``` def lettre_suivante(lettre) : c = ord(lettre) - ord('a') c = (c + 1) % 26 return chr (c + ord('a')) print (lettre_suivante('m'), lettre_suivante('z')) ``` ### Exercice 2 ``` mots = ['eddard', 'catelyn', 'robb', 'sansa', 'arya', 'brandon', 'rickon', 'theon', 'rorbert', 'cersei', 'tywin', 'jaime', 'tyrion', 'shae', 'bronn', 'lancel', 'joffrey', 'sandor', 'varys', 'renly', 'a' ] def mots_lettre_position (liste, lettre, position) : res = [ ] for mot in liste : if position < len(mot) and mot[position] == lettre : res.append (mot) return res r = mots_lettre_position ( mots, 'y', 1) print (r) ``` ### Exercice 3 : utilisation d'un dictionnaire L'énoncé suggère d'utiliser comme clé de dictionnaire le couple ``(position, lettre)`` et la fonction doit retourne la liste des mots qui ont tous la même lettre à la même position. Le dictionnaire ``dictionnaire_bien_choisi`` de l'énoncé doit avoir pour clés des couples ``(position, lettre)`` et pour valeurs des listes de prénoms. ``` def dictionnaire_choisi (liste) : d = { } for mot in liste : for i,c in enumerate(mot) : d [i,c] = d.get ((i,c), []) + [ mot ] return d def mots_lettre_position (d, lettre, position) : return d.get ( (position, lettre), [] ) d = dictionnaire_choisi(mots) r = mots_lettre_position ( d, 'y', 1) print ("résultat=",r) print ("dictionnaire=",d) ``` S'il permet d'aller beaucoup plus vite pour effectuer une recherche, le dictionnaire ``d`` contient beaucoup plus de mots que la liste initiale. 
Si on suppose que tous les mots sont uniques, il en contient exactement autant que la somme des longueurs de chaque mot. **A quoi ça sert ?** Tout dépend du nombre de fois qu'on n'effectue ce type de **recherche**. Il faut d'abord décomposer les deux méthodes en coût fixe (préparation du dictionnaire) et coût recherche puis regarder la page [Time Complexity](https://wiki.python.org/moin/TimeComplexity). On obtient : - liste de l'exercice 2 : coût fixe = 0, coût variable $\sim O(N)$ - dictionaire de l'exercice 3 : coût fixe $\sim O(L)$, coût variable $\sim O(1)$ Où : - $N$ est le nombre de mots, - $L$ est la somme des nombres de lettres de chaque mot, - $M$ est la longueur maximale d'un mot. Les dictionnaires en Python utilisent une [table de hashage](http://fr.wikipedia.org/wiki/Table_de_hachage) pour stocker les clés. L'objet ``map`` de Python ne rapproche plus de l'objet ``unordered_map`` de C++ que de l'objet ``map``. Ce dernier (C++ uniquement) est un tableau trié. L'accès à chaque élément se fait par dichotomie en $O(\ln_2 n)$ (voir [Standard C++ Containers](http://www.cs.northwestern.edu/~riesbeck/programming/c++/stl-summary.html#map). Le coût dans ce cas serait (toujours en C++) : - dictionaire de l'exercice 3 : coût fixe $\sim O(L \, ln_2(26 * M))$, coût variable $\sim O(ln_2(26 * M))$ Si on effectue cette recherche un grand nombre de fois, l'utilisation d'un dictionnaire permet d'être beaucoup plus rapide même si on doit créer une structure intermédiaire. Ce schéma revient régulièrement : **représenter autrement les données pour accélérer un traitement effectué un grand nombre de fois**. 
def code_vigenere(message, cle, decode=False):
    """Vigenère cipher over uppercase A-Z.

    Encode *message* with key *cle*; when *decode* is True, apply the
    complementary shifts instead, which inverts the encoding.
    """
    transformed = []
    for position, letter in enumerate(message):
        shift = ord(cle[position % len(cle)]) - 65
        if decode:
            # Decoding is just encoding with the complementary shift.
            shift = 26 - shift
        transformed.append(chr((ord(letter) - 65 + shift) % 26 + 65))
    return "".join(transformed)
Vous trouverez la solution ici : [casser le code de Vigenère](http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/notebooks/expose_vigenere.html).
github_jupyter
def select_best_scores(dictionary):
    """Print the three runs with the highest weighted-average F1.

    *dictionary* maps a test-percentage key to {model: [run dicts]}, where
    each run dict carries 'num_epochs' and 'avg_f1' = [macro, weighted].
    """
    flattened = [
        [model, key, entry['num_epochs'], entry['avg_f1'][1]]
        for key, value in dictionary.items()
        for model, T_results in value.items()
        for entry in T_results
    ]
    # Rank by the weighted-average F1 (index 3), best first.
    flattened.sort(key=lambda row: row[3], reverse=True)
    print(flattened[0:3])
new_dict[model][key] = T_results return new_dict ``` ## Data loading Temporary requirement: Put all the EXPi folders in a single folder called `F1_multiclass_results` in the `input` folder. **Note:** Jordi, I know you used `Path` but for some reason I was not able to retrieve all the json files using that method, so I added an alternative. ``` # path = Path("C:/Users/user/Google Drive/Els_meus_documents/projectes/CompetitiveIntelligence/WRI/Notebooks/Data/finetuningResults") # filename = "FineTuningResults.json" # sub_path = Path("C:/Users/user/Google Drive/Els_meus_documents/projectes/CompetitiveIntelligence/WRI/Notebooks/Data/finetuningResults/") # paths = sub_path.glob('*.json') models = ["stsb-xlm-r", "paraphrase-xlm-r"]#"distiluse-base", "quora-distilbert", exp_codes = {'0' : "Rater2 combined labels", '1' : "Rater2 only new labels", '2' : "Rater3 combined labels", '3' : "Rater3 only new labels", '4' : "Rater1 combined labels", '5' : "Rater1 only new labels"} # exp_codes = {'20' : "Rater3", # '21' : "Rater2", # '22' : "Rater1"} # exp_codes = {'30' : "Rater1 only new test old", # '31' : "Rater2 only new test old", # '32' : "Rater3 only new test old"} exp_codes = {'40' : "Merged1", '41' : "Merged2", '42' : "Merged3"} results_path = "../input/F1_multiclass_results/" output_path = "../output/" all_files = [os.path.join(root, file) for root, dirs, files in os.walk(results_path) for file in files] exp_results_json = [file for file in all_files if file.endswith(".json")] for exp_result in exp_results_json: # print(exp_result) if "EXP" in exp_result and "TEST" not in exp_result: # exp_number = exp_result.split("/")[-2].replace("EXPTEST", "") exp_number = exp_result.split("EXP")[1].split("_")[0] if int(exp_number) > 39: print(exp_result) with open(exp_result, "r") as f: F1 = json.load(f) select_best_scores(F1) plot_F1(transform_data(F1), exp_number, exp_codes, output_path) def max_f1_per_experiment(results, weighted=False): max_results = {"f1-score": 0.0, "epochs": 0, 
"test_perc": 0.0} for model in results: for test_perc in results[model]: for result in results[model][test_perc]: cur_f1 = result['avg_f1'][1] if weighted else result['avg_f1'][0] if cur_f1 > max_results["f1-score"]: max_results["f1-score"] = round(cur_f1, 2) max_results["epochs"] = result['num_epochs'] max_results["test_perc"] = test_perc.split("=")[-1] max_results["model"] = model return max_results def pretty_print_max_results(max_results): for parameter, value in max_results.items(): print(f"- {parameter}: {value}") def store_results_table(df, exp_num, max_results): row = {"Experiment number": exp_num} row.update(max_results) return df.append(row, ignore_index=True) df = pd.DataFrame(columns=["Experiment number", "model", "f1-score", "epochs", "test_perc"]) weighted_f1 = False for exp_result in exp_results_json: if "EXPTEST" in exp_result: exp_number = exp_result.split("/")[-2].replace("EXPTEST", "") print("Experiment number:", exp_number) with open(exp_result, "r") as f: results_json = json.load(f) print("Best results:") max_res = max_f1_per_experiment(transform_data(results_json), weighted_f1) pretty_print_max_results(max_res) df = store_results_table(df, exp_number, max_res) print("===============================================================") if weighted_f1: df.to_csv("../output/weighted-f1-results-compilation.csv") else: df.to_csv("../output/avg-f1-results-compilation.csv") df ```
github_jupyter
# 007 Clustering Validation: FEMA application data * Damaged properties * Data analysis for Figure 2 * Mapping and additional analysis have been done in Quantum GIS version 3.4 Madeira ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") # Check package versions import types def imports(): for name, val in globals().items(): if isinstance(val, types.ModuleType): yield val.__name__ import pkg_resources root_packages = [i.split('.', 1)[0] for i in list(imports())] for m in pkg_resources.working_set: if m.project_name.lower() in root_packages: print (m.project_name, m.version) ``` # 1. Load FEMA housing assisntance application data * For owner * For renter ``` df_o = pd.read_csv('../../data/FEMA_Housing_Assistance_Data_Owner.csv') df_r = pd.read_csv('../../data/FEMA_Housing_Assistance_Data_Renter.csv') df_o = df_o[['Disaster', 'State', 'County', 'City', 'Zip Code', 'Valid Registrations', 'Average FEMA Inspected Damage', 'Total Inspected', 'Total Damage', 'No FEMA Inspected Damage', 'FEMA Inspected Damage between $1 and $10,000', 'FEMA Inspected Damage between $10,001 and $20,000', 'FEMA Inspected Damage between $20,001 and $30,000', 'FEMA Inspected Damage > $30,000', 'Approved for FEMA Assistance', 'Total Approved IHP Amount', 'Repair/Replace Amount', 'Rental Amount', 'Other Needs Amount', 'Approved between $1 and $10,000', 'Approved between $10,001 and $25,000', 'Approved between $25,001 and Max', 'Total Max Grants']] df_r = df_r[['Disaster', 'State', 'County', 'City', 'Zip Code', 'Valid Registrations', 'Total Inspected', 'Inspected with No Damage', 'Total with Moderate Damage', 'Total with Major Damage', 'Total with Substantial Damage', 'Approved for FEMA Assistance', 'Total Approved IHP Amount', 'Repair/Replace Amount', 'Rental Amount', 'Other Needs Amount', 'Approved between $1 and $10,000', 'Approved between $10,001 and $25,000', 'Approved between $25,001 and Max', 'Total Max Grants']] df_o = 
df_o[df_o['Disaster']==4332] df_r = df_r[df_r['Disaster']==4332] df_o = df_o[df_o['County']=='Harris (County)'] df_r = df_r[df_r['County']=='Harris (County)'] df_o = df_o.reset_index(drop=True) df_r = df_r.reset_index(drop=True) col_list = df_o.columns.tolist()[8:11]+df_o.columns.tolist()[12:] for c in col_list: df_o[c] = df_o[c].astype(str) df_o[c] = df_o[c].str.replace('$', '') df_o[c] = df_o[c].str.replace(',', '') df_o[c] = df_o[c].astype(float) for c in ['Total Approved IHP Amount', 'Repair/Replace Amount', 'Rental Amount', 'Other Needs Amount']: df_r[c] = df_r[c].astype(str) df_r[c] = df_r[c].str.replace('$', '') df_r[c] = df_r[c].str.replace(',', '') df_r[c] = df_r[c].astype(float) df_o.head(2) df_r.head(2) df_o['Valid Registrations'] = df_o['Valid Registrations'].astype(float) df_o = df_o.groupby('Zip Code').sum().reset_index(inplace=False) df_r = df_r.groupby('Zip Code').sum().reset_index(inplace=False) df_o = df_o[['Zip Code', 'Valid Registrations', 'Total Inspected', 'Total Damage', 'No FEMA Inspected Damage', 'Approved for FEMA Assistance','Total Approved IHP Amount']] df_r = df_r[['Zip Code', 'Valid Registrations', 'Total Inspected', 'Inspected with No Damage', 'Approved for FEMA Assistance', 'Total Approved IHP Amount']] # print len(df_o) # print len(df_r) df_r.head() df_o.head() ``` # 2. 
Validation of damage for each clusters * Calculate damaged properties per household at zipcode level ``` # Num of households (zipcode) hh_zip = pd.read_csv('../../data/household_zipcode_Texas/ACS_17_5YR_S1101_with_ann.csv', skiprows=[0]) # Num of households (census tract) hh_ct = pd.read_csv('../../data/household_census_tract_Harris_county/ACS_17_5YR_S1101_with_ann.csv', skiprows=[0]) # Zipcode/census tract IDs ct_zip = pd.read_csv('../../data/ct_zip.csv') df_r['insp_damage_r'] = df_r['Total Inspected'] - df_r['Inspected with No Damage'] df_o['insp_damage_o'] = df_o['Total Inspected'] - df_o['No FEMA Inspected Damage'] df_damage = pd.merge(df_o, df_r, how='outer', on='Zip Code') df_damage = df_damage[['Zip Code', 'insp_damage_r', 'insp_damage_o']] df_damage = df_damage.fillna(0) df_damage['insp_damage'] = df_damage['insp_damage_o'] + df_damage['insp_damage_r'] df_damage.columns = ['zipcode', 'insp_damage_r', 'insp_damage_o', 'insp_damage'] df_damage.head() hh_zip = hh_zip[['Id2', 'Total; Estimate; Total households']] hh_zip.columns = ['zipcode', 'hh'] df_damage_zip = pd.merge(df_damage, hh_zip, how='outer', on='zipcode') df_damage_zip = df_damage_zip.dropna() df_damage_zip['damage_per_hh'] = df_damage_zip['insp_damage']/df_damage_zip['hh'] df_damage_zip.head(2) df_damage_zip = df_damage_zip[['zipcode', 'damage_per_hh']] df_damage_zip.head() # df_damage_zip.to_csv('../../outputs/damage_per_hh_zip.csv') ``` * Calculate number of damaged properties at census tract level ``` ct_zip['TRACT'] = ct_zip['TRACT'].astype(str).str.zfill(6) ct_zip['GEOID_Data'] = '14000US48201' + ct_zip['TRACT'] ct_zip.head(2) df_damage_ct = pd.merge(ct_zip, df_damage_zip, how='left', on='zipcode') # print len(df_damage_ct) # print df_damage_ct['GEOID_Data'].nunique() df_damage_ct.head() hh_ct = hh_ct[['Id2', 'Total; Estimate; Total households']] hh_ct['GEOID_Data'] = '14000US' + hh_ct['Id2'].astype(str) hh_ct['hh'] = hh_ct['Total; Estimate; Total households'] hh_ct = hh_ct[['GEOID_Data', 
'hh']] hh_ct.head(2) df_damage_ct = pd.merge(df_damage_ct, hh_ct, how='left', on='GEOID_Data') df_damage_ct.head() df_damage_ct['damage_ct'] = df_damage_ct['damage_per_hh'] * df_damage_ct['hh'] df_damage_ct.head() #df_damage_ct.to_csv('../../outputs/damage_ct.csv', index=False) ```
github_jupyter
# CIFAR - 10 ## Improved CNN ### Activate virtual environment ``` %%bash source ~/kerai/bin/activate ``` ### Imports ``` %matplotlib inline import numpy as np import matplotlib from matplotlib import pyplot as plt from keras.models import Sequential from keras.optimizers import Adam, SGD from keras.callbacks import ModelCheckpoint from keras.constraints import maxnorm from keras.models import load_model from keras.layers import GlobalAveragePooling2D, Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation from keras.preprocessing.image import ImageDataGenerator ``` Import helper functions ``` from helper import get_class_names, get_train_data, get_test_data, plot_images from helper import plot_model, predict_classes, visualize_errors ``` Change matplotlib graph style ``` matplotlib.style.use('ggplot') ``` ### Constants Import class names ``` class_names = get_class_names() print(class_names) ``` Get number of classes ``` num_classes = len(class_names) print(num_classes) # Hight and width of the images IMAGE_SIZE = 32 # 3 channels, Red, Green and Blue CHANNELS = 3 # Number of epochs NUM_EPOCH = 350 # learning rate LEARN_RATE = 1.0e-4 ``` ### Fetch and decode data Load the training dataset. Labels are integers whereas class is one-hot encoded vectors. ``` images_train, labels_train, class_train = get_train_data() ``` Load the testing dataset. ``` images_test, labels_test, class_test = get_test_data() print("Training set size:\t",len(images_train)) print("Testing set size:\t",len(images_test)) ``` The CIFAR-10 dataset has been loaded and consists of a total of 60,000 images and corresponding labels. ## Improving accuracy ### 1. 
Define a better CNN model A pure CNN model from https://arxiv.org/pdf/1412.6806.pdf ``` def pure_cnn_model(): model = Sequential() model.add(Conv2D(96, (3, 3), activation='relu', padding = 'same', input_shape=(IMAGE_SIZE,IMAGE_SIZE,CHANNELS))) model.add(Dropout(0.2)) model.add(Conv2D(96, (3, 3), activation='relu', padding = 'same')) model.add(Conv2D(96, (3, 3), activation='relu', padding = 'same', strides = 2)) model.add(Dropout(0.5)) model.add(Conv2D(192, (3, 3), activation='relu', padding = 'same')) model.add(Conv2D(192, (3, 3), activation='relu', padding = 'same')) model.add(Conv2D(192, (3, 3), activation='relu', padding = 'same', strides = 2)) model.add(Dropout(0.5)) model.add(Conv2D(192, (3, 3), padding = 'same')) model.add(Activation('relu')) model.add(Conv2D(192, (1, 1),padding='valid')) model.add(Activation('relu')) model.add(Conv2D(10, (1, 1), padding='valid')) model.add(GlobalAveragePooling2D()) model.add(Activation('softmax')) model.summary() return model ``` Build the model ``` model = pure_cnn_model() ``` #### Train model on the training data Save the model after every epoch ``` checkpoint = ModelCheckpoint('best_model_improved.h5', # model filename monitor='val_loss', # quantity to monitor verbose=0, # verbosity - 0 or 1 save_best_only= True, # The latest best model will not be overwritten mode='auto') # The decision to overwrite model is made # automatically depending on the quantity to monitor ``` Configure the model for training ``` model.compile(loss='categorical_crossentropy', # Better loss function for neural networks optimizer=Adam(lr=LEARN_RATE), # Adam optimizer with 1.0e-4 learning rate metrics = ['accuracy']) # Metrics to be evaluated by the model ``` For more information on categorical cross entropy loss function see - https://jamesmccaffrey.wordpress.com/2013/11/05/why-you-should-use-cross-entropy-error-instead-of-classification-error-or-mean-squared-error-for-neural-network-classifier-training/ Fit the model on the data provided ``` 
model_details = model.fit(images_train, class_train, batch_size = 128, epochs = NUM_EPOCH, # number of iterations validation_data= (images_test, class_test), callbacks=[checkpoint], verbose=1) ``` #### Evaluate the model ``` scores = model.evaluate(images_test, class_test, verbose=0) print("Accuracy: %.2f%%" % (scores[1]*100)) ``` #### Model accuracy and loss plots ``` plot_model(model_details) ``` ### 2. Augment the data ``` datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=45, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.2, # randomly shift images horizontally (fraction of total width) height_shift_range=0.2, # randomly shift images vertically (fraction of total height) horizontal_flip=True, # randomly flip images vertical_flip=False) # randomly flip images datagen.fit(images_train) ``` The above code augments the dataset to have random shifts, rotations and flips, thus increasing the size of the dataset. 
Build model again ``` augmented_model = pure_cnn_model() ``` #### Train model on the training data Save the model after every epoch ``` augmented_checkpoint = ModelCheckpoint('augmented_best_model.h5', # model filename monitor='val_loss', # quantity to monitor verbose=0, # verbosity - 0 or 1 save_best_only= True, # The latest best model will not be overwritten mode='auto') # The decision to overwrite model is made # automatically depending on the quantity to monitor ``` Configure the model for training ``` augmented_model.compile(loss='categorical_crossentropy', # Better loss function for neural networks optimizer=Adam(lr=LEARN_RATE), # Adam optimizer with 1.0e-4 learning rate metrics = ['accuracy']) # Metrics to be evaluated by the model ``` Fit the model on the data provided ``` augmented_model_details = augmented_model.fit_generator(datagen.flow(images_train, class_train, batch_size = 32), steps_per_epoch = len(images_train) / 32, # number of samples per gradient update epochs = NUM_EPOCH, # number of iterations validation_data= (images_test, class_test), callbacks=[augmented_checkpoint], verbose=1) ``` #### Evaluate the model ``` scores = augmented_model.evaluate(images_test, class_test, verbose=0) print("Accuracy: %.2f%%" % (scores[1]*100)) ``` #### Model accuracy and loss plots ``` plot_model(augmented_model_details) ``` To further improve the model, run it for more epochs and do more augmentations like ZCA whitening. 
### Predictions Predict class for test set images ``` correct, labels_pred = predict_classes(augmented_model, images_test, labels_test) ``` Calculate accuracy using manual calculation ``` num_images = len(correct) print("Accuracy: %.2f%%" % ((sum(correct)*100)/num_images)) ``` ### Show some mis-classifications Plot the first 9 mis-classified images ``` visualize_errors(images_test, labels_test, class_names, labels_pred, correct) ``` ## Credits - https://arxiv.org/pdf/1412.6806.pdf - https://github.com/fchollet/keras/blob/master/examples/cifar10_cnn.py - https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/06_CIFAR-10.ipynb - http://machinelearningmastery.com/object-recognition-convolutional-neural-networks-keras-deep-learning-library/ - https://parneetk.github.io/blog/cnn-cifar10/ - https://github.com/dnlcrl/deep-residual-networks-pyfunt/blob/master/docs/CIFAR-10%20Experiments.ipynb
github_jupyter
##### Copyright 2018 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` # Action Recognition with an Inflated 3D CNN <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/action_recognition_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/action_recognition_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/action_recognition_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This Colab demonstrates use of action recognition from video data using the [tfhub.dev/deepmind/i3d-kinetics-400/1](https://tfhub.dev/deepmind/i3d-kinetics-400/1) module. 
The underlying model is described in the paper "[Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset](https://arxiv.org/abs/1705.07750)" by Joao Carreira and Andrew Zisserman. The paper was posted on arXiv in May 2017, and was published as a CVPR 2017 conference paper. The source code is publicly available on [github](https://github.com/deepmind/kinetics-i3d). "Quo Vadis" introduced a new architecture for video classification, the Inflated 3D Convnet or I3D. This architecture achieved state-of-the-art results on the UCF101 and HMDB51 datasets from fine-tuning these models. I3D models pre-trained on Kinetics also placed first in the CVPR 2017 [Charades challenge](http://vuchallenge.org/charades.html). The original module was trained on the [kinetics-400 dateset](https://deepmind.com/research/open-source/open-source-datasets/kinetics/) and knows about 400 different actions. Labels for these actions can be found in the [label map file](https://github.com/deepmind/kinetics-i3d/blob/master/data/label_map.txt). In this Colab we will use it recognize activites in videos from a UCF101 dataset. ## Setup ``` !pip install -q imageio !pip install -q opencv-python !pip install -q git+https://github.com/tensorflow/docs #@title Import the necessary modules # TensorFlow and TF-Hub modules. from absl import logging import tensorflow as tf import tensorflow_hub as hub from tensorflow_docs.vis import embed logging.set_verbosity(logging.ERROR) # Some modules to help with reading the UCF101 dataset. import random import re import os import tempfile import ssl import cv2 import numpy as np # Some modules to display an animation using imageio. 
import imageio
from IPython import display

from urllib import request  # requires python3

#@title Helper functions for the UCF101 dataset

# Utilities to fetch videos from UCF101 dataset
UCF_ROOT = "https://www.crcv.ucf.edu/THUMOS14/UCF101/UCF101/"
_VIDEO_LIST = None
_CACHE_DIR = tempfile.mkdtemp()
# As of July 2020, crcv.ucf.edu doesn't use a certificate accepted by the
# default Colab environment anymore.
unverified_context = ssl._create_unverified_context()

def list_ucf_videos():
  """Lists videos available in UCF101 dataset.

  Returns:
    Sorted list of unique "v_*.avi" file names scraped from the UCF_ROOT
    index page. The result is cached in the module-level _VIDEO_LIST on
    the first call, so the index is fetched only once.
  """
  global _VIDEO_LIST
  if not _VIDEO_LIST:
    index = request.urlopen(UCF_ROOT, context=unverified_context).read().decode("utf-8")
    # Raw string so "\w" is a regex word-class, not an invalid string escape.
    videos = re.findall(r"(v_[\w_]+\.avi)", index)
    _VIDEO_LIST = sorted(set(videos))
  return list(_VIDEO_LIST)

def fetch_ucf_video(video):
  """Fetches a video and caches it into the local filesystem.

  Args:
    video: File name as returned by list_ucf_videos().

  Returns:
    Local path of the cached video file.
  """
  cache_path = os.path.join(_CACHE_DIR, video)
  if not os.path.exists(cache_path):
    urlpath = request.urljoin(UCF_ROOT, video)
    print("Fetching %s => %s" % (urlpath, cache_path))
    data = request.urlopen(urlpath, context=unverified_context).read()
    # Context manager so the file handle is closed (and data flushed)
    # deterministically instead of relying on garbage collection.
    with open(cache_path, "wb") as f:
      f.write(data)
  return cache_path

# Utilities to open video files using CV2
def crop_center_square(frame):
  """Crops the largest centered square region out of an HxW(xC) frame."""
  y, x = frame.shape[0:2]
  min_dim = min(y, x)
  start_x = (x // 2) - (min_dim // 2)
  start_y = (y // 2) - (min_dim // 2)
  return frame[start_y:start_y+min_dim, start_x:start_x+min_dim]

def load_video(path, max_frames=0, resize=(224, 224)):
  """Loads a video as a float array of frames scaled to [0, 1].

  Args:
    path: Path of a video file readable by OpenCV.
    max_frames: If > 0, stop after this many frames; 0 reads the whole video.
    resize: (width, height) each frame is resized to after center-cropping.

  Returns:
    np.ndarray of shape (num_frames, height, width, 3) in RGB order.
  """
  cap = cv2.VideoCapture(path)
  frames = []
  try:
    while True:
      ret, frame = cap.read()
      if not ret:
        break
      frame = crop_center_square(frame)
      frame = cv2.resize(frame, resize)
      # OpenCV decodes frames as BGR; reorder channels to RGB.
      frame = frame[:, :, [2, 1, 0]]
      frames.append(frame)

      if len(frames) == max_frames:
        break
  finally:
    cap.release()
  return np.array(frames) / 255.0

def to_gif(images):
  """Renders a (frames, H, W, 3) float array in [0, 1] as an embedded GIF."""
  converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)
  imageio.mimsave('./animation.gif', converted_images, fps=25)
  return embed.embed_file('./animation.gif')

#@title Get the kinetics-400 labels
# Get the
kinetics-400 action labels from the GitHub repository. KINETICS_URL = "https://raw.githubusercontent.com/deepmind/kinetics-i3d/master/data/label_map.txt" with request.urlopen(KINETICS_URL) as obj: labels = [line.decode("utf-8").strip() for line in obj.readlines()] print("Found %d labels." % len(labels)) ``` # Using the UCF101 dataset ``` # Get the list of videos in the dataset. ucf_videos = list_ucf_videos() categories = {} for video in ucf_videos: category = video[2:-12] if category not in categories: categories[category] = [] categories[category].append(video) print("Found %d videos in %d categories." % (len(ucf_videos), len(categories))) for category, sequences in categories.items(): summary = ", ".join(sequences[:2]) print("%-20s %4d videos (%s, ...)" % (category, len(sequences), summary)) # Get a sample cricket video. video_path = fetch_ucf_video("v_CricketShot_g04_c02.avi") sample_video = load_video(video_path) sample_video.shape i3d = hub.load("https://tfhub.dev/deepmind/i3d-kinetics-400/1").signatures['default'] ``` Run the id3 model and print the top-5 action predictions. ``` def predict(sample_video): # Add a batch axis to the to the sample video. model_input = tf.constant(sample_video, dtype=tf.float32)[tf.newaxis, ...] logits = i3d(model_input)['default'][0] probabilities = tf.nn.softmax(logits) print("Top 5 actions:") for i in np.argsort(probabilities)[::-1][:5]: print(f" {labels[i]:22}: {probabilities[i] * 100:5.2f}%") predict(sample_video) ``` Now try a new video, from: https://commons.wikimedia.org/wiki/Category:Videos_of_sports How about [this video](https://commons.wikimedia.org/wiki/File:End_of_a_jam.ogv) by Patrick Gillett: ``` !curl -O https://upload.wikimedia.org/wikipedia/commons/8/86/End_of_a_jam.ogv video_path = "End_of_a_jam.ogv" sample_video = load_video(video_path)[:100] sample_video.shape to_gif(sample_video) predict(sample_video) ```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt data = pd.read_csv("gc.csv", encoding="latin1") df = data[["gender", "description"]] df df.gender.value_counts() df.isnull().sum() df.dropna(inplace=True) df.isnull().sum() df.gender.value_counts() from sklearn.preprocessing import LabelEncoder label_encoder = LabelEncoder() df["sex"]= label_encoder.fit_transform(df["gender"]) df df.sex.value_counts() import re first_description = df.description[4] description = re.sub("[^a-zA-Z]"," ",first_description) description = description.lower() import nltk nltk.download("stopwords") nltk.download('punkt') from nltk.corpus import stopwords description = nltk.word_tokenize(description) description = [ word for word in description if not word in set(stopwords.words("english"))] description import nltk as nlp nltk.download('wordnet') lemma = nlp.WordNetLemmatizer() description = [ lemma.lemmatize(word) for word in description] description = " ".join(description) description description_list = [] for description in df.description: description = re.sub("[^a-zA-Z]"," ",description) description = description.lower() # buyuk harftan kucuk harfe cevirme description = nltk.word_tokenize(description) #description = [ word for word in description if not word in set(stopwords.words("english"))] lemma = nlp.WordNetLemmatizer() description = [ lemma.lemmatize(word) for word in description] description = " ".join(description) description_list.append(description) from sklearn.feature_extraction.text import CountVectorizer # bag of words yaratmak icin kullandigim metot max_features = 5000 count_vectorizer = CountVectorizer(max_features=max_features,stop_words = "english") sparce_matrix = count_vectorizer.fit_transform(description_list).toarray() # x print("en sik kullanilan {} kelimeler: {}".format(max_features,count_vectorizer.get_feature_names())) y = df.iloc[:,2].values # male or female classes x = sparce_matrix # train test split from sklearn.model_selection 
import train_test_split x_train, x_test, y_train, y_test = train_test_split(x,y, test_size = 0.1, random_state = 42) df.drop("gender", axis=1, inplace=True) from sklearn.naive_bayes import GaussianNB nb = GaussianNB() nb.fit(x_train,y_train) y_pred = nb.predict(x_test) #print("accuracy: ",nb.score(y_pred.reshape(-1,1),y_test)) import seaborn as sns y_pred = pd.DataFrame(y_pred.reshape(-1,1)) df y_pred plt.figure(figsize=(15,12)) plt.plot(y_pred.iloc[:,0], y_pred.index) ```
github_jupyter
<a href="https://colab.research.google.com/github/julianikulski/director-experience/blob/main/preprocessing/biography_matching.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #Matching biographies to directors In this notebook I will be matching the directors sitting on the boards of the S&P 500 companies between 2011 and 2015. These directors were researched on Refinitiv Eikon and then the relevant directors were identified in the notebook `director_company_data`. The biographies used in this notebook were taken from S&P Capital IQ. Because there are about 4600 director biographies missing from the S&P 500 Capital IQ biography dataset, I wrote the entire list of relevant directors (incl. directors with and without biographies) to an excel file. For all of the directors included in this file (~7500) I manually researched the missing biographies from DEF 14As. Simultaneously, I research the committee memberships and relevant CSR committees in these DEF 14As for each company. 
``` # connecting to Google Drive to access files from google.colab import drive drive.mount('/content/drive') import numpy as np import pandas as pd from glob import glob import re from functools import reduce # set number of max rows pd.set_option('display.max_rows', 13000) ``` ## Reading in data ``` # read in the csv file containing all directors in my dataset all_directors_df = pd.read_csv('/content/drive/My Drive/director-csr/all_directors.csv') # drop the 'Unnamed: 0' column all_directors_df.drop(columns='Unnamed: 0', inplace=True) all_directors_df.head() # read in the excel files containing the biographies all_files = glob('/content/drive/My Drive/director-csr/directors/*.xls') list_df = [] for file in all_files: df_file = pd.read_excel(file, skiprows=7) # skipping the first 7 rows above the header list_df.append(df_file) biographies_df = pd.concat(list_df, axis=0, ignore_index=True) biographies_df.head() ``` ## Data Cleaning and Preprocessing ``` # rename some of the columns and drop others new_columns = ['name', 'first_name', 'middle_name', 'nick_name', 'last_name', 'comp_name', 'ticker', 'education', 'prim_comp', 'biographies', 'age', 'isin', 'all_tickers'] old_columns = ['Person Name', 'Person Name First', 'Person Name Middle', 'Person Name Nickname', 'Person Name Last', 'Company Name [Any Professional Record] [Current Matching Results]', 'Exchange:Ticker', 'Colleges/Universities', 'Primary Professional Record', 'Biographies', 'Person Age', 'Primary ISIN [Any Professional Record] [Current Matching Results]', 'Security Tickers [Any Professional Record] [Current Matching Results]'] biographies_df = biographies_df[old_columns] biographies_df.rename(columns=dict(zip(old_columns, new_columns)), inplace=True) biographies_df ``` In this biographies_df dataframe, I am really only interested in the biographies. I will keep the other columns as well because it may be a way to distinguish people with similar names based on their education, age, etc. 
However, I will keep duplicate biographies and only drop duplicate rows. The biographies may be the same, but other ISINs or company names might be used for the same person which might be valuable for the matching later on. ``` # drop any rows that are duplicate print(biographies_df.shape) biographies_df.drop_duplicates(inplace=True) print(biographies_df.shape) # create a copy of the uncleaned biographies_df biographies_old = biographies_df.copy() biographies_old['name'] = biographies_old['name'].apply(lambda x: x.lower() if not pd.isna(x) else x) def clean_names(df, bio=True): ''' Function to clean up the director names so that they can be matched Args: df = dataframe; containing director names bio = bool; True if the biographies dataframe is added, False otherwise Returns: df = dataframe ''' df['org_name'] = df.iloc[:,0] # change the strings to lower case df.iloc[:,0] = df.iloc[:,0].apply(lambda x: x.lower()) # check if the names contain anything in parentheses and if so remove them and their content df.iloc[:,0] = df.iloc[:,0].apply(lambda x: re.sub(r'\([^()]*\)', '', x)) # check if the names contain a title like ms. and mr. and if so remove them df.iloc[:,0] = df.iloc[:,0].apply(lambda x: re.sub(r'^\w{2,3}\. ?', '', x)) # do two different things with the commas for the different dataframes if bio: # move the last name in the front of the comma to the back of the string and remove the comma df.iloc[:,0] = df.iloc[:,0].apply(lambda x: ' '.join([x.split(',')[1], x.split(',')[0]])) else: # create a new column that contains all the words after a comma at the end df['qualification'] = df.iloc[:,0].apply(lambda x: x.split(',')[-1] if len(x.split(',')) > 1 else None) df.iloc[:,0] = df.iloc[:,0].apply(lambda x: x.split(',')[0]) # remove any initials or titles because they might be distracting when matching names df.iloc[:,0] = df.iloc[:,0].apply(lambda x: ' '.join([name if '.' 
not in name else '' for name in x.split()])) # remove 'the' substring from names df.iloc[:,0] = df.iloc[:,0].apply(lambda x: re.sub(r'^the\s', '', x)) # ensure that all white space is stripped df.iloc[:,0] = df.iloc[:,0].apply(lambda x: re.sub(' +', ' ', x).strip()) return df # clean both dataframes all_directors_clean = clean_names(all_directors_df, bio=False) biographies_old_clean = clean_names(biographies_df, bio=True) # I need to manually re-add names that got lost in the cleaning function index = all_directors_clean[all_directors_clean['name'] == ''].index new_names = ['david owen', 'donald riegle', 'thomas niles'] for i in range(len(index)): all_directors_clean.at[index[i], 'name'] = new_names[i] all_directors_clean[all_directors_clean['name'] == ''] # add last name column to dataframe all_directors_clean['last_name'] = all_directors_clean['name'].apply(lambda x: x.split(' ')[-1]) # sort the dataframe by name column all_directors_clean.sort_values(by='name', ascending=True) all_directors_clean.head() ``` I will do some additional data cleaning to try to match more biographies to people ``` # additional data cleaning to merge more directors with biographies biographies_clean = biographies_old_clean.copy() biographies_clean = biographies_clean.applymap(lambda x: x.lower() if isinstance(x, str) else x) biographies_clean['ticker'] = biographies_clean['ticker'].apply(lambda x: x.split(':')[-1] if not pd.isna(x) else x) biographies_clean['isin'] = biographies_clean['isin'].apply(lambda x: x if not pd.isna(x) else x) biographies_clean['last_name'] = biographies_clean['last_name'].apply(lambda x: x if not pd.isna(x) else x) biographies_clean['all_tickers'] = biographies_clean['all_tickers'].apply(lambda x: [ticker.split(':')[-1].lower() if not pd.isna(x) else x for ticker in x.split(';')]) biographies_clean['comp_name'] = biographies_clean['comp_name'].apply(lambda x: re.sub(r'\([^()]*\)', '', x)) biographies_clean['prim_comp'] = 
biographies_clean['prim_comp'].apply(lambda x: re.sub(r'\([^()]*\)', '', x)) # now add the ticker values of to the all_tickers list biographies_clean['all_tickers'] = biographies_clean.apply(lambda x: x['all_tickers'] + [x['ticker']] if x['all_tickers'][0] != '-' else x['all_tickers'], axis=1) # drop any biographies that are not populated biographies_clean = biographies_clean[biographies_clean['biographies'] != '-'] biographies_clean.head() ``` In order to be able to distinguish the directors and not create a duplicate effort because I research/review biographies for the same director twice because they are assigned to multiple companies, I will add a unique director ID based on the org_name (as displayed on the Reuters terminal) to the dataframe containing all directors. I will also add a unique id to the biographies_clean dataframe which considers the biographies to be unique. ``` # assign unique director id based on org_name all_directors_clean['unique_dir_id'] = all_directors_clean.groupby(['org_name']).ngroup() all_directors_clean.head() # assign unique director id based on org_name biographies_clean['unique_bio_id'] = biographies_clean.groupby(['biographies']).ngroup() biographies_clean.head() # drop any directors that do not have any of the years populated all_directors_clean['all_years'] = all_directors_clean.apply(lambda x: 'yes' if (x[['2011', '2012', '2013', '2014', '2015']] == 0).all() else 'no', axis=1) all_directors_rel = all_directors_clean[all_directors_clean['all_years'] == 'no'].copy() print(all_directors_rel.shape) print(all_directors_clean.shape) # manual name replacements to connect the right entries index = biographies_clean[biographies_clean['name'] == 'ahmet kent'].index biographies_clean.at[index, 'name'] = 'muhtar kent' index = all_directors_rel[all_directors_rel['name'] == 'ahmet kent'].index all_directors_rel.at[index, 'name'] = 'muhtar kent' all_directors_rel.at[index, 'org_name'] = 'mr. 
muhtar kent' # write all relevant directors to a csv file all_directors_rel.to_csv('/content/drive/My Drive/director-csr/all_directors_rel.csv') # this many unique directors are contained in my dataset print(all_directors_rel['unique_dir_id'].nunique()) # get only the unique names names_directors = all_directors_rel[['unique_dir_id']].copy() # show any duplicate biography entries in the biographies_clean dataframe dupe_bios_df = biographies_clean[biographies_clean.duplicated(subset='biographies', keep=False)].copy() print(dupe_bios_df.shape) dupe_bios_df.head() # check whether there are any instances where the bio is the same but the name is different unique_names_df = dupe_bios_df.groupby('unique_bio_id')['name'].apply(lambda x: x.unique()).reset_index() unique_names_df['multiple_names'] = unique_names_df['name'].apply(lambda x: 'yes' if len(x) > 1 else 'no') issue_df = unique_names_df[unique_names_df['multiple_names'] == 'yes'] print(issue_df.shape) ``` I will keep these duplicates and include them when matching the directors with the biographies. I can then later remove any duplicates. ## Biography matching Unfortunately, the biography and director data does not match well. I can only use the names of the directors to merge the two datasets. But the way the names are shown differs greatly, something including middle names or nick names and sometimes not and differing between the two dataset for the same person. Therefore, I will use several steps to merge the datasets together. After each step, I will do a rough manual scan whether this method works and then move the correctly matched pairs to a separate dataframe. With the rest, I will do another merging step and then repeat this process multiple times. I hope this method will save me some time that I would otherwise have to invest in gathering ~4000 bios manually from DEF 14As. 
```
def review_and_match(df_dir, df_bio, merge_on, sanity='ticker'):
    '''
    Merge the director and biography dataframes and split the result into
    probable matches and rows that need a manual review.

    Args:
        df_dir = df; directors dataframe (must contain 'unique_dir_id')
        df_bio = df; biographies dataframe
        merge_on = list; column name(s) present in both frames to merge on
        sanity = str or None; 'ticker' flags rows whose director ticker is
            not in the biography's ticker list, 'age' flags rows whose two
            reported ages differ by more than 5 years, anything else keeps
            only the duplicate check

    Returns:
        df_matched = df; merged rows that passed the checks
        df_review = df; merged rows flagged for manual review
    '''
    # merge both dataframes on specified columns
    df_step = pd.merge(df_dir, df_bio, how='inner', on=merge_on, suffixes=['_dir', '_bio'])
    # assign column signifying how this director bio match was created
    df_step['_'.join(merge_on)+'_match'] = 1
    # check whether this list contains any duplicates
    df_review = df_step[df_step.duplicated(subset=merge_on, keep=False)].copy()
    # do the sanity check based on the entered value
    # NOTE(review): when sanity is 'ticker' or 'age', df_review is REASSIGNED
    # below, so the duplicate rows collected above are discarded and only the
    # sanity-check failures are returned for review — confirm this is intended.
    if sanity == 'ticker':
        df_step['ticker_match'] = df_step.apply(lambda x: int(x['ticker_dir'] in x['all_tickers']), axis=1)
        # add ticker issues to df_review
        df_review = df_step[df_step['ticker_match'] == 0].copy()
    elif sanity == 'age':
        # treat missing ages (NaN on the director side, '-' on the bio side)
        # as 0 before comparing
        df_step['age_dir'] = df_step['age_dir'].apply(lambda x: 0 if pd.isna(x) else x)
        df_step['age_bio'] = df_step['age_bio'].apply(lambda x: 0 if x == '-' else x)
        df_step['age_match'] = df_step.apply(lambda x: 1 if abs(x['age_dir'] - x['age_bio']) <= 5 else 0, axis=1)
        # add age issues to df_review
        df_review = df_step[df_step['age_match'] == 0].copy()
    # remove the flagged directors from the merged dataframe ('indices' holds
    # unique_dir_id values, not positional indices)
    indices = df_review['unique_dir_id']
    df_step = df_step[~df_step['unique_dir_id'].isin(indices)]
    # define the dataframe with all the probably correct matches
    df_matched = df_step.copy()
    return df_matched, df_review
```

### Merging on name and isin

I will check the results with the tickers and review any duplicates of last names and isins.
``` # match both datasets on name and isin df_matched_name_isin, df_review_name_isin = review_and_match(all_directors_rel, biographies_clean, ['name', 'isin'], sanity='ticker') # this many matches were generated print(df_matched_name_isin.shape) # this many samples need to be reviewed print(df_review_name_isin.shape) ``` ### Merging on last_name and isin ``` # match both datasets on last_name and isin df_matched_last_name_isin, df_review_last_name_isin = review_and_match(all_directors_rel, biographies_clean, ['last_name', 'isin'], sanity='ticker') # this many matches were generated print(df_matched_last_name_isin.shape) # this many samples need to be reviewed print(df_review_last_name_isin.shape) ``` ### Merging on last_name and ticker ``` # match both datasets on last_name and isin df_matched_last_name_ticker, df_review_last_name_ticker = review_and_match(all_directors_rel, biographies_clean, ['last_name', 'ticker'], sanity=None) # this many matches were generated print(df_matched_last_name_ticker.shape) # this many samples need to be reviewed print(df_review_last_name_ticker.shape) ``` ### Merging on name and do sanity check with ticker ``` # match both datasets on name and ticker df_matched_name, df_review_name = review_and_match(all_directors_rel, biographies_clean, ['name'], sanity='ticker') # this many matches were generated print(df_matched_name.shape) # this many samples need to be reviewed print(df_review_name.shape) ``` ### Merging on name with age sanity check ``` # match both datasets on name and do age sanity check df_matched_name_age, df_review_name_age = review_and_match(all_directors_rel, biographies_clean, ['name'], sanity='age') # this many matches were generated print(df_matched_name_age.shape) # this many samples need to be reviewed print(df_review_name_age.shape) ``` ### Merging only on name I will not add the results of this merger to the other dataframes, but rather, I will check which ones are already included in the matched and review 
dataframes and the remaining ones I will add to the review dataframe. ``` # match both datasets on name and do age sanity check df_matched_name_none, df_review_name_none = review_and_match(all_directors_rel, biographies_clean, ['name'], sanity=None) # this many matches were generated print(df_matched_name_none.shape) # this many samples need to be reviewed print(df_review_name_none.shape) ``` ### Putting the individual matching dataframes together ``` # drop the columns that are not in every dataframe same_cols = list(set(df_matched_name_isin) & set(df_matched_last_name_isin) & set(df_matched_name) & set(df_matched_name_age) & set(df_matched_last_name_ticker)) df_matched_name_isin = df_matched_name_isin[same_cols].copy() df_matched_last_name_isin = df_matched_last_name_isin[same_cols].copy() df_matched_name = df_matched_name[same_cols].copy() df_matched_name_age = df_matched_name_age[same_cols].copy() df_matched_last_name_ticker = df_matched_last_name_ticker[same_cols].copy() df_matched_name_none = df_matched_name_none[same_cols].copy() same_cols = list(set(df_review_name_isin) & set(df_review_last_name_isin) & set(df_review_name) & set(df_review_name_age) & set(df_review_last_name_ticker)) df_review_name_isin = df_review_name_isin[same_cols].copy() df_review_last_name_isin = df_review_last_name_isin[same_cols].copy() df_review_name = df_review_name[same_cols].copy() df_review_name_age = df_review_name_age[same_cols].copy() df_review_last_name_ticker = df_review_last_name_ticker[same_cols].copy() df_review_name_none = df_review_name_none[same_cols].copy() # append both matched data frames df_matched_all = df_matched_name.append([df_matched_last_name_isin, df_matched_name_isin, df_matched_name_age, df_matched_last_name_ticker]) df_matched_all.drop_duplicates(subset=['unique_dir_id', 'unique_bio_id'], inplace=True) print(df_matched_all.shape) ``` I will check which ones of the df_matched_name_none dataframe samples are included in the df_matched_all dataframe ``` # 
which of the samles matched only on name are already included in the all matched dataframe indices = df_matched_all['unique_dir_id'] df_manual_review = df_matched_name_none[~df_matched_name_none['unique_dir_id'].isin(indices)] # add the entries not in the df_matched_all dataframe to the review dataframe df_review_name_none = df_review_name_none.append(df_manual_review) df_review_name_none.drop_duplicates(subset=['unique_dir_id', 'unique_bio_id'], inplace=True) df_review_name_none.shape # append all review dataframes df_review_all = df_review_name_isin.append([df_review_last_name_isin, df_review_name, df_review_name_age, df_review_last_name_ticker, df_review_name_none]) df_review_all.drop_duplicates(subset=['unique_dir_id', 'unique_bio_id'], inplace=True) # remove any entries from the review data set that are already in the matched dataset indices = df_matched_all['unique_dir_id'] df_review_all = df_review_all[~df_review_all['unique_dir_id'].isin(indices)] df_review_all.shape ``` ### Manually reviewing the flagged matches in the review dataframe ``` # sort the dataframe by original director name df_review_all.sort_values(by=['org_name_dir'], inplace=True) # add ticker, isin, last_name, and name columns again df_review_merged = pd.merge(all_directors_rel[['name', 'ticker', 'last_name', 'isin', 'unique_dir_id']], df_review_all, how='right', on='unique_dir_id', suffixes=['_dir', '_review']) df_review_merged = pd.merge(biographies_clean[['name', 'ticker', 'last_name', 'isin', 'unique_bio_id']], df_review_merged, how='right', on='unique_bio_id', suffixes=['_bio', '_review']) # rearrange the columns new_order = ['name_review', 'last_name_review', 'org_name_dir', 'comp_name_dir', 'ticker_review', 'org_name_bio', 'comp_name_bio', 'prim_comp', 'age_bio', 'age_dir','biographies', 'unique_dir_id', 'unique_bio_id', 'first_name', 'nick_name', 'middle_name', 'last_name_bio'] rest_cols = [x for x in df_review_merged.columns if x not in new_order] all_cols = new_order + rest_cols 
df_review_merged = df_review_merged[all_cols] df_review_all.shape # write this dataframe to excel file for manual review df_review_merged.to_excel('/content/drive/My Drive/director-csr/dir_bio_manual_review.xlsx', sheet_name='review') ``` I started to review these 615 directors, however, most of them were not a match. Therefore, I will continue my manual research of the biographies. ## Writing the basic matching on name column to excel ``` # check the old uncleaned biographies for name biographies_old[biographies_old['name'].str.contains('schwarzman')] # check the cleaned biographies for name biographies_old_clean[biographies_old_clean['name'] == 'wayne daley'] # merge both dataframes on the name column dir_bio_df = pd.merge(all_directors_rel, biographies_old_clean, how='left', on='name') print(dir_bio_df['biographies'].isnull().sum(), 'are entries without biographies') print(dir_bio_df.shape) ``` It is important to note that these 4867 directors are not necessarily really unique. In the case of director 'thomas brown', there are three different people with this name and different biographies. However, because I cannot (easily) distinguish which one of these really belongs to a specific company, all three people will be added per company and I need to manually determine which ones of these is the correct biography. I will do this during the process of reviewing DEF 14As for missing biographies and csr committees. ``` # check how many directors (incl. duplicates) are sitting on the company boards unique_bios = dir_bio_df.drop_duplicates(subset=['org_name_x', 'comp_name_x']) unique_bios.shape # number of unique biographies which are available unique_bios = unique_bios.drop_duplicates(subset='biographies') unique_bios.shape ``` This sample includes two companies which are not relevant for my investigation because they were only part of the S&P 500 in 2010. Therefore, I will remove them just to see how many biographies relate to all relevant companies. 
But I will keep them in the dir_bio_df dataframe in order not to alter the code that is based on this dataframe in other notebooks. The irrelevant companies are itt and mdp (those are their tickers). ``` # check how many biographies are included in the S&P Capital IQ dataset without # the irrelevant companies included unique_bios[~unique_bios['ticker_x'].isin(['itt', 'mdp'])].shape # remove any duplicate entries where the name, biography and comp_name are the same dir_bio_df = dir_bio_df[~dir_bio_df.duplicated(subset=['name', 'biographies', 'comp_name_x'])] dir_bio_df.shape dir_bio_df.head() # write the dir_bio_df dataframe to an excel file dir_bio_df.to_excel('/content/drive/My Drive/director-csr/director_bios_all.xlsx', sheet_name='bios') ```
github_jupyter
## Data Mining and Machine Learning ### Logistic Regression: The ROC curve ### Libraries:scikit-learn and h2o #### Edgar Acuna ``` import numpy as np import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import roc_auc_score from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import classification_report from sklearn.metrics import roc_curve, auc import matplotlib.pyplot as plt import h2o from h2o.estimators.glm import H2OGeneralizedLinearEstimator h2o.connect() h2o.no_progress() ``` ### I Regresion Logistica para Diabetes usando scikit learn ``` url= "http://academic.uprm.edu/eacuna/diabetes.dat" names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class'] data = pd.read_table(url, names=names,header=None) #La variable de respuesta y debe ser binaria (0,1) y=data['class']-1 X=data.iloc[:,0:8] #Haciendo la regresion logistica ya calculando su precision model = LogisticRegression() model = model.fit(X, y) print model.coef_ # Tasa de precision model.score(X, y) predictions = model.predict(X) print(classification_report(y, predictions)) ``` ### II. ROC curve using scikit-learn ``` #Hallando las probabilidades posteriores probs = model.predict_proba(X) preds = probs[:,1] false_positive_rate, true_positive_rate, thresholds = roc_curve(y, preds) roc_auc = auc(false_positive_rate, true_positive_rate) plt.title('The ROC curve') plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.2f'% roc_auc) plt.legend(loc='lower right') plt.plot([0,1],[0,1],'r--') plt.xlim([0,1.0]) plt.ylim([0,1.0]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() ``` The AUC value represents the area under the curve ROC (azul). 
Ia classifier has an AUC between .9 and 1 then its predictions are very good, if the AUC lies between .8 y .89 its prediction are good. A poor classifier is one with an AUC less than de .60 de AUC. ### III Intersection of the sensitivity and specifity curves to choose the threshold ``` plt.title('Choice of the optimal Threshold') plt.plot(thresholds, true_positive_rate, 'b',label='Sensitivity') plt.legend(loc='lower right') plt.plot(thresholds, 1-false_positive_rate,'r--') plt.xlim([0,1.0]) plt.ylim([0,1.0]) plt.ylabel('Sensitivity ') plt.xlabel('Probability') plt.show() ``` El threshold que deberia ser usado en lugar de p=.5 para hacer la clasificacion sera aprox .35 ### IV. ROC curve using H20 ``` diabetes = h2o.import_file("https://academic.uprm.edu/eacuna/diabetes.dat") myx=['C1','C2','C3','C4','C5','C6','C7','C8'] diabetes['C9']=diabetes['C9'].asfactor() myy='C9' glm_model = H2OGeneralizedLinearEstimator(family= "binomial", lambda_ = 0, compute_p_values = True) glm_model.train(myx, myy, training_frame= diabetes) glm_model glm_model._model_json['output']['coefficients_table'] perf = glm_model.model_performance() #train=True is the default, so it's not needed perf.plot() #Effect after using the threshokd #Number of instances assigned to class 1 using p=.5 dp=data[preds>.5] dp['class'].value_counts() #Accuracy with p=.5 595*100/768.0 #Number of instances assigned to class 1 using p=.35 dp1=data[preds>.35] dp1['class'].value_counts() #New accuracy 591*100/768.0 ```
github_jupyter
# Matplotlib 各类图示 注: 本文内容来自于 https://matplotlib.org/gallery/index.html ## 折线图 ### 折线图 - xy 生成折线图 ``` import matplotlib.pyplot as plt import numpy as np import random t=[1,2,3,4,5] s = [i + i*random.randint(10,31) for i in t] fig, ax = plt.subplots() ax.plot(t, s) ax.set(xlabel='time (s)', ylabel='voltage (mV)', title='About as simple as it gets, folks') ax.grid() plt.show() ``` ### 弯曲的线段 - 数量更多的点看起来弯曲 ``` import matplotlib.pyplot as plt import numpy as np t = np.arange(0.0, 2.0, 0.01) s = 1 + np.sin(2 * np.pi * t) fig, ax = plt.subplots() ax.plot(t, s) ax.set(xlabel='time (s)', ylabel='voltage (mV)', title='About as simple as it gets, folks') ax.grid() plt.show() ``` ### 虚线 ``` import numpy as np import matplotlib.pyplot as plt x = np.linspace(0, 10, 500) y = np.sin(x) fig, ax = plt.subplots() # Using set_dashes() to modify dashing of an existing line line1, = ax.plot(x, y, label='Using set_dashes()') line1.set_dashes([2, 2, 10, 2]) # 2pt line, 2pt break, 10pt line, 2pt break ax.legend() plt.show() ``` ### 覆盖线条 ``` import matplotlib.pyplot as plt import matplotlib as mpl import datetime import matplotlib.dates as mdate import numpy as np import random DAYS = 365 def initdata(): timedelta = datetime.timedelta(days = 1) startdate = datetime.date.today() xdate = [startdate+i*timedelta for i in range(DAYS)] ycapital = [3000] for _ in range(DAYS-1): ycapital.append(ycapital[-1]+random.uniform(-1, 1.1)) return xdate, ycapital xdate, ydata = initdata() xlims = mdate.date2num([xdate[0], xdate[-1]]) # 设置值,控制显示颜色深浅 # 第二个参数 yv 是每行值相同,逐行增加,所以设定了水平方向渐变 _, yv = np.meshgrid(np.linspace(0,1,210), np.linspace(0,1,90)) fig, ax = plt.subplots(figsize=(21,9)) ax.plot(xdate, ydata, 'r-', label = 'Test area line', linewidth=2) # zv: array-like or PIL image, # origin:Place the [0,0] index of the array in the upper left or lower left corner of the axes #ax.imshow(yv, cmap=mpl.cm.Blues) extent = [xlims[0], xlims[1], min(ydata), max(ydata)] ax.imshow(yv, cmap=mpl.cm.Blues, 
origin='lower',alpha = 0.5, aspect = 'auto', extent = extent) ax.fill_between(xdate, ydata, max(ydata), color='white') # 设置刻度值颜色 plt.yticks(color = 'red') # 设置 y 轴百分比显示,注意将 y 轴数据乘以 100 #ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.2f%%')) # 颜色,旋转刻度 plt.xticks(color = 'gray',rotation = 15) # 指定字体,大小,颜色 fontdict = {"family":"Times New Roman", 'size':12, 'color':'b'} #Times New Roman, Arial plt.title("random account value", fontdict = fontdict) plt.xlabel("date(day)", fontdict = fontdict) plt.ylabel("account value", fontdict = fontdict) # 去掉边框 top left right bottom ax.spines['top'].set_visible(False) ax.spines['left'].set_visible(False) ax.spines['right'].set_visible(False) # 时间显示样式 ax.xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d')) #设置时间刻度间隔 timedelta = (xdate[-1] - xdate[0]) / 10 # 这种方式不能保证显示最后一个日期 plt.xticks(mdate.drange(xdate[0], xdate[-1], timedelta)) # 分成 10 份 delta = round(len(xdate) / 9) plt.xticks([xdate[i*delta] for i in range(9)] + [xdate[-1]]) plt.tick_params(left = 'off') # 设置刻度的朝向,宽,长度 plt.tick_params(which = 'major', direction = 'out', width = 0.2, length = 5) # in, out or inout # 设置刻度显示在哪个方向上 #tick_params(labeltop='on',labelbottom='off',labelleft='off',labelright='off') # 设置 y 轴方向的网络线 plt.grid(axis = 'y', color = 'lightgray', linestyle = '-', linewidth = 0.5) plt.show() ``` ## 柱状图 ``` import matplotlib import matplotlib.pyplot as plt import numpy as np labels = ['G1', 'G2', 'G3', 'G4', 'G5'] A = [20, 34, 30, 35, 27] B = [31, 32, 25, 30, 33] x = np.arange(len(labels)) # the label locations width = 0.35 # the width of the bars fig, ax = plt.subplots() rects1 = ax.bar(x - width/2, A, width, label='Men') rects2 = ax.bar(x + width/2, B, width, label='Men') ax.set_ylabel('Scores') ax.set_title('Scores by group and gender') ax.set_xticks(x) ax.set_xticklabels(labels) ax.legend() def autolabel(rects): """Attach a text label above each bar in *rects*, displaying its height.""" for rect in rects: height = rect.get_height() 
ax.annotate('{}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points", ha='center', va='bottom') autolabel(rects1) autolabel(rects2) fig.tight_layout() plt.show() ``` ## 饼图 ``` import matplotlib.pyplot as plt # Pie chart, where the slices will be ordered and plotted counter-clockwise: labels = 'Frogs', 'Hogs', 'Dogs', 'Logs' sizes = [15, 30, 45, 10] explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs') fig1, ax1 = plt.subplots() ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. plt.show() ``` ## 散点图 ``` import numpy as np import matplotlib.pyplot as plt import matplotlib.cbook as cbook # Load a numpy record array from yahoo csv data with fields date, open, close, # volume, adj_close from the mpl-data/example directory. The record array # stores the date as an np.datetime64 with a day unit ('D') in the date column. 
with cbook.get_sample_data('goog.npz') as datafile: price_data = np.load(datafile)['price_data'].view(np.recarray) price_data = price_data[-250:] # get the most recent 250 trading days delta1 = np.diff(price_data.adj_close) / price_data.adj_close[:-1] # Marker size in units of points^2 volume = (15 * price_data.volume[:-2] / price_data.volume[0])**2 close = 0.003 * price_data.close[:-2] / 0.003 * price_data.open[:-2] fig, ax = plt.subplots() ax.scatter(delta1[:-1], delta1[1:], c=close, s=volume, alpha=0.5) ax.set_xlabel(r'$\Delta_i$', fontsize=15) ax.set_ylabel(r'$\Delta_{i+1}$', fontsize=15) ax.set_title('Volume and percent change') ax.grid(True) fig.tight_layout() plt.show() ``` ## 子图 ``` import numpy as np import matplotlib.pyplot as plt # Data for plotting t = np.arange(0.01, 20.0, 0.01) # Create figure fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) # log y axis ax1.semilogy(t, np.exp(-t / 5.0)) ax1.set(title='semilogy') ax1.grid() # log x axis ax2.semilogx(t, np.sin(2 * np.pi * t)) ax2.set(title='semilogx') ax2.grid() # log x and y axis ax3.loglog(t, 20 * np.exp(-t / 10.0), basex=2) ax3.set(title='loglog base 2 on x') ax3.grid() # With errorbars: clip non-positive values # Use new data for plotting x = 10.0**np.linspace(0.0, 2.0, 20) y = x**2.0 ax4.set_xscale("log", nonposx='clip') ax4.set_yscale("log", nonposy='clip') ax4.set(title='Errorbars go negative') ax4.errorbar(x, y, xerr=0.1 * x, yerr=5.0 + 0.75 * y) # ylim must be set after errorbar to allow errorbar to autoscale limits ax4.set_ylim(bottom=0.1) fig.tight_layout() plt.show() ``` ## 文字 ``` fig, ax = plt.subplots(figsize=(3, 3)) t = np.arange(0.0, 5.0, 0.01) s = np.cos(2*np.pi*t) # Plot a line and add some simple annotations line, = ax.plot(t, s) ax.annotate('figure pixels', xy=(10, 10), xycoords='figure pixels') ax.annotate('figure points', xy=(80, 80), xycoords='figure points') ax.annotate('figure fraction', xy=(.025, .975), xycoords='figure fraction', horizontalalignment='left', 
verticalalignment='top', fontsize=20) # The following examples show off how these arrows are drawn. ax.annotate('point offset from data', xy=(2, 1), xycoords='data', xytext=(-15, 25), textcoords='offset points', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='right', verticalalignment='bottom') ax.annotate('axes fraction', xy=(3, 1), xycoords='data', xytext=(0.8, 0.95), textcoords='axes fraction', arrowprops=dict(facecolor='black', shrink=0.05), horizontalalignment='right', verticalalignment='top') # You may also use negative points or pixels to specify from (right, top). # E.g., (-10, 10) is 10 points to the left of the right side of the axes and 10 # points above the bottom ax.annotate('pixel offset from axes fraction', xy=(1, 0), xycoords='axes fraction', xytext=(-20, 20), textcoords='offset pixels', horizontalalignment='right', verticalalignment='bottom') ax.set(xlim=(-1, 5), ylim=(-3, 5)) ```
github_jupyter
``` from matplotlib import pyplot as plt import numpy as np import pandas as pd import xarray as xr import gcsfs from scipy import signal # Experiments to process experiment_ids = ['historical', 'ssp370'] # Seasons to process seasons = ['all','DJF','JJA'] # Time slices (future) to process time_slices = ([['1991','2020'], ['2001','2030'], ['2011','2040'], ['2021','2050'], ['2031','2060'], ['2041','2070'], ['2051','2080'], ['2061','2090'], ['2071','2100']]) tresults_new=pd.read_csv('tresults.csv',index_col=0) tresults_new tresults_dict=tresults_new.to_dict() dt=np.zeros((len(time_slices),len(tresults_dict)))*np.nan modelcount=0 modelnames=[] for model in tresults_dict: modelnames.append(model) timecount=0 for timeperiod in tresults_dict[model]: dt[timecount,modelcount]=tresults_dict[model][timeperiod] timecount=timecount+1 modelcount=modelcount+1 dt plt.plot(dt) plt.title('temperature change for each model') plt.legend(modelnames) plt.savefig('../figures/temperature.png') dsdG_djf_new=pd.read_csv('dsdG_djf.csv',index_col=0) dsdG_djf_new dsdG_djf=dsdG_djf_new.to_dict() dsdG_djf_np=np.zeros((len(time_slices),len(dsdG_djf)))*np.nan modelcount=0 for model in tresults_dict: timecount=0 for timeperiod in dsdG_djf[model]: dsdG_djf_np[timecount,modelcount]=dsdG_djf[model][timeperiod] timecount=timecount+1 modelcount=modelcount+1 dsdG_djf_np plt.plot((dsdG_djf_np-1)*100) plt.title('DJF precip std change for each model') plt.legend(modelnames) plt.savefig('../figures/precip.png') plt.plot(dt,(dsdG_djf_np-1)*100) plt.title('DJF precip std change for each model') plt.legend(modelnames) plt.xlabel('Temperature change [K]') plt.ylabel('Precip change (%)') ``` ### make sure that any nan is matched everywhere ``` # make dt nan wherever any other field has a nan dt[np.where(np.isnan(dsdG_djf_np))]=np.nan #dt[np.where(np.isnan(dsdG_jja_etc_np))]=np.nan # than, make all fields nan where dt is nan. this should cover it. 
dsdG_djf_np[np.where(np.isnan(dt))]=np.nan #dsdG_jja_etc_np[np.where(np.isnan(dt))]=np.nan for i in range(10): print(modelnames[i]) print(dt[:,i]) print(dsdG_djf_np[:,i]) print np.nanmean(dt,axis=1) np.nanmean((dsdG_djf_np-1)*100,axis=1) plt.plot(np.nanmean(dt,axis=1),np.nanmean((dsdG_djf_np-1)*100,axis=1),marker='o') plt.title('DJF precip std change for each model') plt.xlabel('Temperature change [K]') plt.ylabel('Precip change (%)') plt.savefig('../figures/variabilitychange.png') ```
github_jupyter
## PyMC3 example ``` import numpy as np import pandas as pd import pymc3 as pm %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt import seaborn as sns sns.set() import sys print("Python version:", sys.version) print("Numpy version:", np.__version__) print("PyMC3 version:", pm.__version__) df = pd.read_csv("intervals.csv") df.head() ``` ![](reich_figure_1.png) Figure 1 from Reich et al. (2009), _Estimating incubation period distributions with coarse data_. ``` Tmin = np.array(np.maximum(df["SL"]-df["ER"], 0)) Tmax = np.array(df["SR"]-df["EL"]) plt.hist(Tmin); plt.hist(Tmax); xs = np.linspace(0, 75, 500) ys = np.zeros(len(xs)) nCases = len(Tmin) for i in range(nCases): ys[(xs >= Tmin[i]) & (xs <= Tmax[i])] += 1 / (nCases * (Tmax[i]-Tmin[i])) plt.plot(xs, ys); Tavgs = (Tmin + Tmax) / 2 %%time with pm.Model() as model: μ = pm.Uniform('μ', lower=-25, upper=25) σ = pm.Uniform('σ', lower=0, upper=25) T = pm.Lognormal('T', mu=μ, sigma=σ, observed=Tavgs) trace = pm.sample(10**4) pm.plot_trace(trace); pm.plot_posterior(trace); ``` This is assuming we have observations $\boldsymbol{T} = (T_1, \dots, T_n)$ which gives us a likelihood of $$ L(\mu, \sigma \mid \boldsymbol{T}) = \prod_{i=1}^n f(T_i; \mu, \sigma ) $$ where $f(x; \mu, \sigma)$ is the p.d.f. of the $\mathsf{LogNormal}(\mu, \sigma^2)$ distribution. However we don't have observations, just intervals. Say each unobserved period fell into $T_i \in [T_i^-, T_i^+]$. Our likelihood becomes $$ L(\mu, \sigma \mid \boldsymbol{T}^{\,-}, \boldsymbol{T}^{\,+}) = \prod_{i=1}^n \bigl[ F(T_i^+ ; \mu, \sigma) - F(T_i^-; \mu, \sigma) \bigr] $$ where $F(x; \mu, \sigma)$ is the c.d.f. of the $\mathsf{LogNormal}(\mu, \sigma^2)$ distribution. ``` import theano.tensor as tt # Taken from PyMC3's pymc3/distributions/dist_math.py file # starting at line 346. def zvalue(x, sigma, mu): """ Calculate the z-value for a normal distribution. 
""" return (x - mu) / sigma # Taken from PyMC3's pymc3/distributions/continuous.py file # starting at line 1849. def cdf(x, mu, sigma): """ Compute the log of the cumulative distribution function for Lognormal distribution at the specified value. Parameters ---------- x: numeric Value(s) for which log CDF is calculated. If the log CDF for multiple values are desired the values must be provided in a numpy array or theano tensor. Returns ------- TensorVariable """ z = zvalue(np.log(x), mu=mu, sigma=sigma) return tt.switch( tt.lt(z, -1.0), tt.erfcx(-z / tt.sqrt(2.)) / 2. * np.exp(-tt.sqr(z) / 2), tt.erfc(-z / tt.sqrt(2.)) / 2. ) ``` With `Potential` we have to add log-terms to the likelihood. So $$ \log \bigl[ L(\mu, \sigma \mid \boldsymbol{T}^{\,-}, \boldsymbol{T}^{\,+}) \bigr] = \sum_{i=1}^n \log \bigl[ F(T_i^+ ; \mu, \sigma) - F(T_i^-; \mu, \sigma) \bigr] \,. $$ ``` %%time with pm.Model() as model: μ = pm.Uniform('μ', lower=-25, upper=25) σ = pm.Uniform('σ', lower=0, upper=25) pm.Potential('T', tt.sum(tt.log( cdf(Tmax, μ, σ) - cdf(Tmin, μ, σ) ))) trace = pm.sample(10**5, step=pm.Metropolis()) pm.stats.ess(trace["μ"]), pm.stats.ess(trace["σ"]) pm.plot_trace(trace); pm.plot_posterior(trace); trace["μ"].mean() trace["σ"].mean() ```
github_jupyter
# Developing an AI application Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below. <img src='assets/Flowers.png' width=500px> The project is broken down into multiple steps: * Load and preprocess the image dataset * Train the image classifier on your dataset * Use the trained classifier to predict image content We'll lead you through each part which you'll implement in Python. When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new. First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here. 
# Acknowledgement

### **Special thanks** to *Ioannis Breier*, a super mentor and *Juan Delgado*, a phenomenal teacher

### It would not have been possible to complete this course and this final project without your guidance and assistance. Thank you for the privilege to learn from you, you are simply wonderful !!!

# Start from the next cell below for classifier training

Override the default architecture name, learning rate, dropout rate and epochs in the cell *"set args"*

Training in CPU mode is possible but not practical, please run training in GPU mode

*tip: if utilizing the Udacity lab environment, save all your changes prior to clicking the GPU "Enable/Disable" button to avoid loss of unsaved changes*

### Start from the cell "Workspace Init" if you want to start with prediction using a saved checkpoint

```
# Imports here
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
from datetime import datetime
import json
import os
import glob
from workspace_utils import active_session
```

## Load the data

Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.

The validation and testing sets are used to measure the model's performance on data it hasn't seen yet.
For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size. The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1. ``` data_dir = 'flowers' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' savedir = 'chksav' # TODO: Define your transforms for the training, validation, and testing sets norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = {'train': transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), norm]), 'valid': transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), norm]), 'test': transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), norm]) } # TODO: Load the datasets with ImageFolder image_datasets = {'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test'])} # TODO: Using the image datasets and the trainforms, define the dataloaders dataloaders = {'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=64, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], batch_size=64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], batch_size=64, shuffle=True)} ``` ### Label mapping You'll 
also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers. ``` import json with open('cat_to_name.json', 'r') as f: cat_to_name = json.load(f) ``` # Building and training the classifier Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features. We're going to leave this part up to you. If you want to talk through it with someone, chat with your fellow students! You can also ask questions on the forums or join the instructors in office hours. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do: * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use) * Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout * Train the classifier layers using backpropagation using the pre-trained network to get the features * Track the loss and accuracy on the validation set to determine the best hyperparameters We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal! When training make sure you're updating only the weights of the feed-forward network. 
You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project. ``` device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') device device.type # set up an arg object to store my stuff class myArgs(dict): pass args = myArgs() ``` ## set args set your desired architecture, learning rate, drop out rate, epochs, interval for printing results during training ``` # set args attributes args.z_arch = 'densenet121' args.z_hid = None args.z_lrate = 0.001 args.z_dpout = 0.3 args.z_epochs = 10 args.z_print = 40 # list all my z_ attributes in args #print('Args list:\n',[i for i in dir(args) if i.startswith('z_')]) # TODO: Build and train your network def build_classifier(model, args): # Freeze parameters so we don't backprop through them for param in model.parameters(): param.requires_grad = False in_size = { 'densenet121': 1024, 'densenet161': 2208, 'vgg16': 25088, } hid_size = { 'densenet121': [500], 'densenet161': [1000, 500], 'vgg16': [4096, 4096,1000], } if args.z_dpout: p = args.z_dpout else: p = 0.5 output_size = len(dataloaders['train'].dataset.classes) relu = nn.ReLU() dropout = nn.Dropout(p) output = nn.LogSoftmax(dim=1) if args.z_hid: h_list = args.z_hid.split(',') h_list = list(map(int, h_list)) # convert list from string to int else: h_list = hid_size[args.z_arch] h_layers = [nn.Linear(in_size[args.z_arch], h_list[0])] h_layers.append(relu) if args.z_arch[:3] == 'vgg': h_layers.append(dropout) if len(h_list) > 1: h_sz = zip(h_list[:-1], h_list[1:]) for h1,h2 in h_sz: h_layers.append(nn.Linear(h1, h2)) h_layers.append(relu) if args.z_arch[:3] == 'vgg': h_layers.append(dropout) last = nn.Linear(h_list[-1], output_size) h_layers.append(last) h_layers.append(output) print(h_layers) model.classifier = nn.Sequential(*h_layers) 
return model # Build a classifier # load a pre-trained model and override with own classifier model = models.__dict__[args.z_arch](pretrained=True) model = build_classifier(model, args) print('\nmodel architecture:', args.z_arch, '\n') model.classifier def validate(model, dataloaders, criterion): valid_loss = 0 accuracy = 0 for images, labels in iter(dataloaders['valid']): images, labels = images.to(device), labels.to(device) output = model.forward(images) valid_loss += criterion(output, labels).item() ps = torch.exp(output) equality = (labels.data == ps.max(dim=1)[1]) accuracy += equality.type(torch.FloatTensor).mean() return valid_loss, accuracy # training def train(model, dataloaders, optimizer, criterion, epochs=2, print_freq=20, lr=0.001): if torch.cuda.is_available(): print('*** training classifier in GPU mode ...\n') else: print('*** training classifier in CPU mode ...\n') model.to(device) start_time = datetime.now() print('epochs:', epochs, ', print_freq:', print_freq, ', lr:', lr, '\n') steps = 0 for e in range(epochs): model.train() running_loss = 0 for images, labels in iter(dataloaders['train']): steps +=1 images, labels = images.to(device), labels.to(device) optimizer.zero_grad() output = model.forward(images) loss = criterion(output, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % print_freq == 0: model.eval() with torch.no_grad(): valid_loss, accuracy = validate(model, dataloaders, criterion) print('Epoch: {}/{}..'.format(e+1, epochs), 'Training Loss: {:.3f}..'.format(running_loss/print_freq), 'Validation Loss: {:.3f}..'.format(valid_loss/len(dataloaders['valid'])), 'Validation Accuracy: {:.3f}%'.format(accuracy/len(dataloaders['valid']) * 100) ) running_loss = 0 model.train() elapsed = datetime.now() - start_time print('\n*** classifier training done ! 
\nElapsed time[hh:mm:ss.ms]: {}'.format(elapsed)) return model # start training model criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=args.z_lrate) with active_session(): model = train(model, dataloaders, optimizer, criterion, args.z_epochs, args.z_print, args.z_lrate) ``` ## Testing your network It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well. ``` # TODO: Do validation on the test set def test(model, dataloaders, criterion): print('*** validating testset ...\n') model.cpu() model.eval() test_loss = 0 total = 0 match = 0 start_time = datetime.now() with torch.no_grad(): for images, labels in iter(dataloaders['test']): model, images, labels = model.to(device), images.to(device), labels.to(device) output = model.forward(images) test_loss += criterion(output, labels).item() total += images.shape[0] equality = labels.data == torch.max(output, 1)[1] match += equality.sum().item() model.test_accuracy = match/total * 100 print('Test Loss: {:.3f}'.format(test_loss/len(dataloaders['test'])), 'Test Accuracy: {:.2f}%'.format(model.test_accuracy)) elapsed = datetime.now() - start_time print('\n*** test validation done ! \nElapsed time[hh:mm:ss.ms]: {}'.format(elapsed)) with active_session(): test(model, dataloaders, criterion) ``` ## Save the checkpoint Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. 
You can attach this to the model as an attribute which makes inference easier later on. ```model.class_to_idx = image_datasets['train'].class_to_idx``` Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now. ``` # TODO: Save the checkpoint model = model.cpu() # back to CPU mode post training model.class_to_idx = dataloaders['train'].dataset.class_to_idx checkpoint = {'arch': args.z_arch, 'classifier': model.classifier, 'epochs': args.z_epochs, 'dropout': args.z_dpout, 'lrate': args.z_lrate, 'train_sz': dataloaders['train'].batch_size, 'valid_sz': dataloaders['valid'].batch_size, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'class_to_idx' : model.class_to_idx } if not os.path.isdir(savedir): os.makedirs(savedir) chkpt = datetime.now().strftime('%Y%m%d_%H%M%S') + '_' + args.z_arch + '.pth' checkpt = os.path.join(savedir, chkpt) torch.save(checkpoint, checkpt) print('*** checkpoint: ', chkpt, ', saved to: ', os.path.dirname(checkpt)) ``` ## Workspace Init If there has been a switch from a GPU to CPU workspace on Udacity lab platform or if you start with prediction using a saved checkpoint without executing cells from the top to run training first, then ### start from the next cell below to run prediction with a saved checkpoint ``` # Initialize workspace try: args except NameError: # re-import required packages %matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt import torch import numpy as np from torch import nn from torch import optim import torch.nn.functional as F from torchvision import datasets, transforms, models from PIL import Image from 
datetime import datetime import json import os import glob from workspace_utils import active_session # set up required file directories data_dir = 'flowers' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' savedir = 'chksav' # re-define your transforms for the training, validation, and testing sets normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = {'train': transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]), 'valid': transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]), 'test': transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]) } image_datasets = {'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test'])} dataloaders = {'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=64, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], batch_size=64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], batch_size=64, shuffle=True)} # re-load json class name mapper with open('cat_to_name.json', 'r') as f: cat_to_name = json.load(f) # set device device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device type:', device.type) # Re-instantiate args after exiting GPU mode class myArgs(dict): pass print('re-instantiate args...') args = myArgs() # get the last saved checkpoint if len(glob.glob(savedir+'/*.pth')) > 0 : checkpt = max(glob.glob(savedir+'/*.pth'), key=os.path.getctime) print('checkpoint:', checkpt, ' successfully loaded') else: checkpt = None print('\n*** no saved checkpoint to load !!!\n') ``` ## 
Loading the checkpoint At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network. ``` # TODO: Write a function that loads a checkpoint and rebuilds the model def load_checkpoint(filepath): # if no longer in GPU, force all tensors to be on CPU if device.type == 'cpu': checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage) else: checkpoint = torch.load(filepath) model = models.__dict__[checkpoint['arch']](pretrained=True) model.classifier = checkpoint['classifier'] model.class_to_idx = checkpoint['class_to_idx'] model.load_state_dict(checkpoint['state_dict']) args.z_arch = checkpoint['arch'] return model, args # load checkpoint if checkpt: model, args = load_checkpoint(checkpt) # check results print('model architecture:', args.z_arch, '\n') print('model.classifier:\n', model.classifier) else: print('\n*** stop !!! no saved checkpoint to load \n') # list all my z_ attributes in args #print('Args list:\n',[i for i in dir(args) if i.startswith('z_')]) ``` # Inference for classification Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like ```python probs, classes = predict(image_path, model) print(probs) print(classes) > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339] > ['70', '3', '45', '62', '55'] ``` First you'll need to handle processing the input image such that it can be used in your network. ## Image Preprocessing You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). 
It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.

First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.resize) methods. Then you'll need to crop out the center 224x224 portion of the image.

Color channels of images are typically encoded as integers 0-255, but the model expects floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.

As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.

And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
``` def process_image(image): ''' Scales, crops, and normalizes a PIL image for a PyTorch model, returns an Numpy array ''' # TODO: Process a PIL image for use in a PyTorch model pil_img=image sz = image.size h = min(image.size) w = max(image.size) #print('size:',sz, ', h:',h, ', w:',w) # calculate ratio_aspect using original height & width # chosen h is 256, ratio aspect for adjusted w is original w/h ratio_aspect = w/h # get indices of short and long sides x = image.size.index(min(image.size)) y = image.size.index(max(image.size)) # calc new size with short side 256 pixels keeping ratio aspect new_sz = [0, 0] new_sz[x] = 256 new_sz[y] = int(new_sz[x] * ratio_aspect) #print('new_sz:',new_sz, '\npre resized img:', pil_img) # resize base on short side of 256 pixels pil_img=image.resize(new_sz) #print('post resized image:', pil_img) # crop out the center 224x224 portion wid, hgt = new_sz #print('wid:', wid, ', hgt:', hgt) # calc left, top, right, bottom margin pos l_margin = (wid - 224)/2 t_margin = (hgt - 224)/2 r_margin = (wid + 224 )/2 b_margin = (hgt + 224)/2 #print('left:',l_margin, ', top:',t_margin, ', right:',r_margin, ', bottom:',b_margin) # crop the image pil_img=pil_img.crop((l_margin, t_margin, r_margin, b_margin)) #print('cropped img:', pil_img) # convert to np array for normalization purpose np_img = np.array(pil_img) print('np_img.shape',np_img.shape) np_img = np_img/255 mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) np_img = (np_img - mean)/std # transpose to get color channel to 1st pos np_img = np_img.transpose((2, 0, 1)) return np_img ``` To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions). 
``` def imshow(image, ax=None, title=None): if ax is None: fig, ax = plt.subplots() # PyTorch tensors assume the color channel is the first dimension # but matplotlib assumes is the third dimension image = image.transpose((1, 2, 0)) # Undo preprocessing mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = std * image + mean # Image needs to be clipped between 0 and 1 or it looks like noise when displayed image = np.clip(image, 0, 1) ax.imshow(image) return ax # confingure args to randomly select an image file, its class and path def pick_a_pic(dset_dir, dset_type, args): args.z_imgcls = np.random.choice(dataloaders[dset_type].dataset.classes) args.z_rndimg = np.random.choice(os.listdir(dset_dir + '/' + args.z_imgcls)) args.z_rndimgpth = dset_dir + '/' + args.z_imgcls + '/' + args.z_rndimg return args # pick an image from test set args = pick_a_pic(test_dir,'test', args) print('class:', args.z_imgcls,', image:', args.z_rndimg, '\npath:', args.z_rndimgpth,'\n') print('args parameters:\n',[i for i in dir(args) if i.startswith('z_')], '\n') # Pass a pic from test set to image_process to convert into torch FloatTensor with Image.open(args.z_rndimgpth) as image: np_img = process_image(image) imshow(np_img) # Restore image to original config with Image.open(args.z_rndimgpth) as image: plt.imshow(image) ``` ## Class Prediction Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values. To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. 
You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well. Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes. ```python probs, classes = predict(image_path, model) print(probs) print(classes) > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339] > ['70', '3', '45', '62', '55'] ```

```
def predict(image_path, cat_to_name, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model. '''
    # TODO: Implement the code to predict the class from an image file
    # NOTE(review): model.cpu() is immediately superseded by model.to(device)
    # below, so it only has an effect when `device` is the CPU — confirm intent.
    model.cpu()
    model.eval()
    pil_img = Image.open(image_path)
    # process_image returns a (3, 224, 224) normalized numpy array
    image = process_image(pil_img)
    image = torch.FloatTensor(image)
    model, image = model.to(device), image.to(device)
    print('\nori image.shape:', image.shape)
    image.unsqueeze_(0) # add a new dimension in pos 0
    print('new image.shape:', image.shape, '\n')
    output = model.forward(image)
    # get the top k classes of prob
    # exp() assumes the network outputs log-probabilities (log_softmax) —
    # TODO confirm; `.data` is a legacy pre-0.4 autograd accessor.
    ps = torch.exp(output).data[0]
    topk_prob, topk_idx = ps.topk(topk)
    # bring back to cpu and convert to numpy
    topk_probs = topk_prob.cpu().numpy()
    topk_idxs = topk_idx.cpu().numpy()
    # map topk_idx to classes in model.class_to_idx
    # (class_to_idx maps class label -> index, so invert it here)
    idx_class={i: k for k, i in model.class_to_idx.items()}
    topk_classes = [idx_class[i] for i in topk_idxs]
    # map class to class name
    topk_names = [cat_to_name[i] for i in topk_classes]
    print('*** Top ', topk, ' classes ***')
    print('class names: ', topk_names)
    print('classes: ', topk_classes)
    print('probabilities: ', topk_probs)
    return topk_classes, topk_names, topk_probs

# Call predict() to predict the class (or classes) of an image
args.z_topk = 5
#args.z_imgpath = test_dir + '/10/image_07090.jpg'
args = pick_a_pic(test_dir,'test', args)
print('image path:', args.z_rndimgpth, '\n')
with
active_session(): start_time = datetime.now() topk_classes, topk_names, topk_probs = predict(args.z_rndimgpth, cat_to_name, model) elapsed = datetime.now() - start_time print('\n*** predict elapsed time[hh:mm:ss.ms]: {}'.format(elapsed)) ``` ## Sanity Checking Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this: <img src='assets/inference_example.png' width=300px> You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above. ``` # TODO: Display an image along with the top 5 classes def show_classifier(model, imgcls, imgpth, cat_to_name): topk_classes, topk_names, topk_probs = predict(imgpth, cat_to_name, model) img = Image.open(imgpth) # get img name img_name = cat_to_name[imgcls] fig, (ax1, ax2) = plt.subplots(figsize=(10,4), ncols=2) ax1.set_title(img_name) ax1.imshow(img) ax1.axis('off') y_pos = np.arange(len(topk_probs)) ax2.barh(y_pos, topk_probs) ax2.set_yticks(y_pos) ax2.set_yticklabels(topk_names) ax2.invert_yaxis() ax2.set_xlim(0, 1.1) ax2.set_title('Class Probability') plt.tight_layout() plt.show() # display a randomly selected image with its top 5 classes probabilities args = pick_a_pic(test_dir,'test', args) print('image path:', args.z_rndimgpth, '\n') show_classifier(model, args.z_imgcls, args.z_rndimgpth, cat_to_name) ``` # References ### AIPNP Neural Network Lesson 4 [Part 5 - Inference and Validation](https://youtu.be/coBbbrGZXI0) [Part 6 - Saving and Loading Models](https://youtu.be/HiTih59dCWQ) [Part 7 - Loading Data Sets with Torchvision](https://youtu.be/hFu7GTfRWks) [Part 8 - Transfer 
Learning](https://youtu.be/3eqn5sgCOsY) helper.py ### Pytorch [Pytorch 0.4.0 Documentation](https://pytorch.org/docs/stable/index.html) [Pytorch 0.4.0 tutorial](https://pytorch.org/tutorials/index.html) [Iterate through custom dataset](https://discuss.pytorch.org/t/trying-to-iterate-through-my-custom-dataset/1909) [Problem loading model trained on GPU](https://discuss.pytorch.org/t/problem-loading-model-trained-on-gpu/17745) ### Python & Miscellaneous [Objects and classes in Python](http://jfine-python-classes.readthedocs.io/en/latest/construct.html) [find the latest file in a folder](https://stackoverflow.com/questions/39327032/how-to-get-the-latest-file-in-a-folder-using-python) [How to get the aspect ratio of an image?](https://math.stackexchange.com/questions/180804/how-to-get-the-aspect-ratio-of-an-image) [Activation functions and its types](https://towardsdatascience.com/activation-functions-and-its-types-which-is-better-a9a5310cc8f) [argparse](https://docs.python.org/dev/library/argparse.html#)
github_jupyter
# Read and Write from Spark to SQL using the MSSQL Spark Connector A typical big data scenario a key usage pattern is high volume, velocity and variety data processing in Spark followed with batch or streaming writes to SQL for access to LOB applications. These usage patterns greatly benefit from a connector that utilizes key SQL optimizations and provides an efficient write to SQLServer master instance and SQL Server data pool in Big Data Clusters. MSSQL Spark connector provides an efficient write SQLServer master instance and SQL Server data pool in Big Data Clusters. Usage ---- - Familiar Spark DataSource V1 interface - Referenced by fully qualified name "com.microsoft.sqlserver.jdbc.spark" - Use from supported Spark language bindings ( Python, Scala, Java, R) - Optionally pass Bulk Copy parameters ** Note : The image here may not be visible dues to markdown bug. Please change path here to full path to view the image. <img src = "../data-virtualization/MSSQL_Spark_Connector2.jpg" style="float: center;" alt="drawing" width="900"> More details ----------- MSSQL Spark connector, uses [SQL Server Bulk copy APIS](https://docs.microsoft.com/en-us/sql/connect/jdbc/using-bulk-copy-with-the-jdbc-driver?view=sql-server-2017#sqlserverbulkcopyoptions) to implement an efficient write to SQL Server. The connector is based on Spark Data source APIs and provides a familiar JDBC interface for access The Sample --------- The following sample shows MSSQL JDBC Connector for read/write SQLServer master instance and SQL Server data pool in Big Data Clusters. The sample is divided into 2 parts. - Part 1 shows read/write to SQL Master instance and - Part 2 shows read/write to Data Pools in Big Data Cluster. In the sample we' ll - Read a file from HDFS and do some basic processing - In Part 1, we'll write the dataframe to SQL server table and then read the table to a dataframe . 
- In Part 2, we'll write the dataframe to SQL Server data pool external table and then read it back to a spark data frame. ## PreReq ------- - Download [AdultCensusIncome.csv]( https://amldockerdatasets.azureedge.net/AdultCensusIncome.csv ) to your local machine. Upload this file to hdfs folder named *spark_data*. - The sample uses a SQL database *connector_test_db*, user *connector_user* with password *password123!#* and datasource *connector_ds*. The database, user/password and datasource need to be created before running the full sample. Refer **data-virtualization/mssql_spark_connector_user_creation.ipynb** on steps to create this user. # Read CSV into a data frame In this step we read the CSV into a data frame and do some basic cleanup steps. ``` #spark = SparkSession.builder.getOrCreate() sc.setLogLevel("INFO") #Read a file and then write it to the SQL table datafile = "/spark_data/AdultCensusIncome.csv" df = spark.read.format('csv').options(header='true', inferSchema='true', ignoreLeadingWhiteSpace='true', ignoreTrailingWhiteSpace='true').load(datafile) df.show(5) #Process this data. Very simple data cleanup steps. 
Replacing "-" with "_" in column names columns_new = [col.replace("-", "_") for col in df.columns] df = df.toDF(*columns_new) df.show(5) ``` # (PART 1) Write and READ to SQL Table - Write dataframe to SQL table to SQL Master - Read SQL Table to Spark dataframe ``` #Write from Spark to SQL table using MSSQL Spark Connector print("Use MSSQL connector to write to master SQL instance ") servername = "jdbc:sqlserver://master-0.master-svc" dbname = "connector_test_db" url = servername + ";" + "databaseName=" + dbname + ";" dbtable = "AdultCensus_test" user = "connector_user" password = "password123!#" # Please specify password here #com.microsoft.sqlserver.jdbc.spark try: df.write \ .format("com.microsoft.sqlserver.jdbc.spark") \ .mode("overwrite") \ .option("url", url) \ .option("dbtable", dbtable) \ .option("user", user) \ .option("password", password) \ .save() except ValueError as error : print("MSSQL Connector write failed", error) print("MSSQL Connector write(overwrite) succeeded ") #Use mode as append try: df.write \ .format("com.microsoft.sqlserver.jdbc.spark") \ .mode("append") \ .option("url", url) \ .option("dbtable", dbtable) \ .option("user", user) \ .option("password", password) \ .save() except ValueError as error : print("MSSQL Connector write failed", error) print("MSSQL Connector write(append) succeeded ") #Read from SQL table using MSSQ Connector print("read data from SQL server table ") jdbcDF = spark.read \ .format("com.microsoft.sqlserver.jdbc.spark") \ .option("url", url) \ .option("dbtable", dbtable) \ .option("user", user) \ .option("password", password).load() jdbcDF.show(5) ``` # (PART 2) Write and READ to Data Pool external Tables in Big Data Cluster - Write dataframe to SQL external table in Data Pools in Big Data Cluste - Read SQL external Table to Spark dataframe ``` #Write from Spark to SQL table using MSSQL Spark Connector print("Use MSSQL connector to write to master SQL instance ") datapool_table = "AdultCensus_DataPoolTable" 
datasource_name = "connector_ds" try: df.write \ .format("com.microsoft.sqlserver.jdbc.spark") \ .mode("overwrite") \ .option("url", url) \ .option("dbtable", datapool_table) \ .option("user", user) \ .option("password", password) \ .option("dataPoolDataSource",datasource_name)\ .save() except ValueError as error : print("MSSQL Connector write failed", error) print("MSSQL Connector write(overwrite) to data pool external table succeeded") try: df.write \ .format("com.microsoft.sqlserver.jdbc.spark") \ .mode("append") \ .option("url", url) \ .option("dbtable", datapool_table) \ .option("user", user) \ .option("password", password) \ .option("dataPoolDataSource",datasource_name)\ .save() except ValueError as error : print("MSSQL Connector write failed", error) print("MSSQL Connector write(append) to data pool external table succeeded") #Read from SQL table using MSSQ Connector print("read data from SQL server table ") jdbcDF = spark.read \ .format("com.microsoft.sqlserver.jdbc.spark") \ .option("url", url) \ .option("dbtable", datapool_table) \ .option("user", user) \ .option("password", password)\ .load() jdbcDF.show(5) print("MSSQL Connector read from data pool external table succeeded") ```
github_jupyter
``` # basic import numpy as np import pandas as pd # get data # ! conda install pandas-datareader s import pandas_datareader as pdr # visual # ! pip install mpl-finance import matplotlib.pyplot as plt import mpl_finance as mpf %matplotlib inline #import seaborn as sns #time import datetime as datetime # https://github.com/mrjbq7/ta-lib # ! pip install ta-lib import talib start_date = datetime.datetime(2018,1,1) #開始日期 end_date= datetime.datetime(2018,12,31) #結束日期 df_2330 = pdr.DataReader('2330.TW', 'yahoo', start=start_date ,end=end_date) # 台灣台積電 df_2330.head() df_2330['Date']=df_2330.index # index 複製到另一欄 df_2330 = df_2330.reset_index(drop=True) # index變成 0~N df_2330.head() sma_10 = talib.SMA(np.array(df_2330['Close']), 10) # 10日均線 sma_30 = talib.SMA(np.array(df_2330['Close']), 30) # 30日均線 df_2330['sma_10'] = sma_10 df_2330['sma_30'] = sma_30 df_2330[30:40] #df_2330.index = df_2330.index.format(formatter=lambda x: x.strftime('%Y-%m-%d')) fig = plt.figure(figsize=(24, 8)) ax = fig.add_subplot(1, 1, 1) ax.set_xticks(range(0, len(df_2330['Date']), 10)) ax.set_xticklabels(df_2330['Date'][::10]) mpf.candlestick2_ochl(ax, df_2330['Open'], df_2330['Close'], df_2330['High'], df_2330['Low'], width=0.6, colorup='r', colordown='g', alpha=0.75); sma_10 = talib.SMA(np.array(df_2330['Close']), 10) # 10日均線 sma_30 = talib.SMA(np.array(df_2330['Close']), 30) # 30日均線 fig = plt.figure(figsize=(24, 8)) ax = fig.add_subplot(1, 1, 1) ax.set_xticks(range(0, len(df_2330['Date']), 10)) ax.set_xticklabels(df_2330['Date'][::10]) mpf.candlestick2_ochl(ax, df_2330['Open'], df_2330['Close'], df_2330['High'], df_2330['Low'], width=0.6, colorup='r', colordown='g', alpha=0.75) plt.rcParams['font.sans-serif']=['Microsoft JhengHei'] ax.plot(sma_10, label='10日均線') ax.plot(sma_30, label='30日均線') ax.legend(); sma_10 = talib.SMA(np.array(df_2330['Close']), 10) sma_30 = talib.SMA(np.array(df_2330['Close']), 30) fig = plt.figure(figsize=(24, 15)) ax = fig.add_axes([0,0.2,1,0.5]) ax2 = 
fig.add_axes([0,0,1,0.2]) ax.set_xticks(range(0, len(df_2330.index), 10)) ax.set_xticklabels(df_2330.index[::10]) mpf.candlestick2_ochl(ax, df_2330['Open'], df_2330['Close'], df_2330['High'], df_2330['Low'], width=0.6, colorup='r', colordown='g', alpha=0.75) plt.rcParams['font.sans-serif']=['Microsoft JhengHei'] ax.plot(sma_10, label='10日均線') ax.plot(sma_30, label='30日均線') mpf.volume_overlay(ax2, df_2330['Open'], df_2330['Close'], df_2330['Volume'], colorup='r', colordown='g', width=0.5, alpha=0.8) ax2.set_xticks(range(0, len(df_2330['Date']), 10)) ax2.set_xticklabels(df_2330['Date'][::10]) ax.legend(); sma_10 = talib.SMA(np.array(df_2330['Close']), 10) sma_30 = talib.SMA(np.array(df_2330['Close']), 30) df_2330['k'], df_2330['d'] = talib.STOCH(df_2330['High'], df_2330['Low'], df_2330['Close']) df_2330['k'].fillna(value=0, inplace=True) df_2330['d'].fillna(value=0, inplace=True) fig = plt.figure(figsize=(24, 20)) ax = fig.add_axes([0,0.3,1,0.4]) ax2 = fig.add_axes([0,0.2,1,0.1]) ax3 = fig.add_axes([0,0,1,0.2]) ax.set_xticks(range(0, len(df_2330['Date']), 10)) ax.set_xticklabels(df_2330['Date'][::10]) mpf.candlestick2_ochl(ax, df_2330['Open'], df_2330['Close'], df_2330['High'], df_2330['Low'], width=0.6, colorup='r', colordown='g', alpha=0.75) plt.rcParams['font.sans-serif']=['Microsoft JhengHei'] ax.plot(sma_10, label='10日均線') ax.plot(sma_30, label='30日均線') ax2.plot(df_2330['k'], label='K值') ax2.plot(df_2330['d'], label='D值') ax2.set_xticks(range(0, len(df_2330['Date']), 10)) ax2.set_xticklabels(df_2330['Date'][::10]) mpf.volume_overlay(ax3, df_2330['Open'], df_2330['Close'], df_2330['Volume'], colorup='r', colordown='g', width=0.5, alpha=0.8) ax3.set_xticks(range(0, len(df_2330['Date']), 10)) ax3.set_xticklabels(df_2330['Date'][::10]) ax.legend(); ax2.legend(); sma_10 = talib.SMA(np.array(df_2330['Close']), 10) sma_30 = talib.SMA(np.array(df_2330['Close']), 30) df_2330['k'], df_2330['d'] = talib.STOCH(df_2330['High'], df_2330['Low'], df_2330['Close']) # k , d 值 
df_2330['k'].fillna(value=0, inplace=True) #NaN 替換成0 df_2330['d'].fillna(value=0, inplace=True) df_2330[30:40] ### 兩個均線交叉點 for index, row in df_2330.iterrows(): try: if float(df_2330.loc[index,'sma_30']) > float(df_2330.loc[index,'sma_10']) and float(df_2330.loc[index+1,'sma_30']) < float(df_2330.loc[index+1,'sma_10']): print(df_2330.loc[index,'Date']) elif float(df_2330.loc[index,'sma_30']) < float(df_2330.loc[index,'sma_10']) and float(df_2330.loc[index+1,'sma_30']) > float(df_2330.loc[index+1,'sma_10']): print(df_2330.loc[index,'Date']) except: continue ```
github_jupyter
``` import random import pprint import math from collections import defaultdict ``` # Load the dataset ``` # record tag frequency across the dataset movie_tag_freq = defaultdict(set) user_tag_freq = defaultdict(set) with open("./datasets/ml-latest-small/movies.csv") as f: # Each line is of form: <movieId>,<title>,<genres> movies = {} for line in f: split_line = line.strip().split(",") title = split_line[1] tags = defaultdict(int) tag_list = split_line[2].split("|") # Some movie titles have a comma in them :( if len(split_line) >= 4: for i in range(2,len(split_line) - 1): title += "," + split_line[i] tag_list = split_line[-1].split("|") for tag in tag_list: tags[tag.lower()] += 1 movie_tag_freq[tag.lower()].add(split_line[0]) movies[split_line[0]] = { "name": title, "tags": tags, } # first line in file del movies["movieId"] with open("./datasets/ml-latest-small/tags.csv") as f: # Each line is of form: <userId>,<movieId>,<tag>,<timestamp> # We will only use <movieId>,<tag> for line in f: split_line = line.strip().split(",") if split_line[0] == "userId": continue tag = split_line[2].lower() movies[split_line[1]]["tags"][tag] += 1 movie_tag_freq[tag].add(split_line[0]) ``` For the ratings datset, we have to convert it to appear similar to rabble data. In this case we need to have input similar to rabble 'Likes'. 
To get this we: - Assume all ratings above `split` (scale 0.5 -> 5) are positive - `split` is the mean rating in the dataset ``` with open("./datasets/ml-latest-small/ratings.csv") as f: # Each line is of form: <userId>,<movieId>,<rating>,<timestamp> # We will only use <userId>,<movieId>,<rating> users = {} sum_ratings = 0 amount_ratings = 0 lines = f.readlines() for line in lines: split_line = line.strip().split(",") if split_line[0] == "userId": continue sum_ratings += float(split_line[2]) amount_ratings += 1 split = str(sum_ratings / amount_ratings) for line in lines: split_line = line.strip().split(",") if split_line[0] == "userId": continue if split_line[0] not in users: users[split_line[0]] = { "pos": [], "neg": [], } # We will convert ratings to binary (like/dislike) as that is the ratings used in rabble # Assume all ratings above 'split' (scale 0.5 -> 5) are positive # 'split' is the mean (3.501 for this dataset) rating if split_line[2] >= split: users[split_line[0]]["pos"].append(split_line[1]) else: users[split_line[0]]["neg"].append(split_line[1]) ``` #### A quick eyeball check of a movie object ``` pp = pprint.PrettyPrinter(indent=4) random_movie_pos = random.randrange(len(movies)) random_movie_id = [x for x in movies.keys()][random_movie_pos] print(random_movie_id, end=" ") pp.pprint(movies[random_movie_id]) ``` # Seperate training and test data - Each user has rated at least 20 movies. 
However as rabble only takes positive input we will only use the positive scores to train the user model - So we will train with roughly 3/4 of the positive ratings and test with the 1/4 positive and all the negative ratings ``` for u in users.keys(): cutoff = (len(users[u]["pos"]) * 3) // 4 shuffled_pos_ratings = users[u]["pos"] random.shuffle(shuffled_pos_ratings) users[u]["train"] = shuffled_pos_ratings[:cutoff] users[u]["test"] = shuffled_pos_ratings[cutoff:] ``` # Create User Models Create simple user model by adding any tags related to liked movies to a 'model' dictionary. Every time that tag is seen, the counter related to it is increased. ``` for u in users.keys(): users[u]["model"] = defaultdict(int) for movieId in users[u]["train"]: for tag in movies[movieId]["tags"].keys(): user_tag_freq[tag].add(u) users[u]["model"][tag] += 1 ``` #### A quick eyeball check of a model ``` pp = pprint.PrettyPrinter(indent=4) random_user_id = str(random.randrange(len(users))) print(random_user_id, end=" ") pp.pprint(users[random_user_id]) ``` # Similarity function Calculate similarity based on `TF Cosine-based Similarity` method described in Content-based Recommendation in Social Tagging Systems (4.2) [link](https://dl.acm.org/citation.cfm?id=1864756) ``` def tf_cosine_similarity(user_model, movie_tags): sum_user_item_tf = 0 sum_user_tf = 0 sum_item_tf = 0 tag_amount = 0 for tag in movie_tags.keys(): sum_user_item_tf += user_model[tag] * movie_tags[tag] sum_user_tf += user_model[tag] ** 2 sum_item_tf += movie_tags[tag] ** 2 tag_amount += 1 divisor = (((sum_user_tf) ** 0.5) * ((sum_item_tf) ** 0.5)) if divisor == 0: return 0 tf_cosine = sum_user_item_tf / divisor return tf_cosine ``` Calculate similarity based on `TF-IDF Cosine-based Similarity` method describe in Content-based Recommendation in Social Tagging Systems (4.3) [link](https://dl.acm.org/citation.cfm?id=1864756) Calculate `User-based inverse tag frequency` and `Item-based inverse tag frequency` using `tag_freq` 
``` def calculate_based_itf(tag_freq, N, b_itfs): for key in tag_freq.keys(): occurance = len(tag_freq[key]) b_itf = math.log(N / occurance) b_itfs[key] = b_itf movie_amount = len(movies) user_amount = len(users) user_ifs = defaultdict(int) movie_ifs = defaultdict(int) calculate_based_itf(movie_tag_freq, len(movies), movie_ifs) calculate_based_itf(user_tag_freq, len(users), user_ifs) def tf_idf_cosine_similarity(user_model, movie_tags): sum_user_item_tf = 0 sum_user_tf = 0 sum_item_tf = 0 tag_amount = 0 for tag in movie_tags.keys(): sum_user_item_tf += user_model[tag] * user_ifs[tag] * movie_tags[tag] * movie_ifs[tag] sum_user_tf += (user_model[tag] * user_ifs[tag]) ** 2 sum_item_tf += (movie_tags[tag] * movie_ifs[tag])** 2 tag_amount += 1 divisor = (((sum_user_tf) ** 0.5) * ((sum_item_tf) ** 0.5)) if divisor == 0: return 0 tf_cosine = sum_user_item_tf / divisor return tf_cosine ``` #### Eyeball similarity ``` rand_u_model = users[random_user_id]["model"] rand_u_train = users[random_user_id]["train"] rand_u_test = users[random_user_id]["test"] rand_u_neg = users[random_user_id]["neg"] pos_train = movies[rand_u_train[random.randrange(len(rand_u_train))]] pos_test = movies[rand_u_test[random.randrange(len(rand_u_test))]] neg_movie = movies[rand_u_neg[random.randrange(len(rand_u_neg))]] print(pos_train) print(pos_test) print(neg_movie) print("TF Similarity") # pos result print(tf_cosine_similarity(rand_u_model, pos_train["tags"])) print(tf_cosine_similarity(rand_u_model, pos_test["tags"])) # negative print(tf_cosine_similarity(rand_u_model, neg_movie["tags"])) print("TF-IDF Similarity") # pos result print(tf_idf_cosine_similarity(rand_u_model, pos_train["tags"])) print(tf_idf_cosine_similarity(rand_u_model, pos_test["tags"])) # negative print(tf_idf_cosine_similarity(rand_u_model, neg_movie["tags"])) ``` # Evaluate Evaluating based on chapter `3.2.2 Measuring Usage Prediction` in [Recommender Systems 
Handbook](https://link.springer.com/chapter/10.1007/978-0-387-85820-3_8). Can be gotten [here](http://scholar.google.com/scholar_url?url=http://citeseerx.ist.psu.edu/viewdoc/download%3Fdoi%3D10.1.1.712.4138%26rep%3Drep1%26type%3Dpdf&hl=en&sa=X&scisig=AAGBfm1BpCg0RTunNpmouOfrHuNPul-3NQ&nossl=1&oi=scholarr). Evaluation metric chosen is area under the Receiving Operator Characteristic curve, as in @iandioch's research. ``` aucs = [] n1s = [] n2s = [] n3s = [] ns = [] # Column headings. print('\t\tn1 \tn2 \tn3 \tAUC') t = 0 for u in users.keys(): # Code based on @iandioch's common neighbours research model = users[u]["model"] n1 = 0 # missing_pos > neg n2 = 0 # missing_pos = neg n3 = 0 # missing_pos < neg n = 0 # total link comparisons for missing_pos_id in users[u]["test"]: a_score = tf_idf_cosine_similarity(model, movies[missing_pos_id]["tags"]) for neg_id in users[u]["neg"]: b_score = tf_idf_cosine_similarity(model, movies[neg_id]["tags"]) if abs(a_score-b_score) < 0.0005: n2 += 1 elif a_score > b_score: n1 += 1 else: n3 += 1 n += 1 # Check if there were any comparisons. # If not ignore this user model as they have no dislikes/ likes if n > 0: auc = (n1 + 0.5*n2)/(n) aucs.append(auc) n1s.append(n1) n2s.append(n2) n3s.append(n3) ns.append(n) t += 1 # Eye ball some per user examples if t <= 5: print('UserID {:<2}:\t{:<5}\t{:<5}\t{:<5}\t{:<.6f}'.format(u, n1, n2, n3, auc)) def avg(seq): return sum(seq)/len(seq) print('Average:\t{:<5}\t{:<5}\t{:<5}\t{:<.6f}'.format(int(round(avg(n1s))), int(round(avg(n2s))), int(round(avg(n3s))), avg(aucs))) ```
github_jupyter
### Learning cross-lingual embedding with FastText Embeddings Author: Jeanne Elizabeth Daniel November 2019 FastText (Bojanowski et al., 2017) is conceptually similar to SGNS, except it also takes into account subword information. Each word is broken up into a set of character $n$-grams, with special boundary symbols at the beginning and end of each word. The original word is also retained in the set. Thus, for $n=3$, the word "there" is represented by the following $n$-grams "<th", "the", "her", "ere", "re>" and the special token "\<there>". This simple approach enables sharing representations across the vocabulary, can handle rare words better, and can even handle unseen words (a property that SGNS models lack). We learn a cross-lingual embedding space from the multilingual questions by relying on the estimated 10% prevalence of code-mixing. After training the embedding models on the questions found in the training set, we extract the cross-lingual word embeddings. We construct a sentence embedding by taking the average of all the word embeddings in the sentence (Wieting et al., 2015). Then we train $k$-nearest neighbour classifiers to predict the most appropriate answer, with $k = 1, 5, 25, 50$. The best validation scores were achieved by using cosine as the distance metric and using weighted majority voting, where the contribution of each nearest neighbour is inversely proportional to its distance from the query vector.
``` import pandas as pd import gensim import numpy as np from gensim.models import Word2Vec, FastText import preprocess_data data = pd.read_csv('dataset_7B', delimiter = ';', engine = 'python') data = data[['helpdesk_question', 'helpdesk_reply', 'set', 'low_resource']] responses = pd.DataFrame(data.loc[data['set'] == 'Train']['helpdesk_reply'].value_counts()).reset_index() responses['reply'] = responses['index'] responses['index'] = responses.index responses = dict(responses.set_index('reply')['index']) def create_fasttext(data, skip_gram= 1, min_count = 1, size = 100): """ Train a fasttext embedding model. The FastText model implicitly creates a multilingual vocabulary from the multilingual dataset. The estimate 10% code-switching is used as a weak cross-lingual signal to construct cross-lingual embeddings Args: data: dataframe that contains the questions skip_gram: binary indicator to use either skip-gram negative sampling or continuous bag-of-words (Mikolov et al., 2013) size: number of dimensions in embedding Returns: Trained embedding model """ documents = data['helpdesk_question'] documents['index'] = documents.index processed_docs = documents.apply(preprocess_data.preprocess, args = [0, False]) model = FastText(sentences=processed_docs, sg=skip_gram, size=size, window=5, min_count=min_count, word_ngrams=1, sample=0.001, seed=1, workers=5, negative=5, ns_exponent=0.75, iter=5, min_n=3, max_n=6, trim_rule=None) return model def create_sentence_embeddings(embedding_model, sentence): """ We create sentence embeddings by averaging the embeddings of the words found in the sentence. If no words match, we return a vector of random values. 
Args: embedding_model: pretrained word embedding model sentence: list of words found in sentence Returns: A sentence embedding for the input sentence """ sentence_vector = np.zeros(100) length = 0 if len(sentence) == 0: return (np.random.random(100) - 0.5)/100 if embedding_model.wv.vocab.get(sentence[0]) != None: sentence_vector = embedding_model.wv[sentence[0]] length += 1 for word in sentence[1:]: if embedding_model.wv.vocab.get(word) != None: sentence_vector = sentence_vector + 1*np.array(embedding_model.wv[word]) length += 1 if length == 0: return (np.random.random(100) - 0.5)/100 return sentence_vector/length def create_batch(df, embedding_model, D): """ Create batch of feature vectors in matrix form Args: df: dataset of questions embedding_model: pretrained embedding model D: size of embedding Returns: matrix where rows are embeddings of questions """ matrix = np.zeros((df.shape[0], D, )) all_text = list(df['helpdesk_question'].apply(preprocess_data.preprocess)) for i in range(len(all_text) -1): sentence_vector = create_sentence_embeddings(embedding_model, all_text[i]) matrix[i] += np.array(sentence_vector) return matrix def label_preprocess(entry): """ Returns integer ID corresponding to response for easy comparison and classification Args: entry: query item responses: dict containing all the template responses with their corresponding IDs Return: integer corresponding to each response """ if responses.get(entry) != None: return responses[entry] else: return len(responses) #default unknown class train_df = data.loc[data['set'] == 'Train'] valid_df = data.loc[data['set'] == 'Valid'] test_df = data.loc[data['set'] == 'Test'] test_LR_df = data.loc[(data['set'] == 'Test') & (data['low_resource'] == 'True')] y_train = data.loc[data['set'] == 'Train']['helpdesk_reply'].apply(label_preprocess) y_valid = data.loc[data['set'] == 'Valid']['helpdesk_reply'].apply(label_preprocess) y_test = data.loc[data['set'] == 'Test']['helpdesk_reply'].apply(label_preprocess) 
y_test_LR = data.loc[(data['set'] == 'Test') & (data['low_resource'] == 'True')]['helpdesk_reply'].apply(label_preprocess) fast = create_fasttext(train_df) from sklearn.neighbors import KNeighborsClassifier def train_knn_model(x_train, y_train, metric, k, weights): """ Fit k-nearest neighbour model to the sentence embeddings Args: x_train: matrix of sentence embeddings y_train: class labels associated with each sentence embedding metric: distance metric to use k: number of neighbours to consider weights: to either use uniform voting (equal weighting) or weighted voting (the weight of each vote is proportional to its distance to query) Returns: A trained KNN classifier """ clf = KNeighborsClassifier(n_neighbors=k, weights= weights, metric = metric) clf.fit(x_train, y_train) return clf ``` ### Results for FastText ``` x_train = create_batch(train_df, fast, 100) x_valid = create_batch(valid_df, fast, 100) x_test = create_batch(test_df, fast, 100) x_test_LR = create_batch(test_LR_df, fast, 100) clf_1NN = train_knn_model(x_train = x_train, y_train = y_train, metric = 'cosine', k = 1, weights = 'distance') score = clf_1NN.score(x_train, y_train) print("Train accuracy", score) score = clf_1NN.score(x_valid, y_valid) print("Validation accuracy", score) clf_5NN = train_knn_model(x_train = x_train, y_train = y_train, metric = 'cosine', k = 5, weights = 'distance') score = clf_5NN.score(x_valid, y_valid) print("Validation accuracy", score) clf_25NN = train_knn_model(x_train = x_train, y_train = y_train, metric = 'cosine', k = 25, weights = 'distance') score = clf_25NN.score(x_valid, y_valid) print("Validation accuracy", score) clf_50NN = train_knn_model(x_train = x_train, y_train = y_train, metric = 'cosine', k = 50, weights = 'distance') score = clf_50NN.score(x_valid, y_valid) print("Validation accuracy", score) score = clf_1NN.score(x_test, y_test) print("Test accuracy on 1-NN", score) score = clf_5NN.score(x_test, y_test) print("Test accuracy on 5-NN", score) score = 
clf_25NN.score(x_test, y_test) print("Test accuracy on 25-NN", score) score = clf_50NN.score(x_test, y_test) print("Test accuracy on 50-NN", score) score = clf_1NN.score(x_test_LR, y_test_LR) print("LR Test accuracy on 1-NN", score) score = clf_5NN.score(x_test_LR, y_test_LR) print("LR Test accuracy on 5-NN", score) score = clf_25NN.score(x_test_LR, y_test_LR) print("LR Test accuracy on 25-NN", score) score = clf_50NN.score(x_test_LR, y_test_LR) print("LR Test accuracy on 50-NN", score) ``` ### Assessing the quality of cross-lingual embeddings We design a small experiment to assess the quality of the cross-lingual embeddings for English and Zulu. The translations were obtained using google translate and verified by a Zulu speaker. We compute the sentence embedding for each English-Zulu translation pair and calculate the cosine distance between the two embeddings. ``` eng_A = "can you drink coca cola when you are pregnant" zulu_A = "ungayiphuza yini i-coca cola uma ukhulelwe" eng_B = "when can i stop breastfeeding" zulu_B = "ngingakuyeka nini ukuncelisa ibele" eng_C = "when can I start feeding my baby solid food" zulu_C = "ngingaqala nini ukondla ingane yami ukudla okuqinile" eng_D = "what are the signs of labour" zulu_D = "yiziphi izimpawu zokubeletha" eng_E = "when can I learn the gender of my baby" zulu_E = "ngingabazi ubulili bengane yami" embed_eng_A = create_sentence_embeddings(fast, preprocess_data.preprocess(eng_A)) embed_eng_B = create_sentence_embeddings(fast, preprocess_data.preprocess(eng_B)) embed_eng_C = create_sentence_embeddings(fast, preprocess_data.preprocess(eng_C)) embed_eng_D = create_sentence_embeddings(fast, preprocess_data.preprocess(eng_D)) embed_eng_E = create_sentence_embeddings(fast, preprocess_data.preprocess(eng_E)) embed_zulu_A = create_sentence_embeddings(fast, preprocess_data.preprocess(zulu_A)) embed_zulu_B = create_sentence_embeddings(fast, preprocess_data.preprocess(zulu_B)) embed_zulu_C = create_sentence_embeddings(fast, 
preprocess_data.preprocess(zulu_C)) embed_zulu_D = create_sentence_embeddings(fast, preprocess_data.preprocess(zulu_D)) embed_zulu_E = create_sentence_embeddings(fast, preprocess_data.preprocess(zulu_E)) from scipy.spatial.distance import cosine print("Sentence A:", cosine(embed_eng_A, embed_zulu_A)) print("Sentence B:", cosine(embed_eng_B, embed_zulu_B)) print("Sentence C:", cosine(embed_eng_C, embed_zulu_C)) print("Sentence D:", cosine(embed_eng_D, embed_zulu_D)) print("Sentence E:", cosine(embed_eng_E, embed_zulu_E)) ```
github_jupyter
``` from collections import defaultdict, deque from difflib import SequenceMatcher import bblfsh import Levenshtein import pandas as pd from sqlite3 import connect from tqdm import tqdm_notebook as tqdm import seaborn as sns import numpy as np from matplotlib import pyplot as plt import matplotlib.patches as patches from matplotlib.pyplot import text as plt_text sns.set(style="ticks") client = bblfsh.BblfshClient("0.0.0.0:9432") %matplotlib inline ``` ## Location of database ``` file_name = "2018-03-23T09_21_43Z-export.db" print(file_name) ``` ## Query database ``` def to_str(string): return bytes(string) user_id = {"Waren": 2, "Romain": 7, "Egor": 12} query_answers = "select pair_id, user_id, answer from assignments where answer is not null and user_id in (2,7, 12);" query_pairs = "select id as pair_id, content_a, content_b, score from file_pairs;" with connect(file_name) as conn: df = pd.read_sql_query(query_answers, conn) conn.text_factory = to_str # super strange that it doesn't work with just str df_pairs = pd.read_sql_query(query_pairs, conn) df_war = df[df.user_id == user_id["Waren"]] df_rom = df[df.user_id == user_id["Romain"]] df_eg = df[df.user_id == user_id["Egor"]] ``` ## Calculate similarity score between texts and flatten UAST ``` client = bblfsh.BblfshClient("0.0.0.0:9432") # Use it while there are some issues with bblfsh def uast2sequence(root): sequence = [] nodes = defaultdict(deque) stack = [root] nodes[id(root)].extend(root.children) while stack: if nodes[id(stack[-1])]: child = nodes[id(stack[-1])].popleft() nodes[id(child)].extend(child.children) stack.append(child) else: sequence.append(stack.pop()) return sequence def flatten_uast(uast): seq = uast2sequence(uast) res = [item.internal_type for item in seq] return res def uast_to_type_seq(content): try: uast = client.parse(filename="java.java", contents=content).uast except: uast = client.parse(filename="java.java", contents=content.decode("utf-8", "ignore")).uast return flatten_uast(uast) def 
ration_levenshtein(seq_a, seq_b): return Levenshtein.ratio(seq_a, seq_b) def ratio_difflib(seq_a, seq_b): res = SequenceMatcher(None, seq_a, seq_b).ratio() # Dirty hack to avoid strange scores from DiffLib return max(res, SequenceMatcher(None, seq_b, seq_a).ratio()) def calc_uast_sim(row): content_a, content_b = row.content_a, row.content_b type_seq_a = uast_to_type_seq(bytes(content_a)) type_seq_b = uast_to_type_seq(bytes(content_b)) res = ration_levenshtein("".join(type_seq_a), "".join(type_seq_b)) res_c = ration_levenshtein(content_a, content_b) return res, res_c # similarity_scores = [calc_uast_sim(row) for _, row in tqdm(list(df_pairs.iterrows()))] similarity_scores = [] for _, row in tqdm(list(df_pairs.iterrows())): try: similarity_scores.append(calc_uast_sim(row)) except Exception as e: import pdb;pdb.set_trace() similarity_scores.append((0.5, 0.5)) df_pairs["uast_score"] = [pair_score[0] for pair_score in similarity_scores] df_pairs["text_score"] = [pair_score[1] for pair_score in similarity_scores] ``` ## Assemble dataframe ``` df_war_pairs = df_pairs.join(df_war[["pair_id", "answer"]].set_index("pair_id"), on="pair_id", lsuffix="_as", rsuffix="_war") df_rom_war_pairs = df_war_pairs.join(df_rom[["pair_id", "answer"]].set_index("pair_id"), on="pair_id", lsuffix="_war", rsuffix="_rom") df_eg_rom_war_pairs = df_rom_war_pairs.join(df_eg[["pair_id", "answer"]].set_index("pair_id"), on="pair_id") df_eg_rom_war_pairs.columns = [col if col != "answer" else "answer_eg" for col in df_eg_rom_war_pairs.columns] df_eg_rom_war_pairs[df_eg_rom_war_pairs["pair_id"]==964]["answer_eg"] ``` ## Plot infographics ``` def pairplot_df(df, columns, hue=None, hue_order=None): sns.pairplot(df[columns], hue=hue, hue_order=hue_order, diag_kind="kde") columns = ["text_score", "uast_score"] hue_order = ["yes", "maybe", "no"] pairplot_df(df_eg_rom_war_pairs, columns + ["answer_eg"], hue="answer_eg", hue_order=hue_order) pairplot_df(df_eg_rom_war_pairs, columns + ["answer_rom"], 
hue="answer_rom", hue_order=hue_order) pairplot_df(df_eg_rom_war_pairs, columns + ["answer_war"], hue="answer_war", hue_order=hue_order) fig, ax = plt.subplots() x = np.array(df_eg_rom_war_pairs["text_score"].tolist()) y = np.array(df_eg_rom_war_pairs["uast_score"].tolist()) col_map = {"yes": "b", "maybe": "g", "no": "r", None: "y"} colors = np.array([col_map[ans] for ans in df_eg_rom_war_pairs["answer_eg"].tolist()]) ax.scatter(x, y, c=colors) ax.set_xlabel("text_sim") ax.set_ylabel("uast_sim") pair_ids = df_eg_rom_war_pairs["pair_id"].tolist() for i, pair_id in enumerate(pair_ids): ax.annotate(pair_id, (x[i],y[i])) ``` ## Sampling strategy ``` fig, ax = plt.subplots() ax.scatter(x, y, c=colors) ax.set_xlabel("text_sim") ax.set_ylabel("uast_sim") def add_sampling_area(image, xy, width, height, text): rect = patches.Rectangle(xy, width, height, linewidth=1, edgecolor='r', facecolor='none') image.add_patch(rect) plt_text(xy[0] + width / 2, xy[1] + height / 2, text) add_sampling_area(ax, (0.55, 0.45), 0.45, 0.25, "250") add_sampling_area(ax, (0.55, 0.7), 0.15, 0.3, "150") add_sampling_area(ax, (0.3, 0.45), 0.25, 0.55, "100") ```
github_jupyter
``` from __future__ import division, print_function, absolute_import ``` # Introduction to Visualization: Density Estimation and Data Exploration ======== ##### Version 0.1 There are many flavors of data analysis that fall under the "visualization" umbrella in astronomy. Today, by way of example, we will focus on 2 basic problems. *** By AA Miller 16 September 2017 ``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` ## Problem 1) Density Estimation Starting with 2MASS and SDSS and extending through LSST, we are firmly in an era where data and large statistical samples are cheap. With this explosion in data volume comes a problem: we do not know the underlying probability density function (PDF) of the random variables measured via our observations. Hence - density estimation: an attempt to recover the unknown PDF from observations. In some cases theory can guide us to a parametric form for the PDF, but more often than not such guidance is not available. There is a common, simple, and very familiar tool for density estimation: histograms. But there is also a problem: HISTOGRAMS LIE! We will "prove" this to be the case in a series of examples. For this exercise, we will load the famous Linnerud data set, which tested 20 middle aged men by measuring the number of chinups, situps, and jumps they could do in order to compare these numbers to their weight, pulse, and waist size. To load the data (just chinups for now) we will run the following: from sklearn.datasets import load_linnerud linnerud = load_linnerud() chinups = linnerud.data[:,0] ``` from sklearn.datasets import load_linnerud linnerud = load_linnerud() chinups = linnerud.data[:,0] ``` **Problem 1a** Plot the histogram for the number of chinups using the default settings in pyplot. ``` plt.hist( # complete ``` Already with this simple plot we see a problem - the choice of bin centers and number of bins suggest that there is a 0% probability that middle aged men can do 10 chinups. 
Intuitively this seems incorrect, so lets examine how the histogram changes if we change the number of bins or the bin centers. **Problem 1b** Using the same data make 2 new histograms: (i) one with 5 bins (`bins = 5`), and (ii) one with the bars centered on the left bin edges (`align = "left"`). *Hint - if overplotting the results, you may find it helpful to use the `histtype = "step"` option* ``` plt.hist( # complete # complete ``` These small changes significantly change the output PDF. With fewer bins we get something closer to a continuous distribution, while shifting the bin centers reduces the probability to zero at 9 chinups. What if we instead allow the bin width to vary and require the same number of points in each bin? You can determine the bin edges for bins with 5 sources using the following command: bins = np.append(np.sort(chinups)[::5], np.max(chinups)) **Problem 1c** Plot a histogram with variable width bins, each with the same number of points. *Hint - setting `normed = True` will normalize the bin heights so that the PDF integrates to 1.* ``` # complete plt.hist(# complete ``` *Ending the lie* Earlier I stated that histograms lie. One simple way to combat this lie: show all the data. Displaying the original data points allows viewers to somewhat intuit the effects of the particular bin choices that have been made (though this can also be cumbersome for very large data sets, which these days is essentially all data sets). The standard for showing individual observations relative to a histogram is a "rug plot," which shows a vertical tick (or other symbol) at the location of each source used to estimate the PDF. **Problem 1d** Execute the cell below to see an example of a rug plot. ``` plt.hist(chinups, histtype = 'step') # this is the code for the rug plot plt.plot(chinups, np.zeros_like(chinups), '|', color='k', ms = 25, mew = 4) ``` Of course, even rug plots are not a perfect solution. 
Many of the chinup measurements are repeated, and those instances cannot be easily isolated above. One (slightly) better solution is to vary the transparency of the rug "whiskers" using `alpha = 0.3` in the whiskers plot call. But this too is far from perfect. To recap, histograms are not ideal for density estimation for the following reasons: * They introduce discontinuities that are not present in the data * They are strongly sensitive to user choices ($N_\mathrm{bins}$, bin centering, bin grouping), without any mathematical guidance to what these choices should be * They are difficult to visualize in higher dimensions Histograms are useful for generating a quick representation of univariate data, but for the reasons listed above they should never be used for analysis. Most especially, functions should not be fit to histograms given how greatly the number of bins and bin centering affects the output histogram. Okay - so if we are going to rail on histograms this much, there must be a better option. There is: [Kernel Density Estimation](https://en.wikipedia.org/wiki/Kernel_density_estimation) (KDE), a nonparametric form of density estimation whereby a normalized kernel function is convolved with the discrete data to obtain a continuous estimate of the underlying PDF. As a rule, the kernel must integrate to 1 over the interval $-\infty$ to $\infty$ and be symmetric. There are many possible kernels (gaussian is highly popular, though Epanechnikov, an inverted parabola, produces the minimal mean square error). KDE is not completely free of the problems we illustrated for histograms above (in particular, both a kernel and the width of the kernel need to be selected), but it does manage to correct a number of the ills. 
We will now demonstrate this via a few examples using the `scikit-learn` implementation of KDE: [`KernelDensity`](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html#sklearn.neighbors.KernelDensity), which is part of the [`sklearn.neighbors`](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.neighbors) module. *Note* There are many implementations of KDE in Python, and Jake VanderPlas has put together [an excellent description of the strengths and weaknesses of each](https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/). We will use the `scitkit-learn` version as it is in many cases the fastest implementation. To demonstrate the basic idea behind KDE, we will begin by representing each point in the dataset as a block (i.e. we will adopt the tophat kernel). Borrowing some code from Jake, we can estimate the KDE using the following code: from sklearn.neighbors import KernelDensity def kde_sklearn(data, grid, bandwidth = 1.0, **kwargs): kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs) kde_skl.fit(data[:, np.newaxis]) log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density) return np.exp(log_pdf) The two main options to set are the bandwidth and the kernel. ``` # execute this cell from sklearn.neighbors import KernelDensity def kde_sklearn(data, grid, bandwidth = 1.0, **kwargs): kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs) kde_skl.fit(data[:, np.newaxis]) log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density) return np.exp(log_pdf) ``` **Problem 1e** Plot the KDE of the PDF for the number of chinups middle aged men can do using a bandwidth of 0.1 and a tophat kernel. *Hint - as a general rule, the grid should be smaller than the bandwidth when plotting the PDF.* ``` grid = # complete PDFtophat = kde_sklearn( # complete plt.plot( # complete ``` In this representation, each "block" has a height of 0.25. 
The bandwidth is too narrow to provide any overlap between the blocks. This choice of kernel and bandwidth produces an estimate that is essentially a histogram with a large number of bins. It gives no sense of continuity for the distribution. Now, we examine the difference (relative to histograms) upon changing the width (i.e. kernel) of the blocks. **Problem 1f** Plot the KDE of the PDF for the number of chinups middle aged men can do using bandwidths of 1 and 5 and a tophat kernel. How do the results differ from the histogram plots above? ``` PDFtophat1 = # complete # complete # complete # complete ``` It turns out blocks are not an ideal representation for continuous data (see discussion on histograms above). Now we will explore the resulting PDF from other kernels. **Problem 1g** Plot the KDE of the PDF for the number of chinups middle aged men can do using a gaussian and Epanechnikov kernel. How do the results differ from the histogram plots above? *Hint - you will need to select the bandwidth. The examples above should provide insight into the useful range for bandwidth selection. You may need to adjust the values to get an answer you "like."* ``` PDFgaussian = # complete PDFepanechnikov = # complete ``` So, what is the *optimal* choice of bandwidth and kernel? Unfortunately, there is no hard and fast rule, as every problem will likely have a different optimization. Typically, the choice of bandwidth is far more important than the choice of kernel. In the case where the PDF is likely to be gaussian (or close to gaussian), then [Silverman's rule of thumb](https://en.wikipedia.org/wiki/Kernel_density_estimation#A_rule-of-thumb_bandwidth_estimator) can be used: $$h = 1.059 \sigma n^{-1/5}$$ where $h$ is the bandwidth, $\sigma$ is the standard deviation of the samples, and $n$ is the total number of samples. Note - in situations with bimodal or more complicated distributions, this rule of thumb can lead to woefully inaccurate PDF estimates. 
The most general way to estimate the choice of bandwidth is via cross validation (we will cover cross-validation later today). *What about multidimensional PDFs?* It is possible using many of the Python implementations of KDE to estimate multidimensional PDFs, though it is very very important to beware the curse of dimensionality in these circumstances. ## Problem 2) Data Exploration Now a more open ended topic: data exploration. In brief, data exploration encompasses a large suite of tools (including those discussed above) to examine data that live in large dimensional spaces. There is no single best method or optimal direction for data exploration. Instead, today we will introduce some of the tools available via python. As an example we will start with a basic line plot - and examine tools beyond `matplotlib`. ``` x = np.arange(0, 6*np.pi, 0.1) y = np.cos(x) plt.plot(x,y, lw = 2) plt.xlabel('X') plt.ylabel('Y') plt.xlim(0, 6*np.pi) ``` ### Seaborn [`Seaborn`](https://stanford.edu/~mwaskom/software/seaborn/index.html) is a plotting package that enables many useful features for exploration. In fact, a lot of the functionality that we developed above can readily be handled with `seaborn`. *Aside - aesthetics vary greatly from person to person. The mere act of importing `seaborn` drastically changes the appearance of all plots made thereafter. Some really like these changes, others do not. I've heard it said that `seaborn` is no good as it forces an `import MakeUgly` under the hood. The choice is up to you.* To begin, we will make the same plot that we created in matplotlib. ``` import seaborn as sns fig = plt.figure() ax = fig.add_subplot(111) ax.plot(x,y, lw = 2) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_xlim(0, 6*np.pi) ``` We can see that the plot has significantly changed styles. `seaborn` has 5 style presets: `darkgrid`, `whitegrid`, `dark`, `white`, and `ticks`. 
You can change the preset using the following: sns.set_style("whitegrid") which will change the output for all subsequent plots. Note - if you want to change the style for only a single plot, that can be accomplished with the following: with sns.axes_style("dark"): with all plotting commands inside the `with` statement. **Problem 3a** Re-plot the sine curve using each `seaborn` preset to see which you like best - then adopt this for the remainder of the notebook. ``` sns.set_style( # complete # complete ``` The folks behind `seaborn` have thought a lot about color palettes, which is a good thing. Remember - the choice of color for plots is one of the most essential aspects of visualization. A poor choice of colors can easily mask interesting patterns or suggest structure that is not real. To learn more about what is available, see the [`seaborn` color tutorial](http://stanford.edu/~mwaskom/software/seaborn/tutorial/color_palettes.html). Here we load the default: ``` # default color palette current_palette = sns.color_palette() sns.palplot(current_palette) ``` which we will now change to `colorblind`, which is clearer to those that are colorblind. ``` # set palette to colorblind sns.set_palette("colorblind") current_palette = sns.color_palette() sns.palplot(current_palette) ``` Now that we have covered the basics of `seaborn` (and the above examples truly only scratch the surface of what is possible), we will explore the power of `seaborn` for higher dimension data sets. We will load the famous Iris data set, which measures 4 different features of 3 different types of Iris flowers. There are 150 different flowers in the data set. *Note - for those familiar with `pandas` `seaborn` is designed to integrate easily and directly with `pandas DataFrame` objects. In the example below the Iris data are loaded into a `DataFrame`. 
`iPython` notebooks also display the `DataFrame` data in a nice readable format.* ``` iris = sns.load_dataset("iris") iris ``` Now that we have a sense of the data structure, it is useful to examine the distribution of features. Above, we went to great pains to produce histograms, KDEs, and rug plots. `seaborn` handles all of that effortlessly with the `distplot` function. **Problem 3b** Plot the distribution of petal lengths for the Iris data set. ``` # note - hist, kde, and rug all set to True, set to False to turn them off with sns.axes_style("dark"): sns.distplot(iris['petal_length'], bins=20, hist=True, kde=True, rug=True) ``` Of course, this data set lives in a 4D space, so plotting more than univariate distributions is important (and as we will see tomorrow this is particularly useful for visualizing classification results). Fortunately, `seaborn` makes it very easy to produce handy summary plots. At this point, we are familiar with basic scatter plots in matplotlib. **Problem 3c** Make a matplotlib scatter plot showing the Iris petal length against the Iris petal width. ``` plt.scatter( # complete ``` Of course, when there are many many data points, scatter plots become difficult to interpret. As in the example below: ``` with sns.axes_style("darkgrid"): xexample = np.random.normal(loc = 0.2, scale = 1.1, size = 10000) yexample = np.random.normal(loc = -0.1, scale = 0.9, size = 10000) plt.scatter(xexample, yexample) ``` Here, we see that there are many points, clustered about the origin, but we have no sense of the underlying density of the distribution. 2D histograms, such as `plt.hist2d()`, can alleviate this problem. I prefer to use `plt.hexbin()` which is a little easier on the eyes (though note - these histograms are just as subject to the same issues discussed above). 
``` # hexbin w/ bins = "log" returns the log of counts/bin # mincnt = 1 displays only hexpix with at least 1 source present with sns.axes_style("darkgrid"): plt.hexbin(xexample, yexample, bins = "log", cmap = "viridis", mincnt = 1) plt.colorbar() ``` While the above plot provides a significant improvement over the scatter plot by providing a better sense of the density near the center of the distribution, the binedge effects are clearly present. An even better solution, like before, is a density estimate, which is easily built into `seaborn` via the `kdeplot` function. ``` with sns.axes_style("darkgrid"): sns.kdeplot(xexample, yexample,shade=False) ``` This plot is much more appealing (and informative) than the previous two. For the first time we can clearly see that the distribution is not actually centered on the origin. Now we will move back to the Iris data set. Suppose we want to see univariate distributions in addition to the scatter plot? This is certainly possible with `matplotlib` and you can find examples on the web, however, with `seaborn` this is really easy. ``` sns.jointplot(x=iris['petal_length'], y=iris['petal_width']) ``` But! Histograms and scatter plots can be problematic as we have discussed many times before. **Problem 3d** Re-create the plot above but set `kind='kde'` to produce density estimates of the distributions. ``` sns.jointplot( # complete ``` That is much nicer than what was presented above. However - we still have a problem in that our data live in 4D, but we are (mostly) limited to 2D projections of that data. One way around this is via the `seaborn` version of a `pairplot`, which plots the distribution of every variable in the data set against each other. (Here is where the integration with `pandas DataFrame`s becomes so powerful.) 
``` sns.pairplot(iris[["sepal_length", "sepal_width", "petal_length", "petal_width"]]) ``` For data sets where we have classification labels, we can even color the various points using the `hue` option, and produce KDEs along the diagonal with `diag_type = 'kde'`. ``` sns.pairplot(iris, vars = ["sepal_length", "sepal_width", "petal_length", "petal_width"], hue = "species", diag_kind = 'kde') ``` Even better - there is an option to create a `PairGrid` which allows fine tuned control of the data as displayed above, below, and along the diagonal. In this way it becomes possible to avoid having symmetric redundancy, which is not all that informative. In the example below, we will show scatter plots and contour plots simultaneously. ``` g = sns.PairGrid(iris, vars = ["sepal_length", "sepal_width", "petal_length", "petal_width"], hue = "species", diag_sharey=False) g.map_lower(sns.kdeplot) g.map_upper(plt.scatter, edgecolor='white') g.map_diag(sns.kdeplot, lw=3) ``` Note - one disadvantage to the plot above is that the contours do not share the same color scheme as the KDE estimates and the scatter plot. I have not been able to figure out how to change this in a satisfactory way. (One potential solution is detailed [here](http://stackoverflow.com/questions/32889590/seaborn-pairgrid-using-kdeplot-with-2-hues), however, it is worth noting that this solution restricts your color choices to a maximum of ~5 unless you are a colormaps wizard, and I am not.)
github_jupyter
介绍如何在tensorflow环境下,使用JSMA算法攻击基于Inception数据集预训练的alexnet模型。 Jupyter notebook中使用Anaconda中的环境需要单独配置,默认情况下使用的是系统默认的Python环境,以使用advbox环境为例。 首先在默认系统环境下执行以下命令,安装ipykernel。 conda install ipykernel conda install -n advbox ipykernel 在advbox环境下激活,这样启动后就可以在界面上看到advbox了。 python -m ipykernel install --user --name advbox --display-name advbox ``` import logging logging.basicConfig(level=logging.INFO,format="%(filename)s[line:%(lineno)d] %(levelname)s %(message)s") logger=logging.getLogger(__name__) import numpy as np from PIL import Image #pip install Pillow from adversarialbox.adversary import Adversary from adversarialbox.attacks.saliency import JSMA from adversarialbox.models.tensorflow import TensorflowModel import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data from tools import show_images_diff #定义被攻击的图片 imagename="tutorials/cropped_panda.jpg" #从'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'下载并解压到当前路径 dirname="classify_image_graph_def.pb" #加载解码的图像 这里是个大坑 tf提供的imagenet预训练好的模型pb文件中 包含针对图像的预处理环节 即解码jpg文件 这部分没有梯度 #需要直接处理解码后的数据 image=np.array(Image.open(imagename).convert('RGB').resize((224,224))).astype(np.float32) #[100,100,3]->[1,100,100,3] orig=image.copy().astype(np.uint8) image=np.expand_dims(image, axis=0) session=tf.Session() def create_graph(dirname): with tf.gfile.FastGFile(dirname, 'rb') as f: graph_def = session.graph_def graph_def.ParseFromString(f.read()) _ = tf.import_graph_def(graph_def, name='') create_graph(dirname) # 初始化参数 非常重要 session.run(tf.global_variables_initializer()) tensorlist=[n.name for n in session.graph_def.node] #输出全部tensor #logger.info(tensorlist) #获取logits logits=session.graph.get_tensor_by_name('softmax/logits:0') x = session.graph.get_tensor_by_name('ExpandDims:0') # advbox demo # 因为原始数据没有归一化 所以bounds=(0, 255) m = TensorflowModel( session, x, None, logits, None, bounds=(0, 255), channel_axis=3, preprocess=None) #实例化JSMA max_iter为最大迭代次数 theta为扰动系数 max_perturbations_per_pixel为单像素最大修改次数 
attack = JSMA(m) attack_config = { "max_iter": 2000, "theta": 0.3, "max_perturbations_per_pixel": 7, "fast":True, "two_pix":False } adversary = Adversary(image,None) #麦克风 tlabel = 651 adversary.set_target(is_targeted_attack=True, target_label=tlabel) # FGSM targeted attack adversary = attack(adversary, **attack_config) if adversary.is_successful(): print( 'attack success, adversarial_label=%d' % (adversary.adversarial_label) ) #对抗样本保存在adversary.adversarial_example adversary_image=np.copy(adversary.adversarial_example) #强制类型转换 之前是float 现在要转换成int8 adv = np.array(adversary_image).astype("uint8")[0] print("jsma attack done") #显示原始图片 抵抗样本 以及两张图之间的差异 其中灰色代表没有差异的像素点 show_images_diff(orig,adversary.original_label,adv,adversary.adversarial_label) ```
github_jupyter
## Experiment Setup ### Random seed / PyTorch / CUDA related ``` import time import datetime import os import sys import itertools # Use Google Colab use_colab = True # Is this notebook running on Colab? # If so, then google.colab package (github.com/googlecolab/colabtools) # should be available in this environment # Previous version used importlib, but we could do the same thing with # just attempting to import google.colab try: from google.colab import drive colab_available = True except: colab_available = False if use_colab and colab_available: drive.mount('/content/drive') # If there's a package I need to install separately, do it here !pip install pyro-ppl # Install ronald_bdl package !pip install git+https://github.com/ronaldseoh/ronald_bdl.git # cd to the appropriate working directory under my Google Drive %cd 'drive/My Drive/Colab Notebooks/bayesian-dl-experiments' # List the directory contents !ls # IPython reloading magic %load_ext autoreload %autoreload 2 # Random seeds # Based on https://pytorch.org/docs/stable/notes/randomness.html random_seed = 682 ``` ### Third party libraries (NumPy, PyTorch, Pyro) ``` # Third party libraries import import numpy as np import torch import pyro import matplotlib.pyplot as plt import tqdm # Print version information print("NumPy Version: " + np.__version__) print("PyTorch Version: " + torch.__version__) print("Pyro Version: " + pyro.__version__) # More imports... 
from torch import nn, optim from torch.utils.data import random_split, DataLoader, RandomSampler import torchvision import torchvision.transforms as transforms from pyro.infer import SVI, Trace_ELBO, HMC, MCMC # Import model and dataset classes from ronald_bdl from ronald_bdl import models, datasets from ronald_bdl.models import utils # pyplot setting %matplotlib inline # torch.device / CUDA Setup use_cuda = True if use_cuda and torch.cuda.is_available(): torch_device = torch.device('cuda') torch.backends.cudnn.deterministic = True # Disable 'benchmark' mode # Note: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936 torch.backends.cudnn.benchmark = False use_pin_memory = True # Faster Host to GPU copies with page-locked memory # CUDA libraries version information print("CUDA Version: " + str(torch.version.cuda)) print("cuDNN Version: " + str(torch.backends.cudnn.version())) print("CUDA Device Name: " + str(torch.cuda.get_device_name())) print("CUDA Capabilities: "+ str(torch.cuda.get_device_capability())) else: torch_device = torch.device('cpu') use_pin_memory = False ``` ### Variable settings #### Data prep ``` # Dataset to use uci_dataset_name = 'bostonHousing' # Set the proportion of the original dataset to be available as a whole subset_proportions = [1] # Proportion of the dataset (after getting the subset) to be used for training train_prop = 0.9 # Number of dataset splits n_splits = 10 ``` #### NN settings ``` # Fully-Connected network hidden layers network_hidden_dims = [25, 50, 100] network_hidden_layers = [1, 3, 5] # Dropout dropout_rates = [0.1, 0.3, 0.5] # Length-scale length_scale_values = [1e-2] # Model Precision tau_values = [0.1, 0.15, 0.2] ``` #### Training setup ``` # Number of epochs n_epoch = 4000 # Optimizer learning rate learning_rate = 0.001 # Training data batch sizes if uci_dataset_name in ('protein-tertiary-structure'): # Note: 45730 rows originally n_training_batch = 256 elif uci_dataset_name in ('yacht', 
'bostonHousing'): n_training_batch = 32 # Number of test predictions (for each data point) prediction_runs = [3, 10, 50, 100, 1000, 10000] # Mean Squared Error for loss function to minimize objective = nn.MSELoss() # Test start time test_start_time = datetime.datetime.today().strftime('%Y%m%d%H%M') ``` ## Train the network ``` for subset_prop, hidden_dim, n_hidden, dropout_rate, length_scale, tau in itertools.product( subset_proportions, network_hidden_dims, network_hidden_layers, dropout_rates, length_scale_values, tau_values, ): # Reset the random number generator for each method (to produce identical results) torch.manual_seed(random_seed) np.random.seed(random_seed) pyro.set_rng_seed(random_seed) """ Results file storage """ # Create directory to store results for the current test configuration test_results_path = os.path.join( './test_results', 'number_of_test_predictions_1', uci_dataset_name, test_start_time, ( str(subset_prop) + '_' + str(hidden_dim) + '_' + str(n_hidden) + '_' + str(dropout_rate) + '_' + str(length_scale) + '_' + str(tau)), ) os.makedirs(test_results_path, exist_ok=True) test_results_rmse_mc_path = os.path.join( test_results_path, "rmse_mc.txt" ) test_results_lls_mc_path = os.path.join( test_results_path, "lls_mc.txt" ) # Prepare new subset of the original dataset subset = datasets.UCIDatasets( uci_dataset_name, root_dir='./datasets_files', limit_size=subset_prop, transform=lambda X, mean, std: (X - mean) / std, target_transform=lambda y, mean, std: (y - mean) / std, download=True) # Determine sizes of training and testing set train_size = int(train_prop * len(subset)) test_size = len(subset) - train_size # Print the size of the subset print("subset size = " + str((len(subset), subset.n_features))) print("training set size = %d" % train_size) print("test set size = %d" % test_size) train, test = random_split(subset, lengths=[train_size, test_size]) train_loader = DataLoader(train, batch_size=n_training_batch, pin_memory=use_pin_memory) # 
Prepare network network = models.FCNet( input_dim=subset.n_features, output_dim=subset.n_targets, hidden_dim=hidden_dim, n_hidden=n_hidden, dropout_rate=dropout_rate, dropout_type='bernoulli', ) # Send the whole model to the selected torch.device network.to(torch_device) # Print the network structure print(network) # Model to train mode network.train() # Adam optimizer # https://pytorch.org/docs/stable/optim.html?highlight=adam#torch.optim.Adam # NOTE: Need to set L2 regularization from here reg_strength = utils.reg_strength(dropout_rate, length_scale, train_size, tau) print('reg_strength = ' + str(reg_strength)) optimizer = optim.Adam( network.parameters(), lr=learning_rate, weight_decay=reg_strength, # L2 regularization ) print() """ Training """ print( "Starting subset %f, n_hidden %d, hidden_dim %d, dropout_rate %f, length_scale %f, tau %f" % (subset_prop, n_hidden, hidden_dim, dropout_rate, length_scale, tau)) # Record training start time (for this split) tic = time.time() progress_bar = tqdm.tqdm(range(n_epoch)) for epoch in progress_bar: # loop over the dataset multiple times for i, data in enumerate(train_loader): # get the inputs; data is a list of [inputs, labels] inputs, targets = data # Store the batch to torch_device's memory inputs = inputs.to(torch_device) targets = targets.to(torch_device) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = network(inputs) loss = objective(outputs, targets) loss.backward() optimizer.step() progress_bar.set_description("[iteration %04d] loss: %.4f" % (epoch + 1, loss.item())) # Record training end time toc = time.time() # Report the final loss print("final loss = %f" % (loss.item())) """ Testing """ # Model to eval mode network.eval() # Get the test data inputs, targets = test.dataset[test.indices] # Store the batch to torch_device's memory inputs = inputs.to(torch_device) targets = targets.to(torch_device) for n_predictions in prediction_runs: print(str(n_predictions) + " 
test runs...") # Record testing start time (for this split) tic_testing = time.time() _, mean, var, metrics = network.predict_dist( inputs, n_predictions, y_test=targets, reg_strength=reg_strength, train_size=train_size) # Record testing end time toc_testing = time.time() # store additional metrics if len(metrics) > 0: for key, value in metrics.items(): print(str(key) + " = " + str(value)) if key == 'rmse_mc': with open(test_results_rmse_mc_path, 'a+') as rmse_mc_file: rmse_mc_file.write('%d %f \n' % (n_predictions, value)) elif key == 'test_ll_mc': with open(test_results_lls_mc_path, 'a+') as lls_mc_file: lls_mc_file.write('%d %f \n' % (n_predictions, value)) print() # Report the total training time print("training time = " + str(toc - tic) + " seconds") # Report the total testing time print("testing time (last run) = " + str(toc_testing - tic_testing) + " seconds") print() ``` ## Results visualization ``` # experiment_root_directory = os.path.join( # './test_results', # 'number_of_test_predictions_1', # uci_dataset_name, # test_start_time, # ) # for subset_prop, dropout_rate, length_scale, tau in itertools.product( # subset_proportions, dropout_rates, length_scale_values, tau_values, # ): # for metric_name in ['lls_mc', 'rmse_mc']: # figure_file_name = ( # str(subset_prop) # + '_' + str(dropout_rate) # + '_' + str(length_scale) # + '_' + str(tau) # + '_' + metric_name + '.png' # ) # figure_title = ( # metric_name # + (' subset %f, dropout rate = %f, length_scale %f, tau %f' # % (subset_prop, dropout_rate, length_scale, tau)) # ) # fig, ax = plt.subplots(len(network_hidden_dims), len(network_hidden_layers)) # fig.tight_layout(pad=2, rect=[0, 0.00, 2, 2]) # plt_subplot_current_row = 0 # for hidden_dim in network_hidden_dims: # plt_subplot_current_column = 0 # for n_layer in network_hidden_layers: # # Open the score file # score_file_path = os.path.join( # experiment_root_directory, # ( # str(subset_prop) # + '_' + str(hidden_dim) # + '_' + str(n_layer) # + '_' + 
str(dropout_rate) # + '_' + str(length_scale) # + '_' + str(tau) # ), # metric_name + '.txt', # ) # scores = np.loadtxt(score_file_path).T # if metric_name == 'rmse_mc': # ax[plt_subplot_current_row, plt_subplot_current_column].set_ylim([0, 10]) # elif metric_name == 'lls_mc': # ax[plt_subplot_current_row, plt_subplot_current_column].set_ylim([-10, 0]) # ax[plt_subplot_current_row, plt_subplot_current_column].scatter(scores[0], scores[1]) # ax[plt_subplot_current_row, plt_subplot_current_column].set_title(str((hidden_dim, n_layer))) # plt_subplot_current_column += 1 # plt_subplot_current_row += 1 # fig.suptitle(figure_title, y=2.05) # plt.savefig( # os.path.join(experiment_root_directory, figure_file_name), # dpi=600, # bbox_inches='tight', # ) # plt.show() ```
github_jupyter
# Energy Balance Model Practical This practical uses a simple Energy Balance Model that is in the book by McGuffie & Henderson-Sellers. We will use it to investigate ice-albedo feedbacks and irreversibility. The [model documentation](https://moodle.ucl.ac.uk/mod/resource/view.php?id=292610) and the [book](https://moodle.ucl.ac.uk/mod/book/view.php?id=292625) can be found on the Moodle page. But I will run through the basics here first. ## Model Description This model is one of the simplest around, and has been constructed in a top-down fashion (rather than the bottom-up principles used for a general circulation model). It was originally proposed by [Budyko (1969)](https://moodle.ucl.ac.uk/mod/resource/view.php?id=292627). It divides the globe into 9 different boxes (each with an average temperature) and considers the impact of three different fluxes on each box. The boxes span from the equator to the pole, with evenly spaced boxes in the middle. ![image.png](attachment:image.png) ## Model Equation The three fluxes considered in this model are 1. The incoming solar radiation ($S_o$) of which a fraction, $\alpha$, is reflected back away from the box (yellow). 2. An outgoing longwave radiation ($L$), which depends on the temperature of the box (red). 3. A latitudinal heat transport ($F$), which depends on how warm the box is with respect to the rest of the globe (blue arrows, defined as positive when the transport is out of the box). The model is an equilibrium model (so it has no time variation in it). Therefore all the fluxes are in balance and we can write the following equation for the $i$ th box. $S_i(1-\alpha_i)=L_i+F_i$ ## Parameterisations #### Albedo Albedo, $\alpha$, is high (0.62) if ice-covered, low if not (0.3). We shall assume a grid-box is ice-covered if it is colder than a critical temperature, $T_{crit}$.
$\alpha_i = \begin{cases} 0.3 & \text{if } T_i > T_{crit} \\ 0.62 & \text{if } T_i \leq T_{crit} \end{cases}$ #### Outgoing longwave radiation Top of atmosphere radiation is related to $\sigma T^4$ and the properties of the atmosphere. As this model will only vary over a small range of $T$ (in Kelvin), we shall use a linear approximation: $L_i=A+B T_i$ where $A$ and $B$ are constants derived from observations. #### Heat transports Heat transport is related to temperature gradients (here modelled as a form of conduction for simplicity). Having already computed the global mean temperature ($T_g$), the amount of heat transport *out* of the grid box is simply related to the grid box's temperature difference from the global mean. $F_i=C(T_i-T_g)$ where $C$ is a constant of proportionality, which has been derived from observational analysis. ## Numerical Approach We shall describe how the model is programmed below, as we start coding it up. However the basic way we will solve this model is **iteratively**. What this means is that we will * set the model up * feed in an initial guess * run that guess through the equations, and see what temperatures it gives * put those new temperatures through the equations * repeat that process many times - moving ever slowly closer to the correct solution * once the answer is steady, then it has **converged** on a stable answer # Program Start by loading the required packages into the kernel. We need *numpy* to do mathematics, *matplotlib* so that we can visualise the output, and then *pandas*: the [Python Data Analysis Library](https://pandas.pydata.org/) to build a data frame to store the model output with some metadata.
```
# Import packages
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```
Define some tunable parameters (we shall be altering these later in the exercise)
```
# Input quantities
FracSC=1 #solar constant as fraction of present day
A=204 #thermal A: offset of the linear longwave scheme L = A + B*T
B=2.17 #thermal B: slope of the linear longwave scheme
C=3.81 #transport C: heat-transport proportionality constant
t_crit=-10 #critical temp below which a zone is ice-covered
```
Specify some important constants, and set up some essential factors related to the grid boxes. Each grid box is defined by its mean latitude, which helps specify the proportion of incoming solar radiation it receives, `SunWt`, and how much it contributes to the global average temperature, `cos_lat`.
```
# constant parameters and names
SC=1370 #solar constant of present day in W/m^2
a=0.3 #albedo without ice
a_ice=0.62 #ice albedo
zones=['80-90','70-80','60-70','50-60','40-50','30-40','20-30','10-20','0-10'] # zonal bands
# NOTE(review): 6 for the 0-10 band looks like it should be 5 -- confirm against the source table
zones_mean=np.array([85,75,65,55,45,35,25,15,6]) #mean lat. of each zonal band
SunWt=np.array([0.5,0.531,0.624,0.77,0.892,1.021,1.12,1.189,1.219]) # fraction of mean insolation received by each band
cos_lat=np.cos(zones_mean*3.1415/180) # area weight of each band (3.1415 approximates pi; np.pi would be exact)
R_in=SC/4*FracSC*SunWt #compute the incoming solar radiation at each latitude
```
We need to specify our initial temperatures. These then allow us to determine the albedo at each latitude
```
# initial condition
init_t=[-15,-15,-5,5,10,15,18,22,24] #initial temp
init_a=np.zeros(len(init_t)) #create an array to store initial albedo
# Ice (high albedo) wherever the initial temperature is below the critical value
for i in range(len(zones)):
    if init_t[i]<t_crit:
        init_a[i]=a_ice
    else:
        init_a[i]=a
```
Run these initial temperatures through the equation set to see what it does...
```
# Step 1
Tcos=init_t*cos_lat # temperatures weighted by band area
mean_T=np.sum(Tcos)/np.sum(cos_lat) # area-weighted global mean temperature
# Equilibrium balance S(1-albedo) = A + B*T + C*(T - T_g), solved for T
Temp=(R_in*(1-init_a)+C*mean_T-A)/(B+C)
albedo=np.zeros(len(zones)) #create an array to store the updated albedo
for i in range(len(zones)):
    if Temp[i]<t_crit:
        albedo[i]=a_ice
    else:
        albedo[i]=a
```
Now that we've done it once, let's iterate it again many times (25 here) to allow it to converge.
``` #following steps step_no=25 #step number for i in range(step_no): Tcos=Temp*cos_lat mean_T=np.sum(Tcos)/np.sum(cos_lat) Temp=(R_in*(1-albedo)+C*mean_T-A)/(B+C) albedo=np.zeros(len(zones)) #create an array to store initial albedo for i in range(len(zones)): if Temp[i]<t_crit: albedo[i]=a_ice else: albedo[i]=a print("Running the model results in a global mean temperature of % 5.2f oC." %mean_T) ``` So far everything is stored as separate vectors. It would be much more useful to store things in a structure that can also have some metadata (information about the data). We are going to use a DataFrame, which is provided by `pandas`. ``` pd.DataFrame({'zones':zones,'Albedo':albedo,'Temp':Temp}) ``` Finally, let us plot the output, which we are going to do using the routines in `matplotlib`. ``` #plotting fig, ax1 = plt.subplots() ax1.set_xlabel('lat. zones') ax1.set_ylabel('temperature',color='r') ax1.plot(zones, Temp,'r' ) ax1.tick_params(axis='y') ax2 = ax1.twinx() # create a second axes that shares the same x-axis ax2.set_ylabel('albedo',color='b') ax2.plot(zones, albedo,'b') ax2.tick_params(axis='y') fig.tight_layout() plt.show() ``` # Exercise and investigations with the model Having built a program of this model, we now need to use it to learn something about the climate system. I want you to answer five different questions: 1. What fractional decrease of the solar constant is required to glaciate the world (a snowball Earth)? 2. What would be the impact of increasing $C$, the heat transport parameter? Write down a hypothesis first. 3. The critical temperatures over land and sea are different ($0 ^oC$ and $-13 ^oC$), yet this model just represents a single hemisphere. Do you expect the Arctic or Antarctic to be colder? Test this by altering $T_{crit}$. 4. Early estimates for $B$ ranged from 1.45 to 1.6, but how does the higher value of $B$ used here influence the climate? What does it correspond to physically? 5.
If you set the initial temperatures to those of a snowball Earth, what fraction of solar constant is required to remove the ice from the Equator. What does this mean for the climate? All of these questions can be investigated by altering the "tunable parameters" and the initial temperatures. Below we have gathered these together in a single cell, followed by a single cell for you to run and plot the output (which you should not edit). ``` #Edit these values FracSC=1.0 #solar constant as fraction of present day A=204 #thermal A B=2.17 #thermal B C=3.81 #transport C t_crit=-10. #critical temp init_t=[-15.,-15.,-5.,5.,10.,15.,18.,22.,24.] #initial temp SC=1370 #solar constant of present day in W/m^2 a=0.3 #albedo without ice a_ice=0.62 #ice albedo zones=['80-90','70-80','60-70','50-60','40-50','30-40','20-30','10-20','0-10'] #znoal bands zones_mean=np.array([85,75,65,55,45,35,25,15,6]) #mean lat. of each zonal band SunWt=np.array([0.5,0.531,0.624,0.77,0.892,1.021,1.12,1.189,1.219]) cos_lat=np.cos(zones_mean*3.1415/180) R_in=SC/4*FracSC*SunWt #compute the incoming solar radiation at each latitude init_a=np.zeros(len(init_t)) #create an array to store initial albedo for i in range(len(zones)): if init_t[i]<t_crit: init_a[i]=a_ice else: init_a[i]=a #Step 1 Tcos=init_t*cos_lat mean_T=np.sum(Tcos)/np.sum(cos_lat) Temp=(R_in*(1-init_a)+C*mean_T-A)/(B+C) albedo=np.zeros(len(zones)) #create an array to store initial albedo for i in range(len(zones)): if Temp[i]<t_crit: albedo[i]=a_ice else: albedo[i]=a #following steps step_no=25 #stop number for i in range(step_no): Tcos=Temp*cos_lat mean_T=np.sum(Tcos)/np.sum(cos_lat) Temp=(R_in*(1-albedo)+C*mean_T-A)/(B+C) albedo=np.zeros(len(zones)) #create an array to store initial albedo for i in range(len(zones)): if Temp[i]<t_crit: albedo[i]=a_ice else: albedo[i]=a print("Running the model results in a global mean temperature of % 5.2f oC." 
%mean_T) pd.DataFrame({'zones':zones,'Albedo':albedo,'Temp':Temp}) #plotting fig, ax1 = plt.subplots() ax1.set_xlabel('lat. zones') ax1.set_ylabel('temperature',color='r') ax1.plot(zones, Temp,'r' ) ax1.tick_params(axis='y') ax2 = ax1.twinx() # create a second axes that shares the same x-axis ax2.set_ylabel('albedo',color='b') ax2.plot(zones, albedo,'b') ax2.tick_params(axis='y') fig.tight_layout() plt.show() ```
github_jupyter
<img src='https://radiant-assets.s3-us-west-2.amazonaws.com/PrimaryRadiantMLHubLogo.png' alt='Radiant MLHub Logo' width='300'/> # How to use the Radiant MLHub API The Radiant MLHub API gives access to open Earth imagery training data for machine learning applications. You can learn more about the repository at the [Radiant MLHub site](https://mlhub.earth) and about the organization behind it at the [Radiant Earth Foundation site](https://radiant.earth). This Jupyter notebook, which you may copy and adapt for any use, shows basic examples of how to use the API. Full documentation for the API is available at [docs.mlhub.earth](docs.mlhub.earth). We'll show you how to set up your authorization, see the list of available collections and datasets, and retrieve the items (the data contained within them) from those collections. Each item in our collection is explained in json format compliant with [STAC](https://stacspec.org/) [label extension](https://github.com/radiantearth/stac-spec/tree/master/extensions/label) definition. ## Dependencies This notebook utilizes the [`radiant-mlhub` Python client](https://pypi.org/project/radiant-mlhub/) for interacting with the API. If you are running this notebooks using Binder, then this dependency has already been installed. If you are running this notebook locally, you will need to install this yourself. See the official [`radiant-mlhub` docs](https://radiant-mlhub.readthedocs.io/) for more documentation of the full functionality of that library. ## Authentication ### Create an API Key Access to the Radiant MLHub API requires an API key. To get your API key, go to [dashboard.mlhub.earth](https://dashboard.mlhub.earth). If you have not used Radiant MLHub before, you will need to sign up and create a new account. Otherwise, sign in. In the **API Keys** tab, you'll be able to create API key(s), which you will need. *Do not share* your API key with others: your usage may be limited and sharing your API key is a security risk. 
### Configure the Client Once you have your API key, you need to configure the `radiant_mlhub` library to use that key. There are a number of ways to configure this (see the [Authentication docs](https://radiant-mlhub.readthedocs.io/en/latest/authentication.html) for details). For these examples, we will set the `MLHUB_API_KEY` environment variable. Run the cell below to save your API key as an environment variable that the client library will recognize. *If you are running this notebook locally and have configured a profile as described in the [Authentication docs](https://radiant-mlhub.readthedocs.io/en/latest/authentication.html), then you do not need to execute this cell.* ``` import os os.environ['MLHUB_API_KEY'] = 'PASTE_YOUR_API_KEY_HERE' from radiant_mlhub import client, get_session ``` ## List data collections A **collection** in the Radiant MLHub API is a [STAC Collection](https://github.com/radiantearth/stac-spec/tree/master/collection-spec) representing a group of resources (represented as [STAC Items](https://github.com/radiantearth/stac-spec/tree/master/item-spec) and their associated assets) covering a given spatial and temporal extent. A Radiant MLHub collection may contain resources representing training labels, source imagery, or (rarely) both. Use the `client.list_collections` function to list all available collections and view their properties. The following cell uses the `client.list_collections` function to print the ID, license (if available), and citation (if available) for all available collections. ``` collections = client.list_collections() for c in collections: collection_id = c['id'] license = c.get('license', 'N/A') citation = c.get('sci:citation', 'N/A') print(f'ID: {collection_id}\nLicense: {license}\nCitation: {citation}\n') ``` Collection objects have many other properties besides the ones shown above. The cell below prints the `ref_african_crops_kenya_01_labels` collection object in its entirety. 
``` kenya_crops_labels = next(c for c in collections if c['id'] == 'ref_african_crops_kenya_01_labels') kenya_crops_labels ``` ## Select an Item Collections have items associated with them that are used to catalog assets (labels or source imagery) for that collection. Collections vary greatly in the number of items associated with them; some may contain only a handful of items, while others may contain hundreds of thousands of items. The following cell uses the `client.list_collection_items` to get the first item in the `ref_african_crops_kenya_01_labels` collection. The `client.list_collection_items` is a Python generator that yields a dictionary for each item in the collection (you can read more about how to use Python generators [here](https://realpython.com/introduction-to-python-generators/)). ``` # NOTE: Here we are using using the "id" property of the collection that we fetched above as the collection_id # argument to the list_collection_items function items_iterator = client.list_collection_items(kenya_crops_labels['id']) # Get the first item first_item = next(items_iterator) first_item ``` > **IMPORTANT:** Some collections may have hundreds of thousands of items (e.g. `bigearthnet_v1_source`). Looping over all of the items for these massive collections may take a very long time (perhaps on the order of hours), and is not recommended. To prevent accidentally looping over all assets, the `client.list_collection_items` function limits the total number of returned items to ``100`` by default. You can change this limit using the `limit` argument: > ```python > client.list_collection_items(collection['id'], limit=150) > ``` > If you would like to download all of the assets associated with a collection, it is far more efficient to use the `client.download_archive` method. ### List Available Assets Each STAC Item has assets associated with it, representing the actual source imagery or labels described by that Item. 
The cell below summarizes the assets for the first item that we selected above by printing the key within the `assets` dictionary, the asset title and the media type.
```
for asset_key, asset in first_item['assets'].items():
    title = asset['title']
    media_type = asset['type']
    print(f'{asset_key}: {title} [{media_type}]')
```
## Download Assets To download these assets, we will first set up a helper function to get the download link from the asset and then download the content to a local file. > **NOTE:** If you are running these notebooks using Binder these resources will be downloaded to the remote file system that the notebooks are running on and **not to your local file system.** If you want to download the files to your machine, you will need to clone the repo and run the notebook locally.
```
import urllib.parse
from pathlib import Path

import requests

def download(item, asset_key, output_dir='.'):
    """Download a single asset of a STAC item to a local file.

    Args:
        item: STAC item dictionary; assets are looked up in item['assets'].
        asset_key: Key of the asset to download (e.g. 'labels', 'B01').
        output_dir: Directory to write the file into (default: current dir).

    Returns:
        None. Prints the destination path on success, or a message and
        returns early when the asset or its 'href' URL is missing.
    """
    # Try to get the given asset and return None if it does not exist
    asset = item.get('assets', {}).get(asset_key)
    if asset is None:
        print(f'Asset "{asset_key}" does not exist in this item')
        return None
    # Try to get the download URL from the asset and return None if it does not exist
    download_url = asset.get('href')
    if download_url is None:
        print(f'Asset {asset_key} does not have an "href" property, cannot download.')
        return None
    session = get_session()
    # Stream the response so large files are written chunk by chunk
    # instead of being held in memory
    r = session.get(download_url, allow_redirects=True, stream=True)
    # r.url is the final URL after redirects; its last path component
    # is used as the local filename
    filename = urllib.parse.urlsplit(r.url).path.split('/')[-1]
    output_path = Path(output_dir) / filename
    with output_path.open('wb') as dst:
        for chunk in r.iter_content(chunk_size=512 * 1024):  # 512 KiB chunks
            if chunk:
                dst.write(chunk)
    print(f'Downloaded to {output_path.resolve()}')
```
### Download Labels We can download the `labels` asset of the `selected_item` by calling the following function:
```
download(first_item, 'labels')
```
### Download Metadata Likewise, we can download the documentation pdf and property description csv.
``` download(first_item, 'documentation') download(first_item, 'property_descriptions') ``` ### Download Source Imagery The Item that we fetched earlier represents a collection of labels. This item also contains references to all of the source imagery used to generate these labels in its `links` property. Any source imagery links will have a `rel` type of `"source"`. In the cell below we get a list of all the sources associated with this item and fetch the first one ``` source_links = [link for link in first_item['links'] if link['rel'] == 'source'] print(f'Number of Source Items: {len(source_links)}') session = get_session() r = session.get(source_links[0]['href']) source_item = r.json() print('First Item\n--------') source_item ``` Once we have the source item, we can use our `download` function to download assets associated with that item. The cell below downloads just the first 3 bands of the source item that we just fetched (a Sentinel 2 scene). ``` download(source_item, 'B01') download(source_item, 'B02') download(source_item, 'B03') ``` ### Download All Assets Looping through all items and downloading the associated assets may be *very* time-consuming for larger datasets like BigEarthNet or LandCoverNet. Instead, MLHub provides TAR archives of all collections that can be downloaded using the `/archive/{collection_id}` endpoint. The following cell uses the `client.download_archive` function to download the `ref_african_crops_kenya_01_labels` archive to the current working directory. ``` client.download_archive('ref_african_crops_kenya_01_labels', output_dir='.') ```
github_jupyter
``` import os import json import tensorflow as tf from tf_transformers.models import ROBERTAEncoder, EncoderDecoder from tf_transformers.utils import get_config, validate_model_name allowed_model_names = ["roberta_base", "roberta_large"] def modelWrapper(model_name, **kwargs): """Wrapper for Model Args: model_name ([type]): [description] Returns: [type]: [description] """ name = "roberta" model_name = model_name.replace("-", "_") # replace - with _ validate_model_name(model_name, allowed_model_names) config = get_config("tf_transformers.models.model_configs.roberta", model_name) if "is_training" not in kwargs: kwargs["is_training"] = False checkpoint_dir = None if "checkpoint_dir" in kwargs: checkpoint_dir = kwargs["checkpoint_dir"] del kwargs["checkpoint_dir"] kwargs["name"] = name print("Kwargs", kwargs) model_layer = ROBERTAEncoder(config, **kwargs) model = model_layer.get_model() if checkpoint_dir: model.load_checkpoint(checkpoint_dir) return model_layer, model, config from tf_transformers.utils import get_config, validate_model_name, get_model_wrapper from tf_transformers.models import EncoderDecoder from absl import logging logging.set_verbosity("INFO") model_layer, model, config = EncoderDecoderModel(model_name='roberta_base', is_training = False, share_attention_layers=False, share_encoder_embeddings=True, pipeline_mode="auto-regressive") model.load_checkpoint("/Users/PRVATE/Documents/Tokenizers/tf_xsum_summarizer/") model_layer, model, config = T5Model(model_name='t5-small', is_training=False, pipeline_mode='auto-regressive') model.load_checkpoint("/Users/PRVATE/tf_transformers_models/t5-small/") model_layer_test, model_test, config = EncoderDecoderModel(model_name='roberta_base', is_training=False) tf.keras.backend.clear_session() # tf_transformers Layer (an extension of Keras Layer) # This is not Keras model, but extension of keras Layer encoder_layer = ROBERTAEncoder(config=config['encoder'], name='roberta', mask_mode='user_defined', is_training=False ) 
tf.keras.backend.clear_session() decoder_layer = ROBERTAEncoder(config=config['encoder'], name='roberta', mask_mode='causal', is_training=False, is_decoder = True, use_mlm_layer = True, share_attention_layers = False, share_encoder_embeddings = True, encoder_embedding_layer=encoder_layer._embedding_layer, encoder_type_embedding_layer=encoder_layer._type_embeddings, encoder_positional_embedding_layer=encoder_layer._position_embedding_layer ) len(decoder_layer.variables) for var in decoder_layer.variables: print(var.name, var.shape) tf.keras.backend.clear_session() model2 = EncoderDecoder(encoder=encoder_layer, decoder=decoder_layer, is_training=True, name = 'bert2bertaaa') model2 = model2.get_model() len(model2.variables) model2.load_checkpoint("/Users/PRVATE/Documents/Tokenizers/tf_xsum_summarizer") # Default configs for the model model_config_dir = '/Users/PRVATE/Documents/tf_transformers/src/tf_transformers/models/model_configs/' model_name = 'roberta_base' config_location = os.path.join(model_config_dir, model_name, 'config.json') config = json.load(open(config_location)) # config['num_hidden_layers'] = 1 # Always do this tf.keras.backend.clear_session() # tf_transformers Layer (an extension of Keras Layer) # This is not Keras model, but extension of keras Layer encoder_layer = ROBERTAEncoder(config=config, name='roberta', mask_mode=config['mask_mode'], is_training=False ) tf.keras.backend.clear_session() decoder_layer = ROBERTAEncoder(config=config, name='roberta', mask_mode='causal', is_training=False, is_decoder = True, use_mlm_layer = True, share_attention_layers = False, share_encoder_embeddings = True, encoder_embedding_layer=encoder_layer._embedding_layer, encoder_type_embedding_layer=encoder_layer._type_embeddings, encoder_positional_embedding_layer=encoder_layer._position_embedding_layer ) tf.keras.backend.clear_session() model = EncoderDecoder(encoder=encoder_layer, decoder=decoder_layer, is_training=True, name = 'bert2bert') model = model.get_model() 
model.load_checkpoint("/Users/PRVATE/Documents/Tokenizers/tf_xsum_summarizer/") model.model_config # Default configs for the model model_config_dir = '/Users/PRVATE/Documents/tf_transformers/src/tf_transformers/models/model_configs/' model_name = 'roberta_base' config_location = os.path.join(model_config_dir, model_name, 'config.json') config = json.load(open(config_location)) # config['num_hidden_layers'] = 1 # Always do this tf.keras.backend.clear_session() # tf_transformers Layer (an extension of Keras Layer) # This is not Keras model, but extension of keras Layer encoder_layer = ROBERTAEncoder(config=config, name='roberta', mask_mode=config['mask_mode'], is_training=False ) tf.keras.backend.clear_session() decoder_layer = ROBERTAEncoder(config=config, name='roberta', mask_mode='causal', is_training=False, is_decoder = True, pipeline_mode='auto-regressive', use_mlm_layer = True, share_attention_layers = False, share_encoder_embeddings = True, encoder_embedding_layer=encoder_layer._embedding_layer, encoder_type_embedding_layer=encoder_layer._type_embeddings, encoder_positional_embedding_layer=encoder_layer._position_embedding_layer ) tf.keras.backend.clear_session() model_test = EncoderDecoder(encoder=encoder_layer, decoder=decoder_layer, is_training=False, name = 'bert2bert') model_test = model_test.get_model() model_test.load_checkpoint("/Users/PRVATE/Documents/Tokenizers/tf_xsum_summarizer/") import datasets data = datasets.load_from_disk("/Users/PRVATE/HUggingFace_Models/dataset/cnn_dailymail/") from transformers import RobertaTokenizer tokenizer = RobertaTokenizer.from_pretrained('roberta-base') train_data = data["train"].select(range(100)) from tf_transformers.data import pad_dataset encoder_max_length=512 decoder_max_length=64 batch_size = 2 @pad_dataset def process_data_to_model_inputs(text_list): # Tokenizer will automatically set [BOS] <text> [EOS] inputs = tokenizer(text_list, padding="max_length", truncation=True, max_length=encoder_max_length) 
tokenized = {} tokenized["encoder_input_ids"] = inputs.input_ids tokenized["encoder_input_mask"] = inputs.attention_mask tokenized["encoder_input_type_ids"] = tf.zeros_like(inputs.attention_mask).numpy() return tokenized sample_data = train_data.select(range(2)) article_list = [item['article'] for item in list(sample_data)] encoded_inputs = process_data_to_model_inputs(article_list) model_test.save_checkpoint("temp", overwrite=True) model_test.save_as_serialize_module("temp_pb_module/", overwrite=True) loaded = tf.saved_model.load("temp_pb_module/") model_pb = loaded.signatures['serving_default'] from tf_transformers.text import TextDecoderSeq2Seq decoder = TextDecoderSeq2Seq( model = model_test, decoder_start_token_id = 0, decoder_input_type_ids = 0 ) # decoder_results = decoder.decode(encoded_inputs, # mode='beam', # beam_size=3, # max_iterations=64, # eos_id=2) decoder_results = decoder.decode(encoded_inputs, mode='top_k_top_p', top_k = 35, top_p =0.55, num_return_sequences=3, max_iterations=64, eos_id=2) # decoder_results = decoder.decode(encoded_inputs, # mode='greedy', # max_iterations=64, # eos_id=2) decoder_results['predicted_ids'] decoder_results['matched_eos_pos'] tokenizer.decode(decoder_results['predicted_ids'][0][0]) tokenizer.decode(decoder_results['predicted_ids'][1][0]) decoder_results['matched_eos_pos'] # decoder_layer = TextDecoderSerializableSeq2Seq( # model = model_test, # decoder_start_token_id = 0, # decoder_input_type_ids = 0, # max_iterations=None, # mode="greedy", # do_sample=False, # eos_id=2, # ) # # Convert whole operation to a model # decoder_model = decoder_layer.get_model() # decoder_layer = TextDecoderSerializableSeq2Seq( # model = model_test, # decoder_start_token_id = 0, # decoder_input_type_ids = 0, # max_iterations=None, # mode="beam", # beam_size=3, # do_sample=False, # eos_id=2, # ) # # Convert whole operation to a model # decoder_model = decoder_layer.get_model() decoder_layer = TextDecoderSerializableSeq2Seq( model = 
model_test, decoder_start_token_id = 0, decoder_input_type_ids = 0, max_iterations=None, mode="top_k_top_p", top_k = 35, top_p = 0.55, num_return_sequences=3, do_sample=False, eos_id=2, ) # Convert whole operation to a model decoder_model = decoder_layer.get_model() # decoder_module = LegacyModule(decoder_model) # decoder_module.save(saved_model_dir_strategy) # print("Saved") encoded_inputs_copy = encoded_inputs.copy() encoded_inputs_copy['iterations'] = tf.constant([[64]]) decoder_results_serialized = decoder_model(encoded_inputs_copy) decoder_results['matched_eos_pos'] decoder_results_serialized['matched_eos_pos'] tf.assert_equal(decoder_results['predicted_ids'], decoder_results_serialized['predicted_ids']) ```
github_jupyter
## Title : Exercise: Hyperparameter tuning ## Description : ### Tuning the hyperparameters Random Forests perform very well out-of-the-box, with the pre-set hyperparameters in sklearn. Some of the tunable parameters are: - The number of trees in the forest: n_estimators, int, default=100 - The complexity of each tree: stop when a leaf has <= min_samples_leaf samples - The sampling scheme: number of features to consider at any given split: max_features {“auto”, “sqrt”, “log2”}, int or float, default=”auto”. ## Instructions: - Read the datafile diabetes.csv as a Pandas data frame. - Assign the predictor and response variable as mentioned in the scaffold. - Split the data into train and validation sets. - Define a vanilla Random Forest and fit the model on the entire data. - For various hyper parameters of the model, define different Random Forest models and train on the data. - Compare the results with each model. ## Hints: <a href="https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html" target="_blank">RandomForestClassifier()</a> Defines the RandomForestClassifier and includes more details on the definition and range of values for its tunable parameters. <a href="https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier.predict_proba" target="_blank">model.predict_proba(X)</a> Predict class probabilities for X <a href="https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html" target="_blank">roc_auc(y_test, y_proba)</a> Calculates the area under the receiver operating curve (AUC). <a href="https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html" target="_blank">GridSearchCV()</a> Performes exhaustive search over specified parameter values for an estimator. 
``` # Import necessary libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import roc_auc_score from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.inspection import permutation_importance %matplotlib inline # Read the dataset and take a quick look df = pd.read_csv("diabetes.csv") df.head() # Assign the predictor and response variables. # Outcome is the response and all the other columns are the predictors X = df.drop("Outcome", axis=1) y = df['Outcome'] # Set the seed for reproducibility of results seed = 0 # Split the data into train and test sets with the mentioned seed X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=seed) ``` ### Vanila random forest Start by training a Random Forest Classifier using the default parameters and calculate the Receiver Operating Characteristic Area Under the Curve (ROC AUC). As we know, this metric is better than accuracy for a classification problem, since it covers the case of an imbalanced dataset. ``` ### edTest(test_vanilla) ### # Define a Random Forest classifier with randon_state = seed vanilla_rf = ___ # Fit the model on the entire data vanilla_rf.fit(___, ___); # Calculate AUC/ROC on the test set y_proba = ___[:, 1] auc = np.round(roc_auc_score(y_test, y_proba),2) print(f'Plain RF AUC on test set:{auc}') # Number of samples and features num_features = X_train.shape[1] num_samples = X_train.shape[0] num_samples, num_features ``` ### 1. Number of trees, `num_iterators`, default = 100 The number of trees needs to be large enough for the $oob$ error to stabilize in its lowest possible value. Plot the $oob$ error of a random forest as a function of the number of trees. Trees in a RF are called `estimators`. 
A good start is 10 times the number of features, however, adjusting other hyperparameters will influence the optimum number of trees. ``` %%time from collections import OrderedDict clf = RandomForestClassifier(warm_start=True, oob_score=True, min_samples_leaf=40, max_depth = 10, random_state=seed) error_rate = {} # Range of `n_estimators` values to explore. min_estimators = 150 max_estimators = 500 for i in range(min_estimators, max_estimators + 1): clf.set_params(n_estimators=i) clf.fit(X_train.values, y_train.values) # Record the OOB error for each `n_estimators=i` setting. oob_error = 1 - clf.oob_score_ error_rate[i] = oob_error %%time # Generate the "OOB error rate" vs. "n_estimators" plot. # OOB error rate = num_missclassified/total observations (%)\ xs = [] ys = [] for label, clf_err in error_rate.items(): xs.append(label) ys.append(clf_err) plt.plot(xs, ys) plt.xlim(min_estimators, max_estimators) plt.xlabel("n_estimators") plt.ylabel("OOB error rate") plt.show(); ``` ### 2. `min_samples_leaf`, default = 1 The minimum number of samples required to be at a leaf node. A split point at any depth will only be considered if it leaves at least min_samples_leaf training samples in each of the left and right branches. This may have the effect of smoothing the model, especially in regression. We will plot various values of the `min_samples_leaf` with `num_iterators`. ``` %%time from collections import OrderedDict ensemble_clfs = [ (1, RandomForestClassifier(warm_start=True, min_samples_leaf=1, oob_score=True, max_depth = 10, random_state=seed)), (5, RandomForestClassifier(warm_start=True, min_samples_leaf=5, oob_score=True, max_depth = 10, random_state=seed)) ] # Map a label (the value of `min_samples_leaf`) to a list of (model, oob error) tuples. 
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs) min_estimators = 80 max_estimators = 500 for label, clf in ensemble_clfs: for i in range(min_estimators, max_estimators + 1): clf.set_params(n_estimators=i) clf.fit(X_train.values, y_train.values) # Record the OOB error for each model. Error is 1 - oob_score # oob_score: score of the training dataset obtained using an # out-of-bag estimate. # OOB error rate is % of num_missclassified/total observations oob_error = 1 - clf.oob_score_ error_rate[label].append((i, oob_error)) for label, clf_err in error_rate.items(): xs, ys = zip(*clf_err) plt.plot(xs, ys, label=f'min_samples_leaf={label}') plt.xlim(min_estimators, max_estimators) plt.xlabel("n_estimators") plt.ylabel("OOB error rate") plt.legend(loc="upper right") plt.show(); err = 100 best_num_estimators = 0 for label, clf_err in error_rate.items(): num_estimators, error = min(clf_err, key=lambda n: (n[1], -n[0])) if error<err: err=error; best_num_estimators = num_estimators; best_leaf = label print(f'Optimum num of estimators: {best_num_estimators} \nmin_samples_leaf: {best_leaf}') ``` Re-train the Random Forest Classifier using the new values for the parameters and calculate the AUC/ROC. Include another parameter, the `max_features`, the number of features to consider when looking for the best split. ``` ### edTest(test_estimators) ### estimators_rf = RandomForestClassifier(n_estimators= best_num_estimators, random_state=seed, oob_score=True, min_samples_leaf=best_leaf, max_features='sqrt') # Fit the model on the entire data estimators_rf.fit(X_train, y_train); # Calculate AUC/ROC on the test set y_proba = ___[:, 1] estimators_auc = np.round(roc_auc_score(y_test, y_proba),2) print(f'Educated RF AUC on test set:{estimators_auc}') ``` Look at the model's parameters ``` estimators_rf.get_params() ``` ### 3. 
Performing a cross-validation search After we have some idea of the range of optimum values for the number of trees and maybe a couple of other parameters, and have enough computing power, you may perform an exhaustive search over other parameter values. ``` %%time from sklearn.model_selection import GridSearchCV do_grid_search = True if do_grid_search: rf = RandomForestClassifier(n_jobs=-1, n_estimators= best_num_estimators, oob_score=True, max_features = 'sqrt', min_samples_leaf=best_leaf, random_state=seed).fit(X_train,y_train) param_grid = { 'min_samples_split': [2,5,None]} scoring = {'AUC': 'roc_auc'} grid_search = GridSearchCV(rf, param_grid, scoring=scoring, refit='AUC', return_train_score=True, n_jobs=-1) results = grid_search.fit(X_train, y_train) print(results.best_estimator_.get_params()) best_rf = results.best_estimator_ # Calculate AUC/ROC y_proba = best_rf.predict_proba(X_test)[:, 1] auc = np.round(roc_auc_score(y_test, y_proba),2) print(f'GridSearchCV RF AUC on test set:{auc}') ```
github_jupyter
First, read the input tables as pandas data frames, and filter out any unnecessary columns and rows. ``` # Import required modules. import pandas as pd import geopandas as gpd import random pd.set_option('display.max_columns', None) # Set input/output local file paths - update these to match your system. dissemination_area_lyr = r'/home/jovyan/work/data/census/lda_000b16a_e/lda_000b16a_e.shp' census_csv = r'/home/jovyan/work/data/census/98-400-X2016055_ENG_CSV/98-400-X2016055_English_CSV_data.csv' out_csv = r'/home/jovyan/work/blog/language-dot-map/language-dots-100.csv' # Read dissemination areas and subset columns. da_df = gpd.read_file(dissemination_area_lyr) da_df = da_df[['DAUID', 'geometry']] da_df.head() # Read language census data in chunks, filtering to totals for disemmination areas (GEO_LEVEL 4). May take a while to complete. census_cols = ['GEO_CODE (POR)', 'Dim: Knowledge of official languages (5): Member ID: [2]: English only', 'Dim: Knowledge of official languages (5): Member ID: [3]: French only', 'Dim: Knowledge of official languages (5): Member ID: [4]: English and French'] reader = pd.read_csv(census_csv, iterator=True, chunksize=1000) census_df = pd.concat([ chunk.loc[ (chunk['GEO_LEVEL'] == 4) & (chunk['DIM: Sex (3)'] == 'Total - Sex') & (chunk['DIM: Mother tongue (269)'] == 'Total - Mother tongue'), census_cols ] for chunk in reader ]) # Rename columns. census_df.columns = ['DAUID', 'English', 'French', 'English and French'] census_df.head() ``` Next, join the two data frames by the common key DAUID to assign geometries to the census data and calculate the bounding box (bbox) for each geometry. ``` # Convert the IDs "object" data type to integer, to allow the merge. da_df['DAUID'] = da_df['DAUID'].astype('int') # Merge geometry to census data frame and convert to a geopandas data frame. 
merged_df = pd.merge(census_df, da_df, on='DAUID') merged_df = gpd.GeoDataFrame(merged_df, geometry='geometry') # Calculate bbox for each geometry with bounds method. merged_df = pd.concat([merged_df, merged_df.bounds], axis=1) merged_df.head() ``` The basic methodology for a dot map like this is to randomly distribute point coordinates within the administration boundary, one to represent each value to be mapped. In this case, we will allocate a coordinate for every 100 people present in each language category. To distribute the dots, a random location within the bounding box is calculated and then tested against the actual administation boundary geometry. If the coordinate falls within the geometry, it is kept, and if not, a new random location calculated, the process repeated until a coordinate is found that does intersect. ``` # Recalculate values as 1 per 100 persons (rounded to nearest 100). factor = 100 merged_df[['English', 'French', 'English and French']] = merged_df[['English', 'French', 'English and French']].astype('int') merged_df[['English', 'French', 'English and French']] = round(merged_df[['English', 'French', 'English and French']] / factor).astype('int') # Define a function to randomly distribute coordinates within a geometry per value for each language. def random_coordinates(row): results = [] for language in ('English', 'French', 'English and French'): count = 0 val = row[language] while count < val: x = random.uniform(row['minx'], row['maxx']) y = random.uniform(row['miny'], row['maxy']) pt = Point(x, y) if pt.within(row['geometry']): count += 1 results.append([language, x, y]) return pd.DataFrame(results, columns=('language', 'x', 'y')) # Apply the function to every row of the data frame, returning a series with a data frame for each row. This step also takes a while to run. results = merged_df.apply(random_coordinates, axis=1, raw=True) # Unpack the series and concatenate the data frames. 
results = pd.concat(results.tolist(), ignore_index=True) # Write the results to file. results.to_csv(out_csv, index=False) ```
github_jupyter
<a href="https://colab.research.google.com/github/SauravMaheshkar/X-Ray-Image-Classification/blob/main/notebooks/GradCAM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Packages 📦 and Basic Setup ``` %%capture !pip3 install wandb import cv2 import imutils import numpy as np import tensorflow as tf from google.colab.patches import cv2_imshow from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img from tensorflow.keras.applications import imagenet_utils IMAGE_SIZE = [224, 224] ``` # The Model 👷‍♀️ It takes a lot of time to train models, that's we use Weights and Biases Artifacts to store our model weights. You can store different versions of your datasets and models in the cloud as Artifacts. Think of an Artifact as of a folder of data to which we can add individual files, and then upload to the cloud as a part of our W&B project, which also supports automatic versioning of datasets and models. Artifacts also track the training pipelines as DAGs. Here's an example of a artifacts graph. 
![](https://i.imgur.com/QQULnpP.gif) ## Download ⬇️ Weights from WandB Artifacts ``` %%capture import wandb run = wandb.init() artifact = run.use_artifact('sauravmaheshkar/xray-image-classification/model:v1', type='weights') artifact_dir = artifact.download() run.finish() ``` ## Load the Model 🚀 ``` def get_model(): base_model = tf.keras.applications.MobileNetV2( input_shape=(*IMAGE_SIZE, 3), include_top=False, weights="imagenet" ) base_model.trainable = False x = base_model.output x = tf.keras.layers.GlobalAveragePooling2D()(x) x = tf.keras.layers.Dense(8, activation="relu")(x) x = tf.keras.layers.Dropout(0.5)(x) outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x) model = tf.keras.Model(inputs=base_model.input, outputs=outputs) return model model = get_model() model.load_weights('/content/artifacts/model:v1/model.h5') ``` # GradCAM 🔍 ## Custom Class 💀 Here, we implement a Custom `GradCAM` class to generate the heatmap. > Code ported from PyImageSearch Tutorials ``` class GradCAM: def __init__(self, model, classIdx, layerName=None): self.model = model self.classIdx = classIdx self.layerName = layerName if self.layerName is None: self.layerName = self.find_target_layer() def find_target_layer(self): for layer in reversed(self.model.layers): if len(layer.output_shape) == 4: return layer.name raise ValueError("Could not find 4D layer. 
Cannot apply GradCAM.") def compute_heatmap(self, image, eps=1e-8): gradModel = tf.keras.models.Model( inputs=[self.model.inputs], outputs=[self.model.get_layer(self.layerName).output, self.model.output]) with tf.GradientTape() as tape: inputs = tf.cast(image, tf.float32) (convOutputs, predictions) = gradModel(inputs) loss = predictions[:, self.classIdx] grads = tape.gradient(loss, convOutputs) castConvOutputs = tf.cast(convOutputs > 0, "float32") castGrads = tf.cast(grads > 0, "float32") guidedGrads = castConvOutputs * castGrads * grads convOutputs = convOutputs[0] guidedGrads = guidedGrads[0] weights = tf.reduce_mean(guidedGrads, axis=(0, 1)) cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1) (w, h) = (image.shape[2], image.shape[1]) heatmap = cv2.resize(cam.numpy(), (w, h)) numer = heatmap - np.min(heatmap) denom = (heatmap.max() - heatmap.min()) + eps heatmap = numer / denom heatmap = (heatmap * 255).astype("uint8") return heatmap def overlay_heatmap(self, heatmap, image, alpha=0.5, colormap=cv2.COLORMAP_VIRIDIS): heatmap = cv2.applyColorMap(heatmap, colormap) output = cv2.addWeighted(image, alpha, heatmap, 1 - alpha, 0) return (heatmap, output) ``` ## CAM 🙎🏻‍♂️ ``` # Change path for your own image orig = cv2.imread("/content/pneumonia.jpeg") w, h = 224, 224 resized = cv2.resize(orig, (w, h))🙎🏻‍♂️ image = load_img("/content/pneumonia.jpeg", target_size=(w, h)) image = img_to_array(image) image = np.expand_dims(image, axis=0) image = image.astype('float64') preds = model.predict(image) i = np.argmax(preds[0]) cam = GradCAM(model, i) heatmap = cam.compute_heatmap(image) heatmap = cv2.resize(heatmap, (orig.shape[1], orig.shape[0])) (heatmap, output) = cam.overlay_heatmap(heatmap, orig, alpha=0.5) cv2.rectangle(output, (0, 0), (140, 40), (0, 0, 0), -1) cv2.putText(output, "GradCAM", (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2) output = np.hstack([orig, heatmap, output]) output = imutils.resize(output, height=400) cv2_imshow(output) 
cv2.waitKey(0) ```
github_jupyter
## 1. Meet Dr. Ignaz Semmelweis <p><img style="float: left;margin:5px 20px 5px 1px" src="https://s3.amazonaws.com/assets.datacamp.com/production/project_20/img/ignaz_semmelweis_1860.jpeg"></p> <!-- <img style="float: left;margin:5px 20px 5px 1px" src="https://s3.amazonaws.com/assets.datacamp.com/production/project_20/datasets/ignaz_semmelweis_1860.jpeg"> --> <p>This is Dr. Ignaz Semmelweis, a Hungarian physician born in 1818 and active at the Vienna General Hospital. If Dr. Semmelweis looks troubled it's probably because he's thinking about <em>childbed fever</em>: A deadly disease affecting women that just have given birth. He is thinking about it because in the early 1840s at the Vienna General Hospital as many as 10% of the women giving birth die from it. He is thinking about it because he knows the cause of childbed fever: It's the contaminated hands of the doctors delivering the babies. And they won't listen to him and <em>wash their hands</em>!</p> <p>In this notebook, we're going to reanalyze the data that made Semmelweis discover the importance of <em>handwashing</em>. Let's start by looking at the data that made Semmelweis realize that something was wrong with the procedures at Vienna General Hospital.</p> ``` # importing modules import pandas as pd # Read datasets/yearly_deaths_by_clinic.csv into yearly yearly = pd.read_csv('datasets/yearly_deaths_by_clinic.csv') # Print out yearly print(yearly) ``` ## 2. The alarming number of deaths <p>The table above shows the number of women giving birth at the two clinics at the Vienna General Hospital for the years 1841 to 1846. You'll notice that giving birth was very dangerous; an <em>alarming</em> number of women died as the result of childbirth, most of them from childbed fever.</p> <p>We see this more clearly if we look at the <em>proportion of deaths</em> out of the number of women giving birth. Let's zoom in on the proportion of deaths at Clinic 1.</p> ``` # Calculate proportion of deaths per no. 
births yearly["proportion_deaths"] = yearly.deaths/yearly.births # Extract clinic 1 data into yearly1 and clinic 2 data into yearly2 yearly1 = yearly[yearly['clinic'] == 'clinic 1'] yearly2 = yearly[yearly['clinic'] == 'clinic 2'] # Print out yearly1 print(yearly1) ``` ## 3. Death at the clinics <p>If we now plot the proportion of deaths at both clinic 1 and clinic 2 we'll see a curious pattern...</p> ``` # This makes plots appear in the notebook %matplotlib inline # Plot yearly proportion of deaths at the two clinics ax = yearly1.plot(x='year', y='proportion_deaths', label='clinic1') yearly2.plot(x='year', y='proportion_deaths', label='clinic2', ax=ax) ax.set_ylabel('Proportion deaths') ``` ## 4. The handwashing begins <p>Why is the proportion of deaths constantly so much higher in Clinic 1? Semmelweis saw the same pattern and was puzzled and distressed. The only difference between the clinics was that many medical students served at Clinic 1, while mostly midwife students served at Clinic 2. While the midwives only tended to the women giving birth, the medical students also spent time in the autopsy rooms examining corpses. </p> <p>Semmelweis started to suspect that something on the corpses, spread from the hands of the medical students, caused childbed fever. So in a desperate attempt to stop the high mortality rates, he decreed: <em>Wash your hands!</em> This was an unorthodox and controversial request, nobody in Vienna knew about bacteria at this point in time. </p> <p>Let's load in monthly data from Clinic 1 to see if the handwashing had any effect.</p> ``` # Read datasets/monthly_deaths.csv into monthly monthly = pd.read_csv('datasets/monthly_deaths.csv', parse_dates=['date']) # Calculate proportion of deaths per no. births monthly['proportion_deaths'] = monthly.deaths/monthly.births # Print out the first rows in monthly monthly.head() ``` ## 5. The effect of handwashing <p>With the data loaded we can now look at the proportion of deaths over time. 
In the plot below we haven't marked where obligatory handwashing started, but it reduced the proportion of deaths to such a degree that you should be able to spot it!</p> ``` # Plot monthly proportion of deaths # This makes plots appear in the notebook %matplotlib inline # Plot yearly proportion of deaths at the two clinics ax = monthly['proportion_deaths'].plot(x='date', y='proportion_deaths', label='clinic1') monthly['proportion_deaths'].plot(x='date', y='proportion_deaths', label='clinic2', ax=ax) ax.set_ylabel("Proportion deaths") ``` ## 6. The effect of handwashing highlighted <p>Starting from the summer of 1847 the proportion of deaths is drastically reduced and, yes, this was when Semmelweis made handwashing obligatory. </p> <p>The effect of handwashing is made even more clear if we highlight this in the graph.</p> ``` # This makes plots appear in the notebook %matplotlib inline # Date when handwashing was made mandatory import pandas as pd handwashing_start = pd.to_datetime('1847-06-01') # Split monthly into before and after handwashing_start before_washing = monthly[monthly['date'] < handwashing_start] after_washing = monthly[monthly['date'] >= handwashing_start] # Plot monthly proportion of deaths before and after handwashing ax = before_washing.plot(x='date', y='proportion_deaths', label='clinic1') after_washing.plot(x='date', y='proportion_deaths', label='clinic2', ax=ax) ax.set_ylabel("Proportion deaths") ``` ## 7. More handwashing, fewer deaths? <p>Again, the graph shows that handwashing had a huge effect. How much did it reduce the monthly proportion of deaths on average?</p> ``` # Difference in mean monthly proportion of deaths due to handwashing before_proportion = before_washing['proportion_deaths'] after_proportion = after_washing['proportion_deaths'] mean_diff = after_proportion.mean() - before_proportion.mean() mean_diff ``` ## 8. 
A Bootstrap analysis of Semmelweis handwashing data <p>It reduced the proportion of deaths by around 8 percentage points! From 10% on average to just 2% (which is still a high number by modern standards). </p> <p>To get a feeling for the uncertainty around how much handwashing reduces mortalities we could look at a confidence interval (here calculated using the bootstrap method).</p> ``` # A bootstrap analysis of the reduction of deaths due to handwashing boot_mean_diff = [] for i in range(3000): boot_before = before_proportion.sample(frac=1, replace=True) boot_after = after_proportion.sample(frac=1, replace=True) boot_mean_diff.append(boot_after.mean() - boot_before.mean()) # Calculating a 95% confidence interval from boot_mean_diff confidence_interval = pd.Series(boot_mean_diff).quantile([0.025, 0.975]) confidence_interval ``` ## 9. The fate of Dr. Semmelweis <p>So handwashing reduced the proportion of deaths by between 6.7 and 10 percentage points, according to a 95% confidence interval. All in all, it would seem that Semmelweis had solid evidence that handwashing was a simple but highly effective procedure that could save many lives.</p> <p>The tragedy is that, despite the evidence, Semmelweis' theory — that childbed fever was caused by some "substance" (what we today know as <em>bacteria</em>) from autopsy room corpses — was ridiculed by contemporary scientists. The medical community largely rejected his discovery and in 1849 he was forced to leave the Vienna General Hospital for good.</p> <p>One reason for this was that statistics and statistical arguments were uncommon in medical science in the 1800s. Semmelweis only published his data as long tables of raw data, but he didn't show any graphs nor confidence intervals. 
If he would have had access to the analysis we've just put together he might have been more successful in getting the Viennese doctors to wash their hands.</p> ``` # The data Semmelweis collected points to that: doctors_should_wash_their_hands = True ```
github_jupyter
# Simple Neural Network Neural Networks are a machine learning framework that attempts to mimic the learning pattern of natural biological neural networks. neural networks receive inputs, then based on these inputs they produce an output signal to next. The process of creating a neural network begins with the most basic form, a single perceptron or Multi layer Perceptron. ![image.png](attachment:image.png) It takes several input, processes it through multiple neurons from multiple hidden layers and returns the result using an output layer. This result estimation process is technically known as “Forward Propagation“. Next, we compare the result with actual output. The task is to make the output to neural network as close to actual (desired) output. Each of these neurons are contributing some error to final output. We try to minimize the value/ weight of neurons those are contributing more to the error and this happens while traveling back to the neurons of the neural network and finding where the error lies. This process is known as “Backward Propagation“. . In order to reduce these number of iterations to minimize the error, the neural networks use a common algorithm known as “Gradient Descent”, which helps to optimize the task quickly and efficiently. 
``` from datetime import datetime import numpy as np import numpy as numpy import pandas as pd import pylab import calendar from scipy import stats import seaborn as sns from sklearn import model_selection, preprocessing from scipy.stats import kendalltau import warnings import matplotlib.pyplot as plt import pandas from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout, BatchNormalization from keras.layers.advanced_activations import PReLU from keras.optimizers import Adam from keras.wrappers.scikit_learn import KerasRegressor from keras import callbacks from keras.callbacks import ModelCheckpoint from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.preprocessing import LabelEncoder from sklearn.metrics import mean_squared_error import patsy import statsmodels.api as sm import statsmodels.formula.api as smf from statsmodels.regression.quantile_regression import QuantReg # Load train, Prop and sample print('Loading train, prop and sample data') train = pd.read_csv("train_2016_v2.csv", parse_dates=["transactiondate"]) prop = pd.read_csv('properties_2016.csv') sample = pd.read_csv('sample_submission.csv') ``` #### Label Encoder LabelEncoder is a utility class to help normalize labels categorical values and to encode such that they contain only values between 0 and n_classes-1. Here, we LabelEncode the properties dataset. 
``` print('Fitting Label Encoder on properties') for c in prop.columns: prop[c]=prop[c].fillna(-1) if prop[c].dtype == 'object': lbl = LabelEncoder() lbl.fit(list(prop[c].values)) prop[c] = lbl.transform(list(prop[c].values)) ``` ###### Create Test and Train sets ``` #Create df_train and x_train y_train from that print('Creating training set') df_train = train.merge(prop, how='left', on='parcelid') # Create df_test and test set print('Creating df_test') sample['parcelid'] = sample['ParcelId'] print("Merge Sample with property data") df_test = sample.merge(prop, on='parcelid', how='left') INCLUDE_TIME_FEATURES = False INCLUDE_SEASONAL_FEATURES = True N_EPOCHS = 150 BEST_EPOCH = False CV_ONLY = False ``` ###### Q4 Validation Here, we divide the whole year is didvided into four quarters (q1,q2,q3,q4) and the first three quaters are considered to be part of traning set and the validation set has the last qurater q4, Hence Q4 Validation. The quarters are divided based on the time and seasonal features. With the help of error in the Q4 validation set between it's existing Log Error and predicted Log Error it uses backpropagation in each Epoch to minimize it and build a better model. 
``` df_train["transactiondate"] = pd.to_datetime(df_train["transactiondate"]) df_train['transactiondate_quarter'] = df_train['transactiondate'].dt.quarter if INCLUDE_TIME_FEATURES: df_train["transactiondate_year"] = df_train["transactiondate"].dt.year df_train["transactiondate_month"] = df_train["transactiondate"].dt.month df_train["transactiondate"] = df_train["transactiondate"].dt.day if INCLUDE_SEASONAL_FEATURES: basedate = pd.to_datetime('2015-11-15').toordinal() df_train['cos_season'] = \ ( (pd.to_datetime(df_train['transactiondate']).apply(lambda x: x.toordinal()-basedate)) * \ (2*np.pi/365.25) ).apply(np.cos) df_train['sin_season'] = \ ( (pd.to_datetime(df_train['transactiondate']).apply(lambda x: x.toordinal()-basedate)) * \ (2*np.pi/365.25) ).apply(np.sin) select_qtr4 = df_train["transactiondate_quarter"] == 4 print('Create x_train and y_train from df_train' ) x_train_all = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc', 'propertycountylandusecode', 'fireplacecnt', 'fireplaceflag'], axis=1) if not INCLUDE_TIME_FEATURES: x_train_all = x_train_all.drop(['transactiondate_quarter'], axis=1) y_train_all = df_train["logerror"] y_train = y_train_all[~select_qtr4] x_train = x_train_all[~select_qtr4] x_valid = x_train_all[select_qtr4] y_valid = y_train_all[select_qtr4] y_mean = np.mean(y_train) print(x_train.shape, y_train.shape) train_columns = x_train.columns for c in x_train.dtypes[x_train.dtypes == object].index.values: x_train[c] = (x_train[c] == True) if INCLUDE_TIME_FEATURES: df_test["transactiondate"] = pd.to_datetime('2016-11-15') # typical date for 2016 test data df_test["transactiondate_year"] = df_test["transactiondate"].dt.year df_test["transactiondate_month"] = df_test["transactiondate"].dt.month df_test['transactiondate_quarter'] = df_test['transactiondate'].dt.quarter df_test["transactiondate"] = df_test["transactiondate"].dt.day if INCLUDE_SEASONAL_FEATURES: basedate = pd.to_datetime('2015-11-15').toordinal() 
df_test['cos_season'] = np.cos( (pd.to_datetime('2016-11-15').toordinal() - basedate) * \ (2*np.pi/365.25) ) df_test['sin_season'] = np.sin( (pd.to_datetime('2016-11-15').toordinal() - basedate) * \ (2*np.pi/365.25) ) x_test = df_test[train_columns] print('Shape of x_test:', x_test.shape) print("Preparing x_test:") for c in x_test.dtypes[x_test.dtypes == object].index.values: x_test[c] = (x_test[c] == True) ``` ###### Imputer Imputation transformer is used for completing missing values. ``` from sklearn.preprocessing import Imputer imputer= Imputer() imputer.fit(x_train.iloc[:, :]) x_train = imputer.transform(x_train.iloc[:, :]) imputer.fit(x_valid.iloc[:, :]) x_valid = imputer.transform(x_valid.iloc[:, :]) imputer.fit(x_test.iloc[:, :]) x_test = imputer.transform(x_test.iloc[:, :]) ``` ##### Standard Scalar Standardizes features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the transform method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance) ``` sc = StandardScaler() x_train = sc.fit_transform(x_train) x_test = sc.transform(x_test) x_valid = sc.fit_transform(x_valid) x_val = np.array(x_valid) y_val = np.array(y_valid) ``` ###### Train Network Here we have five layered Neural network with inputs to the network starting from 400 to 1. ###### Kernel Initializers Initializations define the way to set the initial random weights of Keras layers. ##### Random Normal Initializer Initializer that generates tensors with a normal distribution. ###### Batch Normalization We normalize the input layer by adjusting and scaling the activations and to speed up learning. 
In batch normalization, the output of a previous activation layer is normalized by subtracting the batch mean and dividing by the batch standard deviation.
) ``` # Summary: In this notebook, we have implemented Simple neural networks with 5 layers for prediction of LogError = (log(Zestimate)-log(salesprice)) using 2016 property dataset and its corresponding log error values provided by zillow for home value prediction in Python using Keras and tensorflow deep learning libraries. Original dataset is prepared accordingly and required features are label encoded rest of the features which are almost zero or few are dropped. Later,the whole year is didvided into four quarters and the first three quaters are considered to be part of traning set and the validation set has the last qurater q4, Hence Q4 Validation. The quarters are divided based on the time and seasonal features. With the help of error in the Q4 validation set between it's existing Log Error and predicted Log Error it uses backpropagation in each Epoch to minimize it and build a better model. Finally, we have predicted logerror values of 2016 and 2017 for the last quarter (from November to December) in the test dataset and the results are written to a csv file. Calculate RMSE for the Network built can be seen as 0.148 which means that there is minimal error in the logerror gives us the better predictions.Further, the model can improvised by add more layers or changing the backpropagation parameters. ###### Referrence: https://www.sciencedirect.com/science/article/pii/S0377221703005484
github_jupyter
``` from google.colab import drive drive.mount('/content/gdrive') import os os.chdir('/content/gdrive/My Drive/finch/tensorflow2/spoken_language_understanding/atis/main') %tensorflow_version 2.x !pip install tensorflow-addons from tensorflow_addons.optimizers.cyclical_learning_rate import Triangular2CyclicalLearningRate from sklearn.metrics import classification_report, f1_score, accuracy_score import tensorflow as tf import pprint import logging import time import numpy as np print("TensorFlow Version", tf.__version__) print('GPU Enabled:', tf.test.is_gpu_available()) def get_vocab(vocab_path): word2idx = {} with open(vocab_path) as f: for i, line in enumerate(f): line = line.rstrip() word2idx[line] = i return word2idx def data_generator(f_path, params): print('Reading', f_path) with open(f_path) as f: for line in f: line = line.rstrip() text, slot_intent = line.split('\t') words = text.split()[1:-1] slot_intent = slot_intent.split() slots, intent = slot_intent[1:-1], slot_intent[-1] assert len(words) == len(slots) words = [params['word2idx'].get(w, len(params['word2idx'])) for w in words] intent = params['intent2idx'].get(intent, len(params['intent2idx'])) slots = [params['slot2idx'].get(s, len(params['slot2idx'])) for s in slots] yield (words, (intent, slots)) def dataset(is_training, params): _shapes = ([None], ((), [None])) _types = (tf.int32, (tf.int32, tf.int32)) _pads = (0, (-1, 0)) if is_training: ds = tf.data.Dataset.from_generator( lambda: data_generator(params['train_path'], params), output_shapes = _shapes, output_types = _types,) ds = ds.shuffle(params['num_samples']) ds = ds.padded_batch(params['batch_size'], _shapes, _pads) ds = ds.prefetch(tf.data.experimental.AUTOTUNE) else: ds = tf.data.Dataset.from_generator( lambda: data_generator(params['test_path'], params), output_shapes = _shapes, output_types = _types,) ds = ds.padded_batch(params['batch_size'], _shapes, _pads) ds = ds.prefetch(tf.data.experimental.AUTOTUNE) return ds def 
get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4, start_index=0): to_float = lambda x: tf.cast(x, tf.float32) position = to_float(tf.range(length) + start_index) num_timescales = channels // 2 log_timescale_increment = ( tf.math.log(float(max_timescale) / float(min_timescale)) / tf.maximum(to_float(num_timescales) - 1, 1)) inv_timescales = min_timescale * tf.exp( to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) signal = tf.pad(signal, [[0, 0], [0, tf.compat.v1.mod(channels, 2)]]) signal = tf.reshape(signal, [1, length, channels]) return signal class LayerNorm(tf.keras.layers.Layer): def __init__(self, params): super().__init__() self._epsilon = params['epsilon'] self._hidden_units = params['global_units'] def build(self, input_shape): self.scale = self.add_weight(name='scale', shape=[self._hidden_units], initializer=tf.ones_initializer(), trainable=True) self.bias = self.add_weight(name='bias', shape=[self._hidden_units], initializer=tf.zeros_initializer(), trainable=True) super().build(input_shape) def call(self, inputs): mean, variance = tf.nn.moments(inputs, [-1], keepdims=True) norm_x = (inputs - mean) * tf.math.rsqrt(variance + self._epsilon) return norm_x * self.scale + self.bias def compute_output_shape(self, input_shape): return input_shape class EncoderBlock(tf.keras.Model): def __init__(self, SubModel, params, name): super().__init__(name = name) self.layer_norm = LayerNorm(params) self.sub_model = SubModel(params) self.dropout = tf.keras.layers.Dropout(params['dropout_rate']) def call(self, inputs, training): inputs, masks = inputs x = self.layer_norm(inputs) x = self.sub_model((x, masks), training=training) x = self.dropout(x, training=training) x += inputs return x class MultiheadSelfAttention(tf.keras.Model): def __init__(self, params): super().__init__() 
self.qkv_linear = tf.keras.layers.Dense(3*params['hidden_units'], name='qkv_linear') self.dropout = tf.keras.layers.Dropout(params['dropout_rate']) self.out_linear = tf.keras.layers.Dense(params['global_units'], params['activation'], name='out_linear') self.num_heads = params['num_heads'] self.is_bidirectional = params['is_bidirectional'] def call(self, inputs, training): x, masks = inputs batch_sz = tf.shape(x)[0] timesteps = tf.shape(x)[1] x_shift = tf.concat((tf.zeros((batch_sz, 1, params['global_units'])), x[:, :-1, :]), axis=1) thres = params['global_units'] // 2 x = tf.concat((x_shift[:, :, :thres], x[:, :, thres:]), axis=-1) q_k_v = self.qkv_linear(x) q, k, v = tf.split(q_k_v, 3, axis=-1) if self.num_heads > 1: q = tf.concat(tf.split(q, self.num_heads, axis=2), axis=0) k = tf.concat(tf.split(k, self.num_heads, axis=2), axis=0) v = tf.concat(tf.split(v, self.num_heads, axis=2), axis=0) align = tf.matmul(q, k, transpose_b=True) align *= tf.math.rsqrt(tf.cast(k.shape[-1], tf.float32)) if (masks is not None) or (not self.is_bidirectional): paddings = tf.fill(tf.shape(align), float('-inf')) if masks is not None: c_masks = tf.tile(masks, [params['num_heads'], 1]) c_masks = tf.tile(tf.expand_dims(c_masks, 1), [1, timesteps, 1]) align = tf.where(tf.equal(c_masks, 0), paddings, align) if not self.is_bidirectional: lower_tri = tf.ones((timesteps, timesteps)) lower_tri = tf.linalg.LinearOperatorLowerTriangular(lower_tri).to_dense() t_masks = tf.tile(tf.expand_dims(lower_tri, 0), [tf.shape(align)[0], 1, 1]) align = tf.where(tf.equal(t_masks, 0), paddings, align) align = tf.nn.softmax(align) align = self.dropout(align, training=training) if masks is not None: q_masks = tf.tile(masks, [params['num_heads'], 1]) q_masks = tf.tile(tf.expand_dims(q_masks, 2), [1, 1, timesteps]) align *= tf.cast(q_masks, tf.float32) x = tf.matmul(align, v) if self.num_heads > 1: x = tf.concat(tf.split(x, self.num_heads, axis=0), axis=2) x = self.out_linear(x) return x class 
PointwiseFFN(tf.keras.Model): def __init__(self, params): super().__init__() self.dense_1 = tf.keras.layers.Dense(params['multiplier']*params['global_units'], params['activation'], name='fc') self.dropout = tf.keras.layers.Dropout(params['dropout_rate']) self.dense_2 = tf.keras.layers.Dense(params['global_units'], name='linear') def call(self, inputs, training): x, masks = inputs return self.dense_2(self.dropout(self.dense_1(x), training=training)) class Model(tf.keras.Model): def __init__(self, params: dict): super().__init__() self.embedding = tf.Variable(np.load(params['vocab_path']), dtype=tf.float32, name='pretrained_embedding') self.input_dropout = tf.keras.layers.Dropout(params['dropout_rate']) self.blocks = [] for i in range(params['num_layers']): self.blocks.append(EncoderBlock( MultiheadSelfAttention, params, name='layer{}.1'.format(i+1))) self.blocks.append(EncoderBlock( PointwiseFFN, params, name='layer{}.2'.format(i+1))) self.intent_dropout = tf.keras.layers.Dropout(params['dropout_rate']) self.fc_intent = tf.keras.layers.Dense(params['global_units'], params['activation'], name='fc_intent') self.out_linear_intent = tf.keras.layers.Dense(params['intent_size'], name='output_intent') self.out_linear_slot = tf.keras.layers.Dense(params['slot_size'], name='output_slot') def call(self, inputs, training): if inputs.dtype != tf.int32: inputs = tf.cast(inputs, tf.int32) masks = tf.sign(inputs) x = tf.nn.embedding_lookup(self.embedding, inputs) if params['is_embedding_scaled']: x *= tf.sqrt(tf.cast(params['global_units'], tf.float32)) x += get_timing_signal_1d(tf.shape(x)[1], params['global_units']) x = self.input_dropout(x, training=training) for block in self.blocks: x = block((x, masks), training=training) x_intent = tf.reduce_max(x, 1) x_intent = self.intent_dropout(x_intent, training=training) x_intent = self.out_linear_intent(self.fc_intent(x_intent)) x_slot = self.out_linear_slot(x) return (x_intent, x_slot) params = { 'train_path': 
'../data/atis.train.w-intent.iob', 'test_path': '../data/atis.test.w-intent.iob', 'word_path': '../vocab/word.txt', 'vocab_path': '../vocab/word.npy', 'intent_path': '../vocab/intent.txt', 'slot_path': '../vocab/slot.txt', 'batch_size': 16, 'num_samples': 4978, 'num_layers': 2, 'global_units': 300, 'hidden_units': 512, 'activation': tf.nn.elu, 'num_heads': 8, 'multiplier': 2, 'dropout_rate': .1, 'epsilon': 1e-6, 'is_bidirectional': True, 'is_embedding_scaled': False, 'clip_norm': 5., } params['word2idx'] = get_vocab(params['word_path']) params['intent2idx'] = get_vocab(params['intent_path']) params['slot2idx'] = get_vocab(params['slot_path']) params['word_size'] = len(params['word2idx']) + 1 params['intent_size'] = len(params['intent2idx']) + 1 params['slot_size'] = len(params['slot2idx']) + 1 model = Model(params) model.build(input_shape=(None, None)) pprint.pprint([(v.name, v.shape) for v in model.trainable_variables]) decay_lr = Triangular2CyclicalLearningRate( initial_learning_rate = 1e-4, maximal_learning_rate = 8e-4, step_size = 8 * params['num_samples'] // params['batch_size'], ) optim = tf.optimizers.Adam(1e-4) global_step = 0 slot_best_f1 = .0 intent_acc_with_that = .0 t0 = time.time() logger = logging.getLogger('tensorflow') logger.setLevel(logging.INFO) for n_epoch in range(1, 64+1): # TRAINING for (words, (intent, slots)) in dataset(is_training=True, params=params): with tf.GradientTape() as tape: y_intent, y_slots = model(words, training=True) loss_intent = tf.compat.v1.losses.softmax_cross_entropy( onehot_labels = tf.one_hot(intent, len(params['intent2idx'])+1), logits = y_intent, label_smoothing = .2) # weight of 'O' is set to be small weights = tf.cast(tf.sign(slots), tf.float32) padding = tf.constant(1e-2, tf.float32, weights.shape) weights = tf.where(tf.equal(weights, 0.), padding, weights) loss_slots = tf.compat.v1.losses.softmax_cross_entropy( onehot_labels = tf.one_hot(slots, len(params['slot2idx'])+1), logits = y_slots, weights = 
tf.cast(weights, tf.float32), label_smoothing = .2) # joint loss loss = loss_intent + loss_slots optim.lr.assign(decay_lr(global_step)) grads = tape.gradient(loss, model.trainable_variables) grads, _ = tf.clip_by_global_norm(grads, params['clip_norm']) optim.apply_gradients(zip(grads, model.trainable_variables)) if global_step % 50 == 0: logger.info("Step {} | Loss: {:.4f} | Loss_intent: {:.4f} | Loss_slots: {:.4f} | Spent: {:.1f} secs | LR: {:.6f}".format( global_step, loss.numpy().item(), loss_intent.numpy().item(), loss_slots.numpy().item(), time.time()-t0, optim.lr.numpy().item())) t0 = time.time() global_step += 1 # EVALUATION intent_true = [] intent_pred = [] slot_true = [] slot_pred = [] for (words, (intent, slots)) in dataset(is_training=False, params=params): y_intent, y_slots = model(words, training=False) y_intent = tf.argmax(y_intent, -1) y_slots = tf.argmax(y_slots, -1) intent_true += intent.numpy().flatten().tolist() intent_pred += y_intent.numpy().flatten().tolist() slot_true += slots.numpy().flatten().tolist() slot_pred += y_slots.numpy().flatten().tolist() f1_slots = f1_score(y_true = slot_true, y_pred = slot_pred, labels = list(params['slot2idx'].values()), sample_weight = np.sign(slot_true), average='micro',) acc_intent = accuracy_score(intent_true, intent_pred) logger.info("Slot F1: {:.3f}, Intent Acc: {:.3f}".format(f1_slots, acc_intent)) if n_epoch != 1 and n_epoch % 8 == 0: logger.info('\n'+classification_report(y_true = intent_true, y_pred = intent_pred, labels = list(params['intent2idx'].values()), target_names = list(params['intent2idx'].keys()), digits=3)) logger.info('\n'+classification_report(y_true = slot_true, y_pred = slot_pred, labels = list(params['slot2idx'].values()), target_names = list(params['slot2idx'].keys()), sample_weight = np.sign(slot_true), digits=3)) if f1_slots > slot_best_f1: slot_best_f1 = f1_slots intent_acc_with_that = acc_intent # you can save model here logger.info("Best Slot F1: {:.3f}, Intent Acc: 
{:.3f}".format(slot_best_f1, intent_acc_with_that)) ```
github_jupyter
# Processor temperature We have a temperature sensor in the processor of our company's server. We want to analyze the data provided to determinate whether we should change the cooling system for a better one. It is expensive and as a data analyst we cannot make decisions without a basis. We provide the temperatures measured throughout the 24 hours of a day in a list-type data structure composed of 24 integers: ``` temperatures_C = [33,66,65,0,59,60,62,64,70,76,80,69,80,83,68,79,61,53,50,49,53,48,45,39] ``` ## Goals 1. Treatment of lists 2. Use of loop or list comprenhention 3. Calculation of the mean, minimum and maximum. 4. Filtering of lists. 5. Interpolate an outlier. 6. Logical operators. 7. Print ## Temperature graph To facilitate understanding, the temperature graph is shown below. You do not have to do anything in this section. The test starts in **Problem**. ``` # import import matplotlib.pyplot as plt %matplotlib inline # axis x, axis y y = [33,66,65,0,59,60,62,64,70,76,80,81,80,83,90,79,61,53,50,49,53,48,45,39] x = list(range(len(y))) # plot plt.plot(x, y) plt.axhline(y=70, linewidth=1, color='r') plt.xlabel('hours') plt.ylabel('Temperature ºC') plt.title('Temperatures of our server throughout the day') ``` ## Problem If the sensor detects more than 4 hours with temperatures greater than or equal to 70ºC or any temperature above 80ºC or the average exceeds 65ºC throughout the day, we must give the order to change the cooling system to avoid damaging the processor. We will guide you step by step so you can make the decision by calculating some intermediate steps: 1. Minimum temperature 2. Maximum temperature 3. Temperatures equal to or greater than 70ºC 4. Average temperatures throughout the day. 5. If there was a sensor failure at 03:00 and we did not capture the data, how would you estimate the value that we lack? Correct that value in the list of temperatures. 6. 
Bonus: Our maintenance staff is from the United States and does not understand the international metric system. Pass temperatures to Degrees Fahrenheit. Formula: F = 1.8 * C + 32 web: https://en.wikipedia.org/wiki/Conversion_of_units_of_temperature ``` # assign a variable to the list of temperatures temperatures_C = [33,66,65,0,59,60,62,64,70,76,80,81,80,83,90,79,61,53,50,49,53,48,45,39] temperature_70 = [] temperature_70_24 = 0 temperature_70_4hours = 0 temperature_80 = [] temperature_80_24 = 0 # 1. Calculate the minimum of the list and print the value using print() print("1. the minimum temperature is:", min(temperatures_C), "\n") # 2. Calculate the maximum of the list and print the value using print() print("2. the maximum temperature is:", max(temperatures_C), "\n") # 3. Items in the list that are greater than 70ºC and print the result for i in temperatures_C: if i <= 70: temperature_70.append(False) temperature_80.append(False) if i > 70: temperature_70.append(i) temperature_70_24 = True temperature_80.append(False) elif i > 80: temperature_80.append(i) temperature_80_24 = True print("3. list of temp. greater 70°C:", "\n", temperature_70, "\n") for i in range(0, len(temperature_70)): if temperature_70[i] != False and temperature_70[i-1] != False and temperature_70[i-2] != False and temperature_70[i-3] != False: temperature_70_4hours = True # 4. Calculate the mean temperature throughout the day and print the result mean_day_temp = sum(temperatures_C) / len(temperatures_C) print("4. mean 24-hours temperature: ", round(mean_day_temp, 2), "°C", "\n") # 5.1 Solve the fault in the sensor by estimating a value # 5.2 Update of the estimated value at 03:00 on the list for i in range(0, len(temperatures_C)): if temperatures_C[i] == 0: temperatures_C[i] = (temperatures_C[i+1] + temperatures_C[i-1])/2 print("5. temperature value missing, Sensor defect? 
temperature approx.", temperatures_C[i], "at hour:", i) print(temperatures_C, "\n") # Bonus: convert the list of ºC to ºFarenheit temperatures_F = [] for i in range(0, len(temperatures_C)): temperatures_F.append(round(((temperatures_C[i] * 1.8) + 32),2)) print("Bonus: the temperatures converted in Fahrenheit:", "\n", temperatures_F) ``` ## Take the decision Remember that if the sensor detects more than 4 hours with temperatures greater than or equal to 70ºC or any temperature higher than 80ºC or the average was higher than 65ºC throughout the day, we must give the order to change the cooling system to avoid the danger of damaging the equipment: * more than 4 hours with temperatures greater than or equal to 70ºC * some temperature higher than 80ºC * average was higher than 65ºC throughout the day If any of these three is met, the cooling system must be changed. ``` # Print True or False depending on whether you would change the cooling system or not system_change = 0 for i in range(0, 24): if mean_day_temp > 65 or temperature_70_24 == True or temperature_80_24 == True or temperature_70_4hours == True: system_change = True else: system_change = False print("The cooling system should be changed:", system_change) ``` ## Future improvements 1. We want the hours (not the temperatures) whose temperature exceeds 70ºC 2. Condition that those hours are more than 4 consecutive and consecutive, not simply the sum of the whole set. Is this condition met? 3. Average of each of the lists (ºC and ºF). How they relate? 4. Standard deviation of each of the lists. How they relate? ``` # 1. We want the hours (not the temperatures) whose temperature exceeds 70ºC for i in range(0, len(temperature_70)): if temperature_70[i] != False: print("temperatures above 70°C meassured at hour:",i) # 2. Condition that those hours are more than 4 consecutive and consecutive, not simply the sum of the whole set. Is this condition met? 
print("temperature 4-hours-high is reached:", temperature_70_4hours) # 3. Average of each of the lists (ºC and ºF). How they relate? print("average temperature in °C:", sum(temperatures_C)/len(temperatures_C)) print("average temperature in °F:", sum(temperatures_F)/len(temperatures_F)) print("realtion mean temp °C/F", (sum(temperatures_C)/len(temperatures_C)) / (sum(temperatures_F)/len(temperatures_F))) # 4. Standard deviation of each of the lists. How they relate? import numpy as np print("the std. dev. for the temperature in °C:", np.std(temperatures_C)) print("the std. dev. for the temperature in °F:", np.std(temperatures_F)) print("realtion std.dev. for temp °C/F", np.std(temperatures_C) / np.std(temperatures_F)) ```
github_jupyter
## Loading Libraries Let's import all the necessary packages first. You can safely ignore this section. ``` import java.util.Random; import java.lang.*; %%loadFromPOM <dependency> <groupId>org.knowm.xchart</groupId> <artifactId>xchart</artifactId> <version>3.5.2</version> </dependency> %maven org.knowm.xchart:xchart:3.5.2 import org.knowm.xchart.*; ``` ## Helper Methods Let's code three helper methods: * random array generator * array printer * copyArray It is assumed that you are fully capable of coding two similar methods by yourself. If you are new to Java (but have some experience with a different language), playing with these methods will help you get familiar with Java faster. ``` // random array generator public int[] randomArr(int size) { Random r = new Random(); int[] arr = new int[size]; for (int i = 0; i < size; i++) { arr[i] = r.nextInt(1000) + 1; } return arr; } // array printer public void printArr(int[] arr) { for (int num : arr) { System.out.print(num + " "); } System.out.println(); } // array deep copy public void copyArray(int[] from, int[] to) { if (from.length != to.length) { System.exit(0); } for (int i = 0; i < from.length; i++) { to[i] = from[i]; } } ``` ## Time Complexity - maxFind Here is the maxFind method and its sanity check: ``` // maxFinder public int maxFind(int[] arr) { int max = arr[0]; for (int num: arr) { if (num > max) { max = num; } } return max; } // proof that maxFind works int[] arr = randomArr(15); printArr(arr); System.out.println("The max number is " + maxFind(arr)); ``` Let's try to find the relationship between input size (i.e., the length of input array) and the running time of maxFind. Lets modify the maxFind method with a flag. When flag is set to true, the method returns the running time (i.e., steps) it takes to fully execute the method. 
``` // track steps of maxFind public int maxFindTrack(int[] arr, boolean trackSteps) { // init steps int steps = 0; int max = arr[0]; // increment steps if (trackSteps) { steps++; } for (int num: arr) { if (num > max) { max = num; // increment steps if (trackSteps) { steps++; } } // increment steps if (trackSteps) { steps++; } } // increment steps if (trackSteps) { steps++; return steps; } return max; } // proof that maxFindTrack works int size = 15; int[] steps = new int[size]; for (int i = 1; i < steps.length; i++) { int[] arr = randomArr(i); steps[i] = maxFindTrack(arr, true); } printArr(steps); // size of input - convert int to double for plotting double[] xData = new double[size]; for (int i = 1; i < xData.length; i++) { xData[i] = i; } // steps - convert int to double for plotting double[] yData = new double[size]; for (int i = 0; i < yData.length; i++) { yData[i] = steps[i]; } // plot it XYChart chart = QuickChart.getChart("Running Time vs Input Size", "Input Size n", "Running Time T(n)", "y(x)", xData, yData); BitmapEncoder.getBufferedImage(chart); ``` To quantify the running time of maxFind more accurately, we can further modify maxFind method to deal with the worst case scenario and the best case scenarios: * worst: an ascending sorted array * best: a descending sorted array **Question for you: why does an ascending sorted array is the worst case scenario for maxFind, while a descending sorted array is the best?** ``` // track steps of maxFind public int maxFindTrackUpdate(int[] arr, boolean trackSteps, String condition) { // init arr depending on condition if (condition.equals("worst")) { Arrays.sort(arr); } if (condition.equals("best")) { Arrays.sort(arr); for (int i = 0; i < arr.length/2; i++) { int temp = arr[i]; arr[i] = arr[arr.length-1]; arr[arr.length-1] = temp; } } // init steps int steps = 0; int max = arr[0]; // increment steps if (trackSteps) { steps++; } for (int num: arr) { if (num > max) { max = num; // increment steps if (trackSteps) { 
steps++; } } // increment steps if (trackSteps) { steps++; } } // increment steps if (trackSteps) { steps++; return steps; } return max; } ``` Let's plot the steps it takes to find max from a randomly generated array with 30 integers for different scenarios. ``` // predetermined size size = 30; // storage of steps int[] best = new int[size]; int[] normal = new int[size]; int[] worst = new int[size]; // populate storage for (int i = 1; i < size; i++) { int[] tempB = randomArr(i); int[] tempN = new int[tempB.length]; copyArray(tempB, tempN); int[] tempW = new int[tempB.length]; copyArray(tempB, tempW); best[i] = maxFindTrackUpdate(tempB, true, "best"); normal[i] = maxFindTrackUpdate(tempN, true, "normal"); worst[i] = maxFindTrackUpdate(tempW, true, "worst"); } System.out.print("Best: "); printArr(best); System.out.print("Normal: "); printArr(normal); System.out.print("Worst: "); printArr(worst); // size of input - convert int to double for plotting double[] xData = new double[size]; for (int i = 1; i < xData.length; i++) { xData[i] = i; } // best - convert int to double for plotting double[] yDataB = new double[size]; for (int i = 0; i < yDataB.length; i++) { yDataB[i] = best[i]; } // normal - convert int to double for plotting double[] yDataN = new double[size]; for (int i = 0; i < yDataN.length; i++) { yDataN[i] = normal[i]; } // worst - convert int to double for plotting double[] yDataW = new double[size]; for (int i = 0; i < yDataW.length; i++) { yDataW[i] = worst[i]; } // plot it XYChart chart = new XYChartBuilder().width(600).height(400).title("Find Max").xAxisTitle("Input Size n").yAxisTitle("Running Time T(n)").build(); chart.addSeries("Best", xData, yDataB); chart.addSeries("Normal", xData, yDataN); chart.addSeries("Worst", xData, yDataW); BitmapEncoder.getBufferedImage(chart); ``` ## Time Complexity - Fibonacci Sequence There are two ways to implement Fibonacci Sequence: * recursive * Iterative The recursive implementation is a direct translation of its 
definition. The iterative approach takes advantage of an array to memorize any Fibonacci numbers it ever calculates to avoid redundant computation. ``` // recursive public int fibonacciRecursive(int n){ if (n == 1 || n == 2) { return 1; } else { return fibonacciRecursive(n-1) + fibonacciRecursive(n-2); } } // iterative public int fibonacciIterative(int n) { int[] store = new int[n+1]; if (n == 1 || n == 2) { return 1; } else { store[1] = 1; store[2] = 1; for (int i = 3; i <= n; i++) { store[i] = store[i-1] + store[i-2]; } return store[n]; } } ``` To track the running times of recursive and iterative fibonacci, we just need to do a small modification to both methods: ``` // recursive definition: return the steps it takes to calculate fibonacci(n) public int fibonacciRecursiveTrack(int n){ int track = 0; if (n == 1 || n == 2) { track++; } else { track += 1 + fibonacciRecursiveTrack(n-1) + fibonacciRecursiveTrack(n-2); } return track; } // iterative public int fibonacciIterativeTrack(int n) { int steps = 2; int[] store = new int[n+1]; if (n == 1 || n == 2) { steps++; return steps; } else { steps += 3; store[1] = 1; store[2] = 1; for (int i = 3; i <= n; i++) { store[i] = store[i-1] + store[i-2]; steps++; } return steps; } } ``` Let's plot the running time of both and compare: ``` // predetermined size size = 15; // storage of steps int[] stepsRecursive = new int[size]; int[] stepsIterative = new int[size]; // populate storage for (int i = 1; i < size; i++) { // prepare two exactly same sorted arrays int[] temp1 = randomArr(i); int[] temp2 = new int[temp1.length]; copyArray(temp1, temp2); stepsRecursive[i] = fibonacciRecursiveTrack(i); stepsIterative[i] = fibonacciIterativeTrack(i); } // size of input - convert int to double for plotting double[] xData = new double[size]; for (int i = 1; i < xData.length; i++) { xData[i] = i; } // recurisve double[] yDataR = new double[size]; for (int i = 0; i < yDataR.length; i++) { yDataR[i] = stepsRecursive[i]; } // iterative 
double[] yDataI = new double[size]; for (int i = 0; i < yDataI.length; i++) { yDataI[i] = stepsIterative[i]; } // plot it XYChart chart = new XYChartBuilder().width(600).height(400).title("Fibonacci").xAxisTitle("Input Size n").yAxisTitle("Running Time T(n)").build(); chart.addSeries("Recursive", xData, yDataR); chart.addSeries("Iterative", xData, yDataI); BitmapEncoder.getBufferedImage(chart); ``` # Do it yourself Can you plot the input size against the running time of iterative fibonacci? You will need to: 1. Modify iterative fibonacci method so you can track its running time 2. Plot its input size against the running time. You can reuse the code in the above cell. 3. (Optional) Compare the running time of recursive and iterative fibonacci in the same plot. You can reuse the code for plotting different scenarios of maxFind running time. **When you finish (or not) playing your exploration of the whole interactive notebook and DIY assignment, you should download a html file and upload it to the assignment box on Canvas:** * File --> Download as --> HTML (.html) ![download](images/html.png)
github_jupyter
# Autonomous driving - Car detection Welcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: Redmon et al., 2016 (https://arxiv.org/abs/1506.02640) and Redmon and Farhadi, 2016 (https://arxiv.org/abs/1612.08242). **You will learn to**: - Use object detection on a car detection dataset - Deal with bounding boxes Run the following cell to load the packages and dependencies that are going to be useful for your journey! ``` import argparse import os import matplotlib.pyplot as plt from matplotlib.pyplot import imshow import scipy.io import scipy.misc import numpy as np import pandas as pd import PIL import tensorflow as tf from keras import backend as K from keras.layers import Input, Lambda, Conv2D from keras.models import load_model, Model from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body %matplotlib inline ``` **Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`. ## 1 - Problem Statement You are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around. <center> <video width="400" height="200" src="nb_images/road_video_compressed2.mp4" type="video/mp4" controls> </video> </center> <caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We would like to especially thank [drive.ai](https://www.drive.ai/) for providing this dataset! 
Drive.ai is a company building the brains of self-driving vehicles. </center></caption> <img src="nb_images/driveai.png" style="width:100px;height:100;"> You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like. <img src="nb_images/box_label.png" style="width:500px;height:250;"> <caption><center> <u> **Figure 1** </u>: **Definition of a box**<br> </center></caption> If you have 80 classes that you want YOLO to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step. In this exercise, you will learn how YOLO works, then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use. ## 2 - YOLO YOLO ("you only look once") is a popular algoritm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes. ### 2.1 - Model details First things to know: - The **input** is a batch of images of shape (m, 608, 608, 3) - The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. We will use 5 anchor boxes. 
So you can think of the YOLO architecture as the following: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85). Lets look in greater detail at what this encoding represents. <img src="nb_images/architecture.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 2** </u>: **Encoding architecture for YOLO**<br> </center></caption> If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object. Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height. For simplicity, we will flatten the last two last dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425). <img src="nb_images/flatten.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 3** </u>: **Flattening the last two last dimensions**<br> </center></caption> Now, for each box (of each cell) we will compute the following elementwise product and extract a probability that the box contains a certain class. <img src="nb_images/probability_extraction.png" style="width:700px;height:400;"> <caption><center> <u> **Figure 4** </u>: **Find the class detected by each box**<br> </center></caption> Here's one way to visualize what YOLO is predicting on an image: - For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across both the 5 anchor boxes and across different classes). - Color that grid cell according to what object that grid cell considers the most likely. 
Doing this results in this picture: <img src="nb_images/proba_map.png" style="width:300px;height:300;"> <caption><center> <u> **Figure 5** </u>: Each of the 19x19 grid cells colored according to which class has the largest predicted probability in that cell.<br> </center></caption> Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this: <img src="nb_images/anchor_map.png" style="width:200px;height:200;"> <caption><center> <u> **Figure 6** </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption> In the figure above, we plotted only boxes that the model had assigned a high probability to, but this is still too many boxes. You'd like to filter the algorithm's output down to a much smaller number of detected objects. To do so, you'll use non-max suppression. Specifically, you'll carry out these steps: - Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class) - Select only one box when several boxes overlap with each other and detect the same object. ### 2.2 - Filtering with a threshold on class scores You are going to apply a first filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold. The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. 
It'll be convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: - `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells. - `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes per cell. - `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the detection probabilities $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell. **Exercise**: Implement `yolo_filter_boxes()`. 1. Compute box scores by doing the elementwise product as described in Figure 4. The following code may help you choose the right operator: ```python a = np.random.randn(19*19, 5, 1) b = np.random.randn(19*19, 5, 80) c = a * b # shape of c will be (19*19, 5, 80) ``` 2. For each box, find: - the index of the class with the maximum box score ([Hint](https://keras.io/backend/#argmax)) (Be careful with what axis you choose; consider using axis=-1) - the corresponding box score ([Hint](https://keras.io/backend/#max)) (Be careful with what axis you choose; consider using axis=-1) 3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep. 4. Use TensorFlow to apply the mask to box_class_scores, boxes and box_classes to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. ([Hint](https://www.tensorflow.org/api_docs/python/tf/boolean_mask)) Reminder: to call a Keras function, you should use `K.function(...)`. ``` # GRADED FUNCTION: yolo_filter_boxes def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6): """Filters YOLO boxes by thresholding on object and class confidence. 
Arguments: box_confidence -- tensor of shape (19, 19, 5, 1) boxes -- tensor of shape (19, 19, 5, 4) box_class_probs -- tensor of shape (19, 19, 5, 80) threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box Returns: scores -- tensor of shape (None,), containing the class probability score for selected boxes boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold. For example, the actual output size of scores would be (10,) if there are 10 boxes. """ # Step 1: Compute box scores ### START CODE HERE ### (≈ 1 line) box_scores = box_confidence*box_class_probs ### END CODE HERE ### # Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score ### START CODE HERE ### (≈ 2 lines) box_classes = K.argmax(box_scores,axis = -1) box_class_scores = K.max(box_scores,axis = -1) ### END CODE HERE ### # Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". 
The mask should have the # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold) ### START CODE HERE ### (≈ 1 line) filtering_mask = box_class_scores>=threshold ### END CODE HERE ### # Step 4: Apply the mask to scores, boxes and classes ### START CODE HERE ### (≈ 3 lines) scores = tf.boolean_mask(box_class_scores,filtering_mask) boxes = tf.boolean_mask(boxes,filtering_mask) classes = tf.boolean_mask(box_classes,filtering_mask) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_a: box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1) boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1) box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1) scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.shape)) print("boxes.shape = " + str(boxes.shape)) print("classes.shape = " + str(classes.shape)) ``` **Expected Output**: <table> <tr> <td> **scores[2]** </td> <td> 10.7506 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [ 8.42653275 3.27136683 -0.5313437 -4.94137383] </td> </tr> <tr> <td> **classes[2]** </td> <td> 7 </td> </tr> <tr> <td> **scores.shape** </td> <td> (?,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (?, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (?,) </td> </tr> </table> ### 2.3 - Non-max suppression ### Even after filtering by thresholding over the classes scores, you still end up a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). 
<img src="nb_images/non-max-suppression.png" style="width:500px;height:400;"> <caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probabiliy) one of the 3 boxes. <br> </center></caption> Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU. <img src="nb_images/iou.png" style="width:500px;height:400;"> <caption><center> <u> **Figure 8** </u>: Definition of "Intersection over Union". <br> </center></caption> **Exercise**: Implement iou(). Some hints: - In this exercise only, we define a box using its two corners (upper left and lower right): `(x1, y1, x2, y2)` rather than the midpoint and height/width. - To calculate the area of a rectangle you need to multiply its height `(y2 - y1)` by its width `(x2 - x1)`. - You'll also need to find the coordinates `(xi1, yi1, xi2, yi2)` of the intersection of two boxes. Remember that: - xi1 = maximum of the x1 coordinates of the two boxes - yi1 = maximum of the y1 coordinates of the two boxes - xi2 = minimum of the x2 coordinates of the two boxes - yi2 = minimum of the y2 coordinates of the two boxes - In order to compute the intersection area, you need to make sure the height and width of the intersection are positive, otherwise the intersection area should be zero. Use `max(height, 0)` and `max(width, 0)`. In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) the lower-right corner. ``` # GRADED FUNCTION: iou def iou(box1, box2): """Implement the intersection over union (IoU) between box1 and box2      Arguments: box1 -- first box, list object with coordinates (x1, y1, x2, y2)     box2 -- second box, list object with coordinates (x1, y1, x2, y2)     """ # Calculate the (y1, x1, y2, x2) coordinates of the intersection of box1 and box2. 
Calculate its Area. ### START CODE HERE ### (≈ 5 lines) xi1 = max(box1[0], box2[0]) yi1 = max(box1[1], box2[1]) xi2 = min(box1[2], box2[2]) yi2 = min(box1[3], box2[3]) inter_area = np.maximum(yi2-yi1,0) *np.maximum(xi2-xi1,0) ### END CODE HERE ### # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B) ### START CODE HERE ### (≈ 3 lines) box1_area = (box1[2]-box1[0])*(box1[3]-box1[1]) box2_area = (box2[2]-box2[0])*(box2[3]-box2[1]) union_area = box1_area + box2_area - inter_area ### END CODE HERE ### # compute the IoU ### START CODE HERE ### (≈ 1 line) iou = float(inter_area) / float(union_area) ### END CODE HERE ### return iou box1 = (2, 1, 4, 3) box2 = (1, 2, 3, 4) print("iou = " + str(iou(box1, box2))) ``` **Expected Output**: <table> <tr> <td> **iou = ** </td> <td> 0.14285714285714285 </td> </tr> </table> You are now ready to implement non-max suppression. The key steps are: 1. Select the box that has the highest score. 2. Compute its overlap with all other boxes, and remove boxes that overlap it more than `iou_threshold`. 3. Go back to step 1 and iterate until there's no more boxes with a lower score than the current selected box. This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain. **Exercise**: Implement yolo_non_max_suppression() using TensorFlow. 
TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation): - [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression) - [K.gather()](https://www.tensorflow.org/api_docs/python/tf/gather) ``` # GRADED FUNCTION: yolo_non_max_suppression def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5): """ Applies Non-max suppression (NMS) to set of boxes Arguments: scores -- tensor of shape (None,), output of yolo_filter_boxes() boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later) classes -- tensor of shape (None,), output of yolo_filter_boxes() max_boxes -- integer, maximum number of predicted boxes you'd like iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (, None), predicted score for each box boxes -- tensor of shape (4, None), predicted box coordinates classes -- tensor of shape (, None), predicted class for each box Note: The "None" dimension of the output tensors has obviously to be less than max_boxes. Note also that this function will transpose the shapes of scores, boxes, classes. This is made for convenience. 
""" max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression() K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep ### START CODE HERE ### (≈ 1 line) nms_indices = tf.image.non_max_suppression(boxes,scores,max_boxes_tensor,iou_threshold) ### END CODE HERE ### # Use K.gather() to select only nms_indices from scores, boxes and classes ### START CODE HERE ### (≈ 3 lines) scores = tf.gather(scores,nms_indices) boxes = tf.gather(boxes,nms_indices) classes = tf.gather(classes,nms_indices) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1) boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1) classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1) scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) ``` **Expected Output**: <table> <tr> <td> **scores[2]** </td> <td> 6.9384 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [-5.299932 3.13798141 4.45036697 0.95942086] </td> </tr> <tr> <td> **classes[2]** </td> <td> -2.24527 </td> </tr> <tr> <td> **scores.shape** </td> <td> (10,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (10, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (10,) </td> </tr> </table> ### 2.4 Wrapping up the filtering It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. 
**Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided): ```python boxes = yolo_boxes_to_corners(box_xy, box_wh) ``` which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes` ```python boxes = scale_boxes(boxes, image_shape) ``` YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. Don't worry about these two functions; we'll show you where they need to be called. ``` # GRADED FUNCTION: yolo_eval def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5): """ Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes. Arguments: yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors: box_confidence: tensor of shape (None, 19, 19, 5, 1) box_xy: tensor of shape (None, 19, 19, 5, 2) box_wh: tensor of shape (None, 19, 19, 5, 2) box_class_probs: tensor of shape (None, 19, 19, 5, 80) image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) 
(has to be float32 dtype) max_boxes -- integer, maximum number of predicted boxes you'd like score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box iou_threshold -- real value, "intersection over union" threshold used for NMS filtering Returns: scores -- tensor of shape (None, ), predicted score for each box boxes -- tensor of shape (None, 4), predicted box coordinates classes -- tensor of shape (None,), predicted class for each box """ ### START CODE HERE ### # Retrieve outputs of the YOLO model (≈1 line) box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs # Convert boxes to be ready for filtering functions boxes = yolo_boxes_to_corners(box_xy, box_wh) # Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line) scores, boxes, classes = yolo_filter_boxes(box_confidence,boxes,box_class_probs,score_threshold) # Scale boxes back to original image shape. boxes = scale_boxes(boxes, image_shape) # Use one of the functions you've implemented to perform Non-max suppression with a threshold of iou_threshold (≈1 line) scores, boxes, classes = yolo_non_max_suppression(scores,boxes,classes,max_boxes,iou_threshold) ### END CODE HERE ### return scores, boxes, classes with tf.Session() as test_b: yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1), tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)) scores, boxes, classes = yolo_eval(yolo_outputs) print("scores[2] = " + str(scores[2].eval())) print("boxes[2] = " + str(boxes[2].eval())) print("classes[2] = " + str(classes[2].eval())) print("scores.shape = " + str(scores.eval().shape)) print("boxes.shape = " + str(boxes.eval().shape)) print("classes.shape = " + str(classes.eval().shape)) ``` **Expected Output**: <table> <tr> <td> 
**scores[2]** </td> <td> 138.791 </td> </tr> <tr> <td> **boxes[2]** </td> <td> [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141] </td> </tr> <tr> <td> **classes[2]** </td> <td> 54 </td> </tr> <tr> <td> **scores.shape** </td> <td> (10,) </td> </tr> <tr> <td> **boxes.shape** </td> <td> (10, 4) </td> </tr> <tr> <td> **classes.shape** </td> <td> (10,) </td> </tr> </table> <font color='blue'> **Summary for YOLO**: - Input image (608, 608, 3) - The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. - After flattening the last two dimensions, the output is a volume of shape (19, 19, 425): - Each cell in a 19x19 grid over the input image gives 425 numbers. - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. - 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and and 80 is the number of classes we'd like to detect - You then select only few boxes based on: - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes - This gives you YOLO's final output. ## 3 - Test YOLO pretrained model on images In this part, you are going to use a pretrained model and test it on the car detection dataset. As usual, you start by **creating a session to start your graph**. Run the following cell. ``` sess = K.get_session() ``` ### 3.1 - Defining classes, anchors and image shape. Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. We have gathered the information about the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". Let's load these quantities into the model by running the next cell. The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images. 
``` class_names = read_classes("model_data/coco_classes.txt") anchors = read_anchors("model_data/yolo_anchors.txt") image_shape = (720., 1280.) ``` ### 3.2 - Loading a pretrained model Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. You are going to load an existing pretrained Keras YOLO model stored in "yolo.h5". (These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will more simply refer to it as "YOLO" in this notebook.) Run the cell below to load the model from this file. ``` yolo_model = load_model("model_data/yolo.h5") ``` This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains. ``` yolo_model.summary() ``` **Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine. **Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2). ### 3.3 - Convert output of the model to usable bounding box tensors The output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you. ``` yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names)) ``` You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function. ### 3.4 - Filtering boxes `yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Lets now call `yolo_eval`, which you had previously implemented, to do this. 
``` scores, boxes, classes = yolo_eval(yolo_outputs, image_shape) ``` ### 3.5 - Run the graph on an image Let the fun begin. You have created a (`sess`) graph that can be summarized as follows: 1. <font color='purple'> yolo_model.input </font> is given to `yolo_model`. The model is used to compute the output <font color='purple'> yolo_model.output </font> 2. <font color='purple'> yolo_model.output </font> is processed by `yolo_head`. It gives you <font color='purple'> yolo_outputs </font> 3. <font color='purple'> yolo_outputs </font> goes through a filtering function, `yolo_eval`. It outputs your predictions: <font color='purple'> scores, boxes, classes </font> **Exercise**: Implement predict() which runs the graph to test YOLO on an image. You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`. The code below also uses the following function: ```python image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608)) ``` which outputs: - image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it. - image_data: a numpy-array representing the image. This will be the input to the CNN. **Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}. ``` def predict(sess, image_file): """ Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the preditions. Arguments: sess -- your tensorflow/Keras session containing the YOLO graph image_file -- name of an image stored in the "images" folder. Returns: out_scores -- tensor of shape (None, ), scores of the predicted boxes out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes out_classes -- tensor of shape (None, ), class index of the predicted boxes Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes. 
""" # Preprocess your image image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608)) # Run the session with the correct tensors and choose the correct placeholders in the feed_dict. # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0}) ### START CODE HERE ### (≈ 1 line) out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, K.learning_phase(): 0}) ### END CODE HERE ### # Print predictions info print('Found {} boxes for {}'.format(len(out_boxes), image_file)) # Generate colors for drawing bounding boxes. colors = generate_colors(class_names) # Draw bounding boxes on the image file draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors) # Save the predicted bounding box on the image image.save(os.path.join("out", image_file), quality=90) # Display the results in the notebook output_image = scipy.misc.imread(os.path.join("out", image_file)) imshow(output_image) return out_scores, out_boxes, out_classes ``` Run the following cell on the "test.jpg" image to verify that your function is correct. ``` out_scores, out_boxes, out_classes = predict(sess, "test.jpg") ``` **Expected Output**: <table> <tr> <td> **Found 7 boxes for test.jpg** </td> </tr> <tr> <td> **car** </td> <td> 0.60 (925, 285) (1045, 374) </td> </tr> <tr> <td> **car** </td> <td> 0.66 (706, 279) (786, 350) </td> </tr> <tr> <td> **bus** </td> <td> 0.67 (5, 266) (220, 407) </td> </tr> <tr> <td> **car** </td> <td> 0.70 (947, 324) (1280, 705) </td> </tr> <tr> <td> **car** </td> <td> 0.74 (159, 303) (346, 440) </td> </tr> <tr> <td> **car** </td> <td> 0.80 (761, 282) (942, 412) </td> </tr> <tr> <td> **car** </td> <td> 0.89 (367, 300) (745, 648) </td> </tr> </table> The model you've just run is actually able to detect 80 different classes listed in "coco_classes.txt". To test the model on your own images: 1. 
Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the cell above code 4. Run the code and see the output of the algorithm! If you were to run your session in a for loop over all your images. Here's what you would get: <center> <video width="400" height="200" src="nb_images/pred_video_compressed2.mp4" type="video/mp4" controls> </video> </center> <caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks [drive.ai](https://www.drive.ai/) for providing this dataset! </center></caption> <font color='blue'> **What you should remember**: - YOLO is a state-of-the-art object detection model that is fast and accurate - It runs an input image through a CNN which outputs a 19x19x5x85 dimensional volume. - The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes. - You filter through all the boxes using non-max suppression. Specifically: - Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes - Intersection over Union (IoU) thresholding to eliminate overlapping boxes - Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, we used previously trained model parameters in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise. **References**: The ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's github repository. The pretrained weights used in this exercise came from the official YOLO website. 
- Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015) - Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016) - Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K) - The official YOLO website (https://pjreddie.com/darknet/yolo/) **Car detection dataset**: <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. We are especially grateful to Brody Huval, Chih Hu and Rahul Patel for collecting and providing this dataset.
github_jupyter
<!--NOTEBOOK_HEADER--> *This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks); content is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).* <!--NAVIGATION--> < [RosettaAntibodyDesign](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/12.02-RosettaAntibodyDesign-RAbD.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [RosettaCarbohydrates: Trees, Selectors and Movers](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/13.01-Glycan-Trees-Selectors-and-Movers.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/13.00-RosettaCarbohydrates-Working-with-Glycans.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a> # RosettaCarbohydrates Keywords: carbohydrate, glycan, sugar, glucose, mannose, sugar, GlycanTreeSet, saccharide, furanose, pyranose, aldose, ketose ## Overview In this chapter, we will focus on a special subset of non-peptide oligo- and polymers — carbohydrates.</p> Modeling carbohydrates — also known as saccharides, glycans, or simply sugars — comes with some special challenges. For one, most saccharide residues contain a ring as part of their backbone. This ring provides potentially new degrees of freedom when sampling. Additionally, carbohydrate structures are often branched, leading in Rosetta to more complicated `FoldTrees`. This chapter includes a quick overview of carbohydrate nomenclature, structure, and basic interactions within Rosetta. 
## Carbohydrate Chemistry Background <div class="figure-right" style="clear:right;float:right;width:300px"><img src="./Media/Fig-pyranose_vs_furanose.png" width="300px"><small><b>Figure 1.</b> A pyranose (left) and a furanose (right).</small></div> <p>Sugars (<b>saccharides</b>) are defined as hydroxylated aldehydes and ketones. A typical monosaccharide has an equal number of carbon and oxygen atoms. For example, glucose has the molecular formula C<sub>6</sub>H<sub>12</sub>O<sub>6</sub>.</p><p>Sugars containing more than three carbons will spontaneously cyclize in aqueous environments to form five- or six-membered hemiacetals and hemiketals. Sugars with five-membered rings are called <b>furanoses</b>; those with six-membered rings are called <b>pyranoses</b> (Fig. 1).</p> <div class="figure-left" style="clear:left;float:left;width:300px"><img src="./Media/Fig-aldose_vs_ketose.png" width="300px"><small><b>Figure 2.</b> An aldose (left) and a ketose (right).</small></div> <p>A sugar is classified as an <b>aldose</b> or <b>ketose</b>, depending on whether it has an aldehyde or ketone in its linear form (Fig. 2).</p> <p>The different sugars have different names, depending on the stereochemistry at each of the carbon atoms in the molecule. For example, glucose has one set of stereochemistries, while mannose has another.</p> <p>In addition to their full names, many individual saccharide residues have three-letter codes, just like amino acid residues do. Glucose is "Glc" and mannose is "Man".</p> ## Backbone Torsions, Residue Connections, and side-chains A glycan tree is made up of many sugar residues, each residue a ring. The 'backbone' of a glycan is the connection between one residue and another. The chemical makeup of each sugar residue in this 'linkage' affects the propensity/energy of each backbone dihedral angle. In addition, sugars can be attached via different carbons of the parent glycan.
In this way, the chemical makeup and the attachment position affect the dihedral propensities. Typically, there are two backbone dihedral angles, but this could be up to 4+ angles depending on the connection. In IUPAC, the dihedrals of N are defined as the dihedrals between N and N-1 (i.e., the parent linkage). The ASN (or other glycosylated protein residue's) dihedrals become part of the first glycan residue that is connected. For this first glycan residue that is connected to an ASN, it has 4 torsions, while the ASN now has none! If you are creating a movemap for dihedral residues, please use the `MoveMapFactory` as this has the IUPAC nomenclature of glycan residues built in in order to allow proper DOF sampling of the backbone residues, especially for branching glycan trees. In general, all of our samplers should use residue selectors and use the MoveMapFactory to build movemaps internally. A sugar's side-chains are the constituents of the glycan ring, which are typically an OH group or an acetyl group. These are sampled together at 60 degree angles by default during packing. A higher granularity of rotamers cannot currently be handled in Rosetta, but 60 degrees seems adequate for our purposes. Within Rosetta, glycan connectivity information is stored in the `GlycanTreeSet`, which is continually updated to reflect any residue changes or additions to the pose. This info is always available through the function pose.glycan_tree_set() Chemical information of each glycan residue can be accessed through the CarbohydrateInfo object, which is stored in each ResidueType object: pose.residue_type(i).carbohydrate_info() We will cover both of these classes in the next tutorial. ## Documentation https://www.rosettacommons.org/docs/latest/application_documentation/carbohydrates/WorkingWithGlycans ## References **Residue centric modeling and design of saccharide and glycoconjugate structures** Jason W. Labonte Jared Adolf-Bryfogle William R. Schief Jeffrey J.
Gray _Journal of Computational Chemistry_, 11/30/2016 - <https://doi.org/10.1002/jcc.24679> **Automatically Fixing Errors in Glycoprotein Structures with Rosetta** Brandon Frenz, Sebastian Rämisch, Andrew J. Borst, Alexandra C. Walls Jared Adolf-Bryfogle, William R. Schief, David Veesler, Frank DiMaio _Structure_, 1/2/2019 ## Initialization <p>Let's use Pyrosetta to compare some common monosaccharide residues and see how they differ. As usual, we start by importing the `pyrosetta` and `rosetta` namespaces.</p> ``` import sys if 'google.colab' in sys.modules: !pip install pyrosettacolabsetup import pyrosettacolabsetup pyrosettacolabsetup.mount_pyrosetta_install() print ("Notebook is set for PyRosetta use in Colab. Have fun!") from pyrosetta import * from pyrosetta.teaching import * from pyrosetta.rosetta import * ``` First, one needs the `-include_sugars` option, which will tell Rosetta to load sugars and add the sugar_bb energy term to a default scorefunction. This scoreterm is like rama for the sugar dihedrals which connect each sugar residue. ``` init('-include_sugars') ``` When loading structures from the PDB that include glycans, we use these options. This includes an option to write out the structures in pdb format instead of the (better) Rosetta format. We will be using these options in the next tutorial. -maintain_links -auto_detect_glycan_connections -alternate_3_letter_codes pdb_sugar -write_glycan_pdb_codes -load_PDB_components false <ul><li>Set up the `PyMOLMover` for viewing structures.</li></ul> ``` pm = PyMOLMover() ``` ## Creating Saccharides from Sequence We will use the function, `pose_from_saccharide_sequence()`, which must be imported from the `core.pose` namespace. Unlike with peptide chains, one-letter-codes will not suffice when specifying saccharide chains, because there is too much information to convey; we must use at least four letters. 
The first three letters are the sugar's three-letter code; the fourth letter designates whether the residue is a furanose (`f`) or pyranose (`p`). ``` from pyrosetta.rosetta.core.pose import pose_from_saccharide_sequence glucose = pose_from_saccharide_sequence('Glcp') galactose = pose_from_saccharide_sequence('Galp') mannose = pose_from_saccharide_sequence('Manp') ``` <ul><li>Use the `PyMOLMover` to compare the three monosacharides in PyMOL.</li></ul> <ul><li>At which carbons do the three sugars differ?</li></ul> ### L and D Forms <p>Just like with peptides, saccharides come in two enantiomeric forms, labelled <font style="font-variant: small-caps">l</font> and <font style="font-variant: small-caps">d</font>. (Note the small-caps, used in print.) These can be loaded into PyRosetta using the prefixes `L-` and `D-`.</p> ``` L_glucose = pose_from_saccharide_sequence('L-Glcp') D_glucose = pose_from_saccharide_sequence('D-Glcp') ``` <ul><li>Compare the two structures in PyMOL. Notice that all stereocenters are inverted between the two monosaccharides.</li></ul> <ul><li>Which enantiomer is loaded by PyRosetta by default if <font style="font-variant: small-caps">l</font> or <font style="font-variant: small-caps">d</font> are not specified?</li></ul> ### Anomers <p>The carbon that is at a higher oxidation state — that is, the carbon of the hemiacetal/-ketal in the cyclic form or the carbon that is the carbonyl carbon of the aldehyde or ketone in the linear form — is called the <b>anomeric carbon</b>. Because the carbonyl of an aldehyde or ketone is planar, a sugar molecule can cyclize into one of two forms, one in which the resulting hydroxyl group is pointing "up" and another in which the same hydroxyl group is pointing "down". 
These two <b>anomers</b> are labelled α and β.</p> <ul><li>Create a one-residue `Pose` for both α- and β-<font style="font-variant: small-caps">d</font>-glucopyranose and use PyMOL to compare both.</li></ul> ``` alpha_D_glucose = pose_from_saccharide_sequence('a-D-Glcp') ``` <ul><li>For which anomer is the C1 hydroxyl group axial to the chair conformation of the six-membered pyranose ring?</li> <li>Which anomer of <font style="font-variant: small-caps">d</font>-glucose would you predict to be the most stable? (Hint: remember what you learned in organic chemistry about axial and equatorial substituents.)</li></ul> ### Linear Oligosaccharides & IUPAC Sequences Oligo- and polysaccharides are composed of simple monosaccharide residues connected by acetal and ketal linkages called __glycosidic bonds__. Any of the monosaccharide's _hydroxyl_ groups can be used to form a linkage to the anomeric carbon of another monosaccharide, leading to both _linear_ and _branched_ molecules. Rosetta can create both _linear_ and _branched_ oligosaccharides from an __IUPAC__ sequence. (IUPAC is the international organization dedicated to chemical nomenclature.) <p>To properly build a linear oligosaccharide, Rosetta must know the following details about each sugar residue being created in the following order:</p> - Main-chain connectivity — →2) (`->2)`), →4) (`->4)`), →6) (`->6)`), _etc._; default value is `->4)-` - Anomeric form — α (`a` or `alpha`) or β (`b` or `beta`); default value is `alpha` - Enantiomeric form — <font style="font-variant: small-caps">l</font> (`L`) or <font style="font-variant: small-caps">d</font> (`D`); default value is `D` - 3-Letter code — required; uses sentence case - Ring form code — <i>f</i> (for a furanose/5-membered ring), <i>p</i> (for a pyranose/6-membered ring); required Residues must be separated by hyphens. Glycosidic linkages can be specified with full IUPAC notation, _e.g._, `-(1->4)-` for “-(1→4)-”. 
(This means that the residue on the left connects from its C1 (anomeric) position to the hydroxyl oxygen at C4 of the residue on the right.) Rosetta will assume `-(1->` for aldoses and `-(2->` for ketoses.</p><p>Note that the standard is to write the IUPAC sequence of a saccharide chain <em>in reverse order from how they are numbered</em>. Let's create three new oligosaccharides from sequence. ``` maltotriose = pose_from_saccharide_sequence('a-D-Glcp-' * 3) lactose = pose_from_saccharide_sequence('b-D-Galp-(1->4)-a-D-Glcp') isomaltose = pose_from_saccharide_sequence('->6)-Glcp-' * 2) ``` ### General Residue Information When you print a `Pose` containing carbohydrate residues, the sugar residues will be listed as `Z` in the sequence. ``` print("maltotriose\n", maltotriose) print("\nisomaltose\n", isomaltose) print("\nlactose\n", lactose) ``` However, you can have Rosetta print out the sequences for individual chains, using the `chain_sequence()` method. If you do this, Rosetta is smart enough to give you a distinct sequence format for saccharide chains. (You may have noticed that the default file name for a `.pdb` file created from this `Pose` will be the same sequence.) ``` print(maltotriose.chain_sequence(1)) print(isomaltose.chain_sequence(1)) print(lactose.chain_sequence(1)) ``` <p>Again, the standard is to show the sequence of a saccharide chain in reverse order from how they are numbered.</p> This is also how phi, psi, and omega are defined. From i+1 to i. ``` for res in lactose.residues: print(res.seqpos(), res.name()) ``` <p>Notice that for polysaccharides, the upstream residue is called the <b>reducing end</b>, while the downstream residue is called the <b>non-reducing end</b>.</p> You will also see the terms parent and child being used across Rosetta. Here, for Residue 2, residue 1 is the parent. For Residue 1, Residue 2 is the child. Due to branching, residues can have more than one child/non-reducing-end, but only a single parent residue.
<p>Rosetta stores carbohydrate-specific information within `ResidueType`. If you print a residue, this additional information will be displayed.</p> ``` print(glucose.residue(1)) ``` <ul><li>Scanning the output from printing a glucose `Residue`, what is the general term for an aldose with six carbon atoms?</li> ## Exploring Carbohydrate Structure ### Torsion Angles <p>Most bioolymers have predefined, named torsion angles for their main-chain and side-chain bonds, such as φ, ψ, and ω and the various χs for amino acid residues. The same is true for saccharide residues. The torsion angles of sugars are as follows:</p> <ul><div class="figure-right" style="clear:right;float:right;width:300px"><img src="./Media/Fig-saccharide_main_chain_torsions.png" width="300px"><small><b>Figure 3.</b> A disaccharide's main-chain torsion angles.</small></div><div class="figure-right" style="clear:right;float:right;width:150px"><img src="./Media/Fig-saccharide_ring_torsions.png" width="150px"><small><b>Figure 4.</b> A monosaccharide's internal ring torsion angles.</small></div><div class="figure-right" style="clear:right;float:right;width:150px"><img src="./Media/Fig-saccharide_side_chain_torsions.png" width="150px"><small><b>Figure 5.</b> A monosaccharide's side-chain torsion angles.</small></div><li>φ — The 1<sup>st</sup> glycosidic torsion back to the <em>previous</em> (<i>n</i>&minus;1) residue. The angle is defined by the cyclic oxygen, the two atoms across the bond, and the cyclic carbon numbered one less than the glycosidic linkage position. For aldopyranoses, φ(<i>n</i>) is thus defined as O5(<i>n</i>)–C1(<i>n</i>)–O<i>X</i>(<i>n</i>&minus;1)–C<i>X</i>(<i>n</i>&minus;1), where <i>X</i> is the position of the glycosidic linkage. For aldofuranoses, φ(<i>n</i>) is defined as O4(<i>n</i>)–C1(<i>n</i>)–O<i>X</i>(<i>n</i>&minus;1)–CX(<i>n</i>&minus;1) For 2-ketopyranoses, φ(<i>n</i>) is defined as O6(<i>n</i>)–C2(<i>n</i>)–O<i>X</i>(<i>n</i>&minus;1)–C<i>X</i>(<i>n</i>&minus;1). 
For 2-ketofuranoses, φ(<i>n</i>) is defined as O5(<i>n</i>)–C2(<i>n</i>)–OX(<i>n</i>&minus;1)–CX(<i>n</i>&minus;1). <i>Et cetera</i>&hellip;.</li><li>ψ — The 2<sup>nd</sup> glycosidic torsion back to the <em>previous</em> (<i>n</i>&minus;1) residue. The angle is defined by the anomeric carbon, the two atoms across the bond, and the cyclic carbon numbered two less than the glycosidic linkage position. ψ(<i>n</i>) is thus defined as C<sub>anomeric</sub>(<i>n</i>)–O<i>X</i>(<i>n</i>&minus;1)–C<i>X</i>(<i>n</i>&minus;1)–C<i>X</i>&minus;1(<i>n</i>&minus;1), where <i>X</i> is the position of the glycosidic linkage.</li><li>ω — The 3<sup>rd</sup> (and any subsequent) glycosidic torsion(s) back to the <em>previous residue</em>. ω<sub>1</sub>(<i>n</i>) is defined as O<i>X</i>(<i>n</i>&minus;1)–C<i>X</i>(<i>n</i>&minus;1)–C<i>X</i>&minus;1(<i>n</i>&minus;1)–C<i>X</i>&minus;2(<i>n</i>&minus;1), where <i>X</i> is the position of the glycosidic linkage. (This only applies to sugars with exocyclic connectivities.). The connection in Figure 3 has an exocyclic carbon, but the other potential connection points do not - so only phi and psi would available as bacbone torsion angles for those connection points. </li><li>ν<sub>1</sub> – ν<sub><i>n</i></sub> — The internal ring torsion angles, where <i>n</i> is the number of atoms in the ring. ν<sub>1</sub> defines the torsion across bond C1–C2, <i>etc</i>.</li><li>χ<sub>1</sub> – χ<sub><i>n</i></sub> — The side-chain torsion angles, where <i>n</i> is the number of carbons in the sugar residue. The angle is defined by the carbon numbered one less than the glycosidic linkage position, the two atoms across the bond, and the polar hydrogen. The cyclic ring counts as carbon 0. For an aldopyranose, χ<sub>1</sub> is thus defined by O5–C1–O1–HO1, and χ<sub>2</sub> is defined by C1–C2–O2–HO2. χ<sub>5</sub> is defined by C4–C5–C6–O6, because it rotates the exocyclic carbon rather than twists the ring. 
χ<sub>6</sub> is defined by C5–C6–O6–HO6.</li></ul> Take special note of how φ, ψ, and ω are defined <em>in the reverse order</em> as the angles of the same names for amino acid residues! The `chi()` method of `Pose` works with sugar residues in the same way that it works with amino acid residues, where the first argument is the χ subscript and the second is the residue number of the `Pose`. ``` galactose.chi(1, 1) galactose.chi(2, 1) galactose.chi(3, 1) galactose.chi(4, 1) galactose.chi(5, 1) galactose.chi(6, 1) ``` Likewise, we can use `set_chi()` to change these torsion angles and observe the changes in PyMOL, setting the option to keep history to true. ``` from pyrosetta.rosetta.protocols.moves import AddPyMOLObserver observer = AddPyMOLObserver(galactose, True) pm.apply(galactose) ``` <ul><li>Perform the following torsion angle changes to galactose using `set_chi()` and observe which torsions move in PyMOL.<ul><li>Set χ<sub>1</sub> to 120&deg;.</li><li>Set χ<sub>2</sub> to 60&deg;.</li><li>Set χ<sub>3</sub> to 60&deg;.</li><li>Set χ<sub>4</sub> to 0&deg;.</li><li>Set χ<sub>5</sub> to 60&deg;.</li><li>Set χ<sub>6</sub> to &minus;60&deg;.</li></ul></li></ul> ``` galactose.set_chi(1, 1, 180) ## BEGIN SOLUTION for chi_angle in zip([x for x in range(1, 7)], [120, 60, 60, 0, 60, -60]): print(chi_angle) galactose.set_chi(chi_angle[0] , 1, chi_angle[1]) ## END SOLUTION ``` ### Creating Saccharides from a PDB file The `phi()`, `set_phi()`, `psi()`, `set_psi()`, `omega()`, and `set_omega()` methods of `Pose` also work with sugars. However, since `pose_from_saccharide_sequence()` may create a `Pose` with angles that cause the residues to wrap around onto each other, instead, let's reload some Pose's from `.pdb` files.
``` maltotriose = pose_from_file('inputs/glycans/maltotriose.pdb') isomaltose = pose_from_file('inputs/glycans/isomaltose.pdb') ``` <ul><li>Now, try out the torsion angle getters and setters for the glycosidic bonds.</li></ul> ``` pm.apply(maltotriose) maltotriose.phi(1) maltotriose.psi(1) maltotriose.phi(2) maltotriose.psi(2) maltotriose.omega(2) maltotriose.phi(3) maltotriose.psi(3) ``` <p>Notice how φ<sub>1</sub> and ψ<sub>1</sub> are undefined&mdash;the first residue is not connected to anything. ``` observer = AddPyMOLObserver(maltotriose, True) for i in (2, 3): maltotriose.set_phi(i, 180) maltotriose.set_psi(i, 180) ``` **Isomaltose** is composed of (1→6) linkages, so in this case omega torsions are defined. Get and set φ<sub>2</sub>, ψ<sub>2</sub>, ω<sub>2</sub></p> for isomaltose ``` observer = AddPyMOLObserver(isomaltose, True) ## BEGIN SOLUTION print(isomaltose.phi(2)) print(isomaltose.psi(2)) print(isomaltose.omega(2)) ## END SOLUTION ``` <p>Any cyclic residue also stores its ν angles.</p> ``` pm.apply(glucose) Glc1 = glucose.residue(1) for i in range(1, 6): print(Glc1.nu(i)) ``` <p>However, we generally care more about the ring conformation of a cyclic residue&rsquo;s rings, in this case, its only ring with index of 1. (The output values here are the ideal angles, not the actual angles, which we viewed above.)</p> ``` print(Glc1.ring_conformer(1)) ``` ### RingConformers <p>The output above warrants a brief explanation. First, what does `4C1` mean? Most of us likely remember learning about chair and boat conformations in Organic Chemistry. Do you recall how there are two distinct chair conformations that can interconvert between each other? The names for these specific conformations are <sup>4</sup>C<sub>1</sub> and <sup>1</sup>C<sub>4</sub>. The nomenclature is as follows: Superscripts to the left of the capital letter are above the plane of the ring if it is oriented such that its carbon atoms proceed in a clockwise direction when viewed from above.
Subscripts to the right of the letter are below the plane of the ring. The letter itself is an abbreviation, where, for example, C indicates a chair conformation and B a boat conformation. In all, there are 38 different ideal ring conformations that any six-membered cycle can take.</p><p>`C-P parameters` refers to the Cremer&ndash;Pople parameters for this conformation (Cremer D, Pople JA. J Am Chem Soc. 1975;97:1354–1358.). C&ndash;P parameters are an alternative coordinate system used to refer to a ring conformation.</p><p> Finally, a `RingConformer` in Rosetta includes the values of the ν angles. Each conformer has a unique set of angles. `Pose::set_nu()` does not exist, because it would rip a ring apart. Instead, to change a ring conformation, we need to use the `set_ring_conformation()` method, which takes a `RingConformer` object. Most of the time, you will not need to adjust the ring conformers, but you should be aware of it. We can ask a cyclic `ResidueType` for one of its `RingConformerSet`s to give us the `RingConformer` we want. (Each `RingConformerSet` includes the list of possible idealized ring conformers that such a ring can attain as well as information about the most energetically favorable one.) Then, we can set the conformation for our residue through `Pose`. (The arguments for `set_ring_conformation()` are the `Pose`’s sequence position, ring number, and the new conformer, respectively.)</p> <ul><div class="figure-center" style="clear:middle;float:middle;width:300px"><img src="./Media/chair_figure.tif" width="300px"><medium><b>Figure 5.</b> The two chair conformations of α-d-glucopyranose. In the <sup>1</sup>C<sub>4</sub> conformation (left), all of the substituents are axial; in the <sup>4</sup>C<sub>1</sub> conformation (right), they are equatorial. <sup>4</sup>C<sub>1</sub> is the most stable conformation for the majority of the α-d-aldohexopyranoses.
In this nomenclature, a superscript means that that numbered carbon is above the ring, if the atoms are arranged in a clockwise manner from C1. A subscripted number indicates a carbon below the plane of the ring. ``` ring_set = Glc1.type().ring_conformer_set(1) conformer = ring_set.get_ideal_conformer_by_name('1C4') glucose.set_ring_conformation(1, 1, conformer) pm.apply(glucose) ``` ## Modified Sugars, Branched Oligosaccharides, &amp; `.pdb` File `LINK` Records <p>Modified sugars can also be created in Rosetta, either from sequence or from file. In the former case, simply use the proper abbreviation for the modification after the “ring form code”. For example, the abbreviation for an <i>N</i>-acetyl group is “NAc”. Note the <i>N</i>-acetyl group in the PyMOL window.</p> ``` LacNAc = pose_from_saccharide_sequence('b-D-Galp-(1->4)-a-D-GlcpNAc') pm.apply(LacNAc) ``` <p>Rosetta can handle branched oligosaccharides as well, but when loading from a sequence, this requires the use of brackets, which is the standard IUPAC notation. For example, here is how one would load Lewis<sup>x</sup> (Le<sup>x</sup>), a common branched glyco-epitope, into Rosetta by sequence.</p> ``` Lex = pose_from_saccharide_sequence('b-D-Galp-(1->4)-[a-L-Fucp-(1->3)]-D-GlcpNAc') pm.apply(Lex) ``` One can also load branched carbohydrates from a `.pdb` file. These `.pdb` files must include `LINK` records, which are a standard part of the PDB format. Open the `inputs/glycans/Lex.pdb` file and look near the top to see an example `LINK` record, which looks like this: ```LINK O3 Glc A 1 C1 Fuc B 1 1555 1555 1.5 ``` It tells us that there is a covalent linkage between O3 of glucose A1 and C1 of fucose B1 with a bond length of 1.5 Å. (The `1555`s indicate symmetry and are ignored by Rosetta.) Note that if the LINK records are not in order, or HETNAM records are not in a Rosetta format, we will fail to load. In the next tutorial we will use auto-detection to do this.
For now, we know Lex.pdb will load OK. ``` Lex = pose_from_file('inputs/glycans/Lex.pdb') pm.apply(Lex) ``` You may notice when viewing the structure in PyMOL that the hybridization of the carbonyl of the amido functionality of the <i>N</i>-acetyl group is wrong. This is because of an error in the model deposited in the PDB from which this file was generated. This is, unfortunately, a very common problem with sugar structures found in the PDB. It is always useful to use http://www.glycosciences.de to identify any errors in the solution PDB structure before working with them in Rosetta. The referenced paper, __Automatically Fixing Errors in Glycoprotein Structures with Rosetta__ can be used as a guide to fixing these. You may also have noticed that the `inputs/glycans/Lex.pdb` file indicated in its `HETNAM` records that Glc1 was actually an <i>N</i>-acetylglycosamine (GlcNAc) with the indication `2-acetylamino-2-deoxy-`. This is optional and is helpful for human-readability, but Rosetta only needs to know the base `ResidueType` of each sugar residue; specific `VariantType`s needed — and most sugar modifications are treated as `VariantType`s — are determined automatically from the atom names in the `HETATM` records for the residue. Anything after the comma is ignored.</p><ul><li>Print out the `Pose` to see how the `FoldTree` is defined. ``` ## BEGIN SOLUTION print(Lex) ## END SOLUTION ``` Note the `CHEMICAL` `Edge` (`-2`). This is Rosetta’s way of indicating a branch backbone connection. Unlike a standard `POLYMER` `Edge` (`-1`), this one tells you which atoms are involved.<p><ul><li>Print out the sequence of each chain. 
<ul><li>Now print out information about each residue in the Pose to see which VariantTypes and ResiduePropertys are assigned to each.</li></ul> <ul><li>What are the three `VariantType`s of residue 1?</li><li>Output the various torison angles and make sure that you understand to which angles they correspond.</li></ul> <p>Can you see now why φ and ψ are defined the way they are? If they were defined as in AA residues, they would not have unique definitions, since GlcNAc is a branch point. A monosaccharide can have multiple children, but it can never have more than a single parent.</p><p>Note that for this oligosaccharide χ<sub>3</sub>(1) is equivalent to ψ(3) and χ<sub>4</sub>(1) is equivalent to ψ(2). Make sure that you understand why!</p> ``` Lex.chi(3, 1), Lex.psi(3) Lex.chi(4, 1), Lex.psi(2) ``` <p>For chemically modified sugars, χ angles are redefined at the positions where substitution has occurred. For new χs that have come into existence from the addition of new atoms and bonds, new definitions are added to new indices. For example, for Glc<i>N</i><sup>2</sup>Ac residue 1, χ<sub>C2–N2–C′–Cα′</sub> is accessed through `chi(7, 1)`.</p> ``` Lex.chi(2, 1) Lex.set_chi(2, 1, 180) pm.apply(Lex) Lex.chi(7, 1) Lex.set_chi(7, 1, 0) pm.apply(Lex) ``` <ul><li>Play around with getting and setting the various torsion angles for Le<sup>x</sup></li></ul> ## <i>N</i>- and <i>O</i>-Linked Glycans <p>Branching does not have to occur at sugars; a glycan can be attached to the nitrogen of an ASN or the oxygen of a SER or THR. <i>N</i>-linked glycans themselves tend to be branched structures.</p> We will cover more on linked glycan trees in the next tutorial through the `GlycanTreeSet` object - which is always present in a pose that has carbohydrates. 
``` N_linked = pose_from_file('inputs/glycans/N-linked_14-mer_glycan.pdb') pm.apply(N_linked) print(N_linked) for i in range(4): print(N_linked.chain_sequence(i + 1)) ``` <ul><li>Which residue number is glycosylated above?</li></ul> ``` O_linked = pose_from_file('inputs/glycans/O_glycan.pdb') pm.apply(O_linked) ``` <ul><li>Print `O_linked` and the sequence of each of its chains.</li></ul> `set_phi()` and `set_psi()` still work when a glycan is linked to a peptide. (Below, we use `pdb_info()` to help us select the residue that we want. In this case, in the `.pdb` file, the glycan is chain B.) ``` N_linked.set_phi(N_linked.pdb_info().pdb2pose("B", 1), 180) pm.apply(N_linked) ``` <ul><li>Set ψ(B1) to 0&deg; and ω(B1) to 90&deg; and view the results in PyMOL.</li></ul> <p>Notice that in this case ψ and ω affect the side-chain torsions (χs) of the asparagine residue. This is another case where there are multiple ways of both naming and accessing the same specific torsion angles.</p><p>One can also create conjugated glycans from sequences if performed in steps, first creating the peptide portion by loading from a `.pdb` file or from sequence and then using the `glycosylate_pose()` function, (which needs to be imported first.) For example, to glycosylate an ASA peptide with a single glucose at position 2 of the peptide, we perform the following:</p> ### Glycosylation by function Here, we will glycosylate a simple peptide using the function, `glycosylate_pose`. In the next tutorial, we will use a Mover interface to this function. ``` peptide = pose_from_sequence('ASA') pm.apply(peptide) from pyrosetta.rosetta.core.pose.carbohydrates import glycosylate_pose, glycosylate_pose_by_file glycosylate_pose(peptide, 2, 'Glcp') pm.apply(peptide) ``` Here, we use the main function to glycosylate a pose. In the next tutorial, we will use a Mover interface to do so. <p>It is also possible to glycosylate a pose with common glycans found in the database.
These files end in the `.iupac` extension and are simply IUPAC sequences just as we have been using throughout this chapter.</p> Here is a list of some common iupacs. ``` bisected_fucosylated_N-glycan_core.iupac bisected_N-glycan_core.iupac common_names.txt core_1_O-glycan.iupac core_2_O-glycan.iupac core_3_O-glycan.iupac core_4_O-glycan.iupac core_5_O-glycan.iupac core_6_O-glycan.iupac core_7_O-glycan.iupac core_8_O-glycan.iupac fucosylated_N-glycan_core.iupac high-mannose_N-glycan_core.iupac hybrid_bisected_fucosylated_N-glycan_core.iupac hybrid_bisected_N-glycan_core.iupac hybrid_fucosylated_N-glycan_core.iupac hybrid_N-glycan_core.iupac man5.iupac man9.iupac N-glycan_core.iupac ``` ``` peptide = pose_from_sequence('ASA'); pm.apply(peptide) glycosylate_pose_by_file(peptide, 2, 'core_5_O-glycan') pm.apply(peptide) ``` ## Conclusion You now have a grasp on the basics of RosettaCarbohydrates. Please continue onto the next tutorial for more on glycan residue selection and various movers that can be of use when working with glycans. **Chapter contributors:** - Jared Adolf-Bryfogle (Scripps; Institute for Protein Innovation) - Jason Labonte (Jons Hopkins; Franklin and Marshall College) <!--NAVIGATION--> < [RosettaAntibodyDesign](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/12.02-RosettaAntibodyDesign-RAbD.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [RosettaCarbohydrates: Trees, Selectors and Movers](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/13.01-Glycan-Trees-Selectors-and-Movers.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/13.00-RosettaCarbohydrates-Working-with-Glycans.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
github_jupyter