code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Load data from train & test data set from scipy.io import loadmat train_path =r'/root/notebooks/Projects/number-detection/source/train_32x32.mat' test_path=r'/root/notebooks/Projects/number-detection/source/test_32x32.mat' train = loadmat(train_path) test = loadmat(test_path) X_train = train['X'] y_train = train['y'] X_test = test['X'] y_test = test['y'] print("Shape of X_train is:", X_train.shape) print("Shape of y_train is:", y_train.shape) print("Shape of X_test is:", X_test.shape) print("Shape of y_test is:", y_test.shape) # + import matplotlib.pyplot as plt import numpy as np # %matplotlib inline X_train = X_train[:,:,:,:] X_test = X_test[:,:,:,:] start = 50 fig, ax = plt.subplots(3, 4) for row in range(3): for col in range(4): ax[row, col].imshow(X_train[:,:,:,start+row*3+col]) ax[row, col].axis('off') ax[row, col].set_title('{}'.format(y_train[start+row*3+col, 0])) plt.show() X_train = np.rollaxis(X_train, 3) X_test = np.rollaxis(X_test, 3) print("Shape of X_train is now:", X_train.shape) print("Shape of X_test is now:", X_test.shape) # + y_train = y_train[:,0] y_test = y_test[:,0] print("Shape of y_train is now:", y_train.shape) print("Shape of y_test is now:", y_test.shape) # + y_train[y_train==10] = 0 y_test[y_test==10] = 0 print("labels of y_train are", np.unique(y_train[:])) print("labels of y_test are", np.unique(y_test[:])) # + X_train = X_train[:,:,7:24,:] X_test = X_test[:,:,7:24,:] # plot cropped images fig, ax = plt.subplots(3, 4) for row in range(3): for col in range(4): ax[row, col].imshow(X_train[start+row*3+col,:,:,:]) ax[row, col].axis('off') ax[row, col].set_title('{}'.format(y_train[start+row*3+col])) plt.show() # - X_train = X_train / 255 X_test = X_test / 255 # + from tensorflow.keras.models import Sequential from 
tensorflow.keras.layers import Conv2D from tensorflow.keras.layers import MaxPooling2D from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Dropout model = Sequential([ Conv2D(32, (3,3), activation='relu', input_shape=(32, 17, 3)), Conv2D(32, (3,3), activation='relu'), MaxPooling2D(2, 2), Dropout(0.3), Conv2D(64, (3,3), activation='relu'), Conv2D(64, (3,3), activation='relu'), MaxPooling2D(2, 2), Dropout(0.3), Flatten(), Dense(512, activation='relu'), Dropout(0.3), Dense(10, activation='softmax') ]) # get a summary of our built model model.summary() # - model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train, epochs=10) loss, acc = model.evaluate(X_test, y_test) print("Model accuracy on test data is: {:6.3f}%".format(100 * acc)) model.save('number_model.h5')
jupyter/keras-model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dependencies # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _kg_hide-output=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn import metrics from sklearn.model_selection import train_test_split from keras.models import Model from keras.callbacks import EarlyStopping from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Dense, Input, Embedding, Dropout, Activation, CuDNNLSTM, Conv1D, Bidirectional, GlobalMaxPool1D # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv") print("Train shape : ", train.shape) print("Test shape : ", test.shape) # - # # Preprocess # + _kg_hide-input=true train['target'] = np.where(train['target'] >= 0.5, 1, 0) train['comment_text'] = train['comment_text'].astype(str) X_test = test['comment_text'].astype(str) # + _kg_hide-input=true # Train/validation split train_ids, val_ids = train_test_split(train['id'], test_size=0.2, random_state=2019) train_df = pd.merge(train_ids.to_frame(), train) validate_df = pd.merge(val_ids.to_frame(), train) Y_train = train_df['target'].values Y_val = validate_df['target'].values X_train = train_df['comment_text'] X_val = validate_df['comment_text'] # - # Hyper parameters maxlen = 150 # max number of words in a question to use embed_size = 300 # how big is each word vector max_features = 30000 # how many unique words to use (i.e num rows in embedding vector) # + _kg_hide-input=true # Fill missing values X_train = 
X_train.fillna("_na_").values X_val = X_val.fillna("_na_").values X_test = X_test.fillna("_na_").values # + _kg_hide-input=true # Tokenize the sentences tokenizer = Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(X_train)) X_train = tokenizer.texts_to_sequences(X_train) X_val = tokenizer.texts_to_sequences(X_val) X_test = tokenizer.texts_to_sequences(X_test) # + _kg_hide-input=true # Pad the sentences X_train = pad_sequences(X_train, maxlen=maxlen) X_val = pad_sequences(X_val, maxlen=maxlen) X_test = pad_sequences(X_test, maxlen=maxlen) # - # # Model # + _kg_hide-output=true inp = Input(shape=(maxlen,)) x = Embedding(max_features, embed_size)(inp) x = Bidirectional(CuDNNLSTM(64, return_sequences=True))(x) x = GlobalMaxPool1D()(x) x = Dense(32, activation="relu")(x) x = Dropout(0.5)(x) x = Dense(1, activation="sigmoid")(x) model = Model(inputs=inp, outputs=x) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() # + _kg_hide-input=true _kg_hide-output=true es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3) history = model.fit(X_train, Y_train, batch_size=512, epochs=20, validation_data=(X_val, Y_val), callbacks=[es]) # + _kg_hide-input=true sns.set_style("whitegrid") fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 8)) ax1.plot(history.history['acc'], label='Train Accuracy') ax1.plot(history.history['val_acc'], label='Validation accuracy') ax1.legend(loc='best') ax1.set_title('Accuracy') ax2.plot(history.history['loss'], label='Train loss') ax2.plot(history.history['val_loss'], label='Validation loss') ax2.legend(loc='best') ax2.set_title('Loss') plt.xlabel('Epochs') sns.despine() plt.show() # - # # Model evaluation # + _kg_hide-input=true identity_columns = [ 'male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish', 'muslim', 'black', 'white', 'psychiatric_or_mental_illness'] # Convert taget and identity columns to booleans def convert_to_bool(df, col_name): df[col_name] = 
np.where(df[col_name] >= 0.5, True, False) def convert_dataframe_to_bool(df): bool_df = df.copy() for col in ['target'] + identity_columns: convert_to_bool(bool_df, col) return bool_df SUBGROUP_AUC = 'subgroup_auc' BPSN_AUC = 'bpsn_auc' # stands for background positive, subgroup negative BNSP_AUC = 'bnsp_auc' # stands for background negative, subgroup positive def compute_auc(y_true, y_pred): try: return metrics.roc_auc_score(y_true, y_pred) except ValueError: return np.nan def compute_subgroup_auc(df, subgroup, label, model_name): subgroup_examples = df[df[subgroup]] return compute_auc(subgroup_examples[label], subgroup_examples[model_name]) def compute_bpsn_auc(df, subgroup, label, model_name): """Computes the AUC of the within-subgroup negative examples and the background positive examples.""" subgroup_negative_examples = df[df[subgroup] & ~df[label]] non_subgroup_positive_examples = df[~df[subgroup] & df[label]] examples = subgroup_negative_examples.append(non_subgroup_positive_examples) return compute_auc(examples[label], examples[model_name]) def compute_bnsp_auc(df, subgroup, label, model_name): """Computes the AUC of the within-subgroup positive examples and the background negative examples.""" subgroup_positive_examples = df[df[subgroup] & df[label]] non_subgroup_negative_examples = df[~df[subgroup] & ~df[label]] examples = subgroup_positive_examples.append(non_subgroup_negative_examples) return compute_auc(examples[label], examples[model_name]) def compute_bias_metrics_for_model(dataset, subgroups, model, label_col, include_asegs=False): """Computes per-subgroup metrics for all subgroups and one model.""" records = [] for subgroup in subgroups: record = { 'subgroup': subgroup, 'subgroup_size': len(dataset[dataset[subgroup]]) } record[SUBGROUP_AUC] = compute_subgroup_auc(dataset, subgroup, label_col, model) record[BPSN_AUC] = compute_bpsn_auc(dataset, subgroup, label_col, model) record[BNSP_AUC] = compute_bnsp_auc(dataset, subgroup, label_col, model) 
records.append(record) return pd.DataFrame(records).sort_values('subgroup_auc', ascending=True) # validate_df = pd.merge(val_ids.to_frame(), train) validate_df['preds'] = model.predict(X_val) validate_df = convert_dataframe_to_bool(validate_df) bias_metrics_df = compute_bias_metrics_for_model(validate_df, identity_columns, 'preds', 'target') print('Validation bias metric by group') display(bias_metrics_df) # + _kg_hide-input=true def power_mean(series, p): total = sum(np.power(series, p)) return np.power(total / len(series), 1 / p) def get_final_metric(bias_df, overall_auc, POWER=-5, OVERALL_MODEL_WEIGHT=0.25): bias_score = np.average([ power_mean(bias_df[SUBGROUP_AUC], POWER), power_mean(bias_df[BPSN_AUC], POWER), power_mean(bias_df[BNSP_AUC], POWER) ]) return (OVERALL_MODEL_WEIGHT * overall_auc) + ((1 - OVERALL_MODEL_WEIGHT) * bias_score) # train_df = pd.merge(train_ids.to_frame(), train) train_df['preds'] = model.predict(X_train) train_df = convert_dataframe_to_bool(train_df) print('Train ROC AUC: %.4f' % get_final_metric(bias_metrics_df, metrics.roc_auc_score(train_df['target'].values, train_df['preds'].values))) print('Validation ROC AUC: %.4f' % get_final_metric(bias_metrics_df, metrics.roc_auc_score(validate_df['target'].values, validate_df['preds'].values))) # - # # Predictions Y_test = model.predict(X_test) # + _kg_hide-input=true submission = pd.read_csv('../input/sample_submission.csv') submission['prediction'] = Y_test submission.to_csv('submission.csv', index=False) submission.head(10)
Model backlog/Deep Learning/[2nd] Bi LSTM - Baseline.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="JoMbQLjK3uHZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1596257769435, "user_tz": 420, "elapsed": 447, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-FXsetuIjTxg/AAAAAAAAAAI/AAAAAAAAAO4/TYiDKhUGRd0/s64/photo.jpg", "userId": "09258938025824762674"}} outputId="0163fec3-711f-496d-c555-03bb465c300d" #Declare a complex number and store it in a variable. a=2+2j #Check the type and print the id of the same. print(type(a)) print(id(a)) # + id="6N6Ee4BU33jk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} executionInfo={"status": "ok", "timestamp": 1596257723072, "user_tz": 420, "elapsed": 537, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-FXsetuIjTxg/AAAAAAAAAAI/AAAAAAAAAO4/TYiDKhUGRd0/s64/photo.jpg", "userId": "09258938025824762674"}} outputId="92345333-ec99-4124-f281-2ece086bc811" #Arithmatic Operations on complex number #Take two different complex number. #Store them in two different variables. #Do below operations on them:- #Find sum of both numbers #Find differce between them #Find the product of both numbers. #Find value after dividing first num with second number #Find the result of first num to the power of second number. b=4+2j c=6-3j #sum of both numbers sum=b+c print(sum) #difference between numbers difference=b-c print(difference) #product of both numbers product=b*c print(product) #value after dividing first num with second number div=b/c print(div) #the result of first num to the power of second number. 
expo=b**c print(expo) # + id="xhkdg7LD352y" colab_type="code" colab={} #Comparison Operation not applicable between instance of complex values #Object reusability concept is not applicable on complex numebr # + id="4lzPH2sb38KM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1596257142876, "user_tz": 420, "elapsed": 420, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-FXsetuIjTxg/AAAAAAAAAAI/AAAAAAAAAO4/TYiDKhUGRd0/s64/photo.jpg", "userId": "09258938025824762674"}} outputId="3214bb8a-b339-4f67-8ea0-38ee2722ba5b" #Equality Operator #Take two different complex numbers. #Store them in two different variables. #Equuate them using equality operator (==, !=) #Observe the output(return type should be boolean) d=5+6j e=5+6j y=7-6j print(d==e) print(d==y) # + id="EDEl19UD3_tr" colab_type="code" colab={} #Logical operators #Observe the output of below code #Cross check the output manually print(10+20j and 20+30j) #20+30j #----------------------------------------->Output is 20+30j print(0+0j and 20+30j) #0+0j #----------------------------------------->Output is 0j print(20+30j and 0+0j) #0+0j #----------------------------------------->Output is 0j print(0+0j and 0+0j) #0+0j #----------------------------------------->Output is 0j print(10+20j or 20+30j) #10+20j #----------------------------------------->Output is 10+20j print(0+0j or 20+30j) #20+30j #----------------------------------------->Output is 20+30j print(20+30j or 0+0j) #20+30j #----------------------------------------->Output is 20+30j print(0+0j or 0+0j) #0+0j #----------------------------------------->Output is 0j print(not 10+20j) #False #----------------------------------------->Output is False print(not 0+0j) #True #----------------------------------------->Output is True # + id="4ifueKbP4Br1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", 
"timestamp": 1596257211067, "user_tz": 420, "elapsed": 367, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-FXsetuIjTxg/AAAAAAAAAAI/AAAAAAAAAO4/TYiDKhUGRd0/s64/photo.jpg", "userId": "09258938025824762674"}} outputId="436002db-cb4f-4baa-ef49-deeb6ee4b9f6" #What is the output of expression inside print statement. Cross check before running the program. a = 10+20j b = 10+20j print(a is b) #False #True or False?--->False print(a is not b) #True #True or False?--->True # + id="TxMbr5jQ4Dwl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} executionInfo={"status": "ok", "timestamp": 1596257763885, "user_tz": 420, "elapsed": 321, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-FXsetuIjTxg/AAAAAAAAAAI/AAAAAAAAAO4/TYiDKhUGRd0/s64/photo.jpg", "userId": "09258938025824762674"}} outputId="5a2a186b-46e0-4af7-d1b0-b63e870d4b5f" #Membership operation #in, not in are two membership operators and it returns boolean value print('2.7' in 'Python2.7.8') #True print(10+20j in [10,10.20,10+20j,'Python']) #True print(10+20j in (10,10.20,10+20j,'Python')) #True print(30+40j in {1,20.30,30+40j}) #True print(30+40j in {1:100, 2.3:200, 30+40j:300}) #True print(10 in range(20)) #True
Harshitha/Copy of Complex_Assignement_harshitha.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="g_nWetWWd_ns" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="2pHVBk_seED1" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + cellView="form" colab={} colab_type="code" id="N_fMsQ-N8I7j" #@title MIT License # # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # + [markdown] colab_type="text" id="pZJ3uY9O17VN" # # Save and restore models # + [markdown] colab_type="text" id="M4Ata7_wMul1" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/save_and_restore_models"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/save_and_restore_models.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/save_and_restore_models.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="mBdde4YJeJKF" # Model progress can be saved during—and after—training. This means a model can resume where it left off and avoid long training times. Saving also means you can share your model and others can recreate your work. When publishing research models and techniques, most machine learning practitioners share: # # * code to create the model, and # * the trained weights, or parameters, for the model # # Sharing this data helps others understand how the model works and try it themselves with new data. # # Caution: Be careful with untrusted code—TensorFlow models are code. See [Using TensorFlow Securely](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for details. # # ### Options # # There are different ways to save TensorFlow models—depending on the API you're using. 
This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. For other approaches, see the TensorFlow [Save and Restore](https://www.tensorflow.org/guide/saved_model) guide or [Saving in eager](https://www.tensorflow.org/guide/eager#object_based_saving). # # + [markdown] colab_type="text" id="xCUREq7WXgvg" # ## Setup # # ### Installs and imports # + [markdown] colab_type="text" id="7l0MiTOrXtNv" # Install and import TensorFlow and dependencies: # + colab={} colab_type="code" id="RzIOVSdnMYyO" # !pip install h5py pyyaml # + [markdown] colab_type="text" id="SbGsznErXWt6" # ### Get an example dataset # # We'll use the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) to train our model to demonstrate saving weights. To speed up these demonstration runs, only use the first 1000 examples: # + colab={} colab_type="code" id="7Nm7Tyb-gRt-" from __future__ import absolute_import, division, print_function import os # !pip install tf-nightly-2.0-preview import tensorflow as tf keras = tf.keras tf.__version__ # + colab={} colab_type="code" id="9rGfFwE9XVwz" (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data() train_labels = train_labels[:1000] test_labels = test_labels[:1000] train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0 test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0 # + [markdown] colab_type="text" id="anG3iVoXyZGI" # ### Define a model # + [markdown] colab_type="text" id="wynsOBfby0Pa" # Let's build a simple model we'll use to demonstrate saving and loading weights. 
# + colab={} colab_type="code" id="0HZbJIjxyX1S" # Returns a short sequential model def create_model(): model = tf.keras.models.Sequential([ keras.layers.Dense(512, activation='relu', input_shape=(784,)), keras.layers.Dropout(0.2), keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) return model # Create a basic model instance model = create_model() model.summary() # + [markdown] colab_type="text" id="soDE0W_KH8rG" # ## Save checkpoints during training # + [markdown] colab_type="text" id="mRyd5qQQIXZm" # The primary use case is to automatically save checkpoints *during* and at *the end* of training. This way you can use a trained model without having to retrain it, or pick-up training where you left of—in case the training process was interrupted. # # `tf.keras.callbacks.ModelCheckpoint` is a callback that performs this task. The callback takes a couple of arguments to configure checkpointing. # # ### Checkpoint callback usage # # Train the model and pass it the `ModelCheckpoint` callback: # + colab={} colab_type="code" id="IFPuhwntH8VH" checkpoint_path = "training_1/cp.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) # Create checkpoint callback cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1) model = create_model() model.fit(train_images, train_labels, epochs = 10, validation_data = (test_images,test_labels), callbacks = [cp_callback]) # pass callback to training # + [markdown] colab_type="text" id="rlM-sgyJO084" # This creates a single collection of TensorFlow checkpoint files that are updated at the end of each epoch: # + colab={} colab_type="code" id="gXG5FVKFOVQ3" # !ls {checkpoint_dir} # + [markdown] colab_type="text" id="wlRN_f56Pqa9" # Create a new, untrained model. When restoring a model from only weights, you must have a model with the same architecture as the original model. 
Since it's the same model architecture, we can share weights despite that it's a different *instance* of the model. # # Now rebuild a fresh, untrained model, and evaluate it on the test set. An untrained model will perform at chance levels (~10% accuracy): # + colab={} colab_type="code" id="Fp5gbuiaPqCT" model = create_model() loss, acc = model.evaluate(test_images, test_labels) print("Untrained model, accuracy: {:5.2f}%".format(100*acc)) # + [markdown] colab_type="text" id="1DTKpZssRSo3" # Then load the weights from the checkpoint, and re-evaluate: # + colab={} colab_type="code" id="2IZxbwiRRSD2" model.load_weights(checkpoint_path) loss,acc = model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) # + [markdown] colab_type="text" id="bpAbKkAyVPV8" # ### Checkpoint callback options # # The callback provides several options to give the resulting checkpoints unique names, and adjust the checkpointing frequency. # # Train a new model, and save uniquely named checkpoints once every 5-epochs: # # + colab={} colab_type="code" id="mQF_dlgIVOvq" # include the epoch in the file name. (uses `str.format`) checkpoint_path = "training_2/cp-{epoch:04d}.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) cp_callback = tf.keras.callbacks.ModelCheckpoint( checkpoint_path, verbose=1, save_weights_only=True, # Save weights, every 5-epochs. period=5) model = create_model() model.save_weights(checkpoint_path.format(epoch=0)) model.fit(train_images, train_labels, epochs = 50, callbacks = [cp_callback], validation_data = (test_images,test_labels), verbose=0) # + [markdown] colab_type="text" id="1zFrKTjjavWI" # Now, look at the resulting checkpoints and choose the latest one: # + colab={} colab_type="code" id="p64q3-V4sXt0" # ! 
ls {checkpoint_dir} # + colab={} colab_type="code" id="1AN_fnuyR41H" latest = tf.train.latest_checkpoint(checkpoint_dir) latest # + [markdown] colab_type="text" id="Zk2ciGbKg561" # Note: the default tensorflow format only saves the 5 most recent checkpoints. # # To test, reset the model and load the latest checkpoint: # + colab={} colab_type="code" id="3M04jyK-H3QK" model = create_model() model.load_weights(latest) loss, acc = model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) # + [markdown] colab_type="text" id="c2OxsJOTHxia" # ## What are these files? # + [markdown] colab_type="text" id="JtdYhvWnH2ib" # The above code stores the weights to a collection of [checkpoint](https://www.tensorflow.org/guide/saved_model#save_and_restore_variables)-formatted files that contain only the trained weights in a binary format. Checkpoints contain: # * One or more shards that contain your model's weights. # * An index file that indicates which weights are stored in a which shard. # # If you are only training a model on a single machine, you'll have one shard with the suffix: `.data-00000-of-00001` # + [markdown] colab_type="text" id="S_FA-ZvxuXQV" # ## Manually save weights # # Above you saw how to load the weights into a model. # # Manually saving the weights is just as simple, use the `Model.save_weights` method. # + colab={} colab_type="code" id="R7W5plyZ-u9X" # Save the weights model.save_weights('./checkpoints/my_checkpoint') # Restore the weights model = create_model() model.load_weights('./checkpoints/my_checkpoint') loss,acc = model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) # + [markdown] colab_type="text" id="kOGlxPRBEvV1" # ## Save the entire model # # The model and optimizer can be saved to a file that contains both their state (weights and variables), and the model configuration. 
This allows you to export a model so it can be used without access to the original python code. Since the optimizer-state is recovered you can even resume training from exactly where you left off. # # Saving a fully-functional model is very useful—you can load them in TensorFlow.js ([HDF5](https://js.tensorflow.org/tutorials/import-keras.html), [Saved Model](https://js.tensorflow.org/tutorials/import-saved-model.html)) and then train and run them in web browsers, or convert them to run on mobile devices using TensorFlow Lite ([HDF5](https://www.tensorflow.org/lite/convert/python_api#exporting_a_tfkeras_file_), [Saved Model](https://www.tensorflow.org/lite/convert/python_api#exporting_a_savedmodel_)) # + [markdown] colab_type="text" id="SkGwf-50zLNn" # ### As an HDF5 file # # Keras provides a basic save format using the [HDF5](https://en.wikipedia.org/wiki/Hierarchical_Data_Format) standard. For our purposes, the saved model can be treated as a single binary blob. # + colab={} colab_type="code" id="m2dkmJVCGUia" model = create_model() model.compile(optimizer='adam', loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) model.fit(train_images, train_labels, epochs=5) # Save entire model to a HDF5 file model.save('my_model.h5') # + [markdown] colab_type="text" id="GWmttMOqS68S" # Now recreate the model from that file: # + colab={} colab_type="code" id="5NDMO_7kS6Do" # Recreate the exact same model, including weights and optimizer. 
new_model = keras.models.load_model('my_model.h5') new_model.summary() # + [markdown] colab_type="text" id="JXQpbTicTBwt" # Check its accuracy: # + colab={} colab_type="code" id="jwEaj9DnTCVA" loss, acc = new_model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) # + [markdown] colab_type="text" id="dGXqd4wWJl8O" # This technique saves everything: # # * The weight values # * The model's configuration(architecture) # * The optimizer configuration # + [markdown] colab_type="text" id="kPyhgcoVzqUB" # ### As a `saved_model` # + [markdown] colab_type="text" id="LtcN4VIb7JkK" # Caution: This method of saving a `tf.keras` model is experimental and may change in future versions. # + [markdown] colab_type="text" id="DSWiSB0Q8c46" # Build a fresh model: # + colab={} colab_type="code" id="sI1YvCDFzpl3" model = create_model() model.fit(train_images, train_labels, epochs=5) # + [markdown] colab_type="text" id="iUvT_3qE8hV5" # Create a `saved_model`: # + colab={} colab_type="code" id="sq8fPglI1RWA" saved_model_path = tf.keras.experimental.export(model, "./saved_models") # + [markdown] colab_type="text" id="MjpmyPfh8-1n" # Saved models are placed in a time-stamped directory: # + colab={} colab_type="code" id="ZtOvxA7V0iTv" # !ls saved_models/ # + [markdown] colab_type="text" id="B7qfpvpY9HCe" # Reload a fresh keras model from the saved model. # + colab={} colab_type="code" id="0YofwHdN0pxa" new_model = tf.keras.experimental.load_from_saved_model(saved_model_path) new_model.summary() # + [markdown] colab_type="text" id="uWwgNaz19TH2" # Run the restored model. # + colab={} colab_type="code" id="Yh5Mu0yOgE5J" model.predict(test_images).shape # + colab={} colab_type="code" id="Pc9e6G6w1AWG" # The re-loaded model needs to be re-compiled. 
new_model.compile(optimizer=model.optimizer, # keep the optimizer that was loaded loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy']) loss, acc = new_model.evaluate(test_images, test_labels) print("Restored model, accuracy: {:5.2f}%".format(100*acc)) # + [markdown] colab_type="text" id="eUYTzSz5VxL2" # ## What's Next # # That was a quick guide to saving and loading in with `tf.keras`. # # * The [tf.keras guide](https://www.tensorflow.org/guide/keras) shows more about saving and loading models with `tf.keras`. # # * See [Saving in eager](https://www.tensorflow.org/guide/eager#object_based_saving) for saving during eager execution. # # * The [Save and Restore](https://www.tensorflow.org/guide/saved_model) guide has low-level details about TensorFlow saving.
site/en/r2/tutorials/keras/save_and_restore_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="MRXgvNO4GGTK" # ## *Multi-Layer Perceptron* (Perceptron de Muitas Camadas) # + [markdown] colab_type="text" id="C25cQbJy48P8" # # Agora vamos resolver um problema prático e bastante desafiador, para verificar o real poder das redes neurais artificiais. Como de costume, vamos primeiro investigar a base de dados que utilizaremos. Dê uma olhada: # # https://github.com/Natalnet/GCiD/raw/master/Codes/Data/curitibadiario.csv # + colab={} colab_type="code" id="GA54QwgTGZpu" import pandas as pd import matplotlib.pyplot as plt #Leitura de dados dataSet = pd.read_csv("https://github.com/Natalnet/GCiD/raw/master/Codes/Data/curitibadiario.csv", skiprows = 16, sep=';') # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="Mz2WiP7s5fCE" outputId="3a136153-6fb1-4f3b-9b68-ac8d78c38d91" #Visualizando cabeçalho dos dados dataSet.head() # + [markdown] colab_type="text" id="L80q1OJ1-PEj" # # Velho problema conhecido, que já sabemos como resolver. Vamos simplificar e juntar todos os passos (já estudados) em uma função, que vai retornar nossa base de dados bem mais organizada. 
# + colab={} colab_type="code" id="suOCgeY8-Zuk" def OrganizarDados(dataSet): #Transformando data em variável do tipo datetime dataSet["Data"] = pd.to_datetime(dataSet["Data"], format = "%d/%m/%Y") #Atribuindo "Data" como índice para a base de dados dataSet = dataSet.set_index("Data") #Separando os dados em dois dataframes, um para as 00:00 h e outro para as 12:00 h dataSet00, dataSet12 = dataSet[dataSet["Hora"] == 0], dataSet[dataSet["Hora"] == 1200] #Descartando as colunas "Hora", "Estacao" e "Unnamed: 11" dataSet00, dataSet12 = dataSet00.drop(columns = ["Hora", "Estacao", "Unnamed: 11"]), dataSet12.drop(columns = ["Hora", "Estacao", "Unnamed: 11"]) #Eliminando colunas completas por "NaN" em cada uma das bases de dados dataSet00 = dataSet00.drop(columns = ["Precipitacao","TempMinima"]) dataSet12 = dataSet12.drop(columns = ["TempMaxima","Insolacao","Evaporacao Piche","Temp Comp Media","Umidade Relativa Media","Velocidade do Vento Media"]) #Criando o intervalo completo de tempo de 01-01-2005 à 31-12-2017 dataInicial = '2005-01-01' dataFinal = '2017-12-31' tempo = pd.date_range(dataInicial, dataFinal) #Atribuindo este intervalo de tempo à um dataSet provisório dataSetProv = pd.DataFrame() dataSetProv["Data"] = tempo #Atribuindo o índice para o dataSet provisório como sendo a coluna de datas dataSetProv = dataSetProv.set_index("Data") #Mesclando o dataSet provisório como sendo o resultado da junção dos dataSet00 e dataSet12 dataSetProv = dataSetProv.join(dataSet00).join(dataSet12) #Tornando o dataSet como sendo o dataSet provisório dataSet = dataSetProv return dataSet # + colab={} colab_type="code" id="bKoRgM0c-rjf" #Aplicando a função ao dataSet dataSet = OrganizarDados(dataSet) # + [markdown] colab_type="text" id="Ut10fr2I6NWW" # # Nosso objetivo hoje é prever temperatura máxima para um determinado dia, dada uma determinada combinação de outras variáveis. Será que conseguimos? 
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="DhT2TpIg6DY_" outputId="e2fd298f-8e5e-4ef7-a9e4-8ce40f318a41" #Visualizando relação dos dados plt.scatter(dataSet.index, dataSet["TempMaxima"]) plt.title("Distribuição de temperatura máxima ao longo dos últimos anos") plt.xlabel("Data") plt.ylabel("Temperatura máxima diária") plt.show() # + [markdown] colab_type="text" id="NnUnyVf160QZ" # # Não parece nada simples, hein? Desafio você a tentar modelar uma função polinomial que descreva o comportamento desse problema. O último ser humano que tentou realizar essa proeza foi encontrado meses depois congelado em uma caverna no Alaska (fonte: FEI - Fatos que Eu Inventei) # + colab={} colab_type="code" id="7ASFRyV7-xxF" #Tornando a informação de mês uma variável do dataSet dataSet["Mes"] = dataSet.index.month # + colab={} colab_type="code" id="7-eMKEw0-0us" #Eliminando eventos inconsistentes da base de dados dataSet = dataSet.dropna() # + colab={} colab_type="code" id="iTMULiEu-3Ds" #Resentando o índice do dataSet dataSet = dataSet.reset_index(drop = True) # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="n1HddWA3-5p6" outputId="7ce4e5d0-e06c-4f35-ee05-44fb3330b3eb" #Visualizando cabeçalho dos dados dataSet.head() # + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="NFGaK5bh_AXU" outputId="2c0be85e-ed9c-49d9-aef0-4a3be7cc3a51" #Visualizando descrição resumida dos dados dataSet.describe() # + [markdown] colab_type="text" id="q3bpc4eB_Js-" # # Agora vamos separar o que queremos prever (TempMaxima) do restante da base de dados (dataSet) # + colab={} colab_type="code" id="D7ZEpnrw7Jc5" #Separando "TempMaxima" de dataSet tempMax = dataSet["TempMaxima"] dataSet = dataSet.drop(columns = ["TempMaxima"]) # + colab={} colab_type="code" id="ScYBJzXH6zY8" from sklearn.model_selection import train_test_split #Separando os conjuntos de treino e teste com os dados embaralhados, 
sendo 80% para treino e 20% para teste X_train, X_test, y_train, y_test = train_test_split(dataSet, tempMax, test_size = 0.2, random_state = 9) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="q9h-ds5RDtJO" outputId="5d051a83-4e48-4ce7-c8ba-958694ec9d8b" #Visualizando as dimensões da nossa base de treino (isso será útil mais na frente) print(X_train.shape) # + colab={} colab_type="code" id="Dq5QW74EBvBa" from sklearn.preprocessing import MinMaxScaler #Aplicando uma transformação que lineariza os dados entre -1 e 1 mms = MinMaxScaler(feature_range=(-1, 1)) X_train = mms.fit_transform(X_train) X_test = mms.transform(X_test) # + [markdown] colab_type="text" id="rD4BBoIxCcku" # # Agora chegamos no momento esperado: Usar a MLP # + colab={} colab_type="code" id="BXpbv8XKCbwM" from keras.models import Sequential #Declarando a variável "mlp" como um objeto do tipo "Sequential" (referente às redes de múltiplas camadas) mlp = Sequential() # + colab={} colab_type="code" id="1LJ3GyNCDS6d" from keras.layers import Dense #Vamos agora "brincar" de adicionar as camadas #Adicionando a camada de entrada mlp.add(Dense(activation = 'tanh', input_dim = 8, units = 8, kernel_initializer = 'random_uniform', bias_initializer='zeros')) #Adicionando a camada escondida mlp.add(Dense(activation = 'tanh', units = 8, kernel_initializer = 'random_uniform', bias_initializer='zeros')) #Adicionando a camada de saída mlp.add(Dense(units = 1, kernel_initializer = 'random_uniform', bias_initializer='zeros')) # Compilando o modelo mlp.compile(loss='mean_squared_error', optimizer = 'adam', metrics= ['mae']) # + [markdown] colab_type="text" id="rwCWmFVMFDRC" # # Achou um trabalho árduo? Bom, de todo modo, já chegou ao fim. Agora vamos treinar! 
# + colab={"base_uri": "https://localhost:8080/", "height": 13634} colab_type="code" id="S57RkwBVFK8J" outputId="8cc776a1-3135-4c41-97a2-237428d8239f" #A hora da verdade mlp.fit(x = X_train, y = y_train, epochs = 200, verbose=1) # + colab={} colab_type="code" id="PNL9R7BCHT-z" #Prevendo os valores de temperatura para o conjunto de teste y_pred = mlp.predict(X_test) # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="VK7jL8RSE3eV" outputId="23ccb5fa-6462-4288-9a49-f484da313d7a" #Vamos primeiro validar o aprendizado graficamente plt.scatter(y_pred, y_test) plt.title("Temperatura máxima estimada para o conjunto de teste") plt.xlabel("Temperatura estimada") plt.ylabel("Temperatura real") plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="UihUGSZbDDmK" outputId="b33c2cfa-42d5-4d67-9f12-ceca10422147" from sklearn.metrics import mean_absolute_error #Erro absoluto médio entre a temperatura máxima prevista e a real print(mean_absolute_error(y_pred, y_test)) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="nNB6thyrHPxR" outputId="c9be2655-daf3-4ba5-a79e-0327d0ff399c" from sklearn.metrics import mean_squared_error #Erro quadrático médio entre a temperatura máxima prevista e a real print(mean_squared_error(y_pred, y_test)) # - # Um Jupyter-Notebook escrito por <b> <NAME> </b> sob a orientação do Professor <b> Orivaldo Vieira de Santana Junior </b> # <br> Contatos: # <br> Links para Github # <br> Gilvandro - https://github.com/gilvandrocesardemedeiros # <br> Orivaldo - https://github.com/orivaldosantana # <br> Link para Linkedin # <br> Gilvandro - https://www.linkedin.com/in/gilvandromedeiros/
Day 3/01 - Multilayer Perceptron [MLP].ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.4 64-bit # name: python3 # --- # # Data Preprocessing # ## Importing Libraries # + #importing Libraries import pandas as pd import os import sys #import local libraries #Adding scripts path sys.path.append(os.path.abspath(os.path.join('..'))) #importing dvc_data_loader script # from scripts.dvc_data_loader import * from scripts.data_information import DataInfo from scripts.data_loader import load_df_from_csv from scripts.data_manipulation import DataManipulator from scripts.data_cleaner import DataCleaner from scripts.utlity_functions import convert_to_month_name from scripts.grapher import * from sklearn import preprocessing # - pd.set_option('max_column', None) # ## Loading Data combined_df = load_df_from_csv('../data/train.csv') combined_df.info() combined_df.head(5) combined_df.drop(['Unnamed: 0'], axis=1, inplace=True) # Saving Holiday dates for later prediciton holiday_days = combined_df[['Month', 'Day', 'StateHoliday']] holiday_days = holiday_days[holiday_days['StateHoliday'] != 0] holiday_days = holiday_days[holiday_days['StateHoliday'] != '0'] holiday_days = holiday_days.drop_duplicates() holiday_days.drop('StateHoliday', axis=1, inplace=True) holiday_days.to_csv('../models/holiday_reference.csv', index=False) # ## Handling None and Empty Values info = DataInfo(combined_df) info.get_column_based_missing_percentage() manipulator = DataManipulator(combined_df) # Fill missing numeric values manipulator.fill_columns_with_max(info.get_numeric_columns()) # Fill non-numeric values (categorical values) manipulator.fill_columns_with_most_frequent(info.get_object_columns()) info.get_columns_with_missing_values() # ## Adding Additional Extracted Data # ### WeekDays manipulator.add_week_day('DayOfWeek') # ### WeekEnds # # > Doesnt Add additional information # 
# Weekend indicator feature.
# Fixed: the original read `combined_df.add_week_ends(combined_df, 'DayOfWeek')`,
# but `combined_df` is a plain DataFrame with no such method, so the line raised
# AttributeError.  Every sibling feature below goes through `manipulator`.
# NOTE(review): assumes DataManipulator.add_week_ends takes just the column
# name, like add_week_day above -- confirm against scripts/data_manipulation.py.
manipulator.add_week_ends('DayOfWeek')

# ### Number of days to Holidays
#

manipulator.add_number_of_days_to_holiday('StateHoliday')

# ### Number of days after Holiday
#

manipulator.add_number_of_days_after_holiday('StateHoliday')

# ### Beginning of month, mid month and ending of month
#

manipulator.add_month_timing('Day')

# ### More Features

# ### Season (Winter, April, ...)

manipulator.add_season('Month')

## Added dataframe status
combined_df.info()

# ## Drop Columns
# Store Date Customers

# Drop columns that are not used as model features.
combined_df.drop(['Store','Date','Customers'],axis=1,inplace=True)

## Dropped dataframe status
combined_df.info()

# # Save UnLabeled Data

unlabeled_data = DataCleaner(combined_df)
unlabeled_data.save_clean_data('../data/train.csv')

# ## Encoding Data to Numbers

# +
# Label-encode the object-type columns (StoreType, Assortment, StateHoliday, Season)
manipulator.label_columns(['Season', 'StateHoliday', 'StoreType', 'Assortment'])
info.get_object_columns()
# -

combined_df['PromoInterval'].value_counts()

manipulator.label_columns(['PromoInterval'])
info.get_object_columns()

## Label Encoded dataframe status
combined_df.info()

# ## Scaling Data
#
# Using StandardScaler to standardize all the columns

scale_list = combined_df.columns.to_list()

# Scale every column in place.
manipulator.standardize_columns(scale_list)
info.get_min_max_of_dataframe_columns()

# # Save Clean Data

cleaner = DataCleaner(info.df)
cleaner.optimize_df()
cleaner.save_clean_data('../data/train.csv')
notebooks/data_preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Import Dependencies
import time
import pymongo
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager

# +
# Launch a visible Chrome instance driven by splinter.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless = False)
# -

# Initialize PyMongo to work with MongoDBs
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)

# Define database and collection
db = client.MissionToMars_db
collection = db.marsdata
print(collection)

# +
# Visit the URL for mars news
url = 'https://redplanetscience.com/'
browser.visit(url)

# pull html into Beautiful Soup parser
html = browser.html
soup = bs(html, 'html.parser')
# -

print(soup)

# results are returned as an iterable list
news_elements = soup.find_all('div', class_='list_text')
news_title = news_elements[0].find('div', class_ = 'content_title').text
news_p = news_elements[0].find('div', class_ = 'article_teaser_body').text
news_title, news_p

# Fixed: the original called browser.quit() here, which closed the browser
# while every later cell still needs it; the quit now happens once, after
# all scraping is finished.

# +
# Visit the URL for featured images
url = 'https://spaceimages-mars.com/'
browser.visit(url)

# pull html into Beautiful Soup parser
html = browser.html
soup = bs(html, 'html.parser')
# -

print(soup)

# find the url of the featured image
image = soup.find('img', class_ = 'headerimage fade-in')['src']
featured_imge_url = url + image
print(f'The featured image URL: {featured_imge_url}')

# Visit the Mars Facts webpage
mars_facts = 'https://space-facts.com/mars/'
mars_fact_table = pd.read_html(mars_facts)

# +
df = mars_fact_table[0]

# Create Data Frame
df.columns = ["Description", "Value"]

# Set index to Description
df.set_index("Description", inplace=True)

# Print Data Frame
df

# +
# Save html code to folder Assets
html_table = df.to_html()

# Strip unwanted newlines to clean up the table.
# Fixed: str.replace returns a new string (strings are immutable); the
# original discarded the result, so the newlines were never removed.
html_table = html_table.replace("\n", '')

# Save html code
df.to_html("mars_facts_data.html")

html_table

# +
# obtain high resolution images for each of Mars' hemispheres
url = 'https://marshemispheres.com/'
browser.visit(url)

# pull html into Beautiful Soup parser
html = browser.html
soup = bs(html, 'html.parser')
# -

print(soup)

# +
# list that will hold one {title, image_url} dictionary per hemisphere
img_title_list = []

# loop through the results to retrieve each high-resolution image url and title
high_reso_image = soup.find_all('div', class_ = 'description')
for image in high_reso_image:
    # title of the image
    image_title = image.find('h3').get_text()
    # find image url
    img_url = image.find('a', class_ = 'itemLink product-item')['href']
    hemis_url = url + img_url
    # now find the high resolution image from 'hemis_url'
    browser.visit(hemis_url)
    html = browser.html
    soup = bs(html, 'html.parser')
    # get image src
    img_src = soup.find('img', class_='wide-image')['src']
    # create image link
    highresol_imgurl = url + img_src
    # dictionary of title and high-resolution url for this hemisphere
    hemisphere_image_url = [{
        'title': image_title,
        'image_url': highresol_imgurl
    }]
    # append titles and images to the list
    img_title_list += hemisphere_image_url

# print(img_title_list)
for high_res_image in img_title_list:
    print(high_res_image['image_url'], high_res_image['title'])
# -

# All scraping is done -- release the browser now.
browser.quit()

# dictionary for all the data gathered above
mars_information = {
    "news_title": news_title,
    "news_p": news_p,
    "featured_imge_url": featured_imge_url,
    "facts_table": html_table,
    "hemispheres": img_title_list
}
mars_information

# +
# import data to MongoDB
from pymongo import MongoClient
client = MongoClient()
client

# NOTE(review): this writes to database "MissionToMars_DB", while the client
# at the top of the script used "MissionToMars_db" -- MongoDB database names
# are case-sensitive, so these are two different databases; confirm which one
# the web app reads from before unifying.
client = MongoClient(host="localhost", port=27017)
db = client.MissionToMars_DB
collection = db.marsdata

result = collection.insert_one(mars_information)
result
# -
scrape_mars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PySpark # language: python # name: pysparkkernel # --- # # Task 4 (Guided Exercise) # This notebook is part of Milestone 3, task 3 and is a guided exercise. I have put guidelines and helpful links (as comments) along with this notebook to take you through this. # # In this exercise you will be using Spark's MLlib. The idea is to tune some hyperparameters of a Random Forest to find an optimum model. Once we know the optimum settings, we'll train a Random Forest in sklearn (task 4)and save it with joblib (task 5) (so that we can use it next week to deploy). # # Here consider MLlib as another python package that you are using, like the scikit-learn. You will be seeing many scikit-learn similar classes and methods available in MLlib for various ML related tasks, you might also notice that some of them are not yet implimented in MLlib. What you write using pyspark package will be using the spark engine to run your code, and hence all the benefits of distributed computing what we discussed in class. # # NOTE: Here whenever you use spark makes sure that you refer to the right documentation based on the version what you will be using. [Here](https://spark.apache.org/docs/) you can select the version of the spark and go to the correct documentation. In our case we are using spark 3.1.2, and here is the link to spark documetation that you can refer to, # - [MLlib Documentation](https://spark.apache.org/docs/3.1.2/ml-guide.html) # - [MLlib API Reference](https://spark.apache.org/docs/3.1.2/api/python/reference/pyspark.ml.html) # # You may notice that there are RDD-based API and DataFrame-based (Main Guide) API available in the documentation. You want to focus on DataFrame based API as no one these days use RDD based API. We will discuss the difference in class. 
# # Before you start this notebook make sure that you are using EMR jupyterHub and the kernal that you selected is PySpark. # ## Import necessary libraries from pyspark.ml import Pipeline from pyspark.context import SparkContext from pyspark.sql.session import SparkSession from pyspark.ml.feature import VectorAssembler, UnivariateFeatureSelector from pyspark.ml.evaluation import RegressionEvaluator from pyspark.ml.regression import RandomForestRegressor as sparkRFR from pyspark.ml.tuning import CrossValidator, ParamGridBuilder import pandas as pd # ## Read the data # To start with; read 100 data points for development purpose. Once your code is ready then try on the whole dataset. ## Depending on the permissions that you provided to your bucket you might need to provide your aws credentials ## to read from the bucket, if so provide with your credentials and pass as storage_options=aws_credentials # aws_credentials = {"key": "","secret": "","token":""} ## here 100 data points for testing the code pandas_df = pd.read_csv("s3://mds-s3-25/output/ml_data_SYD.csv", index_col=0, parse_dates=True).iloc[:100].dropna() # pandas_df = pd.read_csv("s3://xxxx/output/ml_data_SYD.csv", index_col=0, parse_dates=True).dropna() feature_cols = list(pandas_df.drop(columns="observed_rainfall").columns) # ## Preparing dataset for ML # Load dataframe and coerce features into a single column called "Features" # This is a requirement of MLlib # Here we are converting your pandas dataframe to a spark dataframe, # Here "spark" is a spark session I will discuss this in our Wed class. # It is automatically created for you in this notebook. 
# read more here https://blog.knoldus.com/spark-createdataframe-vs-todf/ training = spark.createDataFrame(pandas_df) assembler = VectorAssembler(inputCols=feature_cols, outputCol="Features") training = assembler.transform(training).select("Features", "observed_rainfall") # ## Find best hyperparameter settings # Official Documentation of MLlib, Random forest regression [here](http://spark.apache.org/docs/3.0.1/ml-classification-regression.html#random-forest-regression). # # Here we will be mainly using following classes and methods; # # - [RandomForestRegressor](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.regression.RandomForestRegressor.html) # - [ParamGridBuilder](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.tuning.ParamGridBuilder.html) # - addGrid # - build # - [CrossValidator](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.tuning.CrossValidator.html) # - fit # # Use these parameters for coming up with ideal parameters, you could try more parameters, but make sure you have enough power to do it. But you are required to try only following parameters. This will take around 15 min on entire dataset.... # # - Use numTrees as [10, 50,100] # - maxDepth as [5, 10] # - bootstrap as [False, True] # - In the CrossValidator use evaluator to be RegressionEvaluator(labelCol="Observed") # # ***Additional reference:*** You can refer to [here](https://www.sparkitecture.io/machine-learning/regression/random-forest) and [here](https://www.silect.is/blog/random-forest-models-in-spark-ml/). # Some additional reading [here](https://projector-video-pdf-converter.datacamp.com/14989/chapter4.pdf) # + ##Once you finish testing the model on 100 data points, then load entire dataset and run , this could take ~15 min. ## write code here. 
rf = sparkRFR(featuresCol='Features', labelCol='observed_rainfall') grid=ParamGridBuilder()\ .addGrid(rf.numTrees, [10, 50,100]) \ .addGrid(rf.maxDepth, [5, 10]) \ .addGrid(rf.bootstrap, [False, True]) \ .build() evaluator = RegressionEvaluator(labelCol="observed_rainfall") cv = CrossValidator(estimator=rf, estimatorParamMaps=grid, evaluator=evaluator, parallelism=2) cvModel = cv.fit(training) # - # Print run info print("\nBest model") print("==========") print(f"\nCV Score: {min(cvModel.avgMetrics):.2f}") print(f"numTrees: {cvModel.bestModel.getNumTrees}") print(f"MaxDepth: {cvModel.bestModel.getMaxDepth()}") print(f"Bootstrap: {cvModel.bestModel.getBootstrap()}")
milestone3/Milestone3-Task4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pickle
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier


def _command_to_label(cmd):
    """Map a recorded snake command string to an integer class label.

    UP/DOWN/LEFT/RIGHT -> 0/1/2/3; anything else (e.g. no command) -> 4.
    Replaces the two duplicated if/elif chains of the original script.
    """
    return {"UP": 0, "DOWN": 1, "LEFT": 2, "RIGHT": 3}.get(cmd, 4)


# Load a sample of the recorded game data (context manager closes the file
# even if unpickling fails; the original used open/close by hand).
with open("/Users/peggy/Documents/109-2(2-2)/machine_learning/MLGame/games/snake/log/snake.pickle", "rb") as file:
    data = pickle.load(file)
type(data['ml'])
# -

game_info = data["ml"]["scene_info"]
game_command = data["ml"]["command"]
print(game_info)
print(game_command)

# +
# Seed the feature matrix with frame 1 and encode its command in place.
g = game_info[1]
feature = np.array([g["snake_head"][0], g["snake_head"][1]])
print(feature)
print(game_command[1])
game_command[1] = _command_to_label(game_command[1])

# +
# Stack the snake-head position of each remaining frame and encode its command.
for i in range(2, len(game_info) - 1):
    g = game_info[i]
    feature = np.vstack((feature, [g["snake_head"][0], g["snake_head"][1]]))
    game_command[i] = _command_to_label(game_command[i])

# Labels line up with the feature rows: indices 1 .. len-2.
answer = np.array(game_command[1:-1])
print(feature)
print(feature.shape)
print(answer)
print(answer.shape)
# -

# # MLPClassifier

# +
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(feature, answer, test_size=0.3, random_state = 7)

MLPC = MLPClassifier(random_state=1, max_iter=300).fit(x_train, y_train)
MLPC.predict_proba(x_test)
MLPC.predict(x_test)
MLPC.score(x_test, y_test)

# Persist the trained model for the game-playing agent.
with open("model_MLPC.pickle", "wb") as file:
    pickle.dump(MLPC, file)
# -

print("Accuracy score (validation): {0:.3f}".format(MLPC.score(x_test, y_test)))
# Fixed labels: these reports are for the MLP classifier (the original
# printed "Raandom Forests" here, copied from another script).
print("Confusion Matrix for MLP Classifier:")
print(confusion_matrix(y_test, MLPC.predict(x_test)))
print()
print("Classification Report for MLP Classifier")
print(classification_report(y_test, MLPC.predict(x_test)))
MLGame/games/snake/ml/others/train.py_ian.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/victoria2012/test_visualization/blob/master/pandas_plot.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="BeBHwf1acryE" import pandas as pd # + colab={"base_uri": "https://localhost:8080/", "height": 328} id="I8rfcQm6c8vb" outputId="307328e8-583b-4730-e2b9-0429912644fa" df = pd.read_excel('./남북한발전전력량.xlsx') df # + colab={"base_uri": "https://localhost:8080/", "height": 111} id="QYngmofHdITD" outputId="9581db0f-1615-4d7a-e3b7-edc91919dce3" df.iloc[0:2,0:2] # + id="M-x6pJSLfEP5" df_ns = df.iloc[[0,5],2:] # + colab={"base_uri": "https://localhost:8080/", "height": 142} id="h2hQ7Ct8gRjr" outputId="cd8ca287-03df-4202-898e-5a26a346ef18" df.loc['0':'2','전력량 (억㎾h)':'발전 전력별'] # + colab={"base_uri": "https://localhost:8080/", "height": 111} id="sIpbAFqMhzWp" outputId="80e1e8f7-0be3-4c3b-9294-6b545f256bfa" df.loc[[0,5],'1990':] # + colab={"base_uri": "https://localhost:8080/", "height": 111} id="0nWo7cU2isx3" outputId="ac73e871-175c-4160-a21f-2ec20a0acbbf" df_ns # + colab={"base_uri": "https://localhost:8080/", "height": 111} id="4flwedP8jvmw" outputId="b31694a4-466a-4fba-cda3-ae11bbd023fa" df_ns.index = ['South','North'] df_ns # + colab={"base_uri": "https://localhost:8080/"} id="Q5TfDKXJkD5J" outputId="f06f8679-d292-490e-c927-557e07941df9" type(df_ns) # + colab={"base_uri": "https://localhost:8080/", "height": 479} id="1u4IyqKAkmoo" outputId="af5a1dd9-fbe8-4c28-c2d4-fddbdda27580" df_ns.plot() # + colab={"base_uri": "https://localhost:8080/", "height": 886} id="SGWoOi8HktPp" outputId="88b5ea87-3b89-4813-b225-8732316d75e1" df_ns = df_ns.transpose() df_ns # + colab={"base_uri": 
"https://localhost:8080/", "height": 283} id="pc7J5CTJ3u09" outputId="ea1b559d-5095-4670-dbc5-a85289f758a6" df_ns.plot() # + id="rXaNbuV94IZt"
pandas_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import seaborn as sns import matplotlib as plot from matplotlib import style from matplotlib import pyplot as plt # %matplotlib inline from IPython.display import display data=pd.read_excel('PL_2018-2019.xlsx') data.shape with pd.option_context('display.max_rows',10): display(data) plt_date_grp = data.groupby('PLANT_DATE') (plt_date_grp[['PLOT_NO','PLANT_AREA','YIELD']].sum()).head(15) data.YIELD.plot() data.SUPPLIED_TONNS.plot() data.YIELD.plot() crops = data['CROP_TYPE_NAME'] yld=data['YIELD'] supplied=data['SUPPLIED_TONNS'] disposal=data['DISPOSAL_TONNS'] area=data['PLANT_AREA'] date=data['PLANT_DATE'] sns.scatterplot(supplied,yld); sns.scatterplot(supplied, yld, hue=crops, s=50); data.describe() yld np.arange(0,200 , 20) plt.hist(yld,bins=np.arange(0,200 , 20)); plt.title("YEILD IN TONNS") plt.hist(area); plt.title("AREA USED FOR PLANTING") PL=data[data.CROP_TYPE_NAME == 'PL'] R1=data[data.CROP_TYPE_NAME == 'R1'] R2=data[data.CROP_TYPE_NAME == 'R2'] plt.hist(PL.YIELD,alpha =0.4,bins=np.arange(0,200 , 20)); plt.hist(R1.YIELD,alpha =0.4,bins=np.arange(0,200 , 20)); plt.hist(R2.YIELD,alpha =0.4,bins=np.arange(0,200 , 20)); plt.legend(['PL','R1','R2']) plt.hist(PL.PLANT_AREA,alpha =0.4,bins=np.arange(0,20 , 2)); plt.hist(R1.PLANT_AREA,alpha =0.4,bins=np.arange(0,20 , 2)); plt.hist(R2.PLANT_AREA,alpha =0.4,bins=np.arange(0,20 , 2)); plt.legend(['PL','R1','R2']) plt.hist([PL.PLANT_AREA, R1.PLANT_AREA, R2.PLANT_AREA], bins=np.arange(0,20 , 2), stacked=True); plt.legend(['PL','R1','R2']) plt.bar(yld,area) a = data["CROP_TYPE_NAME"] b = data["YIELD"] plt.xlabel("CROP TYPE") plt.ylabel("Yield") plt.bar(a, b) c_name = data.CROP_TYPE_NAME.unique() gsm = [] for i in range(0, len(c_name)): 
gsm.append((len(data[(data['CROP_TYPE_NAME'] == c_name[i]) & (data['DIVERSION_CENTER_NAME'] == 'GSM_HARVEST')].CROP_TYPE_NAME) / len( data[data['CROP_TYPE_NAME'] == c_name[i]].CROP_TYPE_NAME)) * 100) plt.bar(c_name, gsm) plt.xlabel('CROP_TYPE') plt.ylabel('CORPS TAKEN BY GSM_HARVEST IN %') plt.show() ndata=data[['YIELD','CLUSTER_NAME','DISPOSAL_NAME']].copy() # + fig,axes = plt.subplots(3,3 , figsize=(16,16)) plt.tight_layout(pad=3) axes[1,0].set_title('Yield VS Supplied Tonnes') sns.scatterplot(supplied, yld, hue=crops, s=50, ax=axes[1,0]); axes[1,1].hist([PL.PLANT_AREA, R1.PLANT_AREA, R2.PLANT_AREA], bins=np.arange(0,20 , 2), stacked=True); axes[1,1].legend(['PL','R1','R2']) axes[1,1].set_title('Crop type VS Area') axes[1,2].hist([PL.YIELD,R1.YIELD,R2.YIELD],bins=np.arange(0,200 , 20),stacked=True) axes[1,2].legend(['PL','R1','R2']) axes[1,2].set_title('Crop type VS Yield') axes[0,1].hist(PL.PLANT_AREA,alpha =0.4,bins=np.arange(0,20 , 2)); axes[0,1].hist(R1.PLANT_AREA,alpha =0.4,bins=np.arange(0,20 , 2)); axes[0,1].hist(R2.PLANT_AREA,alpha =0.4,bins=np.arange(0,20 , 2)); axes[0,1].legend(['PL','R1','R2']) axes[0,1].set_title('Crop type VS Area') axes[0,2].bar(c_name, gsm) axes[0,2].set_xlabel('CROP_TYPE') axes[0,2].set_ylabel('CORPS TAKEN BY GSM_HARVEST IN %') axes[0,2].set_title('Crop type VS GSM harvest') axes[0,0].bar(a,b) axes[0,0].set_xlabel("CROP TYPE") axes[0,0].set_ylabel("Yield") axes[0,0].set_title("Crop Type VS Yield") axes[2,0].hist(area); axes[2,0].set_title("AREA USED FOR PLANTING") axes[2,1].hist(yld,bins=np.arange(0,200 , 20)); axes[2,1].set_title("YEILD IN TONNS") axes[2,2].hist(PL.YIELD,alpha =0.4,bins=np.arange(0,200 , 20)); axes[2,2].hist(R1.YIELD,alpha =0.4,bins=np.arange(0,200 , 20)); axes[2,2].hist(R2.YIELD,alpha =0.4,bins=np.arange(0,200 , 20)); axes[2,2].legend(['PL','R1','R2']) axes[2,2].set_title("Crop type VS Yield") # - data["CV_NAME"].value_counts()
miniprojectsem4/.ipynb_checkpoints/prac-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Gas-fraction and black-hole-mass evolution with redshift for matched
# barred / unbarred disk galaxies in Illustris-1 ("il1") and IllustrisTNG
# ("TNG").  Relies on locally pre-computed merger trees (JSON) and sample
# ID / A2 bar-strength tables (.npy).
import numpy as np
import h5py
import json
import sys

sys.path.append('F:\Linux')
import illustris_python as il
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import BoundaryNorm, ListedColormap


def xyline(x, y):
    """Convert x/y point sequences into the (N-1, 2, 2) segment array
    expected by matplotlib's LineCollection (one segment per adjacent
    point pair)."""
    points = np.array([x, y]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    return segments


def LoadMergHist(simu, subhaloID):
    """Return a subhalo's main-progenitor branch and merger history.

    Args:
        simu: 'TNG' selects the TNG tree directory, anything else il1.
        subhaloID: z=0 subhalo ID; used as the JSON file name.

    Returns:
        (main, mergers) where `main` is a dict mapping snapshot number ->
        progenitor subhalo ID along the main branch, and `mergers` is the
        raw merger array from the tree file.
    """
    if simu == 'TNG':
        ldir = 'f:/Linux/localRUN/tng_DiskMerTree/%d.json' % subhaloID
    else:
        ldir = 'f:/Linux/localRUN/il1_DiskMerTree/%d.json' % subhaloID
    with open(ldir) as f:
        data = json.load(f)
    Main = np.array(data['Main'])
    return dict(zip(Main[:, 0], Main[:, 1])), np.array(data['Mergers'])
# -


def HistValAndBin(nums, bins, more=0, mask=0):
    """Count `nums` per bin.

    The first bin is open below (everything < bins[1]); middle bins are
    (bins[i], bins[i+1]]; with more=1 an extra overflow bin (> bins[-1])
    is appended.  With mask=1 the boolean selection masks are returned
    alongside the counts.

    Returns:
        np.array of counts, or (counts, masks) when mask == 1.
    """
    if mask == 1:
        reMask = []
    val = []
    tmp = nums[nums < bins[1]]
    if mask == 1:
        reMask.append(nums < bins[1])
    val.append(len(tmp))
    for i in range(1, len(bins) - 1):
        tmp = nums[(nums > bins[i]) & (nums <= bins[i + 1])]
        val.append(len(tmp))
        if mask == 1:
            reMask.append((nums > bins[i]) & (nums <= bins[i + 1]))
    if more == 1:
        tmp = nums[nums > bins[-1]]
        val.append(len(tmp))
        if mask == 1:
            reMask.append(nums > bins[-1])
    if mask == 0:
        return np.array(val)
    else:
        return np.array(val), np.array(reMask)


# +
'''
snap_135 z=0
snap_127 z=0.1
snap_120 z=0.2
snap_113 z=0.31
snap_108 z=0.4
snap_103 z=0.5
snap_85 z=1.0
snap_75 z=1.53
snap_68 z=2.0
'''
# BUG FIX: the il1 snapshot list must parallel `Redshift` below.  The
# original had 103 (z=0.5) before 108 (z=0.4), mis-pairing every il1
# quantity at those two redshifts.
il1_snapshot = [135, 127, 120, 113, 108, 103, 95, 85, 75, 68]
tng_snapshot = [99, 91, 84, 78, 72, 67, 59, 50, 40, 33]
Redshift = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 1.0, 1.5, 2.0]

# Gas fraction within the stellar half-mass radius: gas / (stars + gas),
# keyed by snapshot.  0/0 (no gas, no stars) yields NaN -> set to 0.
il1_GasFraction = {}
for snap in il1_snapshot:
    mas = il.func.loadSubhalos('il1', snap, 'SubhaloMassInHalfRadType')
    Gf = mas[:, 0] / (mas[:, 4] + mas[:, 0])
    Gf[np.isnan(Gf)] = 0
    il1_GasFraction[snap] = Gf

tng_GasFraction = {}
for snap in tng_snapshot:
    mas = il.func.loadSubhalos('TNG', snap, 'SubhaloMassInHalfRadType')
    Gf = mas[:, 0] / (mas[:, 4] + mas[:, 0])
    Gf[np.isnan(Gf)] = 0
    tng_GasFraction[snap] = Gf

# Pre-computed sample tables: TNG<->il1 match, A2 bar strengths, and the
# matched barred/unbarred ID dictionaries.
tng2il1 = np.load('F:/Linux/localRUN/Match/tng2il1_allsub.npy', allow_pickle=1).item()
A2list = np.load('f:/Linux/localRUN/il1_A2withRedshift.npy', allow_pickle=1).item()
bar2bar = np.load('F:/npy/bar2bar.npy', allow_pickle=1).item()
bar2disk = np.load('f:/npy/bar2no.npy', allow_pickle=1).item()
il1_barID = np.load('f:/Linux/localRUN/barredID_il1.npy')
il1_diskID = np.load('f:/Linux/localRUN/diskID_il1.npy')
tng_barID = np.load('f:/Linux/localRUN/barredID_4WP_TNG.npy')
tng_diskID = np.load('f:/Linux/localRUN/diskID_4WP.npy')
# -


def il1_b2bGFwithZ():
    """il1 gas-fraction tracks vs redshift for the matched barred sample,
    each track colour-coded by its A2 bar strength per snapshot."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    norm = plt.Normalize(0, 0.5)
    ax.set_xlim(-0.05, 2.05)
    ax.set_ylim(0, 0.9)
    ax.set_xlabel('Redshift z')
    ax.set_ylabel('Gas Fraction')
    ax.set_title('il1 Matched barred galaxies gas fraction with redshift')
    for subhaloID in bar2bar.values():
        prog = LoadMergHist('il1', subhaloID)[0]
        GFlist = []
        A2 = A2list[subhaloID]
        for snapnum in il1_snapshot:
            try:
                haloID = prog[snapnum]
            except KeyError:  # main branch not traced at this snapshot
                GFlist.append(-1)
                continue
            GFlist.append(il1_GasFraction[snapnum][haloID])
        xaxis = []
        yaxis = []
        color = []
        for i in range(10):
            if GFlist[i] != -1:  # -1 marks missing snapshots
                xaxis.append(Redshift[i])
                yaxis.append(GFlist[i])
                color.append(float(A2[i]))
        seg = xyline(xaxis, yaxis)
        lc = LineCollection(seg, cmap='rainbow_r', norm=norm)
        lc.set_array(np.array(color))
        lc.set_linewidth(1)
        line = ax.add_collection(lc)
    fig.colorbar(line, ax=ax)
    # plt.savefig('f:/Linux/local_result/Match/bar2bar/il1_barred_GF.png',dpi=400)


# +
def TNG_nobarGFwithZ():
    """TNG gas-fraction tracks vs redshift for the matched unbarred
    disks; only galaxies traced at all 10 snapshots are drawn."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    norm = plt.Normalize(0, 0.5)
    ax.set_xlim(-0.05, 2.05)
    ax.set_ylim(0, 0.9)
    ax.set_xlabel('Redshift z')
    ax.set_ylabel('Gas Fraction')
    ax.set_title('TNG Matched none barred disk galaxies gas fraction with redshift')
    for subhaloID in bar2disk.keys():
        prog = LoadMergHist('TNG', subhaloID)[0]
        GFlist = []
        for snapnum in tng_snapshot:
            try:
                haloID = prog[snapnum]
            except KeyError:
                GFlist.append(-1)
                continue
            GFlist.append(tng_GasFraction[snapnum][haloID])
        xaxis = []
        yaxis = []
        for i in range(10):
            if GFlist[i] != -1:
                xaxis.append(Redshift[i])
                yaxis.append(GFlist[i])
        if len(xaxis) != 10:  # require a complete track
            continue
        ax.plot(xaxis, yaxis, c='k', linewidth=0.4)


TNG_nobarGFwithZ()


# +
def il1_nobarGFwithZ():
    """il1 counterpart of TNG_nobarGFwithZ for the matched unbarred
    disks (bar2disk values are the il1 IDs)."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    norm = plt.Normalize(0, 0.5)
    ax.set_xlim(-0.05, 2.05)
    ax.set_ylim(0, 0.9)
    ax.set_xlabel('Redshift z')
    ax.set_ylabel('Gas Fraction')
    ax.set_title('il1 Matched none barred disk galaxies gas fraction with redshift')
    for subhaloID in bar2disk.values():
        prog = LoadMergHist('il1', subhaloID)[0]
        GFlist = []
        for snapnum in il1_snapshot:
            try:
                haloID = prog[snapnum]
            except KeyError:
                GFlist.append(-1)
                continue
            GFlist.append(il1_GasFraction[snapnum][haloID])
        xaxis = []
        yaxis = []
        for i in range(10):
            if GFlist[i] != -1:
                xaxis.append(Redshift[i])
                yaxis.append(GFlist[i])
        if len(xaxis) != 10:
            continue
        ax.plot(xaxis, yaxis, c='k', linewidth=0.4)


il1_nobarGFwithZ()
# -

tng_BHMass = {}
for snap in tng_snapshot:
    BH = il.func.loadSubhalos('TNG', snap, 'SubhaloBHMass')
    # NOTE(review): this divides log10(M*1e10) by h=0.6774 instead of
    # dividing the mass by h before the log; looks like a misplaced
    # parenthesis, but the plot limits below (7-13) are tuned to these
    # values — confirm before changing.
    BH = np.log10(BH * 10**10) / 0.6774
    BH[np.isneginf(BH)] = 0  # subhalos without a BH: log10(0) -> -inf
    tng_BHMass[snap] = BH


# +
def TNG_nobarBHwithZ():
    """TNG BH-mass tracks (see tng_BHMass conversion above) vs redshift
    for the matched unbarred disks."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    norm = plt.Normalize(0, 0.5)
    ax.set_xlim(-0.05, 2.05)
    ax.set_ylim(7, 13)
    ax.set_xlabel('Redshift z')
    ax.set_ylabel(r'SubhaloBHMass ($10^{10} M_\odot/h$)')
    ax.set_title('TNG Matched barred galaxies BHmass with redshift')
    for subhaloID in bar2disk.keys():
        prog = LoadMergHist('TNG', subhaloID)[0]
        BHlist = []
        for snapnum in tng_snapshot:
            try:
                haloID = prog[snapnum]
            except KeyError:
                BHlist.append(-1)
                continue
            BHlist.append(tng_BHMass[snapnum][haloID])
        xaxis = []
        yaxis = []
        for i in range(10):
            if BHlist[i] != -1:
                xaxis.append(Redshift[i])
                yaxis.append(BHlist[i])
        ax.plot(xaxis, yaxis, c='k', linewidth=1)
    # plt.savefig('F:/Linux/local_result/Match/bar2disk/tng_BHMwithZ.png',dpi=400)
    # plt.savefig('F:/Linux/local_result/alldisk/tng_BHMwithZ.png',dpi=300)


TNG_nobarBHwithZ()
# -

il1_BHMass = {}
for snap in il1_snapshot:
    BH = il.func.loadSubhalos('il1', snap, 'SubhaloBHMass')
    # il1 BH masses are kept in simulation units (10^10 Msun/h); the log
    # conversion used for TNG was deliberately disabled here:
    # BH = np.log10(BH * 10**10) / 0.704
    # BH[np.isnan(BH)] = 0
    # BH[np.isneginf(BH)] = 0
    il1_BHMass[snap] = BH


# +
def il1_nobarBHwithZ():
    """il1 BH-mass tracks (simulation units) vs redshift for the matched
    unbarred disks, with two manual outlier exclusions."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    norm = plt.Normalize(0, 0.5)
    ax.set_xlim(-0.05, 2.05)
    ax.set_ylim(0, 0.08)
    ax.set_xlabel('Redshift z')
    ax.set_ylabel(r'SubhaloBHMass ($10^{10} M_\odot/h$)')
    ax.set_title('illustris-1 Matched barred galaxies BHmass with redshift')
    for subhaloID in bar2disk.values():
        if subhaloID == 215567:  # manually excluded outlier
            continue
        prog = LoadMergHist('il1', subhaloID)[0]
        BHlist = []
        for snapnum in il1_snapshot:
            try:
                haloID = prog[snapnum]
            except KeyError:
                BHlist.append(-1)
                continue
            BHlist.append(il1_BHMass[snapnum][haloID])
        xaxis = []
        yaxis = []
        if BHlist[0] > 0.09:  # drop tracks starting above the plot range
            continue
        for i in range(10):
            if BHlist[i] != -1:
                xaxis.append(Redshift[i])
                yaxis.append(BHlist[i])
        ax.plot(xaxis, yaxis, c='k', linewidth=1)
    # plt.savefig('F:/Linux/local_result/alldisk/il1_BHMwithZ.png',dpi=300)
    plt.savefig('F:/Linux/local_result/Match/bar2disk/il1_BHMwithZ.png',dpi=400)


il1_nobarBHwithZ()
# -

bar2bar.values()


# +
def b2bCompairPlot():
    """One figure per matched pair: overlay the TNG and il1 BH-mass
    tracks and save each figure under the pair's TNG subhalo ID.
    Note: no try/except here — assumes complete main branches."""
    for subhaloID in bar2disk.keys():
        fig = plt.figure()
        ax = fig.add_subplot(111)
        norm = plt.Normalize(0, 0.5)
        ax.set_xlim(-0.05, 2.05)
        # ax.set_ylim(0, 0.09)
        ax.set_xlabel('Redshift z')
        ax.set_ylabel(r'SubhaloBHMass ($10^{10} M_\odot/h$)')
        ax.set_title('TNG & illustris-1 Matched barred galaxies BHmass with redshift')
        tng_prog = LoadMergHist('TNG', subhaloID)[0]
        il1_prog = LoadMergHist('il1', bar2disk[subhaloID])[0]
        tng_BHlist = []
        il1_BHlist = []
        for snapnum in tng_snapshot:
            haloID = tng_prog[snapnum]
            tng_BHlist.append(tng_BHMass[snapnum][haloID])
        for snapnum in il1_snapshot:
            haloID = il1_prog[snapnum]
            il1_BHlist.append(il1_BHMass[snapnum][haloID])
        tng_yaxis = []
        il1_yaxis = []
        for i in range(10):
            tng_yaxis.append(tng_BHlist[i])
            il1_yaxis.append(il1_BHlist[i])
        ax.plot(Redshift, tng_yaxis, c='c', linewidth=1, label='TNG barred galaxies')
        ax.plot(Redshift, il1_yaxis, c='r', linewidth=1, label='illustris-1 barred galaxies')
        ax.legend(loc=1)
        plt.savefig('F:/Linux/local_result/Match/bar2bar/tmp/b2disk/%d.png' % subhaloID, dpi=400)
        plt.close()


b2bCompairPlot()


# +
# NOTE: identical re-definition of TNG_nobarBHwithZ (kept from the
# original notebook, which defined and ran this cell twice).
def TNG_nobarBHwithZ():
    """TNG BH-mass tracks vs redshift for the matched unbarred disks."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    norm = plt.Normalize(0, 0.5)
    ax.set_xlim(-0.05, 2.05)
    ax.set_ylim(7, 13)
    ax.set_xlabel('Redshift z')
    ax.set_ylabel(r'SubhaloBHMass ($10^{10} M_\odot/h$)')
    ax.set_title('TNG Matched barred galaxies BHmass with redshift')
    for subhaloID in bar2disk.keys():
        prog = LoadMergHist('TNG', subhaloID)[0]
        BHlist = []
        for snapnum in tng_snapshot:
            try:
                haloID = prog[snapnum]
            except KeyError:
                BHlist.append(-1)
                continue
            BHlist.append(tng_BHMass[snapnum][haloID])
        xaxis = []
        yaxis = []
        for i in range(10):
            if BHlist[i] != -1:
                xaxis.append(Redshift[i])
                yaxis.append(BHlist[i])
        ax.plot(xaxis, yaxis, c='k', linewidth=1)
    # plt.savefig('F:/Linux/local_result/Match/bar2disk/tng_BHMwithZ.png',dpi=400)
    # plt.savefig('F:/Linux/local_result/alldisk/tng_BHMwithZ.png',dpi=300)


TNG_nobarBHwithZ()
# -
# Per-snapshot A2 bar-strength dictionaries for the full disk samples
il1_a2 = np.load('f:/Linux/localRUN/il1DiskA2withZ.npy', allow_pickle=1).item()
tng_a2 = np.load('f:/Linux/localRUN/tngDiskA2withZ.npy', allow_pickle=1).item()

tng_a2.keys()

bins = np.linspace(0.15, 0.55, 9)
il1_rs = [135, 103, 85, 68]  # il1 snapshots at z = 0, 0.5, 1, 2
tng_rs = [99, 67, 50, 33]    # TNG snapshots at z = 0, 0.5, 1, 2

# +
# Fraction of disks per A2 bin (A2 >= 0.15 counts as barred), normalised
# by the full disk sample size at that snapshot.
il1_barFraction = {}
for snap in il1_rs:
    barred = []
    for a2 in il1_a2[snap].values():
        if a2 >= 0.15:
            barred.append(a2)
    nums = HistValAndBin(np.array(barred), bins, more=1)
    il1_barFraction[snap] = nums / len(il1_a2[snap].values())
# -

il1_barFraction

for snap in il1_a2.keys():
    print(len(il1_a2[snap].values()))

# Cumulative fraction with A2 above each bin edge.
il1_tmp = {}
for snap in il1_barFraction.keys():
    ll = []
    for i in range(len(il1_barFraction[snap])):
        # NOTE(review): x[i:-1] always drops the overflow bin added by
        # more=1 (and makes the last point 0) — confirm whether x[i:]
        # was intended.
        ll.append(sum(il1_barFraction[snap][i:-1]))
    il1_tmp[snap] = ll

il1_tmp

# +
plt.scatter(bins, il1_tmp[135], color='b', label='z=0', marker='.')
plt.scatter(bins, il1_tmp[103], color='r', label='z=0.5', marker='.')
plt.scatter(bins, il1_tmp[85], color='lime', label='z=1', marker='.')
plt.scatter(bins, il1_tmp[68], color='blueviolet', label='z=2', marker='.')
plt.xlabel('A2')
plt.ylabel('Fraction')
plt.xlim(0.1, 0.56)
# typo fix: title read 'Illustris-1 bar fration' (output file name kept
# unchanged so existing results are not orphaned)
plt.title('Illustris-1 bar fraction')
plt.legend()
plt.savefig('F:/Linux/local_result/il1_SUM_barFrationWithZ.png', dpi=300)
# -

tng_barFraction = {}
for snap in tng_rs:
    barred = []
    for a2 in tng_a2[snap].values():
        if a2 >= 0.15:
            barred.append(a2)
    nums = HistValAndBin(np.array(barred), bins, more=1)
    tng_barFraction[snap] = nums / len(tng_a2[snap].values())

tng_tmp = {}
for snap in tng_barFraction.keys():
    ll = []
    for i in range(len(tng_barFraction[snap])):
        ll.append(sum(tng_barFraction[snap][i:-1]))
    tng_tmp[snap] = ll

# +
plt.scatter(bins, tng_tmp[99], color='b', label='z=0', marker='.')
plt.scatter(bins, tng_tmp[67], color='r', label='z=0.5', marker='.')
plt.scatter(bins, tng_tmp[50], color='lime', label='z=1', marker='.')
plt.scatter(bins, tng_tmp[33], color='blueviolet', label='z=2', marker='.')
plt.xlabel('A2')
plt.ylabel('Fraction')
plt.xlim(0.1, 0.56)
plt.title('TNG bar fraction')  # typo fix: was 'TNG bar fration'
plt.legend()
plt.savefig('F:/Linux/local_result/TNG__sum_barFrationWithZ.png', dpi=300)
# +
'SubhaloBHMdot'

# Raw BH accretion rates per snapshot (simulation units).
tng_BHdot = {}
for snap in tng_snapshot:
    BH = il.func.loadSubhalos('TNG', snap, 'SubhaloBHMdot')
    tng_BHdot[snap] = BH

il1_BHdot = {}
for snap in il1_snapshot:
    BH = il.func.loadSubhalos('il1', snap, 'SubhaloBHMdot')
    il1_BHdot[snap] = BH


# +
def il1_nobarBHdotwithZ():
    """il1 BH accretion-rate tracks vs redshift (log y-scale) for the
    matched unbarred disks."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    norm = plt.Normalize(0, 0.5)
    ax.set_xlim(-0.05, 2.05)
    ax.set_ylim(1e-10, 1e-1)
    ax.set_yscale("log")
    ax.set_xlabel('Redshift z')
    ax.set_ylabel(r'SubhaloBHdot ($(10^{10} M_\odot/h) / (0.978Gyr/h)$)')
    ax.set_title('illustris-1 Matched barred galaxies BHmass with redshift')
    for subhaloID in bar2disk.values():
        prog = LoadMergHist('il1', subhaloID)[0]
        BHlist = []
        for snapnum in il1_snapshot:
            try:
                haloID = prog[snapnum]
            except KeyError:  # main branch not traced at this snapshot
                BHlist.append(-1)
                continue
            BHlist.append(il1_BHdot[snapnum][haloID])
        xaxis = []
        yaxis = []
        for i in range(10):
            if BHlist[i] != -1:
                xaxis.append(Redshift[i])
                yaxis.append(BHlist[i])
        ax.plot(xaxis, yaxis, c='k', linewidth=0.5)
    # plt.savefig('F:/Linux/local_result/alldisk/il1_BHMwithZ.png',dpi=300)
    plt.savefig('F:/Linux/local_result/Match/bar2disk/il1_BHdotwithZ_log.png',dpi=400)


il1_nobarBHdotwithZ()


# +
def TNG_nobarBHdotwithZ():
    """TNG counterpart of il1_nobarBHdotwithZ."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    norm = plt.Normalize(0, 0.5)
    ax.set_xlim(-0.05, 2.05)
    ax.set_ylim(1e-10, 1e-1)
    ax.set_yscale("log")
    ax.set_xlabel('Redshift z')
    ax.set_ylabel(r'SubhaloBHdot ($(10^{10} M_\odot/h) / (0.978Gyr/h)$)')
    ax.set_title('TNG Matched barred galaxies BHmass with redshift')
    for subhaloID in bar2disk.keys():
        prog = LoadMergHist('TNG', subhaloID)[0]
        BHlist = []
        for snapnum in tng_snapshot:
            try:
                haloID = prog[snapnum]
            except KeyError:
                BHlist.append(-1)
                continue
            BHlist.append(tng_BHdot[snapnum][haloID])
        xaxis = []
        yaxis = []
        for i in range(10):
            if BHlist[i] != -1:
                xaxis.append(Redshift[i])
                yaxis.append(BHlist[i])
        ax.plot(xaxis, yaxis, c='k', linewidth=0.5)
    # plt.savefig('F:/Linux/local_result/alldisk/il1_BHMwithZ.png',dpi=300)
    plt.savefig('F:/Linux/local_result/Match/bar2disk/TNG_BHdotwithZ_log.png',dpi=400)


TNG_nobarBHdotwithZ()
# -
JpytrNb/jupyter/Gas-z-A2(MatchData).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers, datasets, Sequential x = tf.random.normal([2, 5, 5, 3]) # 输入,5*5,3 通道 w = tf.random.normal([3, 3, 3, 4]) # 4 个 3*3 大小的卷积核 x.shape out = tf.nn.conv2d(x, w, strides=1, padding='SAME') out.shape (x, y), (X_test, y_test) = datasets.mnist.load_data() x = tf.expand_dims(x, axis=3) x.shape train_db = tf.data.Dataset.from_tensor_slices((x,y)) for (x,y) in enumerate(train_db): x.shape w = tf.random.normal([3, 3, 1, 6]) (x, y), _ = datasets.mnist.load_data() # x: [0~255] => [0~1.] x = tf.convert_to_tensor(x, dtype=tf.float32) y = tf.convert_to_tensor(y, dtype=tf.int32) x.shape y.shape x = tf.expand_dims(x, axis=3) x.shape ww = tf.random.normal([3, 3, 1, 4]) # 4 个 3*3 大小的卷积核 out = tf.nn.conv2d(x, ww, strides=1, padding='SAME') out.shape layer = layers.Conv2D(4, kernel_size=3, strides=1, padding='SAME') out = layer(x) out.shape
TensorFlow-2.0/04-CNN/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 100 numpy exercises with hint # # This is a collection of exercises that have been collected in the numpy mailing list, on stack overflow and in the numpy documentation. The goal of this collection is to offer a quick reference for both old and new users but also to provide a set of exercises for those who teach. # # # If you find an error or think you've a better way to solve some of them, feel free to open an issue at <https://github.com/rougier/numpy-100> # #### 1. Import the numpy package under the name `np` (★☆☆) # (**hint**: import … as …) # #### 2. Print the numpy version and the configuration (★☆☆) # (**hint**: np.\_\_version\_\_, np.show\_config) # #### 3. Create a null vector of size 10 (★☆☆) # (**hint**: np.zeros) # #### 4. How to find the memory size of any array (★☆☆) # (**hint**: size, itemsize) # #### 5. How to get the documentation of the numpy add function from the command line? (★☆☆) # (**hint**: np.info) # #### 6. Create a null vector of size 10 but the fifth value which is 1 (★☆☆) # (**hint**: array\[4\]) # #### 7. Create a vector with values ranging from 10 to 49 (★☆☆) # (**hint**: np.arange) # #### 8. Reverse a vector (first element becomes last) (★☆☆) # (**hint**: array\[::-1\]) # #### 9. Create a 3x3 matrix with values ranging from 0 to 8 (★☆☆) # (**hint**: reshape) # #### 10. Find indices of non-zero elements from \[1,2,0,0,4,0\] (★☆☆) # (**hint**: np.nonzero) # #### 11. Create a 3x3 identity matrix (★☆☆) # (**hint**: np.eye) # #### 12. Create a 3x3x3 array with random values (★☆☆) # (**hint**: np.random.random) # #### 13. Create a 10x10 array with random values and find the minimum and maximum values (★☆☆) # (**hint**: min, max) # #### 14. 
Create a random vector of size 30 and find the mean value (★☆☆) # (**hint**: mean) # #### 15. Create a 2d array with 1 on the border and 0 inside (★☆☆) # (**hint**: array\[1:-1, 1:-1\]) # #### 16. How to add a border (filled with 0's) around an existing array? (★☆☆) # (**hint**: np.pad) # #### 17. What is the result of the following expression? (★☆☆) # (**hint**: NaN = not a number, inf = infinity) # ```python # 0 * np.nan # np.nan == np.nan # np.inf > np.nan # np.nan - np.nan # np.nan in set([np.nan]) # 0.3 == 3 * 0.1 # ``` # #### 18. Create a 5x5 matrix with values 1,2,3,4 just below the diagonal (★☆☆) # (**hint**: np.diag) # #### 19. Create a 8x8 matrix and fill it with a checkerboard pattern (★☆☆) # (**hint**: array\[::2\]) # #### 20. Consider a (6,7,8) shape array, what is the index (x,y,z) of the 100th element? # (**hint**: np.unravel_index) # #### 21. Create a checkerboard 8x8 matrix using the tile function (★☆☆) # (**hint**: np.tile) # #### 22. Normalize a 5x5 random matrix (★☆☆) # (**hint**: (x - mean) / std) # #### 23. Create a custom dtype that describes a color as four unsigned bytes (RGBA) (★☆☆) # (**hint**: np.dtype) # #### 24. Multiply a 5x3 matrix by a 3x2 matrix (real matrix product) (★☆☆) # (**hint**: np.dot | @) # #### 25. Given a 1D array, negate all elements which are between 3 and 8, in place. (★☆☆) # (**hint**: >, <=) # #### 26. What is the output of the following script? (★☆☆) # (**hint**: np.sum) # ```python # # Author: <NAME> # # print(sum(range(5),-1)) # from numpy import * # print(sum(range(5),-1)) # ``` # #### 27. Consider an integer vector Z, which of these expressions are legal? (★☆☆) # ```python # Z**Z # 2 << Z >> 2 # Z <- Z # 1j*Z # Z/1/1 # Z<Z>Z # ``` # #### 28. What are the result of the following expressions? # ```python # np.array(0) / np.array(0) # np.array(0) // np.array(0) # np.array([np.nan]).astype(int).astype(float) # ``` # #### 29. How to round away from zero a float array ? 
(★☆☆) # (**hint**: np.uniform, np.copysign, np.ceil, np.abs) # #### 30. How to find common values between two arrays? (★☆☆) # (**hint**: np.intersect1d) # #### 31. How to ignore all numpy warnings (not recommended)? (★☆☆) # (**hint**: np.seterr, np.errstate) # #### 32. Is the following expressions true? (★☆☆) # (**hint**: imaginary number) # ```python # np.sqrt(-1) == np.emath.sqrt(-1) # ``` # #### 33. How to get the dates of yesterday, today and tomorrow? (★☆☆) # (**hint**: np.datetime64, np.timedelta64) # #### 34. How to get all the dates corresponding to the month of July 2016? (★★☆) # (**hint**: np.arange(dtype=datetime64\['D'\])) # #### 35. How to compute ((A+B)\*(-A/2)) in place (without copy)? (★★☆) # (**hint**: np.add(out=), np.negative(out=), np.multiply(out=), np.divide(out=)) # #### 36. Extract the integer part of a random array using 5 different methods (★★☆) # (**hint**: %, np.floor, np.ceil, astype, np.trunc) # #### 37. Create a 5x5 matrix with row values ranging from 0 to 4 (★★☆) # (**hint**: np.arange) # #### 38. Consider a generator function that generates 10 integers and use it to build an array (★☆☆) # (**hint**: np.fromiter) # #### 39. Create a vector of size 10 with values ranging from 0 to 1, both excluded (★★☆) # (**hint**: np.linspace) # #### 40. Create a random vector of size 10 and sort it (★★☆) # (**hint**: sort) # #### 41. How to sum a small array faster than np.sum? (★★☆) # (**hint**: np.add.reduce) # #### 42. Consider two random array A and B, check if they are equal (★★☆) # (**hint**: np.allclose, np.array\_equal) # #### 43. Make an array immutable (read-only) (★★☆) # (**hint**: flags.writeable) # #### 44. Consider a random 10x2 matrix representing cartesian coordinates, convert them to polar coordinates (★★☆) # (**hint**: np.sqrt, np.arctan2) # #### 45. Create random vector of size 10 and replace the maximum value by 0 (★★☆) # (**hint**: argmax) # #### 46. 
Create a structured array with `x` and `y` coordinates covering the \[0,1\]x\[0,1\] area (★★☆) # (**hint**: np.meshgrid) # #### 47. Given two arrays, X and Y, construct the Cauchy matrix C (Cij =1/(xi - yj)) # (**hint**: np.subtract.outer) # #### 48. Print the minimum and maximum representable value for each numpy scalar type (★★☆) # (**hint**: np.iinfo, np.finfo, eps) # #### 49. How to print all the values of an array? (★★☆) # (**hint**: np.set\_printoptions) # #### 50. How to find the closest value (to a given scalar) in a vector? (★★☆) # (**hint**: argmin) # #### 51. Create a structured array representing a position (x,y) and a color (r,g,b) (★★☆) # (**hint**: dtype) # #### 52. Consider a random vector with shape (100,2) representing coordinates, find point by point distances (★★☆) # (**hint**: np.atleast\_2d, T, np.sqrt) # #### 53. How to convert a float (32 bits) array into an integer (32 bits) in place? # (**hint**: astype(copy=False)) # #### 54. How to read the following file? (★★☆) # (**hint**: np.genfromtxt) # ``` # 1, 2, 3, 4, 5 # 6, , , 7, 8 # , , 9,10,11 # ``` # #### 55. What is the equivalent of enumerate for numpy arrays? (★★☆) # (**hint**: np.ndenumerate, np.ndindex) # #### 56. Generate a generic 2D Gaussian-like array (★★☆) # (**hint**: np.meshgrid, np.exp) # #### 57. How to randomly place p elements in a 2D array? (★★☆) # (**hint**: np.put, np.random.choice) # #### 58. Subtract the mean of each row of a matrix (★★☆) # (**hint**: mean(axis=,keepdims=)) # #### 59. How to sort an array by the nth column? (★★☆) # (**hint**: argsort) # #### 60. How to tell if a given 2D array has null columns? (★★☆) # (**hint**: any, ~) # #### 61. Find the nearest value from a given value in an array (★★☆) # (**hint**: np.abs, argmin, flat) # #### 62. Considering two arrays with shape (1,3) and (3,1), how to compute their sum using an iterator? (★★☆) # (**hint**: np.nditer) # #### 63. 
Create an array class that has a name attribute (★★☆) # (**hint**: class method) # #### 64. Consider a given vector, how to add 1 to each element indexed by a second vector (be careful with repeated indices)? (★★★) # (**hint**: np.bincount | np.add.at) # #### 65. How to accumulate elements of a vector (X) to an array (F) based on an index list (I)? (★★★) # (**hint**: np.bincount) # #### 66. Considering a (w,h,3) image of (dtype=ubyte), compute the number of unique colors (★★★) # (**hint**: np.unique) # #### 67. Considering a four dimensions array, how to get sum over the last two axis at once? (★★★) # (**hint**: sum(axis=(-2,-1))) # #### 68. Considering a one-dimensional vector D, how to compute means of subsets of D using a vector S of same size describing subset indices? (★★★) # (**hint**: np.bincount) # #### 69. How to get the diagonal of a dot product? (★★★) # (**hint**: np.diag) # #### 70. Consider the vector \[1, 2, 3, 4, 5\], how to build a new vector with 3 consecutive zeros interleaved between each value? (★★★) # (**hint**: array\[::4\]) # #### 71. Consider an array of dimension (5,5,3), how to multiply it by an array with dimensions (5,5)? (★★★) # (**hint**: array\[:, :, None\]) # #### 72. How to swap two rows of an array? (★★★) # (**hint**: array\[\[\]\] = array\[\[\]\]) # #### 73. Consider a set of 10 triplets describing 10 triangles (with shared vertices), find the set of unique line segments composing all the triangles (★★★) # (**hint**: repeat, np.roll, np.sort, view, np.unique) # #### 74. Given an array C that is a bincount, how to produce an array A such that np.bincount(A) == C? (★★★) # (**hint**: np.repeat) # #### 75. How to compute averages using a sliding window over an array? (★★★) # (**hint**: np.cumsum) # #### 76. 
Consider a one-dimensional array Z, build a two-dimensional array whose first row is (Z\[0\],Z\[1\],Z\[2\]) and each subsequent row is shifted by 1 (last row should be (Z\[-3\],Z\[-2\],Z\[-1\]) (★★★) # (**hint**: from numpy.lib import stride_tricks) # #### 77. How to negate a boolean, or to change the sign of a float inplace? (★★★) # (**hint**: np.logical_not, np.negative) # #### 78. Consider 2 sets of points P0,P1 describing lines (2d) and a point p, how to compute distance from p to each line i (P0\[i\],P1\[i\])? (★★★) # #### 79. Consider 2 sets of points P0,P1 describing lines (2d) and a set of points P, how to compute distance from each point j (P\[j\]) to each line i (P0\[i\],P1\[i\])? (★★★) # #### 80. Consider an arbitrary array, write a function that extracts a subpart with a fixed shape and centered on a given element (pad with a `fill` value when necessary) (★★★) # (**hint**: minimum, maximum) # #### 81. Consider an array Z = \[1,2,3,4,5,6,7,8,9,10,11,12,13,14\], how to generate an array R = \[\[1,2,3,4\], \[2,3,4,5\], \[3,4,5,6\], ..., \[11,12,13,14\]\]? (★★★) # (**hint**: stride\_tricks.as\_strided) # #### 82. Compute a matrix rank (★★★) # (**hint**: np.linalg.svd) (suggestion: np.linalg.svd) # #### 83. How to find the most frequent value in an array? # (**hint**: np.bincount, argmax) # #### 84. Extract all the contiguous 3x3 blocks from a random 10x10 matrix (★★★) # (**hint**: stride\_tricks.as\_strided) # #### 85. Create a 2D array subclass such that Z\[i,j\] == Z\[j,i\] (★★★) # (**hint**: class method) # #### 86. Consider a set of p matrices with shape (n,n) and a set of p vectors with shape (n,1). How to compute the sum of the p matrix products at once? (result has shape (n,1)) (★★★) # (**hint**: np.tensordot) # #### 87. Consider a 16x16 array, how to get the block-sum (block size is 4x4)? (★★★) # (**hint**: np.add.reduceat) # #### 88. How to implement the Game of Life using numpy arrays? (★★★) # #### 89. 
How to get the n largest values of an array (★★★) # (**hint**: np.argsort | np.argpartition) # #### 90. Given an arbitrary number of vectors, build the cartesian product (every combinations of every item) (★★★) # (**hint**: np.indices) # #### 91. How to create a record array from a regular array? (★★★) # (**hint**: np.core.records.fromarrays) # #### 92. Consider a large vector Z, compute Z to the power of 3 using 3 different methods (★★★) # (**hint**: np.power, \*, np.einsum) # #### 93. Consider two arrays A and B of shape (8,3) and (2,2). How to find rows of A that contain elements of each row of B regardless of the order of the elements in B? (★★★) # (**hint**: np.where) # #### 94. Considering a 10x3 matrix, extract rows with unequal values (e.g. \[2,2,3\]) (★★★) # #### 95. Convert a vector of ints into a matrix binary representation (★★★) # (**hint**: np.unpackbits) # #### 96. Given a two dimensional array, how to extract unique rows? (★★★) # (**hint**: np.ascontiguousarray | np.unique) # #### 97. Considering 2 vectors A & B, write the einsum equivalent of inner, outer, sum, and mul function (★★★) # (**hint**: np.einsum) # #### 98. Considering a path described by two vectors (X,Y), how to sample it using equidistant samples (★★★)? # (**hint**: np.cumsum, np.interp) # #### 99. Given an integer n and a 2D array X, select from X the rows which can be interpreted as draws from a multinomial distribution with n degrees, i.e., the rows which only contain integers and which sum to n. (★★★) # (**hint**: np.logical\_and.reduce, np.mod) # #### 100. Compute bootstrapped 95% confidence intervals for the mean of a 1D array X (i.e., resample the elements of an array with replacement N times, compute the mean of each sample, and then compute percentiles over the means). (★★★) # (**hint**: np.percentile)
NumPy/100_Numpy_exercises_with_hint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## How To Create Custom Quantum Feature Maps
#
# In machine learning, a feature map represents a transformation of data into a
# higher-dimensional space. However, this can be an expensive computation.
# Instead, kernel functions can be used to implicitly encode this transformation
# through the pairwise inner products of data samples. Kernels are a similarity
# measure over the dataset and are a key component of many machine learning
# models, for example, support vector machines. A quantum computer can be used
# to encode classical data into the quantum state space. We call this a quantum
# feature map.
#
# In this guide, we will show how to create a custom quantum feature map with
# trainable parameters, which may be used as input to Qiskit machine learning
# algorithms such as `QSVC` and `QuantumKernelTrainer`. We will follow four
# basic steps:
#
# 1. Import required Qiskit packages
# 2. Design the circuit for the quantum feature map
# 3. Build the circuit with Qiskit
# 4. Implement the feature map as a `QuantumCircuit` child class

# ### Import Required Packages
#
# To create a quantum feature map with trainable parameters in Qiskit, there
# are two basic guidelines.<br>
# The quantum feature map should:
# - Be an extension of Qiskit's `QuantumCircuit` class
# - Contain some number of trainable user parameters, `θ`, in addition to
#   parameters designated to input data, `x`

# +
from typing import List, Callable, Union

from qiskit import QuantumCircuit
from qiskit.circuit import ParameterVector

# To visualize circuit creation process
from qiskit.visualization import circuit_drawer
# -

# ### Design the Circuit
# Similarly to classical feature engineering, creating a quantum feature map is
# a process that strongly depends on the learning problem at hand. In general,
# we cannot suggest an optimal feature map with no prior knowledge of the
# learning problem. Instead, we will focus on the basic steps to create a
# circuit using the Qiskit API. To illustrate, we will build a version of the
# [covariant feature map](https://github.com/qiskit-community/prototype-quantum-kernel-training/blob/main/qkt/feature_maps/covariant_feature_map.py),
# which is tailored to a dataset with a particular structure. Check out
# [this guide](https://github.com/qiskit-community/prototype-quantum-kernel-training/blob/main/docs/background/qkernels_and_data_w_group_structure.ipynb)
# for more information on covariant quantum kernels.
#
# For this example, the feature map will be built from a circuit containing
# trainable parameters `θ` followed by a circuit encoding the input data `x`.
# The trainable parameter of the $i$th qubit corresponds to a rotation around
# the $y$-axis by an angle `θ[i]`. We follow this by an entanglement layer of
# controlled-$z$ gates. Finally, we encode two features `x[i], x[i+1]` per
# qubit using consecutive rotations around the $x$ and $z$ axes.

# ### Build the Circuit with Qiskit
#
# First, we instantiate a `QuantumCircuit` and create the circuit layer with
# trainable parameters `θ[i]`. Here, we will assume we are given a dataset with
# 12 features and we encode two features per qubit.

# +
# For a dataset with 12 features; and 2 features per qubit
FEATURE_DIMENSION = 12
NUM_QUBITS = int(FEATURE_DIMENSION / 2)

# Qiskit feature maps should generally be QuantumCircuits or extensions of
# QuantumCircuit
feature_map = QuantumCircuit(NUM_QUBITS)
user_params = ParameterVector("θ", NUM_QUBITS)

# Create circuit layer with trainable parameters
for i in range(NUM_QUBITS):
    feature_map.ry(user_params[i], feature_map.qubits[i])

print(circuit_drawer(feature_map))
# -

# Next, we will define an entanglement scheme (a linear map of controlled-$z$
# gates) and create the entanglement layer.

# +
# Linear entanglement
entanglement = [[i, i + 1] for i in range(NUM_QUBITS - 1)]

for source, target in entanglement:
    feature_map.cz(feature_map.qubits[source], feature_map.qubits[target])
feature_map.barrier()

print(circuit_drawer(feature_map))
# -

# Finally, we encode two features `x[i], x[i+1]` per qubit using a layer of
# single-qubit rotations.

# +
input_params = ParameterVector("x", FEATURE_DIMENSION)
for i in range(NUM_QUBITS):
    feature_map.rz(input_params[2 * i + 1], feature_map.qubits[i])
    feature_map.rx(input_params[2 * i], feature_map.qubits[i])

print(circuit_drawer(feature_map))
# -

# ### Implement the Feature Map as a `QuantumCircuit` Child Class
#
# Most Qiskit algorithms that take feature maps as input require the feature
# map be a class extension of a `QuantumCircuit`. While there are many ways to
# do this, we suggest the following approach illustrated with
# `ExampleFeatureMap` that extends `QuantumCircuit`:
#
# The feature map circuit is created upon instantiation such that
# - Parameters such as feature dimension and entanglement scheme should be
#   specified during initialization
# - In the initialization, `QuantumCircuit.__init__()` is called before the
#   feature map circuit is generated, which ensures all `QuantumCircuit` class
#   fields (e.g. `QuantumCircuit.qubits`) are properly initialized
# - After the `QuantumCircuit` constructor has been called, a class method
#   `_generate_feature_map` generates the feature map circuit


class ExampleFeatureMap(QuantumCircuit):
    """The Example Feature Map circuit"""

    def __init__(
        self,
        feature_dimension: int,
        entanglement: Union[str, List[List[int]], Callable[[int], List[int]]] = None,
        name: str = "ExampleFeatureMap",
    ) -> None:
        """Create a new Example Feature Map circuit.

        Args:
            feature_dimension: The number of features
            entanglement: Entanglement scheme to be used in second layer
            name: Name of QuantumCircuit object

        Raises:
            ValueError: ExampleFeatureMap requires an even number of input features
        """
        if (feature_dimension % 2) != 0:
            raise ValueError(
                """
                Example feature map requires an even number of input features.
                """
            )

        self.feature_dimension = feature_dimension
        self.entanglement = entanglement
        self.user_parameters = None

        # Call the QuantumCircuit initialization.
        # BUG FIX: use integer division — `feature_dimension / 2` is a
        # float, and QuantumCircuit rejects a non-integer qubit count
        # (the top-level example above correctly uses int(.../2)).
        num_qubits = feature_dimension // 2
        super().__init__(
            num_qubits,
            name=name,
        )

        # Build the feature map circuit
        self._generate_feature_map()

    def _generate_feature_map(self):
        """Append the θ-rotation, entanglement, and data-encoding layers
        to this circuit (called once from __init__)."""
        # If no entanglement scheme specified, use linear entanglement
        if self.entanglement is None:
            self.entanglement = [[i, i + 1] for i in range(self.num_qubits - 1)]

        # Vector of data parameters
        input_params = ParameterVector("x", self.feature_dimension)
        user_params = ParameterVector("θ", self.num_qubits)

        # Create an initial rotation layer of trainable parameters
        for i in range(self.num_qubits):
            self.ry(user_params[i], self.qubits[i])
        self.user_parameters = user_params

        # Create the entanglement layer
        for source, target in self.entanglement:
            self.cz(self.qubits[source], self.qubits[target])
        self.barrier()

        # Create a circuit representation of the data group
        for i in range(self.num_qubits):
            self.rz(input_params[2 * i + 1], self.qubits[i])
            self.rx(input_params[2 * i], self.qubits[i])


# ### Instantiate and Inspect the Example Feature Map
#
# Finally, we will instantiate and inspect an `ExampleFeatureMap` object. We
# will use `feature_dimension=10` and the default linear entanglement, which
# should produce a 5-qubit feature map circuit.

feature_map = ExampleFeatureMap(feature_dimension=10)
circuit_drawer(feature_map)

import qiskit.tools.jupyter

# %qiskit_version_table
# %qiskit_copyright
docs/how_tos/create_custom_quantum_feature_map.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="mYJ3KGNAa0Ix"
import tensorflow as tf
import numpy as np

# + id="0fdOKnwDrabZ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 14210, "status": "ok", "timestamp": 1605453686627, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -120} outputId="dd09b4c5-d469-462a-fe0b-9f3e4ae94936"
# Sanity check: show one raw training example from the HS class directory.
# !cat '/content/drive/My Drive/MasterThesis/Datasets/Offensive2020/train/HS/1002.txt'

# + id="gfRAxzRxLxcC" colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 47954, "status": "ok", "timestamp": 1605453722735, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -120} outputId="ce18394c-cb2c-44b1-de8e-ecca169ead47"
# Labels are inferred from the sub-directory names of each dataset root.
batch_size = 32
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
    "/content/drive/My Drive/MasterThesis/Datasets/Offensive2020/train",
    batch_size=batch_size,
    validation_split=0.2,
    subset="training",
    seed=1337,
)
# NOTE(review): this loads only the "validation" 20% slice of the *dev*
# directory (validation_split=0.2, subset="validation"). If the full dev set
# was intended as the validation set, drop validation_split/subset — confirm.
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
    "/content/drive/My Drive/MasterThesis/Datasets/Offensive2020/dev",
    batch_size=batch_size,
    validation_split=0.2,
    subset="validation",
    seed=1337,
)
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
    "/content/drive/My Drive/MasterThesis/Datasets/Offensive2020/test", batch_size=batch_size
)

print(
    "Number of batches in raw_train_ds: %d"
    % tf.data.experimental.cardinality(raw_train_ds)
)
print(
    "Number of batches in raw_val_ds: %d" % tf.data.experimental.cardinality(raw_val_ds)
)
print(
    "Number of batches in raw_test_ds: %d"
    % tf.data.experimental.cardinality(raw_test_ds)
)

# + id="ydTH5bCXK_kS" colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 174157, "status": "ok", "timestamp": 1605453854041, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -120} outputId="eaf08627-8281-4df5-bb22-561aea2716fc"
# It's important to take a look at your raw data to ensure your normalization
# and tokenization will work as expected. We can do that by taking a few
# examples from the training set and looking at them.
# This is one of the places where eager execution shines:
# we can just evaluate these tensors using .numpy()
# instead of needing to evaluate them in a Session/Graph context.
for text_batch, label_batch in raw_train_ds.take(1):
    for i in range(5):
        print(text_batch.numpy()[i].decode('utf-8').strip())
        print(label_batch.numpy()[i])
        print('--------------------------------')

# + id="QxrhvJ1MBjhm" colab={"background_save": true}
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import string
import re


def custom_standardization(input_data):
    """Lowercase the text, replace literal "<LF>" line-break markers with
    spaces, and strip all ASCII punctuation (graph-mode tf.strings ops)."""
    lowercase = tf.strings.lower(input_data)
    stripped_html = tf.strings.regex_replace(lowercase, "<LF>", " ")
    return tf.strings.regex_replace(
        stripped_html, "[%s]" % re.escape(string.punctuation), ""
    )


# Vocabulary size, embedding width and padded/truncated sequence length.
max_features = 20000
embedding_dim = 128
sequence_length = 500

vectorize_layer = TextVectorization(
    standardize=custom_standardization,
    max_tokens=max_features,
    output_mode="int",
    output_sequence_length=sequence_length,
)

# Build the vocabulary from the training texts only (labels dropped).
text_ds = raw_train_ds.map(lambda x, y: x)
vectorize_layer.adapt(text_ds)


# + id="VwhbmPnsi4S-" colab={"background_save": true}
def vectorize_text(text, label):
    """Map a (text, label) pair to (integer token ids, label).

    The expand_dims adds the extra axis TextVectorization expects.
    """
    text = tf.expand_dims(text, -1)
    return vectorize_layer(text), label


# Vectorize the data.
train_ds = raw_train_ds.map(vectorize_text)
val_ds = raw_val_ds.map(vectorize_text)
test_ds = raw_test_ds.map(vectorize_text)

# Do async prefetching / buffering of the data for best performance on GPU.
train_ds = train_ds.cache().prefetch(buffer_size=10)
val_ds = val_ds.cache().prefetch(buffer_size=10)
test_ds = test_ds.cache().prefetch(buffer_size=10)

# + [markdown] id="Xv-M7bb6khq1"
# ## Build a model

# + id="WCohoIbMhVj9" colab={"base_uri": "https://localhost:8080/"} outputId="a476b4ec-10cb-49f9-cc83-08929795507d"
from tensorflow.keras import layers

# Stacked bidirectional LSTM classifier; the final Dense(1) outputs a logit
# (no sigmoid), matched by from_logits=True in the loss below.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(max_features, 64),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1)
])

model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=tf.keras.optimizers.Adam(1e-4),
              metrics=['accuracy'])

# NOTE(review): training actually happens here (10 epochs), not in the
# "Train the model" section below, whose fit call is commented out.
# validation_steps=30 presumably limits validation to 30 batches per epoch —
# confirm the validation set has at least that many batches.
history = model.fit(train_ds, epochs=10,
                    validation_data=val_ds,
                    validation_steps=30)

# + [markdown] id="yUxWhjJRkvX2"
# ## Train the model

# + id="6f40euEzkly4"
# epochs = 3

# # Fit the model using the train and test datasets.
# history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)

# + [markdown] id="edg-1dgVk31a"
# ## Evaluate the model on the test set
#

# + id="LpyFcJNDlTa_"
test_loss, test_acc = model.evaluate(test_ds)

print('Test Loss: {}'.format(test_loss))
print('Test Accuracy: {}'.format(test_acc))

# + [markdown] id="B83gmSdHlAAJ"
# ## plotlib

# + id="HKvke9H8k-DJ"
# Plot the training curves recorded by model.fit above.
import matplotlib.pyplot as plt

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss=history.history['loss']
val_loss=history.history['val_loss']

#epochs_range = range(22)

plt.figure(figsize=(15, 15))
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

# + id="hKOwfY9olYJG"
Experiments/HateSpeechDetectionModels/otherExperiments/DNN/Offensive2020-SharedTask/Try and error Experiments/Copy of text_classification_Keras_HS_offiensive2020.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.5 64-bit (''anaconda3'': virtualenv)'
#     language: python
#     name: python37564bitanaconda3virtualenv217b44c72f9c4c92a41af3931e51fbe4
# ---

import pandas as pd
import numpy as np

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')

train_data.head()

train_data[train_data.Age.isna()]


def initial_data(full_data, test_data=False, train_rate=0.8, answer_mode=False):
    """Select features, clean, standardise and optionally split the data.

    Args:
        full_data: DataFrame with Survived, Pclass, Sex, Age, SibSp, Parch.
        test_data: when True, also split into train/test portions using
            ``train_rate``. (NOTE: this boolean shadows the module-level
            ``test_data`` DataFrame — kept for interface compatibility.)
        train_rate: fraction of rows assigned to the training split.
        answer_mode: when True, keep all rows (no NaN filtering, no shuffle),
            for producing predictions on the submission set.

    Returns:
        (X, Y) when ``test_data`` is False, otherwise
        (train_X, train_Y, test_X, test_Y). X has standardised features plus
        a trailing column of ones (the bias term).
    """
    # NOTE(review): in answer_mode the input file may lack a real 'Survived'
    # column; callers discard Y in that case — confirm the pinned pandas
    # version tolerates selecting it via .loc.
    full_data = full_data.loc[:, ['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch']]
    if not answer_mode:
        full_data = full_data[~full_data.Age.isna()]
    full_data.Sex = (full_data.Sex == 'male').astype('int')
    if not answer_mode:
        # Shuffle rows so the train/test split below is random.
        full_data = full_data.reindex(np.random.permutation(full_data.index))
    print(full_data)
    # Standardise features and append a bias column of ones.
    X = full_data.iloc[:, 1:].values
    mean_X = X.mean(axis=0)
    std_X = X.std(axis=0)
    X = np.hstack(((X - mean_X) / std_X, np.ones((X.shape[0], 1))))
    Y = full_data.iloc[:, 0].values
    Y = Y.reshape(Y.shape[0], 1)
    if not test_data:
        return X, Y
    # BUG FIX: these slices previously used the undefined name `rate`
    # (NameError whenever test_data=True); the parameter is `train_rate`.
    train_X = X[:int(X.shape[0] * train_rate)]
    test_X = X[int(X.shape[0] * train_rate):]
    train_Y = Y[:int(Y.shape[0] * train_rate)]
    test_Y = Y[int(Y.shape[0] * train_rate):]
    return train_X, train_Y, test_X, test_Y


def sigmoid(X, theta):
    """Logistic hypothesis: sigmoid(X @ theta), element-wise."""
    return 1 / ( 1 + np.exp(-np.dot(X, theta)))


def forward_prop(X, Y, theta):
    """Return predictions Y_hat and the binary cross-entropy cost J."""
    Y_hat = sigmoid(X, theta)
    m = X.shape[0]
    J = -1/m * np.sum(Y * np.log(Y_hat) + (1 - Y) * np.log(1 - Y_hat))
    return Y_hat, J


def backward_prop(X, Y, theta, Y_hat, learning_rate):
    """One gradient-descent update of theta for logistic regression."""
    m = X.shape[0]
    gradient = 1/m * np.dot(X.T, Y_hat - Y)
    return theta - learning_rate * gradient


def logistic_regression(X, Y, num_iteration=2000, learning_rate=0.1):
    """Fit logistic-regression weights by batch gradient descent.

    Prints the cost every 100 iterations and returns the final theta
    (shape (n_features, 1)).
    """
    # initialize
    theta = np.zeros((X.shape[1], 1))
    for num_iter in range(1, num_iteration + 1):
        Y_hat, J = forward_prop(X, Y, theta)
        theta = backward_prop(X, Y, theta, Y_hat, learning_rate)
        if num_iter % 100 == 0:
            print(num_iter, J)
    return theta


def accuracy(X, Y, theta):
    """Fraction of examples whose rounded prediction matches the label."""
    return (sigmoid(X, theta).round().astype('int') == Y).astype('int').mean()


# # Initialize Input

train_X, train_Y = initial_data(train_data)

# # Train the model

theta = logistic_regression(train_X, train_Y)

# # Predict Test Data

test_X, _ = initial_data(test_data, answer_mode=True)

print(test_X)
print(theta)

sigmoid(test_X, theta)
logistic-regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.6 64-bit # name: python3 # --- # # Quadrupole # ## Imports import slhfdtd from matplotlib import pyplot as plt from math import cos, pi # ## Global Configurations plt.rcParams.update({"text.usetex" : True}) # ## Point Source # + solver = slhfdtd.Solver((2e-6, 2e-6, 0)) solver.add_source(slhfdtd.PointSource( (0.8e-6, 0.8e-6, 0), wavelength=1.5e-7, power=1.0, func=cos, phase=pi )) solver.add_source(slhfdtd.PointSource( (0.8e-6, 1.2e-6, 0), wavelength=1.5e-7, power=1.0, func=cos )) solver.add_source(slhfdtd.PointSource( (1.2e-6, 0.8e-6, 0), wavelength=1.5e-7, power=1.0, func=cos )) solver.add_source(slhfdtd.PointSource( (1.2e-6, 1.2e-6, 0), wavelength=1.5e-7, power=1.0, func=cos, phase=pi )) solver.add_boundary(slhfdtd.AutoPML()) solver.run(5e-15) visualizer = slhfdtd.Visualizer(solver) visualizer.set_aspect('equal') visualizer.set_interpolation_2d('none') visualizer.set_variables(norms='log') visualizer.plot2d_magnitude() # plt.savefig('quadrupole-point.pdf', bbox_inches='tight') # - # ## Line Source # + solver = slhfdtd.Solver((2e-6, 2e-6, 0)) solver.add_source(slhfdtd.LineSource((0.8e-6, 0.8e-6, 0), (0.8e-6, 1.2e-6, 0), wavelength=1.5e-7, power=1.0, func=cos, phase=pi )) solver.add_source(slhfdtd.LineSource((1.2e-6, 0.8e-6, 0), (1.2e-6, 1.2e-6, 0), wavelength=1.5e-7, power=1.0, func=cos, phase=pi )) solver.add_source(slhfdtd.LineSource((0.8e-6, 0.8e-6, 0), (1.2e-6, 0.8e-6, 0), wavelength=1.5e-7, power=1.0, func=cos )) solver.add_source(slhfdtd.LineSource((0.8e-6, 1.2e-6, 0), (1.2e-6, 1.2e-6, 0), wavelength=1.5e-7, power=1.0, func=cos )) solver.add_boundary(slhfdtd.AutoPML()) solver.run(5e-15) visualizer = slhfdtd.Visualizer(solver) visualizer.set_aspect('equal') visualizer.set_interpolation_2d('none') visualizer.set_variables(norms='log') visualizer.plot2d_magnitude() # 
plt.savefig('quadrupole-line.pdf', bbox_inches='tight')
simulations/quadrupole.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import matplotlib.pyplot as plt import numpy as np # %matplotlib inline # # Why text data is important # ## E-Mail # ``` # FROM: <NAME> # DIRECTOR AUDITING / ACCOUNTING UNIT. # GUINEA-CONAKRY # # THIS MESSAGE MIGHT MEET YOU IN UTMOST SUPRISE. HOWEVER IT'S JUST MY URGENT NEED FOR A FOREIGN PARTNER THAT MADE ME CONTACT YOU FOR THIS TRANSACTION. # # I AM A BANKER BY PROFESSION FROM GUINEA-CONAKRY IN WEST AFRICA AND CURRENTLY HOLDING THE POST OF DIRECTOR AUDITING AND ACCOUNTING UNIT OF THE BANK. # # I HAVE THE OPPORTUNITY OF TRANSFERING THE LEFT OVER FUNDS $4.7M (FOUR MILLION SEVEN HUNDREDTHOUSAND DOLLARS) OF ONE OF MY BANK CLIENTS WHO DIED ALONGSIDE WITH HIS ENTIRE FAMILY ON 31 ST, JULY 2000 IN A PLANE CRASH. # # HENCE, I AM INVITING YOU FOR A MUTUAL BUSINESS OPPORTUNITY WHERE THIS MONEY CAN BE SHARED BETWEEN US IN THE RATION OF 60/40? 40% FOR YOU AND 60% FOR ME # # IF YOU AGREE TO MY BUSINESS PROPOSAL. FURTHER DETAIL OF THE TRANSACTION WILL BE FORWARDED TO YOU AS SOON AS I RECEIVE YOUR RETURN MAIL AND REPLY TO ME IMMEDIATELY. # # NOTE; DO NOT IGNORE THIS MAIL IF YOU SEE IT IN YOUR SPAM. WE HAVE POOR INTERNET NETWORK HERE IN AFRICA, FOR THIS REASON MY FIRST MAIL TO YOU MAY ENTER SPAM INSTEAD YOUR INBOX. # # NOW MY QUESTIONS ARE:- # # 1 CAN YOU HANDLE THIS PROJECT? .. # 2. CAN I GIVE YOU THIS TRUST? .. # # THANKS AND BEST REGARDS. # <NAME>. # ``` # ## Social Media import pandas as pd flu = pd.read_csv("flu_trends.txt", index_col="Date") flu['United States'].plot(rot=60) # ## Medical Records # ## Customer Service
scikit/Chapter 7/Text Data Motivation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Modelling a non-stationary poisson process # # >A non-stationary Poisson process (NSPP) is an arrival process with an arrival rate that varies by time. # # One of the limitations of queuing theory is the difficulty of modelling time-dependent arrivals. Computer # simulation offer a number of ways of modelling non-stationary arrivals. # # In this lab you will learn: # # * How to implement the thinning algorithm to model a non-stationary poisson process (NSPP) # # --- # # Imports # + import pandas as pd import numpy as np import itertools import simpy # please use simpy version 4 simpy.__version__ # - # ---- # ## An example NSPP # # The table below breaks an arrival process down into 60 minutes intervals. # # # | t(min) | Mean time between arrivals (min) | Arrival Rate $\lambda(t)$ (arrivals/min) | # |:------:|:--------------------------------:|:--------------------------------------:| # | 0 | 15 | 1/15 | # | 60 | 12 | 1/12 | # | 120 | 7 | 1/7 | # | 180 | 5 | 1/5 | # | 240 | 8 | 1/8 | # | 300 | 10 | 1/10 | # | 360 | 15 | 1/15 | # | 420 | 20 | 1/20 | # | 480 | 20 | 1/20 | # # > **Interpretation**: In the table above the fastest arrival rate is 1/5 customers per minute or 5 minutes between customer arrivals. # ## Thinning # # Thinning is a acceptance-rejection sampling method and is used to generate interarrival times from a NSPP. # # > A NSPP has arrival rate $\lambda(t)$ where $0 \leq t \leq T$ # **The thinning algorithm** # # Here $i$ is the arrival number and $\mathcal{T_i}$ is its arrival time. # # 1. Let $\lambda^* = \max_{0 \leq t \leq T}\lambda(t)$ be the maximum of the arrival rate function and set $t = 0$ and $i=1$ # # 2. 
Generate $e$ from the exponential distribution with rate $\lambda^*$ and let $t = t + e$ (this is the time at which the next entity will arrive)
#
# 3. Generate $u$ from the $U(0,1)$ distribution. If $u \leq \dfrac{\lambda(t)}{\lambda^*}$ then $\mathcal{T_i} =t$ and $i = i + 1$
#
# 4. Go to Step 2.

# # Exercise 1: simulation **without thinning**
#
# **Task:**
# * Build a simple simpy model that simulates time-dependent arrivals
# * For this exercise please **IGNORE** the need for a thinning process.
#
# **Optional task:**
# * It is useful to set the sampling of arrivals using a random seed. This will allow you to compare the number of arrivals before and after adding thinning. **Remember that an issue with DES without thinning occurs when moving from a period $t$ with a low arrival rate to $t+1$ that has a high one.**
#
# **Hints:**
# * Build your model up gradually.
# * Start by building a model that simulates exponential arrivals using a single mean inter-arrival time, then add in logic to change which mean you use depending on the simulation time.
# * The logic to decide the time period is equivalent to asking yourself "given `env.now()`, and that arrival rates are split into 60-minute chunks, which row of my dataframe should I select?"
# * To simplify the task, set the run length of the simulation to no more than 540 minutes. For an extra challenge, think about how you would run the model for longer than 480 minutes and loop back round to the first period (the code to do this is surprisingly simple).
#
# The data are stored in a file `data/nspp_example1.csv`.

# +
# your code here ...
# -

# ## Exercise 2: Thinning the arrivals
#
# **Task:**
# * Update your exercise 1 code to include an implementation of thinning
# * What do you notice about the total number of arrivals compared to the previous example? Why has the change occurred?
# * If you are not controlling your sampling with random seeds you will need to run each implementation a few times.
# # **Hints:** # * You will need a second distribution - Uniform(0, 1) to do the thinning. If you are controlling random sampling through seeds that means you will need a second seed. # # + # your code here ...
labs/simulation/lab4/sim_lab4_nspp_STUDENT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Demo: process synchronization # The basic steps to set up an OpenCLSim simulation are: # * Import libraries # * Initialise simpy environment # * Define object classes # * Create objects # * Create sites # * Create vessels # * Create activities # * Register processes and run simpy # # ---- # # Next to the synchronization of activities using controll structures like **while** and **sequence** used within a single process, OpenCLSim allows to synchronize different processes using **start_events**. Start_events can be specified using the expression language as documented to **delay** the execution of a process. Control structures do not delay the execution but explicitly start the execution when possible. The difference is very well visible when you compare the activity log in this demo with the one from the sequence demo. # #### 0. Import libraries # + import datetime, time import simpy import shapely.geometry import pandas as pd import openclsim.core as core import openclsim.model as model import openclsim.plot as plot # - # #### 1. Initialise simpy environment # setup environment simulation_start = 0 my_env = simpy.Environment(initial_time=simulation_start) # #### 2. Define object classes # In this example we won't use object classes # #### 3. Create objects # ##### 3.1. Create site object(s) # No site objects are created # ##### 3.2. Create vessel object(s) # No vessel objects are created # ##### 3.3 Create activity/activities # The two activities are started in parallel, but the execution of Activity2 is delayed until Activity1 has been completed. To easier see how the two activities are interrelated a reporting activity is added. 
# initialise registry registry = {} # + # create a reporting activity reporting_activity = model.BasicActivity( env=my_env, name="Reporting activity", registry=registry, duration=0, ) # create two basic activities activity1 = model.BasicActivity( env=my_env, name="Activity1", registry=registry, additional_logs=[reporting_activity], duration=20, ) activity2 = model.BasicActivity( env=my_env, name="Activity2", registry=registry, additional_logs=[reporting_activity], start_event=[{"type": "activity", "name": "Activity1", "state": "done"}], duration=40, ) # - # #### 4. Register processes and run simpy # initate the simpy processes defined in activity1 and activity2 and run simpy model.register_processes([reporting_activity, activity1, activity2]) my_env.run() # #### 5. Inspect results # ##### 5.1 Inspect logs display(plot.get_log_dataframe(reporting_activity, [reporting_activity, activity1, activity2])) # Both activities start at the same time. Activity2 gets into a waiting state, which ends, when Activity1 ends. Then Activity2 is executed. # ##### 5.2 Visualise gantt charts plot.get_gantt_chart([reporting_activity, activity1, activity2])
notebooks/11_Process_synchronization.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,md:myst # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="zMIrmiaZxiJC" # # Automatic vectorization in JAX # # [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/jax/blob/master/docs/jax-101/03-vectorization.ipynb) # # *Authors: TODO* # # In the previous section we discussed JIT compilation via the `jax.jit` function. This notebook discusses another of JAX's transforms: vectorization via `jax.vmap`. # + [markdown] id="Kw-_imBrx4nN" # ## Manual Vectorization # # Consider the following simple code that computes the convolution of two one-dimensional vectors: # + id="5Obro91lwE_s" outputId="061983c6-2faa-4a54-83a5-d2a823f61087" import jax import jax.numpy as jnp x = jnp.arange(5) w = jnp.array([2., 3., 4.]) def convolve(x, w): output = [] for i in range(1, len(x)-1): output.append(jnp.dot(x[i-1:i+2], w)) return jnp.array(output) convolve(x, w) # + [markdown] id="z_nPhEhLRysk" # Suppose we would like to apply this function to a batch of weights `w` to a batch of vectors `x`. # + id="rHQJnnrVUbxE" xs = jnp.stack([x, x]) ws = jnp.stack([w, w]) # + [markdown] id="ghaJQW1aUfPi" # The most naive option would be to simply loop over the batch in Python: # + id="yM-IycdlzGyJ" outputId="07ed6ffc-0265-45ef-d585-4b5fa7d221f1" def manually_batched_convolve(xs, ws): output = [] for i in range(xs.shape[0]): output.append(convolve(xs[i], ws[i])) return jnp.stack(output) manually_batched_convolve(xs, ws) # + [markdown] id="VTh0l_1SUlh4" # This produces the correct result, however it is not very efficient. # # In order to batch the computation efficiently, you would normally have to rewrite the function manually to ensure it is done in vectorized form. 
This is not particularly difficult to implement, but does involve changing how the function treats indices, axes, and other parts of the input. # # For example, we could manually rewrite `convolve()` to support vectorized computation across the batch dimension as follows: # + id="I4Wd9nrcTRRL" outputId="0b037b43-7b41-4625-f9e0-a6e0dbc4c65a" def manually_vectorized_convolve(xs, ws): output = [] for i in range(1, xs.shape[-1] -1): output.append(jnp.sum(xs[:, i-1:i+2] * ws, axis=1)) return jnp.stack(output, axis=1) manually_vectorized_convolve(xs, ws) # + [markdown] id="DW-RJ2Zs2QVu" # Such re-implementation is messy and error-prone; fortunately JAX provides another way. # + [markdown] id="2oVLanQmUAo_" # ## Automatic Vectorization # # In JAX, the `jax.vmap` transformation is designed to generate such a vectorized implementation of a function automatically: # + id="Brl-BoTqSQDw" outputId="af608dbb-27f2-4fbc-f225-79f3101b13ff" auto_batch_convolve = jax.vmap(convolve) auto_batch_convolve(xs, ws) # + [markdown] id="7aVAy7332lFj" # It does this by tracing the function similarly to `jax.jit`, and automatically adding batch axes at the beginning of each input. # # If the batch dimension is not the first, you may use the `in_axes` and `out_axes` arguments to specify the location of the batch dimension in inputs and outputs. These may be an integer if the batch axis is the same for all inputs and outputs, or lists, otherwise. 
# + id="_VEEm1CGT2n0" outputId="751e0fbf-bdfb-41df-9436-4da5de23123f" auto_batch_convolve_v2 = jax.vmap(convolve, in_axes=1, out_axes=1) xst = jnp.transpose(xs) wst = jnp.transpose(ws) auto_batch_convolve_v2(xst, wst) # + [markdown] id="-gNiLuxzSX32" # `jax.vmap` also supports the case where only one of the arguments is batched: for example, if you would like to convolve to a single set of weights `w` with a batch of vectors `x`; in this case the `in_axes` argument can be set to `None`: # + id="2s2YDsamSxki" outputId="5c70879b-5cce-4549-e38a-f45dbe663ab2" batch_convolve_v3 = jax.vmap(convolve, in_axes=[0, None]) batch_convolve_v3(xs, w) # + [markdown] id="bsxT4hA6RTCG" # ## Combining transformations # # As with all JAX transformations, `jax.jit` and `jax.vmap` are designed to be composable, which means you can wrap a vmapped function with `jit`, or a JITted function with `vmap`, and everything will work correctly: # + id="gsC-Myg0RVdj" outputId="cbdd384e-6633-4cea-b1a0-a01ad934a768" jitted_batch_convolve = jax.jit(auto_batch_convolve) jitted_batch_convolve(xs, ws)
docs/jax-101/03-vectorization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 8 # Last week we used the improved Euler method to numerically solve a DE of the form # \begin{align*} # \frac{\mathrm{d}\mathbf{y}}{\mathrm{d}t} = f(\mathbf{y}, t), # \end{align*} # where $\mathbf{y}$ is a vector of dependent variables and $f$ is a linear function of $\mathbf{y}$. # # This week we will use the SciPy `odeint` function in place of our implmentation. There are several advantages to using SciPy code, including that it is well-tested, provides a greater range of features, and uses more advanced techniques. One disadvantage is that it makes our code dependendent on SciPy, but it's worth it. # # We will also show how you can approximate solutions to systems of DEs that you may not be able to solve analytically. # # ## Setup from numpy import arange, empty, exp, array, linspace, isclose, allclose, sin, pi from scipy.integrate import odeint from scipy.optimize import minimize from plotly.offline import init_notebook_mode from plotly import graph_objs as go # ## A Nonlinear DE # # Imagine that the number of rabbits in an area as a function of time is modelled by $r(t)$ and the number of foxes in the same area at the same time is given by $f(t)$. We can modelled their linked fates using the coupled DEs # \begin{align*} # \frac{\mathrm{d}r}{\mathrm{d}t} &= 0.08 r - 0.0004 rf, \\ # \frac{\mathrm{d}f}{\mathrm{d}t} &= -0.09 f + 0.0005 rf, # \end{align*} # with initial conditions $r_0 = 300$ and $f_0 = 100$. See Section 6.3 (and the rest of Chapter 6) if you are curious about where this equation comes from. # # For us, it is important to note that # # - this system of DEs cannot be represented as matrix multiplied by a $(r\, f)^\intercal$ vector and # - we have not studied how to solve this equation. 
# # We can still define it as a Python function, however. Note that in the following function `rf` is a variable that contains $r$ and $f$ as a vector (actually a NumPy `array`, but it is one-dimensional so we call it a vector). def drfdt(rf, t): r, f = rf drdt = 0.08*r - 0.0004*r*f dfdt = -0.09*f + 0.0005*r*f return array([drdt, dfdt]) # `odeint` works almost exactly the same as `euler_improved` did, but it takes the values of $t$ for which you would like $r$ and $f$ as inputs. Note that the initial values are input as `[300, 100]`, and that the first element of $t$ must correspond to the time of those initial values (in this case `t[0] == 0`). t = linspace(0, 100, 100) rf = odeint(drfdt, [300, 100], t) fig = go.Figure() fig.add_trace(go.Scatter(x=t, y=rf[:,0], name='rabbits')) fig.add_trace(go.Scatter(x=t, y=rf[:,1], name='foxes')) fig.show('png') # You do not have to specify step sizes for `odeint` (like you did for `euler_improved`). It figures that out for itself. In fact, if you want to know how many rabbits and foxes that you will have after 40 years, you can call it with just that input: rf = odeint(drfdt, [300, 100], [0, 40]) print(rf[-1, 0], 'rabbits') print(rf[-1, 1], 'foxes') # If you wanted to figure out exactly when the number of rabbits reaches its lowest ebb, you can minimize the result numerically, again using SciPy. # + def rabbits(t): return odeint(drfdt, [300, 100], [0, t])[-1, 0] result = minimize(rabbits, 40) print(result) # - # You get a heap of diagnostic information from `minimize`, but the most important thing is that "Optimization terminated successfully" and that the value is stored in `x`: print('rabbits rebounded after', result.x[0], 'years') # + [markdown] nbgrader={"grade": false, "grade_id": "cell-158d64fc0018ba47", "locked": true, "schema_version": 1, "solution": false} # # Exercises # We will now numerically solve a differential equation that we cannot solve analytically. 
# + [markdown] nbgrader={"grade": false, "grade_id": "cell-1e3de6c112ff6928", "locked": true, "schema_version": 1, "solution": false} # Pendulum motion can be defined as # \begin{align*} # \frac{\mathrm{d}^2\theta}{\mathrm{d}t^2} = -\frac{g}{L}\sin\theta, # \end{align*} # where $g$ is gravitational acceleration, $L$ is the length of the pendulum, and $\theta$ is the # angle the pendulum makes with the vertical as shown in the figure. # # <div style="align: left; text-align:center;"> # <img src="images/hold-your-colour.png" style="width: 150px;"/> # Figure 1: A simple pendulum # </div> # # The above equation is a second-order nonlinear differential equation and we don’t have a # way to solve this equation algebraically. That is, we can’t use the characteristic equation # method or method of undetermined coefficients to solve this equation. # # We can, however, convert it into a system of first-order DEs and find an approximate solution using `odeint`. By setting $v=\mathrm{d}\theta/\mathrm{d}t$, we obtain the equivalent formulation # \begin{align*} # \frac{\mathrm{d}\theta}{\mathrm{d}t} &= v \\ # \frac{\mathrm{d}v}{\mathrm{d}t} &= -\frac{g}{L}\sin\theta. # \end{align*} # Suppose that $g/L = 10$. Write a function to calculate the vector # $\left(\begin{array}{1}\frac{\mathrm{d}\theta}{\mathrm{d}t}\\ # \frac{\mathrm{d}v}{\mathrm{d}t}\end{array}\right)$ as a function of $\left(\begin{array}{1}\theta\\v\end{array}\right)$ and $t$. (Note that NumPy does not care whether you return a column or row vector. Usually it figures out what you mean from the context.) # + nbgrader={"grade": false, "grade_id": "cell-908179848de3bcb8", "locked": false, "schema_version": 1, "solution": true} def dthetavdt(thetav, t): ### YOUR IMPLEMENTATION HERE # + [markdown] nbgrader={"grade": false, "grade_id": "cell-4d0cf830f39a1ec9", "locked": true, "schema_version": 1, "solution": false} # The following cell should run without errors. 
# + nbgrader={"grade": true, "grade_id": "cell-c16beaf3058c0527", "locked": true, "points": 1, "schema_version": 1, "solution": false} assert allclose(dthetavdt([pi/2, 0], 0), [0, -10.0]) assert allclose(dthetavdt([pi/12, 0], 0), [0, -2.5881904510252074]) assert allclose(dthetavdt([0, 1], 0), [1, 0]) # + [markdown] nbgrader={"grade": false, "grade_id": "cell-211a768590db0864", "locked": true, "schema_version": 1, "solution": false} # Use `odeint` to plot $\theta$ on the interval $0\leq t \leq 2.5$ when $\theta_0=\frac{\pi}{12}$ and $v_0=0$. # + nbgrader={"grade": true, "grade_id": "cell-281064131bd9c79e", "locked": false, "points": 1, "schema_version": 1, "solution": true} # + [markdown] nbgrader={"grade": false, "grade_id": "cell-f9f84ae9eaf1753d", "locked": true, "schema_version": 1, "solution": false} # In the following cell, calculate and print the values for $\theta(2.5)$ and $v(2.5)$ when $\theta_0=\frac{\pi}{12}$ and $v_0=0$. # + nbgrader={"grade": true, "grade_id": "cell-9201b6b15a7dfc5f", "locked": false, "points": 1, "schema_version": 1, "solution": true} # + [markdown] nbgrader={"grade": false, "grade_id": "cell-8a4f7e821b9ccd45", "locked": true, "schema_version": 1, "solution": false} # Plot $\theta$ on the interval $0\leq t \leq 2.5$ when $\theta_0=\frac{\pi}{2}$ and $v_0=0$. # + nbgrader={"grade": true, "grade_id": "cell-09abeb6f5f5f3d53", "locked": false, "points": 1, "schema_version": 1, "solution": true} # + [markdown] nbgrader={"grade": false, "grade_id": "cell-488f25a8b588d886", "locked": true, "schema_version": 1, "solution": false} # In the following cell, calculate and print the values for $\theta(2.5)$ and $v(2.5)$ when $\theta_0=\frac{\pi}{2}$ and $v_0=0$. # + nbgrader={"grade": true, "grade_id": "cell-2cd9188a5f40004a", "locked": false, "points": 1, "schema_version": 1, "solution": true}
notebooks/lab-08.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: py36
#     language: python
#     name: py36
# ---

# # A simple parameter exploration
#
# This notebook demonstrates a very simple parameter exploration of a custom function that we have defined. It is a simple function that returns the distance to a unit circle, so we expect our parameter exploration to resemble a circle.

# +
# change to the root directory of the project
import os
if os.getcwd().split("/")[-1] == "examples":
    os.chdir('..')

# This will reload all imports as soon as the code changes
# %load_ext autoreload
# %autoreload 2

# +
try:
    import matplotlib.pyplot as plt
except ImportError:
    import sys
    # !{sys.executable} -m pip install matplotlib
    import matplotlib.pyplot as plt
import numpy as np

from neurolib.utils.parameterSpace import ParameterSpace
from neurolib.optimize.exploration import BoxSearch
# -

# ## Define the evaluation function
# Here we define a very simple evaluation function. The function needs to take in `traj` as an argument, which is the pypet trajectory. This is how the function knows what parameters were assigned to it. Using the builtin function `search.getParametersFromTraj(traj)` we can then retrieve the parameters for this run. They are returned as a dictionary and can be accessed in the function.
#
# In the last step, we use `search.saveOutputsToPypet(result_dict, traj)` to save the results to the pypet trajectory and to an HDF. In between, the computational magic happens!

def explore_me(traj):
    """Evaluation function for one exploration run.

    NOTE: relies on the module-level ``search`` created in the next cell;
    this forward reference works because the function is only called once
    ``search.run()`` executes.
    """
    pars = search.getParametersFromTraj(traj)
    # let's calculate the distance to a circle
    computation_result = abs((pars['x']**2 + pars['y']**2) - 1)
    result_dict = {"distance" : computation_result}
    search.saveOutputsToPypet(result_dict, traj)

# ## Define the parameter space and exploration
#
# Here we define which space we want to cover. For this, we use the builtin class `ParameterSpace` which provides a very easy interface to the exploration. To initialize the exploration, we simply pass the evaluation function and the parameter space to the `BoxSearch` class.

parameters = ParameterSpace({"x": np.linspace(-2, 2, 2), "y": np.linspace(-2, 2, 2)})
# info: chose np.linspace(-2, 2, 40) or more, values here are low for testing
search = BoxSearch(evalFunction = explore_me, parameterSpace = parameters, filename="example-1.1.hdf")

# ## Run
#
# And off we go!

search.run()

# ## Get results
#
# We can easily obtain the results from pypet. First we call `search.loadResults()` to make sure that the results are loaded from the hdf file to our instance.

search.loadResults()

print("Number of results: {}".format(len(search.results)))

# The runs are also ordered in a simple pandas dataframe called `search.dfResults`. We cycle through all results by calling `search.results[i]` and loading the desired result (here the distance to the circle) into the dataframe

# +
# Collect each run's scalar result into the summary dataframe.
for i in search.dfResults.index:
    search.dfResults.loc[i, 'distance'] = search.results[i]['distance']

search.dfResults
# -

# And of course a plot can visualize the results very easily.

plt.imshow(search.dfResults.pivot_table(values='distance', index = 'x', columns='y'), \
           extent = [min(search.dfResults.x), max(search.dfResults.x),
                     min(search.dfResults.y), max(search.dfResults.y)], origin='lower')
plt.colorbar(label='Distance to the unit circle')
examples/example-1.1-custom-parameter-exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="NooAiTdnafkz" # # Data Science Unit 1 Sprint Challenge 3 # # ## Exploring Data, Testing Hypotheses # # In this sprint challenge you will look at a dataset of people being approved or rejected for credit. # # https://archive.ics.uci.edu/ml/datasets/Credit+Approval # # Data Set Information: This file concerns credit card applications. All attribute names and values have been changed to meaningless symbols to protect confidentiality of the data. This dataset is interesting because there is a good mix of attributes -- continuous, nominal with small numbers of values, and nominal with larger numbers of values. There are also a few missing values. # # Attribute Information: # - A1: b, a. # - A2: continuous. # - A3: continuous. # - A4: u, y, l, t. # - A5: g, p, gg. # - A6: c, d, cc, i, j, k, m, r, q, w, x, e, aa, ff. # - A7: v, h, bb, j, n, z, dd, ff, o. # - A8: continuous. # - A9: t, f. # - A10: t, f. # - A11: continuous. # - A12: t, f. # - A13: g, p, s. # - A14: continuous. # - A15: continuous. # - A16: +,- (class attribute) # # Yes, most of that doesn't mean anything. A16 is a variable that indicates whether or not a person's request for credit has been approved or denied. This is a good candidate for a y variable since we might want to use the other features to predict this one. The remaining variables have been obfuscated for privacy - a challenge you may have to deal with in your data science career. # # Sprint challenges are evaluated based on satisfactory completion of each part. It is suggested you work through it in order, getting each aspect reasonably working, before trying to deeply explore, iterate, or refine any given step. Once you get to the end, if you want to go back and improve things, go for it! 
# + [markdown] colab_type="text" id="5wch6ksCbJtZ" # ## Part 1 - Load and validate the data # # - Load the data as a `pandas` data frame. # - Validate that it has the appropriate number of observations (you can check the raw file, and also read the dataset description from UCI). # - UCI says there should be missing data - check, and if necessary change the data so pandas recognizes it as na # - Make sure that the loaded features are of the types described above (continuous values should be treated as float), and correct as necessary # # This is review, but skills that you'll use at the start of any data exploration. Further, you may have to do some investigation to figure out which file to load from - that is part of the puzzle. # # Hint: If a column has the datatype of "object" even though it's made up of float or integer values, you can coerce it to act as a numeric column by using the `pd.to_numeric()` function. # + colab_type="code" id="Q79xDLckzibS" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="38024792-e63d-4268-f474-2f9dc9e0a7ec" # !wget https://archive.ics.uci.edu/ml/machine-learning-databases/credit-screening/crx.data # + id="lq4FvQ8KV4Lb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="40ee0c30-5d53-42a3-edf7-54b2cbabf5a3" import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv('crx.data', header=None, names=[ 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9', 'A10', 'A11', 'A12', 'A13', 'A14', 'A15', 'A16' ]) print(df.shape) df.head(100) # + [markdown] id="iFj5A2FRXD_J" colab_type="text" # The number of instances match the number of rows. It also matches the number of columns that equals to 15 + the class attribute. The documentation provided for this dataset was kind enough to also tell us the number of null values. I will proceed to verify that it is correct. 
# # A1: 12 # A2: 12 # A4: 6 # A5: 6 # A6: 9 # A7: 9 # A14: 13 # + id="3TwnCGCfWrjg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="9e80be50-638d-47f8-dadb-8f576ea6db9e" df.isnull().sum() #It appears I will have to use '?' as a null parameter. Since the null values #only represent 5% of the data I will also proceed to not account for them #in my calculations and mention this in my final conclusion. # + id="_fS-mrulYdtq" colab_type="code" colab={} df = df.replace({'?':np.NaN}) # + id="bZ7Ajri8Yi-q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="8dafffb1-21cf-4d2d-91f5-3dd8866f36b6" df.isnull().sum() # + [markdown] id="LamrciVgYpju" colab_type="text" # This information matches up to the expected results now. Only A2 and A14 are continious data. I will make a note of what effect this amount of nulls might have in my findings. # + id="naP7ExFEZcwn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="230cff23-7921-4bc2-9b2c-591c7a81479c" df.dtypes #A2 and A14 need to be seen as float and INT in that order. # + id="44n9jT_zaIW6" colab_type="code" colab={} df['A2'] = pd.to_numeric(df.A2) df['A14'] = pd.to_numeric(df.A14) df['A11'] = pd.to_numeric(df.A11, downcast='float') df['A15'] = pd.to_numeric(df.A15, downcast='float') # + id="MknKZki9a6Lw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 343} outputId="dc722537-4b4c-4373-de03-af37bda0eb68" df.head(10) # + [markdown] colab_type="text" id="G7rLytbrO38L" # ## Part 2 - Exploring data, Testing hypotheses # # The only thing we really know about this data is that A16 is the class label. Besides that, we have 6 continuous (float) features and 9 categorical features. # # Explore the data: you can use whatever approach (tables, utility functions, visualizations) to get an impression of the distributions and relationships of the variables. 
In general, your goal is to understand how the features are different when grouped by the two class labels (`+` and `-`). # # For the 6 continuous features, how are they different when split between the two class labels? Choose two features to run t-tests (again split by class label) - specifically, select one feature that is *extremely* different between the classes, and another feature that is notably less different (though perhaps still "statistically significantly" different). You may have to explore more than two features to do this. # # For the categorical features, explore by creating "cross tabs" (aka [contingency tables](https://en.wikipedia.org/wiki/Contingency_table)) between them and the class label, and apply the Chi-squared test to them. [pandas.crosstab](http://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.crosstab.html) can create contingency tables, and [scipy.stats.chi2_contingency](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chi2_contingency.html) can calculate the Chi-squared statistic for them. # # There are 9 categorical features - as with the t-test, try to find one where the Chi-squared test returns an extreme result (rejecting the null that the data are independent), and one where it is less extreme. # # **NOTE** - "less extreme" just means smaller test statistic/larger p-value. Even the least extreme differences may be strongly statistically significant. # # Your *main* goal is the hypothesis tests, so don't spend too much time on the exploration/visualization piece. That is just a means to an end - use simple visualizations, such as boxplots or a scatter matrix (both built in to pandas), to get a feel for the overall distribution of the variables. # # This is challenging, so manage your time and aim for a baseline of at least running two t-tests and two Chi-squared tests before polishing. And don't forget to answer the questions in part 3, even if your results in this part aren't what you want them to be. 
# + [markdown] id="x5fITce5eucp" colab_type="text"
# ### My notes
#
# Here I will bring in a few of the functions I've worked with in past assignments and modify them to meet my needs here.

# + colab_type="code" id="_nqcgc0yzm68" colab={}
import scipy.stats as stats

def confidence_interval(data, confidence=0.95):
    """
    Calculate a confidence interval around a sample mean for given data.
    Using t-distribution and two-tailed test, default 95% confidence.

    Arguments:
      data - iterable (list or numpy array) of sample observations
      confidence - level of confidence for the interval

    Returns:
      tuple of (mean, lower bound, upper bound)
    """
    data = np.array(data)
    mean = np.mean(data)
    n = len(data)
    # NOTE(review): stats.sem drops NaNs (nan_policy='omit') while n = len(data)
    # still counts them, so the t critical value's degrees of freedom can be
    # slightly off when NaNs are present -- confirm against the intended
    # sample size.
    stderr = stats.sem(data, nan_policy='omit')
    #stderr = np.std(data, ddof=1) / np.sqrt(n)
    margin_of_error = stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
    #print(margin_of_error)
    return (mean, mean - margin_of_error, mean + margin_of_error)

def confidence_interval_for_column(positive_data, negative_data, column, confidence=.95):
    """
    Print the confidence intervals for `column` in both dataframes and plot
    the two distributions together with their confidence intervals, so the
    difference between the two groups can be compared in one graph.

    Confidence interval is 95% default. Use a float to change this interval;
    the interval should be less than 1.
    """
    # Report both intervals textually, then overlay the kernel-density plots
    # with interval lines (positive in blue, negative in orange, means in black).
    print('This interval is for positive')
    answer1 = confidence_interval(positive_data[column], confidence)
    print(answer1)
    print('This interval is for negative')
    answer2 = confidence_interval(negative_data[column], confidence)
    print(answer2)
    print('\nThe blue line represents positive in this graph\n The orange line negative')
    ax = sns.kdeplot(positive_data[column], legend=False)
    CI = confidence_interval(positive_data[column])
    plt.axvline(x=CI[1], color='#609EC9')
    plt.axvline(x=CI[2], color='#609EC9')
    c = plt.axvline(x=CI[0], color='k');
    ax2 = sns.kdeplot(negative_data[column], legend =False)
    CI2 = confidence_interval(negative_data[column])
    plt.axvline(x=CI2[1], color='#FF871E')
    plt.axvline(x=CI2[2], color='#FF871E')
    d = plt.axvline(x=CI2[0], color='k');
    plt.show();
    # NOTE(review): plt.show() returns None, so the function effectively
    # returns None after the figure has been displayed.
    return plt.show()

# + id="G4hA_bGBg4D5" colab_type="code" colab={}
#I decided to handle my NaN differently because it was interfering with my
#answers
# NOTE(review): `continuos` is only assigned in a later cell of this
# notebook, so these cells were evidently executed out of order -- verify
# before re-running the notebook top to bottom.
positive = df[df['A16'] == '+']
negative = df[df['A16'] == '-']
positive = positive[continuos].replace({np.NaN:0})
negative = negative[continuos].replace({np.NaN:0})

# + id="QW-k-6YwhK_Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="af1674b6-de21-4e1f-dbb0-b384512d975d"
positive.head()

# + id="bRT79zrPhOCK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="fe060c6d-30b0-4c73-ac55-5b9ea4dce9c3"
negative.head()

# + id="3CKNPCgIhRMI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="0863efe2-3cec-4dc4-fd1a-558f2cd6abfe"
#I want to make a condition to just select continuos and categorical
continuos_condition = df.dtypes == 'float64'
categorical_condition = df.dtypes == 'object'
continuos_condition
#decided to not keep trying this because of time constraint

# + id="vgrNm_2NiWH6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="744c304b-2cec-42c8-fdc1-0257f17c4f48"
# this variable allows me to filter
continuos from categorical for ease of use. continuos = ['A2', 'A3', 'A8', 'A14'] categorical = ['A1', 'A4', 'A5', 'A6', 'A7', 'A9', 'A10', 'A11', 'A12', 'A13', 'A15'] print(df[categorical].head()) df[continuos].head() # + id="VcB2P7CYlVRl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="68121c5e-c6a7-4e94-b88c-591739a43a38" # Here I graphed all the continuos data with the function I made earlier for i in range(df[continuos].shape[1]): confidence_interval_for_column(positive, negative, continuos[i], confidence=.95) # + [markdown] id="lCOKPLbHrU1q" colab_type="text" # I will run Ttest on A14 and A8 as I see they might give me the most significant p value. # + id="7y4a4v_yr0Cd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7174c819-8aa1-433c-f9e9-43d1284a47b2" stats.ttest_ind(positive['A14'], negative['A14'], nan_policy='omit') # + id="cFABlFNPrc76" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2f208864-2ac0-45a0-c17a-b0716a717b05" stats.ttest_ind(positive['A8'], negative['A8'], nan_policy='omit') # + id="EdfpWwZIsSWw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 166} outputId="6100f9c6-4fa4-450c-c7fd-8e829530396b" contingency_table_a6 = pd.crosstab( df["A16"], df['A6'], margins = True) contingency_table_a6 # + id="GYQ2Y52euRYp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="66ba6b4e-3576-48eb-b4a1-ffe9d71053b6" chi_squared, p_value, dof, expected = stats.chi2_contingency(contingency_table_a6) print(f"Chi-Squared: {chi_squared}") print(f"P-value: {p_value}") print(f"Degrees of Freedom: {dof}") print("Expected: \n", np.array(expected)) # + id="5NOPRHovt21R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 166} outputId="0f8caa98-3aa4-46a1-9dc2-a10ca08c864d" contingency_table_a7 = pd.crosstab( df["A16"], df['A7'], margins = True) contingency_table_a7 
# + id="3tYUpRdkuZ-M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="43aab37a-7d7b-4e64-df02-a330ff222ab8" chi_squared, p_value, dof, expected = stats.chi2_contingency(contingency_table_a7) print(f"Chi-Squared: {chi_squared}") print(f"P-value: {p_value}") print(f"Degrees of Freedom: {dof}") print("Expected: \n", np.array(expected)) # + [markdown] colab_type="text" id="ZM8JckA2bgnp" # ## Part 3 - Analysis and Interpretation # # Now that you've looked at the data, answer the following questions: # # - Interpret and explain the two t-tests you ran - what do they tell you about the relationships between the continuous features you selected and the class labels? # - Interpret and explain the two Chi-squared tests you ran - what do they tell you about the relationships between the categorical features you selected and the class labels? # - What was the most challenging part of this sprint challenge? # # Answer with text, but feel free to intersperse example code/results or refer to it from earlier. # + [markdown] colab_type="text" id="LIozLDNG2Uhu" # **Interpret and explain the two t-tests you ran - what do they tell you about the relationships between the continuous features you selected and the class labels?** # # # A14 with a less significant p value # Ttest_indResult(statistic=-2.6254915751389047, pvalue=0.008844680277712815) # # A8 with a lot more significant p value # Ttest_indResult(statistic=8.935819983773698, pvalue=3.6710537401601785e-18) # # + [markdown] id="UC_p_UFavgqa" colab_type="text" # --- # # ## Two Sample T-test # # 1) Null Hypothesis (boring hypothesis) default state # # No difference between positive and negative when tested with column A14. # # $\bar{x}_1 = \bar{x}_2$ # # 2) Alternative Hypothesis (interesting hypothesis) # # The mean of A14 is different between positive and negative. 
# # $\bar{x}_1 \neq \bar{x}_2$ # # 3) Confidence Level (The probability of seeing a true result in spite of random variability) # # 95% or .05 # # **A14 with a less significant p value** Ttest_indResult(statistic=-2.6254915751389047, pvalue=0.008844680277712815) # # 4) t-statistic: -2.6255 # # 5) p-value: .00884 # # Conclusion: # # Due to our test resulting in a t-statistic of -2.6255 and having a p-vlaue of .00884, we reject the null hypothesis that A14 get into positive and negative at a similar rate, and suggest that A14 does have an effect on the rate of positive and negative. # # --- # + [markdown] id="b2mMS3XKvxbc" colab_type="text" # --- # # ## Two Sample T-test # # 1) Null Hypothesis (boring hypothesis) default state # # No difference between positive and negative when tested with column A14. # # $\bar{x}_1 = \bar{x}_2$ # # 2) Alternative Hypothesis (interesting hypothesis) # # The mean of A8 is different between positive and negative. # # $\bar{x}_1 \neq \bar{x}_2$ # # 3) Confidence Level (The probability of seeing a true result in spite of random variability) # # 95% or .05 # # **A8 with a lot more significant p value** Ttest_indResult(statistic=8.935819983773698, pvalue=3.6710537401601785e-18) # # 4) t-statistic: 8.93 # # 5) p-value: 3.67105 e-18 # # Conclusion: # # Due to our test resulting in a t-statistic of 8.93 and having a p-vlaue of 3.67105 e-18, we have strong evidence to support rejecting the null hypothesis that A8 get into positive and negative at a similar rate, and suggest that A8 does have an effect on the rate of positive and negative. # # --- # + [markdown] id="gtg_BdepzqCp" colab_type="text" # ###Interpret and explain the two Chi-squared tests you ran - what do they tell you about the relationships between the categorical features you selected and the class labels? 
# + [markdown] id="ZEoYpANex_jO" colab_type="text" # --- # # ## Chi square test # # 1) Null Hypothesis (boring hypothesis) default state # # No difference between positive and negative when tested with column A7. # # $\bar{x}_1 = \bar{x}_2$ # # 2) Alternative Hypothesis (interesting hypothesis) # # The positive and negative rate is affected by A7. # # $\bar{x}_1 \neq \bar{x}_2$ # # 3) Confidence Level (The probability of seeing a true result in spite of random variability) # # 95% or .05 # # **A6 with a less significant p value** Chi-Squared: 45.03420714024056 # P-value: 0.0004097261730223732 # Degrees of Freedom: 18 # # 4) chi squared-statistic: 45.0342 # # 5) p-value: 0.0004 # # Conclusion: # # Due to our test resulting in a t-statistic of 45.0342 and having a p-vlaue of 0.0004, we reject the null hypothesis that A7 get into positive and negative at a similar rate, and suggest that A7 does have an effect on the rate of positive and negative. # # --- # + [markdown] id="ZlmZhgWIx_TC" colab_type="text" # --- # # ## Chi square test # # 1) Null Hypothesis (boring hypothesis) default state # # No difference between positive and negative when tested with column A6. # # $\bar{x}_1 = \bar{x}_2$ # # 2) Alternative Hypothesis (interesting hypothesis) # # The positive and negative rate is affected by A6. # # $\bar{x}_1 \neq \bar{x}_2$ # # 3) Confidence Level (The probability of seeing a true result in spite of random variability) # # 95% or .05 # # **A6 with a more significant p value** Chi-Squared: 98.32520342679135 # P-value: 9.446933633611132e-10 # # 4) chi squared-statistic: 98.33 # # 5) p-value: 9.4469 e-10 # # Conclusion: # # Due to our test resulting in a t-statistic of 98.33 and having a p-vlaue of 9.4469 e-10, we reject the null hypothesis that A6 get into positive and negative at a similar rate, and suggest that A6 does have an effect on the rate of positive and negative. 
# # --- # + [markdown] id="nXh0sL3-zmYo" colab_type="text" # ###What was the most challenging part of this sprint challenge? # + [markdown] id="WPGBsH7azvfq" colab_type="text" # I did not have much challenge with this sprint. I did however spend some time implementing some for loops that took some time to iterate. But other than that I understood this weeks content pretty well. # + [markdown] colab_type="text" id="krq6QeyrXs6b" # ## Stretch Goals # # Please do not work on anything listed in this section until you have adequately answered everything in the above three sections, all of these stretch goals are **OPTIONAL** but completing them will ensure you a score of 3 on the corresponding sections. # # ### Section 1 Stretch Goal: (do the following) # - Go above and beyond in conducting thorough data exploration including high quality comments and/or markdown text cells to explain your process/discoveries. # # ### Section 2 Stretch Goals: (do one of the following) # - Write a function(s) to automate the execution of t-tests on the continuous variables treating the different class labels as the two samples. # # - Write a funciton(s) to automate the execution of chi^2 tests on all of the different combinations of categorical variables in the dataset. # # ### Section 3 Stretch Goals: (do one of the following) # - Construct a confidence interval around the mean of one of the continuous variables, communicate the results/interpretation of that confidence interval in the most consumable/easy-to-understand way that you can. (You may choose to include a graph here, but you don't necessarily have to) Communicate the precision of your estimate as clearly as possible. # # - Explain the relationship between confidence intervals and T-tests, if possible, use code to demonstrate some important aspects of te
Sprint challenge/Jean Fraga DS_Unit_1_Sprint_Challenge_3_(2).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:eu-west-1:470317259841:image/datascience-1.0 # --- # # Module 7: Feature Monitoring # **This notebook leverages AWS Glue DataBrew to implement Feature Monitoring of Offline Feature Groups.** # # **Note:** Please set kernel to `Python 3 (Data Science)` and select instance to `ml.t3.medium` # # # --- # # ## Contents # # 1. [Overview](##Overview) # 2. [Feature Group Monitoring Preparation](#Feature-Group-Monitoring-Preparation) # 3. [Run Feature Group Monitoring using DataBrew Profile Job](#Run-Feature-Group-Monitoring-using-DataBrew-Profile-Job) # 4. [Visualisation of Feature Group statistics](#Visualisation-of-Feature-Group-statistics) # 5. [Clean up](#Clean-Up) # # # ## Overview # # In previous module (`Module-1 Introduction to SageMaker Feature Store`), we demonstrated how to create multiple features groups inside a Amazon SageMaker Feature Store and ingest data into it. # # In this notebook, we will illustrate how to implement feature monitoring using AWS Glue DataBrew to create feature statistics. # # [AWS Glue DataBrew](https://aws.amazon.com/glue/features/databrew/) is a visual data preparation tool that helps you clean and normalize data without writing code. DataBrew also allows customers to specify which data quality statistics to auto-generate for datasets when running a profile job. This allows users to customize data profile statistics such as determining duplicate values, correlations, and outliers based on the nature and size of their datasets, and create a custom data profile overview with only the statistics that meet their needs. 
# # We will cover the following aspects: # # * Preparation step to Feature Monitoring # * Executing Feature Monitoring using a DataBrew Profile job to generate statistics for a feature group # * Extract statistics from profile job execution and persist to S3 # * Additional visualisation of the feature group statistics # # ![Feature Monitoring Architecture](../images/feature_monitoring_architecture.png "Feature Monitoring Architecture") # --- # ## Prerequisites # Before continuing with this module, you need to have run `Module-1 Introduction to SageMaker Feature Store`. # ## Setup # #### Imports # %%capture # !pip install pyathena # + from sagemaker.feature_store.feature_group import FeatureGroup from sagemaker import get_execution_role import sagemaker import logging import boto3 import pandas as pd import time import re import os import sys from IPython.display import display, Markdown from time import gmtime, strftime #module containing utility functions for this notebook import feature_monitoring_utils # - logger = logging.getLogger('__name__') logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler()) logger.info(f'Using SageMaker version: {sagemaker.__version__}') # #### Retrieve Feature Group # In this notebook, we will be using and monitoring the orders feature group created in Module 1 of the workshop. In case you would like to use a different feature group, please uncomment and replace **ORDERS_FEATURE_GROUP_NAME** with the name of your Orders Feature Group. 
# + # Need to retreive FG names from previous FSCW notebooks # Retreive FG names # %store -r orders_feature_group_name #orders_feature_group_name = "<ORDERS_FEATURE_GROUP_NAME>" # Set up the results bucket location results_bucket=sagemaker.Session().default_bucket() # You might change this for a different s3 bucket results_key='aws-databrew-results/Offline-FS' # - # #### Update SageMaker Studio execution role # If you are running this notebook in Amazon SageMaker Studio, the IAM role assumed by your Studio user needs permission to perform Glue and DataBrew operations. To provide this permission to the role, do the following. # # 1. Open the [Amazon SageMaker console](https://console.aws.amazon.com/sagemaker/). # 2. Select Amazon SageMaker Studio and choose your user name. # 3. Under **User summary**, copy just the name part of the execution role ARN # 5. Go to the [IAM console](https://console.aws.amazon.com/iam) and click on **Roles**. # 6. Find the role associated with your SageMaker Studio user # 7. Under the Permissions tab, click **Attach policies** and add the following: **AWSGlueServiceRole**, **AWSGlueDataBrewFullAccessPolicy**, **AmazonAthenaFullAccess** # 8. Under Trust relationships, click **Edit trust relationship** and add the following JSON, # ``` # { # "Version": "2012-10-17", # "Statement": [ # { # "Effect": "Allow", # "Principal": { # "Service": "sagemaker.amazonaws.com" # }, # "Action": "sts:AssumeRole" # }, # { # "Effect": "Allow", # "Principal": { # "Service": "glue.amazonaws.com" # }, # "Action": "sts:AssumeRole" # }, # { # "Effect": "Allow", # "Principal": { # "Service": "databrew.amazonaws.com" # }, # "Action": "sts:AssumeRole" # } # ] # } # ``` # # ## Feature Group Monitoring Preparation # This function will execute a few steps to prepare the data before running DataBrew profiling job and generate statistics. 
# # * Create Snapshot of a Feature Group from Offline Feature Store (before creating using DataBrew profiling capability, we need to create a snapshot of the Feature from the Offline Feature Store which only keeps the latest version of a feature and removes records marked as to be deleted) # * Crawl the Snapshot table (the newly created table needs to be crawled to be accessible by DataBrew. The following function will create an AWS Glue Crawler, start the crawler) # * Create AWS Glue DataBrew dataset # * Create AWS Glue DataBrew Profile Job response_brew_prep = feature_monitoring_utils.feature_monitoring_prep( orders_feature_group_name, results_bucket, results_key, verbose = False ) # ## Run Feature Group Monitoring using DataBrew Profile Job # This section will run an AWS Glue DataBrew Profile (defined in the previous section). # #### Launch DataBrew Profile Job # Call the main profile execution function response_brew_job = feature_monitoring_utils.feature_monitoring_run( orders_feature_group_name, verbose=False ) # #### Retrieve Link to DataBrew Profile Job Visualizations # Users can access the statistics for a particular job run using the AWS DataBrew Console UI. The code below retrieves the URL link. # ![Feature Group Tag](../images/databrew_visualization.png "DataBrew Visualization") # # This link is also added as tag to the Feature Group (as per the picture below). 
# # ![Feature Group Tag](../images/feature_group_tags.png "Feature Group Tag") # # + # Display the Report S3 location databrew_profile_console_url = response_brew_job[2] brew_results_s3 = response_brew_job[4] print("Report is available at the following S3 location:\n" + brew_results_s3 + "\n") # Display the DataBrew link print("Please click on the link below to access visulizations in Glue DataBrew console:") databrew_link = f'[DataBrew Profile Job Visualizations]({databrew_profile_console_url})' display(Markdown(databrew_link)) # - # ### Ingest more rows into the feature group to simulate feature drift # # In order to see feature drift we simulate some data changes in the orders FG. The CSV file used for the ingestion was previously generated in 1st module. # # If the correspondent CSV file is not present at *../data/transformed/* folder, you will have to generate it by launching this notebook *m1_nb0_prepare_datasets.ipynb* from 1st module. # # If you want to simulate feature drift for another Feature Group you will need to customize this part and generate some changes to your data before launching the visualization section. 
# + # Provide the path to input CVS file csv_path = '.././data/transformed/orders.csv' orders_count = int(os.popen(f'wc -l {csv_path}').readline().split()[0]) - 1 nbrows = 0 nb_runs = 2 # Number of reports we want to generate nb_sample = int(orders_count*0.25/nb_runs) # Ingest rows and execute profile job for x in range(nb_runs): nbrows = nbrows + nb_sample # Ingest rows into FG from CSV file feature_monitoring_utils.ingest_rows_fg(orders_feature_group_name, csv_path, nbrows=nbrows) # Call the main profile execution function resp_job = feature_monitoring_utils.feature_monitoring_run(orders_feature_group_name, verbose=False) # - # ## Visualisation of Feature Group statistics # ### Overview # # The main goal of this notebook is to create visualizations of features and trends over time based on AWS Glue DataBrew quality statistics generated reports. # # ### Main steps # # - Consolidate historic monitor reports, generates a data frame and save CSV files in S3 # - Visualization of features statistics over time # # #### Creates a Pandas dataframe from Json reports and write CSV files to S3 # # These CVS files can be further used by other visualisation or BI tools # + start_date = '20210701' # input('Insert Date as a form YYYYmmdd') # Creates a Pandas dataframe from Json reports and write CSV files to S3 response_consolid = feature_monitoring_utils.consolidate_monitor_reports( orders_feature_group_name, results_bucket, results_key, start_date ) analysis_data = response_consolid[0] consolidated_s3_file = response_consolid[2] print(consolidated_s3_file) analysis_data.head() # - # #### Visualize the evolution in time for a particular feature # **purchase_amount's entropy drift over time** feature_name = 'purchase_amount' analysis_data_purchase_amount = feature_monitoring_utils.plot_profiling_data( analysis_data, feature_name, ['entropy'], kind='line' ) # **See two statistics (entropy and standardDeviation) for purchase_amount feature** # # **Separate plots** 
analysis_data_purchase_amount = feature_monitoring_utils.plot_profiling_data( analysis_data, feature_name, ['entropy','standardDeviation'], multiple_plots=True ) # **Same plot with different scales for each statistic** analysis_data_purchase_amount = feature_monitoring_utils.plot_profiling_data( analysis_data, feature_name, ['entropy','standardDeviation'] ) # **purchase_amount's entropy drift over time using a Bar Plot** # ## Clean up # **This section ensures you delete all AWS resources that was created as part of this workshop** # + # Clean up resources - crawler, DataBrew dataset, and profile job - as well as local analysis folder analysis_folder_name=response_consolid[4] response = feature_monitoring_utils.feature_monitoring_cleanup( orders_feature_group_name, analysis_folder_name ) # -
07-module-feature-monitoring/m7_nb1_feature_monitoring.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] colab_type="text" id="ncGMi7XXo-8g" # # Kapitel 5: Featureauswahl # + colab={} colab_type="code" id="tum7pL55o-8h" import warnings warnings.filterwarnings('ignore') # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="qPUJu4mOo-8k" outputId="66cb69b1-1d62-45cc-d337-6c571dae9e06" # %matplotlib inline # %pylab inline # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="iQF9mgcYo-8q" outputId="240588fa-9695-49d5-85c8-a11204421ef4" import sklearn print(sklearn.__version__) # + colab={} colab_type="code" id="DfcQU94uo-8u" import numpy as np # + colab={} colab_type="code" id="cjffiT9Po-8x" import matplotlib.pyplot as plt # + [markdown] colab_type="text" id="p5HliKB5o-8z" # ## Fluch der hohen Dimensionen # + colab={} colab_type="code" id="HNaFi456o-8z" n = 100 vmin = 0; vmax = 10 x1 = np.random.uniform(vmin, vmax, n) x2 = np.random.uniform(vmin, vmax, n) x3 = np.random.uniform(vmin, vmax, n) # + colab={} colab_type="code" id="gvuboZwlo-82" # #plt.hist? 
# + colab={"base_uri": "https://localhost:8080/", "height": 660} colab_type="code" id="VkIb2Y-Go-84" outputId="b1d8c08b-fc70-4b47-8a49-a9be56e60717" # Eine Dimension fig = plt.figure(figsize=(16, 11)) ax = fig.add_subplot(111) ax.hist(x1, alpha=0.6, edgecolor='black', lw=1, bins=np.arange(0, 11, 1)) ax.set_xlabel('X1') ax.set_ylabel('n samples') # fig.savefig('ML_0512.png', bbox_inches='tight') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 660} colab_type="code" id="eUKEnZPfo-86" outputId="6d118081-e3c7-44fc-deb8-f9f3129242b1" # Zwei Dimensionen fig = plt.figure(figsize=(16, 11)) ax = fig.add_subplot(111) ax.scatter(x1, x2, c="b", marker="o") ax.set_xlabel('X1') ax.set_ylabel('X2') ax.set_xticks(np.arange(0, 11, 1) ) ax.set_yticks(np.arange(0, 11, 1) ) ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.6) # fig.savefig('ML_0513.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 629} colab_type="code" id="wmk2Mw5Uo-88" outputId="e3ba42bc-8fe4-46d6-c20d-dd30436bde98" # Drei Dimensionen from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=(16, 11)) ax = fig.add_subplot(111, projection='3d') ax.scatter(x1, x2, x3, c="b", marker="o") # ax.plot_wireframe((3,4,4,3,3, 3,4,4,3,3, 3,3,4,4,4,4), # (5,5,6,6,5, 5,5,6,6,5, 6,6,6,6,5,5), # (2,2,2,2,2, 3,3,3,3,3, 3,2,2,3,3,2), # color='r', rstride=1, cstride=1, alpha=0.9) ax.set_xticks(np.arange(0, 11, 1) ) ax.set_yticks(np.arange(0, 11, 1) ) ax.set_zticks(np.arange(0, 11, 1) ) ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.6) ax.set_xlabel('X1') ax.set_ylabel('X2') ax.set_zlabel('X3') # fig.savefig('ML_0514.png', bbox_inches='tight') plt.show() # plt.clf() # + [markdown] colab_type="text" id="oJ0D3ChGo-8_" # ## Overfitting und Underfitting: Model-Komplexität vs Datenmenge # + colab={} colab_type="code" id="6rBG3mrWo-8_" np.random.RandomState(1) n_samples = 20 X = np.random.uniform(-2, 2, n_samples) y = X**3 + 
np.random.uniform(0, 2, n_samples) # + colab={"base_uri": "https://localhost:8080/", "height": 678} colab_type="code" id="If_D49vDo-9D" outputId="d4a79d14-f448-4d06-f8d2-594b23dfad87" fig, ax = plt.subplots(figsize=(11, 11)) print(X.shape, y.shape) plt.scatter(X, y, color='navy', s=30, marker='o') plt.xlabel('x') plt.ylabel('y') # fig.savefig('ML_0504.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 660} colab_type="code" id="cVljjox3o-9F" outputId="b8eb6bc8-836f-4d2e-e4f9-6623256fc28e" from sklearn.pipeline import make_pipeline from sklearn.linear_model import Ridge, LinearRegression from sklearn.preprocessing import PolynomialFeatures fig, ax = plt.subplots(figsize=(11, 11)) plt.scatter(X, y, color='navy', s=30, marker='o') x_plot = np.linspace(-2, 2, 100) poly_model = make_pipeline(PolynomialFeatures(3), LinearRegression()) poly_model.fit(X[:, np.newaxis], y) y_plot = poly_model.predict(x_plot[:, np.newaxis]) plt.plot(x_plot, y_plot, lw=2, color="red") plt.ylim(-12, 12) plt.xlabel('x') plt.ylabel('y') # fig.savefig('ML_0505.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 660} colab_type="code" id="qx9d0Lg1o-9H" outputId="14de6319-8e3c-4046-84ed-f143e14411c1" fig, ax = plt.subplots(figsize=(11, 11)) plt.scatter(X, y, color='navy', s=30, marker='o') poly_model = make_pipeline(PolynomialFeatures(1), LinearRegression()) poly_model.fit(X[:, np.newaxis], y) y_plot = poly_model.predict(x_plot[:, np.newaxis]) plt.plot(x_plot, y_plot, lw=2, color="red") plt.ylim(-9, 9) plt.xlabel('x') plt.ylabel('y') # fig.savefig('ML_0507.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 664} colab_type="code" id="m9gyCATFo-9J" outputId="1880e461-b905-45bd-abf1-a8091c959f27" fig, ax = plt.subplots(figsize=(11, 11)) plt.scatter(X, y, color='navy', s=30, marker='o') poly_model = 
make_pipeline(PolynomialFeatures(20), LinearRegression()) poly_model.fit(X[:, np.newaxis], y) y_plot = poly_model.predict(x_plot[:, np.newaxis]) plt.plot(x_plot, y_plot, lw=2, color="red") plt.ylim(-10, 10) plt.xlabel('x') plt.ylabel('y') # fig.savefig('ML_0506.png', bbox_inches='tight') plt.show() # plt.clf() # + [markdown] colab_type="text" id="2VeSYK7qo-9K" # ### Mehr Datensätze # + colab={} colab_type="code" id="T6d_sBJTo-9L" n_samples = 200 X = np.random.uniform(-2, 2, n_samples) y = X**3 + np.random.uniform(0, 2, n_samples) # + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="tnEhZ8qIo-9O" outputId="816a1831-4c17-41eb-cbc8-35140202af52" print(X.shape, y.shape) plt.scatter(X, y, color='navy', s=30, marker='o', label="training points") plt.xlabel('x') plt.ylabel('y') # fig.savefig('ML_0508.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 660} colab_type="code" id="d9sBpbCpo-9R" outputId="cf69de1d-b147-4332-abb5-64d6aa678ca8" fig, ax = plt.subplots(figsize=(11, 11)) plt.scatter(X, y, color='navy', s=30, marker='o', label="training points") poly_model = make_pipeline(PolynomialFeatures(3), LinearRegression()) poly_model.fit(X[:, np.newaxis], y) y_plot = poly_model.predict(x_plot[:, np.newaxis]) plt.plot(x_plot, y_plot, lw=2, color="red") plt.ylim(-12, 12) plt.xlabel('x') plt.ylabel('y') # fig.savefig('ML_0509.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 664} colab_type="code" id="dvzRCcsCo-9T" outputId="677cacbc-6b3f-490f-90c2-9cfc69704775" fig, ax = plt.subplots(figsize=(11, 11)) plt.scatter(X, y, color='navy', s=30, marker='o', label="training points") poly_model = make_pipeline(PolynomialFeatures(20), LinearRegression()) poly_model.fit(X[:, np.newaxis], y) y_plot = poly_model.predict(x_plot[:, np.newaxis]) plt.plot(x_plot, y_plot, lw=2, color="red") plt.ylim(-8, 8) plt.xlabel('x') plt.ylabel('y') # 
fig.savefig('ML_0510.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 660} colab_type="code" id="B9dJdby6o-9V" outputId="70a3e065-b6de-42c1-9fb7-31bd45a4d35e" fig, ax = plt.subplots(figsize=(11, 11)) plt.scatter(X, y, color='navy', s=30, marker='o', label="training points") poly_model = make_pipeline(PolynomialFeatures(1), LinearRegression()) poly_model.fit(X[:, np.newaxis], y) y_plot = poly_model.predict(x_plot[:, np.newaxis]) plt.plot(x_plot, y_plot, lw=2, color="red") plt.ylim(-9, 9) plt.xlabel('x') plt.ylabel('y') # fig.savefig('ML_0511.png', bbox_inches='tight') plt.show() # plt.clf() # + [markdown] colab_type="text" id="SGNT--o_o-9Y" # ## Univariate Feature Exploration # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="8mh52lDYo-9Y" outputId="5e31f1c4-c451-40cc-e0a4-4ece05f28ea9" from sklearn.datasets import load_iris from sklearn.svm import SVC iris = load_iris() X = iris.data y = iris.target print(X.shape, y.shape) import numpy as np import matplotlib.pyplot as plt svc = SVC(kernel='linear', C=1E0) print(X.shape, y.shape) n_classes = 3 colors = 'byr' CMAP = colors plot_step = 0.01 # Add random noise rns = np.random.RandomState(12) #noise1 = rns.lognormal(mean=1, size=(len(X), 1)) noise2 = rns.uniform(0, 6, size=(len(X), 1)) #X_noise = np.hstack([X, noise1]) X_noise = np.hstack([X, noise2]) # + colab={"base_uri": "https://localhost:8080/", "height": 755} colab_type="code" id="fcMKN8vZo-9a" outputId="94cbe773-9315-43d7-fea7-3256d2052239" Y_feature_names = iris.feature_names Y_target_names = iris.target_names Y_feature_names = np.append(Y_feature_names, 'noise1') #Y_feature_names = np.append(Y_feature_names, 'noise2') Y_target_names = np.append(Y_target_names, 'noise1') #Y_target_names = np.append(Y_target_names, 'noise2') #fig = plt.figure(1, figsize=(9, 16)) fig = plt.figure(1, figsize=(16, 9)) BINS = [] BINS.append(np.arange(4, 8, 0.1)) BINS.append(np.arange(2, 
5, 0.1)) BINS.append(np.arange(1, 7, 0.1)) BINS.append(np.arange(0, 3, 0.1)) BINS.append(np.arange(0, 6, 0.1)) #BINS.append(np.arange(0, 6, 0.1)) for fid in range(4): #for fid in range(5): X = X_noise[:, fid] y = iris.target #plt.subplot(3, 2, fid + 1) plt.subplot(2, 2, fid + 1) plt.xlabel(Y_feature_names[fid]) plt.ylabel('n examples') plt.axis("tight") for i, color in zip(range(n_classes), colors): idx = np.where(y == i) clf = svc.fit(X.reshape([150,1]), y) print(clf.score(X.reshape([150,1]), y)) plt.hist(X[idx], alpha=0.6, color=color, edgecolor='black', lw=1, label=Y_target_names[i], bins=BINS[fid]) if fid==3: plt.legend(loc='upper right') plt.axis("tight") plt.show() # fig.savefig('ML_0501.png', bbox_inches='tight') # plt.clf() # + [markdown] colab_type="text" id="hPXEwCqyo-9b" # ## Bivariate Feature Exploration # + colab={"base_uri": "https://localhost:8080/", "height": 553} colab_type="code" id="cCdZA0VXo-9b" outputId="a1315ff2-af3d-4abd-fcd6-7139c127cbdf" from scipy.stats import pearsonr Y_feature_names = iris.feature_names #Y_target_names = iris.target_names #Y_feature_names = np.append(Y_feature_names, 'noise1') #Y_feature_names = np.append(Y_feature_names, 'noise2') #Y_target_names = np.append(Y_target_names, 'noise1') #Y_target_names = np.append(Y_target_names, 'noise2') n_classes = 3 colors = 'byr' CMAP = colors plot_step = 0.01 #____________________________________________________________________ fig = plt.figure(1, figsize=(18, 9)) pos = [[6.2, 4.2], [4.5, 6.5], [7, 0.5], [3.5, 3], [3.5, 1], [5, 0.5]] for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]): X = iris.data[:, pair] y = iris.target plt.subplot(2, 3, pairidx + 1) plt.xlabel(iris.feature_names[pair[0]]) plt.ylabel(iris.feature_names[pair[1]]) plt.axis("tight") for i, color in zip(range(n_classes), colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=color, edgecolor='black', lw=2, label=iris.target_names[i], cmap=CMAP) r = "r = " + 
str(round(pearsonr(X[:, 0], X[:, 1])[0], 3)) plt.text(pos[pairidx][0], pos[pairidx][1], r) plt.axis("tight") plt.axis("tight") plt.legend(loc='upper left') plt.show() # fig.savefig('ML_0502.png', bbox_inches='tight') # plt.clf() # + [markdown] colab_type="text" id="YFFZBNy-o-9e" # ## Korrelation zwischen Feature und Target # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="vKuVNpILo-9f" outputId="c56b8185-809f-4085-f9ea-8de88b9576f8" from sklearn.datasets import load_iris import numpy as np from scipy.stats import pearsonr # pearson package from scipy iris = load_iris() # reload data X = iris.data y = iris.target for fid in (0, 1, 2, 3): # loop over all features idx = np.where( (y == 0) | (y == 1) ) x = X[idx] x = x[:, fid] print(iris.feature_names[fid], pearsonr(x, y[idx])[0]) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="D7Bfdkw6o-9i" outputId="555238c6-a7e5-4823-9ee2-42bcbf26d0cb" x = np.random.uniform(-1, 1, 1000) print(pearsonr(x, x**2)[0]) # + [markdown] colab_type="text" id="fl690nJfo-9j" # ## Principal Component Analyse # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="JwLnd5Kqo-9j" outputId="e480faa7-addc-400e-d42a-ad5e2ffb9ea9" import numpy as np import matplotlib.pyplot as plt rns = np.random.RandomState(12) size = 200 X = np.zeros((size, 2)) x1 = rns.uniform(0, 2, size=size) x2 = -1.1*x1+1.8 + rns.normal(0, 0.2, size=size) X[:, 0] = x1 X[:, 1] = x2 from sklearn.decomposition import PCA pca = PCA(n_components=2, whiten=True) pca.fit(X) print(pca.explained_variance_) print() print(pca.components_) print() print(pca.mean_) print() # + colab={"base_uri": "https://localhost:8080/", "height": 698} colab_type="code" id="tGknYWlxo-9l" outputId="b25a5ff3-7239-45db-f3f8-160d51dcb38b" fig = plt.figure(figsize=(16, 11)) plt.scatter(X[:, 0], X[:, 1]) arrowprops = dict(arrowstyle='->', linewidth=2, shrinkA=0, shrinkB=0) for length, vector in 
zip(pca.explained_variance_, pca.components_): print(vector) v = vector * 1 * np.sqrt(length) ax = plt.gca() ax.annotate('', pca.mean_ + v, pca.mean_, arrowprops=arrowprops) plt.axis('equal') plt.xlim(0, 2) plt.ylim(0, 2) plt.xlabel('x1') plt.ylabel('x2') # fig.savefig('ML_0515.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 664} colab_type="code" id="M--XsTOZo-9n" outputId="f0029650-7b82-461a-e80e-14df679f30b4" fig = plt.figure(figsize=(16, 11)) X_pca = pca.transform(X) plt.scatter(X_pca[:, 0], X_pca[:, 1]) plt.annotate('', [0, 2], [0, 0], arrowprops=arrowprops) plt.annotate('', [2, 0], [0, 0], arrowprops=arrowprops) plt.axis('equal') plt.xlim(-3, 3) plt.ylim(-3, 3) plt.xlabel('pca component 1') plt.ylabel('pca component 2') # fig.savefig('ML_0516.png', bbox_inches='tight') plt.show() # plt.clf() # + colab={"base_uri": "https://localhost:8080/", "height": 497} colab_type="code" id="dxr12DIzo-9p" outputId="7d30abee-f9c7-4729-9d8e-26924551b929" from sklearn.datasets import load_iris from sklearn.decomposition import PCA import matplotlib.pyplot as plt import numpy as np n_classes = 3 colors = 'byr' CMAP = colors iris = load_iris() X = iris.data y = iris.target Y_target_names = iris.target_names pca = PCA(n_components=2, whiten=True) pca.fit(X) #_________________________________________________________ fig = plt.figure(figsize=(12, 8)) X_pca = pca.transform(X) for i, color in zip(range(n_classes), colors): idx = np.where(y == i) plt.scatter(X_pca[idx, 0], X_pca[idx, 1], label = Y_target_names[i], c=color, edgecolor='black', lw=2, cmap=CMAP) plt.axis("tight") plt.xlabel('pca component 1') plt.ylabel('pca component 2') plt.legend(loc='upper center') # fig.savefig('ML_0519.png', bbox_inches='tight') plt.show() # plt.clf() # + [markdown] colab_type="text" id="5X0moN-Co-9r" # ## Featureselektion # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="vtXJy9Hgo-9r" 
outputId="a9659352-9fb5-4088-e079-7646c5b66093" from sklearn.datasets import load_iris from sklearn.svm import SVC import numpy as np iris = load_iris() X = iris.data y = iris.target # reference score svc = SVC(kernel='linear', C=1) clf = svc.fit(X, y) print(clf.score(X, y)) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="O0lPqoFGo-9t" outputId="02f43923-2db2-403b-9059-46a305ad5de7" # Add random noise as non informative data rns = np.random.RandomState(12) noise = rns.uniform(0, 6, size=(len(X), 1)) X = np.hstack([X, noise]) # Score with all noise clf = svc.fit(X, y) print(clf.score(X, y)) # + colab={} colab_type="code" id="EGnTye5ro-9v" from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_classif selector = SelectKBest(f_classif, k=4) X_sel = selector.fit_transform(X, y) # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="BZXW3xZEo-9x" outputId="3af3d2d8-393b-4466-bfb2-6115ffb136ed" print(selector.scores_) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="X9icJTY6o-90" outputId="9aa9cf77-68fd-43fa-a689-6666ee833e10" svc = SVC(kernel='linear', C=1) clf = svc.fit(X_sel, y) print(clf.score(X_sel, y)) # + [markdown] colab_type="text" id="Iy6elm8Qo-91" # ## Selektion nach Tree-Modellen # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="KVrKK5pNo-92" outputId="a000c871-0d82-42cc-a7c9-e65c24c2bbe8" from sklearn.feature_selection import SelectFromModel from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier() clf.fit(X, y) print(clf.feature_importances_) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="MNS96I_Ro-93" outputId="d6640305-5a6b-487f-c6f7-48e3ee2ff1bc" selector = SelectFromModel(clf, threshold=0.02) X_sel = selector.fit_transform(X, y) print(selector.get_support()) # + colab={"base_uri": "https://localhost:8080/", "height": 
34} colab_type="code" id="masCa8pso-95" outputId="12ee5033-fbce-4733-f296-fa6fdd85a0fb" svc = SVC(kernel='linear') clf = svc.fit(X_sel, y) print(clf.score(X_sel, y)) # + [markdown] colab_type="text" id="htdK7Awto-97" # ## Rekursive Eliminierung nach Modellen # + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="06QeC-0Yo-97" outputId="d578ac96-4e96-4548-dc69-1b320b159991" from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(random_state=12) clf.fit(X, y) # + colab={} colab_type="code" id="wf-DKLjZo--A" from sklearn.feature_selection import RFE # Original selector = RFE(clf, 4) selector = RFE(clf, n_features_to_select=4) # + colab={} colab_type="code" id="w7VvfvFdo--D" selector = selector.fit(X, y) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="GDjg3CvSo--F" outputId="ab75bb52-e9e3-4887-803f-711d829262a1" print(selector.get_support())
kap5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Chapter 13 – Loading and Preprocessing Data with TensorFlow** # # _This notebook contains all the sample code and solutions to the exercises in chapter 13._ # <table align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/13_loading_and_preprocessing_data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # </table> # # Setup # First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0. # + # Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" try: # # %tensorflow_version only exists in Colab. # %tensorflow_version 2.x # !pip install -q -U tfx==0.21.2 print("You can safely ignore the package incompatibility errors.") except Exception: pass # TensorFlow ≥2.0 is required import tensorflow as tf from tensorflow import keras assert tf.__version__ >= "2.0" # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Where to save the figures PROJECT_ROOT_DIR = "." 
CHAPTER_ID = "data"
# All figures saved by save_fig() go under ./images/data/.
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)

def save_fig(fig_id: str, tight_layout: bool = True, fig_extension: str = "png", resolution: int = 300) -> None:
    """Save the current matplotlib figure under IMAGES_PATH as <fig_id>.<fig_extension>."""
    path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)
# -

# ## Datasets

X = tf.range(10)
dataset = tf.data.Dataset.from_tensor_slices(X)
dataset

# Equivalently:
dataset = tf.data.Dataset.range(10)

for item in dataset:
    print(item)

# + tags=["raises-exception"]
dataset = dataset.repeat(3).batch(7)
for item in dataset:
    print(item)
# -

dataset = dataset.map(lambda x: x * 2)
for item in dataset:
    print(item)

#dataset = dataset.apply(tf.data.experimental.unbatch()) # Now deprecated
dataset = dataset.unbatch()

dataset = dataset.filter(lambda x: x < 10)  # keep only items < 10
for item in dataset.take(3):
    print(item)

# +
tf.random.set_seed(42)

# Shuffle with a deliberately small buffer (3) so the partial shuffling of the
# repeated range is visible in the printed batches.
dataset = tf.data.Dataset.range(10).repeat(3)
dataset = dataset.shuffle(buffer_size=3, seed=42).batch(7)
for item in dataset:
    print(item)
# -

# ## Split the California dataset to multiple CSV files

# Let's start by loading and preparing the California housing dataset.
We first load it, then split it into a training set, a validation set and a test set, and finally we scale it: # + from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler housing = fetch_california_housing() X_train_full, X_test, y_train_full, y_test = train_test_split( housing.data, housing.target.reshape(-1, 1), random_state=42) X_train, X_valid, y_train, y_valid = train_test_split( X_train_full, y_train_full, random_state=42) scaler = StandardScaler() scaler.fit(X_train) X_mean = scaler.mean_ X_std = scaler.scale_ # - # For a very large dataset that does not fit in memory, you will typically want to split it into many files first, then have TensorFlow read these files in parallel. To demonstrate this, let's start by splitting the housing dataset and save it to 20 CSV files: def save_to_multiple_csv_files(data, name_prefix, header=None, n_parts=10): housing_dir = os.path.join("datasets", "housing") os.makedirs(housing_dir, exist_ok=True) path_format = os.path.join(housing_dir, "my_{}_{:02d}.csv") filepaths = [] m = len(data) for file_idx, row_indices in enumerate(np.array_split(np.arange(m), n_parts)): part_csv = path_format.format(name_prefix, file_idx) filepaths.append(part_csv) with open(part_csv, "wt", encoding="utf-8") as f: if header is not None: f.write(header) f.write("\n") for row_idx in row_indices: f.write(",".join([repr(col) for col in data[row_idx]])) f.write("\n") return filepaths # + train_data = np.c_[X_train, y_train] valid_data = np.c_[X_valid, y_valid] test_data = np.c_[X_test, y_test] header_cols = housing.feature_names + ["MedianHouseValue"] header = ",".join(header_cols) train_filepaths = save_to_multiple_csv_files(train_data, "train", header, n_parts=20) valid_filepaths = save_to_multiple_csv_files(valid_data, "valid", header, n_parts=10) test_filepaths = save_to_multiple_csv_files(test_data, "test", header, n_parts=10) # - # Okay, now let's 
take a peek at the first few lines of one of these CSV files: # + import pandas as pd pd.read_csv(train_filepaths[0]).head() # - # Or in text mode: with open(train_filepaths[0]) as f: for i in range(5): print(f.readline(), end="") train_filepaths # ## Building an Input Pipeline filepath_dataset = tf.data.Dataset.list_files(train_filepaths, seed=42) for filepath in filepath_dataset: print(filepath) n_readers = 5 dataset = filepath_dataset.interleave( lambda filepath: tf.data.TextLineDataset(filepath).skip(1), cycle_length=n_readers) for line in dataset.take(5): print(line.numpy()) # Notice that field 4 is interpreted as a string. record_defaults=[0, np.nan, tf.constant(np.nan, dtype=tf.float64), "Hello", tf.constant([])] parsed_fields = tf.io.decode_csv('1,2,3,4,5', record_defaults) parsed_fields # Notice that all missing fields are replaced with their default value, when provided: parsed_fields = tf.io.decode_csv(',,,,5', record_defaults) parsed_fields # The 5th field is compulsory (since we provided `tf.constant([])` as the "default value"), so we get an exception if we do not provide it: try: parsed_fields = tf.io.decode_csv(',,,,', record_defaults) except tf.errors.InvalidArgumentError as ex: print(ex) # The number of fields should match exactly the number of fields in the `record_defaults`: try: parsed_fields = tf.io.decode_csv('1,2,3,4,5,6,7', record_defaults) except tf.errors.InvalidArgumentError as ex: print(ex) # + n_inputs = 8 # X_train.shape[-1] @tf.function def preprocess(line): defs = [0.] 
* n_inputs + [tf.constant([], dtype=tf.float32)] fields = tf.io.decode_csv(line, record_defaults=defs) x = tf.stack(fields[:-1]) y = tf.stack(fields[-1:]) return (x - X_mean) / X_std, y # - preprocess(b'4.2083,44.0,5.3232,0.9171,846.0,2.3370,37.47,-122.2,2.782') def csv_reader_dataset(filepaths, repeat=1, n_readers=5, n_read_threads=None, shuffle_buffer_size=10000, n_parse_threads=5, batch_size=32): dataset = tf.data.Dataset.list_files(filepaths).repeat(repeat) dataset = dataset.interleave( lambda filepath: tf.data.TextLineDataset(filepath).skip(1), cycle_length=n_readers, num_parallel_calls=n_read_threads) dataset = dataset.shuffle(shuffle_buffer_size) dataset = dataset.map(preprocess, num_parallel_calls=n_parse_threads) dataset = dataset.batch(batch_size) return dataset.prefetch(1) # + tf.random.set_seed(42) train_set = csv_reader_dataset(train_filepaths, batch_size=3) for X_batch, y_batch in train_set.take(2): print("X =", X_batch) print("y =", y_batch) print() # - train_set = csv_reader_dataset(train_filepaths, repeat=None) valid_set = csv_reader_dataset(valid_filepaths) test_set = csv_reader_dataset(test_filepaths) # + keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential([ keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]), keras.layers.Dense(1), ]) # - model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3)) batch_size = 32 model.fit(train_set, steps_per_epoch=len(X_train) // batch_size, epochs=10, validation_data=valid_set) model.evaluate(test_set, steps=len(X_test) // batch_size) new_set = test_set.map(lambda X, y: X) # we could instead just pass test_set, Keras would ignore the labels X_new = X_test model.predict(new_set, steps=len(X_new) // batch_size) # + optimizer = keras.optimizers.Nadam(lr=0.01) loss_fn = keras.losses.mean_squared_error n_epochs = 5 batch_size = 32 n_steps_per_epoch = len(X_train) // batch_size total_steps = n_epochs * n_steps_per_epoch global_step = 0 
for X_batch, y_batch in train_set.take(total_steps): global_step += 1 print("\rGlobal step {}/{}".format(global_step, total_steps), end="") with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) # - keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) # + optimizer = keras.optimizers.Nadam(lr=0.01) loss_fn = keras.losses.mean_squared_error @tf.function def train(model, n_epochs, batch_size=32, n_readers=5, n_read_threads=5, shuffle_buffer_size=10000, n_parse_threads=5): train_set = csv_reader_dataset(train_filepaths, repeat=n_epochs, n_readers=n_readers, n_read_threads=n_read_threads, shuffle_buffer_size=shuffle_buffer_size, n_parse_threads=n_parse_threads, batch_size=batch_size) for X_batch, y_batch in train_set: with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) train(model, 5) # - keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) # + optimizer = keras.optimizers.Nadam(lr=0.01) loss_fn = keras.losses.mean_squared_error @tf.function def train(model, n_epochs, batch_size=32, n_readers=5, n_read_threads=5, shuffle_buffer_size=10000, n_parse_threads=5): train_set = csv_reader_dataset(train_filepaths, repeat=n_epochs, n_readers=n_readers, n_read_threads=n_read_threads, shuffle_buffer_size=shuffle_buffer_size, n_parse_threads=n_parse_threads, batch_size=batch_size) n_steps_per_epoch = len(X_train) // batch_size total_steps = n_epochs * n_steps_per_epoch global_step = 0 for X_batch, y_batch in train_set.take(total_steps): global_step += 1 if tf.equal(global_step % 100, 0): 
tf.print("\rGlobal step", global_step, "/", total_steps) with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) train(model, 5) # - # Here is a short description of each method in the `Dataset` class: for m in dir(tf.data.Dataset): if not (m.startswith("_") or m.endswith("_")): func = getattr(tf.data.Dataset, m) if hasattr(func, "__doc__"): print("● {:21s}{}".format(m + "()", func.__doc__.split("\n")[0])) # ## The `TFRecord` binary format # A TFRecord file is just a list of binary records. You can create one using a `tf.io.TFRecordWriter`: with tf.io.TFRecordWriter("my_data.tfrecord") as f: f.write(b"This is the first record") f.write(b"And this is the second record") # And you can read it using a `tf.data.TFRecordDataset`: filepaths = ["my_data.tfrecord"] dataset = tf.data.TFRecordDataset(filepaths) for item in dataset: print(item) # You can read multiple TFRecord files with just one `TFRecordDataset`. 
By default it will read them one at a time, but if you set `num_parallel_reads=3`, it will read 3 at a time in parallel and interleave their records: # + filepaths = ["my_test_{}.tfrecord".format(i) for i in range(5)] for i, filepath in enumerate(filepaths): with tf.io.TFRecordWriter(filepath) as f: for j in range(3): f.write("File {} record {}".format(i, j).encode("utf-8")) dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=3) for item in dataset: print(item) # - options = tf.io.TFRecordOptions(compression_type="GZIP") with tf.io.TFRecordWriter("my_compressed.tfrecord", options) as f: f.write(b"This is the first record") f.write(b"And this is the second record") dataset = tf.data.TFRecordDataset(["my_compressed.tfrecord"], compression_type="GZIP") for item in dataset: print(item) # ### A Brief Intro to Protocol Buffers # For this section you need to [install protobuf](https://developers.google.com/protocol-buffers/docs/downloads). In general you will not have to do so when using TensorFlow, as it comes with functions to create and parse protocol buffers of type `tf.train.Example`, which are generally sufficient. However, in this section we will learn about protocol buffers by creating our own simple protobuf definition, so we need the protobuf compiler (`protoc`): we will use it to compile the protobuf definition to a Python module that we can then use in our code. # First let's write a simple protobuf definition: # %%writefile person.proto syntax = "proto3"; message Person { string name = 1; int32 id = 2; repeated string email = 3; } # And let's compile it (the `--descriptor_set_out` and `--include_imports` options are only required for the `tf.io.decode_proto()` example below): # !protoc person.proto --python_out=. 
--descriptor_set_out=person.desc --include_imports # !ls person* # + from person_pb2 import Person person = Person(name="Al", id=123, email=["<EMAIL>"]) # create a Person print(person) # display the Person # - person.name # read a field person.name = "Alice" # modify a field person.email[0] # repeated fields can be accessed like arrays person.email.append("<EMAIL>") # add an email address s = person.SerializeToString() # serialize to a byte string s person2 = Person() # create a new Person person2.ParseFromString(s) # parse the byte string (27 bytes) person == person2 # now they are equal # #### Custom protobuf # In rare cases, you may want to parse a custom protobuf (like the one we just created) in TensorFlow. For this you can use the `tf.io.decode_proto()` function: # + person_tf = tf.io.decode_proto( bytes=s, message_type="Person", field_names=["name", "id", "email"], output_types=[tf.string, tf.int32, tf.string], descriptor_source="person.desc") person_tf.values # - # For more details, see the [`tf.io.decode_proto()`](https://www.tensorflow.org/api_docs/python/tf/io/decode_proto) documentation. # ### TensorFlow Protobufs # Here is the definition of the tf.train.Example protobuf: # ```proto # syntax = "proto3"; # # message BytesList { repeated bytes value = 1; } # message FloatList { repeated float value = 1 [packed = true]; } # message Int64List { repeated int64 value = 1 [packed = true]; } # message Feature { # oneof kind { # BytesList bytes_list = 1; # FloatList float_list = 2; # Int64List int64_list = 3; # } # }; # message Features { map<string, Feature> feature = 1; }; # message Example { Features features = 1; }; # ``` # **Warning**: there's currently a bug preventing `from tensorflow.train import X` so we work around it by writing `X = tf.train.X`. See https://github.com/tensorflow/tensorflow/issues/33289 for more details. 
# + #from tensorflow.train import BytesList, FloatList, Int64List #from tensorflow.train import Feature, Features, Example BytesList = tf.train.BytesList FloatList = tf.train.FloatList Int64List = tf.train.Int64List Feature = tf.train.Feature Features = tf.train.Features Example = tf.train.Example person_example = Example( features=Features( feature={ "name": Feature(bytes_list=BytesList(value=[b"Alice"])), "id": Feature(int64_list=Int64List(value=[123])), "emails": Feature(bytes_list=BytesList(value=[b"<EMAIL>", b"<EMAIL>"])) })) with tf.io.TFRecordWriter("my_contacts.tfrecord") as f: f.write(person_example.SerializeToString()) # - feature_description = { "name": tf.io.FixedLenFeature([], tf.string, default_value=""), "id": tf.io.FixedLenFeature([], tf.int64, default_value=0), "emails": tf.io.VarLenFeature(tf.string), } for serialized_example in tf.data.TFRecordDataset(["my_contacts.tfrecord"]): parsed_example = tf.io.parse_single_example(serialized_example, feature_description) parsed_example parsed_example parsed_example["emails"].values[0] tf.sparse.to_dense(parsed_example["emails"], default_value=b"") parsed_example["emails"].values # ### Putting Images in TFRecords # + from sklearn.datasets import load_sample_images img = load_sample_images()["images"][0] plt.imshow(img) plt.axis("off") plt.title("Original Image") plt.show() # - data = tf.io.encode_jpeg(img) example_with_image = Example(features=Features(feature={ "image": Feature(bytes_list=BytesList(value=[data.numpy()]))})) serialized_example = example_with_image.SerializeToString() # then save to TFRecord feature_description = { "image": tf.io.VarLenFeature(tf.string) } example_with_image = tf.io.parse_single_example(serialized_example, feature_description) decoded_img = tf.io.decode_jpeg(example_with_image["image"].values[0]) # Or use `decode_image()` which supports BMP, GIF, JPEG and PNG formats: decoded_img = tf.io.decode_image(example_with_image["image"].values[0]) plt.imshow(decoded_img) 
plt.title("Decoded Image") plt.axis("off") plt.show() # ### Putting Tensors and Sparse Tensors in TFRecords # Tensors can be serialized and parsed easily using `tf.io.serialize_tensor()` and `tf.io.parse_tensor()`: t = tf.constant([[0., 1.], [2., 3.], [4., 5.]]) s = tf.io.serialize_tensor(t) s tf.io.parse_tensor(s, out_type=tf.float32) serialized_sparse = tf.io.serialize_sparse(parsed_example["emails"]) serialized_sparse BytesList(value=serialized_sparse.numpy()) dataset = tf.data.TFRecordDataset(["my_contacts.tfrecord"]).batch(10) for serialized_examples in dataset: parsed_examples = tf.io.parse_example(serialized_examples, feature_description) parsed_examples # ## Handling Sequential Data Using `SequenceExample` # ```proto # syntax = "proto3"; # # message FeatureList { repeated Feature feature = 1; }; # message FeatureLists { map<string, FeatureList> feature_list = 1; }; # message SequenceExample { # Features context = 1; # FeatureLists feature_lists = 2; # }; # ``` # **Warning**: there's currently a bug preventing `from tensorflow.train import X` so we work around it by writing `X = tf.train.X`. See https://github.com/tensorflow/tensorflow/issues/33289 for more details. 
# + #from tensorflow.train import FeatureList, FeatureLists, SequenceExample FeatureList = tf.train.FeatureList FeatureLists = tf.train.FeatureLists SequenceExample = tf.train.SequenceExample context = Features(feature={ "author_id": Feature(int64_list=Int64List(value=[123])), "title": Feature(bytes_list=BytesList(value=[b"A", b"desert", b"place", b"."])), "pub_date": Feature(int64_list=Int64List(value=[1623, 12, 25])) }) content = [["When", "shall", "we", "three", "meet", "again", "?"], ["In", "thunder", ",", "lightning", ",", "or", "in", "rain", "?"]] comments = [["When", "the", "hurlyburly", "'s", "done", "."], ["When", "the", "battle", "'s", "lost", "and", "won", "."]] def words_to_feature(words): return Feature(bytes_list=BytesList(value=[word.encode("utf-8") for word in words])) content_features = [words_to_feature(sentence) for sentence in content] comments_features = [words_to_feature(comment) for comment in comments] sequence_example = SequenceExample( context=context, feature_lists=FeatureLists(feature_list={ "content": FeatureList(feature=content_features), "comments": FeatureList(feature=comments_features) })) # - sequence_example serialized_sequence_example = sequence_example.SerializeToString() context_feature_descriptions = { "author_id": tf.io.FixedLenFeature([], tf.int64, default_value=0), "title": tf.io.VarLenFeature(tf.string), "pub_date": tf.io.FixedLenFeature([3], tf.int64, default_value=[0, 0, 0]), } sequence_feature_descriptions = { "content": tf.io.VarLenFeature(tf.string), "comments": tf.io.VarLenFeature(tf.string), } parsed_context, parsed_feature_lists = tf.io.parse_single_sequence_example( serialized_sequence_example, context_feature_descriptions, sequence_feature_descriptions) parsed_context parsed_context["title"].values parsed_feature_lists print(tf.RaggedTensor.from_sparse(parsed_feature_lists["content"])) # # The Features API # Let's use the variant of the California housing dataset that we used in Chapter 2, since it contains 
categorical features and missing values: # + import os import tarfile import urllib DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/" HOUSING_PATH = os.path.join("datasets", "housing") HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz" def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH): os.makedirs(housing_path, exist_ok=True) tgz_path = os.path.join(housing_path, "housing.tgz") urllib.request.urlretrieve(housing_url, tgz_path) housing_tgz = tarfile.open(tgz_path) housing_tgz.extractall(path=housing_path) housing_tgz.close() # - fetch_housing_data() # + import pandas as pd def load_housing_data(housing_path=HOUSING_PATH): csv_path = os.path.join(housing_path, "housing.csv") return pd.read_csv(csv_path) # - housing = load_housing_data() housing.head() housing_median_age = tf.feature_column.numeric_column("housing_median_age") age_mean, age_std = X_mean[1], X_std[1] # The median age is column in 1 housing_median_age = tf.feature_column.numeric_column( "housing_median_age", normalizer_fn=lambda x: (x - age_mean) / age_std) median_income = tf.feature_column.numeric_column("median_income") bucketized_income = tf.feature_column.bucketized_column( median_income, boundaries=[1.5, 3., 4.5, 6.]) bucketized_income ocean_prox_vocab = ['<1H OCEAN', 'INLAND', 'ISLAND', 'NEAR BAY', 'NEAR OCEAN'] ocean_proximity = tf.feature_column.categorical_column_with_vocabulary_list( "ocean_proximity", ocean_prox_vocab) ocean_proximity # Just an example, it's not used later on city_hash = tf.feature_column.categorical_column_with_hash_bucket( "city", hash_bucket_size=1000) city_hash bucketized_age = tf.feature_column.bucketized_column( housing_median_age, boundaries=[-1., -0.5, 0., 0.5, 1.]) # age was scaled age_and_ocean_proximity = tf.feature_column.crossed_column( [bucketized_age, ocean_proximity], hash_bucket_size=100) latitude = tf.feature_column.numeric_column("latitude") longitude = 
tf.feature_column.numeric_column("longitude") bucketized_latitude = tf.feature_column.bucketized_column( latitude, boundaries=list(np.linspace(32., 42., 20 - 1))) bucketized_longitude = tf.feature_column.bucketized_column( longitude, boundaries=list(np.linspace(-125., -114., 20 - 1))) location = tf.feature_column.crossed_column( [bucketized_latitude, bucketized_longitude], hash_bucket_size=1000) ocean_proximity_one_hot = tf.feature_column.indicator_column(ocean_proximity) ocean_proximity_embed = tf.feature_column.embedding_column(ocean_proximity, dimension=2) # ### Using Feature Columns for Parsing median_house_value = tf.feature_column.numeric_column("median_house_value") columns = [housing_median_age, median_house_value] feature_descriptions = tf.feature_column.make_parse_example_spec(columns) feature_descriptions with tf.io.TFRecordWriter("my_data_with_features.tfrecords") as f: for x, y in zip(X_train[:, 1:2], y_train): example = Example(features=Features(feature={ "housing_median_age": Feature(float_list=FloatList(value=[x])), "median_house_value": Feature(float_list=FloatList(value=[y])) })) f.write(example.SerializeToString()) keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) # + def parse_examples(serialized_examples): examples = tf.io.parse_example(serialized_examples, feature_descriptions) targets = examples.pop("median_house_value") # separate the targets return examples, targets batch_size = 32 dataset = tf.data.TFRecordDataset(["my_data_with_features.tfrecords"]) dataset = dataset.repeat().shuffle(10000).batch(batch_size).map(parse_examples) # - # **Warning**: the `DenseFeatures` layer currently does not work with the Functional API, see [TF issue #27416](https://github.com/tensorflow/tensorflow/issues/27416). Hopefully this will be resolved before the final release of TF 2.0. 
columns_without_target = columns[:-1] model = keras.models.Sequential([ keras.layers.DenseFeatures(feature_columns=columns_without_target), keras.layers.Dense(1) ]) model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) model.fit(dataset, steps_per_epoch=len(X_train) // batch_size, epochs=5) some_columns = [ocean_proximity_embed, bucketized_income] dense_features = keras.layers.DenseFeatures(some_columns) dense_features({ "ocean_proximity": [["NEAR OCEAN"], ["INLAND"], ["INLAND"]], "median_income": [[3.], [7.2], [1.]] }) # # TF Transform try: import tensorflow_transform as tft def preprocess(inputs): # inputs is a batch of input features median_age = inputs["housing_median_age"] ocean_proximity = inputs["ocean_proximity"] standardized_age = tft.scale_to_z_score(median_age - tft.mean(median_age)) ocean_proximity_id = tft.compute_and_apply_vocabulary(ocean_proximity) return { "standardized_median_age": standardized_age, "ocean_proximity_id": ocean_proximity_id } except ImportError: print("TF Transform is not installed. 
Try running: pip3 install -U tensorflow-transform") # # TensorFlow Datasets # + import tensorflow_datasets as tfds datasets = tfds.load(name="mnist") mnist_train, mnist_test = datasets["train"], datasets["test"] # - print(tfds.list_builders()) plt.figure(figsize=(6,3)) mnist_train = mnist_train.repeat(5).batch(32).prefetch(1) for item in mnist_train: images = item["image"] labels = item["label"] for index in range(5): plt.subplot(1, 5, index + 1) image = images[index, ..., 0] label = labels[index].numpy() plt.imshow(image, cmap="binary") plt.title(label) plt.axis("off") break # just showing part of the first batch datasets = tfds.load(name="mnist") mnist_train, mnist_test = datasets["train"], datasets["test"] mnist_train = mnist_train.repeat(5).batch(32) mnist_train = mnist_train.map(lambda items: (items["image"], items["label"])) mnist_train = mnist_train.prefetch(1) for images, labels in mnist_train.take(1): print(images.shape) print(labels.numpy()) keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) datasets = tfds.load(name="mnist", batch_size=32, as_supervised=True) mnist_train = datasets["train"].repeat().prefetch(1) model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28, 1]), keras.layers.Lambda(lambda images: tf.cast(images, tf.float32)), keras.layers.Dense(10, activation="softmax")]) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) model.fit(mnist_train, steps_per_epoch=60000 // 32, epochs=5) # # TensorFlow Hub keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) # + import tensorflow_hub as hub hub_layer = hub.KerasLayer("https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1", output_shape=[50], input_shape=[], dtype=tf.string) model = keras.Sequential() model.add(hub_layer) model.add(keras.layers.Dense(16, activation='relu')) model.add(keras.layers.Dense(1, activation='sigmoid')) model.summary() # - sentences = tf.constant(["It 
was a great movie", "The actors were amazing"]) embeddings = hub_layer(sentences) embeddings # # Exercises # # ## 1. to 8. # See Appendix A # # ## 9. # ### a. # _Exercise: Load the Fashion MNIST dataset (introduced in Chapter 10); split it into a training set, a validation set, and a test set; shuffle the training set; and save each dataset to multiple TFRecord files. Each record should be a serialized `Example` protobuf with two features: the serialized image (use `tf.io.serialize_tensor()` to serialize each image), and the label. Note: for large images, you could use `tf.io.encode_jpeg()` instead. This would save a lot of space, but it would lose a bit of image quality._ (X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data() X_valid, X_train = X_train_full[:5000], X_train_full[5000:] y_valid, y_train = y_train_full[:5000], y_train_full[5000:] keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) train_set = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(len(X_train)) valid_set = tf.data.Dataset.from_tensor_slices((X_valid, y_valid)) test_set = tf.data.Dataset.from_tensor_slices((X_test, y_test)) def create_example(image, label): image_data = tf.io.serialize_tensor(image) #image_data = tf.io.encode_jpeg(image[..., np.newaxis]) return Example( features=Features( feature={ "image": Feature(bytes_list=BytesList(value=[image_data.numpy()])), "label": Feature(int64_list=Int64List(value=[label])), })) for image, label in valid_set.take(1): print(create_example(image, label)) # The following function saves a given dataset to a set of TFRecord files. The examples are written to the files in a round-robin fashion. To do this, we enumerate all the examples using the `dataset.enumerate()` method, and we compute `index % n_shards` to decide which file to write to. 
# We use the standard `contextlib.ExitStack` class to make sure that all writers are properly closed whether or not an I/O error occurs while writing.

# +
from contextlib import ExitStack

def write_tfrecords(name, dataset, n_shards=10):
    """Shard `dataset` across `n_shards` TFRecord files, round-robin.

    Each element (image, label) is serialized with `create_example()` and
    appended to shard `i % n_shards`. Returns the list of shard file paths.
    """
    paths = ["{}.tfrecord-{:05d}-of-{:05d}".format(name, shard_id, n_shards)
             for shard_id in range(n_shards)]
    # ExitStack closes every writer even if an I/O error occurs mid-write.
    with ExitStack() as stack:
        writers = [stack.enter_context(tf.io.TFRecordWriter(path))
                   for path in paths]
        for index, (image, label) in dataset.enumerate():
            example = create_example(image, label)
            # Round-robin dispatch: element `index` goes to shard `index % n_shards`.
            writers[index % n_shards].write(example.SerializeToString())
    return paths
# -

train_filepaths = write_tfrecords("my_fashion_mnist.train", train_set)
valid_filepaths = write_tfrecords("my_fashion_mnist.valid", valid_set)
test_filepaths = write_tfrecords("my_fashion_mnist.test", test_set)

# ### b.
# _Exercise: Then use tf.data to create an efficient dataset for each set. Finally, use a Keras model to train these datasets, including a preprocessing layer to standardize each input feature._
# _Try to make the input pipeline as efficient as possible, using TensorBoard to visualize profiling data._

# +
def preprocess(tfrecord):
    """Parse one serialized Example into (image uint8 [28, 28], label int64)."""
    feature_descriptions = {
        "image": tf.io.FixedLenFeature([], tf.string, default_value=""),
        "label": tf.io.FixedLenFeature([], tf.int64, default_value=-1)
    }
    example = tf.io.parse_single_example(tfrecord, feature_descriptions)
    image = tf.io.parse_tensor(example["image"], out_type=tf.uint8)
    #image = tf.io.decode_jpeg(example["image"])
    image = tf.reshape(image, shape=[28, 28])
    return image, example["label"]

def mnist_dataset(filepaths, n_read_threads=5, shuffle_buffer_size=None,
                  n_parse_threads=5, batch_size=32, cache=True):
    """Build an efficient tf.data pipeline over the given TFRecord shards.

    Reads shards in parallel, optionally caches and shuffles, parses in
    parallel, batches, and prefetches one batch ahead of the consumer.
    """
    dataset = tf.data.TFRecordDataset(filepaths,
                                      num_parallel_reads=n_read_threads)
    if cache:
        dataset = dataset.cache()
    if shuffle_buffer_size:
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.map(preprocess, num_parallel_calls=n_parse_threads)
    dataset = dataset.batch(batch_size)
    return dataset.prefetch(1)
# -

train_set = mnist_dataset(train_filepaths, shuffle_buffer_size=60000)
# BUG FIX: the validation and test pipelines previously read from
# `train_filepaths`, so the model was "validated" and "tested" on its own
# training data. Use the matching shard lists instead.
valid_set = mnist_dataset(valid_filepaths)
test_set = mnist_dataset(test_filepaths)

for X, y in train_set.take(1):
    for i in range(5):
        plt.subplot(1, 5, i + 1)
        plt.imshow(X[i].numpy(), cmap="binary")
        plt.axis("off")
        plt.title(str(y[i].numpy()))

# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)

class Standardization(keras.layers.Layer):
    """Standardize inputs with statistics learned from a data sample."""
    def adapt(self, data_sample):
        # Per-pixel mean/std over the sample (keepdims so they broadcast).
        self.means_ = np.mean(data_sample, axis=0, keepdims=True)
        self.stds_ = np.std(data_sample, axis=0, keepdims=True)
    def call(self, inputs):
        # epsilon avoids division by zero for constant pixels.
        return (inputs - self.means_) / (self.stds_ + keras.backend.epsilon())

standardization = Standardization(input_shape=[28, 28])
# or perhaps soon:
#standardization = keras.layers.Normalization()

sample_image_batches = train_set.take(100).map(lambda image, label: image)
sample_images = np.concatenate(list(sample_image_batches.as_numpy_iterator()),
                               axis=0).astype(np.float32)
standardization.adapt(sample_images)
model = keras.models.Sequential([ standardization, keras.layers.Flatten(), keras.layers.Dense(100, activation="relu"), keras.layers.Dense(10, activation="softmax") ]) model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"]) # + from datetime import datetime logs = os.path.join(os.curdir, "my_logs", "run_" + datetime.now().strftime("%Y%m%d_%H%M%S")) tensorboard_cb = tf.keras.callbacks.TensorBoard( log_dir=logs, histogram_freq=1, profile_batch=10) model.fit(train_set, epochs=5, validation_data=valid_set, callbacks=[tensorboard_cb]) # - # **Warning:** The profiling tab in TensorBoard works if you use TensorFlow 2.2+. You also need to make sure `tensorboard_plugin_profile` is installed (and restart Jupyter if necessary). # %load_ext tensorboard # %tensorboard --logdir=./my_logs --port=6006 # ## 10. # _Exercise: In this exercise you will download a dataset, split it, create a `tf.data.Dataset` to load it and preprocess it efficiently, then build and train a binary classification model containing an `Embedding` layer._ # # ### a. # _Exercise: Download the [Large Movie Review Dataset](https://homl.info/imdb), which contains 50,000 movies reviews from the [Internet Movie Database](https://imdb.com/). The data is organized in two directories, `train` and `test`, each containing a `pos` subdirectory with 12,500 positive reviews and a `neg` subdirectory with 12,500 negative reviews. Each review is stored in a separate text file. 
There are other files and folders (including preprocessed bag-of-words), but we will ignore them in this exercise._ # + from pathlib import Path DOWNLOAD_ROOT = "http://ai.stanford.edu/~amaas/data/sentiment/" FILENAME = "aclImdb_v1.tar.gz" filepath = keras.utils.get_file(FILENAME, DOWNLOAD_ROOT + FILENAME, extract=True) path = Path(filepath).parent / "aclImdb" path # - for name, subdirs, files in os.walk(path): indent = len(Path(name).parts) - len(path.parts) print(" " * indent + Path(name).parts[-1] + os.sep) for index, filename in enumerate(sorted(files)): if index == 3: print(" " * (indent + 1) + "...") break print(" " * (indent + 1) + filename) # + def review_paths(dirpath): return [str(path) for path in dirpath.glob("*.txt")] train_pos = review_paths(path / "train" / "pos") train_neg = review_paths(path / "train" / "neg") test_valid_pos = review_paths(path / "test" / "pos") test_valid_neg = review_paths(path / "test" / "neg") len(train_pos), len(train_neg), len(test_valid_pos), len(test_valid_neg) # - # ### b. # _Exercise: Split the test set into a validation set (15,000) and a test set (10,000)._ # + np.random.shuffle(test_valid_pos) test_pos = test_valid_pos[:5000] test_neg = test_valid_neg[:5000] valid_pos = test_valid_pos[5000:] valid_neg = test_valid_neg[5000:] # - # ### c. 
# _Exercise: Use tf.data to create an efficient dataset for each set._

# Since the dataset fits in memory, we can just load all the data using pure Python code and use `tf.data.Dataset.from_tensor_slices()`:

def imdb_dataset(filepaths_positive, filepaths_negative):
    """Load all reviews into memory and return a (review, label) dataset.

    Labels: 0 for negative reviews, 1 for positive reviews.
    """
    reviews = []
    labels = []
    for filepaths, label in ((filepaths_negative, 0), (filepaths_positive, 1)):
        for filepath in filepaths:
            # BUG FIX: the IMDB files are UTF-8; relying on the locale's
            # default encoding fails (or mangles text) on non-UTF-8 systems.
            with open(filepath, encoding="utf-8") as review_file:
                reviews.append(review_file.read())
            labels.append(label)
    return tf.data.Dataset.from_tensor_slices(
        (tf.constant(reviews), tf.constant(labels)))

for X, y in imdb_dataset(train_pos, train_neg).take(3):
    print(X)
    print(y)
    print()

# %timeit -r1 for X, y in imdb_dataset(train_pos, train_neg).repeat(10): pass

# It takes about 20 seconds to load the dataset and go through it 10 times.

# But let's pretend the dataset does not fit in memory, just to make things more interesting. Luckily, each review fits on just one line (they use `<br />` to indicate line breaks), so we can read the reviews using a `TextLineDataset`. If they didn't, we would have to preprocess the input files (e.g., converting them to TFRecords). For very large datasets, it would make sense to use a tool like Apache Beam for that.

def imdb_dataset(filepaths_positive, filepaths_negative, n_read_threads=5):
    """Stream reviews from disk with TextLineDataset (labels as above)."""
    dataset_neg = tf.data.TextLineDataset(filepaths_negative,
                                          num_parallel_reads=n_read_threads)
    dataset_neg = dataset_neg.map(lambda review: (review, 0))
    dataset_pos = tf.data.TextLineDataset(filepaths_positive,
                                          num_parallel_reads=n_read_threads)
    dataset_pos = dataset_pos.map(lambda review: (review, 1))
    return tf.data.Dataset.concatenate(dataset_pos, dataset_neg)

# %timeit -r1 for X, y in imdb_dataset(train_pos, train_neg).repeat(10): pass

# Now it takes about 34 seconds to go through the dataset 10 times. That's much slower, essentially because the dataset is not cached in RAM, so it must be reloaded at each epoch.
If you add `.cache()` just before `.repeat(10)`, you will see that this implementation will be about as fast as the previous one. # %timeit -r1 for X, y in imdb_dataset(train_pos, train_neg).cache().repeat(10): pass # + batch_size = 32 train_set = imdb_dataset(train_pos, train_neg).shuffle(25000).batch(batch_size).prefetch(1) valid_set = imdb_dataset(valid_pos, valid_neg).batch(batch_size).prefetch(1) test_set = imdb_dataset(test_pos, test_neg).batch(batch_size).prefetch(1) # - # ### d. # _Exercise: Create a binary classification model, using a `TextVectorization` layer to preprocess each review. If the `TextVectorization` layer is not yet available (or if you like a challenge), try to create your own custom preprocessing layer: you can use the functions in the `tf.strings` package, for example `lower()` to make everything lowercase, `regex_replace()` to replace punctuation with spaces, and `split()` to split words on spaces. You should use a lookup table to output word indices, which must be prepared in the `adapt()` method._ # Let's first write a function to preprocess the reviews, cropping them to 300 characters, converting them to lower case, then replacing `<br />` and all non-letter characters to spaces, splitting the reviews into words, and finally padding or cropping each review so it ends up with exactly `n_words` tokens: # + def preprocess(X_batch, n_words=50): shape = tf.shape(X_batch) * tf.constant([1, 0]) + tf.constant([0, n_words]) Z = tf.strings.substr(X_batch, 0, 300) Z = tf.strings.lower(Z) Z = tf.strings.regex_replace(Z, b"<br\\s*/?>", b" ") Z = tf.strings.regex_replace(Z, b"[^a-z]", b" ") Z = tf.strings.split(Z) return Z.to_tensor(shape=shape, default_value=b"<pad>") X_example = tf.constant(["It's a great, great movie! 
I loved it.", "It was terrible, run away!!!"]) preprocess(X_example) # - # Now let's write a second utility function that will take a data sample with the same format as the output of the `preprocess()` function, and will output the list of the top `max_size` most frequent words, ensuring that the padding token is first: # + from collections import Counter def get_vocabulary(data_sample, max_size=1000): preprocessed_reviews = preprocess(data_sample).numpy() counter = Counter() for words in preprocessed_reviews: for word in words: if word != b"<pad>": counter[word] += 1 return [b"<pad>"] + [word for word, count in counter.most_common(max_size)] get_vocabulary(X_example) # - # Now we are ready to create the `TextVectorization` layer. Its constructor just saves the hyperparameters (`max_vocabulary_size` and `n_oov_buckets`). The `adapt()` method computes the vocabulary using the `get_vocabulary()` function, then it builds a `StaticVocabularyTable` (see Chapter 16 for more details). The `call()` method preprocesses the reviews to get a padded list of words for each review, then it uses the `StaticVocabularyTable` to lookup the index of each word in the vocabulary: class TextVectorization(keras.layers.Layer): def __init__(self, max_vocabulary_size=1000, n_oov_buckets=100, dtype=tf.string, **kwargs): super().__init__(dtype=dtype, **kwargs) self.max_vocabulary_size = max_vocabulary_size self.n_oov_buckets = n_oov_buckets def adapt(self, data_sample): self.vocab = get_vocabulary(data_sample, self.max_vocabulary_size) words = tf.constant(self.vocab) word_ids = tf.range(len(self.vocab), dtype=tf.int64) vocab_init = tf.lookup.KeyValueTensorInitializer(words, word_ids) self.table = tf.lookup.StaticVocabularyTable(vocab_init, self.n_oov_buckets) def call(self, inputs): preprocessed_inputs = preprocess(inputs) return self.table.lookup(preprocessed_inputs) # Let's try it on our small `X_example` we defined earlier: # + text_vectorization = TextVectorization() 
text_vectorization.adapt(X_example)
text_vectorization(X_example)
# -

# Looks good! As you can see, each review was cleaned up and tokenized, then each word was encoded as its index in the vocabulary (all the 0s correspond to the `<pad>` tokens).
#
# Now let's create another `TextVectorization` layer and let's adapt it to the full IMDB training set (if the training set did not fit in RAM, we could just use a smaller sample of the training set by calling `train_set.take(500)`):

# +
max_vocabulary_size = 1000
n_oov_buckets = 100

# Collect raw review texts (dropping labels) to build the vocabulary from.
sample_review_batches = train_set.map(lambda review, label: review)
sample_reviews = np.concatenate(list(sample_review_batches.as_numpy_iterator()),
                                axis=0)

text_vectorization = TextVectorization(max_vocabulary_size, n_oov_buckets,
                                       input_shape=[])
text_vectorization.adapt(sample_reviews)
# -

# Let's run it on the same `X_example`, just to make sure the word IDs are larger now, since the vocabulary is bigger:

text_vectorization(X_example)

# Good! Now let's take a look at the first 10 words in the vocabulary:

text_vectorization.vocab[:10]

# These are the most common words in the reviews.

# Now to build our model we will need to encode all these word IDs somehow. One approach is to create bags of words: for each review, and for each word in the vocabulary, we count the number of occurrences of that word in the review. For example:

simple_example = tf.constant([[1, 3, 1, 0, 0], [2, 2, 0, 0, 0]])
tf.reduce_sum(tf.one_hot(simple_example, 4), axis=1)

# The first review has 2 times the word 0, 2 times the word 1, 0 times the word 2, and 1 time the word 3, so its bag-of-words representation is `[2, 2, 0, 1]`. Similarly, the second review has 3 times the word 0, 0 times the word 1, and so on. Let's wrap this logic in a small custom layer, and let's test it. We'll drop the counts for the word 0, since this corresponds to the `<pad>` token, which we don't care about.
class BagOfWords(keras.layers.Layer): def __init__(self, n_tokens, dtype=tf.int32, **kwargs): super().__init__(dtype=tf.int32, **kwargs) self.n_tokens = n_tokens def call(self, inputs): one_hot = tf.one_hot(inputs, self.n_tokens) return tf.reduce_sum(one_hot, axis=1)[:, 1:] # Let's test it: bag_of_words = BagOfWords(n_tokens=4) bag_of_words(simple_example) # It works fine! Now let's create another `BagOfWord` with the right vocabulary size for our training set: n_tokens = max_vocabulary_size + n_oov_buckets + 1 # add 1 for <pad> bag_of_words = BagOfWords(n_tokens) # We're ready to train the model! model = keras.models.Sequential([ text_vectorization, bag_of_words, keras.layers.Dense(100, activation="relu"), keras.layers.Dense(1, activation="sigmoid"), ]) model.compile(loss="binary_crossentropy", optimizer="nadam", metrics=["accuracy"]) model.fit(train_set, epochs=5, validation_data=valid_set) # We get about 75% accuracy on the validation set after just the first epoch, but after that the model makes no progress. We will do better in Chapter 16. For now the point is just to perform efficient preprocessing using `tf.data` and Keras preprocessing layers. # ### e. # _Exercise: Add an `Embedding` layer and compute the mean embedding for each review, multiplied by the square root of the number of words (see Chapter 16). 
This rescaled mean embedding can then be passed to the rest of your model._ # To compute the mean embedding for each review, and multiply it by the square root of the number of words in that review, we will need a little function: # + def compute_mean_embedding(inputs): not_pad = tf.math.count_nonzero(inputs, axis=-1) n_words = tf.math.count_nonzero(not_pad, axis=-1, keepdims=True) sqrt_n_words = tf.math.sqrt(tf.cast(n_words, tf.float32)) return tf.reduce_mean(inputs, axis=1) * sqrt_n_words another_example = tf.constant([[[1., 2., 3.], [4., 5., 0.], [0., 0., 0.]], [[6., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]) compute_mean_embedding(another_example) # - # Let's check that this is correct. The first review contains 2 words (the last token is a zero vector, which represents the `<pad>` token). The second review contains 1 word. So we need to compute the mean embedding for each review, and multiply the first one by the square root of 2, and the second one by the square root of 1: tf.reduce_mean(another_example, axis=1) * tf.sqrt([[2.], [1.]]) # Perfect. Now we're ready to train our final model. It's the same as before, except we replaced the `BagOfWords` layer with an `Embedding` layer followed by a `Lambda` layer that calls the `compute_mean_embedding` layer: # + embedding_size = 20 model = keras.models.Sequential([ text_vectorization, keras.layers.Embedding(input_dim=n_tokens, output_dim=embedding_size, mask_zero=True), # <pad> tokens => zero vectors keras.layers.Lambda(compute_mean_embedding), keras.layers.Dense(100, activation="relu"), keras.layers.Dense(1, activation="sigmoid"), ]) # - # ### f. # _Exercise: Train the model and see what accuracy you get. Try to optimize your pipelines to make training as fast as possible._ model.compile(loss="binary_crossentropy", optimizer="nadam", metrics=["accuracy"]) model.fit(train_set, epochs=5, validation_data=valid_set) # The model is not better using embeddings (but we will do better in Chapter 16). 
The pipeline looks fast enough (we optimized it earlier). # ### g. # _Exercise: Use TFDS to load the same dataset more easily: `tfds.load("imdb_reviews")`._ # + import tensorflow_datasets as tfds datasets = tfds.load(name="imdb_reviews") train_set, test_set = datasets["train"], datasets["test"] # - for example in train_set.take(1): print(example["text"]) print(example["label"])
13_loading_and_preprocessing_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
# Mount the Google Drive root so data and output files are reachable.
from google.colab import drive
drive.mount('/content/gdrive')

# +
# !pip install pytorch_pretrained_bert

# +
import torch
from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM, BertForNextSentencePrediction
import csv
import random
import argparse
from nltk.translate.bleu_score import sentence_bleu
import warnings
import torch.nn.functional as F
from torch.distributions import Categorical


# +
@torch.no_grad()
def predict_masked_word(tokenizer, model, device):
    """Sanity check: mask one token of a two-sentence input and let
    `BertForMaskedLM` predict it back."""
    text1 = "[CLS] And the riot squad they're restless, they need somewhere to go [SEP]"
    text2 = "As Lady and I look out tonight, from Desolation Row. [SEP]"
    tokens_a = tokenizer.tokenize(text1)
    tokens_b = tokenizer.tokenize(text2)
    tokens = tokens_a + tokens_b

    # Replace one known token ('lady', lowercased by the tokenizer) with the
    # mask token, remembering its absolute position in the joined sequence.
    masked_index = tokens_b.index('lady') + len(tokens_a)
    tokens[masked_index] = '[MASK]'

    indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)

    # Sentence A tokens get segment id 0, sentence B tokens get segment id 1
    # (see the BERT paper).
    segment_ids = [0] * len(tokens_a) + [1] * len(tokens_b)

    tokens_tensor = torch.tensor([indexed_tokens]).to(device)
    segments_tensor = torch.tensor([segment_ids]).to(device)

    predictions = model(tokens_tensor, segments_tensor)

    # Confirm we were able to predict the correct '[MASK]'.
    predicted_id = torch.argmax(predictions[0, masked_index]).item()
    predicted_token = tokenizer.convert_ids_to_tokens([predicted_id])[0]
    print("\n", "Predicted [MASK] = ", predicted_token, "\n")


# +
def predict_next_sentence(sentenceA, sentenceBs, tokenizer, model, device):
    """Return the candidate from `sentenceBs` that BERT's next-sentence head
    scores as the most likely continuation of `sentenceA`."""
    a_toks = tokenizer.tokenize(sentenceA)
    a_ids = tokenizer.convert_tokens_to_ids(a_toks)
    a_types = [0] * len(a_ids)
    a_attention = [1] * len(a_ids)

    # Tokenize every candidate B sentence, then right-pad them all to the
    # longest candidate so they can be batched together.
    b_ids_list = [tokenizer.convert_tokens_to_ids(tokenizer.tokenize(b))
                  for b in sentenceBs]
    max_b_len = max(len(b_ids) for b_ids in b_ids_list)

    tok_ids, tok_types, tok_attention = [], [], []
    for b_ids in b_ids_list:
        pad = max_b_len - len(b_ids)
        tok_ids.append(a_ids + b_ids + [0] * pad)
        tok_types.append(a_types + [1] * max_b_len)
        # Padding positions are masked out of attention.
        tok_attention.append(a_attention + [1] * len(b_ids) + [0] * pad)

    ids_tensor = torch.LongTensor(tok_ids).to(device)
    types_tensor = torch.LongTensor(tok_types).to(device)
    attention_tensor = torch.LongTensor(tok_attention).to(device)

    seq_relationship_logits = model(ids_tensor, types_tensor, attention_tensor)
    # Column 0 is the "is next sentence" logit; pick the best candidate.
    return sentenceBs[seq_relationship_logits[:, 0].argmax().tolist()]


# +
@torch.no_grad()
def generate_predictions(args):
    """Sample 100 consecutive-line pairs from the lyrics CSV, ask BERT to pick
    the true next line among the gold line plus two random distractors, and
    write prediction and gold files to Drive."""
    all_lines = []
    all_pairs = []
    with open(args.datafile, encoding='utf8') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            lines = row['lyrics'].split('\n')
            for i in range(len(lines) - 1):
                all_pairs.append((lines[i], lines[i + 1]))
                all_lines.append(lines[i])
            all_lines.append(lines[len(lines) - 1])

    # For each sampled (line1, line2) pair, line2 is the gold continuation and
    # two random lines from the corpus serve as distractors.
    sampled_data_x = {}
    sampled_data_y = {}
    correct_pairs = random.sample(all_pairs, 100)
    for line1, line2 in correct_pairs:
        sampled_data_y[line1] = line2
        sampled_data_x[line1] = [line2]
        sampled_data_x[line1].extend(random.sample(all_lines, 2))

    with open('gdrive/My Drive/CIS530/predfile', 'w') as file:
        for i, (line1, line2s) in enumerate(sampled_data_x.items()):
            line2 = predict_next_sentence(line1, line2s, tokenizer,
                                          next_sent_pred_model, device)
            file.write(f'{line1}\t{line2}\n')
            if (i + 1) % 10 == 0:
                print(f'Finished predicting {i + 1} lines...')

    with open('gdrive/My Drive/CIS530/goldfile', 'w') as file:
        for line1, line2 in sampled_data_y.items():
            file.write(f'{line1}\t{line2}\n')


# +
# Load the pretrained tokenizer and both BERT heads, then move them to the
# available device (GPU if present).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

masked_model = BertForMaskedLM.from_pretrained('bert-base-uncased')
masked_model.eval()

next_sent_pred_model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
next_sent_pred_model.eval()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
masked_model = masked_model.to(device)
next_sent_pred_model = next_sent_pred_model.to(device)

# +
predict_masked_word(tokenizer, masked_model, device)

# +
parser = argparse.ArgumentParser()
parser.add_argument('--datafile', type=str, required=True)
args = parser.parse_args(['--datafile', 'gdrive/My Drive/CIS530/Lyrics-Generation/data/test_rock.csv'])
generate_predictions(args)

# +
warnings.simplefilter("ignore", UserWarning)


def bleuScore(gold, pred):
    """Average sentence BLEU (up to 3-grams) of predictions vs gold lines."""
    cumulativeBlue, totalSentences = 0, len(gold)
    for line in gold:
        assert line in pred
        reference = [gold[line].split(' ')]
        candidate = pred[line].split(' ')
        cumulativeBlue += sentence_bleu(reference, candidate,
                                        weights=(.334, 0.333, 0.333, 0))
    return cumulativeBlue / totalSentences


def accuracy(gold, pred):
    """Fraction of lines whose predicted next line equals the gold one."""
    num_correct, num_total = 0, 0
    for line1 in gold:
        assert line1 in pred
        if gold[line1] == pred[line1]:
            num_correct += 1
        num_total += 1
    return num_correct / num_total


def loadData(name):
    """Read a tab-separated (line, next line) file into a dict."""
    data = {}
    with open(name) as file:
        for line in file:
            line1, line2 = line.strip().split('\t')
            data[line1] = line2
    return data


def main(args):
    gold = loadData(args.goldfile)
    pred = loadData(args.predfile)
    assert len(gold) == len(pred)
    print(f'Accuracy: {accuracy(gold, pred):.2f}')
    print(f'BLEU score: {bleuScore(gold, pred):.2f}')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--goldfile', type=str, required=True)
    parser.add_argument('--predfile', type=str, required=True)
    args = parser.parse_args(['--goldfile', 'gdrive/My Drive/CIS530/goldfile',
                              '--predfile', 'gdrive/My Drive/CIS530/predfile'])
    main(args)

# +
"""
Try to generate from BERT
"""

MASK = "[MASK]"
MASK_ATOM = "[MASK]"


def preprocess(tokens, tokenizer, device):
    """Convert a token list to an id tensor on the target device."""
    tok_ids = tokenizer.convert_tokens_to_ids(tokens)
    return torch.tensor([tok_ids]).to(device)


def get_mask_ids(masking):
    """Parse a comma-separated list of mask positions; None/empty -> []."""
    return [int(d) for d in masking.split(',')] if masking else []


def get_seed_sent(seed_sentence, tokenizer, masking=None, n_append_mask=0):
    """Build the initial token list to decode from, possibly with masks."""
    mask_ids = get_mask_ids(masking)

    # Tokenize, respecting any literal [MASK] markers in the seed sentence.
    toks = tokenizer.tokenize(seed_sentence.replace(MASK, MASK_ATOM))
    for i, tok in enumerate(toks):
        if tok == MASK_ATOM:
            mask_ids.append(i)

    # Mask the requested positions.
    for mask_id in mask_ids:
        toks[mask_id] = MASK

    # Optionally append extra MASK tokens at the end.
    for _ in range(n_append_mask):
        mask_ids.append(len(toks))
        toks.append(MASK)

    mask_ids = sorted(set(mask_ids))
    seg_tensor = torch.tensor([[0] * len(toks)])
    return toks, seg_tensor, mask_ids


def load_model(version):
    """Load a pretrained masked-LM head in eval mode."""
    model = BertForMaskedLM.from_pretrained(version)
    model.eval()
    return model


def predict(model, tokenizer, tok_tensor, seg_tensor, how_select="argmax"):
    """Run the model and turn per-position predictions back into tokens."""
    preds = model(tok_tensor, seg_tensor)
    if how_select == "sample":
        dist = Categorical(logits=F.log_softmax(preds[0], dim=-1))
        pred_idxs = dist.sample().tolist()
    elif how_select == "topk":
        kth_vals, kth_idx = F.log_softmax(preds[0], dim=-1).topk(3, dim=-1)
        dist = Categorical(logits=kth_vals)
        pred_idxs = kth_idx.gather(dim=1, index=dist.sample().unsqueeze(-1)).squeeze(-1).tolist()
    elif how_select == "argmax":
        pred_idxs = preds.argmax(dim=-1).tolist()[0]
    else:
        raise NotImplementedError("Prediction procedure %s not found!" % how_select)
    return tokenizer.convert_ids_to_tokens(pred_idxs)


def masked_decoding(toks, seg_tensor, masks, model, tokenizer, device, selection_strategy):
    """Fill masks left to right, re-running the model after each replacement."""
    for step_n, mask_id in enumerate(masks):
        tok_tensor = preprocess(toks, tokenizer, device)
        pred_toks = predict(model, tokenizer, tok_tensor, seg_tensor, selection_strategy)
        toks[mask_id] = pred_toks[mask_id]
    return toks


def detokenize(pred_toks):
    """Merge WordPiece '##' continuations back into whole words."""
    new_sent = []
    for tok in pred_toks:
        if tok.startswith("##"):
            new_sent[-1] = new_sent[-1] + tok[2:]
        else:
            new_sent.append(tok)
    return new_sent


@torch.no_grad()
def main():
    toks, seg_tensor, mask_ids = get_seed_sent(
        "[CLS] Sing with me, Sing for the years [SEP] [MASK] [MASK] [MASK] [MASK] , [MASK] [MASK] [MASK] tears. [SEP]",
        tokenizer, masking=None, n_append_mask=0)
    seg_tensor = seg_tensor.to(device)
    pred_toks = masked_decoding(toks, seg_tensor, mask_ids, masked_model,
                                tokenizer, device, "argmax")
    pred_lyric = detokenize(pred_toks)
    print("\nFinal: %s" % (" ".join(pred_lyric)), "\n")


if __name__ == '__main__':
    main()
Proyectos-guia/Lyrics-Generation-using-BERT-master/code/bert_baselines.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # RGB colorspace

# ### Import resources

# +
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# %matplotlib inline
# -

# ### Read in an image

# +
# Load the image as an (H, W, 3) RGB array and display it.
image = mpimg.imread('images/wa_state_highway.jpg')

plt.imshow(image)
# -

# ### RGB channels
#
# Visualize the levels of each color channel. Pay close attention to the
# traffic signs!

# +
# Slice out each of the three color channels as a 2-D intensity map.
r = image[:, :, 0]
g = image[:, :, 1]
b = image[:, :, 2]

# Show the channels side by side; brighter pixels mean more of that color.
f, axes = plt.subplots(1, 3, figsize=(20, 10))
for ax, title, channel in zip(axes, ('R channel', 'G channel', 'B channel'),
                              (r, g, b)):
    ax.set_title(title)
    ax.imshow(channel, cmap='gray')

## Which area has the lowest value for red? What about for blue?
1_1_Image_Representation/2. Visualizing RGB Channels.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# # Multiclass Classification
#
# In the last notebook, we looked at binary classification. This works well
# when the data observations belong to one of two classes or categories, such
# as "True" or "False". When the data can be categorized into more than two
# classes, you must use a multiclass classification algorithm.
#
# Multiclass classification can be thought of as a combination of multiple
# binary classifiers. There are two ways in which you approach the problem:
#
# - **One vs Rest (OVR)**, in which a classifier is created for each possible
#   class value, with a positive outcome for cases where the prediction is
#   *this* class, and negative predictions for cases where the prediction is
#   any other class. A classification problem with four possible shape classes
#   (*square*, *circle*, *triangle*, *hexagon*) would require four classifiers
#   that predict: *square* or not, *circle* or not, *triangle* or not,
#   *hexagon* or not.
#
# - **One vs One (OVO)**, in which a classifier is created for each possible
#   pair of classes. The four-shape problem would require six binary
#   classifiers: square/circle, square/triangle, square/hexagon,
#   circle/triangle, circle/hexagon, triangle/hexagon.
#
# In both approaches, the overall model combines the classifiers' probability
# outputs into a vector of predictions used to determine which class to
# predict. Fortunately, in most machine learning frameworks, including
# scikit-learn, implementing a multiclass classification model is not
# significantly more complex than binary classification — most estimators
# implicitly support it by abstracting an OVR algorithm, an OVO algorithm, or
# by allowing a choice of either.
#
# > **More Information**: See the
# > [Scikit-Learn documentation](https://scikit-learn.org/stable/modules/multiclass.html).
#
# ### Explore the data
#
# We'll use a dataset that contains observations of three different species of
# penguin.
#
# > **Citation**: The penguins dataset used in this exercise is a subset of
# > data collected and made available by Dr. Kristen Gorman and the Palmer
# > Station, Antarctica LTER, a member of the Long Term Ecological Research
# > Network.

# +
import pandas as pd

# load the training dataset
# !wget https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/ml-basics/penguins.csv
penguins = pd.read_csv('penguins.csv')

# Display a random sample of 10 observations
sample = penguins.sample(10)
sample

# + [markdown]
# The dataset contains the following columns:
# * **CulmenLength**: The length in mm of the penguin's culmen (bill).
# * **CulmenDepth**: The depth in mm of the penguin's culmen.
# * **FlipperLength**: The length in mm of the penguin's flipper.
# * **BodyMass**: The body mass of the penguin in grams.
# * **Species**: An integer value that represents the species of the penguin.
#
# The **Species** column is the label we want to train a model to predict. The
# dataset includes three possible species, encoded as 0, 1, and 2. The actual
# species names are revealed by the code below:

# +
penguin_classes = ['Adelie', 'Gentoo', 'Chinstrap']
print(sample.columns[0:5].values, 'SpeciesName')
for index, row in penguins.sample(10).iterrows():
    print('[', row[0], row[1], row[2], row[3], int(row[4]), ']',
          penguin_classes[int(row[4])])

# + [markdown]
# Now let's explore the dataset. First, let's see if there are any missing
# (*null*) values.

# +
# Count the number of null values for each column
penguins.isnull().sum()

# + [markdown]
# It looks like there are some missing feature values, but no missing labels.
# Let's dig a little deeper and see the rows that contain nulls.

# +
# Show rows containing nulls
penguins[penguins.isnull().any(axis=1)]

# + [markdown]
# There are two rows that contain no feature values at all (*NaN* stands for
# "not a number"), so these won't be useful in training a model. Let's discard
# them from the dataset.

# +
# Drop rows containing NaN values
penguins = penguins.dropna()

# Confirm there are now no nulls
penguins.isnull().sum()

# + [markdown]
# Now that we've dealt with the missing values, let's explore how the features
# relate to the label by creating some box charts.

# +
from matplotlib import pyplot as plt
# %matplotlib inline

penguin_features = ['CulmenLength', 'CulmenDepth', 'FlipperLength', 'BodyMass']
penguin_label = 'Species'
# One box plot per feature, grouped by species, to compare distributions.
for col in penguin_features:
    penguins.boxplot(column=col, by=penguin_label, figsize=(6, 6))
    plt.title(col)
plt.show()

# + [markdown]
# From the box plots, it looks like species 0 and 2 (Adelie and Chinstrap)
# have similar data profiles for culmen depth, flipper length, and body mass,
# but Chinstraps tend to have longer culmens. Species 1 (Gentoo) tends to have
# fairly clearly differentiated features; that should help us train a good
# classification model.
#
# ### Prepare the data
#
# Just as for binary classification, we need to separate the features and
# label, then split the data into subsets for training and validation. We also
# apply *stratification* to maintain the proportion of each label value in
# both subsets.

# +
from sklearn.model_selection import train_test_split

# Separate features and labels
penguins_X, penguins_y = penguins[penguin_features].values, penguins[penguin_label].values

# Split data 70%-30% into training set and test set, stratified by label
x_penguin_train, x_penguin_test, y_penguin_train, y_penguin_test = train_test_split(
    penguins_X, penguins_y, test_size=0.30, random_state=0, stratify=penguins_y)

print('Training Set: %d, Test Set: %d \n' % (x_penguin_train.shape[0],
                                             x_penguin_test.shape[0]))

# + [markdown]
# ### Train and evaluate a multiclass classifier
#
# Most scikit-learn classification algorithms inherently support multiclass
# classification. We'll try a logistic regression algorithm.

# +
from sklearn.linear_model import LogisticRegression

# Set regularization rate (C is the inverse of the regularization strength)
reg = 0.1

# train a logistic regression model on the training set
multi_model = LogisticRegression(C=1/reg, solver='lbfgs', multi_class='auto',
                                 max_iter=10000).fit(x_penguin_train,
                                                     y_penguin_train)
print(multi_model)

# + [markdown]
# Now we can use the trained model to predict the labels for the test
# features, and compare the predicted labels to the actual labels:

# +
penguin_predictions = multi_model.predict(x_penguin_test)
print('Predicted labels: ', penguin_predictions[:15])
print('Actual labels   : ', y_penguin_test[:15])

# + [markdown]
# Let's look at a classification report.

# +
from sklearn.metrics import classification_report

print(classification_report(y_penguin_test, penguin_predictions))

# + [markdown]
# As with binary classification, the report includes *precision* and *recall*
# metrics for each class. With multiple classes we need to look at an overall
# metric (the macro or weighted average) to get a sense of how well the model
# performs across all three classes.
#
# You can get the overall metrics separately using the scikit-learn metric
# score functions, but with multiclass results you must specify which average
# metric to use for precision and recall.

# +
from sklearn.metrics import accuracy_score, precision_score, recall_score

print("Overall Accuracy:", accuracy_score(y_penguin_test, penguin_predictions))
print("Overall Precision:", precision_score(y_penguin_test, penguin_predictions,
                                            average='macro'))
print("Overall Recall:", recall_score(y_penguin_test, penguin_predictions,
                                      average='macro'))

# + [markdown]
# Now let's look at the confusion matrix for our model:

# +
from sklearn.metrics import confusion_matrix

# Print the confusion matrix
mcm = confusion_matrix(y_penguin_test, penguin_predictions)
print(mcm)

# + [markdown]
# The confusion matrix shows the intersection of predicted and actual label
# values for each class — the diagonal from top-left to bottom-right indicates
# the number of correct predictions.
#
# When dealing with multiple classes, it's generally more intuitive to
# visualize this as a heat map:

# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

plt.imshow(mcm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(penguin_classes))
plt.xticks(tick_marks, penguin_classes, rotation=45)
plt.yticks(tick_marks, penguin_classes)
plt.xlabel("Predicted Species")
plt.ylabel("Actual Species")
plt.show()

# + [markdown]
# The darker squares indicate high numbers of cases; a diagonal line of darker
# squares shows where the predicted and actual label are the same.
#
# For a multiclass model, a single ROC curve is not possible, but you can use
# the rates for each class in a One vs Rest (OVR) comparison to create a ROC
# chart for each class.

# +
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score

# Get class probability scores
penguin_prob = multi_model.predict_proba(x_penguin_test)

# Get ROC metrics for each class (one-vs-rest)
fpr = {}
tpr = {}
thresh = {}
for i in range(len(penguin_classes)):
    fpr[i], tpr[i], thresh[i] = roc_curve(y_penguin_test, penguin_prob[:, i],
                                          pos_label=i)

# Plot the ROC chart
plt.plot(fpr[0], tpr[0], linestyle='--', color='orange',
         label=penguin_classes[0] + ' vs Rest')
plt.plot(fpr[1], tpr[1], linestyle='--', color='green',
         label=penguin_classes[1] + ' vs Rest')
plt.plot(fpr[2], tpr[2], linestyle='--', color='blue',
         label=penguin_classes[2] + ' vs Rest')
plt.title('Multiclass ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive rate')
plt.legend(loc='best')
plt.show()

# + [markdown]
# To quantify the ROC performance, you can calculate an aggregate area under
# the curve score averaged across all of the OVR curves.

# +
auc = roc_auc_score(y_penguin_test, penguin_prob, multi_class='ovr')
print('Average AUC:', auc)

# + [markdown]
# ### Preprocess data in a pipeline
#
# Again, just like with binary classification, you can use a pipeline to apply
# preprocessing steps before fitting the model. Let's see if we can improve
# the penguin predictor by scaling the numeric features before training. We'll
# also try a different algorithm (a support vector machine), just to show that
# we can!

# +
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

# Define preprocessing for numeric columns (scale them)
feature_columns = [0, 1, 2, 3]
feature_transformer = Pipeline(steps=[
    ('scaler', StandardScaler())
])

# Create preprocessing steps
preprocessor = ColumnTransformer(
    transformers=[
        ('preprocess', feature_transformer, feature_columns)])

# Create training pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                           ('regressor', SVC(probability=True))])

# fit the pipeline to train an SVC model on the training set
multi_model = pipeline.fit(x_penguin_train, y_penguin_train)
print(multi_model)

# + [markdown]
# Now we can evaluate the new model.

# +
# Get predictions from test data
penguin_predictions = multi_model.predict(x_penguin_test)
penguin_prob = multi_model.predict_proba(x_penguin_test)

# Overall metrics
print("Overall Accuracy:", accuracy_score(y_penguin_test, penguin_predictions))
print("Overall Precision:", precision_score(y_penguin_test, penguin_predictions,
                                            average='macro'))
print("Overall Recall:", recall_score(y_penguin_test, penguin_predictions,
                                      average='macro'))
print('Average AUC:', roc_auc_score(y_penguin_test, penguin_prob,
                                    multi_class='ovr'))

# Confusion matrix
# BUG FIX: the original plotted the stale `mcm` computed earlier for the
# logistic regression model. Recompute it from the new pipeline's predictions
# so the heat map actually reflects the model being evaluated.
mcm = confusion_matrix(y_penguin_test, penguin_predictions)
plt.imshow(mcm, interpolation="nearest", cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(len(penguin_classes))
plt.xticks(tick_marks, penguin_classes, rotation=45)
plt.yticks(tick_marks, penguin_classes)
plt.xlabel("Predicted Species")
plt.ylabel("Actual Species")
plt.show()

# + [markdown]
# ### Use the model with new data observations
#
# Now let's save our trained model so we can use it again later.
# + colab={"base_uri": "https://localhost:8080/"} id="wDVbhQufDlCS" executionInfo={"status": "ok", "timestamp": 1646198171743, "user_tz": -345, "elapsed": 797, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjLLRQteQA-BSVM11RQI0Jpgt7v0nw6hWzW4zvL=s64", "userId": "07019866810236427547"}} outputId="d8743a48-b445-4780-ee13-6757b7cc432b" import joblib # Save the model as a pickle file filename = './penguin_model.pkl' joblib.dump(multi_model, filename) # + [markdown] id="BxLz4W26DsKi" # OK, so now we have a trained model. Let's use it to predict the class of a new penguin observation: # + colab={"base_uri": "https://localhost:8080/"} id="JNcYU8-2DpvU" executionInfo={"status": "ok", "timestamp": 1646198189424, "user_tz": -345, "elapsed": 448, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjLLRQteQA-BSVM11RQI0Jpgt7v0nw6hWzW4zvL=s64", "userId": "07019866810236427547"}} outputId="5c18fe19-671d-426f-83fa-7de64980349f" # Load the model from the file multi_model = joblib.load(filename) # The model accepts an array of feature arrays (so you can predict the classes of multiple penguin observations in a single call) # We'll create an array with a single array of features, representing one penguin x_new = np.array([[50.4,15.3,224,5550]]) print ('New sample: {}'.format(x_new[0])) # The model returns an array of predictions - one for each set of features submitted # In our case, we only submitted one penguin, so our prediction is the first one in the resulting array. penguin_pred = multi_model.predict(x_new)[0] print('Predicted class is', penguin_classes[penguin_pred]) # + [markdown] id="alTIHjj7DwmT" # You can also submit a batch of penguin observations to the model, and get back a prediction for each one. 
# + colab={"base_uri": "https://localhost:8080/"} id="xMjKg6ACDuc5" executionInfo={"status": "ok", "timestamp": 1646198207737, "user_tz": -345, "elapsed": 503, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjLLRQteQA-BSVM11RQI0Jpgt7v0nw6hWzW4zvL=s64", "userId": "07019866810236427547"}} outputId="7f9bb503-ad7f-4d90-cbea-ec27574c23aa" # This time our input is an array of two feature arrays x_new = np.array([[49.5,18.4,195, 3600], [38.2,20.1,190,3900]]) print ('New samples:\n{}'.format(x_new)) # Call the web service, passing the input data predictions = multi_model.predict(x_new) # Get the predicted classes. for prediction in predictions: print(prediction, '(' + penguin_classes[prediction] +')') # + [markdown] id="lngyiAIdD1Sc" # ## Summary # # Classification is one of the most common forms of machine learning, and by following the basic principles we've discussed in this notebook you should be able to train and evaluate classification models with scikit-learn. It's worth spending some time investigating classification algorithms in more depth, and a good starting point is the [Scikit-Learn documentation](https://scikit-learn.org/stable/user_guide.html).
11. Train and evaluate multiclass classification models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python37 # language: python # name: py37 # --- #hide from nbdev_end2end.core import * # # **nbdev**牛逼开发环境从开始到放弃 # > 觉得好,就做。。。 # This file will become your README and also the index of your documentation. # ## Install # `pip install your_project_name` # ## How to use # Fill me in please! Don't forget code examples: 1+1 # !pip install -U nbdev #HIDE from nbdev.export import notebook2script notebook2script() # !nbdev_build_docs
index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example Seldon Core Deployments using Helm # <img src="images/deploy-graph.png" alt="predictor with canary" title="ml graph"/> # ## Prerequistes # You will need # - [Git clone of Seldon Core](https://github.com/SeldonIO/seldon-core) # - A running Kubernetes cluster with kubectl authenticated # - [seldon-core Python package](https://pypi.org/project/seldon-core/) (```pip install seldon-core>=0.2.6.1```) # - [Helm client](https://helm.sh/) # ### Creating a Kubernetes Cluster # # Follow the [Kubernetes documentation to create a cluster](https://kubernetes.io/docs/setup/). # # Once created ensure ```kubectl``` is authenticated against the running cluster. # ## Setup # !kubectl create namespace seldon # !kubectl config set-context $(kubectl config current-context) --namespace=seldon # !kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default # ## Install Helm # !kubectl -n kube-system create sa tiller # !kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller # !helm init --service-account tiller # !kubectl rollout status deploy/tiller-deploy -n kube-system # ## Start seldon-core # !helm install ../helm-charts/seldon-core-operator --name seldon-core --set usageMetrics.enabled=true --namespace seldon-system # !kubectl rollout status statefulset.apps/seldon-operator-controller-manager -n seldon-system # ## Setup Ingress # There are gRPC issues with the latest Ambassador, so we rewcommend 0.40.2 until these are fixed. 
# !helm install stable/ambassador --name ambassador --set crds.keep=false # !kubectl rollout status deployment.apps/ambassador # ### Port Forward to Ambassador # # ``` # kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080 # ``` # ## Serve Single Model # !helm install ../helm-charts/seldon-single-model --name mymodel # !helm template ../helm-charts/seldon-single-model | pygmentize -l json # !kubectl rollout status deploy/mymodel-mymodel-7cd068f # ### Get predictions from seldon_core.seldon_client import SeldonClient sc = SeldonClient(deployment_name="mymodel",namespace="seldon",gateway_endpoint="localhost:8003",gateway="ambassador") # #### REST Request r = sc.predict(transport="rest") print(r) # #### gRPC Request r = sc.predict(transport="grpc") print(r) # !helm delete mymodel --purge # ## Serve AB Test # !helm install ../helm-charts/seldon-abtest --name myabtest # !helm template ../helm-charts/seldon-abtest | pygmentize -l json # !kubectl rollout status deploy/myabtest-abtest-41de5b8 # !kubectl rollout status deploy/myabtest-abtest-df66c5c # ### Get predictions from seldon_core.seldon_client import SeldonClient sc = SeldonClient(deployment_name="myabtest",namespace="seldon",gateway_endpoint="localhost:8003",gateway="ambassador") # #### REST Request r = sc.predict(transport="rest") print(r) # #### gRPC Request r = sc.predict(transport="grpc") print(r) # !helm delete myabtest --purge # ## Serve Multi-Armed Bandit # !helm install ../helm-charts/seldon-mab --name mymab # !helm template ../helm-charts/seldon-mab | pygmentize -l json # !kubectl rollout status deploy/mymab-abtest-41de5b8 # !kubectl rollout status deploy/mymab-abtest-b8038b2 # !kubectl rollout status deploy/mymab-abtest-df66c5c # ### Get predictions from seldon_core.seldon_client import SeldonClient sc = 
SeldonClient(deployment_name="mymab",namespace="seldon",gateway_endpoint="localhost:8003",gateway="ambassador") # #### REST Request r = sc.predict(transport="rest") print(r) # #### gRPC Request r = sc.predict(transport="grpc") print(r) # !helm delete mymab --purge
notebooks/helm_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] colab_type="text" id="tN1oNjn9VGdB" # # SIT742: Modern Data Science # **(Week 02: A Touch of Data Science)** # # --- # - Materials in this module include resources collected from various open-source online repositories. # - You are free to use, change and distribute this package. # - If you found any issue/bug for this document, please submit an issue at [tulip-lab/sit742](https://github.com/tulip-lab/sit742/issues) # # Prepared by **SIT742 Teaching Team** # # # --- # # # # Session 2B - NumPy's Structured Arrays # + [markdown] colab_type="text" id="2bonTwf8VGdK" # While often our data can be well represented by a homogeneous array of values, sometimes this is not the case. This section demonstrates the use of NumPy's *structured arrays* and *record arrays*, which provide efficient storage for compound, heterogeneous data. While the patterns shown here are useful for simple operations, scenarios like this often lend themselves to the use of Pandas ``Dataframe``s, which we'll explore in future sessions. # + colab={} colab_type="code" id="WXUqJH5kVGdR" import numpy as np # + [markdown] colab_type="text" id="KTQfLWePVGde" # Imagine that we have several categories of data on a number of people (say, name, age, and weight), and we'd like to store these values for use in a Python program. # It would be possible to store these in three separate arrays: # + colab={} colab_type="code" id="m1bo6ntuVGdh" name = ['Alice', 'Bob', 'Cathy', 'Doug'] age = [25, 45, 37, 19] weight = [55.0, 85.5, 68.0, 61.5] # + [markdown] colab_type="text" id="rix5zqmPVGdq" # But this is a bit clumsy. There's nothing here that tells us that the three arrays are related; it would be more natural if we could use a single structure to store all of this data. 
# NumPy can handle this through structured arrays, which are arrays with compound data types. # # Recall that previously we created a simple array using an expression like this: # + colab={} colab_type="code" id="AcQ8KY1uVGdv" x = np.zeros(4, dtype=int) # + [markdown] colab_type="text" id="Ke0w8f26VGd4" # We can similarly create a structured array using a compound data type specification: # + colab={} colab_type="code" id="vZLFmnhMVGd7" # Use a compound data type for structured arrays data = np.zeros(4, dtype={'names':('name', 'age', 'weight'), 'formats':('U10', 'i4', 'f8')}) print(data.dtype) # + [markdown] colab_type="text" id="heNzAie2VGeF" # Here ``'U10'`` translates to "Unicode string of maximum length 10," ``'i4'`` translates to "4-byte (i.e., 32 bit) integer," and ``'f8'`` translates to "8-byte (i.e., 64 bit) float." # We'll discuss other options for these type codes in the following section. # # Now that we've created an empty container array, we can fill the array with our lists of values: # + colab={} colab_type="code" id="RHGZKSf6VGeI" data['name'] = name data['age'] = age data['weight'] = weight print(data) # + [markdown] colab_type="text" id="GRR0ZQMcVGeR" # As we had hoped, the data is now arranged together in one convenient block of memory. 
# # The handy thing with structured arrays is that you can now refer to values either by index or by name: # + colab={} colab_type="code" id="N58jqN5AVGeU" # Get all names data['name'] # + colab={} colab_type="code" id="QAQTvJOHVGel" # Get first row of data data[0] # + colab={} colab_type="code" id="Le5J2X5GVGey" # Get the name from the last row data[-1]['name'] # + [markdown] colab_type="text" id="h7QXxzmpVGe9" # Using Boolean masking, this even allows you to do some more sophisticated operations such as filtering on age: # + colab={} colab_type="code" id="8XWhfGsQVGfH" # Get names where age is under 30 data[data['age'] < 30]['name'] # + [markdown] colab_type="text" id="-pwa4tFSVGfP" # Note that if you'd like to do any operations that are any more complicated than these, you should probably consider the Pandas package, covered in the next chapter. # As we'll see, Pandas provides a ``Dataframe`` object, which is a structure built on NumPy arrays that offers a variety of useful data manipulation functionality similar to what we've shown here, as well as much, much more. # + [markdown] colab_type="text" id="HoIwTvrtVGfS" # ## Creating Structured Arrays # # Structured array data types can be specified in a number of ways. 
# Earlier, we saw the dictionary method: # + colab={} colab_type="code" id="-_JrEBJ1VGfU" np.dtype({'names':('name', 'age', 'weight'), 'formats':('U10', 'i4', 'f8')}) # + [markdown] colab_type="text" id="Cm6uDC7XVGfb" # For clarity, numerical types can be specified using Python types or NumPy ``dtype``s instead: # + colab={} colab_type="code" id="WZ4hWV3gVGfe" np.dtype({'names':('name', 'age', 'weight'), 'formats':((np.str_, 10), int, np.float32)}) # + [markdown] colab_type="text" id="Nh6Wv-_rVGfl" # A compound type can also be specified as a list of tuples: # + colab={} colab_type="code" id="k4J505_pVGfo" np.dtype([('name', 'S10'), ('age', 'i4'), ('weight', 'f8')]) # + [markdown] colab_type="text" id="zWQ8dF8VVGfv" # If the names of the types do not matter to you, you can specify the types alone in a comma-separated string: # + colab={} colab_type="code" id="HJJhxX_qVGfy" np.dtype('S10,i4,f8') # + [markdown] colab_type="text" id="5Qp2KNniVGf5" # The shortened string format codes may seem confusing, but they are built on simple principles. # The first (optional) character is ``<`` or ``>``, which means "little endian" or "big endian," respectively, and specifies the ordering convention for significant bits. # The next character specifies the type of data: characters, bytes, ints, floating points, and so on (see the table below). # The last character or characters represents the size of the object in bytes. 
# # | Character | Description | Example | # | --------- | ----------- | ------- | # | ``'b'`` | Byte | ``np.dtype('b')`` | # | ``'i'`` | Signed integer | ``np.dtype('i4') == np.int32`` | # | ``'u'`` | Unsigned integer | ``np.dtype('u1') == np.uint8`` | # | ``'f'`` | Floating point | ``np.dtype('f8') == np.int64`` | # | ``'c'`` | Complex floating point| ``np.dtype('c16') == np.complex128``| # | ``'S'``, ``'a'`` | String | ``np.dtype('S5')`` | # | ``'U'`` | Unicode string | ``np.dtype('U') == np.str_`` | # | ``'V'`` | Raw data (void) | ``np.dtype('V') == np.void`` | # + [markdown] colab_type="text" id="W6fXzzT6VGf8" # ## More Advanced Compound Types # # It is possible to define even more advanced compound types. # For example, you can create a type where each element contains an array or matrix of values. # Here, we'll create a data type with a ``mat`` component consisting of a $3\times 3$ floating-point matrix: # + colab={} colab_type="code" id="52eUWALIVGgB" tp = np.dtype([('id', 'i8'), ('mat', 'f8', (3, 3))]) X = np.zeros(1, dtype=tp) print(X[0]) print(X['mat'][0]) # + [markdown] colab_type="text" id="oC4jqjB4VGgT" # Now each element in the ``X`` array consists of an ``id`` and a $3\times 3$ matrix. # Why would you use this rather than a simple multidimensional array, or perhaps a Python dictionary? # The reason is that this NumPy ``dtype`` directly maps onto a C structure definition, so the buffer containing the array content can be accessed directly within an appropriately written C program. # If you find yourself writing a Python interface to a legacy C or Fortran library that manipulates structured data, you'll probably find structured arrays quite useful! 
# + [markdown] colab_type="text" id="SvdBUpvVVGgW" # ## RecordArrays: Structured Arrays with a Twist # # NumPy also provides the ``np.recarray`` class, which is almost identical to the structured arrays just described, but with one additional feature: fields can be accessed as attributes rather than as dictionary keys. # Recall that we previously accessed the ages by writing: # + colab={} colab_type="code" id="cQy_hZgHVGgY" data['age'] # + [markdown] colab_type="text" id="qv1PIr0JVGgi" # If we view our data as a record array instead, we can access this with slightly fewer keystrokes: # + colab={} colab_type="code" id="fWKE4lTfVGgr" data_rec = data.view(np.recarray) data_rec.age # + [markdown] colab_type="text" id="khKEGTLNVGgz" # The downside is that for record arrays, there is some extra overhead involved in accessing the fields, even when using the same syntax. We can see this here: # + colab={} colab_type="code" id="2hkmj0NkVGg2" # %timeit data['age'] # %timeit data_rec['age'] # %timeit data_rec.age
Jupyter/M09-Optional/SIT742P02B-StructuredDataNumPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="agrdrUc10xST" colab_type="text" # ## IMPORTS # # + id="q4x3qvwVSKJm" colab_type="code" colab={} import numpy as np import matplotlib.pyplot as plt from matplotlib.path import Path from matplotlib.spines import Spine from matplotlib.projections.polar import PolarAxes from matplotlib.projections import register_projection # + id="SrT5EcZiSQF0" colab_type="code" colab={} import matplotlib.pyplot as plt import pandas as pd from math import pi # + id="kyGDpwgGRnO9" colab_type="code" colab={} # 'acousticness', 'danceability', # 'energy', 'instrumentalness', 'key', 'liveness', # 'loudness', , 'speechiness', 'tempo', 'time_signature' # + id="knX7eGhvk_jf" colab_type="code" colab={} d = {'Danceability': [0.2904, 0.4540, 0.322200, 0.46700], 'Energy': [0.9340, 0.91300, 0.89700, 0.65200], 'Instrumentalness': [0.8780, 0.86100, 0.9000, 0.89300], 'Liveness': [0.1110, 0.13500, 0.14500, 0.00250], 'Valence': [0.8890, 0.8220, 0.58900, 0.95600]} df = pd.DataFrame(data=d) # + id="IS4MZicXSQP5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="54988563-2651-414f-9ee6-03ea425cdafe" df # + id="q2GEvQ_Co6H6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 845} outputId="1fa99e9c-ce9d-444b-d9aa-8107b6971728" # Set data df = pd.DataFrame({'group': ['A','B','C','D'], 'Danceability': [0.2904, 0.4540, 0.322200, 0.46700], 'Energy': [0.9340, 0.91300, 0.89700, 0.65200], 'Instrumentalness': [0.8780, 0.86100, 0.9000, 0.89300], 'Liveness': [0.1110, 0.13500, 0.14500, 0.00250], 'Valence': [0.8890, 0.8220, 0.58900, 0.95600]}) # ------- PART 1: Define a function that do a plot for one line of the dataset! 
def make_spider( row, title, color):
    """Draw one polar (radar/spider) subplot for row `row` of the global `df`.

    Reads the module-level DataFrame ``df`` (first column is 'group', the
    rest are the numeric feature axes) and the module-level ``pi``.
    Plots into subplot ``row+1`` of a fixed 2x2 grid, so it assumes at most
    4 rows in ``df``.  All output goes through pyplot's current figure.
    """
    # Feature axes: every column of df except the leading 'group' column.
    categories=list(df)[1:]
    N = len(categories)

    # Angle of each axis: the full circle divided evenly among the N axes,
    # with the first angle repeated at the end so the polygon closes.
    angles = [n / float(N) * 2 * pi for n in range(N)]
    angles += angles[:1]

    # Initialise the spider plot in cell row+1 of a 2x2 grid.
    ax = plt.subplot(2,2,row+1, polar=True, )

    # Rotate so the first axis is at the top, and go clockwise.
    ax.set_theta_offset(pi / 2)
    ax.set_theta_direction(-1)

    # One tick per feature axis, labelled with the column name.
    plt.xticks(angles[:-1], categories, color='grey', size=8)

    # Radial labels; values are assumed to lie in [0, 1] — TODO confirm
    # against the data source (all sample values here do).
    ax.set_rlabel_position(0)
    plt.yticks([0.2,0.4,0.6,0.8], ["0.2","0.4","0.6","0.8"], color="grey", size=7)
    plt.ylim(0,1)

    # This row's values, closed back to the first point, drawn and filled.
    values=df.loc[row].drop('group').values.flatten().tolist()
    values += values[:1]
    ax.plot(angles, values, color=color, linewidth=2, linestyle='solid')
    ax.fill(angles, values, color=color, alpha=0.4)

    # Subplot title in the same colour as the polygon.
    plt.title(title, size=11, color=color, y=1.1)

# ------- PART 2: Apply to all individuals

# initialize the figure (1000x1000 px at the chosen dpi)
my_dpi=96
plt.figure(figsize=(1000/my_dpi, 1000/my_dpi), dpi=my_dpi)

# Create a color palette: one distinct colour per row of df.
my_palette = plt.cm.get_cmap("cool", len(df.index))

# One spider subplot per row of df.
for row in range(0, len(df.index)):
    make_spider( row=row, title='Song '+df['group'][row], color=my_palette(row))

plt.savefig('songcomparison.png')

# + id="zZqml5TUqPjB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="7b81f571-94ce-4cfd-c54a-5617c08017e9"
df

# + id="PBGp_7RCqtAH" colab_type="code" colab={}
radar_chart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="ncqsDZUKl9yK" colab={"base_uri": "https://localhost:8080/"} outputId="def4e224-9f4f-4520-e983-917344224b87" # !pip install transformers # !pip install seqeval # + colab={"base_uri": "https://localhost:8080/"} id="PNPOfiJw2tpm" outputId="b7ee4ab1-693d-43bf-e3b5-21ec51691342" import torch from torch import nn from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from transformers import BertForTokenClassification, AdamW, BertTokenizer from transformers import get_linear_schedule_with_warmup from keras.preprocessing.sequence import pad_sequences from abc import ABC, abstractmethod import seqeval import requests torch.manual_seed(28) # + id="Hclbpj9y2t-k" # + id="jVHhF40_qO4O" class Config: DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") MODEL_NAME = "bert-base-uncased" TRAIN_BATCH_SIZE = 32 TRAIN_EPOCHS = 4 MAX_SEQUENCE_LENGTH = 270 FULL_FINE_TUNING= True MAX_GRAD_NORM = 1.0 # + id="z-9gdi7x23_i" # + id="hvIxnbLo3aB-" # label_map = {"1": "O", "2": "B-MISC", "3": "I-MISC", "4": "B-PER", "5": "I-PER", "6": "B-ORG", # "7": "I-ORG", "8": "B-LOC", "9": "I-LOC", "10": "[CLS]", "11": "[SEP]", "12": "PAD"} # + id="f8yl2Hz0lDmD" class InputSample: def __init__(self, guid, text, label): self.guid = guid self.text = text self.label = label # + id="WzNXq9GF46Q3" class FileReader(ABC): @abstractmethod def read_file(self, file_name): pass # + id="xJGNO5h65mUd" class TSVFileReader(FileReader): def read_file(self, file_name): data = [] sentence = [] label = [] text = requests.get(file_name).text.split('\n') # with open(file_name, 'r') as file_handle: for line in text: if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == '\n': if len(sentence) > 0: data.append((sentence, label)) sentence = [] label = [] continue 
splits = line.split(' ') sentence.append(splits[0]) #label.append(splits[-1][:-1]) label.append(splits[-1][:]) if len(sentence) > 0: data.append((sentence, label)) return data # + id="U1HTBQUPbQfb" # stri = "EU NNP B-NP B-ORG" # splits = stri.split(' ') # print(splits) # print(splits[0]) # print(splits[-1][:-1]) # + id="66cQdu7E3vmD" class DataProcessor(ABC): @abstractmethod def fetch_train_samples(self, file_path): pass @abstractmethod def fetch_validation_samples(self, file_path): pass @abstractmethod def fetch_test_samples(self, file_path): pass @abstractmethod def fetch_labels(self): pass def read_file(self, file_name, file_type): if file_type == 'tsv': tsv = TSVFileReader() return tsv.read_file(file_name) # + id="v9-gNpe260tf" class NERDataProcessor(DataProcessor): def __init__(self): self.labels = ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "[CLS]", "[SEP]"] def fetch_train_samples(self, file_path): data = self.read_file(file_path, "tsv") return self.__create_input_samples("train", data) def fetch_validation_samples(self, file_path): data = self.read_file(file_path, "tsv") return self.__create_input_samples("valid", data) def fetch_test_samples(self, file_path): data = self.read_file(file_path, "tsv") return self.__create_input_samples("test", data) def fetch_labels(self): return self.labels def __create_input_samples(self, dataset_type, lines): input_samples = [] for idx, (sentence, label) in enumerate(lines): input_samples.append(InputSample(guid=f"{dataset_type}-{idx}", text=sentence, #text=" ".join(sentence), label=label)) return input_samples # + colab={"base_uri": "https://localhost:8080/"} id="oXWiwpzf6ckq" outputId="4b9f55ca-ef23-4abb-ad0b-effb17e05011" float(0.1!=0.0) # + id="zLIZ6p8rBqJf" ner_processor = NERDataProcessor() labels = ner_processor.fetch_labels() labels.append("PAD") num_labels = len(labels) + 1 # addition 1 for PAD # + id="1aS9Xh88BjXB" train_samples = 
ner_processor.fetch_train_samples("https://raw.githubusercontent.com/saishiva024/BERT_NamedEntityRecognition/master/dataset/train.txt") valid_samples = ner_processor.fetch_train_samples("https://raw.githubusercontent.com/saishiva024/BERT_NamedEntityRecognition/master/dataset/valid.txt") test_samples = ner_processor.fetch_train_samples("https://raw.githubusercontent.com/saishiva024/BERT_NamedEntityRecognition/master/dataset/test.txt") # + colab={"base_uri": "https://localhost:8080/"} id="LqFG6hr8GXKi" outputId="68402787-1aff-4950-90c8-5636d500e2fa" train_samples[:3] # + colab={"base_uri": "https://localhost:8080/"} id="tqYyLD9YBjgl" outputId="208f2d21-635a-4a70-f8f4-ac0003a3f0c1" train_samples[0].text, train_samples[0].label # + id="1oNNTLZTDDjv" def represent_features_for_samples(samples, all_labels, max_seq_length, tokenizer): try: input_tokens = [] input_labels = [] labels_map = {label : i for i, label in enumerate(all_labels,1)} for sample_idx, sample in enumerate(samples): words_list = sample.text label = sample.label tokens = [] labels = [] for word_idx, word in enumerate(words_list): token = tokenizer.tokenize(word) tokens.extend(token) lbl = label[word_idx] lbl = labels_map[lbl] labels.extend([lbl] * len(token)) input_tokens.append(tokens) input_labels.append(labels) input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in input_tokens], maxlen=Config.MAX_SEQUENCE_LENGTH, dtype='long', value=0.0, truncating='post', padding='post') label_ids = pad_sequences(input_labels, Config.MAX_SEQUENCE_LENGTH, dtype='long', value=labels_map["PAD"], truncating='post', padding='post') attention_masks = [[float(i != 0.0) for i in ii] for ii in input_ids] return torch.tensor(input_ids), torch.tensor(label_ids), torch.tensor(attention_masks) except Exception as ex: print(ex) # + id="aFiC7K4CK13s" colab={"base_uri": "https://localhost:8080/", "height": 144, "referenced_widgets": ["c7c5ca99ee684c78b42f2c8592d3d392", "cdd268d276a34e5abacf977c906341c7", 
"09c5603012ee486e90e329004b8c5315", "e2a1ee4a67994a4888b675c094e90526", "6dd677b3890a42cba04bac8eb6d3fcb8", "5311026d6af24fa6a48b8a947856babc", "<KEY>", "<KEY>", "9cad900f1f364fe883a613ab4a517c07", "d2c96146f4fc4c0eb1967b7f6c81e3ce", "<KEY>", "<KEY>", "401a65937b5c4770ab4d5ff69e77912c", "<KEY>", "0ea4aab749ef4a80b43709e7a833af33", "<KEY>", "12b4a2767e204cae9e294c3c13c96efd", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "501d1bde1d7743abba69cf1de10b3f6e", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "8f87fe61be4549ceb266438565ca4d2f", "d73627a1f14d43e9a2c8473257bb7139", "adc421d7729849339d5da97915046044", "<KEY>", "fec8764dd5dd474ca7c88ce2af544d22", "<KEY>", "53269d50ba444027ad3fbce52b617917", "82c88f7e500e424689c84de60ac4ca87", "7564a235e0954ba5bab024e6ab010f05", "16bd67e4feda44548051b6dec55f1950", "<KEY>", "<KEY>", "0a6fa31583b94a7fa4c05f3e86fe858a", "4fe0a94d482d4daea268b5554fa1614c", "<KEY>", "<KEY>", "4ea3a63d39bc475fb0238bcc312a4296"]} outputId="dc5f6233-12b9-4a90-a28c-bf6aeeed7cf8" tokenizer = BertTokenizer.from_pretrained(Config.MODEL_NAME) # + id="wjaWa1NmBm-v" train_input_ids, train_input_labels, train_attention_masks = represent_features_for_samples(train_samples, labels, Config.MAX_SEQUENCE_LENGTH, tokenizer) valid_input_ids, valid_input_labels, valid_attention_masks = represent_features_for_samples(valid_samples, labels, Config.MAX_SEQUENCE_LENGTH, tokenizer) test_input_ids, test_input_labels, test_attention_masks = represent_features_for_samples(test_samples, labels, Config.MAX_SEQUENCE_LENGTH, tokenizer) # + id="5u5Zxk0nBnBr" train_data = TensorDataset(train_input_ids, train_attention_masks, train_input_labels) valid_data = TensorDataset(valid_input_ids, valid_attention_masks, valid_input_labels) test_data = TensorDataset(test_input_ids, test_attention_masks, test_input_labels) train_sampler = RandomSampler(train_data) valid_sampler = RandomSampler(valid_data) test_sampler = RandomSampler(test_data) train_data_loader = DataLoader(train_data, 
sampler=train_sampler, batch_size=Config.TRAIN_BATCH_SIZE) valid_data_loader = DataLoader(valid_data, sampler=valid_sampler, batch_size=Config.TRAIN_BATCH_SIZE) test_data_loader = DataLoader(test_data, sampler=test_sampler, batch_size=Config.TRAIN_BATCH_SIZE) # + colab={"base_uri": "https://localhost:8080/", "height": 157, "referenced_widgets": ["5861ff02c1b748a680416f968e06276b", "680b0916d2ae44fcac166024d29db508", "1b2beafdba584eac87f55f264a5b128b", "99b4dbcebe044a80aaeb6e174d39f93c", "fef2c2e3b9714cc980a006fe7e7f035e", "53523be628d5435da62066736e7ba4a4", "c0f6103387e64f11b8ec817c45a07243", "ba7aaaf264594e6a9fa20788177f30f8", "d4b1c112916a4e5a860e1f93dafa402d", "<KEY>", "<KEY>"]} id="kb4f4Uu9qkgB" outputId="29ccdce1-9c57-4498-b85c-deacf4f95a1e" bert_model = BertForTokenClassification.from_pretrained(Config.MODEL_NAME, num_labels=num_labels, output_attentions=False, output_hidden_states=False) # + id="Ra_AQg26vmLs" # bert_model.cuda() # + id="fCofXvFIvmOF" if Config.FULL_FINE_TUNING: param_optimizer = list(bert_model.named_parameters()) no_decay = ['bias', 'gamma', 'beta'] optimizer_grouped_params = [{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}] else: param_optimizer = list(bert_model.named_parameters()) optimizer_grouped_params = [{"params": [p for n, p in param_optimizer]}] optimizer = AdamW(optimizer_grouped_params, lr = 3e-5, eps = 1e-8) # + id="--mWnXJgwN_a" total_steps = len(train_data_loader) * Config.TRAIN_EPOCHS scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps) # + colab={"base_uri": "https://localhost:8080/"} id="YZ5gTCvMyoH5" outputId="f9d762bc-9816-457a-fcdb-2e277dc60212" # def train(): labels_map = {i : label for i, label in enumerate(labels,1)} print(labels_map) # + id="TW5D50oNxaeh" # #%%time losses = [] eval_losses = [] for 
i in range(Config.TRAIN_EPOCHS): bert_model.train() total_loss = 0 for step, batch in enumerate(train_data_loader): batch = tuple(t.to(Config.DEVICE) for t in batch) batch_input_ids, batch_attention_mask, batch_labels = batch bert_model.zero_grad() output = bert_model(batch_input_ids, token_type_ids=None, attention_mask=batch_attention_mask, labels=batch_labels) loss = output[0] loss.backward() total_loss += loss.item() torch.nn.utils.clip_grad_norm_(parameters=bert_model.parameters(), max_norm=Config.MAX_GRAD_NORM) optimizer.step() scheduler.step() avg_train_loss = total_loss / len(train_data_loader) print(f"Average Training Loss - {avg_train_loss}\n") losses.append(avg_train_loss) bert_model.eval() eval_loss = eval_acc = nb_eval_steps = nb_eval_samples = 0 predictions = [] actual_labels = [] for batch in valid_data_loader: batch = tuple(t.to(Config.DEVICE) for t in batch) batch_input_ids, batch_attention_mask, batch_labels = batch with torch.no_grad(): outputs = bert_model(batch_input_ids, token_type_ids=None, attention_mask=batch_attention_mask, labels=batch_labels) # logits = outputs[1].detach().cpu().numpy() # label_ids = batch_labels.to('cpu').numpy() eval_loss += outputs[0].mean().item() # predictions.extend([list(p) for p in np.argmax(logits, axis=2)]) # actual_labels.extend(label_ids) logits = torch.argmax(F.log_softmax(outputs, dim=2), dim=2) logits = logits.detach().cpu().numpy() label_ids = batch_labels.to('cpu').numpy() for i, label in enumerate(label_ids): temp_1 = [] temp_2 = [] for j,m in enumerate(label): if j == 0: continue elif label_ids[i][j] == len(label_map): actual_labels.append(temp_1) predictions.append(temp_2) break else: temp_1.append(label_map[label_ids[i][j]]) temp_2.append(label_map[logits[i][j]]) eval_loss = eval_loss / len(valid_data_loader) eval_losses.append(eval_loss) print(f"Eval Loss - {eval_loss}") report = seqeval.metrics.classification_report(actual_labels, predictions, digits=4) print(report) print('\n') # + id="zvIjJar0xahb"
NER_BERT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import json
import pickle
from sklearn.metrics.pairwise import cosine_similarity
from tqdm.auto import tqdm
# -

import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
sns.set(rc={'figure.figsize': (11.7, 6.27)})

# +
import os
import glob


def read_lines(fn):
    """Read a UTF-8 text file and return its lines without newline characters.

    Returns an empty list when the file does not exist.  A trailing empty
    line produced by a final newline is dropped.
    """
    if not os.path.exists(fn):
        return []
    with open(fn, 'r', encoding='utf-8') as f:
        text = f.read()
    lines = text.split("\n")
    # A file ending in "\n" yields one spurious empty element — drop it.
    if lines and lines[-1] == '':
        return lines[:-1]
    return lines


def write_lines(fn, lines, mode='w'):
    """Write an iterable of strings to `fn`, one per line (UTF-8).

    `mode` is passed straight to `open`, so 'a' appends instead of
    overwriting.  No trailing newline is added after the last line.
    """
    text_to_write = "\n".join(lines)
    with open(fn, encoding='utf-8', mode=mode) as f:
        f.write(text_to_write)


# +
def show_cos_src_tgt_dataset(df, min_cos_src_tgt_sim=0):
    """Return the rows whose src/tgt cosine similarity is at or below the threshold."""
    return df[df["cos_src_tgt"] <= min_cos_src_tgt_sim].sort_values("cos_src_tgt")


def show_cos_check_dataset(df, min_cos_check_sim=0):
    """Return the rows whose checkpoint cosine similarity is at or below the threshold."""
    return df[df["cos_check"] <= min_cos_check_sim].sort_values("cos_check")


def plot_cos_src_tgt(df, col="cos_src_tgt"):
    """Histogram of `col` over the rows where source and target actually differ."""
    return df[df["text_src"] != df["text_tgt"]][col].plot.hist(bins=30)


def show_statistic(df, len_before=0, show_rate=True):
    """Print row count, rows dropped since `len_before`, and change statistics.

    Expects an integer 'have_change' column when `show_rate` is true.
    """
    diff_len = len_before - len(df)
    print("Count of rows: ", len(df))
    if diff_len > 0:
        print("Diff count: ", diff_len)
    if show_rate:
        print("Count of rows with change: ", df['have_change'].sum())
        print("Rate of rows with change: ", round(df['have_change'].mean(), 2))
    print()


def basic_clean(df, show=True, min_cos_src_tgt_sim=0):
    """Apply the basic cleaning pipeline to a parallel src/tgt DataFrame.

    Steps: mark changed pairs, drop duplicate pairs, drop very short targets,
    drop targets not starting with a capital, drop all-caps targets, and drop
    pairs whose src/tgt cosine similarity is at or below
    `min_cos_src_tgt_sim`.  Statistics are printed after each step.

    NOTE(review): the `show` parameter is kept for interface compatibility
    but is currently unused — statistics are always printed.

    Returns the cleaned DataFrame (input is not modified except for the
    added 'have_change' column).
    """
    len_start = len(df)
    print("Initial statistics")
    # 1 when the target differs from the source, else 0.
    df['have_change'] = (df['text_src'] != df['text_tgt']).astype(int)
    show_statistic(df, len_start)

    print("Drop duplicates")
    len_before = len(df)
    df = df.drop_duplicates(subset=['text_src', 'text_tgt'])
    show_statistic(df, len_before, show_rate=True)

    print("Drop where len less 5 and less than 1 token")
    len_before = len(df)
    df = df[df["text_tgt"].str.len() > 5]
    df = df[df['text_tgt'].apply(lambda x: len(x.split(" ")) > 1)]
    show_statistic(df, len_before, show_rate=True)

    print("Drop where start from non-capital")
    len_before = len(df)
    # First character must equal its own upper-case form (digits/punctuation pass).
    df = df[df["text_tgt"].apply(lambda x: x[0] == x[0].upper())]
    show_statistic(df, len_before, show_rate=True)

    print("Drop where all capital")
    len_before = len(df)
    df = df[df["text_tgt"].apply(lambda x: x != x.upper())]
    show_statistic(df, len_before, show_rate=True)

    print("Drop where cosine similarity between src and tgt is less than ", min_cos_src_tgt_sim)
    len_before = len(df)
    df = df[df["cos_src_tgt"] > min_cos_src_tgt_sim]
    show_statistic(df, len_before, show_rate=True)

    print('Final rate of cleaned data: ', round(len(df) / len_start, 2))
    return df
# -

# ### WL

# +
text_src = read_lines("../../data_parallel/wi+locness/train_src")
text_tgt = read_lines("../../data_parallel/wi+locness/train_tgt")

with open("../Clustering/data/wl_train_src_embed.pickle", "rb") as f:
    vectors_src = pickle.load(f)
with open("../Clustering/data/wl_train_tgt_embed.pickle", "rb") as f:
    vectors_tgt = pickle.load(f)

# Cosine similarity between each source/target embedding pair.
cos_sim_wl = []
for i in tqdm(range(len(vectors_src))):
    cos_sim_wl.append(cosine_similarity([vectors_src[i]], [vectors_tgt[i]])[0, 0])
# -

wl_df = pd.DataFrame({"text_src": text_src,
                      "text_tgt": text_tgt,
                      "cos_src_tgt": cos_sim_wl})

g = plot_cos_src_tgt(wl_df)
plt.xticks(np.arange(0, 1, 0.1))
plt.show()

wl_df[wl_df["cos_src_tgt"] <= 0.5].sort_values("cos_src_tgt")

clean_wl = basic_clean(wl_df, show=True, min_cos_src_tgt_sim=0.5)

# ### Nucle

# +
text_src = read_lines("../../data_parallel/nucle/nucle_src") text_tgt = read_lines("../../data_parallel/nucle/nucle_tgt") with open("../Clustering/data/nucle_train_src_embed.pickle", "rb") as f: vectors_src = pickle.load(f) with open("../Clustering/data/nucle_train_tgt_embed.pickle", "rb") as f: vectors_tgt = pickle.load(f) cos_sim = [] for i in tqdm(range(len(vectors_src))): cos_sim.append(cosine_similarity([vectors_src[i]],[vectors_tgt[i]])[0,0]) # - with open("../Checkpoint_exp/nucle_cos.pickle", "rb") as f: cos_check = pickle.load(f) nucle_df = pd.DataFrame({"text_src": text_src, "text_tgt": text_tgt, "cos_src_tgt": cos_sim, "cos_check": cos_check}) g = plot_cos_src_tgt(nucle_df) plt.xticks(np.arange(0, 1, 0.1)) plt.show() show_cos_src_tgt_dataset(nucle_df, min_cos_src_tgt_sim=0.5) clean_nucle = basic_clean(nucle_df, show = True, min_cos_src_tgt_sim=0.5) # ### FCE # + text_src = read_lines("../../data_parallel/fce/fce_train_src") text_tgt = read_lines("../../data_parallel/fce/fce_train_tgt") with open("../Clustering/data/fce_train_src_embed.pickle", "rb") as f: vectors_src = pickle.load(f) with open("../Clustering/data/fce_train_tgt_embed.pickle", "rb") as f: vectors_tgt = pickle.load(f) cos_sim = [] for i in tqdm(range(len(vectors_src))): cos_sim.append(cosine_similarity([vectors_src[i]],[vectors_tgt[i]])[0,0]) # - with open("../Checkpoint_exp/fce_cos.pickle", "rb") as f: cos_check = pickle.load(f) fce_df = pd.DataFrame({"text_src": text_src, "text_tgt": text_tgt, "cos_src_tgt": cos_sim, "cos_check": cos_check}) g = plot_cos_src_tgt(fce_df) plt.xticks(np.arange(0, 1, 0.1)) plt.show() show_cos_src_tgt_dataset(fce_df, min_cos_src_tgt_sim=0.5) fce_df.loc[7278].values clean_fce = basic_clean(fce_df, show = True, min_cos_src_tgt_sim=0.5) # ## Lang8 # + text_src = read_lines("../../data_parallel/lang8/lang8_src") text_tgt = read_lines("../../data_parallel/lang8/lang8_tgt") with open("../Clustering/data/lang8_train_src_embed.pickle", "rb") as f: vectors_src = 
pickle.load(f) with open("../Clustering/data/lang8_train_tgt_embed.pickle", "rb") as f: vectors_tgt = pickle.load(f) cos_sim = [] for i in tqdm(range(len(vectors_src))): cos_sim.append(cosine_similarity([vectors_src[i]],[vectors_tgt[i]])[0,0]) # - with open("../Checkpoint_exp/lang8_cos.pickle", "rb") as f: cos_check = pickle.load(f) lang8_df = pd.DataFrame({"text_src": text_src, "text_tgt": text_tgt, "cos_src_tgt": cos_sim, "cos_check": cos_check}) g = plot_cos_src_tgt(lang8_df) plt.xticks(np.arange(0, 1, 0.1)) plt.show() show_cos_src_tgt_dataset(lang8_df, min_cos_src_tgt_sim=0.5) lang8_df.loc[991930].values clean_lang8 = basic_clean(lang8_df, show = True, min_cos_src_tgt_sim=0.5) import copy orig_merge = copy.deepcopy(lang8_df) orig_merge = orig_merge.append(nucle_df) orig_merge = orig_merge.append(fce_df) orig_merge = orig_merge.append(wl_df) len(orig_merge) orig_merge.have_change.sum() merge = copy.deepcopy(clean_lang8) merge = merge.append(clean_nucle) merge = merge.append(clean_fce) merge = merge.append(clean_wl) len(merge) merge.have_change.sum() len(merge) merge.have_change.sum() len(merge) merge.have_change.sum() # ### Split each dataset one by one lang8_train, lang8_dev = train_test_split(clean_lang8, test_size=0.02, random_state=4, stratify=clean_lang8.have_change) nucle_train, nucle_dev = train_test_split(clean_nucle, test_size=0.02, random_state=4, stratify=clean_nucle.have_change) fce_train, fce_dev = train_test_split(clean_fce, test_size=0.02, random_state=4, stratify=clean_fce.have_change) wl_train, wl_dev = train_test_split(clean_wl, test_size=0.02, random_state=4, stratify=clean_wl.have_change) clean_merge_train = copy.deepcopy(lang8_train) clean_merge_train = clean_merge_train.append(nucle_train) clean_merge_train = clean_merge_train.append(fce_train) clean_merge_train = clean_merge_train.append(wl_train) clean_merge_dev = copy.deepcopy(lang8_dev) clean_merge_dev = clean_merge_dev.append(nucle_dev) clean_merge_dev = clean_merge_dev.append(fce_dev) 
clean_merge_dev = clean_merge_dev.append(wl_dev) train_src = clean_merge_train.text_src.values train_tgt = clean_merge_train.text_tgt.values dev_src = clean_merge_dev.text_src.values dev_tgt = clean_merge_dev.text_tgt.values path_save = "../../data_parallel/light_clean_gold/" write_lines(path_save+"train_src", train_src, mode='w') write_lines(path_save+"train_tgt", train_tgt, mode='w') write_lines(path_save+"dev_src", dev_src, mode='w') write_lines(path_save+"dev_tgt", dev_tgt, mode='w') from sklearn.model_selection import train_test_split train, dev = train_test_split(merge, test_size=0.02, random_state=4) train_src = train.text_src.values train_tgt = train.text_tgt.values dev_src = dev.text_src.values dev_tgt = dev.text_tgt.values path_save = "../../data_parallel/base_clean_gold/" write_lines(path_save+"train_src", train_src, mode='w') write_lines(path_save+"train_tgt", train_tgt, mode='w') write_lines(path_save+"dev_src", dev_src, mode='w') write_lines(path_save+"dev_tgt", dev_tgt, mode='w') # ### Clean with checkpoints # ### FCE g = plot_cos_src_tgt(clean_fce, col="cos_check") sh = show_cos_check_dataset(clean_fce, min_cos_check_sim=0.97) sh[sh['have_change'] == 1] len(clean_fce) clean_fce_check = clean_fce[clean_fce["cos_check"] > 0.97] len(clean_fce_check) # ### Nucle g = plot_cos_src_tgt(clean_nucle, col="cos_check") sh = show_cos_check_dataset(clean_nucle, min_cos_check_sim=0.97) sh[sh['have_change'] == 1] clean_nucle_check = clean_nucle[clean_nucle["cos_check"] > 0.97] # ### Lang8 g = plot_cos_src_tgt(clean_lang8, col="cos_check") sh = show_cos_check_dataset(clean_lang8, min_cos_check_sim=0.97) sh[sh['have_change'] == 1] clean_lang8_check = clean_lang8[clean_lang8["cos_check"] > 0.97] lang8_train, lang8_dev = train_test_split(clean_lang8_check, test_size=0.02, random_state=4, stratify=clean_lang8_check.have_change) nucle_train, nucle_dev = train_test_split(clean_nucle_check, test_size=0.02, random_state=4, stratify=clean_nucle_check.have_change) fce_train, 
fce_dev = train_test_split(clean_fce_check, test_size=0.02, random_state=4, stratify=clean_fce_check.have_change) wl_train, wl_dev = train_test_split(clean_wl, test_size=0.02, random_state=4, stratify=clean_wl.have_change) clean_merge_train = copy.deepcopy(lang8_train) clean_merge_train = clean_merge_train.append(nucle_train) clean_merge_train = clean_merge_train.append(fce_train) clean_merge_train = clean_merge_train.append(wl_train) clean_merge_dev = copy.deepcopy(lang8_dev) clean_merge_dev = clean_merge_dev.append(nucle_dev) clean_merge_dev = clean_merge_dev.append(fce_dev) clean_merge_dev = clean_merge_dev.append(wl_dev) train_src = clean_merge_train.text_src.values train_tgt = clean_merge_train.text_tgt.values dev_src = clean_merge_dev.text_src.values dev_tgt = clean_merge_dev.text_tgt.values path_save = "../../data_parallel/check_ligth_clean_gold/" write_lines(path_save+"train_src", train_src, mode='w') write_lines(path_save+"train_tgt", train_tgt, mode='w') write_lines(path_save+"dev_src", dev_src, mode='w') write_lines(path_save+"dev_tgt", dev_tgt, mode='w') merge_check = copy.deepcopy(clean_lang8_check) merge_check = merge_check.append(clean_nucle_check) merge_check = merge_check.append(clean_fce_check) merge_check = merge_check.append(clean_wl) len(merge_check) merge_check.have_change.sum() train, dev = train_test_split(merge_check, test_size=0.02, random_state=4) train_src = train.text_src.values train_tgt = train.text_tgt.values dev_src = dev.text_src.values dev_tgt = dev.text_tgt.values path_save = "../../data_parallel/check_clean_gold/" write_lines(path_save+"train_src", train_src, mode='w') write_lines(path_save+"train_tgt", train_tgt, mode='w') write_lines(path_save+"dev_src", dev_src, mode='w') write_lines(path_save+"dev_tgt", dev_tgt, mode='w')
notebooks/Clean_gold/Clean_v3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline # # # Reading BEM surfaces from a forward solution # # # Plot BEM surfaces used for forward solution generation. # # # + # Author: <NAME> <<EMAIL>> # # License: BSD (3-clause) import mne from mne.datasets import sample print(__doc__) data_path = sample.data_path() fname = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif' surfaces = mne.read_bem_surfaces(fname, patch_stats=True) print("Number of surfaces : %d" % len(surfaces)) # - # Show result # # # + head_col = (0.95, 0.83, 0.83) # light pink skull_col = (0.91, 0.89, 0.67) brain_col = (0.67, 0.89, 0.91) # light blue colors = [head_col, skull_col, brain_col] # 3D source space from mayavi import mlab # noqa mlab.figure(size=(600, 600), bgcolor=(0, 0, 0)) for c, surf in zip(colors, surfaces): points = surf['rr'] faces = surf['tris'] mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces, color=c, opacity=0.3)
0.12/_downloads/plot_read_bem_surfaces.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 5장 제어문 # ## 5.1 조건에 따라 분기하는 if 문 # ### 단일 조건에 따른 분기(if) # **[5장: 72페이지]** # + code_folding=[] x = 95 if x >= 90: # 조건문이 참이면 실행 print("Pass") # - # ### 단일 조건 및 그 외 조건에 따른 분기(if ~ else) # **[5장: 73페이지]** x = 75 if x >= 90: # 조건문이 참이면 실행 print("Pass") else: # 거짓일때 실행 print("Fail") # ### 여러 조건에 따른 분기(if ~ elif ~ else) # **[5장: 75페이지]** x = 85 if x >= 90: # 조건1 print("Very good") elif (x >= 80) and (x < 90): # 조건2 print("Good") else: # 조건문이 False일 때 print("Bad") # **[5장: 75페이지]** x = 85 if x >= 90: print("Very Good") elif 80 <= x < 90: # 두개의 조건문 하나로 입력 print("Good") else: print("Bad") # ### 중첩 조건에 따른 분기 # **[5장: 77페이지]** x = 99 if x >= 90: # 조건1 90이상일때 if x==100 : # 조건1-1 100점일때 print("Perfect") else: # else 100점이 아닐때 print("Very Good") elif (x >= 80) and (x < 90): print("Good") else: print("Bad") # ## 5.2 지정된 범위만큼 반복하는 for 문 # ### 반복문의 필요성 # **[5장: 78페이지]** # + a = 0 # 변수 a를 0으로 초기화 print(a) # 변수 a 출력 a = a + 1 # 변수 a에 1을 더한 후, 다시 a에 대입 print(a) # 변수 a 출력 a = a + 1 # 같은 코드 반복 print(a) a = a + 1 # 같은 코드 반복 print(a) a = a + 1 # 같은 코드 반복 print(a) a = a + 1 # 같은 코드 반복 print(a) # - # ### for 문의 구조 # # # ### 반복 범위 지정 # #### 리스트 이용 # **[5장: 80페이지]** for a in [0, 1, 2, 3, 4, 5]: # 반복변수를 반복범위만큼 반복 print(a) # **[5장: 81페이지]** myFriends = ['James', 'Robert', 'Lisa', 'Mary'] # 리스트를 변수에 할당 for myFriend in myFriends: # 반복변수를 반복범위만큼 반복 print(myFriend) # #### range() 함수 이용 # **[5장: 82페이지]** print(range(0, 10, 1)) # **[5장: 82페이지]** print(list(range(0, 10, 1))) # **[5장: 82페이지]** for a in range(0, 6, 1): # step = 1 print(a) # **[5장: 82페이지]** for a in range(0, 6, 2): # step = 2 print(a) # **[5장: 83페이지]** print(list(range(0, 10, 1))) print(list(range(0, 10))) print(list(range(10))) # 시작, step parameter를 입력하지 않으면 default값으로 출력 # **[5장: 83페이지]** print(list(range(0, 20, 
5))) # Line 1 print(list(range(-10, 0, 2))) # Line 2 print(list(range(3, -10, -3))) # Line 3 (뒤로가기) print(list(range(0, -5, 1))) # Line 4 # ### 중첩 for 문 # **[5장: 84페이지]** # + x_list = ['x1', 'x2'] y_list = ['y1', 'y2'] print("x y") for x in x_list: for y in y_list: print(x,y) # - # ### 여러 개의 리스트 다루기 # **[5장: 85페이지]** names = ['James', 'Robert', 'Lisa', 'Mary'] scores = [95, 96, 97, 94] # **[5장: 85페이지]** for k in range(len(names)): print(names[k], scores[k], k, range(len(names))) # **[5장: 86페이지]** for name, score in zip(names, scores): # 반복 변수가 2 이상일 때, zip 함수 사용 print(name, score) # ## 5.3 조건에 따라 반복하는 while 문 # ### while 문의 구조 # **[5장: 87페이지]** # + i = 0 # 초기화 sum = 0 # 초기화 print("i sum") while (sum < 20): # 조건 검사 i = i + 1 # i를 1씩 증가 sum = sum + i # 이전의 sum과 현재 i를 더해서 sum을 갱신 print(i, sum) # i와 sum을 출력 print(i, sum) # - # ### 무한 반복 while 문 # ## 5.4 반복문을 제어하는 break와 continue # ### 반복문을 빠져나오는 break # **[5장: 89페이지]** k=0 while True: k = k + 1 # k는 1씩 증가 if(k > 3): # k가 3보다 크면 break # break로 while 문을 빠져나옴 print(k) # k 출력 # **[5장: 89페이지]** for k in range(10): if(k > 2): # k 가 2보다 크면 break # break로 for 문을 빠져나옴 print(k) # k 출력 # ### 다음 반복을 실행하는 continue # **[5장: 90페이지]** for k in range(5): if(k == 2): continue print(k) # **[5장: 91페이지]** k = 0 while True: k = k + 1 if(k == 2): print("continue next") continue if(k > 4): break print(k) print(k, '에서 반복문이 종료됨') # ## 5.5 간단하게 반복하는 한 줄 for 문 # ### 리스트 컴프리헨션의 기본 구조 # **[5장: 92페이지]** # + numbers = [1,2,3,4,5] square = [] for i in numbers: square.append(i**2) print(square) # - # **[5장: 92페이지]** numbers = [1,2,3,4,5] square = [i**2 for i in numbers] print(square) ls_a = list(range(1, 10, 2)) ls_b = [i **2 for i in ls_a] print(ls_a, ls_b) # ### 조건문을 포함한 리스트 컴프리헨션 # **[5장: 93페이지]** # + numbers = [1,2,3,4,5] square = [] for i in numbers: if i >= 3: square.append(i**2) print(square) # - # **[5장: 93페이지]** # + numbers = [1,2,3,4,5] square = [i**2 for i in numbers if i>=3] print(square) # + ls_x = list(range(0, 101, 10)) ls_y = [i**2 for 
i in ls_x if i >= 60] print(ls_x, ls_y, sep='\n') # - # # 5.6 정리
Chapter_05_if_for_while.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## This notebook is an implementation of the article: " *Automatic image-based segmentation of the heart from CT scans* " by <NAME> et al.
#
# ## Import Libraries

# +
import numpy as np
import pandas as pd
import pydicom
import os
import scipy
import matplotlib.pyplot as plt
import seaborn as sns

from skimage import measure, morphology
from skimage.measure import label
# -

# ## <font color=green> Load CT scan </font>

# ---
def load_CT_scan(path, force=False):
    """Read every DICOM file in `path` and return the slices as a list.

    Hidden files (names starting with '.') are skipped.  Slices are sorted
    by their InstanceNumber attribute when it is present; `force` is passed
    through to `pydicom.read_file` to allow files without a DICOM header.
    """
    slices = []
    for s in os.listdir(path):
        # Skip hidden files such as .DS_Store.
        if s.startswith('.'):
            continue
        slices.append(pydicom.read_file(path + '/' + s, force=force))
    # Sort slices per Instance Number.
    try:
        slices.sort(key=lambda x: int(x.InstanceNumber))
    except AttributeError:
        # Some datasets (especially those read with force=True) lack
        # InstanceNumber; keep the directory order in that case.
        # Fix: the original `except: AttributeError` was a bare except whose
        # body was a no-op expression, silently swallowing *every* error.
        pass
    return slices


# ---
CT_scan = load_CT_scan(path='/Users/stefanoskarageorgiou/Desktop/CT_Scan', force=True)

print("This patient's CT scan comprises of ", len(CT_scan), " CT slices.")


def multiple_plots(images_list, display_step=0):
    """Display the images of `images_list` in a grid with 5 columns.

    An extra, partially-filled row is added when the number of images is not
    a multiple of 5.  NOTE(review): `display_step` is accepted for interface
    compatibility but currently unused.
    """
    length_of_list = len(images_list)
    if length_of_list == 0:
        return 'List is empty'

    # Number of complete 5-image rows.
    rows = length_of_list // 5
    fig, ax = plt.subplots(nrows=rows if length_of_list % 5 == 0 else rows + 1,
                           ncols=5, figsize=(20, 15))

    counter = 0
    for i in range(rows):
        for j in range(5):
            title_index = counter + 1
            ax[i, j].imshow(images_list[counter], cmap='gray')
            ax[i, j].set_title('Slice %d' % title_index)
            ax[i, j].axis('off')
            counter += 1

    # Fill the final, partial row (if any) with the leftover images.
    if length_of_list % 5 != 0:
        # Fix: the leftover count is length_of_list % 5 (5 columns per row);
        # the original used % 6, which dropped one image whenever the
        # remainder was exactly 5... and mis-sized the loop otherwise.
        for j in range(length_of_list % 5):
            title_index = counter + 1
            ax[rows, j].imshow(images_list[counter], cmap='gray')
            ax[rows, j].set_title('Slice %d' % title_index)
            ax[rows, j].axis('off')
            counter += 1

    plt.show()


#
# +
ct_slices = []
for i in range(len(CT_scan)):
    ct_slices.append(CT_scan[i].pixel_array)

multiple_plots(ct_slices)
# -

# ## Preprocessing

CT_slice = ct_slices[1]
plt.imshow(CT_slice, cmap = 'gray'); plt.figure(figsize=(12,6)) sns.distplot(CT_slice.flatten(), kde = False, color = '#035E7B', hist_kws={'alpha':1}, bins = 100) plt.xlabel('Pixel values / Levels of gray') plt.ylabel('Frequency') plt.title("Histogram of slice's pixels"); # **Note:** As we can see from the histogram, there is an amount of pixels with a value around -2000. Those pixel values correspond to the black circular frame of each slice which esentialy has nothing to do with the image. Thus an optional decision is to treat it as air. CT_slice[CT_slice < -1000] = -1000 plt.imshow(CT_slice, cmap = 'gray'); plt.figure(figsize=(12,6)) sns.distplot(CT_slice.flatten(), kde = False, color = '#035E7B', hist_kws={'alpha':1}, bins = 100) plt.xlabel('Pixel values / Levels of gray') plt.ylabel('Frequency') plt.title("Histogram of slice's pixels"); # # In this section we will: # # - Determine all variables needed for the segmenataion (statistical parameters, position of spine, position of aorta etc) # # - Perform preliminary cleaning of the images ( through of which of the Region Of Interest) # ## 1.1 Statistical parameters: # # # We can define a CT scan as a scalar function $f(x,y,z)$, where $x = 1, ..., N$ is the image height, $y = 1, ..., M$ is the image width and $z = 1,..., K$ is the slice number. # # # For the $k_{th}$ CT slice: # # **(a) Mean value of intensity of pixels, $\mu(k)$:** # # $\mu(k) = \frac{1}{NM} \sum_{x = 1}^{N} \sum_{y = 1}^{M} f(x,y,k)$ # # # This value allows us to seperate the air and background from the rest of the slice. 
# # We can create a binary mask by thresholding with $\mu(k)$ # + mu_k = CT_slice.mean() mu_binary_mask = CT_slice > mu_k plt.imshow(mu_binary_mask, cmap = 'gray') plt.title('Binary mask computed by thresholding with μ(k)'); # - # **(b) Mean intensity value of the pixels with an intensity level higher than $\mu(k)$, $\mu_{sup}(k)$ :** # # $\mu_{sup}(k)= \frac{1}{R_k} \sum_{i = 1}^{R_k} (X_i,Y_i,k)$ # # where $R_k$ is the number of pixels $(X_i,Y_i)$ in the $k_{th}$ slice which satisfy $f(X_i,Y_i,k)>\mu(k)$. # # Can be used for obtaining a binary mask which determines the position of the spine in each image. # # + Rk = np.where(CT_slice.flatten() > mu_k)[0] mu_sup_k = CT_slice.flatten()[Rk].mean() mu_sup_binary_mask = CT_slice > mu_sup_k plt.imshow(mu_sup_binary_mask, cmap = 'gray') plt.title('Binary mask computed by thresholding with μsup(k)'); # - # **(c) Standard deviation of intensities of pixels in the $k_{th}$ slice with intensity level higher than $\mu(k)$, $\sigma(k)$ :** # # $\sigma(k) = \sqrt{ \frac{1}{R_k - 1} \sum_{i = 1}^{R_k} (f(X_i,Y_i,k) - \mu_{sup}(k))^2}$ # # The threshold $\mu_{sup}(k) + \sigma(k)$ allows us to obtain a binary mask which is used later in the segmentation stage in order to locate the descending aorta in all the slices of the volumetric scan. The resulting gray level is useful for separating the outer layer of the bones and the structures where oxygenated blood flows from the rest of the image. 
# + sigma_k = CT_slice.flatten()[Rk].std() mu_sup_s_binary_mask = CT_slice > mu_sup_k + sigma_k plt.imshow(mu_sup_s_binary_mask, cmap = 'gray') plt.title('Binary mask computed by thresholding with μsup(k) + σ(k)'); # - mu_k, mu_sup_k, mu_sup_k+ sigma_k # + plt.figure(figsize = (15,6)) sns.distplot(CT_slice.flatten(), kde = False, color = '#035E7B', hist_kws={'alpha':1}, bins = 100) plt.axvline(x = mu_k, c = '#035E7B') plt.text(x = mu_k + 30, y = 50000, s = "μ(k) = -497.02", c = '#035E7B') plt.axvline(x = mu_sup_k, c = '#1C1018') plt.text(x = mu_sup_k + 30, y = 50000, s = "μ_sup(k) = -11.63", c = '#1C1018') plt.axvline(x = mu_sup_k + sigma_k, c = 'red',linestyle = '-.' ) plt.text(x = mu_sup_k + sigma_k + 31, y = 40000, s = "μ_sup(k) + σ(k) = 120.65", c = 'red') plt.xlabel('Values of pixels / Levels of gray') plt.ylabel('Frequency of pixels') plt.title("Histogram of slice's pixels"); # - # **(d) Mean of $\mu_{sup}(k)$ minus standard deviation of $\mu_{sup}(k)$, $\mu_{global}$:** # # # $\mu_{global} = \frac{1}{K} \sum_{k=1}^{K} \mu_{sup}(k) - \sqrt{\frac{1}{K-1} \sum_{i-1}^{K} \mu_{sup}(i) - \frac{1}{P} \sum_{k-1}^{K} \mu_{sup}(k)^2}$ # # # Thresholding with $\mu_{global}$ will show the intensities which characterize muscular tissues. 
# Compute mu_sup(k) for every slice of the scan (needed for mu_global).
mu_sup_list = []
for i in range(len(CT_scan)):
    ct = CT_scan[i].pixel_array
    mu_k_i = ct.mean()
    # Indices of pixels brighter than the slice mean.
    Rk_i = np.where(ct.flatten() > mu_k_i)[0]
    mu_sup_k_i = ct.flatten()[Rk_i].mean()
    mu_sup_list.append(mu_sup_k_i)

# +
# mu_global = mean of mu_sup over all slices minus its standard deviation.
mu_global = np.mean(mu_sup_list) - np.std(mu_sup_list)

mu_global_binary_mask = CT_slice > mu_global

plt.imshow(mu_global_binary_mask, cmap='gray')
plt.title('Binary mask computed by thresholding with μglobal');
# -

#
# ## 2.1.3 Automatic selection of the region of interest


# ---
def get_image_from_mask(image, mask):
    """Apply a binary mask to an image.

    Pixels selected by `mask` keep their original intensity; all other
    pixels are set to -1000 (the value this notebook uses for air).
    """
    new_image = np.ones(shape=image.shape) * (-1000)
    keep = mask.astype(bool)
    new_image[keep] = image[keep]
    return new_image


# ---
def highest_area_object(image, at_least_one_object=False):
    """Keep only the connected component of largest area in a binary image.

    When `at_least_one_object` is True, assert that the image contains at
    least one foreground object.  If the image has no foreground pixels at
    all it is returned unchanged; otherwise a boolean mask of the largest
    component is returned.
    """
    # Label connected regions of 1s in the binary mask.
    labels = label(image)
    if at_least_one_object:
        # Label 0 is background; max() == 0 means no object was found.
        assert labels.max() != 0
    # Pixel count per label (index 0 is the background).
    counts = np.bincount(labels.flatten())
    if len(counts) == 1:
        # Only background present — nothing to select.
        return image
    # Most frequent non-zero label = largest object.
    largest_label = np.argmax(counts[1:]) + 1
    return labels == largest_label


# ---
def return_pulses(profile):
    """Split a binary profile into its pulses.

    A pulse is a maximal run of consecutive 1s in the 1-D binary `profile`.
    Returns a list of [pulse_values, pulse_indices] pairs, one per pulse in
    left-to-right order.  When the profile contains no pulse, the sentinel
    list [(0, 0)] is returned; callers detect this case by checking whether
    the first element is an ndarray.
    """
    # Pad with zeros so pulses touching either border are still detected.
    stacked = np.hstack(([0], profile, [0]))
    transitions = np.diff(stacked)
    # First index of each run of 1s, and one past its last index.
    starts = np.where(transitions > 0)[0]
    ends = np.where(transitions < 0)[0]

    pulses = [[profile[s:e], list(range(s, e))] for s, e in zip(starts, ends)]
    if not pulses:
        return [(0, 0)]
    return pulses


# ---
def ROI(image):
    """Compute the region-of-interest binary mask for a single CT slice.

    Thresholds the slice at its mean intensity, keeps the largest object,
    then walks every column and blanks the topmost pulse (the outer body
    surface) using a running estimate of its width, and finally keeps the
    largest remaining object.
    """
    # Rows (N) and columns (M) of the slice.
    N, M = image.shape
    # Mean intensity mu(k) of the slice.
    mu_k = image.mean()

    # STEP 1: binary mask by thresholding with mu(k).
    binary_mask = image > mu_k

    # STEP 2: keep only the object with the highest area.
    binary_mask = highest_area_object(binary_mask)

    # STEP 3: initialise the expected widths of the topmost pulse.
    width_mean = 0.1 * N
    width_max = 0.3 * N

    # STEP 4: per-column removal of the leftmost (topmost) pulse.
    for j in range(M):
        profile = np.asarray(binary_mask[:, j])
        # Leftmost pulse of the j-th profile and its index list.
        first_pulse, index = return_pulses(profile)[0]
        # The sentinel (0, 0) means the column has no pulse at all.
        if not isinstance(first_pulse, np.ndarray):
            width_mean = 0
            continue
        # Width of the leftmost pulse, i.e. length of the first run of 1s.
        w_j = len(first_pulse)
        if w_j < width_max:
            # Plausible width: update the running estimate and blank the
            # whole pulse in this column.
            width_mean = w_j
            binary_mask[index, j] = False
        else:
            # Implausibly wide pulse: blank only width_mean pixels from the
            # top of the pulse.
            binary_mask[index[0]:index[0] + int(width_mean), j] = False

    # STEP 6: if more than one object remains, keep the largest one.
    binary_mask = highest_area_object(binary_mask)
    return binary_mask


# ---
# +
roi_binary_mask = ROI(CT_slice)

plt.imshow(get_image_from_mask(CT_slice, roi_binary_mask), cmap='gray');
# -

ct_slice_roi = []
for i in range(len(ct_slices)):
    ct_slice_roi_binary = ROI(ct_slices[i])
    ct_slice_roi.append(get_image_from_mask(ct_slices[i], ct_slice_roi_binary))

multiple_plots(ct_slice_roi)
Automatic image-based segmentation of the heart from CT scans.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="h2q27gKz1H20" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" id="TUfAcER1oUS6" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="Gb7qyhNL1yWt" # # Image classification with TensorFlow Lite Model Maker # + [markdown] id="nDABAblytltI" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/lite/tutorials/model_maker_image_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb"><img 
src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # <td> # <a href="https://tfhub.dev/"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> # </td> # </table> # + [markdown] id="m86-Nh4pMHqY" # The [TensorFlow Lite Model Maker library](https://www.tensorflow.org/lite/guide/model_maker) simplifies the process of adapting and converting a TensorFlow neural-network model to particular input data when deploying this model for on-device ML applications. # # This notebook shows an end-to-end example that utilizes this Model Maker library to illustrate the adaption and conversion of a commonly-used image classification model to classify flowers on a mobile device. # + [markdown] id="bcLF2PKkSbV3" # ## Prerequisites # # To run this example, we first need to install several required packages, including Model Maker package that in GitHub [repo](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker). # + id="6cv3K3oaksJv" # !pip install -q tflite-model-maker # + [markdown] id="Gx1HGRoFQ54j" # Import the required packages. # + id="XtxiUeZEiXpt" import os import numpy as np import tensorflow as tf assert tf.__version__.startswith('2') from tflite_model_maker import model_spec from tflite_model_maker import image_classifier from tflite_model_maker.config import ExportFormat from tflite_model_maker.config import QuantizationConfig from tflite_model_maker.image_classifier import DataLoader import matplotlib.pyplot as plt # + [markdown] id="KKRaYHABpob5" # ## Simple End-to-End Example # + [markdown] id="SiZZ5DHXotaW" # ### Get the data path # # Let's get some images to play with this simple end-to-end example. Hundreds of images is a good start for Model Maker while more data could achieve better accuracy. 
# + cellView="form" id="3jz5x0JoskPv" image_path = tf.keras.utils.get_file( 'flower_photos.tgz', 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', extract=True) image_path = os.path.join(os.path.dirname(image_path), 'flower_photos') # + [markdown] id="a55MR6i6nuDm" # You could replace `image_path` with your own image folders. As for uploading data to colab, you could find the upload button in the left sidebar shown in the image below with the red rectangle. Just have a try to upload a zip file and unzip it. The root file path is the current path. # # <img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_image_classification.png" alt="Upload File" width="800" hspace="100"> # + [markdown] id="NNRNv_mloS89" # If you prefer not to upload your images to the cloud, you could try to run the library locally following the [guide](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker) in GitHub. # + [markdown] id="w-VDriAdsowu" # ### Run the example # The example just consists of 4 lines of code as shown below, each of which representing one step of the overall process. # # + [markdown] id="6ahtcO86tZBL" # Step 1. Load input data specific to an on-device ML app. Split it into training data and testing data. # + id="lANoNS_gtdH1" data = DataLoader.from_folder(image_path) train_data, test_data = data.split(0.9) # + [markdown] id="Y_9IWyIztuRF" # Step 2. Customize the TensorFlow model. # + id="yRXMZbrwtyRD" model = image_classifier.create(train_data) # + [markdown] id="oxU2fDr-t2Ya" # Step 3. Evaluate the model. # + id="wQr02VxJt6Cs" loss, accuracy = model.evaluate(test_data) # + [markdown] id="eVZw9zU8t84y" # Step 4. Export to TensorFlow Lite model. # # Here, we export TensorFlow Lite model with [metadata](https://www.tensorflow.org/lite/convert/metadata) which provides a standard for model descriptions. The label file is embedded in metadata. 
The default post-training quantization technique is full integer quantization for the image classification task. # # You could download it in the left sidebar same as the uploading part for your own use. # + id="Zb-eIzfluCoa" model.export(export_dir='.') # + [markdown] id="pyju1qc_v-wy" # After these simple 4 steps, we could further use TensorFlow Lite model file in on-device applications like in [image classification](https://github.com/tensorflow/examples/tree/master/lite/examples/image_classification) reference app. # + [markdown] id="R1QG32ivs9lF" # ## Detailed Process # # Currently, we support several models such as EfficientNet-Lite* models, MobileNetV2, ResNet50 as pre-trained models for image classification. But it is very flexible to add new pre-trained models to this library with just a few lines of code. # # # The following walks through this end-to-end example step by step to show more detail. # + [markdown] id="ygEncJxtl-nQ" # ### Step 1: Load Input Data Specific to an On-device ML App # # The flower dataset contains 3670 images belonging to 5 classes. Download the archive version of the dataset and untar it. # # The dataset has the following directory structure: # # <pre> # <b>flower_photos</b> # |__ <b>daisy</b> # |______ 100080576_f52e8ee070_n.jpg # |______ 14167534527_781ceb1b7a_n.jpg # |______ ... # |__ <b>dandelion</b> # |______ 10043234166_e6dd915111_n.jpg # |______ 1426682852_e62169221f_m.jpg # |______ ... # |__ <b>roses</b> # |______ 102501987_3cdb8e5394_n.jpg # |______ 14982802401_a3dfb22afb.jpg # |______ ... # |__ <b>sunflowers</b> # |______ 12471791574_bb1be83df4.jpg # |______ 15122112402_cafa41934f.jpg # |______ ... # |__ <b>tulips</b> # |______ 13976522214_ccec508fe7.jpg # |______ 14487943607_651e8062a1_m.jpg # |______ ... 
# </pre> # + id="7tOfUr2KlgpU" image_path = tf.keras.utils.get_file( 'flower_photos.tgz', 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', extract=True) image_path = os.path.join(os.path.dirname(image_path), 'flower_photos') # + [markdown] id="E051HBUM5owi" # Use `DataLoader` class to load data. # # As for `from_folder()` method, it could load data from the folder. It assumes that the image data of the same class are in the same subdirectory and the subfolder name is the class name. Currently, JPEG-encoded images and PNG-encoded images are supported. # + id="I_fOlZsklmlL" data = DataLoader.from_folder(image_path) # + [markdown] id="u501eT4koURB" # Split it to training data (80%), validation data (10%, optional) and testing data (10%). # + id="cY4UU5SUobtJ" train_data, rest_data = data.split(0.8) validation_data, test_data = rest_data.split(0.5) # + [markdown] id="Z9_MYPie3EMO" # Show 25 image examples with labels. # + id="Ih4Wx44I482b" plt.figure(figsize=(10,10)) for i, (image, label) in enumerate(data.gen_dataset().unbatch().take(25)): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(image.numpy(), cmap=plt.cm.gray) plt.xlabel(data.index_to_label[label.numpy()]) plt.show() # + [markdown] id="AWuoensX4vDA" # ### Step 2: Customize the TensorFlow Model # # Create a custom image classifier model based on the loaded data. The default model is EfficientNet-Lite0. # # + id="TvYSUuJY3QxR" model = image_classifier.create(train_data, validation_data=validation_data) # + [markdown] id="4JFOKWnH9x8_" # Have a look at the detailed model structure. # + id="QNXAfjl192dC" model.summary() # + [markdown] id="LP5FPk_tOxoZ" # ### Step 3: Evaluate the Customized Model # # Evaluate the result of the model, get the loss and accuracy of the model. # + id="A8c2ZQ0J3Riy" loss, accuracy = model.evaluate(test_data) # + [markdown] id="6ZCrYOWoCt05" # We could plot the predicted results in 100 test images. 
# Predicted labels with red color are the wrong predicted results while others
# are correct.

# + id="n9O9Kx7nDQWD"
# Helper that picks the axis-label color for a plotted prediction:
# 'black' when the prediction matches the ground-truth label, 'red' otherwise.
def get_label_color(val1, val2):
  if val1 == val2:
    return 'black'
  else:
    return 'red'


# Then plot 100 test images and their predicted labels.
# If a prediction result is different from the provided label in the "test"
# dataset, we will highlight it in red color.
plt.figure(figsize=(20, 20))
predicts = model.predict_top_k(test_data)
for i, (image, label) in enumerate(test_data.gen_dataset().unbatch().take(100)):
  ax = plt.subplot(10, 10, i+1)
  plt.xticks([])
  plt.yticks([])
  plt.grid(False)
  plt.imshow(image.numpy(), cmap=plt.cm.gray)
  # Top candidate for sample i; predicts[i][0][0] is read as the predicted
  # class name — assumes predict_top_k returns per-sample (label, score)
  # pairs, TODO confirm against the Model Maker API docs.
  predict_label = predicts[i][0][0]
  color = get_label_color(predict_label,
                          test_data.index_to_label[label.numpy()])
  ax.xaxis.label.set_color(color)
  plt.xlabel('Predicted: %s' % predict_label)
plt.show()

# + [markdown] id="S3H0rkbLUZAG"
# If the accuracy doesn't meet the app requirement, one could refer to
# [Advanced Usage](#scrollTo=zNDBP2qA54aK) to explore alternatives such as
# changing to a larger model, adjusting re-training parameters etc.

# + [markdown] id="aeHoGAceO2xV"
# ### Step 4: Export to TensorFlow Lite Model
#
# Convert the trained model to TensorFlow Lite model format with
# [metadata](https://www.tensorflow.org/lite/convert/metadata) so that you can
# later use in an on-device ML application. The label file and the vocab file
# are embedded in metadata. The default TFLite filename is `model.tflite`.
#
# In many on-device ML applications, the model size is an important factor.
# Therefore, it is recommended that you quantize the model to make it smaller
# and potentially run faster.
# The default post-training quantization technique is full integer quantization
# for the image classification task.
# + id="Im6wA9lK3TQB" model.export(export_dir='.') # + [markdown] id="ROS2Ay2jMPCl" # See [example applications and guides of image classification](https://www.tensorflow.org/lite/examples/image_classification/overview) for more details about how to integrate the TensorFlow Lite model into mobile apps. # # This model can be integrated into an Android or an iOS app using the [ImageClassifier API](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_classifier) of the [TensorFlow Lite Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/overview). # + [markdown] id="habFnvRxxQ4A" # The allowed export formats can be one or a list of the following: # # * `ExportFormat.TFLITE` # * `ExportFormat.LABEL` # * `ExportFormat.SAVED_MODEL` # # By default, it just exports TensorFlow Lite model with metadata. You can also selectively export different files. For instance, exporting only the label file as follows: # + id="BvxWsOTmKG4P" model.export(export_dir='.', export_format=ExportFormat.LABEL) # + [markdown] id="-4jQaxyT5_KV" # You can also evaluate the tflite model with the `evaluate_tflite` method. # + id="S1YoPX5wOK-u" model.evaluate_tflite('model.tflite', test_data) # + [markdown] id="zNDBP2qA54aK" # ## Advanced Usage # # The `create` function is the critical part of this library. It uses transfer learning with a pretrained model similar to the [tutorial](https://www.tensorflow.org/tutorials/images/transfer_learning). # # The `create` function contains the following steps: # # 1. Split the data into training, validation, testing data according to parameter `validation_ratio` and `test_ratio`. The default value of `validation_ratio` and `test_ratio` are `0.1` and `0.1`. # 2. Download a [Image Feature Vector](https://www.tensorflow.org/hub/common_signatures/images#image_feature_vector) as the base model from TensorFlow Hub. The default pre-trained model is EfficientNet-Lite0. # 3. 
Add a classifier head with a Dropout Layer with `dropout_rate` between head layer and pre-trained model. The default `dropout_rate` is the default `dropout_rate` value from [make_image_classifier_lib](https://github.com/tensorflow/hub/blob/master/tensorflow_hub/tools/make_image_classifier/make_image_classifier_lib.py#L55) by TensorFlow Hub. # 4. Preprocess the raw input data. Currently, preprocessing steps including normalizing the value of each image pixel to model input scale and resizing it to model input size. EfficientNet-Lite0 have the input scale `[0, 1]` and the input image size `[224, 224, 3]`. # 5. Feed the data into the classifier model. By default, the training parameters such as training epochs, batch size, learning rate, momentum are the default values from [make_image_classifier_lib](https://github.com/tensorflow/hub/blob/master/tensorflow_hub/tools/make_image_classifier/make_image_classifier_lib.py#L55) by TensorFlow Hub. Only the classifier head is trained. # # # In this section, we describe several advanced topics, including switching to a different image classification model, changing the training hyperparameters etc. # # + [markdown] id="Gc4Jk8TvBQfm" # ## Customize Post-training quantization on the TensorFLow Lite model # # + [markdown] id="tD8BOYrHBiDt" # [Post-training quantization](https://www.tensorflow.org/lite/performance/post_training_quantization) is a conversion technique that can reduce model size and inference latency, while also improving CPU and hardware accelerator inference speed, with a little degradation in model accuracy. Thus, it's widely used to optimize the model. # # + [markdown] id="iyIo0d5TCzE2" # Model Maker library applies a default post-training quantization techique when exporting the model. 
If you want to customize post-training quantization, Model Maker supports multiple post-training quantization options using [QuantizationConfig](https://www.tensorflow.org/lite/api_docs/python/tflite_model_maker/config/QuantizationConfig) as well. Let's take float16 quantization as an instance. First, define the quantization config. # + id="k8hL2mstCxQl" config = QuantizationConfig.for_float16() # + [markdown] id="K1gzx_rmFMOA" # Then we export the TensorFlow Lite model with such configuration. # + id="WTJzFQnJFMjr" model.export(export_dir='.', tflite_filename='model_fp16.tflite', quantization_config=config) # + [markdown] id="Safo0e40wKZW" # In Colab, you can download the model named `model_fp16.tflite` from the left sidebar, same as the uploading part mentioned above. # + [markdown] id="A4kiTJtZ_sDm" # ## Change the model # # + [markdown] id="794vgj6ud7Ep" # ### Change to the model that's supported in this library. # # This library supports EfficientNet-Lite models, MobileNetV2, ResNet50 by now. [EfficientNet-Lite](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite) are a family of image classification models that could achieve state-of-art accuracy and suitable for Edge devices. The default model is EfficientNet-Lite0. # # We could switch model to MobileNetV2 by just setting parameter `model_spec` to the MobileNetV2 model specification in `create` method. # + id="7JKsJ6-P6ae1" model = image_classifier.create(train_data, model_spec=model_spec.get('mobilenet_v2'), validation_data=validation_data) # + [markdown] id="gm_B1Wv08AxR" # Evaluate the newly retrained MobileNetV2 model to see the accuracy and loss in testing data. # + id="lB2Go3HW8X7_" loss, accuracy = model.evaluate(test_data) # + [markdown] id="vAciGzVWtmWp" # ### Change to the model in TensorFlow Hub # # Moreover, we could also switch to other new models that inputs an image and outputs a feature vector with TensorFlow Hub format. 
# # As [Inception V3](https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1) model as an example, we could define `inception_v3_spec` which is an object of [image_classifier.ModelSpec](https://www.tensorflow.org/lite/api_docs/python/tflite_model_maker/image_classifier/ModelSpec) and contains the specification of the Inception V3 model. # # We need to specify the model name `name`, the url of the TensorFlow Hub model `uri`. Meanwhile, the default value of `input_image_shape` is `[224, 224]`. We need to change it to `[299, 299]` for Inception V3 model. # + id="xdiMF2WMfAR4" inception_v3_spec = image_classifier.ModelSpec( uri='https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1') inception_v3_spec.input_image_shape = [299, 299] # + [markdown] id="T_GGIoXZCs5F" # Then, by setting parameter `model_spec` to `inception_v3_spec` in `create` method, we could retrain the Inception V3 model. # # The remaining steps are exactly same and we could get a customized InceptionV3 TensorFlow Lite model in the end. # + [markdown] id="UhZ5IRKdeex3" # ### Change your own custom model # + [markdown] id="svTjlZhrCrcV" # If we'd like to use the custom model that's not in TensorFlow Hub, we should create and export [ModelSpec](https://www.tensorflow.org/hub/api_docs/python/hub/ModuleSpec) in TensorFlow Hub. # # Then start to define `ModelSpec` object like the process above. # + [markdown] id="4M9bn703AHt2" # ## Change the training hyperparameters # We could also change the training hyperparameters like `epochs`, `dropout_rate` and `batch_size` that could affect the model accuracy. The model parameters you can adjust are: # # # * `epochs`: more epochs could achieve better accuracy until it converges but training for too many epochs may lead to overfitting. # * `dropout_rate`: The rate for dropout, avoid overfitting. None by default. # * `batch_size`: number of samples to use in one training step. None by default. # * `validation_data`: Validation data. 
If None, skips validation process. None by default. # * `train_whole_model`: If true, the Hub module is trained together with the classification layer on top. Otherwise, only train the top classification layer. None by default. # * `learning_rate`: Base learning rate. None by default. # * `momentum`: a Python float forwarded to the optimizer. Only used when # `use_hub_library` is True. None by default. # * `shuffle`: Boolean, whether the data should be shuffled. False by default. # * `use_augmentation`: Boolean, use data augmentation for preprocessing. False by default. # * `use_hub_library`: Boolean, use `make_image_classifier_lib` from tensorflow hub to retrain the model. This training pipeline could achieve better performance for complicated dataset with many categories. True by default. # * `warmup_steps`: Number of warmup steps for warmup schedule on learning rate. If None, the default warmup_steps is used which is the total training steps in two epochs. Only used when `use_hub_library` is False. None by default. # * `model_dir`: Optional, the location of the model checkpoint files. Only used when `use_hub_library` is False. None by default. # # Parameters which are None by default like `epochs` will get the concrete default parameters in [make_image_classifier_lib](https://github.com/tensorflow/hub/blob/02ab9b7d3455e99e97abecf43c5d598a5528e20c/tensorflow_hub/tools/make_image_classifier/make_image_classifier_lib.py#L54) from TensorFlow Hub library or [train_image_classifier_lib](https://github.com/tensorflow/examples/blob/f0260433d133fd3cea4a920d1e53ecda07163aee/tensorflow_examples/lite/model_maker/core/task/train_image_classifier_lib.py#L61). # # For example, we could train with more epochs. # # + id="A3k7mhH54QcK" model = image_classifier.create(train_data, validation_data=validation_data, epochs=10) # + [markdown] id="VaYBQymQDsXU" # Evaluate the newly retrained model with 10 training epochs. 
# + id="VafIYpKWD4Sw" loss, accuracy = model.evaluate(test_data) # + [markdown] id="dhBU5NCy5Ji2" # # Read more # # You can read our [image classification](https://www.tensorflow.org/lite/examples/image_classification/overview) example to learn technical details. For more information, please refer to: # # * TensorFlow Lite Model Maker [guide](https://www.tensorflow.org/lite/guide/model_maker) and [API reference](https://www.tensorflow.org/lite/api_docs/python/tflite_model_maker). # * Task Library: [ImageClassifier](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_classifier) for deployment. # * The end-to-end reference apps: [Android](https://github.com/tensorflow/examples/tree/master/lite/examples/image_classification/android), [iOS](https://github.com/tensorflow/examples/tree/master/lite/examples/image_classification/ios), and [Raspberry PI](https://github.com/tensorflow/examples/tree/master/lite/examples/image_classification/raspberry_pi). # #
tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ScratchPad # language: python # name: scratchpad # --- import numpy as np import pandas as pd import plotly.express as px import plotly.io as pio # + from os import listdir as ls filespath = "./O_Nautilus_Runs_csv/" files = ls(filespath) # - files = pd.DataFrame([file.split('_') for file in files]).join(pd.DataFrame(files, columns=['filename'])) problem = 'DTLZ7' problem_files = list(files[files[8]==problem]['filename']) data_all = [] for file in problem_files: data = pd.read_csv(filespath + file) data_surrogate = data[['s0','s1']].join(pd.DataFrame(['Surrogate']*len(data), columns=['source'])) data_func_eval = data[['f0','f1']].join(pd.DataFrame(['Function evaluation<br> of surrogate results']*len(data), columns=['source'])) data_func_eval = data_func_eval.rename(columns={'f0':'X', 'f1':'Y'}) data_surrogate = data_surrogate.rename(columns={'s0':'X', 's1':'Y'}) data_merged = pd.concat([data_func_eval,data_surrogate], ignore_index=True) data_all.append(data_merged) data_all = pd.concat(data_all, ignore_index=True) fig = px.scatter(data_all, x='X', y='Y', color='source', title=( 'Pareto front achieved by optimistic surrogate model' + '<br>'+ 'and the corresponding evaluated values for ' + problem + ' problem'), ) fig pio.write_html(fig, file='./'+problem+'.html')
Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Chapter 6: Ensemble Methods
# ___
#
# ## Exercises
# **6.1** Why is bagging based on random sampling with replacement? Would bagging still reduce a forecast’s variance if sampling were without replacement?
#
# **6.2** Suppose that your training set is based on highly overlapping labels (i.e., with low uniqueness, as defined in Chapter 4).
# - **(a)** Does this make bagging prone to overfitting, or just ineffective? Why?
# - **(b)** Is out-of-bag accuracy generally reliable in financial applications? Why?
#
# **6.3** Build an ensemble of estimators, where the base estimator is a decision tree.
# - **(a)** How is this ensemble different from an RF?
# - **(b)** Using sklearn, produce a bagging classifier that behaves like an RF. What parameters did you have to set up, and how?
#
# **6.4** Consider the relation between an RF, the number of trees it is composed of, and
# the number of features utilized:
# - **(a)** Could you envision a relation between the minimum number of trees needed in an RF and the number of features utilized?
# - **(b)** Could the number of trees be too small for the number of features used?
# - **(c)** Could the number of trees be too high for the number of observations available?
#
# **6.5** How is out-of-bag accuracy different from stratified k-fold (with shuffling) cross validation accuracy?

# ## Code Snippets

# SNIPPET 6.1 ACCURACY OF THE BAGGING CLASSIFIER
#
# Probability that a majority vote of N independent base classifiers, each
# with individual accuracy p, beats chance in a k-class problem: the ensemble
# fails when at most N/k voters are correct (X ~ Binomial(N, p)).
from scipy.special import comb

N, p, k = 100, 1. / 3, 3  # voters, per-voter accuracy, number of classes
p_ = 0  # P[X <= N/k]: probability the bagging ensemble is wrong
for i in range(0, int(N / k) + 1):
    p_ += comb(N, i) * p ** i * (1 - p) ** (N - i)
print(f"Acc classifier: {p:.2}, Acc bagging: {1 - p_:.2}")
notebooks/ch6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/PepeScott/daa_2021_1/blob/master/Tarea8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="tSc7hf_L2_hL" colab={"base_uri": "https://localhost:8080/"} outputId="a796d50d-266b-4f3c-ca66-8e8d95344c5c"
# In-memory "database": records kept in ascending order by 'id', which is the
# precondition for binary search below.
alumno1 = {'id': 2, 'nombre': "Juan", 'carrera': "ICO", 'promedio': 7.67}
alumno2 = {'id': 4, 'nombre': "Rocio", 'carrera': "ICI", 'promedio': 8.67}
alumno3 = {'id': 5, 'nombre': "Diego", 'carrera': "DER", 'promedio': 8.98}
alumno4 = {'id': 7, 'nombre': "May", 'carrera': "ICI", 'promedio': 9.87}
alumno5 = {'id': 9, 'nombre': "Rob", 'carrera': "IME", 'promedio': 10.00}
alumno6 = {'id': 10, 'nombre': "Santi", 'carrera': "ICO", 'promedio': 5.37}
alumno7 = {'id': 14, 'nombre': "Moy", 'carrera': "IME", 'promedio': 6.85}
alumno8 = {'id': 16, 'nombre': "Diana", 'carrera': "DER", 'promedio': 9.99}
alumno9 = {'id': 19, 'nombre': "Zoila", 'carrera': "ICO", 'promedio': 8.22}
alumno10 = {'id': 22, 'nombre': "Armando", 'carrera': "ICO", 'promedio': 7.32}

bd = [alumno1, alumno2, alumno3, alumno4, alumno5,
      alumno6, alumno7, alumno8, alumno9, alumno10]


def busquedaBinaria(cadena, ide, inicio, final, indice, referencia):
    """Binary-search `cadena` (a list of dicts sorted by 'id') for id `ide`.

    Prints the matching student's name, career and GPA when found, or
    "Alumno no encontrado" otherwise. Searches the half-open range
    [inicio, final). `indice` and `referencia` are retained for backward
    compatibility with the original recursive signature and are ignored.

    Fixes over the original: the found-branch now reads from `cadena`
    instead of the global `bd`, and the ad-hoc halving of `final`/`referencia`
    (which mis-searched ranges not anchored at 0) is replaced by a standard
    iterative binary search.
    """
    lo, hi = inicio, final - 1
    while lo <= hi:
        medio = (lo + hi) // 2
        actual = cadena[medio]["id"]
        if actual == ide:
            print(cadena[medio]["nombre"], "estudia la carrera de",
                  cadena[medio]["carrera"], " y tiene un promedio de",
                  cadena[medio]["promedio"])
            return None
        elif actual < ide:
            lo = medio + 1
        else:
            hi = medio - 1
    print("Alumno no encontrado")
    return None


# Guarded so that importing this module (e.g. from tests) does not block on
# stdin; the notebook's interactive behavior is unchanged when run directly.
if __name__ == "__main__":
    ide = int(input("Inserte su id: "))
    busquedaBinaria(bd, ide, 0, len(bd), len(bd) // 2, len(bd) // 2)

    ide = int(input("Inserte su id: "))
    busquedaBinaria(bd, ide, 0, len(bd), len(bd) // 2, len(bd) // 2)
Tarea8.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # + from sklearn.metrics import log_loss class NumpyLogisticRegression: def __init__(self, learning_rate=0.1, max_iter=100, tol=1e-3, batch_size=20): self.learning_rate = learning_rate self.max_iter = max_iter self.tol = tol self.batch_size = batch_size def _sigmoid(self, X): return 1 / (1 + np.exp(-X)) def _decision_function(self, X): return np.dot(X, self.coefs_) def decision_function(self, X): X = self._add_intercept(X) return self._decision_function(X).ravel() def _grad_nll(self, X, y): grad = (self._predict_proba(X) - y) return np.dot(X.T, grad) def _add_intercept(self, X): return np.hstack((X, np.ones(shape=(X.shape[0], 1)))) def fit(self, X, y): X = self._add_intercept(X) # Make y to be a column vector for later operation y = np.atleast_2d(y).T # Initialize randomly the weights self.coefs_ = np.random.rand(X.shape[1], 1) it = 0 loss = np.inf while it < self.max_iter and loss > self.tol: # select a minibatch idx = np.random.choice(np.arange(X.shape[0]), size=self.batch_size) X_subset, y_subset = X[idx], y[idx] # compute the gradient dnll = self._grad_nll(X_subset, y_subset) # update the parameter self.coefs_ -= (self.learning_rate / X_subset.shape[0]) * dnll # update the loss and the number of iteration loss = log_loss(y, self._predict_proba(X)) it += 1 return self def _predict_proba(self, X): return self._sigmoid(self._decision_function(X)) def predict_proba(self, X): X = self._add_intercept(X) return self._predict_proba(X) def predict(self, X): prob = self.predict_proba(X) prob[prob < 0.5] = 0 prob[prob >= 0.5] = 1 return prob.astype(int).ravel() def score(self, X, y): return np.mean(y == self.predict(X)) # - from sklearn.datasets import load_iris 
# Binary subproblem from iris: keep only classes 0 and 1 and the first two
# features so the decision boundary can be drawn in 2-D.
iris = load_iris()
X, y = iris.data, iris.target
X = X[:, :2]
mask_class_0_1 = np.bitwise_or(y == 0, y == 1)
X = X[mask_class_0_1]
y = y[mask_class_0_1]

# Fit the NumPy implementation and report training accuracy.
clf = NumpyLogisticRegression(learning_rate=0.1)
clf.fit(X, y).score(X, y)


def plot_2d_separator(classifier, X, ax=None, levels=None, eps=None):
    """Draw contour lines of `classifier.decision_function` over 2-D data X.

    `levels` are the decision-function values to contour (default [0], the
    decision boundary); `eps` pads the plot range (default X.std() / 2);
    `ax` defaults to the current matplotlib axes. Also scatters X colored by
    the global `y` and labels axes with the global `iris` feature names.
    """
    eps = X.std() / 2. if eps is None else eps
    levels = [0] if levels is None else levels
    ax = plt.gca() if ax is None else ax
    x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
    y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
    # Evaluate the decision function on a 100x100 grid covering the data.
    xx = np.linspace(x_min, x_max, 100)
    yy = np.linspace(y_min, y_max, 100)
    X1, X2 = np.meshgrid(xx, yy)
    X_grid = np.c_[X1.ravel(), X2.ravel()]
    decision_values = classifier.decision_function(X_grid)
    CS = ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels)
    ax.clabel(CS, inline=1, fontsize=10)
    plt.scatter(X[:, 0], X[:, 1], c=y)
    plt.xlabel(iris.feature_names[0])
    plt.ylabel(iris.feature_names[1]);


plot_2d_separator(clf, X, levels=np.arange(-5, 5, 1))

from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score

# Same model expressed with sklearn for comparison.
# NOTE(review): loss='log' was renamed to 'log_loss' in sklearn >= 1.1 —
# confirm against the installed version.
pipe = make_pipeline(StandardScaler(), SGDClassifier(loss='log', tol=1e-3))
cross_val_score(pipe, X, y, cv=5)

# +
# And once more as a single sigmoid unit in Keras.
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras import optimizers
from sklearn.metrics import accuracy_score

model = Sequential()
model.add(Dense(1, input_shape=(X.shape[1],)))
model.add(Activation("sigmoid"))
# NOTE(review): `lr` and `predict_classes` are legacy Keras APIs (removed in
# TF >= 2.6, where they became `learning_rate` and thresholded `predict`) —
# confirm against the installed version.
model.compile(optimizer=optimizers.SGD(lr=0.1), loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=10, batch_size=20)

print(f'The mean accuracy is: {accuracy_score(y, model.predict_classes(X))}')
# -
00_why_python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from io import BytesIO import os import pandas as pd import pathlib import zipfile # + DATA_DIR = '/home/chris/src/provenance_py/provenance_lib/tests/data' archive = os.path.join(DATA_DIR, 'v5_uu_emperor.qzv') with zipfile.ZipFile(archive) as zf: # for name in sorted(zf.namelist()): # suff = list(pathlib.Path(name).parts) # print(pathlib.Path(*suff)) with zf.open('ffb7cee3-2f1f-4988-90cc-efd5184ef003/provenance/artifacts/83a80bfd-8954-4571-8fc7-ac9e8435156e/action/metadata.tsv') as myfile: # print(myfile.read()) df = pd.read_csv(BytesIO(myfile.read()), sep='\t') # - df
provenance_lib/notebooks/zf_to_metadata_df.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + tags=[]
import os
import socket

from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession

# Create Spark config for our Kubernetes based cluster manager.
# The driver runs in this notebook pod (client deploy mode), so its host is
# this pod's IP and its ports must be fixed for executors to connect back.
sparkConf = SparkConf()
sparkConf.setMaster("k8s://https://kubernetes.default.svc.cluster.local:443")
sparkConf.setAppName("spark001")
sparkConf.set("spark.kubernetes.container.image", "angelmaroco/spark:3.2.0-hadoop-3.2-aws-sdk-1.12.132-python-3.8")
sparkConf.set("spark.kubernetes.namespace", "default")
sparkConf.set("spark.executor.instances", "3")
sparkConf.set("spark.executor.cores", "1")
sparkConf.set("spark.executor.memory", "1800m")
sparkConf.set("spark.submit.deployMode", "client")
sparkConf.set("spark.driver.cores", "1")
sparkConf.set("spark.driver.instances", "1")
sparkConf.set("spark.driver.blockManager.port", "7777")
sparkConf.set("spark.driver.port", "2222")
sparkConf.set("spark.driver.host", socket.gethostbyname(socket.gethostname()))
sparkConf.set("spark.driver.bindAddress", "0.0.0.0")
sparkConf.set("spark.kubernetes.node.selector.workload", "workload-jupyterhub-user")
sparkConf.set("spark.kubernetes.pyspark.pythonVersion", "3")
sparkConf.set("spark.kubernetes.local.dirs.tmpfs", "true")
# FIX: the original key contained an invisible zero-width joiner (U+200D)
# before "spark", so Spark silently ignored this setting and dynamic
# allocation was never actually enabled.
sparkConf.set("spark.dynamicAllocation.enabled", "true")
sparkConf.set("spark.dynamicAllocation.shuffleTracking.enabled", "true")
sparkConf.set("spark.dynamicAllocation.shuffleTracking.timeout", "120")
sparkConf.set("spark.dynamicAllocation.minExecutors", "1")
sparkConf.set("spark.dynamicAllocation.maxExecutors", "2")
sparkConf.set("spark.kubernetes.allocation.batch.size", "15")
sparkConf.set("spark.dynamicAllocation.executorAllocationRatio", "1")
sparkConf.set("spark.dynamicAllocation.schedulerBacklogTimeout", "1")

# + tags=[]
# Initialize our Spark cluster, this will actually
# generate the worker nodes.
spark = SparkSession.builder.config(conf=sparkConf).getOrCreate()
sc = spark.sparkContext

from random import random
from operator import add

# Monte-Carlo estimate of pi: sample uniform points in [-1, 1]^2 and count
# the fraction landing inside the unit circle (area ratio = pi / 4).
partitions = 30
n = 1000 * partitions


def f(_):
    """One Monte-Carlo trial: 1 if a random point falls in the unit circle."""
    x = random() * 2 - 1
    y = random() * 2 - 1
    return 1 if x ** 2 + y ** 2 <= 1 else 0


count = sc.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
print("Pi is roughly %f" % (4.0 * count / n))

# + tags=[]
# Tear down the executors and release cluster resources.
sc.stop()
scripts/notebook-python-spark.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc="true" # # Table of Contents # <div class="toc" style="margin-top: 1em;"><ul class="toc-item" id="toc-level0"><li><span><a href="http://localhost:8888/notebooks/ia898/master/0_index.ipynb#Tutoriais-básicos-sobre-Jupyter,-Python,-NumPy,-Matplotlib,-Proc.-Imagens" data-toc-modified-id="Tutoriais-básicos-sobre-Jupyter,-Python,-NumPy,-Matplotlib,-Proc.-Imagens-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Tutoriais básicos sobre Jupyter, Python, NumPy, Matplotlib, Proc. Imagens</a></span><ul class="toc-item"><li><span><a href="http://localhost:8888/notebooks/ia898/master/0_index.ipynb#Jupyter" data-toc-modified-id="Jupyter-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Jupyter</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/0_index.ipynb#Python" data-toc-modified-id="Python-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Python</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/0_index.ipynb#NumPy" data-toc-modified-id="NumPy-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>NumPy</a></span><ul class="toc-item"><li><span><a href="http://localhost:8888/notebooks/ia898/master/0_index.ipynb#Precisa-ser-melhorado" data-toc-modified-id="Precisa-ser-melhorado-1.3.1"><span class="toc-item-num">1.3.1&nbsp;&nbsp;</span>Precisa ser melhorado</a></span></li></ul></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/0_index.ipynb#Revisão---Cálculo,-Álgebra" data-toc-modified-id="Revisão---Cálculo,-Álgebra-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Revisão - Cálculo, Álgebra</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/0_index.ipynb#Processamento-de-Imagens" 
data-toc-modified-id="Processamento-de-Imagens-1.5"><span class="toc-item-num">1.5&nbsp;&nbsp;</span>Processamento de Imagens</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/0_index.ipynb#Necessitam-ajustar-para-jupyter-notebooks" data-toc-modified-id="Necessitam-ajustar-para-jupyter-notebooks-1.6"><span class="toc-item-num">1.6&nbsp;&nbsp;</span>Necessitam ajustar para jupyter notebooks</a></span></li></ul></li></ul></div> # - # # Tutoriais básicos sobre Jupyter, Python, NumPy, Matplotlib, Proc. Imagens # # Este diretório é composto de uma série de pequenos tutoriais básicos do Python e principalmente do NumPy, com aplicação à processamento de imagens. # ## Jupyter # # - [Edição HTML](Ferramentas de Edicao HTML.ipynb) - Ferramentas de edição multimídia, links, imagens, equações # # ## Python # # - [Python I - Tipos de variáveis](tutorial_python_1_1.ipynb) # - [Python III - Declaração de funções](tutorial_python_1_3.ipynb) # ## NumPy # # - [Chessboard](chess.ipynb) - Motivação sobre programação matricial NumPy versus programação com laço explícito # - [Rampa_solucoes](Rampa_solucoes.ipynb) - # - [one_hot](one_hot.ipynb) - Codificação one-hot, duas soluções # - [NumPy - Introdução ao ndarray](tutorial_numpy_1_1.ipynb) # - [Matplotlib](tutorial_matplotlib.ipynb) # - [NumPy - Formatando array para impressão](tutorial_numpy_1_11.ipynb) # - [NumPy - Fatiamento array unidimensional](tutorial_numpy_1_2.ipynb) # - [NumPy - Fatiamento em duas dimensões](tutorial_numpy_1_3.ipynb) # - [NumPy - Cópia Rasa e Profunda](tutorial_numpy_1_4.ipynb) # - [NumPy - Array Strides](Array-strides.ipynb) # - [NumPy - Redução de eixo](tutorial_numpy_1_5a.ipynb) # # ### Precisa ser melhorado # - [NumPy - Uso do tile](tutorial_numpy_1_8.ipynb) # - [NumPy - Uso do resize](tutorial_numpy_1_9.ipynb) # - [NumPy - Uso do clip](tutorial_numpy_1_10.ipynb) # ## Revisão - Cálculo, Álgebra # # - [Revisão de Números Complexos](Revisao_NumerosComplexos.ipynb ) # ## 
Processamento de Imagens # # - [Imagens - Representação, Leitura e Visualização](tutorial_img_ds.ipynb) # - [Proc Imagens com Fatiamento](tutorial_1_imagens.ipynb) # - [NumPy - Indices e meshgrid para gerar imagens sintéticas](tutorial_numpy_1_7.ipynb) # - [Histograma e Estatística](tutorial_hist__stat_2.ipynb) # - [Transformação de Intensidade](tutorial_ti_2.ipynb) # - [Equalização de histograma](tutorial_hist_eq_2.ipynb) # - [Equalização por ordenação dos pixels](tutorial_pehist_1.ipynb) # - [Especificação de histograma por ordenação dos pixels](tutorial_pehist_2.ipynb) # - [Ajuste interativo de contraste](tutorial_contraste_iterativo_2.ipynb) # - [Convolução](tutorial_conv_3.ipynb) # - [Propriedades da convolução](tutorial_convprop_3.ipynb) # - Propriedades da DFT # - [Propriedade da escala (expansão) da DFT](dftscaleproperty.ipynb) # - [Interpolação na expansão (análise no domínio da frequência)](magnify.ipynb) # - [Transformada Discreta de Wavelets](wavelets.ipynb) # ## Necessitam ajustar para jupyter notebooks # # - [tutorial_trans_geom_intro_2.ipynb](tutorial_trans_geom_intro_2.ipynb) # - [tutorial_ptrans.ipynb](tutorial_ptrans.ipynb) # - [gengaussian.ipynb](gengaussian.ipynb ) # # !ls
master/0_index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from openrtdynamics2.dsp import *
import math
import numpy as np
import openrtdynamics2.lang, openrtdynamics2.dsp as dy
import openrtdynamics2.py_execute as dyexe
import os
import matplotlib.pyplot as plt
# #%matplotlib widget
# https://github.com/matplotlib/ipympl
from vehicle_lib.vehicle_lib import *
# -

# +
system = dy.enter_system()

# system inputs: the vehicle speed, five piecewise-constant steering rates and
# the initial steering/orientation angles (entered in degrees, used in radians)
velocity = dy.system_input( dy.DataTypeFloat64(1), name='velocity', default_value=5.0, value_range=[0, 25], title="vehicle velocity [m/s]")
s1 = dy.system_input( dy.DataTypeFloat64(1), name='s1', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 1 [rad/s]")
s2 = dy.system_input( dy.DataTypeFloat64(1), name='s2', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 2 [rad/s]")
s3 = dy.system_input( dy.DataTypeFloat64(1), name='s3', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 3 [rad/s]")
s4 = dy.system_input( dy.DataTypeFloat64(1), name='s4', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 4 [rad/s]")
s5 = dy.system_input( dy.DataTypeFloat64(1), name='s5', default_value=0.0, value_range=[-0.05, 0.05], title="steering rate 5 [rad/s]")
initial_steering = dy.system_input( dy.DataTypeFloat64(1), name='initial_steering', default_value=-0.0, value_range=[-40, 40], title="initial steering angle [degrees]") * dy.float64(math.pi / 180.0)
initial_orientation = dy.system_input( dy.DataTypeFloat64(1), name='initial_orientation', default_value=0.0, value_range=[-360, 360], title="initial orientation angle [degrees]") * dy.float64(math.pi / 180.0)

# parameters
wheelbase = 3.0

# sampling time
Ts = 0.01

# build the steering-rate signal: switch to the next rate input every 200 samples
steering_rate = dy.float64(0)
cnt = dy.counter()
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(200),  new_value=s1 )
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(400),  new_value=s2 )
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(600),  new_value=s3 )
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(800),  new_value=s4 )
steering_rate = dy.conditional_overwrite(steering_rate, condition= cnt >= dy.int32(1000), new_value=s5 )

# integrate the rate to obtain the (linearly increasing) steering angle,
# saturated to +/- 90 degrees
delta = dy.euler_integrator( steering_rate, Ts, initial_state=initial_steering )
delta = dy.saturate(u=delta, lower_limit=-math.pi/2.0, upper_limit=math.pi/2.0)

# the model of the vehicle
x, y, psi, x_dot, y_dot, psi_dot = discrete_time_bicycle_model(delta, velocity, Ts, wheelbase, psi0=initial_orientation)

#
# outputs: these are available for visualization in the html set-up
#

dy.append_output(x, 'x')
dy.append_output(y, 'y')
dy.append_output(psi, 'psi')
dy.append_output(delta, 'steering')

# generate code for Web Assembly (wasm), requires emcc (emscripten) to build
code_gen_results = dy.generate_code(template=dy.TargetWasm(), folder="generated/trajectory_generation", build=True)

# dy.clear()
# -

from IPython.display import JSON
JSON(code_gen_results['manifest'])

compiled_system = dyexe.CompiledCode(code_gen_results)

# +
# single simulation: constant s1, then a sign flip via s2
testsim = dyexe.SystemInstance(compiled_system)
N = 3000

input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : -0.1 }

# sim_results = run_batch_simulation(testsim, input_data, N )
sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
# -

sim_results['y']

# +
plt.figure()
plt.plot(sim_results['x'], sim_results['y'])
plt.show()
# -

plt.figure()
plt.plot( sim_results['steering'])

# +
# sweep the 2nd steering rate (loop variables renamed so they do not shadow the
# dy input signals s2..s5)
testsim = dyexe.SystemInstance(compiled_system)
N = 600

plt.figure()
for rate_2 in np.linspace(-0.0, -0.2, 5):
    input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : rate_2 }
    sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
    plt.plot(sim_results['x'], sim_results['y'])

# +
# sweep rates 2 and 3
testsim = dyexe.SystemInstance(compiled_system)
N = 600 + 200

plt.figure()
for rate_2 in np.linspace(-0.0, -0.2, 5):
    for rate_3 in np.linspace(-0.0, -0.2, 5):
        input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : rate_2, 's3' : rate_3 }
        sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
        plt.plot(sim_results['x'], sim_results['y'])

# +
# sweep rates 2, 3 and 4
testsim = dyexe.SystemInstance(compiled_system)
N = 600 + 200 + 200

plt.figure()
for rate_2 in np.linspace(-0.0, -0.2, 5):
    for rate_3 in np.linspace(-0.0, -0.1, 5):
        for rate_4 in np.linspace(0.1, +0.2, 4):
            input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : rate_2, 's3' : rate_3, 's4' : rate_4 }
            sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
            plt.plot(sim_results['x'], sim_results['y'])

# +
# sweep rates 2, 3, 4 and 5
testsim = dyexe.SystemInstance(compiled_system)
N = 600 + 200 + 200 + 200

plt.figure()
for rate_2 in np.linspace(-0.0, -0.2, 5):
    for rate_3 in np.linspace(-0.0, -0.1, 5):
        for rate_4 in np.linspace(0.1, +0.2, 4):
            for rate_5 in np.linspace(-0.1, +0.1, 4):
                input_data = {'initial_steering': 0.0, 's1' : 0.1, 's2' : rate_2, 's3' : rate_3, 's4' : rate_4, 's5' : rate_5 }
                sim_results = dyexe.run_batch_simulation(testsim, input_data, N, output_keys=['x', 'y', 'steering'] )
                plt.plot(sim_results['x'], sim_results['y'])
# -
vehicle_control/trajectory_generation.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .java // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Java // language: java // name: java // --- // # Lab Five // --- // // For this lab we're going to be messing around with our Linear Data Types! // // Our Goals are: // - Creating an Array // - Creating an ArrayList // - Creating an LinkedList // - Creating an Queue // - Creating an Stack // // Please show the ability to: // - Create these data types // - Populate the data types // - Print out the Linear Data Types // Create an Array, Populate it, and Print it out int[] arrayLab = {4, 5, 7, 23, 50, 343}; for (int i = 0; i < arrayLab.length; i++){ System.out.println(arrayLab[i]); } // Create an ArrayList, Populate it, and Print it out ArrayList<Double> arrayListLab = new ArrayList(Arrays.asList(3,54,245,565,4325,324554)); for (int i = 0; i < arrayListLab.size(); i++){ System.out.println(arrayListLab.get(i));} // + // Create an LinkedList, Populate it, and Print it out LinkedList<String> shoppingCart = new LinkedList<String>(); shoppingCart.add("Cold Cuts"); shoppingCart.add("Paper Towels"); shoppingCart.add("Popcorn"); shoppingCart.add("Potato Chips"); shoppingCart.add("Vegetables"); shoppingCart.add("Fruits"); shoppingCart.add("Yogurt"); System.out.println(shoppingCart); // + // Create an Queue, Populate it, and Print it out Queue<String> premTable = new LinkedList<String>(); premTable.add("Man City"); premTable.add("Man United"); premTable.add("Leicester"); premTable.add("West Ham"); System.out.println(premTable ); // + // Create an Stack, Populate it, and Print it out Stack<String> LaLigastack = new Stack<String>(); LaLigastack.push("Atletico Madrid"); LaLigastack.push("Real Madrid"); LaLigastack.push("Barca"); LaLigastack.push("Sevilla"); System.out.println(LaLigastack);
JupyterNotebooks/Labs/Lab 5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Fit the CB models based on optimized parameters

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

# NOTE: duplicated imports (re, nltk, inflect, gensim.corpora) were deduplicated;
# no names were removed from scope.
import os
import re, string, unicodedata
import nltk
import contractions
import inflect
import lime
from lime import lime_text
from lime.lime_text import LimeTextExplainer
from wordcloud import WordCloud, STOPWORDS
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk import SnowballStemmer
from nltk.stem import LancasterStemmer, WordNetLemmatizer
from sklearn import metrics
from sklearn.model_selection import train_test_split
# from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.preprocessing import StandardScaler
# %matplotlib inline
from sklearn.preprocessing import OneHotEncoder
import scipy
from gensim import corpora, models, similarities
from gensim.similarities.docsim import Similarity
import pickle
import time
import Utils as util
# -

# ## Load all functions

# +
### Text Preprocessing

def remove_punctuation(words):
    """Remove punctuation from list of tokenized words"""
    new_words = []
    for word in words:
        new_word = re.sub(r'[^\w\s]', '', word)
        if new_word != '':
            new_words.append(new_word)
    return new_words


def remove_special(words):
    """Remove special signs like &*"""
    new_words = []
    for word in words:
        new_word = re.sub(r'[-,$()#+&*]', '', word)
        if new_word != '':
            new_words.append(new_word)
    return new_words


def replace_numbers(words):
    """Replace all integer occurrences in list of tokenized words with textual representation"""
    p = inflect.engine()
    new_words = []
    for word in words:
        if word.isdigit():
            new_word = p.number_to_words(word)
            new_words.append(new_word)
        else:
            new_words.append(word)
    return new_words


def remove_stopwords(words):
    """Remove stop words from list of tokenized words"""
    stopwords = nltk.corpus.stopwords.words('english')
    myStopWords = []  # extra project-specific stopwords (currently none)
    stopwords.extend(myStopWords)
    new_words = []
    for word in words:
        if word not in stopwords:
            new_words.append(word)
    return new_words


def to_lowercase(words):
    """Convert words to lowercase"""
    new_words = []
    for word in words:
        new_words.append(word.lower())
    return new_words


def stem_words(words):
    """Stem words in list of tokenized words"""
    # NOTE: the original created a LancasterStemmer and immediately shadowed it
    # with the SnowballStemmer; only the latter was ever used, so the dead
    # assignment is removed (no behavior change).
    stemmer = SnowballStemmer('english')
    stems = []
    for word in words:
        stem = stemmer.stem(word)
        stems.append(stem)
    return stems


def lemmatize_verbs(words):
    """Lemmatize verbs in list of tokenized words"""
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word in words:
        lemma = lemmatizer.lemmatize(word, pos='v')
        lemmas.append(lemma)
    return lemmas


def normalize_lemmatize(words):
    """Full cleaning pipeline: strip specials/punctuation, lowercase,
    spell out numbers, drop stopwords, stem, then lemmatize verbs."""
    words = remove_special(words)
    words = to_lowercase(words)
    words = remove_punctuation(words)
    words = replace_numbers(words)
    words = remove_stopwords(words)
    words = stem_words(words)
    words = lemmatize_verbs(words)
    return words


def get_processed(sample):
    """Tokenize and normalize the review text of every business in `sample`.

    Returns a DataFrame with columns ['business_id', 'text'].
    """
    processed = pd.DataFrame(data=[], columns=['business_id', 'text'])
    new_texts = []
    for i in range(0, len(sample)):
        business_id = sample['business_id'].iloc[i]
        words = nltk.word_tokenize(sample['text'].iloc[i])
        text = ' '.join(normalize_lemmatize(words))
        dfnew = pd.DataFrame([[business_id, text]],
                             columns=['business_id', 'text'])
        new_texts.append(text)
        # NOTE: DataFrame.append is deprecated in pandas >= 1.4 (removed in 2.0);
        # kept for compatibility with the pandas version this project pins.
        processed = processed.append(dfnew, ignore_index=True)
    return processed


## Similarity matrix

def get_tfidf_matrix(processed):
    '''
    get the Tf-Idf matrix of processed texts for business reviews
    '''
    TV = TfidfVectorizer(stop_words="english")
    processed["text"] = processed["text"].fillna('')
    tfidf_matrix = TV.fit_transform((processed["text"]))
    return tfidf_matrix


def get_cos_sim_matrix(tfidf_matrix, n):
    '''
    use truncated SVD to reduce dimensions to n
    @n: the dimensions to keep
    '''
    SVD = TruncatedSVD(n_components=n, random_state=42)  # 42 is the ultimate answer to everything
    tfidf_truncated = SVD.fit_transform(tfidf_matrix)
    cosine_sim = cosine_similarity(tfidf_truncated, tfidf_truncated)
    return cosine_sim


def get_euclidean_sim(business, n_components):
    """Similarity from euclidean distances on SVD-truncated business features (1/exp(dist))."""
    SVD = TruncatedSVD(n_components=n_components, random_state=42)
    bus_truncated = SVD.fit_transform(business)
    eucl_dist = euclidean_distances(bus_truncated)
    eucl_sim = 1 / np.exp(eucl_dist)
    return eucl_sim


def get_buscosine_sim(business, n_components):
    """Cosine similarity on SVD-truncated business features."""
    SVD = TruncatedSVD(n_components=n_components, random_state=42)
    bus_truncated = SVD.fit_transform(business)
    cosine_sim = cosine_similarity(bus_truncated, bus_truncated)
    return cosine_sim


def get_mix_sim_matrix(cosine_sim, bus_cos_sim, lmbda=0.5):
    """Convex combination of text similarity and business-feature similarity."""
    mixed_sim = np.add(cosine_sim * lmbda, bus_cos_sim * (1 - lmbda))  # assume equally weighted
    return mixed_sim


def get_mix_sim_df(df_tfidf_sim, df_bus_sim, lmbda=0.5):
    """Same convex combination, aligning the two similarity DataFrames by id first."""
    df_tfidf_pivot = pd.melt(df_tfidf_sim.reset_index(), id_vars="index",
                             value_vars=df_tfidf_sim.columns.values)
    df_bus_pivot = pd.melt(df_bus_sim.reset_index(), id_vars="index",
                           value_vars=df_bus_sim.columns.values)
    df_merge = pd.merge(df_tfidf_pivot, df_bus_pivot, on=["index", "variable"])
    df_merge["value"] = (lmbda) * df_merge["value_x"] + (1 - lmbda) * df_merge["value_y"]
    df_mixed_sim = pd.pivot(df_merge, index="index", columns="variable", values="value")
    return df_mixed_sim


## Get recommendations and prediction

def get_recommendation_cos(reviews, business_id, user_id, df_sim, k):
    '''get the business_id_array that shows top_k greatest similarity to the specific business_id'''
    user_bids = reviews[reviews['user_id'] == user_id]['business_id'].values
    df_user = df_sim.loc[df_sim.index.isin(user_bids), df_sim.columns == business_id]
    df_user_topk = df_user.sort_values(df_user.columns[0], ascending=False).iloc[:k]
    return np.array(df_user_topk.index.values)


def predict_rating(reviews, user_id, business_ids):
    '''predict the avg of the user's rating on business in business_ids'''
    scores = reviews.loc[(reviews.user_id == user_id) &
                         (reviews.business_id.isin(business_ids))]["stars"].values
    return np.mean(scores)


def get_results_cos(reviews, reviews_test, business_id, user_id, df_sim, k):
    '''
    prediction on the business_id: avg the ratings on top_k business that shows similarity to the business_id
    actual on the business_id: the true rating
    '''
    actual = reviews_test.loc[(reviews_test.user_id == user_id) &
                              (reviews_test.business_id == business_id)]['stars'].values[0]
    business_ids = get_recommendation_cos(reviews, business_id, user_id, df_sim, k)
    prediction = predict_rating(reviews, user_id, business_ids)
    return actual, prediction


def get_review_processed(processed, reviews):
    """Keep only reviews whose business has a processed-text entry."""
    reviews_processed = reviews.loc[reviews.business_id.isin(processed.business_id)]\
        .reset_index()\
        .drop(columns=['index'])
    return reviews_processed


def CB_predict(reviews, reviews_test, df_sim, k=5):
    '''
    based on test_df get a dataframe with each user on each business's true ratings and prediction ratings
    @df_sim, n*n DataFrame for business similarities
    @k: int, top k similar businesses
    '''
    user_id_sample = reviews_test['user_id'].values
    busi_id_sample = reviews_test['business_id'].values
    actual = []
    predictions = []
    for i in range(len(reviews_test)):
        try:
            act, pred = get_results_cos(reviews, reviews_test,
                                        busi_id_sample[i], user_id_sample[i], df_sim, k)
            actual.append(act)
            predictions.append(pred)
        except Exception:  # was a bare `except:`; keep NaN fallback for unpredictable pairs
            actual.append(np.nan)
            predictions.append(np.nan)
    return pd.DataFrame({"user_id": user_id_sample,
                         "business_id": busi_id_sample,
                         "true_ratings": actual,
                         "prediction_ratings": predictions})


## LSI model

def get_lsi(processed, reviews, user_id, n_topics):
    '''
    get the lsi model for user_id
    '''
    user_bids = reviews[reviews['user_id'] == user_id]['business_id'].values
    processed_user = processed.loc[processed.business_id.isin(user_bids)]
    documents = list(processed_user['text'].values)
    texts = [[word for word in document.split(' ')] for document in documents]
    dictionary = corpora.Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]
    lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=n_topics)
    return lsi, dictionary, corpus


# BUG FIX: the original cell defined `get_recommendation_lsi` / `get_results_lsi`
# TWICE — a first pair taking a precomputed `df_lsi` that was immediately shadowed
# by the pair below and therefore dead code. The dead pair has been removed;
# runtime behavior is unchanged.

def get_recommendation_lsi(processed, reviews, business_id, user_id, k, n_topics):
    """Top-k businesses (among those the user rated) most similar to `business_id`
    in an LSI space built from the user's own reviewed businesses."""
    user_bids = reviews[reviews['user_id'] == user_id]['business_id'].values
    processed_user = processed.loc[processed.business_id.isin(user_bids)]
    documents = list(processed_user['text'].values)
    texts = [[word for word in document.split(' ')] for document in documents]
    dictionary = corpora.Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]
    lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=n_topics)
    doc = processed['text'].loc[processed.business_id == business_id].values[0]
    vec_bow = dictionary.doc2bow(doc.lower().split())
    vec_lsi = lsi[vec_bow]
    index = similarities.MatrixSimilarity(lsi[corpus])
    sims = list(index[vec_lsi])
    results = list(zip(user_bids, sims))
    results_ordered = np.array(sorted(results, key=lambda x: x[1], reverse=True))
    results_topk = results_ordered[:k]
    return results_topk[:, 0]


def get_results_lsi(processed, reviews, reviews_test, business_id, user_id, k, n_topics):
    """(actual, predicted) rating for one (user, business) pair via the LSI neighborhood."""
    actual = reviews_test.loc[(reviews_test.user_id == user_id) &
                              (reviews_test.business_id == business_id)]['stars'].values[0]
    business_ids = get_recommendation_lsi(processed, reviews, business_id, user_id, k, n_topics)
    prediction = predict_rating(reviews, user_id, business_ids)
    return actual, prediction


def CB_LSI_predict(df_texts_train, reviews, reviews_test, k=5, n_topics=100):
    """Predict a rating for every row of `reviews_test` with the LSI content model."""
    uid_sample = reviews_test['user_id'].values
    bid_sample = reviews_test['business_id'].values
    actual_lsi = []
    predictions_lsi = []
    for i in range(len(reviews_test)):
        try:
            act, pred = get_results_lsi(df_texts_train, reviews, reviews_test,
                                        bid_sample[i], uid_sample[i], k, n_topics)
            predictions_lsi.append(pred)
            actual_lsi.append(act)
        except Exception:  # was a bare `except:`; keep NaN fallback
            predictions_lsi.append(np.nan)
            actual_lsi.append(np.nan)
    return pd.DataFrame({"user_id": uid_sample,
                         "business_id": bid_sample,
                         "ratings": actual_lsi,
                         "pred_lsi": predictions_lsi})


def get_recommendation_cos_full(reviews, user_id, df_sim, k, busi_id_lst):
    '''get the business_id_array that shows top_k greatest similarity to the specific business_id'''
    df_user_rating = reviews[reviews.user_id == user_id]
    # df_sim = df_sim.loc[busi_id_lst, busi_id_lst]
    user_bids = df_user_rating['business_id'].values
    df_user = df_sim.loc[df_sim.index.isin(user_bids)]
    # indicator matrix of the k nearest rated businesses per column, row-normalized
    df_user_rank = df_user.rank(ascending=False, axis=0)
    df_user_rank[df_user_rank <= k] = 1
    df_user_rank[df_user_rank > k] = 0
    df_user_rank = df_user_rank / np.min([k, len(user_bids)])
    user_rating_matrix = np.array(df_user_rating[["business_id",
                                                  "stars"]].set_index(["business_id"]).loc[df_user_rank.index.values])
    pred = user_rating_matrix.T @ np.array(df_user_rank)
    return pred


def CB_sim_fit_full_matrix(train_valid_df, df_sim, k, user_id_lst, busi_id_lst):
    """Predicted rating matrix (users x businesses) from the similarity neighborhood."""
    rating_pred_matrix = np.zeros((len(user_id_lst), len(busi_id_lst)))
    for i, user_id in enumerate(user_id_lst):
        rating_pred_matrix[i, ] = get_recommendation_cos_full(train_valid_df, user_id,
                                                              df_sim, k, busi_id_lst)
    return rating_pred_matrix


def get_mse(pred, actual):
    """MSE over the observed (non-zero) entries of the sparse rating matrix only."""
    # Ignore zero terms.
    pred = pred[actual.nonzero()].flatten()
    actual = actual[actual.nonzero()].flatten()
    return mean_squared_error(pred, actual)
# -

# ## Load Data

# +
## Read data
rev_busi_Pho = pd.read_csv('../data/filtered_reviews_in_Phonex.csv', parse_dates=["date"])
user_id_lst = rev_busi_Pho['user_id'].unique().tolist()    # rows of sparse matrix
busi_id_lst = rev_busi_Pho['business_id'].unique().tolist()  # columns of sparse matrix

# BUG FIX: `train_valid_df` was commented out here, but section 4 (LSI) below
# uses it and raised a NameError. The load is restored.
train_valid_df = pickle.load(open('../data/train_valid_df.pkl', "rb"))
test_df = pickle.load(open('../data/test_df.pkl', "rb"))
train_df = pickle.load(open('../data/train_df.pkl', "rb"))
valid_df = pickle.load(open('../data/valid_df.pkl', "rb"))

train_sparse_matrix = np.load('train_sparse_matrix.npy')
test_sparse_matrix = np.load('test_sparse_matrix.npy')
valid_sparse_matrix = np.load('valid_sparse_matrix.npy')

bus_df_subset = pd.read_csv("../data/business_subset_cleaned.csv", index_col="business_id")
bus_df_subset.head(1)
# -

# ## 1. non-NLP CB Model (CB_Bus)

# +
k1 = 15             # neighborhood size for the business-feature model
similarity = "cos"  # or "eucl"
n_components1 = 10  # SVD dimensions for business features

if similarity == "cos":
    bus_sim = get_buscosine_sim(bus_df_subset, n_components1)
else:
    bus_sim = get_euclidean_sim(bus_df_subset, n_components1)

df_bus_sim = pd.DataFrame(bus_sim, index=bus_df_subset.index.values,
                          columns=bus_df_subset.index.values)
df_bus_sim.shape
# -

rev_busi_Pho.head(2)

# NOTE: the original re-defined get_recommendation_cos_full / CB_sim_fit_full_matrix /
# get_mse here with bodies identical to the ones above; the redundant duplicate
# cell was removed (no behavior change).

# +
#### Generate model fit:
t0 = time.time()
pred_matrix = CB_sim_fit_full_matrix(train_df, df_bus_sim.loc[busi_id_lst, busi_id_lst],
                                     k1, user_id_lst, busi_id_lst)
t1 = time.time()

print("time elapsed: (seconds)")
print(np.round(t1 - t0, 3))
print("MSE on test set:", get_mse(pred_matrix, valid_sparse_matrix))
# -

np.save('../data/pred_matrix.npy', pred_matrix)
print("MSE on test set:", get_mse(pred_matrix, valid_sparse_matrix))

# ## 2. CB - similarity with NLP (CB_NLP)

# +
# df_texts = pickle.load(open('../data/text_train_valid_df.pkl', "rb"))
df_texts = pickle.load(open('../data/text_train_df.pkl', "rb"))
df_texts.head(3)
# -

# Parameters
n_components2 = 20  # n singular values for reviews' vector space
k2 = 45             # neighborhood size for the NLP model

tfidf_matrix = get_tfidf_matrix(df_texts)
cosine_sim = get_cos_sim_matrix(tfidf_matrix, n_components2)
df_tfidf_sim = pd.DataFrame(cosine_sim, index=df_texts['business_id'].values,
                            columns=df_texts['business_id'].values)
df_tfidf_sim.head(1)

# +
#### Generate model fit:
t0 = time.time()
# BUG FIX: the original passed `k1` here, so the tuned NLP neighborhood size
# `k2 = 45` declared above was never used; the NLP model is now fit with k2.
pred_matrix_nlp = CB_sim_fit_full_matrix(train_df, df_tfidf_sim.loc[busi_id_lst, busi_id_lst],
                                         k2, user_id_lst, busi_id_lst)
t1 = time.time()

print("time elapsed: (seconds)")
print(np.round(t1 - t0, 3))
print("MSE on test set:", get_mse(pred_matrix_nlp, valid_sparse_matrix))
# -

np.save('../data/pred_matrix_nlp.npy', pred_matrix_nlp)
print("MSE on test set:", get_mse(pred_matrix_nlp, valid_sparse_matrix))

# ## 4. LSI Model

k4 = 50
n_topics = 150

df_texts = pickle.load(open('../data/text_train_valid_df.pkl', "rb"))

#### Generate model fit:
t0 = time.time()
df_pred_lsi = util.CB_LSI_predict(df_texts, train_valid_df, test_df, k=k4, n_topics=n_topics)
t1 = time.time()
print("time elapsed: (seconds)")
print(np.round(t1 - t0, 3))

# +
print("MSE on test set:", mean_squared_error(df_pred_lsi.pred_lsi, df_pred_lsi.ratings))
# -

df_pred_lsi

# np.save('../data/pred_matrix_nlp.npy', pred_matrix_nlp)
# np.save('..data/pred_matrix.npy', pred_matrix)
# np.save('../data/pred_matrix_mixed.npy', pred_matrix_mixed)
# df_pred_lsi.to_csv("../data/Predictions_CB_LSI.csv", index = False)
code/1.4.3_Content_Filtering_Fullmatrix_pred.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Load Iris Dataset

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
# Generalization: reshape(-1, 1) lets numpy infer the row count instead of the
# hard-coded 150, so the cell keeps working if the dataset size changes.
y = np.array(y).reshape(-1, 1)

# # Split Data Into Train/Test Sets

# NOTE(review): the split is unseeded, so scores vary between runs — pass
# random_state=... if reproducibility is wanted.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)

# # Train on CellularModel

from CellularModel import CellularModel

model = CellularModel(fitness_func='accuracy',
                      N=150,
                      mutate_proba=0.0005,
                      min_depth=14,
                      max_depth=14,
                      max_epochs=1000,
                      auto_plot=True)

model.fit(X_train, y_train)

# # Evaluate Model Score

y_pred = model.predict(X_test)

from sklearn.metrics import f1_score
f1_score(y_test, y_pred, average='macro')
tests-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 2.3. Hypothesis Final List # **1.** Female patients are more persistent of a drug than male patients. # # **2.** Patients from Northeast are more persistent of a drug than patients from South. # # **3.** Patients over 65 years of age are more persistent of a drug than patients 65 years of age or younger. # # **4.** Caucasian, non-Hispanic patients are more persistent of a drug than patients with different race and ethnicity. # # **5.** Patients mapped to IDN are more persistent of a drug than patients not mapped. # **6.** Patients that received the drug prescription from General Practitioner Specialty are less persistent of a drug than patients that received the drug prescription from other Specialties. # **7.** Patients that used Glucocorticoid and had a Fragility Fracture, before and during the therapy, are more persistent of a drug than patients that did not use Glucocorticoid nor had a Fragility Fracture, in any situation. # # **8.** Patients that had Dexa Scan more than 20 times are more persistent of a drug than patients that had Dexa Scan less than 20 times, and patients that did not have Dexa Scan are more persistent of a drug than patients that had Dexa Scan. # # **9.** Patients that present HR_VHR as Risk Segment during the therapy and present an improvement in Risk Segment after the therapy are more persistent of a drug than patients that present other types of Risk Segment and Change in Risk Segment. # # **10.** Patients that present a T Score of >-2.5 before the therapy and present a worsened T Score status after the therapy are less persistent of a drug than patients that present other types of T Score and T Score status.
# **11.** Patients that present a risk factor count higher than 4 are more persistent of a drug than patients that present a risk factor count of 4 or lower. # # **12.** Patients adherent for therapies and that did not use drugs before the therapy are more persistent of a drug than patients that are not adherent for therapies and that used drugs before the therapy. # # **13.** Patients that used more than 6 concomitancy drugs before the therapy are less persistent of a drug than patients that used 6 or fewer concomitancy drugs before the therapy. # # **14.** Patients that present more than 8 different types of comorbidity are more persistent of a drug than patients that present less than 8 different types of comorbidity. Ptid object Persistency_Flag object Gender object Race object Ethnicity object Region object Age_Bucket object Ntm_Speciality object Ntm_Specialist_Flag object Ntm_Speciality_Bucket object Gluco_Record_Prior_Ntm object Gluco_Record_During_Rx object Dexa_Freq_During_Rx int64 Dexa_During_Rx object Frag_Frac_Prior_Ntm object Frag_Frac_During_Rx object Risk_Segment_Prior_Ntm object Tscore_Bucket_Prior_Ntm object Risk_Segment_During_Rx object Tscore_Bucket_During_Rx object Change_T_Score object Change_Risk_Segment object Adherent_Flag object Idn_Indicator object Injectable_Experience_During_Rx object Comorb_Encounter_For_Screening_For_Malignant_Neoplasms object Comorb_Encounter_For_Immunization object Comorb_Encntr_For_General_Exam_W_O_Complaint,_Susp_Or_Reprtd_Dx object Comorb_Vitamin_D_Deficiency object Comorb_Other_Joint_Disorder_Not_Elsewhere_Classified object Comorb_Encntr_For_Oth_Sp_Exam_W_O_Complaint_Suspected_Or_Reprtd_Dx object Comorb_Long_Term_Current_Drug_Therapy object Comorb_Dorsalgia object Comorb_Personal_History_Of_Other_Diseases_And_Conditions object Comorb_Other_Disorders_Of_Bone_Density_And_Structure object Comorb_Disorders_of_lipoprotein_metabolism_and_other_lipidemias object
Comorb_Osteoporosis_without_current_pathological_fracture object Comorb_Personal_history_of_malignant_neoplasm object Comorb_Gastro_esophageal_reflux_disease object Concom_Cholesterol_And_Triglyceride_Regulating_Preparations object Concom_Narcotics object Concom_Systemic_Corticosteroids_Plain object Concom_Anti_Depressants_And_Mood_Stabilisers object Concom_Fluoroquinolones object Concom_Cephalosporins object Concom_Macrolides_And_Similar_Types object Concom_Broad_Spectrum_Penicillins object Concom_Anaesthetics_General object Concom_Viral_Vaccines object Risk_Type_1_Insulin_Dependent_Diabetes object Risk_Osteogenesis_Imperfecta object Risk_Rheumatoid_Arthritis object Risk_Untreated_Chronic_Hyperthyroidism object Risk_Untreated_Chronic_Hypogonadism object Risk_Untreated_Early_Menopause object Risk_Patient_Parent_Fractured_Their_Hip object Risk_Smoking_Tobacco object Risk_Chronic_Malnutrition_Or_Malabsorption object Risk_Chronic_Liver_Disease object Risk_Family_History_Of_Osteoporosis object Risk_Low_Calcium_Intake object Risk_Vitamin_D_Insufficiency object Risk_Poor_Health_Frailty object Risk_Excessive_Thinness object Risk_Hysterectomy_Oophorectomy object Risk_Estrogen_Deficiency object Risk_Immobilization object Risk_Recurring_Falls object Count_Of_Risks int64 Count_Of_Concomitancy int64 Count_Of_Comorbidity int64
final_project/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Suppose we have `n` features and `m` observations # | Index | $X_{1}$ | $X_{2}$ | $X_{3}$ | .... | .... | $X_{n}$ | y | # |--------------|---------------|---------------|---------------|------|------|----------------|----------| # | 1 | $x_{1}^{1} $ | $x_{2}^{1}$ | $x_{3}^{1}$ | ... | ... | $x_{n}^{1}$ | $y^{1}$ | # | 2 | $x_{1}^{2}$ | $x_{2}^{2}$ | $x_{3}^{2}$ | ... | ... | $x_{n}^{2}$ | $y^{2}$ | # | 3 | $x_{1}^{3}$ | $x_{2}^{3}$ | $x_{3}^{3}$ | ... | ... | $x_{n}^{3}$ | $y^{3}$ | # | . | . | . | . | ... | ... | . | | # | . | . | . | . | ... | ... | . | | # | . | . | . | . | ... | ... | . | | # | m | $x_{1}^{m}$ | $x_{2}^{m}$ | $x_{3}^{m}$ | ... | ... | $x_{n}^{m}$ | $y^{m}$ | # Here `subscript` denotes feature and `superscript` denote observation number # Suppose the weights of matrix for n features is denoted by column vectors of shape [1, n] # <br/> # $$ \beta = \begin{bmatrix} \beta_{1} & \beta_{2} & \beta_{3} & .... & \beta_{n} \end{bmatrix} $$ # ### we need to calculate prediction for each observation # \begin{equation} # \hat{y^1} = \beta_{0} + \beta_{1}x_{1}^{1} + \beta_{2}x_{2}^{1} + \beta_{3}x_{3}^{1} + \beta_{3}x_{3}^{1} + .... + \beta_{n}x_{n}^{1} # \end{equation} # # \begin{equation} # \hat{y^2} = \beta_{0} + \beta_{1}x_{1}^{2} + \beta_{2}x_{2}^{2} + \beta_{3}x_{3}^{2} + \beta_{3}x_{3}^{2} + .... + \beta_{n}x_{n}^{2} # \end{equation} # # # \begin{equation} # \hat{y^3} = \beta_{0} +\beta_{1}x_{1}^{3} + \beta_{2}x_{2}^{3} + \beta_{3}x_{3}^{3} + \beta_{3}x_{3}^{3} + .... + \beta_{n}x_{n}^{3} # \end{equation} # # \begin{equation} # .................. # \end{equation} # # \begin{equation} # .................. 
# \end{equation} # # \begin{equation} # \hat{y^m} = \beta_{0} +\beta_{1}x_{1}^{m} + \beta_{2}x_{2}^{m} + \beta_{3}x_{3}^{m} + \beta_{3}x_{3}^{m} + .... + \beta_{n}x_{n}^{m} # \end{equation} # ### In matrix form # \begin{equation} # \begin{bmatrix} \hat{y}^{1} \\ \hat{y}^{2} \\ \hat{y}^{3} \\ .. \\.. \\ \hat{y}^{m} \end{bmatrix} = # \begin{bmatrix} x_{1}^{1} & x_{2}^{1} & x_{3}^{1} & .... & x_{n}^{1} # \\ x_{1}^{2} & x_{2}^{2} & x_{3}^{2} & .... & x_{n}^{2} # \\ x_{1}^{3} & x_{2}^{3} & x_{3}^{3} & .... & x_{n}^{3} # \\.... &.... & ... &.... &.... # \\.... &.... & ... &.... &.... # \\ x_{1}^{m} & x_{2}^{m} & x_{3}^{m} & .... & x_{n}^{m} # \end{bmatrix} # * # \begin{bmatrix} \beta_{1} \\ \beta_{2} \\ \beta_{3} \\ .. \\.. \\ \beta_{n} \end{bmatrix} # + \beta_{0} # \end{equation} # \begin{equation} \hat{y} = X.\beta^{T} + \beta_{0} \end{equation} # #### Equivalent numpy implementation is: # `y_hat = np.dot(X, B.T) + b` # Where # $$ B = \beta $$ # $$ b = \beta_{0}$$ # ### The Mean Squared Error cost function is: # $$ J(\beta, \beta_{0}) = \frac{1}{2m}\sum_{n=1}^{m} (y^{(i)} - \hat{y}^{(i)})^{2} $$ # #### Equivalent numpy implementation is: # `cost = (1/(2*m))*np.sum((y-y_hat)**2)` # ### Just for confirmation let us take an example # | y | $\hat{y}$ | (y - $\hat{y}$)^2 | # |---|---------|-----------------| # | 2 | 1 | 1 | # | 4 | 2 | 4 | # | 6 | 3 | 9 | # | | | 14 | # # \begin{equation} 14/3 = 4.67 \end{equation} # + pycharm={"is_executing": false} import numpy as np y=np.array([[2], [4], [6]]) y_hat=np.array([[1], [2], [3]]) # + pycharm={"is_executing": false} np.sum((y-y_hat)**2)/3 # - # ### Now lets calculate the gradient descent # ### We have the loss function defined as # $$ J(\beta, \beta_{0}) = \frac{1}{2m}\sum_{n=1}^{m} (y - \hat{y})^{2} $$ # where: $$ \hat{y} = \beta_{0} + X*\beta^{T} $$ # So: # $$ \frac {\partial J(\beta, \beta_{0})}{\partial \beta_{0}} = # \frac{-1}{m}\sum_{n=1}^{m}(y-\hat{y}) $$ # Again: $$ J(\beta, \beta_{0}) = \frac{1}{2m}\sum_{n=1}^{m} (y - 
\hat{y})^{2} $$ # $$ \frac {\partial J(\beta, \beta_{0})}{\partial \beta} = \frac{-1}{m}(y - \hat{y}) * \frac{\partial (\beta_{0} + X*\beta^{T})}{\partial \beta} $$ # $$ \frac {\partial J(\beta, \beta_{0})}{\partial \beta} = # \frac{-1}{m}(y-\hat{y})*X * \frac{\partial \beta^{T}}{\partial \beta} $$ # where $$ \beta = \begin{bmatrix} \beta_{1} & \beta_{2} & \beta_{3} &.... & \beta_{n} \end{bmatrix}$$ # # and # # $$ \beta^{T} = \begin{bmatrix} \beta_{1} \\ \beta_{2} \\ \beta_{3} \\.... \\ \beta_{n} \end{bmatrix}$$ # # Since: $$ \frac{\partial \beta^{T}}{\partial \beta} = I $$ # ### Just a side note for calculation of $$ \frac{\partial \beta^{T}}{\partial \beta} $$ # where $$ \beta = \begin{bmatrix} \beta_{1} & \beta_{2} & \beta_{3} &.... & \beta_{n} \end{bmatrix}$$ # # and # # $$ \beta^{T} = \begin{bmatrix} \beta_{1} \\ \beta_{2} \\ \beta_{3} \\.... \\ \beta_{n} \end{bmatrix}$$ # # $$ \frac{\partial \beta^{T}}{\partial \beta} = \begin{bmatrix} \frac{\partial \beta^{T}}{\partial \beta_{1}} & \frac{\partial \beta^{T}}{\partial \beta_{2}} & \frac{\partial \beta^{T}}{\partial \beta_{3}} & .... & \frac{\partial \beta^{T}}{\partial \beta_{n}}\end{bmatrix} $$ # # # # \begin{equation} # \frac{\partial \beta^{T}}{\partial \beta} = # \begin{bmatrix} # \frac{\partial \beta_{1}}{\partial \beta_{1}} & \frac{\partial \beta_{1}}{\partial \beta_{2}} & \frac{\partial \beta_{1}}{\partial \beta_{3}} & ..&..& \frac{\partial \beta_{1}}{\partial \beta_{n}} # \\ # \frac{\partial \beta_{2}}{\partial \beta_{1}} & \frac{\partial \beta_{2}}{\partial \beta_{2}} & \frac{\partial \beta_{2}}{\partial \beta_{3}} &..&..& \frac{\partial \beta_{2}}{\partial \beta_{n}} # \\ # \frac{\partial \beta_{2}}{\partial \beta_{1}} & \frac{\partial \beta_{3}}{\partial \beta_{2}} & \frac{\partial \beta_{3}}{\partial \beta_{3}} &..&..& \frac{\partial \beta_{3}}{\partial \beta_{n}} # \\ # .. & .. & .. & .. & .. &.. # \\ # .. & .. & .. & .. & .. &.. 
# \\ # \frac{\partial \beta_{n}}{\partial \beta_{1}} & \frac{\partial \beta_{n}}{\partial \beta_{2}} & \frac{\partial \beta_{n}}{\partial \beta_{3}} & ..&..& \frac{\partial \beta_{n}}{\partial \beta_{n}} # \end{bmatrix} # \end{equation} # # # \begin{equation} # \frac{\partial \beta^{T}}{\partial \beta} = # \begin{bmatrix} # 1 & 0 & 0 & ..&..& 0 # \\ # 0 & 1 & 0 &..&..& 0 # \\ # 0 & 0 & 1 &..&..& 0 # \\ # .. & .. & .. & .. & .. &.. # \\ # .. & .. & .. & .. & .. &.. # \\ # 0 & 0 & 0 & ..&..& 1 # \end{bmatrix} # \end{equation} # # # i.e $$ \frac{\partial \beta^{T}}{\partial \beta} = I_{n*n} $$ # Since: $$ X_{m*n} * I_{n*n} = X $$ # So: # $$ \frac {\partial J(\beta, \beta_{0})}{\partial \beta} = # \frac{-1}{m}(y-\hat{y})*X $$ # ### Shape of $$ (y-\hat{y}) $$ `m rows and 1 cols [m, 1]` # # ### Shape of `X is [m, n] ie m observations and n features` # # ### Required shape of `dB is [1, n] ` # # ### [1, m] * [m, n] == [1, n] # ## Equivalent Numpy Implementation is: # `dB = (-1/m)* np.dot((y-y_hat).T, X)` # <br/> # `db = (-1/m)*np.sum(y-y_hat)` # + pycharm={"is_executing": false, "name": "#%%\n"} def propagate(B, b, X, Y): """ params: B: weights of size [1, X.shape[1]] b: bias X: matrix of observations and features size [X.shape[0], X.shape[1]] Y: matrix of actual observation size [Y.shape[0], 1] returns: grads: dict of gradients, dB of shape same as B and db of shape [1, 1]. 
cost: MSE cost """ ## m is no of observations ie rows of X m = X.shape[0] #Calculate hypothesis y_hat = np.dot(X, B.T) + b y = Y.values.reshape(Y.shape[0],1) #Compute Cost cost = (1/(2*m))*np.sum((y-y_hat)**2) # BACKWARD PROPAGATION (TO FIND GRAD) dB = (-1/m)* np.dot((y-y_hat).T, X) db = -np.sum(y-y_hat)/m grads = {"dB": dB, "db": db} return grads, cost # + pycharm={"is_executing": false} def optimize(B, b, X, Y, num_iterations, learning_rate): """ params: B: weights of size [1, X.shape[1]] b: bias X: matrix of observations and features size [X.shape[0], X.shape[1]] Y: matrix of actual observation size [Y.shape[0], 1] num_iterations: number of iterations learning_rate: learning rate returns: params: parameters B of shape [1, X.shape[1]] and bias grads: dict of gradients, dB of shape same as B and db costs: MSE cost """ costs = [] for i in range(num_iterations): # Cost and gradient calculation call function propagate grads, cost = propagate(B,b,X,Y) # Retrieve derivatives from grads dB = grads["dB"] db = grads["db"] # update parameters B = B - learning_rate * dB b = b - learning_rate * db costs.append(cost) params = {"B": B, "b": b} grads = {"dB": dB, "db": db} return params, grads, costs # + pycharm={"is_executing": false} def predict(B, b, X): """:param B: weights b: bias X: matrix of observations and features """ # Compute predictions for X Y_prediction = np.dot(X, B.T) + b return Y_prediction # + pycharm={"is_executing": false} def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5): """ params: X_train: X_train Y_train: Y_train X_test: X_test Y_test: Y_test returns: d: dictionary """ # initialize parameters with zeros B = np.zeros(shape=(1, X_train.shape[1])) b = 0 # Gradient descent parameters, grads, costs = optimize(B, b, X_train, Y_train, num_iterations, learning_rate) # Retrieve parameters w and b from dictionary "parameters" B = parameters["B"] b = parameters["b"] # Predict test/train set examples Y_prediction_test = 
predict(B, b, X_test) Y_prediction_train = predict(B, b, X_train) Y_train = Y_train.values.reshape(Y_train.shape[0], 1) Y_test = Y_test.values.reshape(Y_test.shape[0], 1) # Print train/test Errors print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100)) print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100)) d = {"costs": costs, "Y_prediction_test": Y_prediction_test, "Y_prediction_train" : Y_prediction_train, "B" : B, "b" : b, "learning_rate" : learning_rate, "num_iterations": num_iterations} return d # + pycharm={"is_executing": false} import pandas as pd df = pd.read_csv('../datasets/USA_Housing.csv') # + pycharm={"is_executing": false} df # + pycharm={"is_executing": false} df.drop(['Address'],axis=1,inplace=True) # + pycharm={"is_executing": false} df.head() # + pycharm={"is_executing": false} df.info() # + pycharm={"is_executing": false} #normalization of column values df_norm = (df - df.mean()) / (df.max() - df.min()) # Putting feature variable to X X = df_norm[['Avg. Area Income','Avg. Area House Age','Avg. Area Number of Rooms','Avg. Area Number of Bedrooms','Area Population']] # Putting response variable to y y = df_norm['Price'] #random_state is the seed used by the random number generator, it can be any integer. 
from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7 ,test_size = 0.3, random_state=2) # + pycharm={"is_executing": false} df_norm.head() # + pycharm={"is_executing": false} X_train.shape # + pycharm={"is_executing": false} X_train.head() # + pycharm={"is_executing": false} y_train.head() # + pycharm={"is_executing": false} y_test.shape[0] # + pycharm={"is_executing": false} model1 = model(X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test, num_iterations = 1000, learning_rate = 0.001) model2 = model(X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test, num_iterations = 1000, learning_rate = 0.01) model3 = model(X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test, num_iterations = 1000, learning_rate = 0.1) model4 = model(X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test, num_iterations = 1000, learning_rate = 0.3) # + pycharm={"is_executing": false} import matplotlib.pyplot as plt plt.plot([i for i in range(1000)], model1['costs']) plt.plot([i for i in range(1000)], model2['costs']) plt.plot([i for i in range(1000)], model3['costs']) plt.plot([i for i in range(1000)], model4['costs']) plt.gca().legend(('alpha 0.001','alpha 0.01', 'alpha 0.1', 'alpha 0.3')) plt.show() # + pycharm={"is_executing": false, "name": "#%%\n"} # model4 = model(X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test, num_iterations = 10, learning_rate = 0.01) # model5 = model(X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test, num_iterations = 100, learning_rate = 0.01) # model6 = model(X_train=X_train, Y_train=y_train, X_test=X_test, Y_test=y_test, num_iterations = 1000, learning_rate = 0.01)
notebooks/Multivariate-Linear-Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # two types of arguments: # - postional, listed first, must be listed # - keyword (= kwarg), are optional (default variables, e.g. all should have the same color) class Circle(): #positional must come is_shape = True #default variable color def __init__(self, radius, color='red'): self.radius = radius self.color = color little_red = Circle(23) print(little_red.color) print(little_red.radius)
01_Workshop-master/Chapter05/Keyword Arguments.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Using Container classes # # `ctapipe.core.Container` is the base class for all event-wise data classes in ctapipe. It works like a object-relational mapper, in that it defines a set of `Fields` along with their metadata (description, unit, default), which can be later translated automatially into an output table using a `ctapipe.io.TableWriter`. from ctapipe.core import Container, Field, Map import numpy as np from astropy import units as u # Let's define a few example containers with some dummy fields in them: # + class SubContainer(Container): junk = Field("nothing","Some junk") value = Field(0.0, "some value", unit=u.deg) class EventContainer(Container): event_id = Field(-1,"event id number") tels_with_data = Field([], "list of telescopes with data") sub = Field(SubContainer(), "stuff") # a sub-container in the hierarchy # for dicts of sub-containers, use Map instead # of a dict() as the default value to support serialization tel = Field(Map(), "telescopes") # - # ## Basic features ev = EventContainer() # Check that default values are automatically filled in print(ev.event_id) print(ev.tels_with_data) print(ev.tel) # print the json representation print(ev) # values can be set as normal for a class: ev.event_id = 100 ev.event_id ev.as_dict() # by default only shows the bare items, not sub-containers (See later) ev.as_dict(recursive=True) # Now, let's define a sub-container that we can add per telescope: # + class TelContainer(Container): tel_id = Field(-1, "telescope ID number") image = Field(np.zeros(10), "camera pixel data") # - # and we can add a few of these to the parent container inside the tel dict: ev.tel[10] = TelContainer() ev.tel[5] = TelContainer() ev.tel[42] = TelContainer() ev.tel # ## Converion to dictionaries ev.as_dict() 
ev.as_dict(recursive=True, flatten=False) # for serialization to a table, we can even flatten the output into a single set of columns ev.as_dict(recursive=True, flatten=True) # ## Setting and clearing values ev.tel[5].image[:] = 9 print(ev) ev.reset() ev.as_dict(recursive=True, flatten=True) # ## look at a pre-defined Container from ctapipe.containers import MCEventContainer ev = MCEventContainer() ev
docs/examples/containers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="DH-dWGjDN1mn" pip install visualkeras # + id="YLUPb1iKSjD9" executionInfo={"status": "ok", "timestamp": 1636271534594, "user_tz": -540, "elapsed": 611, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} # Import libraries import sys, glob, os import numpy as np import scipy as sp import pandas as pd from csv import reader, writer import joblib import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt from tensorflow.keras.models import Sequential, model_from_json from tensorflow.keras.layers import Activation, Input, Dense, Dropout, Conv2D, Conv2DTranspose, BatchNormalization, UpSampling2D, Reshape, LeakyReLU from tensorflow.keras import optimizers, regularizers from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import visualkeras # + id="A2DzTQSvSuW4" executionInfo={"status": "ok", "timestamp": 1636254877773, "user_tz": -540, "elapsed": 1, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} # Activiate GPU physical_devices = tf.config.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(physical_devices[0], True) # + id="DYmj8ewKa-4T" executionInfo={"status": "ok", "timestamp": 1636254878583, "user_tz": -540, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} # current working directory cwd = '/content/drive/MyDrive/Daylight Autonomy/' # + [markdown] id="d-mDnrVtpiv3" # # Data setup # + 
id="81UUW7I7_Jjf" executionInfo={"status": "ok", "timestamp": 1636265411082, "user_tz": -540, "elapsed": 1316, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} # load data from data folder files = glob.glob(os.path.join(cwd, "data", "*.txt")) dfs = [pd.read_csv(f, header=None, sep=";") for f in files] df = pd.concat(dfs,ignore_index=True) # + id="EvNOv7fPTKmy" executionInfo={"status": "ok", "timestamp": 1636265623776, "user_tz": -540, "elapsed": 425, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} # Clean data headers = list(df.iloc[0,0:9]) data = df.iloc[:,10:18].copy() data.columns = headers[0:8] target = df.iloc[:,-930:].copy() target.columns = range(len(target.columns)) target = np.array(target).reshape(len(df),30,31) # + id="uqd32JZIUQDg" executionInfo={"status": "ok", "timestamp": 1636265629002, "user_tz": -540, "elapsed": 386, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} # Split data to train, validate, test set (60% training, 20% validation, 20% test split) X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=49) X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=98) # + [markdown] id="_Gva3ih2pWfA" # ##### backup hyperparameters # + id="RLMX4XP-LL1h" # Space for hyperparameters of previous runs """ model.add(Conv2DTranspose(filters=128, kernel_size=4,strides=(2,1))) model.add(Conv2DTranspose(filters=64, kernel_size=2,strides=(2,2))) model.add(Conv2DTranspose(filters=32, kernel_size=(3,2),strides=(1,1))) model.add(Conv2DTranspose(filters=16, kernel_size=3,strides=(2,1))) model.add(Conv2DTranspose(filters=8, kernel_size=4,strides=(1,1))) model.add(Conv2DTranspose(filters=4, kernel_size=(7,4),strides=(1,1))) 
model.add(Conv2DTranspose(filters=1, kernel_size=1, padding="same")) """ """ model.add(Conv2DTranspose(filters=128, kernel_size=(2,2), strides=(2,1))) model.add(Conv2DTranspose(filters=64, kernel_size=(3,2), strides=(2,1))) model.add(Conv2DTranspose(filters=32, kernel_size=(2,2), strides=(2,1))) model.add(Conv2DTranspose(filters=16, kernel_size=12, strides=(1,1))) model.add(Conv2DTranspose(filters=4, kernel_size=6, strides=(1,1))) model.add(Conv2DTranspose(filters=2, kernel_size=5, strides=(1,1))) model.add(Conv2DTranspose(filters=1, kernel_size=1, padding="same")) """ """ model.add(Conv2DTranspose(filters=64, kernel_size=4,strides=(2,1))) model.add(Dropout(rate=0.2)) model.add(Conv2DTranspose(filters=32, kernel_size=4,strides=(2,1))) model.add(Dropout(rate=0.2)) model.add(Conv2DTranspose(filters=32, kernel_size=4,strides=(1,1))) model.add(Dropout(rate=0.2)) model.add(Conv2DTranspose(filters=32, kernel_size=4,strides=(1,1))) model.add(Dropout(rate=0.2)) model.add(Conv2DTranspose(filters=32, kernel_size=5,strides=(1,1))) model.add(Dropout(rate=0.2)) model.add(Conv2DTranspose(filters=32, kernel_size=(7,6),strides=(1,1))) model.add(Dropout(rate=0.2)) model.add(Conv2DTranspose(filters=32, kernel_size=(5,3),strides=(1,1))) model.add(Dropout(rate=0.2)) model.add(Conv2DTranspose(filters=1, kernel_size=1, padding="same")) """ """ model.add(Dense(32 * 1 * 8, kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.001), input_dim=8)) model.add(Activation('selu')) model.add(Dense(64 * 1 * 8, kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.001))) model.add(Activation('selu')) model.add(Dense(128 * 1 * 8, kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.001))) model.add(Activation('selu')) """ """ model.add(Dense(32 * 1 * 8, input_dim=8, activation='relu')) model.add(Dense(64 * 1 * 8, activation='relu')) model.add(Dense(128 * 1 * 8, activation='relu')) """ # + [markdown] id="BGEP2rAOpcnh" # # Model and training # + 
colab={"base_uri": "https://localhost:8080/"} id="pWr8lFKmU2Di" executionInfo={"status": "ok", "timestamp": 1636276382844, "user_tz": -540, "elapsed": 584, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="c6e822d6-2ef5-4f3e-c664-5024aed52e74" # Clear keras workspace for new model keras.backend.clear_session() # Deconvolutional Neural Network Model (DCNN) for generating daylight autonomy model = Sequential() ### DNN model to analysis the input parameters model.add(Dense(32 * 1 * 8, input_dim=8, activation='relu')) model.add(Dense(64 * 1 * 8, activation='relu')) model.add(Dense(128 * 1 * 8, activation='relu')) ### Deconvolution to generate image model.add(Reshape((1, 8, 128))) model.add(Conv2DTranspose(filters=128, kernel_size=(2,2), strides=(2,1))) model.add(BatchNormalization()) model.add(LeakyReLU()) model.add(Conv2DTranspose(filters=64, kernel_size=(3,2), strides=(2,1))) model.add(BatchNormalization()) model.add(LeakyReLU()) model.add(Conv2DTranspose(filters=32, kernel_size=(2,2), strides=(2,1))) model.add(BatchNormalization()) model.add(LeakyReLU()) model.add(Conv2DTranspose(filters=16, kernel_size=6, strides=(1,1))) model.add(BatchNormalization()) model.add(LeakyReLU()) model.add(Conv2DTranspose(filters=16, kernel_size=6, strides=(1,1))) model.add(BatchNormalization()) model.add(LeakyReLU()) model.add(Conv2DTranspose(filters=4, kernel_size=6, strides=(1,1))) model.add(BatchNormalization()) model.add(LeakyReLU()) model.add(Conv2DTranspose(filters=2, kernel_size=6, strides=(1,1))) model.add(BatchNormalization()) model.add(LeakyReLU()) model.add(Conv2DTranspose(filters=1, kernel_size=1, padding="same")) # optimizer model.compile(optimizer=optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999), loss="mse") model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="T_NzaA55U5eO" executionInfo={"status": "ok", "timestamp": 1636276764283, "user_tz": 
-540, "elapsed": 377505, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="fdf0956a-4804-4231-8f57-cbfcf9896cd2" history = model.fit( X_train, y_train, epochs = 1500, batch_size = 8, validation_data = (X_val, y_val), callbacks = [ EarlyStopping(monitor='val_loss', patience=150, mode='min'), ModelCheckpoint(filepath = 'CNN2D.ckpt', save_weights_only=True, monitor='val_loss', save_best_only=True) ] ) # + colab={"base_uri": "https://localhost:8080/", "height": 265} id="DM3IDrJS1GQF" executionInfo={"status": "ok", "timestamp": 1636276771549, "user_tz": -540, "elapsed": 7275, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="78df534d-f63a-441e-ca35-5b994715e520" plt.plot(history.history["loss"], label="Training Loss") plt.plot(history.history["val_loss"], label="Validation Loss") plt.legend() plt.show() # + id="RxbtX7h0GQAO" executionInfo={"status": "ok", "timestamp": 1636276786797, "user_tz": -540, "elapsed": 493, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} model.load_weights('CNN2D.ckpt'); # + colab={"base_uri": "https://localhost:8080/"} id="jSdpDbX-VBJ6" executionInfo={"status": "ok", "timestamp": 1636276787330, "user_tz": -540, "elapsed": 3, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="652f4d9f-031b-4a98-abc5-92b604b4abab" y_pred = model.predict(X_test) np.sqrt(mean_squared_error(y_test.reshape(-1, 30 * 31), y_pred.reshape(-1, 30 * 31))) # + id="y-OO0CfYVE7b" executionInfo={"status": "ok", "timestamp": 1636276828977, "user_tz": -540, "elapsed": 507, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} 
k = 71 # + colab={"base_uri": "https://localhost:8080/"} id="OVpBb4kfdtsA" executionInfo={"status": "ok", "timestamp": 1636276829633, "user_tz": -540, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="39dc7b9c-4940-466c-cbd2-cc61b62dae92" X_test.iloc[k] # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="PnWmsHt7YC_e" executionInfo={"status": "ok", "timestamp": 1636276829634, "user_tz": -540, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="3a773c23-4494-46ac-ee36-75c31a266b88" plt.imshow(y_test[k]) # + colab={"base_uri": "https://localhost:8080/", "height": 283} id="BQoiOyUBYx9U" executionInfo={"status": "ok", "timestamp": 1636276832458, "user_tz": -540, "elapsed": 407, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="b2f157ab-bd82-4c8a-debb-62d7c2937794" plt.imshow(y_pred[k].reshape(30,-1)) # + colab={"base_uri": "https://localhost:8080/"} id="gVJTWxvIVrYj" executionInfo={"status": "ok", "timestamp": 1636276796370, "user_tz": -540, "elapsed": 3, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="ae0ef3c4-3e7f-4293-b3d5-369ee3d88401" np.sqrt(mean_squared_error(y_test[k],y_pred[k].reshape(30,-1))) # + id="qzknfcOSD-Nw" executionInfo={"status": "ok", "timestamp": 1636276806062, "user_tz": -540, "elapsed": 399, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} results = [np.sqrt(mean_squared_error(y_test[k],y_pred[k].reshape(30,-1))) for k in range(len(y_test))] # + colab={"base_uri": "https://localhost:8080/"} id="VC-IEVCgEu_c" executionInfo={"status": 
"ok", "timestamp": 1636276808037, "user_tz": -540, "elapsed": 379, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="ed0f0192-9831-4cd5-df9b-9aa4465d4c69" # best result results.index(min(results)) # + colab={"base_uri": "https://localhost:8080/"} id="hc0q773eEwnt" executionInfo={"status": "ok", "timestamp": 1636276808441, "user_tz": -540, "elapsed": 3, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="161aeaf1-6fec-4a79-dc1c-d8eb0aef6e99" # worst result results.index(max(results)) # + [markdown] id="TQkcy8Ixh8At" # # Save model # + id="tMhwxRAVFHx2" executionInfo={"status": "ok", "timestamp": 1636273942215, "user_tz": -540, "elapsed": 998, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} json = os.path.join(cwd, 'models/DA_CNN.json') hdf5 = os.path.join(cwd, 'models/DA_CNN.h5') model_json = model.to_json() with open(json,'w') as json_file: json_file.write(model_json) model.save_weights(hdf5) # + colab={"base_uri": "https://localhost:8080/", "height": 177} id="80b6SKrSJslR" executionInfo={"status": "ok", "timestamp": 1636274563258, "user_tz": -540, "elapsed": 474, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="610acaf8-120a-4e63-a13b-62a63da8a168" visualkeras.layered_view(model) # + [markdown] id="2ueS8PAepNj_" # # Test for random input # + id="7xTXxKln1Mos" executionInfo={"status": "ok", "timestamp": 1636274730084, "user_tz": -540, "elapsed": 380, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} test_case = np.array([4.7,8,6,9.52,2.87,1.25,45,45]) test_pred = model.predict(np.expand_dims(test_case, 
axis=0))[0].reshape(1,-1) # + colab={"base_uri": "https://localhost:8080/", "height": 266} id="LM77ZAbsJ8wJ" executionInfo={"status": "ok", "timestamp": 1636275140852, "user_tz": -540, "elapsed": 576, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}} outputId="9af01eec-7195-4a66-eb6c-842ff5f7b934" plt.imshow(test_pred.reshape(30,-1)) plt.gca().invert_xaxis() # + id="KAZFJcuDdNIg" executionInfo={"status": "ok", "timestamp": 1636274607908, "user_tz": -540, "elapsed": 2, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "14209840916304094999"}}
tensorflow_model/archive/Daylight_Autonomy_Array.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 1 # ## Overview # # Welcome to your first (or perhaps second) lab! Labs are designed to be your opportunity to experiment with Python and gain hands-on experience with the language. # # The primary goal of the first half is to ensure that your Python installation process went smoothly, and that there are no lingering Python installation bugs floating around. # # The second half focuses more on using data structures to solve some interesting problems. # # You're welcome to work in groups or individually. # # **Note: These labs are *designed* to be long! You shouldn't be able to finish all the problems in one class period. Work through as much as you can in the time allotted, but also feel free to skip from question to question freely. The extra problems are intended to be extra practice, if you want to hone your Python skills even more.** # # Above all, have fun playing with Python! Enjoy. # # ### Running this notebook # # To run a Jupyter notebook, first change directories (`cd`) to wherever you've downloaded the notebook. Then, from a command line run: # # ```bash # $ jupyter lab # ``` # # You might need to `pip install jupyterlab` from within if you haven't yet. # ## Zen of Python # # Run the following code cell by selecting the cell and pressing Shift+Enter. import this # ## Hello World # # Edit the following cell so that it prints `"Hello, world!"` when executed, and then run the cell to confirm. print("Hello, world!") # ## Warming Up # ### Fizz, Buzz, FizzBuzz! # If we list all of the natural numbers under 41 that are a multiple of 3 or 5, we get # # ``` # 3, 5, 6, 9, 10, 12, 15, # 18, 20, 21, 24, 25, 27, 30, # 33, 35, 36, 39, 40 # ``` # # The sum of these numbers is 408. # # Find the sum of all the multiples of 3 or 5 below 1001. 
# As a sanity check, the last two digits of the sum should be `68`.

# def fizzbuzz(n):
#     """Returns the sum of all numbers less than n divisible by 3 or 5."""
#     total_sum = 0
#     for i in range(1,n):
#         remainder_3 = i % 3
#         remainder_5 = i % 5
#         if remainder_3 == 0 or remainder_5 == 0:
#             total_sum += i
#
#     return total_sum
#
# print(fizzbuzz(41)) # => 408
# print(fizzbuzz(1001))

# ### Collatz Sequence
# Depending on from whom you took math class, you may have seen this problem before.
#
# The *Collatz sequence* is an iterative sequence defined on the positive integers by:
#
# ```
# n -> n / 2    if n is even
# n -> 3n + 1   if n is odd
# ```
#
# For example, using the rule above and starting with 13 yields the sequence:
#
# ```
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
# ```
#
# It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although unproven, it is hypothesized that all starting numbers finish at 1.
#
# What is the length of the longest chain which has a starting number under 1000?
#
# *NOTE: Once the chain starts the terms are allowed to go above one thousand.*
#
# Challenge: Same question, but for any starting number under 1,000,000. What about for any starting number under 10,000,000? You may need to implement a cleverer-than-naive algorithm.

# +
def collatz_len(n):
    """Compute the length of the Collatz sequence starting at n.

    Counts terms with a simple counter instead of materializing the whole
    chain as a tuple (the original also printed the growing tuple on every
    iteration — debug leftover). Uses floor division `//` so every term
    stays an int; `n / 2` would silently turn the chain into floats.
    """
    length = 1  # n itself is the first term of the sequence
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length

def max_collatz_len(n):
    """Computes the longest Collatz sequence length for starting numbers less than n"""
    pass

print(collatz_len(13)) # => 10
print(max_collatz_len(1000))

# Challenge: Only attempt to solve these if you feel very comfortable with this material.
print(max_collatz_len(1000000))
print(max_collatz_len(100000000))
# -

# ### Fahrenheit-to-Celsius converter
# Write a program to convert degrees Fahrenheit to degrees Celsius by (1) asking the user for a number (not necessarily integral) representing the current temperature in degrees Fahrenheit, (2) converting that value into the equivalent degrees Celsius, and (3) printing the final equivalent value.
#
# For example, your program should be able to emulate the following sample run:
#
# ```
# Temperature F? 212
# It is 100.0 degrees Celsius.
#
# Temperature F? 98.6
# It is 37.0 degrees Celsius.
#
# Temperature F? 10
# It is -12.222222222222221 degrees Celsius.
# ```
#
# Want to be fancy (challenge)? Try to print the final temperature to two decimal places. For example, in the last case above we would print `-12.22` instead of `-12.222222222222221`. *Hint: Take a look at the [`round()`](https://docs.python.org/3/library/functions.html#round) function. Isn't Python great?*

# +
def convert_fahr_to_cels(deg_fahr):
    """Convert a temperature given in degrees Fahrenheit to degrees Celsius."""
    pass

def convert():
    """Ask the user for a temperature in degrees Fahrenheit, and print out
    the temperature in degrees Celsius."""
    pass
# -

# ## Bonus Challenges
#
# Don't worry about doing these bonus problems. In most cases, bonus questions ask you to think more critically or use more advanced algorithms.
#
# In this case, we'll investigate advanced forms of printing and then reflect on Pythonic design.

# ### Zen Printing
#
# Write a program using `print()` that, when run, prints out a tic-tac-toe board.
#
# ```
# X | . | .
# -----------
# . | O | .
# -----------
# . | O | X
# ```
#
# You may find the optional arguments to `print()` useful, which you can read about [here](https://docs.python.org/3/library/functions.html#print). In no more than five minutes, try to use these optional arguments to print out this particular tic-tac-toe board.
# Print a tic-tac-toe board using optional arguments. def print_tictactoe(): """Print out a specific partially-filled tic-tac-toe board.""" pass # Maybe you were able to print out the tic-tac-toe board. Maybe not. In the five minutes you've been working on that, I've gotten bored with normal tic-tac-toe (too many ties!) so now, I want to play SUPER tic-tac-toe. # # Write a program that prints out a SUPER tic-tac-toe board. # # ``` # | | H | | H | | # --+--+--H--+--+--H--+--+-- # | | H | | H | | # --+--+--H--+--+--H--+--+-- # | | H | | H | | # ========+========+======== # | | H | | H | | # --+--+--H--+--+--H--+--+-- # | | H | | H | | # --+--+--H--+--+--H--+--+-- # | | H | | H | | # ========+========+======== # | | H | | H | | # --+--+--H--+--+--H--+--+-- # | | H | | H | | # --+--+--H--+--+--H--+--+-- # | | H | | H | | # ``` # # You'll find that there might be many ways to solve this problem. Which do you think is the most 'pythonic?' Talk to someone next to you about your approach to this problem. Remember the Zen of Python! def print_super_tictactoe(): """Print an empty SUPER tic-tac-toe board.""" pass # ## Investigating Data Structures # # *[Optional Reading on Standard Types - check out Sequence types and Mapping types](https://docs.python.org/3/library/stdtypes.html)* # ### Lists # Predict what the following lines of Python will do. Then, run the code block below to see if they match what you expect: # # ```Python # s = [0] * 3 # print(s) # s[0] += 1 # print(s) # # s = [''] * 3 # print(s) # s[0] += 'a' # print(s) # # s = [[]] * 3 # print(s) # s[0] += [1] # print(s) # ``` # + # Explore the elements of lists. Is the output what you expect? s = [0] * 3 print(s) s[0] += 1 print(s) s = [''] * 3 print(s) s[0] += 'a' print(s) s = [[]] * 3 print(s) s[0] += [1] print(s) # - # Why is this happening? Consider using the `id` function to investigate further. What happens when we replace the second-to-last line with `s[0] = s[0] + [1]`? 
# What if we replace the line with `s[0].append(1)`?

# Write a function to verify `if` a list is ordered with increasing numbers ($\leqslant$).
#
# You can assume that the list contains only numbers that can be ordered.

# +
def ordered_list(L):
    """Verify if a given list L is ordered non-decreasingly (<=).

    Equal neighbors are allowed (comparison uses strict `<` to reject
    only actual decreases). Empty and one-element lists count as ordered.
    """
    for i in range(1,len(L)):
        if L[i] < L[i-1]:
            return False
    return True

print(ordered_list([10000,12,3,2,5,6])) # => False
print(ordered_list([2,3,5,6,12,10000])) # => True
# -

# #### Matrices
# Write a function to "pretty print" a matrix `M`, taken as parameter. You can use `print(e, end='\t')` to print an element that ends with a `tab` character.
#
# You can assume that the matrix is well-defined.
#
# ```python
# >>> M = [[10000,12],[3,2],[5,6]]
# >>> pretty_print_matrix(M)
# >>> ...
# 10000	12
# 3	2
# 5	6
# ...
# ```

# +
def pretty_print_matrix(M):
    """Pretty print a matrix M: one line per row, columns separated by tabs."""
    rows = len(M)
    columns = len(M[0])
    for i in range(rows):
        for j in range(columns):
            print(M[i][j], end='\t')
        print()  # newline after each row

pretty_print_matrix([[10000,12],[3,2],[5,6]])
# => 10000	12
#    3	2
#    5	6

# it is possible to print chars too, have a look!
print("\nA Matrix made of chars:")
pretty_print_matrix([['10000','12'],["3","2"],['hello', 'world']])
# -

# Write a function to compute the transpose of a matrix `M`, taken as parameter.

# +
def tranpose_matrix(M):
    """Compute the transpose of a matrix M (returns a new matrix).

    NOTE(review): the name is misspelled ("tranpose") but is kept as-is so
    existing calls keep working.
    """
    MT = []
    for row in range(len(M[0])):  # rows(MT) == columns(M)
        MT.append([])  # each row in MT starts as an empty list
        for col in range(len(M)):  # columns(MT) == rows(M)
            MT[row].append(M[col][row])
    return MT

M = [[1,2], [3,4], [5,6]]
print(M)
pretty_print_matrix(M)
print(tranpose_matrix(M)) # => [[1, 3, 5], [2, 4, 6]]
pretty_print_matrix(tranpose_matrix(M))
# -

# #### Recursion on lists
# Write a recursive function ListaFibonacci(n) that returns a list with the sequence from Fibonacci(0) to Fibonacci(n), inclusive. Do not use auxiliary lists or helper functions.
# E.g.: ListaFibonacci(10) will return:
# +
def fibonacci(n):
    r"""
    Compute the Fibonacci sequence: return the list
    [fibonacci(0), fibonacci(1), ..., fibonacci(n)], inclusive.

    Remember that:
                       ---  0                                    if n == 0
                      /
    fibonacci(n) =   -----  1                                    if n == 1
                      \
                       ---  fibonacci(n - 1) + fibonacci(n - 2)  if n > 1

    For a negative n the sequence is empty.

    Fix: the original base cases returned [] for n == 0 and [1] for n == 1,
    contradicting this docstring (F(0) = 0) and the printed expectations.
    """
    if n < 0:
        return []        # no terms requested
    if n == 0:
        return [0]       # just fibonacci(0)
    if n == 1:
        return [0, 1]    # fibonacci(0) and fibonacci(1)
    tmp = fibonacci(n - 1)
    # extend the shorter sequence with the sum of its last two terms
    return tmp + [tmp[-1] + tmp[-2]]

print(fibonacci(-1)) # => []
print(fibonacci(0))  # => [0]
print(fibonacci(1))  # => [0, 1]
print(fibonacci(2))  # => [0, 1, 1]
print(fibonacci(10)) # => [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
#print(fibonacci(2961)) # => close to the default recursion limit
# -

# ### Tuples
#
# Write a function to compute the [GCD](https://en.wikipedia.org/wiki/Greatest_common_divisor) of two positive integers. You can freely use the fact that `gcd(a, b)` is mathematically equal to `gcd(b, a % b)`, and that `gcd(a, 0) == a`.
#
# You can assume that `a >= b` if you'd like.
#
# It is possible to accomplish this in three lines of Python code (or with extra cleverness, even fewer!). Consider exploiting tuple packing and unpacking!
#
# *Note: The standard library has a `gcd` function. Avoid simply importing that function and using it here - the goal is to practice with tuple packing and unpacking!*

# +
def gcd(a, b):
    """Compute the GCD of two positive integers."""
    pass # Your implementation here

gcd(10, 25) # => 5
gcd(14, 15) # => 1
gcd(3, 9) # => 3
gcd(1, 1) # => 1
# -

# ### Dictionaries (advanced)
# In class, we saw a (naive) implementation of a dictionary comprehension that swaps the keys and values in a dictionary:
#
# ```
# {value: key for key, value in dictionary.items()}
# ```
#
# However, this approach will fail when there are two keys in the dictionary with the same value. Why will it fail?
# # Write a function that properly reverses the keys and values of a dictionary - each key (originally a value) should map to a collection of values (originally keys) that mapped to it. For example, # # ``` # flip_dict({"CA": "US", "NY": "US", "ON": "CA"}) # # => {"US": ["CA", "NY"], "CA": ["ON"]} # ``` # # Note: there is a data structure in the `collections` module from the standard library called `defaultdict` which provides exactly this sort of functionality. You provide it a factory method for creating default values in the dictionary (in this case, a list.) You can read more about `defaultdict` and other `collections` data structures [here](https://docs.python.org/3/library/collections.html). def flip_dict(input_dict): """Reverse the keys and values of a dictionary.""" pass # ### Comprehensions (advanced) # # #### Read # # Predict the output of each of the following list comprehensions. After you have written down your hypothesis, run the code cell to see if you were correct. If you were incorrect, discuss with a partner why Python returns what it does. # # ```Python # [x for x in [1, 2, 3, 4]] # [n - 2 for n in range(10)] # [k % 10 for k in range(41) if k % 3 == 0] # [s.lower() for s in ['PythOn', 'iS', 'cOoL'] if s[0] < s[-1]] # # # Something is fishy here. Can you spot it? # arr = [[3,2,1], ['a','b','c'], [('do',), ['re'], 'mi']] # print([el.append(el[0] * 4) for el in arr]) # What is printed? # print(arr) # What is the content of `arr` at this point? # # [letter for letter in "pYthON" if letter.isupper()] # {len(w) for w in ["its", "the", "remix", "to", "ignition"]} # ``` # + # Predict the output of the following comprehensions. Does the output match what you expect? print([x for x in [1, 2, 3, 4]]) print([n - 2 for n in range(10)]) print([k % 10 for k in range(41) if k % 3 == 0]) print([s.lower() for s in ['PythOn', 'iS', 'cOoL'] if s[0] < s[-1]]) # Something is fishy here. Can you spot it? 
arr = [[3,2,1], ['a','b','c'], [('do',), ['re'], 'mi']] print([el.append(el[0] * 4) for el in arr]) # What is printed? print(arr) # What is the content of `arr` at this point? print([letter for letter in "pYthON" if letter.isupper()]) print({len(w) for w in ["its", "the", "remix", "to", "ignition"]}) # - # #### Write # # Write comprehensions to transform the input data structure into the output data structure: # # ``` # [0, 1, 2, 3] -> [1, 3, 5, 7] # Double and add one # ['apple', 'orange', 'pear'] -> ['A', 'O', 'P'] # Capitalize first letter # ['apple', 'orange', 'pear'] -> ['apple', 'pear'] # Contains a 'p' # # ["TA_sam", "student_poohbear", "TA_guido", "student_htiek"] -> ["sam", "guido"] # ['apple', 'orange', 'pear'] -> [('apple', 5), ('orange', 6), ('pear', 4)] # # ['apple', 'orange', 'pear'] -> {'apple': 5, 'orange': 6, 'pear': 4} # ``` # + nums = [1, 3, 5, 7] fruits = ['apple', 'orange', 'pear'] people = ["TA_sam", "student_poohbear", "TA_guido", "student_htiek"] # Add your comprehensions here! # - # ### Pascal's Triangle # Write a function that generates the next level of [Pascal's triangle](https://en.wikipedia.org/wiki/Pascal%27s_triangle) given a list that represents a row of Pascal’s triangle. # # ``` # generate_pascal_row([1, 2, 1]) -> [1, 3, 3, 1] # generate_pascal_row([1, 4, 6, 4, 1]) -> [1, 5, 10, 10, 5, 1] # generate_pascal_row([]) -> [1] # ``` # # As a reminder, each element in a row of Pascal's triangle is formed by summing the two elements in the previous row directly above (to the left and right) that elements. If there is only one element directly above, we only add that one. For example, the first 5 rows of Pascal's triangle look like: # # ``` # 1 # 1 1 # 1 2 1 # 1 3 3 1 # 1 4 6 4 1 # ``` # # You may find the `zip` function discussed briefly in lecture useful, along with some cleverness. Alternatively, you could solve this problem with `enumerate`. Avoid using a loop of the form `for i in len(range(row)):`. 
# # *Hint: Check out the diagram below. How could you use this insight to help complete this problem?* # # ``` # 0 1 3 3 1 # + 1 3 3 1 0 # ----------- # 1 4 6 4 1 # ``` # + def generate_pascal_row(row): """Generate the next row of Pascal's triangle.""" pass generate_pascal_row([1, 2, 1]) # => [1, 3, 3, 1] generate_pascal_row([1, 4, 6, 4, 1]) # => [1, 5, 10, 10, 5, 1] generate_pascal_row([]) # => [1] # - # #### Pretty Printing Pascal (Optional) # # Given a number `n`, print out the first `n` rows of Pascal's triangle, centering each line. You should use the `generate_pascal_row` function you wrote previously. The Pascal's triangle with 1 row just contains the number `1`. # # To center a string in Python, you can use the `.center(width)` method. For example: # # ``` # "CS".center(10) # => ' CS ' # ``` # # You can even specify an optional `fillchar` to fill with characters other than spaces! # # The hardest part of this problem is determining the width of the bottom row of the triangle. You'll need to generate all rows of the triangle and store them before you can print any of them. def print_pascal_triangle(n): """Print the first n rows of Pascal's triangle.""" pass # ## Special Words # # For each of the following problems, we describe a criterion that makes a word (or phrase!) special, similarly to our "Efficient Words" from lecture. # # If you are using macOS or Linux, you should have a dictionary file available at `/usr/share/dict/words`, a 2.5M text file containing over 200 thousand English words, one per line. However, if you are offline or windows user the file can be found at `https://stanfordpython.com/res/misc/words`, so you can download the dictionary from there. # # What would be an appropriate data structure in which to store the English words? # # Write the method `load_english` to load English words from this file. How many English words are there in this file? 
# + # If you downloaded words from the course website, # change me to the path to the downloaded file. DICTIONARY_FILE = '/usr/share/dict/words' def load_english(): """Load and return a collection of english words from a file.""" pass english = load_english() print(len(english)) # - # ### Triad Phrases # # Triad words are English words for which the two smaller strings you make by extracting alternating letters both form valid words. # # For example: # # ![Triad Phrases](http://i.imgur.com/jGEXJWi.png) # # Write a function to determine whether an entire phrase passed into a function is made of triad words. You can assume that all words are made of only alphabetic characters, and are separated by whitespace. We will consider the empty string to be an invalid English word. # # ``` # is_triad_phrase("learned theorem") # => True # is_triad_phrase("studied theories") # => False # is_triad_phrase("wooded agrarians") # => True # is_triad_phrase("forrested farmers") # => False # is_triad_phrase("schooled oriole") # => True # is_triad_phrase("educated small bird") # => False # is_triad_phrase("a") # => False # is_triad_phrase("") # => False # ``` # # Generate a list of all triad words. How many are there? We found 2770 distinct triad words (case-insensitive). # + def is_triad_word(word, english): """Return whether a word is a triad word.""" pass def is_triad_phrase(phrase, english): """Return whether a phrase is composed of only triad words.""" pass # - # ### Surpassing Phrases (challenge) # # Surpassing words are English words for which the gap between each adjacent pair of letters strictly increases. These gaps are computed without "wrapping around" from Z to A. # # For example: # # ![Surpassing Phrases](http://i.imgur.com/XKiCnUc.png) # # Write a function to determine whether an entire phrase passed into a function is made of surpassing words. You can assume that all words are made of only alphabetic characters, and are separated by whitespace. 
We will consider the empty string and a 1-character string to be valid surpassing phrases. # # ```python # is_surpassing_phrase("superb subway") # => True # is_surpassing_phrase("excellent train") # => False # is_surpassing_phrase("porky hogs") # => True # is_surpassing_phrase("plump pigs") # => False # is_surpassing_phrase("turnip fields") # => True # is_surpassing_phrase("root vegetable lands") # => False # is_surpassing_phrase("a") # => True # is_surpassing_phrase("") # => True # ``` # # We've provided a `character_gap` function that returns the gap between two characters. To understand how it works, you should first learn about the Python functions `ord` (one-character string to integer ordinal) and `chr` (integer ordinal to one-character string). For example: # # ```python # ord('a') # => 97 # chr(97) # => 'a' # ``` # # So, in order to find the gap between `G` and `E`, we compute `abs(ord('G') - ord('E'))`, where `abs` returns the absolute value of its argument. # # Generate a list of all surpassing words. How many are there? We found 1931 distinct surpassing words. # + def character_gap(ch1, ch2): """Return the absolute gap between two characters.""" return abs(ord(ch1) - ord(ch2)) def is_surpassing_word(word): """Return whether a word is surpassing.""" pass def is_surpassing_phrase(word): """Return whether a word is surpassing.""" # - # ### Cyclone Phrases (challenge) # # Cyclone words are English words that have a sequence of characters in alphabetical order when following a cyclic pattern. # # For example: # # ![Cyclone Phrases](http://i.stack.imgur.com/4XBV3.png) # # Write a function that to determine whether an entire phrase passed into a function is made of cyclone words. You can assume that all words are made of only alphabetic characters, and are separated by whitespace. 
# # ``` # is_cyclone_phrase("adjourned") # => True # is_cyclone_phrase("settled") # => False # is_cyclone_phrase("all alone at noon") # => True # is_cyclone_phrase("by myself at twelve pm") # => False # is_cyclone_phrase("acb") # => True # is_cyclone_phrase("") # => True # ``` # # Generate a list of all cyclone words. How many are there? As a sanity check, we found 769 distinct cyclone words. # + def is_cyclone_word(word): """Return whether a word is a cyclone word.""" def is_cyclone_phrase(word): """Return whether a phrase is composed only of cyclone words.""" # - # ### Other Phrases (challenge) # # On Puzzling.StackExchange, the user [JLee](https://puzzling.stackexchange.com/users/463/jlee) has come up with a ton of interesting puzzles of this form ("I call words that follow a certain rule "adjective" words"). If you like puzzles, optionally read through [these JLee puzzles](https://puzzling.stackexchange.com/search?q=%22I+call+it%22+title%3A%22what+is%22+is%3Aquestion+user%3A463) or [these other puzzles inspired by JLee](https://puzzling.stackexchange.com/search?tab=votes&q=%22what%20is%20a%22%20word%20is%3aquestion). # ### Triangle Words # The nth term of the sequence of triangle numbers is given by 1 + 2 + ... + n = n(n+1) / 2. For example, the first ten triangle numbers are: `1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...` # # By converting each letter in a word to a number corresponding to its alphabetical position (`A=1`, `B=2`, etc) and adding these values we form a word value. For example, the word value for SKY is `19 + 11 + 25 = 55` and 55 is a triangle number. If the word value is a triangle number then we shall call the word a triangle word. # # Generate a list of all triangle words. How many are there? As a sanity check, we found 16303 distinct triangle words. # # *Hint: you can use `ord(ch)` to get the integer ASCII value of a character. 
You can also use a dictionary to accomplish this!* def is_triangle_word(word): """Return whether a word is a triangle word.""" pass # ## Bonus Problems # # *Only attempt to solve these bonus problems if you've finished the rest of the lab. Bonus problems are intentionally much harder than the other lab problems.* # ### Polygon Collision # # Given two polygons in the form of lists of 2-tuples, determine whether the two polygons intersect. # # Formally, a polygon is represented by a list of (x, y) tuples, where each (x, y) tuple is a vertex of the polygon. Edges are assumed to be between adjacent vertices in the list, and the last vertex is connected to the first. For example, the unit square would be represented by # # ``` # square = [(0,0), (0,1), (1,1), (1,0)] # ``` # # You can assume that the polygon described by the provided list of tuples is not self-intersecting, but do not assume that it is convex. # # **Note: this is a *hard* problem. Quite hard.** # + def polygon_collision(poly1, poly2): pass unit_square = [(0,0), (0,1), (1,1), (1,0)] triangle = [(0,0), (0.5,2), (1,0)] print(polygon_collision(unit_square, triangle)) # => True # - # ## Done Early? # # Skim [Python’s Style Guide](https://www.python.org/dev/peps/pep-0008/), keeping the Zen of Python in mind. Feel free to skip portions of the style guide that cover material we haven't yet touched on in this class, but it's always good to start with an overview of good style. # ## Credits for this Lab # # *Credit to Puzzling.SE (specifically [JLee](https://puzzling.stackexchange.com/users/463/jlee)), ProjectEuler and InterviewCake for several problem ideas* # *Standford @sredmond*
notebooks/001-datastructures.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Exploring Preprocessed fMRI Data from fMRIPREP # # BIDS applications such as fMRIPREP output data into a full data structure with strong similarity to BIDS organization principals. In fact, there is a specification for derivatives (outputs derived from) BIDS datasets; although this is a current work in progress, details can be found in: [BIDS Derivatives](https://bids-specification.readthedocs.io/en/latest/06-extensions.html). # # In this tutorial, we'll explore the outputs generated by fMRIPREP and get a handle of how the data is organized from this preprocessing pipeline # *** # Luckily the semi-standardized output for fMRIPREP is organized in such a way that the data is easily accessible using pyBIDS! We'll first show what the full data-structure looks like then provide you methods on how you can pull specific types of outputs using pyBIDS. # ### The fMRIPREP Derivative Data Structure # First let's take a quick look at the fmriprep data structure: # !tree -L 1 '../data/ds000030/derivatives/fmriprep/' # First note that inside the fmriprep folder, we have a folder per-subject. Let's take a quick look at a single subject folder: # !tree '../data/ds000030/derivatives/fmriprep/sub-10788/' # As you can see above, each subject folder is organized into an <code>anat</code> and <code>func</code> sub-folder. # # Specifically: # # - the <code>anat</code> folder contains the preprocessed anatomical data. If multiple T1 files are available (all T1s even across sessions), then these data are merged - you will always have one <code>anat</code> folder under the subject folder # - the <code>func</code> folder contains the preprocessed functional data. 
All tasks are dumped into the same folder and like the BIDS convention are indicated by the use of their filenames (<code>task-[task_here]</code>) # # *** # This data is single-session, so a session folder is missing here - but with multiple sessions you will see <code>anat</code> and <code>ses-[insert_session_here]</code> folders where each session folder contain a <code>func</code> folder. # *** # Hopefully you're now convinced that the outputs of fMRIPREP roughly follows BIDS organization principles and is, in fact, quite simple. The filenames themselves give you a full description of what each file is (check the [slides](https://docs.google.com/presentation/d/1er6dQcERL-Yeb5-7A29tJnmqgHNaLpTLXM3e-SmpjDg/edit?usp=sharing) to get an idea of what each file means! # # Now let's see how we can pull data in using pyBIDS! # Let's import pyBIDS through the <code>bids</code> module first: # We can make a <code>bids.BIDSLayout</code> object as usual by just feeding in the fmriprep directory! However, one caveat is that since the fmriprep outputs are *not really BIDS but BIDS-like*, we have to turn off bids validation: # Now that we have a layout object, we can pretend like we're working with a BIDS dataset! Let's try some common commands that you would've used with a BIDS dataset: # **Getting Subjects** # **Getting Tasks** # Now let's try fetching specific files. Similar to how you would fetch BIDS data using pyBIDS, the exact same syntax will work for fMRIPREP derivatives. Let's try pulling just the preprocessed anatomical data. # # Recall that the anatomical folder is named as follows: # !tree '../data/ds000030/derivatives/fmriprep/sub-10788/anat' # The file that we're interested in is of form <code>sub-[subject]_T1w_preproc.nii.gz</code>. Now we can construct a pyBIDS call to pull these types of files specifically: # Note that we also pulled in MNI152NLin2009cAsym_preproc.nii.gz data as well. 
# This is data that has been transformed into MNI152NLin2009cAsym template space. We can pull this data out by further specifying our <code>layout.get</code> using the <code>space</code> argument:

# What if we wanted to pull out the data in T1 "native space" (it really is a template space, since it is merged T1s)? Unfortunately for *this specific version of fMRIPREP* this isn't directly possible using <code>layout.get</code>. Instead we'll use a bit of Python magic to pull the data that we want:
#
# ***
# In newer versions of fMRIPREP <code>space</code> is included in the native T1w file filename as <code>space-T1w</code> - in this case you can pull the data by using <code>layout.get(..., space='T1w')</code>
# ***

# Similarly, fMRI data can be pulled by specifying <code>datatype='func'</code> and using the <code>suffix</code> argument as appropriate:

# **Exercise: Pulling Functional Data**
#
# 1. Get the list of **all** preprocessed functional data
# 2. Get the list of functional data in MNI152NLin2009cAsym space
# 3. Get the list of functional data in T1w space (native)

# *All Functional Data*

# *MNI152NLin2009cAsym Functional Data*

# *Native Functional Data*

# Now that we have a handle on how fMRIPREP preprocessed data is organized and how we can pull this data, let's start working with the actual data itself!
code/02-exploring_fmriprep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #We can create numpy arrays with more than one dimension #This section will focus only on 2D arrays, but you can use numpy to build arrays of much higher dimensions #In this video we will cover the basics and array creation in 2D #indexing and slicing in 2D, and basic operations in 2D #3D #The list contains three nested lists each of equal size import numpy as np print('\n LIST:') list3d=[[23,4,5,6],[11,4, 7, 3],[34, 74, 4,9]] print (list3d) numlist=np.array(list3d) print('\nNumpy 3D lists:\n', numlist) # + #Rectangular array # It is helpful to visualize the numpy array as a rectangular array; # Each nested list corresponds to a different row of the matrix # We can use the attribute "ndim" to obtain #the number of axes or dimensions referred to as the rank #We can use the attribute "ndim" to obtain the number of "axes or dimensions" referred to as the rank print(numlist.ndim) #As with the 1d array, the attribute "shape" returns a tuple #It’s helpful to use the rectangular representation as well print(numlist.shape) #The first element in the tuple corresponds to the number of nested lists contained in #the original list or the number of rows in the rectangular representation, in this case 3 #The second element corresponds to the size of each of the nested lists # or the number of columns in the rectangular array 0 #Size # can also use the attribute size to get the size of the array print(numlist.size) # We can use rectangular brackets to access the different elements of the array Z=np.array([[2,4], [24,5], [21,98]]) print('\nAccess to the 3D arrays by rectangular brackets:\n',Z) Z=[[Z[0][0],Z[0][1]],[Z[1][0],Z[1][1]],[Z[2][0],Z[2][1]]] # Using the rectangular representation, the # first index corresponds to the row index print('\n Numpy 3D 
array:\n',Z) Z=np.array([[2,4], [24,5], [21,98]]) print(Z) print('\nAccess to the 3D arrays a single bracket:\n',Z) # We can also use a single bracket to access the elements as follows Z=[[Z[0,0],Z[0,1]],[Z[1,0],Z[1,1]],[Z[2,0],Z[2,1]]] # + #slicing #We can also use slicing in numpy arrays #The first index corresponds to the first row #The second index accesses the first two columns import numpy as np print('\n LIST:') list3d=[[23,4,5,6],[11,4, 7, 3],[34, 74, 4,9]] print (list3d) numlist=np.array(list3d) print('\nNumpy 3D lists:\n', numlist) print('\n #slicing [0,0:3]:') print(numlist[0,0:3]) print('\n #slicing [0:2, 0:3]:') print(numlist[0:2,0:3]) print('\n #slicing [0:2, 2]:') print(numlist[0:2,2]) print('\n #slicing [0:3, 0:2]:') print(numlist[0:3,0:2]) # + #Adding #We can also add arrays; the process is identical to matrix addition import numpy as np list3d=np.array([[23,4,5,6],[11,4, 7, 3]]) print ('\n Matrix A:\n',list3d) list3db=np.array([[-2, -5, 4, 9.6],[34, 74, 4,9]]) print ('\n Matrix B:\n',list3db) result=list3d+list3db print('\n Matrix A+ Matrix B:\n', result) # + #Python #Numpy #Multiplying #Multiplying a numpy array by a scaler is identical to multiplying a matrix by a scaler import numpy as np #Consider the matrix list2, if we multiply the matrix by the scaler 3 #we simply multiply every element in the matrix by 3 #The result is a new matrix of the same size list2=np.array([[5,7,8],[2,4,6],[2,5,7],[11, 4, 6]]) print ('\n numpy array (list2):\n',list2) Result=3* list2; print ('\n Multiplying a numpy array (list2) by a scaler (3)>>> Result=3*list2:\n',Result) Result=2* list2; print ('\n Multiplying a numpy array (list2) by a scaler (2)>>> Result=2*list2:\n',Result) Result=13.567* list2; print ('\n Multiplying a numpy array (list2) by a scaler (13.567)>>> Result=13.567*list2:\n',Result) list3=np.array([[2.4, 6.8],[9.3, 4.6],[2.9, 8.6]]) Result=1000.05* list3; print ('\n Multiplying a numpy array (list3) by a scaler (1000)>>> 
Result=1000*list2:\n',Result) # + #Python Numpy #Hadamard product #Multiplication of two arrays corresponds to an element-wise product or Hadamard product import numpy as bm arryNm1=bm.array([[2,4], [62,4], [3,6]]) arryNm2=bm.array([[3,5], [26,2], [7,8]]) print('\n Print arrayNm1: \n', arryNm1) print('\n print arrayNm2: \n', arryNm2) #The result is a new matrix that is the same size as matrix Y or X #Each element in this new matrix is the product of the corresponding elements in X and Y Result=arryNm1*arryNm2 print("\nHadamard product Resulet=arrayNm1*arrayNm1:\n", Result) #Ex2 ArryNm1=bm.array([[2,4], [3,6]]) ArryNm2=bm.array([[3,5], [7,8]]) print('\n Print aArrayNm1: \n', ArryNm1) print('\n print ArrayNm2: \n', ArryNm2) #The result is a new matrix that is the same size as matrix Y or X #Each element in this new matrix is the product of the corresponding elements in X and Y Result=ArryNm1*ArryNm2 print("\n Hadamard product Resulet=ArrayNm1*ArrayNm1:\n", Result) # + #Python #Numpy #The dot product #We can also perform matrix multiplication with numpy arrays #Matrix multiplication is a little more complex #but let's provide a basic overview import numpy as bm arryNm1=bm.array([[2,4], [6.2,4], [3,6]]) arryNm2=bm.array([[3, 5, 9], [7.4, 7.8, 2.8]]) print('\n Print arrayNm1: \n', arryNm1) print('\n print arrayNm2: \n', arryNm2) #The result is a new matrix that is the same size as matrix Y or X #Each element in this new matrix is the product of the corresponding elements in X and Y # we must make sure that the number of columns in matrix "arrayNm1"in this case 3, # is equal to the number of rows in matrix "arrayNm1", in this case 3 Result=np.dot(arryNm1,arryNm2) print("\n Dot product 'np.dot' Resulet=arrayNm1.arrayNm2:\n", Result) Result=np.dot(arryNm2,arryNm1) print("\n Dot product 'np.dot' Resulet=arrayNm2.arrayNm1:\n", Result) arryNm2=bm.array([[3, 5], [7.4, 7.8], [3.7, 8.6]]) arryNm1=bm.array([[2, 4, 5], [6.2, 4, 5.5]]) print('\n Print arrayNm1: \n', arryNm1) 
print('\n print arrayNm2: \n', arryNm2) #The result is a new matrix that is the same size as matrix Y or X #Each element in this new matrix is the product of the corresponding elements in X and Y # we must make sure that the number of columns in matrix "arrayNm1"in this case 3, # is equal to the number of rows in matrix "arrayNm1", in this case 3 Result=np.dot(arryNm1,arryNm2) print("\n Dot product 'np.dot' Resulet=arrayNm1.arrayNm2:\n", Result) Result=np.dot(arryNm2,arryNm1) print("\n Dot product 'np.dot' Resulet=arrayNm2.arrayNm1:\n", Result)
PythonNumpy2DArrays.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lecture 19: Stochastic Gradient Descent # In the last lecture we have learned how to set up and use the softmax regression to classify all 10 handwritten digits based on pixel intensities on a 28x28 grid (MNIST dataset). # # However, the training is slow on a decent configured laptop due to the size the dataset and the size of the parameter. # # Today we will learn a new method called Stochastic Gradient Descent (SGD), one of the two pillars of the deep learning (the other being backpropagation). # # > Every state-of-the-art Deep Learning library contains implementations of various algorithms to optimize (stochastic) gradient descent. # # References: # * [Why Momentum Really Works](https://distill.pub/2017/momentum/) # * [An overview of gradient descent optimization algorithms](http://ruder.io/optimizing-gradient-descent/index.html#stochasticgradientdescent) # # Vanilla Gradient Descent # We consider a loss function (e.g., the one we saw in softmax regression) which is the average of the sample-wise loss: # # $$L(\mathbf{w}) := L(\mathbf{w}; X,\mathbf{y}) = \frac{1}{N}\sum_{i=1}^N f_i(\mathbf{w}; \mathbf{x}^{(i)},y^{(i)})$$ # # which has the weights $\mathbf{w}$ as the parameters. Let us recall the softmax loss function here for comparison: # # $$ # L (\mathbf{w}) = - \frac{1}{N}\sum_{i=1}^N \Big\{\text{cross-entropy for each sample} \Big\}=- \frac{1}{N}\sum_{i=1}^N \left\{\sum_{k=1}^K # 1_{\{y^{(i)} = k\}} \ln \Bigg( \frac{\exp(\mathbf{w}_k^{\top} \mathbf{x}^{(i)})}{\sum_{j=1}^{K} # \exp\big(\mathbf{w}_j^{\top} \mathbf{x}^{(i)} \big) } \Bigg)\right\}. 
# $$ # # Then the gradient descent method for it reads: # # > Choose initial guess $\mathbf{w}_0$, step size (learning rate) $\eta$, number of iterations $M$<br><br> # > For $k=0,1,2, \cdots, M$<br> # > &nbsp;&nbsp;&nbsp;&nbsp; $\displaystyle\mathbf{w}_{k+1} = \mathbf{w}_k - \eta\nabla_{\mathbf{w}} L(\mathbf{w}_k) = \mathbf{w}_k - \eta\frac{1}{N}\sum_{i=1}^N \nabla_{\mathbf{w}} f_i(\mathbf{w}; \mathbf{x}^{(i)},y^{(i)})$ # # The gradient has to be evaluated $N$ times in one iteration, each evaluation involves a matrix matrix multiplication of order $O(n)$ (number of features in one sample). # ## Drawbacks of gradient descent: # * Large amount of gradient evaluation (for each sample), long computation time, wasting electricity, etc. # * Suppose we add more examples to our training set. For simplicity, imagine we just add an extra copy of every training example (but the computer algorithm does not know it is the same samples): then the amount of work doubles! # $$ # \nabla L = \frac{1}{2N}\sum_{i=1}^N \nabla f_i(\mathbf{w}) + \frac{1}{2N}\sum_{i=1}^N \nabla f_i(\mathbf{w}) , # $$ # even for the same loss function. # * The training examples arrive one-at-a-time (or several-at-a-time) as the model is "learning" (through gradient descent to improve the accuracy). Should we include these into the original dataset and re-compute the gradient? # # Stochastic Gradient Descent # # Suppose our loss function is still: # # $$L := L(\mathbf{w}; X,\mathbf{y}) = \frac{1}{N}\sum_{i=1}^N f_i(\mathbf{w}; \mathbf{x}^{(i)},y^{(i)}),$$ # # where $X = (\mathbf{x}^{(1)}, \dots, \mathbf{x}^{(N)})^{\top}$ are the training samples, $\mathbf{y} = (y^{(1)}, \dots, y^{(N)})^{\top}$ are the labels/taget values for the training samples. 
# # > Choose initial guess $\mathbf{w}_0$, step size (learning rate) $\eta$, number of inner iterations $M$, number of epochs $n_E$ <br><br> # > Set $\mathbf{w}_{M+1} = \mathbf{w}_0$ for epoch $e=0$<br> # > For epoch $n=1,2, \cdots, n_E$<br> # > &nbsp;&nbsp;&nbsp;&nbsp; $\mathbf{w}_{0}$ for the current epoch is $\mathbf{w}_{M+1}$ for the previous epoch.<br> # > &nbsp;&nbsp;&nbsp;&nbsp; Randomly shuffle the samples so that $\{\mathbf{x}^{(m)},y^{(m)}\}_{m=1}^N$ is a permutation of the original dataset.<br> # > &nbsp;&nbsp;&nbsp;&nbsp; For $m=0,1,2, \cdots, M$<br> # > &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; $\displaystyle\mathbf{w}_{m+1} = \mathbf{w}_m - \eta \nabla f_m(\mathbf{w}; \mathbf{x}^{(m)},y^{(m)})$ # # If $M = N$, which is the current batch of all training samples, one outer iteration is called a completed *epoch*. # # ### Vanilla SGD: Single gradient evaluation at each iteration # # Linear regression # # Let us give it a go on the linear regression. This time, we are using `scikit-learn`'s built-in dataset generator. For the linear regression, we can use `scikit-learn`'s `LinearRegression()` class for the multivariate regression from previous lectures, but since we are illustrating SGD, we are implementing ourselves. 
# # Recall the loss function with the $L^2$ regularization and the gradient: let the weight $\mathbf{w} = (w_0, \widehat{\mathbf{w}})$ where $w_0$ is the bias, and $\widehat{\mathbf{w}}$ is the vector containing the weights for the features of the dataset
#
# $$
# L(\mathbf{w}) = \frac{1}{N}\sum_{i=1}^N
# \left( [1, \;\mathbf{x}^{(i)}]^{\top} \mathbf{w} - y^{(i)} \right)^2
# + \epsilon |\widehat{\mathbf{w}}|^2,
# \\
# \frac{\partial L(w)}{\partial \mathbf{w}} = \frac{2}{N}\sum_{i=1}^N [1, \;\mathbf{x}^{(i)}]\left( [1, \;\mathbf{x}^{(i)}]^{\top} \mathbf{w} - y^{(i)}\right) + 2\epsilon\, [0, \widehat{\mathbf{w}}]
# $$
#
# If there is no bias:
# $$
# L(\mathbf{w}) = \frac{1}{N}\sum_{i=1}^N
# \left( \mathbf{w}^{\top}\mathbf{x}^{(i)} - y^{(i)} \right)^2
# + \epsilon |\mathbf{w}|^2,
# \\
# \frac{\partial L(w)}{\partial \mathbf{w}} = \frac{2}{N}\sum_{i=1}^N \mathbf{x}^{(i)}
# \left( \mathbf{w}^{\top}\mathbf{x}^{(i)} - y^{(i)}\right) + 2\epsilon\,\mathbf{w}.
# $$
#
#
# Reference: [Scikit-learn's dataset loading utilities](https://scikit-learn.org/stable/datasets/index.html)

# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
np.set_printoptions(suppress=True)
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
# -

# Synthetic regression problem: 10000 samples, 10 features, 70/30 train/test split.
X, y = make_regression(n_samples=10000, n_features=10)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# +
eps = 1e-3 # regularization parameter

# our model, returns the linear function [1 X]^T w
def h(X, w): # X is the training data
    """Linear model prediction X @ w (no bias term in this example)."""
    return np.matmul(X,w)

# loss function = total square error on the given data set X,y
def loss(w, X, y):
    """Mean squared error on (X, y) plus the L2 penalty eps*|w|^2."""
    # N = len(y) # this is one of the key, if y is only part of/one of the data label, then N will be small!
    residual_components = h(X, w) - y
    regularization = eps*np.sum(w**2)
    # return (1/len(y))*np.sum(residual_components**2) + regularization
    return np.mean(residual_components**2) + regularization
    # the second implementation is preferred because len(y) may not exist
    # (e.g. when y is a single scalar label during SGD)

def gradient_loss(w, X, y):
    """Gradient of `loss` w.r.t. w, averaged over all rows of X.

    Works both for the full batch and for a single sample/mini-batch,
    because np.mean over axis 0 averages whatever rows are passed in.
    """
    gradient_for_all_training_data = (h(X,w) - y).reshape(-1,1)*X
    gradient_for_regularization = 2*eps*w
    gradient_mean_training_data = np.mean(gradient_for_all_training_data, axis=0)
    # we should return a (10,) array, which is averaging all training data
    return 2*gradient_mean_training_data + gradient_for_regularization

# we define a cross validating function to compute the R^2 score
def rsquared(w, X, y):
    """Coefficient of determination R^2 = 1 - SS_res/SS_tot on (X, y).

    NOTE(review): divides by sum((y - y.mean())**2); a constant y would give a
    zero denominator — fine for make_regression targets, confirm for other data.
    """
    y_pred = h(X, w)
    return 1 - (np.sum((y- y_pred)**2))/(np.sum((y- y.mean())**2))
# -

# ## First let us try gradient descent
# #### just making sure our implementation above is good...

# initialization and hyper-parameter
w = 1e-2*np.random.random(np.shape(X_train)[1]) # weights and bias (bias is 0 in this case)
eta = 1e-3 # step size (learning rate)
num_steps = 1000

# Sanity check: these two expressions compute the same predictions.
np.sum(w * X_train, axis=1)[:10]

np.matmul(X_train,w)[:10]

loss_at_eachstep = np.zeros(num_steps) # record the change of the loss function
# Full-batch gradient descent: every step uses the gradient over all of X_train.
for i in range(num_steps):
    loss_at_eachstep[i] = loss(w,X_train,y_train)
    dw = gradient_loss(w,X_train,y_train)
    w -= eta * dw
    if i % 200 == 0:
        print("loss after", i+1, "iterations is: ", loss(w,X_train,y_train))
        print("Training R squared after", i+1, "iterations is: ", rsquared(w, X_train, y_train))
        print("Testing R squared after", i+1, "iterations is: ", rsquared(w, X_test, y_test))

# keep track of training accuracy just making sure we are in the right direction
plt.plot(range(num_steps), loss_at_eachstep)
plt.show()

# # Stochastic gradient descent
#
# * One sample's gradient at a time.
# * Every epoch we use `np.random.permutation` to randomly permute the order of the samples.
eta = 1e-4 # step size (learning rate), in general SGD should have a smaller learning rate num_epochs = 5 # no. of outer iteration N = len(y_train) # no. of inner iteration M = N # in general you can choose M <= N w = 1e-2*np.random.random(np.shape(X_train)[1]) # + sgd_loss_at_eachstep = np.zeros([num_epochs,M]) # record the change of the loss function # num_epochs is the # of outer iterations # N is the # of samples, which is the number of inner iterations for e in range(num_epochs): shuffle_index = np.random.permutation(N) for m in range(M): i = shuffle_index[m] # i corresponds i-th sample sgd_loss_at_eachstep[e,m] = loss(w,X_train,y_train) dw = gradient_loss(w,X_train[i,:],y_train[i]) # this is the gradient for i-th sample w -= eta * dw if m % 1000 ==0: print("loss after", e+1, "epochs and ", m+1, "iterations is: ", loss(w,X_train,y_train)) print("Training R squared after", e+1, "epochs and ", m+1, "iterations is:", rsquared(w, X_train, y_train)) print("Testing R squared after", e+1, "epochs and ", m+1, "iterations is:", rsquared(w, X_test, y_test)) # - plt.plot(sgd_loss_at_eachstep.reshape(-1)[:300], label="SGD loss") plt.legend() plt.show() # # Reading: Mini-batch SGD # # In the vanilla SGD, each parameter $\mathbf{w}$ update is computed w.r.t one training sample randomly selected. In mini-batch SGD, the update is computed for a mini-batch (a small number of training samples), as opposed to a single example. The reason for this is twofold: # * This reduces the variance in the parameter update and can lead to more stable convergence. # * This allows the computation to be more efficient, since our code is written in a vectorized way. # # A typical mini-batch size is $2^k$ (32, 256, etc), although the optimal size of the mini-batch can vary for different applications, and size of dataset (e.g., AlphaGo training uses mini-batch size of 2048 positions). 
# # > Choose initial guess $\mathbf{w}_0$, step size (learning rate) $\eta$, <br>
# batch size $n_B$, number of inner iterations $M\leq N/n_B$, number of epochs $n_E$ <br><br>
# > Set $\mathbf{w}_{M+1} = \mathbf{w}_0$ for epoch $e=0$<br>
# > For epoch $n=1,2, \cdots, n_E$<br>
# > &nbsp;&nbsp;&nbsp;&nbsp; $\mathbf{w}_{0}$ for the current epoch is $\mathbf{w}_{M+1}$ for the previous epoch.<br>
# > &nbsp;&nbsp;&nbsp;&nbsp; Randomly shuffle the training samples.<br>
# > &nbsp;&nbsp;&nbsp;&nbsp; For $m=0,1,2, \cdots, M$<br>
# > &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; $\displaystyle\mathbf{w}_{m+1} = \mathbf{w}_m - \frac{\eta}{n_B}\sum_{i=1}^{n_B} \nabla f_{m+i}(\mathbf{w}; \mathbf{x}^{(m+i)},y^{(m+i)})$

eta = 1e-4     # step size (learning rate)
num_epochs = 20
N = len(y_train)
num_batch = 16 # mini-batch size
# Number of mini-batch updates per epoch. The loop below walks range(0, N, num_batch),
# which performs ceil(N/num_batch) updates (the last batch may be smaller than
# num_batch), so the bookkeeping array must have ceil(N/num_batch) columns.
# BUG FIX: the original used M = int(N/num_batch) (floor) and then indexed
# m//num_batch - 1, which wrote the first loss of each epoch into the LAST
# column (index -1 wraps around in NumPy) and shifted every later entry by one.
M = -(-N // num_batch)   # ceil(N / num_batch) without importing math
w = 1e-2*np.random.random(np.shape(X_train)[1])

# +
sgdmini_loss_at_eachstep = np.zeros([num_epochs,M]) # record the change of the loss function

for e in range(num_epochs):
    # reshuffle the sample order at the start of every epoch
    shuffle_index = np.random.permutation(N)
    for m in range(0,N,num_batch):
        i = shuffle_index[m:m+num_batch] # indices of the samples in the current mini-batch
        # record the full-training-set loss for this update (column m//num_batch,
        # i.e. the 0-based index of the current mini-batch within the epoch)
        sgdmini_loss_at_eachstep[e,m//num_batch] = loss(w,X_train,y_train)
        dw = gradient_loss(w,X_train[i,:],y_train[i]) # gradient averaged over the mini-batch
        w -= eta * dw
        if m % 1000 ==0 and e % 5 == 0:
            print("loss after", e+1, "epochs and ", m+1, "iterations is: ", loss(w,X_train,y_train))
            print("Training R squared after", e+1, "epochs and ", (m+1)//num_batch, "iterations is:", rsquared(w, X_train, y_train))
            print("Testing R squared after", e+1, "epochs and ", (m+1)//num_batch, "iterations is:", rsquared(w, X_test, y_test))
# -

import seaborn as sns
sns.set()

num_plot = 300
plt.figure(figsize= [16,8])
plt.plot(sgdmini_loss_at_eachstep.reshape(-1)[:num_plot], 'b-', label="SGD mini-batch loss")
plt.plot(sgd_loss_at_eachstep.reshape(-1)[:num_plot], label="SGD loss", linewidth=2, color = 'green')
plt.legend(fontsize=20)
plt.show()
Data Science and Machine Learning/Introduction to ML Notebooks/Introduction to ML with Python (UCI)/Lecture-19-Stochastic-Gradient-Descent.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import s3fs

# Redshift Server Details
dsn_database = "dev"
dsn_hostname = "redshift-cluster-1.citpxgaovgkr.eu-north-1.redshift.amazonaws.com"
dsn_port = "5439"
dsn_uid = "awsuser"
# NOTE(review): this immediately overwrites dsn_database = "dev" above, and the
# same value is later passed as the JDBC "dbtable" option — confirm whether
# "telcom" is meant to be the database name, the table name, or both.
dsn_database = "telcom"

# IAM-authenticated Redshift JDBC URL: jdbc:redshift:iam://host:port/database
jdbcUrl = 'jdbc:redshift:iam://'+ dsn_hostname+':'+dsn_port+'/'+dsn_database

# +
# Read the table from Redshift over JDBC into a Spark DataFrame.
# BUG FIX: the original wrapped an assignment in parentheses — "(df = spark.read. ...)"
# — which is a SyntaxError in Python; the parentheses may enclose only the expression.
# BUG FIX: the original passed an undefined name `jdbcUsername` as the user;
# the connection user defined above is dsn_uid.
df = (spark.read
      .format("jdbc")
      .option("driver", "com.amazon.redshift.jdbc42.Driver")
      .option("url", jdbcUrl)
      .option("dbtable", dsn_database)
      .option("user", dsn_uid)
      .load())

type(df)
# -

#from hops import hdfs as hdfs
#df.write.format("delta").save(hdfs.project_path() + "Resources/telcom-delta")
# Persist the DataFrame to S3 in Delta Lake format.
df.write.format("delta").save("s3a://sagemakerjim/telco-delta")

# +
from pyspark.sql.types import *

# Auxiliar functions
def equivalent_type(f):
    """Map a pandas dtype (compared against its string name) to a Spark SQL type.

    Anything unrecognized falls back to StringType.
    """
    if f == 'datetime64[ns]': return DateType()
    elif f == 'int64': return LongType()
    elif f == 'int32': return IntegerType()
    elif f == 'float64': return FloatType()
    else: return StringType()

def define_structure(string, format_type):
    """Build a StructField named `string` with the Spark type for `format_type`."""
    try:
        typo = equivalent_type(format_type)
    except Exception:  # narrowed from a bare except: don't swallow KeyboardInterrupt/SystemExit
        typo = StringType()
    return StructField(string, typo)

# Given pandas dataframe, it will return a spark's dataframe.
def pandas_to_spark(pandas_df):
    """Convert a pandas DataFrame to a Spark DataFrame with an explicit schema."""
    columns = list(pandas_df.columns)
    types = list(pandas_df.dtypes)
    struct_list = [define_structure(column, typo) for column, typo in zip(columns, types)]
    p_schema = StructType(struct_list)
    return sqlContext.createDataFrame(pandas_df, p_schema)
# -

# Register the DataFrame so it can be queried via SQL.
df.registerTempTable("telcom")

sqlContext.sql("describe telcom")

df.describe()
convert-delta.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sentiment Classification & How To "Frame Problems" for a Neural Network # # by <NAME> # # - **Twitter**: @iamtrask # - **Blog**: http://iamtrask.github.io # ### What You Should Already Know # # - neural networks, forward and back-propagation # - stochastic gradient descent # - mean squared error # - and train/test splits # # ### Where to Get Help if You Need it # - Re-watch previous Udacity Lectures # - Leverage the recommended Course Reading Material - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) (Check inside your classroom for a discount code) # - Shoot me a tweet @iamtrask # # # ### Tutorial Outline: # # - Intro: The Importance of "Framing a Problem" (this lesson) # # - [Curate a Dataset](#lesson_1) # - [Developing a "Predictive Theory"](#lesson_2) # - [**PROJECT 1**: Quick Theory Validation](#project_1) # # # - [Transforming Text to Numbers](#lesson_3) # - [**PROJECT 2**: Creating the Input/Output Data](#project_2) # # # - Putting it all together in a Neural Network (video only - nothing in notebook) # - [**PROJECT 3**: Building our Neural Network](#project_3) # # # - [Understanding Neural Noise](#lesson_4) # - [**PROJECT 4**: Making Learning Faster by Reducing Noise](#project_4) # # # - [Analyzing Inefficiencies in our Network](#lesson_5) # - [**PROJECT 5**: Making our Network Train and Run Faster](#project_5) # # # - [Further Noise Reduction](#lesson_6) # - [**PROJECT 6**: Reducing Noise by Strategically Reducing the Vocabulary](#project_6) # # # - [Analysis: What's going on in the weights?](#lesson_7) # + [markdown] nbpresent={"id": "56bb3cba-260c-4ebe-9ed6-b995b4c72aa3"} # # Lesson: Curate a Dataset<a id='lesson_1'></a> # The cells from here until Project 1 include code Andrew shows in the videos 
# leading up to mini project 1. We've included them so you can run the code
# along with the videos without having to type in everything.

# + nbpresent={"id": "eba2b193-0419-431e-8db9-60f34dd3fe83"}
def pretty_print_review_and_label(i):
    """Print the i-th label and the first 80 characters of the i-th review."""
    print(labels[i] + "\t:\t" + reviews[i][:80] + "...")

# FIX: use context managers so the file handles are closed even if reading
# raises (the original opened and closed the handles manually).
with open('reviews.txt','r') as g:                        # What we know!
    reviews = list(map(lambda x:x[:-1],g.readlines()))    # x[:-1] strips the trailing newline

with open('labels.txt','r') as g:                         # What we WANT to know!
    labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
# -

# **Note:** The data in `reviews.txt` we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way.

len(reviews)

# + nbpresent={"id": "bb95574b-21a0-4213-ae50-34363cf4f87f"}
reviews[0]

# + nbpresent={"id": "e0408810-c424-4ed4-afb9-1735e9ddbd0a"}
labels[0]
# -

# # Lesson: Develop a Predictive Theory<a id='lesson_2'></a>

# + nbpresent={"id": "e67a709f-234f-4493-bae6-4fb192141ee0"}
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
# -

# # Project 1: Quick Theory Validation<a id='project_1'></a>
#
# There are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook.
#
# You'll find the [Counter](https://docs.python.org/2/library/collections.html#collections.Counter) class to be useful in this exercise, as well as the [numpy](https://docs.scipy.org/doc/numpy/reference/) library.
from collections import Counter
import numpy as np

# We'll create three `Counter` objects, one for words from positive reviews, one for words from negative reviews, and one for all the words.

# Create three Counter objects to store positive, negative and total counts
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()

# **TODO:** Examine all the reviews. For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.
#
# **Note:** Throughout these projects, you should use `split(' ')` to divide a piece of text (such as a review) into individual words. If you use `split()` instead, you'll get slightly different results than what the videos and solutions show.

# TODO: Loop over all the words in all the reviews and increment the counts in the appropriate counter objects
# Each review contributes to either the positive or the negative counter
# (depending on its label) and always to the total counter.
for i in range(len(reviews)):
    if(labels[i] == 'POSITIVE'):
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1

# Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.

# Examine the counts of the most common words in positive reviews
positive_counts.most_common()

# Examine the counts of the most common words in negative reviews
negative_counts.most_common()

# As you can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the **ratios** of word usage between positive and negative reviews.
#
# **TODO:** Check all the words you've seen and calculate the ratio of positive to negative uses and store that ratio in `pos_neg_ratios`.
#
# >Hint: the positive-to-negative ratio for a given word can be calculated with `positive_counts[word] / float(negative_counts[word]+1)`. Notice the `+1` in the denominator – that ensures we don't divide by zero for words that are only seen in positive reviews.

# +
# Create Counter object to store positive/negative ratios
pos_neg_ratios = Counter()

# TODO: Calculate the ratios of positive and negative uses of the most common words
# Consider words to be "common" if they've been used at least 100 times
for word in total_counts.keys():
    if(total_counts[word]>100):
        pos_neg_ratios[word]=positive_counts[word]/ float(negative_counts[word]+1)
# -

# Examine the ratios you've calculated for a few words:

print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))

# Looking closely at the values you just calculated, we see the following:
#
# * Words that you would expect to see more often in positive reviews – like "amazing" – have a ratio greater than 1. The more skewed a word is toward positive, the farther from 1 its positive-to-negative ratio will be.
# * Words that you would expect to see more often in negative reviews – like "terrible" – have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.
# * Neutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews – like "the" – have values very close to 1. A perfectly neutral word – one that was used in exactly the same number of positive reviews as negative reviews – would be almost exactly 1.
The `+1` we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway. # # Ok, the ratios tell us which words are used more often in postive or negative reviews, but the specific values we've calculated are a bit difficult to work with. A very positive word like "amazing" has a value above 4, whereas a very negative word like "terrible" has a value around 0.18. Those values aren't easy to compare for a couple of reasons: # # * Right now, 1 is considered neutral, but the absolute value of the postive-to-negative rations of very postive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around netural so the absolute value fro neutral of the postive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys. # * When comparing absolute values it's easier to do that around zero than one. # # To fix these issues, we'll convert all of our ratios to new values using logarithms. # # **TODO:** Go through all the ratios you calculated and convert them to logarithms. (i.e. use `np.log(ratio)`) # # In the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs. 
# TODO: Convert ratios to logs for word in pos_neg_ratios: pos_neg_ratios[word]=np.log(pos_neg_ratios[word]) # Examine the new ratios you've calculated for the same words from before: print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"])) print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"])) print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"])) # If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above `1`, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below `-1`. It's now clear that both of these words are associated with specific, opposing sentiments. # # Now run the following cells to see more ratios. # # The first cell displays all the words, ordered by how associated they are with postive reviews. (Your notebook will most likely truncate the output so you won't actually see *all* the words in the list.) # # The second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write `reversed(pos_neg_ratios.most_common())`.) # # You should continue to see values similar to the earlier ones we checked – neutral words will be close to `0`, words will get more positive as their ratios approach and go above `1`, and words will get more negative as their ratios approach and go below `-1`. That's why we decided to use the logs instead of the raw ratios. 
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()

# +
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]

# Note: Above is the code Andrew uses in his solution video,
# so we've included it here to avoid confusion.
# If you explore the documentation for the Counter class,
# you will see you could also find the 30 least common
# words like this:  pos_neg_ratios.most_common()[:-31:-1]
# -

# # End of Project 1.
# ## Watch the next video to see Andrew's solution, then continue on to the next lesson.

# # Transforming Text into Numbers<a id='lesson_3'></a>
# The cells here include code Andrew shows in the next video. We've included it so you can run the code along with the video without having to type in everything.

# +
from IPython.display import Image

review = "This was a horrible, terrible movie."

Image(filename='sentiment_network.png')

# +
review = "The movie was excellent"

Image(filename='sentiment_network_pos.png')
# -

# # Project 2: Creating the Input/Output Data<a id='project_2'></a>
#
# **TODO:** Create a [set](https://docs.python.org/3/tutorial/datastructures.html#sets) named `vocab` that contains every word in the vocabulary.

# TODO: Create set named "vocab" containing all of the words from all of the reviews
# FIX: removed the dead `vocab = None` assignment that was immediately overwritten,
# and build the list directly from the Counter's keys instead of a copy-comprehension.
# (Counter keys are already unique, so a list of them serves the same purpose as a
# set here; downstream cells only use len(vocab) and enumerate(vocab).)
vocab = list(total_counts.keys())

# Run the following cell to check your vocabulary size. If everything worked correctly, it should print **74074**

vocab_size = len(vocab)
print(vocab_size)

# Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. `layer_0` is the input layer, `layer_1` is a hidden layer, and `layer_2` is the output layer.

from IPython.display import Image
Image(filename='sentiment_network_2.png')

# **TODO:** Create a numpy array called `layer_0` and initialize it to all zeros.
# You will find the [zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html) function particularly helpful here. Be sure you create `layer_0` as a 2-dimensional matrix with 1 row and `vocab_size` columns.

# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros
layer_0 = np.zeros((1,vocab_size))

# Run the following cell. It should display `(1, 74074)`

layer_0.shape

from IPython.display import Image
Image(filename='sentiment_network.png')

# `layer_0` contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.

# +
# Create a dictionary of words in the vocabulary mapped to index positions
# (to be used in layer_0)
word2index = {}
for i,word in enumerate(vocab):
    word2index[word] = i

# display the map of words to indices
word2index
# -

# **TODO:** Complete the implementation of `update_input_layer`. It should count
#           how many times each word is used in the given review, and then store
#           those counts at the appropriate indices inside `layer_0`.

def update_input_layer(review):
    """ Modify the global layer_0 to represent the vector form of review.
    The element at a given index of layer_0 should represent
    how many times the given word occurs in the review.
    Args:
        review(string) - the string of the review
    Returns:
        None
    """
    global layer_0
    # clear out previous state by resetting the layer to be all 0s
    layer_0 *= 0
    # Count occurrences of every word in the review at its vocabulary index.
    # NOTE(review): a word missing from word2index would raise KeyError here —
    # assumes reviews only contain words seen when word2index was built; confirm
    # before feeding unseen text.
    for word in review.split(" "):
        layer_0[0][word2index[word]]+=1

    # TODO: count how many times each word is used in the given review and store the results in layer_0

# Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in `layer_0`.
# Vectorize the first review; layer_0 should now contain its word counts.
update_input_layer(reviews[0])
layer_0

# **TODO:** Complete the implementation of `get_target_for_labels`. It should return `0` or `1`,
#           depending on whether the given label is `NEGATIVE` or `POSITIVE`, respectively.

def get_target_for_label(label):
    """Convert a label to `0` or `1`.
    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".
    Returns:
        `0` or `1`.
    """
    # TODO: Your code here
    # Binary target: 1 for "POSITIVE", 0 for anything else (i.e. "NEGATIVE").
    if(label=='POSITIVE'):
        return 1
    else:
        return 0

# Run the following two cells. They should print out`'POSITIVE'` and `1`, respectively.

labels[0]

get_target_for_label(labels[0])

# Run the following two cells. They should print out `'NEGATIVE'` and `0`, respectively.

labels[1]

get_target_for_label(labels[1])

# # End of Project 2.
# ## Watch the next video to see Andrew's solution, then continue on to the next lesson.

# # Project 3: Building a Neural Network<a id='project_3'></a>

# **TODO:** We've included the framework of a class called `SentimentNetork`. Implement all of the items marked `TODO` in the code. These include doing the following:
# - Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer.
# - Do **not** add a non-linearity in the hidden layer. That is, do not use an activation function when calculating the hidden layer outputs.
# - Re-use the code from earlier in this notebook to create the training data (see `TODO`s in the code)
# - Implement the `pre_process_data` function to create the vocabulary for our training data generating functions
# - Ensure `train` trains over the entire corpus

# ### Where to Get Help if You Need it
# - Re-watch earlier Udacity lectures
# - Chapters 3-5 - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) - (Check inside your classroom for a discount code)

# +
import time
import sys
import numpy as np

# Encapsulate our neural network in a class
class SentimentNetwork:
    def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):
        """Create a SentimenNetwork with the given settings
        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            hidden_nodes(int) - Number of nodes to create in the hidden layer
            learning_rate(float) - Learning rate to use while training
        """
        # Assign a seed to our random number generator to ensure we get
        # reproducable results during development
        np.random.seed(1)

        # process the reviews and their associated labels so that everything
        # is ready for training
        self.pre_process_data(reviews, labels)

        # Build the network to have the number of hidden nodes and the learning rate that
        # were passed into this initializer. Make the same number of input nodes as
        # there are vocabulary words and create a single output node.
        self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Build the word/label vocabularies and their index lookups."""
        # Populate review_vocab with all of the words in the given reviews.
        # Remember to split reviews into individual words using "split(' ')".
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)

        # Convert the vocabulary set to a list so we can access words via indices
        self.review_vocab = list(review_vocab)

        # Populate label_vocab with all of the distinct labels. There is no need
        # to split the labels because each one is a single word.
        label_vocab = set(label for label in labels)

        # Convert the label vocabulary set to a list so we can access labels via indices
        self.label_vocab = list(label_vocab)

        # Store the sizes of the review and label vocabularies.
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # Create a dictionary of words in the vocabulary mapped to index positions.
        # BUGFIX: enumerate() yields (index, item); the original unpacked the pair
        # in the wrong order ("for word,i in enumerate(...)"), which produced an
        # index->word mapping instead of the word->index mapping the rest of the
        # class relies on.
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i

        # Create a dictionary of labels mapped to index positions.
        # BUGFIX: the original iterated the full `labels` list (with the same
        # swapped-enumerate bug); the lookup must be built from self.label_vocab.
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate the weight matrices and the input layer buffer."""
        # Store the number of nodes in input, hidden, and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Store the learning rate
        self.learning_rate = learning_rate

        # Initialize self.weights_0_1 as a matrix of zeros. These are the weights
        # between the input layer and the hidden layer.
        self.weights_0_1 = np.zeros((input_nodes, hidden_nodes))

        # Initialize self.weights_1_2 as a matrix of random values.
        # These are the weights between the hidden layer and the output layer.
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))

        # The input layer: a two-dimensional matrix with shape 1 x input_nodes,
        # with all values initialized to zero.
        self.layer_0 = np.zeros((1, input_nodes))

    def update_input_layer(self, review):
        """Fill layer_0 with word counts for the given review."""
        # clear out previous state by resetting the layer to be all 0s
        self.layer_0 *= 0

        for word in review.split(" "):
            if(word in self.word2index.keys()):
                # BUGFIX: layer_0 has shape (1, input_nodes), so the word's count
                # lives at row 0, column word2index[word]. The original indexed
                # the row axis with the word index, which is out of bounds for
                # any vocabulary index >= 1.
                self.layer_0[0][self.word2index[word]] += 1

    def get_target_for_label(self, label):
        """Return 1 for 'POSITIVE', 0 otherwise."""
        if(label == 'POSITIVE'):
            return 1
        else:
            return 0

    def sigmoid(self, x):
        """The sigmoid activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self, output):
        """Derivative of the sigmoid, given the sigmoid's original output."""
        return output * (1 - output)

    def train(self, training_reviews, training_labels):
        """Run one pass of SGD over the given reviews/labels."""
        # make sure out we have a matching number of reviews and labels
        assert(len(training_reviews) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training
        correct_so_far = 0

        # Remember when we started for printing time statistics
        start = time.time()

        # loop through all the given reviews and run a forward and backward pass,
        # updating weights for every item
        for i in range(len(training_reviews)):
            # Get the next review and its correct label
            review = training_reviews[i]
            label = training_labels[i]

            ### Forward pass ###
            # No activation function for the hidden layer; sigmoid on the output.
            self.update_input_layer(review)
            hidden_input = np.dot(self.layer_0, self.weights_0_1)        # 1 x hidden_nodes
            finalLayer_input = np.dot(hidden_input, self.weights_1_2)    # 1 x 1
            finalLayer_output = self.sigmoid(finalLayer_input)

            ### Backward pass ###
            # BUGFIX: define the error as (output - target) so that the `-=`
            # gradient-descent updates below move the weights toward the target.
            # The original computed (target - output) and still subtracted the
            # update, which ascends the error surface instead of descending it.
            layer_2_error = finalLayer_output - self.get_target_for_label(label)
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(finalLayer_output)

            # Error propagated back to the hidden layer; no non-linearity there,
            # so the delta equals the error.
            layer_1_error = layer_2_delta.dot(self.weights_1_2.T)        # 1 x hidden_nodes
            layer_1_delta = layer_1_error

            # Gradient-descent weight updates.
            self.weights_1_2 -= self.learning_rate * hidden_input.T.dot(layer_2_delta)
            self.weights_0_1 -= self.learning_rate * self.layer_0.T.dot(layer_1_delta)

            # Keep track of correct predictions: the prediction is correct when
            # the output lands on the same side of 0.5 as the label.
            if(finalLayer_output >= 0.5 and label == 'POSITIVE'):
                correct_so_far += 1
            elif(finalLayer_output < 0.5 and label == 'NEGATIVE'):
                correct_so_far += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """
        Attempts to predict the labels for the given testing_reviews,
        and uses the test_labels to calculate the accuracy of those predictions.
        """
        # keep track of how many correct predictions we make
        correct = 0

        # we'll time how many predictions per second we make
        start = time.time()

        # Loop through each of the given reviews and call run to predict
        # its label.
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the prediction process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
                             + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """
        Returns a POSITIVE or NEGATIVE prediction for the given review.
        """
        # The review might come from anywhere, so convert it to lower case
        # before using it (the vocabulary was built from lower-case text).
        self.update_input_layer(review.lower())
        hidden_input = np.dot(self.layer_0, self.weights_0_1)
        finalLayer_input = np.dot(hidden_input, self.weights_1_2)
        finalLayer_output = self.sigmoid(finalLayer_input)

        # BUGFIX: the spec says to return POSITIVE for predictions
        # greater-than-or-EQUAL-to 0.5; the original used a strict '>'.
        if(finalLayer_output[0] >= 0.5):
            return 'POSITIVE'
        else:
            return 'NEGATIVE'
# -

# # Run the following cell to create a `SentimentNetwork` that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of `0.1`.

mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)

# Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set).
#
# **We have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.**

mlp.test(reviews[-1000:],labels[-1000:])

# Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.

mlp.train(reviews[:-1000],labels[:-1000])

# That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, `0.01`, and then train the new network.

mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])

# That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, `0.001`, and then train the new network.
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
mlp.train(reviews[:-1000],labels[:-1000])

# With a learning rate of `0.001`, the network should finall have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.

# # End of Project 3.
# ## Watch the next video to see Andrew's solution, then continue on to the next lesson.

# # Understanding Neural Noise<a id='lesson_4'></a>
#
# The following cells include includes the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.

from IPython.display import Image

Image(filename='sentiment_network.png')

# +
def update_input_layer(review):
    """Count each word of `review` into the global layer_0 matrix."""
    global layer_0
    # clear out previous state, reset the layer to be all 0s
    layer_0 *= 0
    for word in review.split(" "):
        # increment the count for this word's column in row 0
        layer_0[0][word2index[word]] += 1

update_input_layer(reviews[0])
# -

layer_0

# Count the words of the first review to see which terms dominate the input.
review_counter = Counter()

for word in reviews[0].split(" "):
    review_counter[word] += 1

review_counter.most_common()

# # Project 4: Reducing Noise in Our Input Data<a id='project_4'></a>
#
# **TODO:** Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following:
# * Copy the `SentimentNetwork` class you created earlier into the following cell.
# * Modify `update_input_layer` so it does not count how many times each word is used, but rather just stores whether or not a word was used.

# Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of `0.1`.

mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])

# That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.

mlp.test(reviews[-1000:],labels[-1000:])

# # End of Project 4.
# ## Andrew's solution was actually in the previous video, so rewatch that video if you had any problems with that project. Then continue on to the next lesson.

# # Analyzing Inefficiencies in our Network<a id='lesson_5'></a>
# The following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.

Image(filename='sentiment_network_sparse.png')

# Demonstration: with a sparse one-hot-ish input, a full matrix product
# wastes work multiplying by zeros.
layer_0 = np.zeros(10)

layer_0

layer_0[4] = 1
layer_0[9] = 1

layer_0

weights_0_1 = np.random.randn(10,5)

layer_0.dot(weights_0_1)

# Equivalent result: just sum the weight rows for the non-zero indices.
indices = [4,9]

layer_1 = np.zeros(5)

for index in indices:
    layer_1 += (1 * weights_0_1[index])

layer_1

Image(filename='sentiment_network_sparse_2.png')

# Same as above, with the redundant multiply-by-one removed.
layer_1 = np.zeros(5)

for index in indices:
    layer_1 += (weights_0_1[index])

layer_1

# # Project 5: Making our Network More Efficient<a id='project_5'></a>
# **TODO:** Make the `SentimentNetwork` class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. To do that, you can do the following:
# * Copy the `SentimentNetwork` class from the previous project into the following cell.
# * Remove the `update_input_layer` function - you will not need it in this version.
# * Modify `init_network`:
# >* You no longer need a separate input layer, so remove any mention of `self.layer_0`
# >* You will be dealing with the old hidden layer more directly, so create `self.layer_1`, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero
# * Modify `train`:
# >* Change the name of the input parameter `training_reviews` to `training_reviews_raw`. This will help with the next step.
# >* At the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from `word2index`) that are actually used in the review.
# This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local `list` variable named `training_reviews` that should contain a `list` for each review in `training_reviews_raw`. Those lists should contain the indices for words found in the review.
# >* Remove call to `update_input_layer`
# >* Use `self`'s  `layer_1` instead of a local `layer_1` object.
# >* In the forward pass, replace the code that updates `layer_1` with new logic that only adds the weights for the indices used in the review.
# >* When updating `weights_0_1`, only update the individual weights that were used in the forward pass.
# * Modify `run`:
# >* Remove call to `update_input_layer`
# >* Use `self`'s  `layer_1` instead of a local `layer_1` object.
# >* Much like you did in `train`, you will need to pre-process the `review` so you can work with word indices, then update `layer_1` by adding weights for the indices used in the review.

# +
# TODO: -Copy the SentimentNetwork class from Project 4 lesson
#       -Modify it according to the above instructions
import time
import sys
import numpy as np

# Encapsulate our neural network in a class
class SentimentNetwork:
    def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
        """Create a SentimenNetwork with the given settings
        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            hidden_nodes(int) - Number of nodes to create in the hidden layer
            learning_rate(float) - Learning rate to use while training
        """
        # Assign a seed to our random number generator to ensure we get
        # reproducable results during development
        np.random.seed(1)

        # process the reviews and their associated labels so that everything
        # is ready for training
        self.pre_process_data(reviews, labels)

        # Build the network to have the number of hidden nodes and the learning rate that
        # were passed into this initializer. Make the same number of input nodes as
        # there are vocabulary words and create a single output node.
        self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Build word/label vocabularies and their word->index lookups."""
        # populate review_vocab with all of the words in the given reviews
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)

        # Convert the vocabulary set to a list so we can access words via indices
        self.review_vocab = list(review_vocab)

        # populate label_vocab with all of the words in the given labels.
        label_vocab = set()
        for label in labels:
            label_vocab.add(label)

        # Convert the label vocabulary set to a list so we can access labels via indices
        self.label_vocab = list(label_vocab)

        # Store the sizes of the review and label vocabularies.
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # Create a dictionary of words in the vocabulary mapped to index positions
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i

        # Create a dictionary of labels mapped to index positions
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate weights and the persistent hidden-layer buffer."""
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Store the learning rate
        self.learning_rate = learning_rate

        # Initialize weights
        # These are the weights between the input layer and the hidden layer.
        self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))

        # These are the weights between the hidden layer and the output layer.
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))

        # The hidden layer, a two-dimensional matrix with shape 1 x hidden_nodes.
        # (There is no layer_0 in this version; word indices add weight rows
        # directly into layer_1.)
        self.layer_1 = np.zeros((1,hidden_nodes))

    def get_target_for_label(self,label):
        # Return 1 for 'POSITIVE', 0 otherwise.
        if(label == 'POSITIVE'):
            return 1
        else:
            return 0

    def sigmoid(self,x):
        # The sigmoid activation function.
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self,output):
        # Derivative of the sigmoid, given the sigmoid's original output.
        return output * (1 - output)

    def train(self, training_reviews_raw, training_labels):
        """SGD over raw review strings; converts each review to word indices
        inside the loop. NOTE(review): assumes every training word is already
        in word2index (true when training on the vocabulary source)."""
        # make sure out we have a matching number of reviews and labels
        assert(len(training_reviews_raw) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training
        correct_so_far = 0

        # Remember when we started for printing time statistics
        start = time.time()

        # loop through all the given reviews and run a forward and backward pass,
        # updating weights for every item
        for i in range(len(training_reviews_raw)):

            # Get the next review and its correct label
            review = training_reviews_raw[i]
            label = training_labels[i]

            # Convert the review to the set of word indices it contains.
            training_review=set()
            for word in review.split(" "):
                training_review.add(self.word2index[word])
            training_review=list(training_review)

            #### Implement the forward pass here ####
            ### Forward pass ###

            # Input Layer

            # Hidden layer: add in only the weight rows for words present.
            self.layer_1 *= 0
            for indices in training_review:
                self.layer_1 += self.weights_0_1[indices]

            # Output layer
            layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))

            #### Implement the backward pass here ####
            ### Backward pass ###

            # Output error
            layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)

            # Backpropagated error
            layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
            layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error

            # Update the weights
            self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
            # Only update the input-to-hidden weight rows used in the forward pass.
            for indices in training_review:
                self.weights_0_1[indices] -= self.learning_rate *layer_1_delta[0]
            #self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step

            # Keep track of correct predictions.
            if(layer_2 >= 0.5 and label == 'POSITIVE'):
                correct_so_far += 1
            elif(layer_2 < 0.5 and label == 'NEGATIVE'):
                correct_so_far += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews_raw)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def train1(self, training_reviews_raw, training_labels):
        """Alternate implementation of train(): pre-converts ALL reviews to
        index lists up front and skips words missing from the vocabulary."""
        # Pre-process reviews into lists of (unique) word indices.
        training_reviews = list()
        for review in training_reviews_raw:
            indices = set()
            for word in review.split(" "):
                if(word in self.word2index.keys()):
                    indices.add(self.word2index[word])
            training_reviews.append(list(indices))

        # make sure out we have a matching number of reviews and labels
        assert(len(training_reviews) == len(training_labels))

        # Keep track of correct predictions to display accuracy during training
        correct_so_far = 0

        # Remember when we started for printing time statistics
        start = time.time()

        # loop through all the given reviews and run a forward and backward pass,
        # updating weights for every item
        for i in range(len(training_reviews)):

            # Get the next review and its correct label
            review = training_reviews[i]
            label = training_labels[i]

            #### Implement the forward pass here ####
            ### Forward pass ###

            ## New for Project 5: Removed call to 'update_input_layer' function
            #                     because 'layer_0' is no longer used

            # Hidden layer
            ## New for Project 5: Add in only the weights for non-zero items
            self.layer_1 *= 0
            for index in review:
                self.layer_1 += self.weights_0_1[index]

            # Output layer
            ## New for Project 5: changed to use 'self.layer_1' instead of 'local layer_1'
            layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))

            #### Implement the backward pass here ####
            ### Backward pass ###

            # Output error
            layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)

            # Backpropagated error
            layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
            layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error

            # Update the weights
            ## New for Project 5: changed to use 'self.layer_1' instead of local 'layer_1'
            self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step

            ## New for Project 5: Only update the weights that were used in the forward pass
            for index in review:
                self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step

            # Keep track of correct predictions.
            if(layer_2 >= 0.5 and label == 'POSITIVE'):
                correct_so_far += 1
            elif(layer_2 < 0.5 and label == 'NEGATIVE'):
                correct_so_far += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the training process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \
                             + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """
        Attempts to predict the labels for the given testing_reviews,
        and uses the test_labels to calculate the accuracy of those predictions.
        """
        # keep track of how many correct predictions we make
        correct = 0

        # we'll time how many predictions per second we make
        start = time.time()

        # Loop through each of the given reviews and call run to predict
        # its label.
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            # For debug purposes, print out our prediction accuracy and speed
            # throughout the prediction process.
            elapsed_time = float(time.time() - start)
            reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \
                             + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """
        Returns a POSITIVE or NEGATIVE prediction for the given review.
        """
        # NOTE(review): the triple-quoted string below is dead code kept from a
        # previous draft; it is a no-op string expression, not executed logic.
        '''
        # Run a forward pass through the network, like in the "train" function.

        # Input Layer
        self.layer_1 *= 0
        reviewIndices=set()
        for word in review.lower().split(" "):
            if word in self.word2index.keys():
                reviewIndices.add(self.word2index[word])

        # Hidden layer
        for indices in reviewIndices:
            self.layer_1 += self.weights_0_1[indices]

        # Output layer
        layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))

        # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer;
        # return NEGATIVE for other values
        if(layer_2[0] >= 0.5):
            return "POSITIVE"
        else:
            return "NEGATIVE"
        '''
        # Lower-case the review (vocabulary was built from lower-case text),
        # collect the indices of known words, and sum their weight rows.
        self.layer_1 *= 0
        unique_indices = set()
        for word in review.lower().split(" "):
            if word in self.word2index.keys():
                unique_indices.add(self.word2index[word])
        for index in unique_indices:
            self.layer_1 += self.weights_0_1[index]

        # Output layer
        ## New for Project 5: changed to use self.layer_1 instead of local layer_1
        layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))

        # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer;
        # return NEGATIVE for other values
        if(layer_2[0] >= 0.5):
            return "POSITIVE"
        else:
            return "NEGATIVE"
# -

# Run the following cell to recreate the network and train it once again.

mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])

# That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.

mlp.test(reviews[-1000:],labels[-1000:])

# # End of Project 5.
# ## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# # Further Noise Reduction<a id='lesson_6'></a>

Image(filename='sentiment_network_sparse_2.png')

# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()

# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]

from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()

# +
# BUGFIX: np.histogram was called with both `density=True` and the legacy
# `normed=True`. Passing both raises a TypeError on NumPy >= 1.15, and the
# `normed` parameter was removed entirely in NumPy 1.24; `density=True`
# alone is the correct, equivalent spelling.
hist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
# +
# Count how many words occur with each frequency (frequency-of-frequencies).
frequency_frequency = Counter()

for word, cnt in total_counts.most_common():
    frequency_frequency[cnt] += 1
# +
# BUGFIX: same `normed=True` removal as the histogram above.
hist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save",
           toolbar_location="above",
           title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
# -

# # Project 6: Reducing Noise by Strategically Reducing the Vocabulary<a id='project_6'></a>
#
# **TODO:** Improve `SentimentNetwork`'s performance by reducing more noise in the vocabulary. Specifically, do the following:
# * Copy the `SentimentNetwork` class from the previous project into the following cell.
# * Modify `pre_process_data`:
# >* Add two additional parameters: `min_count` and `polarity_cutoff`
# >* Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)
# >* Andrew's solution only calculates a postive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like.
# >* Change so words are only added to the vocabulary if they occur in the vocabulary more than `min_count` times.
# >* Change so words are only added to the vocabulary if the absolute value of their postive-to-negative ratio is at least `polarity_cutoff`
# * Modify `__init__`:
# >* Add the same two parameters (`min_count` and `polarity_cutoff`) and use them when you call `pre_process_data`

# +
# TODO: -Copy the SentimentNetwork class from Project 5 lesson
#       -Modify it according to the above instructions
# -

# Run the following cell to train your network with a small polarity cutoff.

mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])

# And run the following cell to test it's performance. It should be

mlp.test(reviews[-1000:],labels[-1000:])

# Run the following cell to train your network with a much larger polarity cutoff.

mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])

# And run the following cell to test it's performance.

mlp.test(reviews[-1000:],labels[-1000:])

# # End of Project 6.
# ## Watch the next video to see Andrew's solution, then continue on to the next lesson.
# # Analysis: What's Going on in the Weights?<a id='lesson_7'></a> mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01) mlp_full.train(reviews[:-1000],labels[:-1000]) Image(filename='sentiment_network_sparse.png') def get_most_similar_words(focus = "horrible"): most_similar = Counter() for word in mlp_full.word2index.keys(): most_similar[word] = np.dot(mlp_full.weights_0_1[mlp_full.word2index[word]],mlp_full.weights_0_1[mlp_full.word2index[focus]]) return most_similar.most_common() get_most_similar_words("excellent") get_most_similar_words("terrible") # + import matplotlib.colors as colors words_to_visualize = list() for word, ratio in pos_neg_ratios.most_common(500): if(word in mlp_full.word2index.keys()): words_to_visualize.append(word) for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]: if(word in mlp_full.word2index.keys()): words_to_visualize.append(word) # + pos = 0 neg = 0 colors_list = list() vectors_list = list() for word in words_to_visualize: if word in pos_neg_ratios.keys(): vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]]) if(pos_neg_ratios[word] > 0): pos+=1 colors_list.append("#00ff00") else: neg+=1 colors_list.append("#000000") # - from sklearn.manifold import TSNE tsne = TSNE(n_components=2, random_state=0) words_top_ted_tsne = tsne.fit_transform(vectors_list) # + p = figure(tools="pan,wheel_zoom,reset,save", toolbar_location="above", title="vector T-SNE for most polarized words") source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0], x2=words_top_ted_tsne[:,1], names=words_to_visualize, color=colors_list)) p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color") word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6, text_font_size="8pt", text_color="#555555", source=source, text_align='center') p.add_layout(word_labels) show(p) # green indicates positive words, black indicates negative words
sentiment-network/Sentiment_Classification_Projects.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
import pandas as pd

# Load the iris data set and name its columns (the raw file has no header row).
iris_data = pd.read_csv('iris.data')
iris_data.columns = ['sepal_length_cm', 'sepal_width_cm', 'petal_length_cm', 'petal_width_cm', 'class']
iris_data.head()
# -

# BUGFIX: this cell used `plt` before matplotlib.pyplot was imported (the
# import originally only appeared in a later cell), raising a NameError when
# the cells are run in order. Import it here before first use.
import matplotlib.pyplot as plt
from PIL import Image
img=Image.open('test.jpg')
plt.imshow(img)
plt.show()

iris_data.describe()

# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sb

# Pairwise scatter plots of all features, colored by class.
sb.pairplot(iris_data.dropna(), hue='class')
# -

# Violin plot of each feature's distribution per class.
plt.figure(figsize=(10, 10))

for column_index, column in enumerate(iris_data.columns):
    if column == 'class':
        continue
    plt.subplot(2, 2, column_index + 1)
    sb.violinplot(x='class', y=column, data=iris_data)

# +
# NOTE(review): sklearn.cross_validation was deprecated in scikit-learn 0.18
# and removed in 0.20 (moved to sklearn.model_selection). This notebook
# targets an old Python 2 / scikit-learn environment; migrate the import if
# running on a modern scikit-learn.
from sklearn.cross_validation import train_test_split

all_inputs = iris_data[['sepal_length_cm', 'sepal_width_cm',
                        'petal_length_cm', 'petal_width_cm']].values
all_classes = iris_data['class'].values

# Hold out 25% of the data for testing.
(training_inputs,
 testing_inputs,
 training_classes,
 testing_classes) = train_test_split(all_inputs, all_classes, train_size=0.75, random_state=1)
# +
from sklearn.tree import DecisionTreeClassifier

# DecisionTreeClassifier parameter notes (translated to English):
# 1.criterion: 'gini' or 'entropy'.
# 2.splitter: 'best' or 'random' - the former searches all features for the best
#   split point; the latter searches a random subset (useful for large data).
# 3.max_features: None (all), 'log2', 'sqrt', or N - with fewer than ~50
#   features, generally use all of them.
# 4.max_depth: with little data or few features this can be left unset; with
#   many samples and features, try limiting the depth.
# 5.min_samples_split: a node with fewer samples than this will not attempt to
#   pick a best feature and split further. No need to tune for small data
#   sets; increase it for very large ones.
# 6.min_samples_leaf: the minimum number of samples at a leaf; a leaf with
#   fewer samples is pruned together with its siblings. Ignore for small data
#   sets; for large ones (e.g. ~100k samples) try something like 5.
# 7.min_weight_fraction_leaf: the minimum weighted fraction of samples a leaf
#   must hold, otherwise it is pruned with its siblings. Default 0 (weights
#   ignored). Worth setting when many samples have missing values or the
#   class distribution is heavily skewed and sample weights are used.
# 8.max_leaf_nodes: caps the number of leaves to prevent overfitting; default
#   None (no limit). When set, the tree is built optimally within that budget.
#   Unnecessary with few features; with many features, constrain it and pick
#   the value via cross-validation.
# 9.class_weight: per-class sample weights, mainly to keep classes with many
#   training samples from dominating the tree. Weights can be given
#   explicitly, or 'balanced' lets the algorithm compute them (minority
#   classes get larger weights).
# 10.min_impurity_split: limits tree growth - if a node's impurity (gini,
#   information gain, MSE, MAE) falls below this threshold it stops splitting
#   and becomes a leaf.
decision_tree_classifier = DecisionTreeClassifier()

# Train the classifier on the training set
decision_tree_classifier.fit(training_inputs, training_classes)

# Validate the classifier on the testing set using classification accuracy
decision_tree_classifier.score(testing_inputs, testing_classes)
# +
from sklearn.cross_validation import cross_val_score
import numpy as np

decision_tree_classifier = DecisionTreeClassifier()

# cross_val_score returns a list of the scores, which we can visualize
# to get a reasonable estimate of our classifier's performance
cv_scores = cross_val_score(decision_tree_classifier, all_inputs, all_classes, cv=10)
print (cv_scores)
#kde=False
sb.distplot(cv_scores)
plt.title('Average score: {}'.format(np.mean(cv_scores)))
# +
# A deliberately underfit tree (depth 1) for comparison.
decision_tree_classifier = DecisionTreeClassifier(max_depth=1)

cv_scores = cross_val_score(decision_tree_classifier, all_inputs, all_classes, cv=10)
print (cv_scores)
sb.distplot(cv_scores, kde=False)
plt.title('Average score: {}'.format(np.mean(cv_scores)))
# +
# NOTE(review): sklearn.grid_search is likewise removed in modern scikit-learn
# (GridSearchCV lives in sklearn.model_selection, and StratifiedKFold's
# constructor signature changed) - migrate both if updating this notebook.
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold

decision_tree_classifier = DecisionTreeClassifier()

parameter_grid = {'max_depth': [1, 2, 3, 4, 5],
                  'max_features': [1, 2, 3, 4]}

cross_validation = StratifiedKFold(all_classes, n_folds=10)

grid_search = GridSearchCV(decision_tree_classifier,
                           param_grid=parameter_grid,
                           cv=cross_validation)

grid_search.fit(all_inputs, all_classes)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
# +
# Collect mean validation scores for every (max_depth, max_features) pair
# so they can be rendered as a 5 x 4 heatmap below.
grid_visualization = []

for grid_pair in grid_search.grid_scores_:
    grid_visualization.append(grid_pair.mean_validation_score)

grid_visualization = np.array(grid_visualization)
grid_visualization.shape = (5, 4)
sb.heatmap(grid_visualization, cmap='Blues') plt.xticks(np.arange(4) + 0.5, grid_search.param_grid['max_features']) plt.yticks(np.arange(5) + 0.5, grid_search.param_grid['max_depth'][::-1]) plt.xlabel('max_features') plt.ylabel('max_depth') # - decision_tree_classifier = grid_search.best_estimator_ decision_tree_classifier # + import sklearn.tree as tree from sklearn.externals.six import StringIO with open('iris_dtc.dot', 'w') as out_file: out_file = tree.export_graphviz(decision_tree_classifier, out_file=out_file) #http://www.graphviz.org/ # + from sklearn.ensemble import RandomForestClassifier random_forest_classifier = RandomForestClassifier() parameter_grid = {'n_estimators': [5, 10, 25, 50], 'criterion': ['gini', 'entropy'], 'max_features': [1, 2, 3, 4], 'warm_start': [True, False]} cross_validation = StratifiedKFold(all_classes, n_folds=10) grid_search = GridSearchCV(random_forest_classifier, param_grid=parameter_grid, cv=cross_validation) grid_search.fit(all_inputs, all_classes) print('Best score: {}'.format(grid_search.best_score_)) print('Best parameters: {}'.format(grid_search.best_params_)) grid_search.best_estimator_ # -
case3-iris_data/basic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from collections import defaultdict

# Sanity check of input data: every instruction line must have exactly
# 7 whitespace-separated tokens, e.g. "b inc 5 if a > 1".
with open("day/8/input") as fh:
    for line in fh:
        assert len(line.split()) == 7, "This is the problematic line {}".format(line)

# +
# Part 1: execute every conditional register instruction; registers default
# to 0 thanks to defaultdict.
registers = defaultdict(int)

with open("day/8/input") as fh:
    for line in fh:
        var, op, val, _, cond_var, comparison, cond_val = line.split()
        # using registers as globals adds the "__builtins__" key!
        # NOTE(review): eval() on file contents is only acceptable because the
        # puzzle input is trusted; never do this with untrusted data.
        if eval(cond_var + comparison + cond_val, None, registers):
            val = int(val) if op == "inc" else -int(val)
            registers[var] += val
# -

max(registers.values())

# # Part 2

# Part 2: same execution, but also track the highest value any register
# ever held during the run.
registers = defaultdict(int)
grand_maximum = 0
with open("day/8/input") as fh:
    for line in fh:
        var, op, val, _, cond_var, comparison, cond_val = line.split()
        if eval(cond_var + comparison + cond_val, None, registers):
            val = int(val) if op == "inc" else -int(val)
            registers[var] += val
            # Only the register just written can set a new running maximum
            # (all registers start at 0 and only change here), so compare
            # against it directly instead of rescanning every register with
            # max(registers.values()) on each instruction (O(n) per line).
            grand_maximum = max(grand_maximum, registers[var])

grand_maximum
Day08.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Neural networks from scratch in numpy

import numpy as np


def activation(x):
    """Element-wise logistic sigmoid, 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))


def initialise(no_nodes, mean=0.0, std=1.0):
    """Create one Gaussian-initialised weight matrix per layer transition.

    no_nodes lists the layer sizes, e.g. [2, 5, 3, 2].  Matrix i has shape
    (no_nodes[i], no_nodes[i-1] + 1); the extra column holds the bias weights.

    FIX: return a plain list instead of np.array([...]) — the matrices have
    different shapes, and wrapping such a ragged sequence in np.array()
    raises a ValueError on NumPy >= 1.24.  Every use in this notebook
    (iteration and nn[-1] indexing) works identically on a list.
    """
    return [np.random.normal(mean, std, [no_nodes[i], no_nodes[i - 1] + 1])
            for i in range(1, len(no_nodes))]


def forward(nn, inputs):
    """Forward-propagate a batch through the network.

    inputs has shape (n_samples, n_inputs); returns (n_samples, n_outputs).
    """
    outputs = inputs.transpose()
    for w in nn:
        # Append a row of ones so the last column of w acts as the bias.
        outputs = np.vstack([outputs, np.ones(outputs.shape[1])])
        outputs = np.matmul(w, outputs)
        outputs = activation(outputs)
    outputs = outputs.transpose()
    return outputs


# +
# Two inputs, five hidden, three hidden, two outputs
nn = initialise([2, 5, 3, 2])

# Show the weights
for i in nn:
    print(i)
# -

p = forward(nn, np.array([[1, 2], [3, 4]]))

# Show the output
print(p)

# Toy dataset: 1000 random points in the unit square, labelled one-hot by
# which side of the diagonal x0 <= x1 they fall on.
inputs = np.random.uniform(0, 1, [1000, 2])
actuals = np.column_stack([inputs[:, 0] <= inputs[:, 1],
                           inputs[:, 0] > inputs[:, 1]]).astype(float)

# +
import matplotlib.pyplot as pl
pl.rcParams['figure.figsize'] = (16, 10)

blue = inputs[inputs[:, 0] <= inputs[:, 1]]
yell = inputs[inputs[:, 0] > inputs[:, 1]]
pl.plot(blue[:, 0], blue[:, 1], 'b.')
pl.plot(yell[:, 0], yell[:, 1], 'y.')
pl.show()
# -

preds = forward(nn, inputs)


def cost(actuals, preds):
    """Per-unit squared-error cost, 0.5 * (target - prediction)^2."""
    return 0.5 * (actuals - preds)**2


cost(actuals, preds)

# d(cost)/d(pre-activation) for the output layer (sigmoid derivative applied).
preds * (1.0 - preds) * -(actuals - preds)


def back(nn, inputs, outputs):
    """Backpropagation — unfinished stub.

    FIX: the original body (`for w in nn[] return nn`) was a syntax error
    that prevented the whole file from parsing.  The manual single-example
    backprop worked through below stands in for it; this stub simply
    returns the weights unchanged.  TODO: implement the full update loop.
    """
    return nn


# +
# Do 1 training example first

def all_oij(nn, x):
    """Forward pass that also records every layer's activations.

    Returns a list with one (n_samples, layer_size) array per layer —
    the per-layer outputs o_ij that backpropagation needs.
    """
    oij = []
    outputs = x.transpose()
    for w in nn:
        outputs = np.vstack([outputs, np.ones(outputs.shape[1])])
        outputs = np.matmul(w, outputs)
        outputs = activation(outputs)
        oij.append(outputs.transpose())
    return oij


oij = all_oij(nn, np.array([inputs[0]]))
# -

oij

# For output layer: delta_j = (o_j - t_j) * o_j * (1 - o_j)
d_j_fin = (oij[-1] - actuals[0]) * oij[-1] * (1.0 - oij[-1])

# Outer-product-style weight gradients for the last layer; the hstack-ed
# ones column matches the bias input appended during the forward pass.
deltas = np.tile(np.hstack([oij[-2], np.ones([1, 1])]), [2, 1]) * \
    np.tile(d_j_fin.transpose(), [1, 4])

eta = 0.001

# These are the new weights for last layer
nn[-1] - eta * deltas

d_j_fin.shape, nn[-1].shape

# Error signal propagated back to the previous layer (including bias slot).
np.matmul(d_j_fin, nn[-1]) * np.hstack([oij[-2], np.ones([1, 1])]) \
    * (1.0 - np.hstack([oij[-2], np.ones([1, 1])]))
numpy-neural-networks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Materialien zu <i>zufall</i> # # Autor: <NAME> - <EMAIL> # # ## Aufgaben 18 - Korrelation und Regression # <br> # <i>Die Aufgabe wurde entnommen aus<br> # <br> # <NAME><br> # Mathematik<br> # Stochastik<br> # <NAME> Verlag 1993<br> # S. 23 Aufgabe 12</i><br><br> # In einem Handelsunternehmen werden in einem bestimmten Zeitraum die Angebots-<br> # preise und die Absatzmengen für eine bestimmte Ware aufgezeichnet # <div style='font-family:roman; font-size:13px'> # <table align='left', style=> # <col width='200'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <col width='40'> # <row align='right'> # <tr> # <td>Angebotspreis in DM/kg &nbsp;&nbsp;($x$)</th> # <td>2.49</th> # <td>2.51</th> # <td>2.53</th> # <td>2.57</th> # <td>2.58</th> # <td>2.62</th> # <td>2.67</th> # <td>2.68</th> # <td>2.77</th> # <td>2.78</th> # <td>2.84</th> # <td>2.85</th> # </tr> # <tr> # <td>Absatzmenge in kg &nbsp;&nbsp;($y$)</th> # <td>670</th> # <td>620</th> # <td>680</th> # <td>630</th> # <td>680</th> # <td>520</th> # <td>560</th> # <td>620</th> # <td>530</th> # <td>580</th> # <td>540</th> # <td>560</th> # </tr # </table> # </div> # <br><br><br><br><br> # a) Berechnen Sie den Korrelationskoeffizienten # <br><br> # b) Stellen Sie den Zusammenhang zwischen Angebotspreis und Absatzmenge durch <br> # &nbsp;&nbsp;&nbsp;&nbsp;eine lineare Funktion $y = a_0 + a_1 x$ dar # <br> # c) Zeichnen Sie das Streudiagramm und die Regressionsgerade # <br> # %run zufall/start x = DR( 2.49, 2.51, 2.53, 2.57, 2.58, 2.62, 2.67, 2.68, 2.77, 2.78, 2.84, 2.85 ) y = DR( 670, 620, 680, 630, 680, 520, 560, 620, 530, 580, 540, 560 
) x.korr_koeff(y) # Ergebnis zu a) x.streu_diagr(y, g=ja) # Ergebnis zu b) und c)
zufall/mat/aufgaben18.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## ADS4 - Champion/Challenger demo # ## Credit risk assesment with Fannie Mae dataset # + #create Spark context and import libraries from pyspark import SparkContext sc = SparkContext.getOrCreate() import pyspark.sql.functions as F from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType, DateType, TimestampType from pyspark.sql import SparkSession import pandas as pd sqlContext = SparkSession.builder.getOrCreate() import matplotlib.pyplot as plt # %matplotlib inline # + import numpy as np from jpmml_sparkml import toPMMLBytes from pyspark.ml.feature import SQLTransformer from pyspark.ml import Pipeline from pyspark.ml.classification import RandomForestClassifier, LogisticRegression from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator from pyspark.ml.tuning import CrossValidator, ParamGridBuilder import re from pyspark.ml.feature import VectorAssembler from pyspark.ml.feature import Normalizer, MinMaxScaler import time import locale locale.setlocale(locale.LC_ALL, 'en_US') # - # ## Load Fannie Mae locations and data structures # + #datasourcel = 's3a://10x-ai-dev/data/fannieMae/' datasource="file:///Users/roxana.danger/Documents/OneDrive - 10x Banking/AI/datasets/fannie_mae" #define the data schema for the datafiles acquisition_schema = StructType([ StructField("loan_identifier", StringType(), False), StructField("channel", StringType(), True), StructField("seller_name", StringType(), True), StructField("original_interest_rate", DoubleType(), True), StructField("original_unpaid_principal_balance", DoubleType(), True), StructField("original_loan_term", IntegerType(), True), 
StructField("origination_date", DateType(), True), StructField("first_payment_date", DateType(), True), StructField("original_ltv", DoubleType(), True), StructField("original_cltv", DoubleType(), True), StructField("number_of_borrowers", IntegerType(), True), StructField("dti", DoubleType(), True), StructField("borrower_credit_score", IntegerType(), True), StructField("first_time_homebuyer", StringType(), True), StructField("loan_purpose", StringType(), True), StructField("property_type", StringType(), True), StructField("number_of_units", StringType(), True), StructField("occupancy_status", StringType(), True), StructField("property_state", StringType(), True), StructField("zip_code", StringType(), True), StructField("mortgage_insurance_pct", DoubleType(), True), StructField("product_type", StringType(), True), StructField("coborrower_credit_score", IntegerType(), True) ]) performance_schema = StructType([ StructField("loan_identifier", StringType(), False), StructField("monthly_reporting_period", StringType(), True), StructField("servicer_name", StringType(), True), StructField("current_interest_rate", DoubleType(), True), StructField("current_actual_unpaid_principal_balance", DoubleType(), True), StructField("loan_age", IntegerType(), True), StructField("remaining_months_to_legal_maturity", IntegerType(), True), StructField("adjusted_remaining_months_to_matruity", IntegerType(), True), StructField("maturity_date", StringType(), True), StructField("metropolitan_statistical_area", StringType(), True), StructField("current_loan_delinquency_status", StringType(), True), StructField("modification_flag", StringType(), True), StructField("zero_balance_code", StringType(), True), StructField("zero_balance_effective_date", StringType(), True), StructField("last_paid_installment_date", StringType(), True), StructField("foreclosure_date", StringType(), True), StructField("disposition_date", StringType(), True), StructField("foreclosure_costs", DoubleType(), True), 
StructField("property_preservation_and_repair_costs", DoubleType(), True), StructField("asset_recovery_costs", DoubleType(), True), StructField("miscellaneous_holding_expenses_and_credits", DoubleType(), True), StructField("associated_taxes_for_holding_property", DoubleType(), True), StructField("net_sale_proceeds", DoubleType(), True), StructField("credit_enhancement_proceeds", DoubleType(), True), StructField("repurchase_make_whole_proceeds", DoubleType(), True), StructField("other_foreclosure_proceeds", DoubleType(), True), StructField("non_interest_bearing_upb", DoubleType(), True), StructField("principal_forgiveness_upb", DoubleType(), True) ]) # - # ## Step 1: Read data from S3 (views of aggregated data from Redshift and DynamoDB) # + #Read files into pyspark sql dataframes datafile1= datasource + "/data/*/Acquisition_2000*.txt" datafile2= datasource + "/data/*/Performance_2000*.txt" starttime = time.time() acqu=sqlContext.read.format('com.databricks.spark.csv')\ .options(delimiter="|")\ .option('dateFormat', 'MM/yyyy') \ .schema(acquisition_schema) \ .load(datafile1) perf=sqlContext.read.format('com.databricks.spark.csv')\ .options(delimiter="|")\ .option('dateFormat', 'MM/yyyy') \ .schema(performance_schema) \ .load(datafile2) a = acqu.count() p = perf.count() print("number of borrowers: "+locale.format("%d", a, grouping=True)) print("number of payments: "+locale.format("%d", p, grouping=True)) print "Data loaded in " + str(time.time()-starttime) + " seconds." 
# - # ## Step 2: Generate dataset for modeling # ## 2.1 Update delinquency status per client from raw data # + #create a default flag in the performance data (3-months or more passed due = 1) new_column= F.when((F.col("current_loan_delinquency_status")=="0") | (F.col("current_loan_delinquency_status")=="1") | (F.col("current_loan_delinquency_status")=="2") |(F.col("current_loan_delinquency_status")=="X"), 0).otherwise(1) perf_new = perf.withColumn("default",new_column) #select columns we need to do the aggregation df=perf_new.select("loan_identifier","default") #we have multiple default flags for each loan identifier so we now consolidate this by taking the "max" df_new = df.groupby(df.loan_identifier).agg(F.max('default')).collect() #read the data back into an RDD data = sc.parallelize(df_new) df1 = sqlContext.createDataFrame(data, ('loan_identifier', 'default')) #merge the default data with the acquisition data joined_df = df1.join(acqu, df1.loan_identifier==acqu.loan_identifier) print 'Acquisition and performance datasets.... merged.' # - # ## 2.2. Obtaining a balanced dataset # + #spilt the data into zeros and ones so we can take a sub-samples of the zeros zeros = joined_df.where((F.col('default')==0)) ones= joined_df.where((F.col('default')==1)) default_count = ones.count() #create random sub sample zeros_sample = zeros.rdd.takeSample(False, default_count, seed=0) #read the data back into RDD format so they can be merged ones_sample_rdd = ones.rdd zeros_sample_rdd = sc.parallelize(zeros_sample) #merge ones and zeros into a single RDD sample_rdd = zeros_sample_rdd.union(ones_sample_rdd) sample_sdf = sqlContext.createDataFrame(sample_rdd) sample_sdf = sample_sdf.withColumn('label', sample_sdf['default'].cast('double')).drop('default') sample_df = sample_sdf.toPandas() sample_df.head() print 'Balanced dataset.... created.' 
# - # ## 2.3 Save Training and Test dataset and create Spark's dataframes # + readdata = True if readdata: train_df = pd.read_csv('credit_risk_fannieMae_train.csv') test_df = pd.read_csv('credit_risk_fannieMae_test.csv') else: sample_df.to_csv('credit_risk_fannieMae.csv') msk = np.random.rand(len(sample_df)) < 0.7 train_df = sample_df[msk] test_df = sample_df[~msk] train_df.to_csv('credit_risk_fannieMae_train.csv') test_df.to_csv('credit_risk_fannieMae_test.csv') trainingData_sdf = sqlContext.createDataFrame(train_df) testData_sdf = sqlContext.createDataFrame(test_df) print 'Training and test datasets... saved/loaded.' # - # ## Step 3. Modelling and testing champion and challenger models # ## 3.1 Defining quality measures computing # + from pyspark.mllib.evaluation import BinaryClassificationMetrics from pyspark.mllib.evaluation import MulticlassMetrics def getLabels(labelsAndPredictionsPairs): return labelsAndPredictionsPairs.map(lambda lp: lp['label']).distinct().collect() def computeQuality(testData_labelAndPrediction_sdf, labels=[],\ qmetrics=['auc', 'accuracy', 'precision', 'recall', 'f1']): labelsAndPredictions = testData_labelAndPrediction_sdf.select(['label', 'prediction']).rdd measureResults = {} metrics_binary = BinaryClassificationMetrics(labelsAndPredictions) metrics_multiclass = MulticlassMetrics(labelsAndPredictions) if 'auc' in qmetrics: measureResults['auc'] = metrics_binary.areaUnderROC if 'accuracy' in qmetrics: measureResults['accuracy'] = metrics_multiclass.accuracy if 'precision' in qmetrics: measureResults['precision'] = metrics_multiclass.precision() if 'recall' in qmetrics: measureResults['recall'] = metrics_multiclass.recall() if 'f1' in qmetrics: measureResults['f1'] = metrics_multiclass.fMeasure() predictedLabels=getLabels(labelsAndPredictions) print 'labels:', predictedLabels for label in labels: if 'precision' in qmetrics: try: measureResults['precision_'+str(label)] = metrics_multiclass.precision(label) except: None if 'recall' in 
qmetrics: try: measureResults['recall_'+str(label)] = metrics_multiclass.recall(label) except: None if 'f1' in qmetrics: try: measureResults['f1_'+str(label)] = metrics_multiclass.fMeasure(label) except: None return measureResults # - # ## 3.2 Logistic regression modelling and testing - Champion model # ### 3.2.1 Logistic regression modelling # + starttime = time.time() assembler_oupb = VectorAssembler(inputCols=["original_unpaid_principal_balance"], outputCol="features_oupb") normalizer_oupb = MinMaxScaler(inputCol='features_oupb', outputCol='scaled_original_unpaid_principal_balance') assembler_oir = VectorAssembler(inputCols=["original_interest_rate"], outputCol="features_oir") normalizer_oir = MinMaxScaler(inputCol='features_oir', outputCol='scaled_original_interest_rate') assembler_ltv = VectorAssembler(inputCols=["original_ltv"], outputCol="features_ltv") normalizer_ltv = MinMaxScaler(inputCol='features_ltv', outputCol='scaled_original_ltv') assembler_ocltv = VectorAssembler(inputCols=["original_ltv"], outputCol="features_ocltv") normalizer_ocltv = MinMaxScaler(inputCol='features_ocltv', outputCol='scaled_original_cltv') assembler_dti = VectorAssembler(inputCols=["dti"], outputCol="features_dti") normalizer_dti = MinMaxScaler(inputCol='features_dti', outputCol='scaled_dti') assembler_bcs = VectorAssembler(inputCols=["borrower_credit_score"], outputCol="features_bcs") normalizer_bcs = MinMaxScaler(inputCol='features_bcs', outputCol='scaled_borrower_credit_score') assembler_mipct = VectorAssembler(inputCols=["mortgage_insurance_pct"], outputCol="features_mipct") normalizer_mipct = MinMaxScaler(inputCol='features_mipct', outputCol='scaled_mortgage_insurance_pct') assembler_nob = VectorAssembler(inputCols=["number_of_borrowers"], outputCol="features_nob") normalizer_nob = MinMaxScaler(inputCol='features_nob', outputCol='scaled_number_of_borrowers') assembler_ccs = VectorAssembler(inputCols=["coborrower_credit_score"], outputCol="features_ccs") normalizer_ccs = 
MinMaxScaler(inputCol='features_ccs', outputCol='scaled_coborrower_credit_score') assembler_lr = VectorAssembler( inputCols=["scaled_original_unpaid_principal_balance",\ "scaled_original_interest_rate"],\ #"scaled_original_ltv",\ #"scaled_original_cltv",\ #"scaled_dti",\ #"scaled_borrower_credit_score"],\ #"scaled_mortgage_insurance_pct"], \ #"scaled_number_of_borrowers", \ #"scaled_coborrower_credit_score"], outputCol="features") lr = LogisticRegression(labelCol="label", featuresCol="features", maxIter=20, regParam=0.001)#, weightCol="weight") #normalizer2 = new Normalizer(p = Double.PositiveInfinity) #pipeline_lr = Pipeline(stages=[assembler, normalizer, lr]) #pipeline_lr = Pipeline(stages=[normalizer_oupb, normalizer_oir, normalizer_oltv, normalizer_ocltv,\ # normalizer_dti, normalizer_bcs, normalizer_mipct, normalizer_nob,\ # normalizer_ccs, assembler_lr, lr]) pipeline_lr = Pipeline(stages=[assembler_oupb, normalizer_oupb,\ assembler_oir, normalizer_oir,\ #assembler_ltv, normalizer_ltv,\ #assembler_ocltv, normalizer_ocltv,\ #assembler_dti, normalizer_dti,\ #assembler_bcs, normalizer_bcs,\ #assembler_mipct, normalizer_mipct,\ #assembler_nob, normalizer_nob,\ #assembler_ccs, normalizer_ccs,\ assembler_lr, lr]) model_lr = pipeline_lr.fit(trainingData_sdf) print 'Logistic regression model... generated in ' + str(time.time()-starttime) + ' seconds.' 
# - # ### 3.2.2 Review Logistic regression model's performance # + def maxProb(x): return float(max(x)) def predLabel1(x): return float(x[1]) maxProbUDF = F.udf(maxProb, DoubleType()) predLabel1UDF = F.udf(predLabel1, DoubleType()) def pairLabelPredictions(model, testingdata_sdf, qmetrics=['auc', 'accuracy', 'precision', 'recall', 'f1'], label=None): predictions_sdf = model.transform(testingdata_sdf) #predictions_sdf.show(10, False) testData_labelAndPrediction_sdf = testingdata_sdf.select(['Unnamed: 0', 'label'])\ .join(predictions_sdf.select(['Unnamed: 0', 'prediction', 'probability']), \ on='Unnamed: 0')\ .withColumn('targetProb', predLabel1UDF(F.col('probability')))\ .select(['label', 'prediction', 'targetProb']) #print testData_labelAndPrediction_sdf testData_labelAndPrediction_sdf.show()#10, False) return testData_labelAndPrediction_sdf labels_predictions_lr = pairLabelPredictions(model_lr, testData_sdf) results_lr = computeQuality(labels_predictions_lr, [0.0, 1.0]) print results_lr # + correctPredictionUDF = F.udf(lambda arr: int(arr[0]==arr[1]), IntegerType()) def plotStabilityDuringPredictions(labels_predictions, legsymb, title): labels_predictions_withCorrPred = labels_predictions.withColumn( 'correctPred', correctPredictionUDF(F.array('label', 'prediction'))) y = np.array(labels_predictions_withCorrPred.select('correctPred').collect()) y1 = y.cumsum() x = np.arange(1, len(y)+1).astype(float) y1 = y1/x x=x/len(y) plt.plot(x, y1, legsymb + '--',label=title) plt.plot(x, x, 'b-', label="random model") plt.xlabel("percent total") plt.ylabel("accuracy") legend = plt.legend(loc='lower right', shadow=True) plt.show() return x, y1 x, y1 = plotStabilityDuringPredictions(labels_predictions_lr, 'r', 'Logistic regression') # - # ## 3.3 Random forest modelling and testing - Challenger model # ### 3.3.1 Random forest modelling starttime = time.clock() assembler = VectorAssembler( inputCols=["original_interest_rate","original_unpaid_principal_balance",\ 
"original_ltv","number_of_borrowers","dti","borrower_credit_score"], outputCol="features") rf = RandomForestClassifier(labelCol="label", featuresCol="features", numTrees=10) pipeline_rf = Pipeline(stages=[assembler, rf]) model_rf = pipeline_rf.fit(trainingData_sdf) print 'Random forest model... generated in ' + str(time.clock()-starttime) + ' seconds.' # ### 3.3.2 Review Random forest model's performance labels_predictions_rf = pairLabelPredictions(model_rf, testData_sdf) results_rf = computeQuality(labels_predictions_rf, [0.0, 1.0]) print results_rf x, y2 = plotStabilityDuringPredictions(labels_predictions_rf, 'g', 'Random forest') # ### 3.4 Comparing Champion-Challenger models, stability of predictions # + # red dashes, blue squares and green triangles plt.plot(x, y1, 'r--',label="Logistic regresion") plt.plot(x, y2, 'g--',label="Random forest") plt.plot(x, x,'b-',label="random model") plt.xlabel("percent total") plt.ylabel("accuracy") legend = plt.legend(loc='lower right', shadow=True) plt.show() # + ### Comparing Champion-Challenger models, roc curve from sklearn.metrics import roc_curve ytrue = labels_predictions_rf.rdd.map(lambda r: r.label).collect() ypred_rf = labels_predictions_rf.rdd.map(lambda r: r.targetProb).collect() ypred_lr = labels_predictions_lr.rdd.map(lambda r: r.targetProb).collect() fpr_rf, tpr_rf, thr_rf = roc_curve(ytrue, ypred_rf) fpr_lr, tpr_lr, thr_lr = roc_curve(ytrue, ypred_lr) #print 'RF: ', fpr_rf[:10], tpr_rf[:10], thr_rf[:10] #print 'LR: ', fpr_lr[:10], tpr_lr[:10], thr_lr[:10] plt.plot(fpr_lr, tpr_lr, 'r--',label="Logistic regresion") plt.plot(fpr_rf, tpr_rf, 'g--',label="Random forest") #plt.plot(x, x,'b-',label="random model") plt.xlabel("fpr") plt.ylabel("tpr") legend = plt.legend(loc='lower right', shadow=True) plt.show() #fpr, tpr, thresholds = roc_curve(list(labels_predictions_lr['label']), list(labels_predictions_lr['prediction'])) # - # ## Step 4: Deployment execution: credit risk real-time decisioning # + import time ''' 
loan_identifier, loan_identifier.1, channel, seller_name, original_interest_rate, original_unpaid_principal_balance, original_loan_term, origination_date, first_payment_date, original_ltv, original_cltv, number_of_borrowers, dti, borrower_credit_score, first_time_homebuyer, loan_purpose, property_type, number_of_units, occupancy_status, property_state, zip_code, mortgage_insurance_pct, product_type, coborrower_credit_score ''' def real_time_decision(model, modelname, interest_rate,loan_size,original_unpaid_principal_balance,loan_to_value,number_of_borrowers,debt_to_income,\ borrower_credit_score): data = [[0, interest_rate,loan_size,original_unpaid_principal_balance,loan_to_value,number_of_borrowers,debt_to_income,borrower_credit_score]] df = sqlContext.createDataFrame(data, ('original_interest_rate', 'original_loan_term', 'original_unpaid_principal_balance', 'original_ltv',\ 'number_of_borrowers', 'dti', 'borrower_credit_score')) start1 = time.time() default=model.transform(df) if default.select(['prediction']).collect()[0][0]==0: answer="Loan Approved by " + modelname else: answer="Loan Declined by " + modelname end1 = time.time() start2 = time.time() #pd = model_r.predict([interest_rate,loan_size,loan_to_value,number_of_borrowers,debt_to_income,borrower_credit_score])*default_count*2/a #pd_format = '{percent:.2%}'.format(percent=pd) end2 = time.time() elapsed_time1 = round(end1-start1,3) elapsed_time2 = round(end2-start2,3) return answer + " in " + str(elapsed_time1) +" seconds" # + " ;probability of default = " + pd_format + " calculated in "+ str(elapsed_time2) +" seconds" #debt_to_income = 10 #borrower_credit_score = 800 #original_unpaid_principal_balance = 300 #interest_rate =8.5 #loan_size = 100000 #loan_to_value = 75 #number_of_borrowers = 2 debt_to_income = 42.0 borrower_credit_score = 702.0 original_unpaid_principal_balance = 360 interest_rate =8.5 loan_size = 92000.0 loan_to_value = 64.0 number_of_borrowers = 2 print(real_time_decision(model_rf, 
'regression', interest_rate,original_unpaid_principal_balance,loan_size,loan_to_value,number_of_borrowers,debt_to_income,borrower_credit_score)) print(real_time_decision(model_lr, 'random forest', interest_rate,original_unpaid_principal_balance,loan_size,loan_to_value,number_of_borrowers,debt_to_income,borrower_credit_score)) # - # ## Step 5: Monitoring daily and cummulative model performance # ### Simulation of daily and cummulative quality measures computing # + schema = StructType([ StructField("label", DoubleType(), True), StructField("prediction", DoubleType(), True), StructField("_id", IntegerType(), True) ]) labelAndPredictions_sdf_rf = (pairLabelPredictions(model_rf, testData_sdf).rdd .zipWithIndex() .map(lambda row: (row[0][0], row[0][1], row[1])))\ .toDF(schema) labelAndPredictions_sdf_lr = (pairLabelPredictions(model_lr, testData_sdf).rdd .zipWithIndex() .map(lambda row: (row[0][0], row[0][1], row[1])))\ .toDF(schema) # - # ## Quality measures storage in Redshift # + from sqlalchemy import create_engine engine = create_engine('redshift+psycopg2://root:p455w0rdRS@tenx-vmdb-tableau.coovt4f86klw.eu-west-1.redshift.amazonaws.com:5439/fanniemae') insert_stmt_placehold = ''' INSERT INTO @TABLE(modelID,qmDate,accuracy,precision, recall,F1,AUC) values (@MODEL_ID, '@dateT00:00:00'::timestamp, @Accuracy, @P, @R, @F1, @AUC)''' def insertInRedshift(table, modelid, date, acc, p, r, f1, auc): insert_stmt = insert_stmt_placehold.replace('@TABLE', table)\ .replace('@MODEL_ID', str(round(modelid, 5)))\ .replace('@date', date)\ .replace('@Accuracy', str(round(acc, 5)))\ .replace('@P', str(round(p, 5)))\ .replace('@R', str(round(r, 5)))\ .replace('@F1', str(round(f1, 5)))\ .replace('@AUC', str(round(auc, 5))) print '\t', insert_stmt #engine.execute(insert_stmt) #commented to not change the available demo data available in Redshift # - # ## Compute daily and cummulative quality measures and save to Redshift # + import datetime import random #print 
set(labelAndPredictions_sdf_rf.select(['label']).collect()) #ids = labelAndPredictions_sdf_rf.select(['_id']).collect() #print len(set(ids)), min(ids), max(ids), labelAndPredictions_sdf_rf.count() #labelAndPredictions_sdf_rf = labelAndPredictions_sdf_rf.withColumn("id1", range(0, ndata)) #labelAndPredictions_sdf_rf.take(4) #print labelAndPredictions_sdf_rf.take(5) #print labelAndPredictions_sdf_lr.take(5) ndata = testData_sdf.count() indexStartDate = 0 date = datetime.date(2017, 1, 1) while indexStartDate < ndata and indexStartDate<500: npredictionscurrDate = random.randint(100, 200) indexEndDate = indexStartDate + npredictionscurrDate #print npredictionscurrDate, indexEndDate cumlabelAndPredictions_sdf_rf = labelAndPredictions_sdf_rf.filter(labelAndPredictions_sdf_rf._id < indexEndDate).select(['label', 'prediction']) cumlabelAndPredictions_sdf_lr = labelAndPredictions_sdf_lr.filter(labelAndPredictions_sdf_lr._id < indexEndDate).select(['label', 'prediction']) currlabelAndPredictions_sdf_rf = labelAndPredictions_sdf_rf.where((labelAndPredictions_sdf_rf["_id"] >= indexStartDate) & (labelAndPredictions_sdf_rf["_id"] < indexEndDate)).select(['label', 'prediction']) currlabelAndPredictions_sdf_lr = labelAndPredictions_sdf_lr.where((labelAndPredictions_sdf_lr["_id"] >= indexStartDate) & (labelAndPredictions_sdf_lr["_id"] < indexEndDate)).select(['label', 'prediction']) cum_rf = computeQuality(cumlabelAndPredictions_sdf_rf)#cumTestData_sdf1) cum_lr = computeQuality(cumlabelAndPredictions_sdf_lr)#cumTestData_sdf1) curr_rf = computeQuality(currlabelAndPredictions_sdf_rf)#currDayTestData_sdf1) curr_lr = computeQuality(currlabelAndPredictions_sdf_lr)#currDayTestData_sdf1) date_str = date.strftime('%m/%d/%Y') insertInRedshift('quality_measure_acc', 1, date_str, cum_rf['accuracy'], cum_rf['precision'], cum_rf['recall'], cum_rf['f1'], cum_rf['auc']) insertInRedshift('quality_measure_acc', 2, date_str, cum_lr['accuracy'], cum_lr['precision'], cum_lr['recall'], cum_lr['f1'], 
cum_lr['auc']) insertInRedshift('quality_measure_daily', 1, date_str, curr_rf['accuracy'], curr_rf['precision'], curr_rf['recall'], cum_rf['f1'], cum_rf['auc']) insertInRedshift('quality_measure_daily', 2, date_str, curr_lr['accuracy'], curr_lr['precision'], curr_lr['recall'], curr_lr['f1'], curr_lr['auc']) date = date + datetime.timedelta(days=1) indexStartDate = indexEndDate # -
FannieMae_credit_ risk_Champion-Challenger.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .java
//       format_name: light
//       format_version: '1.5'
//       jupytext_version: 1.14.4
//   kernelspec:
//     display_name: Java
//     language: java
//     name: java
// ---

// ## This demonstrates Tribuo classification for comparison with scikit-learn classification

// %jars ../../jars/tribuo-classification-experiments-4.1.0-jar-with-dependencies.jar
// %jars ../../jars/tribuo-classification-liblinear-4.1.0-jar-with-dependencies.jar
// %jars ../../jars/tribuo-json-4.1.0-jar-with-dependencies.jar

import java.nio.file.Paths;
import java.nio.file.Files;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.tribuo.*;
import org.tribuo.evaluation.TrainTestSplitter;
import org.tribuo.data.csv.CSVLoader;
import org.tribuo.math.optimisers.*;
import org.tribuo.classification.*;
import org.tribuo.classification.evaluation.*;
import org.tribuo.classification.sgd.linear.LogisticRegressionTrainer;
import org.tribuo.classification.sgd.linear.LinearSGDTrainer;
import org.tribuo.classification.liblinear.LibLinearClassificationTrainer;
import org.tribuo.classification.sgd.objectives.Hinge;
import org.tribuo.classification.dtree.CARTClassificationTrainer;
import org.tribuo.classification.xgboost.XGBoostClassificationTrainer;
import org.tribuo.Trainer;
import org.tribuo.util.Util;

// Classification output factory and a CSV loader producing Label examples.
var labelFactory = new LabelFactory();
var csvLoader = new CSVLoader<>(labelFactory);

// Column names for the cleaned weatherAUS dataset (wind directions are one-hot encoded).
var rainHeaders = new String[]{"Month", "MinTemp", "MaxTemp", "Rainfall", "WindGustSpeed", "WindSpeed9am", "WindSpeed3pm", "Humidity9am", "Humidity3pm", "Pressure9am", "Pressure3pm", "Temp9am", "Temp3pm", "RainToday", "WindGustDir_E", "WindGustDir_ENE", "WindGustDir_ESE", "WindGustDir_N", "WindGustDir_NE", "WindGustDir_NNE", "WindGustDir_NNW", "WindGustDir_NW", "WindGustDir_S", "WindGustDir_SE", "WindGustDir_SSE", "WindGustDir_SSW", "WindGustDir_SW", "WindGustDir_W", "WindGustDir_WNW", "WindGustDir_WSW", "WindDir9am_E", "WindDir9am_ENE", "WindDir9am_ESE", "WindDir9am_N", "WindDir9am_NE", "WindDir9am_NNE", "WindDir9am_NNW", "WindDir9am_NW", "WindDir9am_S", "WindDir9am_SE", "WindDir9am_SSE", "WindDir9am_SSW", "WindDir9am_SW", "WindDir9am_W", "WindDir9am_WNW", "WindDir9am_WSW", "WindDir3pm_E", "WindDir3pm_ENE", "WindDir3pm_ESE", "WindDir3pm_N", "WindDir3pm_NE", "WindDir3pm_NNE", "WindDir3pm_NNW", "WindDir3pm_NW", "WindDir3pm_S", "WindDir3pm_SE", "WindDir3pm_SSE", "WindDir3pm_SSW", "WindDir3pm_SW", "WindDir3pm_W", "WindDir3pm_WNW", "WindDir3pm_WSW", "RainTomorrowN"};

// This dataset is prepared in the notebook: scikit-learn Classifier - Data Cleanup
var weatherSource = csvLoader.loadDataSource(Paths.get("../../data/cleanedWeatherAUS.csv"),"RainTomorrowN",rainHeaders);
// 80/20 train/test split with a fixed seed (1L) for reproducibility.
var weatherSplitter = new TrainTestSplitter<>(weatherSource,0.8,1L);
var trainingDataset = new MutableDataset<>(weatherSplitter.getTrain());
var testingDataset = new MutableDataset<>(weatherSplitter.getTest());
System.out.println(String.format("Training data size = %d, number of features = %d, number of classes = %d",trainingDataset.size(),trainingDataset.getFeatureMap().size(),trainingDataset.getOutputInfo().size()));
System.out.println(String.format("Testing data size = %d, number of features = %d, number of classes = %d",testingDataset.size(),testingDataset.getFeatureMap().size(),testingDataset.getOutputInfo().size()));

// Note: the types including generics were tricky to get working
// Trains `trainer` on `trainData`, printing wall-clock training time, and
// returns the fitted model.
public Model train(String name, Trainer trainer, Dataset<Label> trainData) {
    // Train the model
    var startTime = System.currentTimeMillis();
    var model = trainer.train(trainData);
    var endTime = System.currentTimeMillis();
    System.out.println("Training " + name + " took " + Util.formatDuration(startTime,endTime));
    // Evaluate the model on the training data
    // var eval = new LabelEvaluator();
    // var evaluation = eval.evaluate(model,trainData);
    // Don't report training scores
    // System.out.println(evaluation.toString());
    //System.out.println(evaluation.getConfusionMatrix().toString());
    return model;
}

// Evaluates `model` on the held-out `testData` and prints per-class metrics.
public void evaluate(Model model, Dataset<Label> testData) {
    // Evaluate the model on the test data
    var eval = new LabelEvaluator();
    var evaluation = eval.evaluate(model,testData);
    System.out.println(evaluation.toString());
    //System.out.println(evaluation.getConfusionMatrix().toString());
}

// +
// Four trainers to compare: linear SGD with hinge loss + AdaGrad, LibLinear
// logistic regression, a CART decision tree, and XGBoost (100 rounds).
var lrsgd = new LinearSGDTrainer(
    new Hinge(),
    new AdaGrad(0.1, 0.1),
    // SGD.getLinearDecaySGD(0.01),
    5,
    Trainer.DEFAULT_SEED
);
var lr = new LibLinearClassificationTrainer();
var cart = new CARTClassificationTrainer();
var xgb = new XGBoostClassificationTrainer(100);
// -

System.out.println(lrsgd.toString());
System.out.println(lr.toString());
System.out.println(cart.toString());
System.out.println(xgb.toString());

// Turn off that SGD logging - it affects performance
var logger = Logger.getLogger(org.tribuo.common.sgd.AbstractSGDTrainer.class.getName());
logger.setLevel(Level.OFF);

// +
var lrsgdModel = train("Linear Regression (SGD)", lrsgd, trainingDataset);
// run 1
// time 2.22s
// run 2
// time 1.56s
// run 3
// time 1.54s
// +
evaluate(lrsgdModel,testingDataset);
// run 1
// Class recall prec f1
// No 0.959 0.854 0.903
// Yes 0.426 0.748 0.543
// run 2
// Class recall prec f1
// No 0.959 0.854 0.903
// Yes 0.426 0.748 0.543
// run 3
// Class recall prec f1
// No 0.959 0.854 0.903
// Yes 0.426 0.748 0.543
// -

// +
var lrModel = train("Linear Regression", lr, trainingDataset);
// run 1
// time 8.21 s
// run 2
// time 7.21 s
// run 3
// time 6.50 s
// +
evaluate(lrModel,testingDataset);
// run 1
// Class recall prec f1
// No 0.955 0.858 0.904
// Yes 0.449 0.740 0.559
// run 2
// Class recall prec f1
// No 0.955 0.858 0.904
// Yes 0.449 0.740 0.559
// run 3
// Class recall prec f1
// No 0.955 0.858 0.904
// Yes 0.449 0.740 0.559
// -

// +
var cartModel = train("Decision Tree", cart, trainingDataset);
// run 1
// time 5.43 s
// run 2
// time 5.19 s
// run 3
// time 4.24 s
// +
evaluate(cartModel,testingDataset);
// run 1
// Class recall prec f1
// No 0.896 0.861 0.878
// Yes 0.495 0.576 0.532
// run 2
// Class recall prec f1
// No 0.896 0.861 0.878
// Yes 0.495 0.576 0.532
// run 3
// Class recall prec f1
// No 0.896 0.861 0.878
// Yes 0.495 0.576 0.532
// -

// +
var xgbModel = train("XGBoost", xgb, trainingDataset);
// run 1
// time 1min 25s
// run 2
// time 1min 18s
// run 3
// time 1min 21s
// +
evaluate(xgbModel,testingDataset);
// run 1
// Class recall prec f1
// No 0.948 0.876 0.910
// Yes 0.531 0.745 0.620
// run 2
// Class recall prec f1
// No 0.948 0.876 0.910
// Yes 0.531 0.745 0.620
// run 3
// Class recall prec f1
// No 0.948 0.876 0.910
// Yes 0.531 0.745 0.620
// -
notebooks/classifier/Tribuo Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Notebook for investigating JONSWAP waves. <br/> # References: <br/> # DNVGL-RP-F109 On-bottom stability design of submarine pipelines, edition May 2017. <br/> # DNVGL-RP-C205 Environmental conditions and environmental loads, edition August 2017. <br/> # <NAME>.and <NAME>., 1986, A direct method of calculating bottom orbital velocity under waves, Technical Report No. SR76, Hydraulics Research Wallingford. http://eprints.hrwallingford.co.uk/761/ # %matplotlib inline import numpy as np import scipy.optimize import scipy.integrate import matplotlib.pyplot as plt # $S_{\eta\eta} = \alpha \cdot g^2 \cdot \omega^{-5} \cdot # \exp^{-\frac{5}{4}\left(\frac{\omega}{\omega_p}\right)^{-4}} \cdot # \gamma^{\exp\left(-0.5 \left(\frac{\omega-\omega_p}{\sigma \cdot \omega_p} \right)^2 \right)}$ # &nbsp; &nbsp; &nbsp; JONSWAP spectrum «S_etaeta» # # $\alpha = \frac{5}{16} \cdot \frac{H_s^2 \cdot \omega_p^4}{g^2} \cdot # \left(1 - 0.287 \cdot \ln{\gamma} \right)$ # &nbsp; &nbsp; &nbsp; Phillip's constant «phillips» # # $\sigma = \begin{cases} # 0.07 \quad if \, \omega < \omega_p\\ # 0.09 \quad else\\ # \end{cases} $ # &nbsp; &nbsp; &nbsp; Spectral width parameter «sigma» # # $\gamma = \begin{cases} # 5.0 \hspace{9em} \phi \leq 3.6\\ # \exp \left(5.75-1.15 \phi \right) \qquad 3.6 < \phi < 5.0\\ # 1.0 \hspace{9em} \phi \geq 5.0\\ # \end{cases} $ # &nbsp; &nbsp; &nbsp; Peak-enhancement factor «gamma» # # $\phi = \frac{T_p}{\sqrt{H_s}}$ &nbsp; &nbsp; &nbsp; «phi» # # Ref: DNVGL-RP-F109, sec. 3.4.3, p. 16, eqns. 
3.4, 3.5, 3.6, 3.7

def JONSWAP(omega, H_s, T_p, grav_acc=9.81, gamma=None):
    """JONSWAP surface elevation spectrum per DNVGL-RP-F109 sec. 3.4.3.

    omega: angular frequencies [rad/s] (numpy array); H_s: significant wave
    height [m]; T_p: peak period [s]; gamma: peak-enhancement factor, derived
    from phi = T_p/sqrt(H_s) when not given.
    Returns (S_etaeta, gamma).
    """
    omega_p = 2*np.pi/T_p
    # Spectral width parameter sigma: 0.07 at/below the peak frequency, 0.09 above.
    sigma = np.full(omega.shape, 0.09)
    sigma[omega<=omega_p] = 0.07
    phi = T_p / np.sqrt(H_s)
    # NOTE(review): `if not gamma` also triggers for an explicit gamma=0 —
    # presumably intended only for gamma=None; confirm before passing 0.
    if not gamma:
        if phi<=3.6:
            gamma = 5
        elif 3.6<phi<5:
            gamma = np.exp(5.75-1.15*phi)
        else:
            gamma = 1
    # Phillip's constant (eq. 3.5).
    phillips = ( 5/16 * H_s**2 * omega_p**4 / grav_acc**2 * (1-0.287*np.log(gamma)) )
    # Spectrum (eq. 3.4): Pierson-Moskowitz shape times the peak-enhancement term.
    S_etaeta = ( phillips * grav_acc**2 * omega**-5 * np.exp(-5/4 * (omega/omega_p)**-4 ) * gamma**np.exp(-0.5*((omega-omega_p)/(sigma*omega_p))**2 ) )
    return S_etaeta, gamma

# Reproduce DNVGL-RP-C205 fig. 3-9: same sea state, three gamma values.
omega = np.linspace(0.01, 3.0, 300)
S_etaeta1, gamma = JONSWAP(omega, H_s=4, T_p=8, gamma=1)
S_etaeta2, gamma = JONSWAP(omega, H_s=4, T_p=8, gamma=2)
S_etaeta3, gamma = JONSWAP(omega, H_s=4, T_p=8, gamma=5)
plt.title("JONSWAP spectra ($H_s=4m$, $T_p=8s$) \n example from DNVGL-RP-C205, sec. 3.5.5.2, fig. 3-9\n")
plt.plot(omega, S_etaeta1, '-r', label='$\gamma=1$')
plt.plot(omega, S_etaeta2, '-g', label='$\gamma=2$')
plt.plot(omega, S_etaeta3, '-b', label='$\gamma=5$')
plt.xlim([0,3])
plt.ylim([0,6])
plt.xlabel("$\omega$", size="x-large")
plt.ylabel("$S_{\eta\eta}(\omega)$", size="x-large")
plt.grid(True)
plt.legend(loc='best')

# $T = \left[ \frac{g}{2\pi\lambda} \cdot \tanh \left( \frac{2\pi d}{\lambda} \right) \right]^{-\frac{1}{2}}$
# &nbsp; &nbsp; &nbsp; $\lambda$ wave length «wavelen», &nbsp; &nbsp; &nbsp; T wave period
#
# $k = \frac{2 \pi}{\lambda}$ &nbsp; &nbsp;wave number,
# &nbsp; &nbsp; &nbsp;
# $\omega = \frac{2 \pi}{T}$ &nbsp; &nbsp; angular frequency «omega»
#
# Ref: DNVGL-RP-C205, sec. 3.2.2.3, p.
46

def find_wavelen_func(_lambda, T, depth, grav_acc=9.81):
    """Dispersion-relation residual: zero when wave length `_lambda` matches
    wave period `T` [s] at water depth `depth` [m] (linear wave theory,
    DNVGL-RP-C205 sec. 3.2.2.3)."""
    return T**2 - 2*np.pi*_lambda/grav_acc / np.tanh(2*np.pi*depth/_lambda)

def calc_wave_length(T, depth, grav_acc=9.81):
    """Solve the linear dispersion relation for wave length [m].

    T may be a scalar or a numpy array of wave periods [s]; `depth` is the
    water depth [m].  Roots are bracketed in [0.1, 1000] m; array entries with
    no root in that bracket come back as NaN.
    """
    if isinstance(T, np.ndarray):
        wavelen = np.zeros_like(T)
        for ii, _T in enumerate(T):
            try:
                # BUG FIX: forward grav_acc to the root function — it was
                # silently ignored before, so a non-default gravity had no effect.
                wavelen[ii] = scipy.optimize.bisect(
                    find_wavelen_func, 0.1, 1000, args=(_T, depth, grav_acc))
            except ValueError:
                # bisect raises ValueError when f(0.1) and f(1000) share a sign
                # (no bracketed root).  np.nan replaces np.NaN, which was
                # removed in NumPy 2.0.
                wavelen[ii] = np.nan
    else:
        wavelen = scipy.optimize.bisect(
            find_wavelen_func, 0.1, 1000, args=(T, depth, grav_acc))
    return wavelen

# Spot checks: deep water (lambda -> g*T^2/(2*pi)), very shallow water, and an
# intermediate depth.
T=10; depth=2000
wavelen = calc_wave_length(T, depth)
print(f"wave length={wavelen:.3f}m (T={T}s, depth={depth}m)")
T=10; depth=1
wavelen = calc_wave_length(T, depth)
print(f"wave length={wavelen:.3f}m (T={T}s, depth={depth}m)")
T=15; depth=90
wavelen = calc_wave_length(T, depth)
print(f"wave length={wavelen:.3f}m (T={T}s, depth={depth}m)")

# $S_{UU}(\omega) = G^2(\omega) \cdot S_{\eta\eta}(\omega)$
#
# $G(\omega) = \frac{\omega}{\sinh(k \cdot d)}$
#
# Ref: DNVGL-RP-F109, sec. 3.4.3, p. 17, eqns.
3.8, 3.9

def JONSWAP_depth_transform(omega, S_etaeta, depth, D=0, e=0, grav_acc=9.81):
    """Transform a surface elevation spectrum into a wave-induced velocity
    spectrum at the seabed (DNVGL-RP-F109 sec. 3.4.3, eqns. 3.8/3.9).

    omega: angular frequencies [rad/s]; S_etaeta: surface spectrum; depth:
    water depth [m]; D, e raise the evaluation point above the seabed
    (cosh(k*(D+e)) term — presumably pipe diameter and embedment; confirm
    against DNVGL-RP-F109 usage).
    Returns (S_uu, G) with G the transfer function.
    """
    T = 2*np.pi/omega
    wavelen = calc_wave_length(T, depth)
    k = 2*np.pi/wavelen
    G = omega*np.cosh(k*(D+e))/np.sinh(k*depth)
    S_uu = G**2 * S_etaeta
    return S_uu, G

# Surface spectrum vs transformed spectrum at depth.
omega = np.linspace(0.01, 3.0, 300)
T_p=15; depth=90; H_s=10
omega = np.linspace(0.1, 3.0, 100)
S_etaeta, gamma = JONSWAP(omega, H_s=4, T_p=8, gamma=1)
S_uu, G = JONSWAP_depth_transform(omega, S_etaeta, D=0, depth=depth)
plt.title(f"JONSWAP spectra - surface and at depth \n($H_s={H_s}m$, $T_p={T_p}s$, $depth={depth}m$, $\gamma={gamma}$)")
plt.plot(omega, S_etaeta, '-r', label='$S_{\eta\eta}$')
plt.plot(omega, G, '--g', label='$G$')
plt.plot(omega, S_uu, '-b', label='$S_{UU}$')
plt.xlim([0,3])
plt.ylim([0,2])
plt.xlabel("$\omega$", size="x-large")
plt.ylabel("$S(\omega)$", size="x-large")
plt.grid(True)
plt.legend(loc='best')

# Spectral moments M_0/M_2, significant velocity U_s and zero-crossing period
# T_u: closed-form approximations vs numerical integration of the spectrum.
T_p=15; depth=90; H_s=10
omega = np.linspace(0.1, 3.0, 100)
S_etaeta, gamma = JONSWAP(omega, H_s=H_s, T_p=T_p, gamma=1)
M_0 = 1/16 * H_s**2 # DNVGL-RP-C205, sec. 3.5.5.3, p. 66
U_s = 2 * np.sqrt(M_0)
print(f"approx. M_0 = {M_0}, U_s={U_s}")
M_0 = scipy.integrate.trapz(S_etaeta, omega)
print(f"M_0 = {M_0}")
M_2 = 1/16 * H_s**2 * (2*np.pi/T_p)**2 * (11+gamma)/(5+gamma) # DNVGL-RP-C205, sec. 3.5.5.3, p. 66
print(f"approx. M_2 = {M_2}")
M_2 = scipy.integrate.trapz(S_etaeta*omega**2, omega)
print(f"M_2 = {M_2}")
U_s = 2 * np.sqrt(M_0)
print(f"U_s = {U_s}")
T_u = 2 * np.pi * np.sqrt(M_0/M_2)
print(f"T_u = {T_u}")
# Same moments after transforming to depth.
S_uu, G = JONSWAP_depth_transform(omega, S_etaeta, D=0, depth=depth)
S_uu = np.nan_to_num(S_uu)
#print(S_uu)
M_0 = scipy.integrate.trapz(S_uu, omega)
print(f"M_0 = {M_0}, depth={depth}")
M_2 = scipy.integrate.trapz(S_uu*omega**2, omega)
print(f"M_2 = {M_2}, depth={depth}")
U_s = 2 * np.sqrt(M_0)
print(f"U_s = {U_s}")
T_u = 2 * np.pi * np.sqrt(M_0/M_2)
print(f"T_u = {T_u}")

# example DNV-RP-F105 course p. 67
T_p=15; depth=90; H_s=10
T_n = np.sqrt(depth/9.81)
print(f"T_n = {T_n}")
T_n_T_p = T_n/T_p
print(f"T_n/T_p = {T_n_T_p}")
U_s = 0.2*H_s/T_n
print(f"U_s = {U_s}")

# 1986 Soulsby and Smallman p. 17
T_z=10; depth=50; H_s=5
T_p = 1.281 *T_z # eq. 19, p. 13 (see also Table 1 in appendix)
omega = np.linspace(0.1, 3.0, 100)
S_etaeta, gamma = JONSWAP(omega, H_s=H_s, T_p=T_p, gamma=1)
M_0 = 1/16 * H_s**2 # DNVGL-RP-C205, sec. 3.5.5.3, p. 66
U_s = 2 * np.sqrt(M_0)
print(f"approx. M_0 = {M_0}, U_s={U_s}")
M_0 = scipy.integrate.trapz(S_etaeta, omega)
print(f"M_0 = {M_0}")
M_2 = 1/16 * H_s**2 * (2*np.pi/T_p)**2 * (11+gamma)/(5+gamma) # DNVGL-RP-C205, sec. 3.5.5.3, p. 66
print(f"approx. M_2 = {M_2}")
M_2 = scipy.integrate.trapz(S_etaeta*omega**2, omega)
print(f"M_2 = {M_2}")
U_s = 2 * np.sqrt(M_0)
print(f"U_s = {U_s}")
T_u = 2 * np.pi * np.sqrt(M_0/M_2)
print(f"T_u = {T_u}")
S_uu, G = JONSWAP_depth_transform(omega, S_etaeta, D=0, depth=depth)
S_uu = np.nan_to_num(S_uu)
#print(S_uu)
M_0 = scipy.integrate.trapz(S_uu, omega)
print(f"M_0 = {M_0}, depth={depth}")
M_2 = scipy.integrate.trapz(S_uu*omega**2, omega)
print(f"M_2 = {M_2}, depth={depth}")
T_n = np.sqrt(depth/9.81)
print(f"T_n = {T_n}, depth={depth}")
U_s = 2 * np.sqrt(M_0)
print(f"U_s = {U_s}")
T_u = 2 * np.pi * np.sqrt(M_0/M_2)
print(f"T_u = {T_u}")

def calc_Us_Tu_at_depth(depth, H_s, T_p, gamma=1, grav_acc=9.81):
    """Significant velocity U_s, zero-crossing period T_u and reference
    period T_n at the seabed for a JONSWAP sea state (H_s [m], T_p [s]).

    Integrates the depth-transformed spectrum over omega in [0.1, 5.0] rad/s.
    Returns (U_s, T_u, T_n).
    """
    omega = np.linspace(0.1, 5.0, 600)
    S_etaeta, gamma = JONSWAP(omega, H_s=H_s, T_p=T_p, gamma=gamma)
    S_uu, G = JONSWAP_depth_transform(omega, S_etaeta, D=0, depth=depth)
    # NaNs appear where the dispersion solve failed; zero them before integrating.
    S_uu = np.nan_to_num(S_uu)
    M_0 = scipy.integrate.trapz(S_uu, omega)
    M_2 = scipy.integrate.trapz(S_uu*omega**2, omega)
    U_s = 2 * np.sqrt(M_0)
    T_u = 2 * np.pi * np.sqrt(M_0/M_2)
    T_n = np.sqrt(depth/grav_acc)
    return U_s, T_u, T_n

# Soulsby & Smallman (1986), p. 17
H_s = 5
T_z = 8
T_p = 1.281*T_z
gamma=1
depth=10
U_s, T_u, T_n = calc_Us_Tu_at_depth(depth=depth, H_s=H_s, T_p=T_p, gamma=gamma)
Tn_Tz = T_n/T_z
print(f"U_s={U_s:.3f}, T_u={T_u:.3f}, T_n={T_n:.3f}, T_n/T_z={Tn_Tz:.3f}, depth={depth}")
U_m = np.sqrt(2) * U_s
print(f"U_m={U_m}")
depth=50
U_s, T_u, T_n = calc_Us_Tu_at_depth(depth=depth, H_s=H_s, T_p=T_p, gamma=gamma)
Tn_Tz = T_n/T_z
print(f"U_s={U_s:.3f}, T_u={T_u:.3f}, T_n={T_n:.3f}, T_n/T_z={Tn_Tz:.3f}, depth={depth}")
# results not consistent (M_2 not correct??, see below)

# 2016 Thesis SPECIFIC SOFTWARE TOOL DEVELOPMENT FOR RIGID PIPELINE DESIGN, <NAME>
# https://fenix.tecnico.ulisboa.pt/downloadFile/1689244997255722/dissertacao.pdf
# sec. 5.2.4, p. 83; M0=0.5184 m /Hz; M2=0.0043 m /Hz
gamma = 1 # page 72
H_s = 2.88
T_p = 15.5
#U_s, T_u, T_n = calc_Us_Tu_at_depth(depth=depth, H_s=H_s, T_p=T_p, gamma=gamma)
#print(f"U_s={U_s:.3f}, T_u={T_u:.3f}, T_n={T_n:.3f}, T_n/T_z={Tn_Tz:.3f}, depth={depth}")
depth = 0
omega = np.linspace(0.01, 10.0, 1000)
S_etaeta, gamma = JONSWAP(omega, H_s=H_s, T_p=T_p, gamma=gamma)
M_0 = scipy.integrate.trapz(S_etaeta, omega)
print(f"M_0 = {M_0}, depth={depth} (expected 0.5184) ")
M_2 = scipy.integrate.trapz(S_etaeta*omega**2, omega)
print(f"M_2 = {M_2}, depth={depth} (expected 0.0043)")
U_s = 2 * np.sqrt(M_0)
print(f"U_s = {U_s}")
T_u = 2 * np.pi * np.sqrt(M_0/M_2)
print(f"T_u = {T_u}")
depth = 937 # minimum water depth, page 69
S_uu, G = JONSWAP_depth_transform(omega, S_etaeta, D=0, depth=depth)
S_uu = np.nan_to_num(S_uu)
#print(S_uu)
M_0 = scipy.integrate.trapz(S_uu, omega)
print(f"M_0 = {M_0}, depth={depth}")
M_2 = scipy.integrate.trapz(S_uu*omega**2, omega)
print(f"M_2 = {M_2}, depth={depth}")
U_s = 2 * np.sqrt(M_0)
print(f"U_s = {U_s}")
T_u = 2 * np.pi * np.sqrt(M_0/M_2)
print(f"T_u = {T_u}")

#GE p 35 - these results are good (slight differences in T_u)!
# Return-period sea states at one location: 1/10/100/1000-year conditions.
depth=260
H_s = 11 # 1 year
T_p = 14.6
gamma = 3.3
U_s, T_u, T_n = calc_Us_Tu_at_depth(depth=depth, H_s=H_s, T_p=T_p, gamma=gamma)
print(f"1-year U_s={U_s:.3f}, T_u={T_u:.3f}, T_n={T_n:.3f}, depth={depth}")
H_s = 13.691 # 10 year
T_p = 15.565
U_s, T_u, T_n = calc_Us_Tu_at_depth(depth=depth, H_s=H_s, T_p=T_p, gamma=gamma)
print(f"10-year U_s={U_s:.3f}, T_u={T_u:.3f}, T_n={T_n:.3f}, depth={depth}")
H_s = 16.3 # 100 year
T_p = 16.5
U_s, T_u, T_n = calc_Us_Tu_at_depth(depth=depth, H_s=H_s, T_p=T_p, gamma=gamma)
print(f"100-year U_s={U_s:.3f}, T_u={T_u:.3f}, T_n={T_n:.3f}, depth={depth}")
H_s = 18.844 # 1000 year
T_p = 17.412
U_s, T_u, T_n = calc_Us_Tu_at_depth(depth=depth, H_s=H_s, T_p=T_p, gamma=gamma)
print(f"1000-year U_s={U_s:.3f}, T_u={T_u:.3f}, T_n={T_n:.3f}, depth={depth}")
examples/metocean/JONSWAP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [optcutfreq](https://pypi.org/project/optcutfreq) # # A Python module for automatic search of optimal filter cutoff frequency based on residual analysis # # [https://pypi.org/project/optcutfreq](https://pypi.org/project/optcutfreq) # ## Determination of the optimal cutoff frequency for a low-pass filter # # A common problem in signal processing is to automatically determine the optimal cutoff frequency that should be employed in a low-pass filter to attenuate as much as possible the noise without compromising the signal content of the data. # # Unfortunately, there is no definite solution for this problem, but there are some techniques, with different degrees of success, to try to determine the optimal cutoff frequency. # # <NAME>, in his classic book *Biomechanics and motor control of human movement*, proposed a method to find the optimal cutoff frequency based on residual analysis of the difference between filtered and unfiltered signals over a range of cutoff frequencies. The optimal cutoff frequency is the one where the residual starts to change very little because it is considered that from this point, it's being filtered mostly noise and minimally signal, ideally. This concept is straightforward to implement. # ### The function `optcutfreq.py` # # The function `optcutfreq.py` of the Python module `optcutfreq` contains an implementation of this method and it is divided in three parts (after the help section): first, the residuals over a range of cutoff frequencies are calculated; second, an algorithm tries to find the noisy region (with a supposed linear behavior in the frequency domain) of the residuals versus cutoff frequencies plot and finds the optimal cutoff frequency; and third, the results are plotted. 
The code is lengthy relative to the simplicity of the idea because of the long help section, the implementation of the automatic search and a rich plot. Here is the function signature: # # ```python # fc_opt = optcutfreq(y, freq=1, fclim=[], show=False, ax=None): # ``` # ## Installation # # ```bash # pip install optcutfreq # ``` # # Or # # ```bash # conda install -c duartexyz optcutfreq # ``` # ### Test with benchmark data # Let's test this function with benchmark data. # # In 1977, Pezzack, Norman and Winter published a paper where they investigated the effects of differentiation and filtering processes on experimental data (the angle of a bar manipulated in space). Since then, these data have became a benchmark to test new algorithms. Let's work with these data (available at [http://isbweb.org/data/pezzack/index.html](http://isbweb.org/data/pezzack/index.html)). The data have the angular displacement measured by video and the angular acceleration directly measured by an accelerometer, which we will consider as the true acceleration. # Part of these data are showing next: import numpy as np # %matplotlib inline import matplotlib.pyplot as plt # load data file time, disp, disp2, aacc = np.loadtxt('./pezzack.txt', skiprows=6, unpack=True) dt = np.mean(np.diff(time)) # plot data fig, (ax1,ax2) = plt.subplots(1, 2, sharex = True, figsize=(11, 4)) plt.suptitle("Pezzack's benchmark data", fontsize=20) ax1.plot(time, disp, 'b') ax1.set_xlabel('Time [s]'); ax1.set_ylabel('Angular displacement [rad]') ax2.plot(time, aacc, 'g') ax2.set_xlabel('Time [s]'); ax2.set_ylabel('Angular acceleration [rad/s$^2$]') plt.subplots_adjust(wspace=0.3) # And using the residual analysis code: from optcutfreq import optcutfreq freq = np.mean(1/np.diff(time)) fc_opt = optcutfreq(disp, freq=freq, show=True) # The optimal cutoff frequency found is 5.6 Hz.
Note that the filtering process is relevant only for the derivative of the data; we cannot distinguish the filtered and unfiltered displacements (see that the RMSE residual is very small). # Let's employ this filter, differentiate the data twice and compare with the true acceleration as we did before: from scipy.signal import butter, filtfilt # Butterworth filter # Correct the cutoff frequency for the number of passes in the filter C = 0.802 # for dual pass; C = (2**(1/npasses) - 1)**0.25 b, a = butter(2, (fc_opt/C)/(freq/2)) dispf = filtfilt(b, a, disp) aaccBW = np.diff(dispf, 2)*freq*freq # RMSE: rmseBW = np.sqrt(np.mean((aaccBW-aacc[1:-1])**2)) # plot data fig, ax1 = plt.subplots(1, 1, figsize=(11, 4)) plt.suptitle("Pezzack's benchmark data", fontsize=20) ax1.plot(time[1:-1], aacc[1:-1], 'g', label='Analog acceleration: (True value)') ax1.plot(time[1:-1], aaccBW, 'r', label='Butterworth %.3g Hz: RMSE = %0.2f' %(fc_opt,rmseBW)) ax1.set_xlabel('Time [s]'); ax1.set_ylabel('Angular acceleration [rad/s$^2$]'); plt.legend(frameon=False, fontsize=12, loc='upper left'); # The performance seems satisfactory (see [this Jupyter notebook](https://nbviewer.jupyter.org/github/BMClab/BMC/blob/master/notebooks/DataFiltering.ipynb) for a comparison using other filters), but it is known that this residual analysis algorithm results in oversmoothing the kinematic data (see [http://www.clinicalgaitanalysis.com/faq/cutoff.html](http://www.clinicalgaitanalysis.com/faq/cutoff.html)). # To read more about the determination of the optimal cutoff frequency, see the following papers: # # - Pezzack, Norman, & Winter (1977). [An assessment of derivative determining techniques used for motion analysis](http://www.health.uottawa.ca/biomech/courses/apa7305/JB-Pezzack-Norman-Winter-1977.pdf). Journal of Biomechanics, 10, 377-382.
# - Giakas & Baltizopoulos (1997) [A comparison of automatic filtering techniques applied to biomechanical walking data](http://www.pe.uth.gr/sk_cms/scriptlib/getblob.php?redir=../sk_cms/images/notfound.htm&table=pepublications&field=doc&id=30). J. Biomech. 30, 847-850. # - Alonso, Salgado, Cuadrado & Pintado (2009) [Automatic smoothing of raw kinematic signals using SSA and cluster analysis](http://lim.ii.udc.es/docs/proceedings/2009_09_EUROMECH_Automatic.pdf). 7th EUROMECH Solid Mechanics Conference. # - Kristianslund, Krosshaug & Bogert (2012) [Effect of low pass filtering on joint moments from inverse dynamics: Implications for injury prevention](http://www.klokavskade.no/upload/Publication/Kristianslund_2012_J%20Biomechan_Effect%20of%20low-pass%20filtering%20on%20joint%20moments%20from%20inverse%20dynamics.pdf). J. Biomech. 45, 666-671. # ## References # # - <NAME>, <NAME>, & <NAME> (1977). [An assessment of derivative determining techniques used for motion analysis](http://www.health.uottawa.ca/biomech/courses/apa7305/JB-Pezzack-Norman-Winter-1977.pdf). Journal of Biomechanics, 10, 377-382. [PubMed](http://www.ncbi.nlm.nih.gov/pubmed/893476). # - <NAME> (2009) [Biomechanics and motor control of human movement](http://books.google.com.br/books?id=_bFHL08IWfwC&printsec=frontcover&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false). 4 ed. Hoboken, EUA: Wiley.
docs/optcutfreq.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import pyart import os import numpy as np from matplotlib import pyplot as plt from __future__ import print_function import datetime import matplotlib.dates as mdates from matplotlib.dates import MonthLocator, DayLocator, HourLocator, DateFormatter, drange from matplotlib.colors import LogNorm import netCDF4 from copy import deepcopy from mpl_toolkits.basemap import Basemap, pyproj import dateutil.parser # %matplotlib inline # + status_dir = '/Users/scollis/temp/status/' all_files = os.listdir(status_dir) all_files.sort() max_rf =[] mean_rf = [] good_files = [] datetimes = [] for filen in all_files: fh = open(status_dir+filen) line = fh.readline()[23::] fh.close() try: p1 = float(line.split(' ')[0]) p2 = float(line.split(' ')[1]) max_rf.append(p1) mean_rf.append(p2) good_files.append(filen) datetimes.append(datetime.datetime.strptime(filen[4:-11], '%Y%m%d_%H%M%S')) except: pass mean_rf = np.array(mean_rf) max_rf = np.array(max_rf) datetimes = np.array(datetimes) # - closest_ts = deepcopy(datetimes) for i in range(len(closest_ts)): if closest_ts[i].hour in np.arange(12) + 6: closest_ts[i] = datetime.datetime(closest_ts[i].year, closest_ts[i].month, closest_ts[i].day, 12) else: closest_ts[i] = datetime.datetime(closest_ts[i].year, closest_ts[i].month, closest_ts[i].day, 0) pattern = 'http://nomads.ncdc.noaa.gov/thredds/dodsC/narr-a/%Y%m/%Y%m%d/narr-a_221_%Y%m%d_%H00_000.grb' thredds_addy = [obj.strftime(pattern) for obj in closest_ts] dataset_test = netCDF4.Dataset(thredds_addy[10]) # + xg, yg = np.meshgrid(dataset_test.variables['x'][:]*1000.0, dataset_test.variables['y'][:]*1000.0) pnyc = pyproj.Proj( proj='lcc', datum='NAD83', lat_0=50.0, lat_1=50.0, lat_2=50.0, lon_0=-107.0) lons, lats = pnyc(xg , yg , inverse=True) lons[np.where(lons >= 0)] 
= lons[np.where(lons >= 0)] -360. pm_lat = 43.6667 pm_lon = -70.2667 cf = abs(lons - pm_lon) + abs(lats - pm_lat) loc = np.where(cf == cf.min()) # - height_resolved = [] for key in dataset_test.variables.keys(): if 29 in dataset_test.variables[key].shape and len(dataset_test.variables[key].shape) == 4: height_resolved.append(key) #height_resolved = ['Temperature', 'Temperature_height_above_ground'] #(u'time', u'height_above_ground2', u'y', u'x') sfc_only = [ 'Convective_available_potential_energy_surface'] hrs = dataset_test.variables[height_resolved[0]].shape sfcs = dataset_test.variables[sfc_only[0]].shape dataset_test.close() ta_array = np.array(thredds_addy) uta = np.unique(ta_array) # + data_dict = {} for key in height_resolved: data_dict.update({key : np.zeros([29, len(uta)])}) for key in sfc_only: data_dict.update({key : np.zeros([len(uta)])}) for i in range(len(uta)): dataset = netCDF4.Dataset(uta[i]) for key in height_resolved: #print(key, dataset.variables[key].shape) data_dict[key][:,i] = dataset.variables[key][0,:,loc[0][0], loc[1][0]] for key in sfc_only: #print(key, dataset.variables[key].shape) data_dict[key][i] = dataset.variables[key][0,loc[0][0], loc[1][0]] dataset.close() # - plt.pcolor(data_dict['Ice_mixing_ratio']) plt.plot(data_dict['Convective_available_potential_energy_surface']) print(len(uta)) print(data_dict.keys()) print(1)
notebooks/Reanalysis over Portland.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

# "Rain" effect: n rings that grow and fade; each frame resets one ring.
# New figure with white background
fig = plt.figure(figsize=(6,6), facecolor='white')

# New axis over the whole figure, no frame and a 1:1 aspect ratio
ax = fig.add_axes([0,0,1,1], frameon=False, aspect=1)

# Number of rings
n = 50
size_min = 50
size_max = 50*50

# Ring position
P = np.random.uniform(0,1,(n,2))

# Ring colors
C = np.ones((n,4)) * (0,0,0,1)
# Alpha color channel goes from 0 (transparent) to 1 (opaque)
C[:,3] = np.linspace(0,1,n)

# Ring sizes
S = np.linspace(size_min, size_max, n)

# Scatter plot
scat = ax.scatter(P[:,0], P[:,1], s=S, lw = 0.5, edgecolors = C, facecolors='None')

# Ensure limits are [0,1] and remove ticks
ax.set_xlim(0,1), ax.set_xticks([])
ax.set_ylim(0,1), ax.set_yticks([])

def update(frame):
    """Advance the rain animation: fade and grow all rings, reset one."""
    global P, C, S
    # Every ring is made more transparent
    C[:,3] = np.maximum(0, C[:,3] - 1.0/n)
    # Each ring is made larger
    S += (size_max - size_min) / n
    # Reset one ring per frame, cycling through all rings.
    # BUG FIX: the cycle length was the hard-coded literal 50; use n so
    # changing the ring count keeps the reset cycle in step.
    i = frame % n
    P[i] = np.random.uniform(0,1,2)
    S[i] = size_min
    C[i,3] = 1
    # Update scatter object
    scat.set_edgecolors(C)
    scat.set_sizes(S)
    scat.set_offsets(P)
    # Return the modified object
    return scat,

animation = FuncAnimation(fig, update, interval=10, blit=True, frames=200)
# animation.save('rain.gif', writer='imagemagick', fps=30, dpi=40)
plt.show()

# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

# Growing sine curve (note: this cell re-binds `update` from the cell above).
fig, ax = plt.subplots()
xdata, ydata = [], []
ln, = plt.plot([], [], 'ro', animated=True)

def init():
    """Fix the axes to one period of sin(x) before animating."""
    ax.set_xlim(0, 2*np.pi)
    ax.set_ylim(-1, 1)
    return ln,

def update(frame):
    """Append the next (frame, sin(frame)) sample and redraw the line."""
    xdata.append(frame)
    ydata.append(np.sin(frame))
    ln.set_data(xdata, ydata)
    return ln,

ani = FuncAnimation(fig, update, frames=np.linspace(0, 2*np.pi, 128), init_func=init, blit=True)
plt.show()
science/notebook/matplot-animation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import math from pandas import * csvfile = read_csv('test_data.csv') data = list(csvfile['data1']) data2 = list(csvfile['data2']) dataSum = 0 for val in data: dataSum = dataSum + val std_dev_mean = dataSum/len(data) sumData = 0 for item in data2: sumData = sumData + item mean = sumData/len(data2) sumSquares = 0 for val in data: sumSquares = sumSquares + (val-std_dev_mean)*(val-std_dev_mean) avgDist = 0 for item in data2: avgDist += abs(item-mean) avgDist/=len(data2) pop_std_dev_mean = std_dev_mean+1 stdDev = sumSquares/len(data) stdDev = stdDev ** 0.5 minVal = 100 maxVal = 0 for item in data2: if item < minVal: minVal = item if item > maxVal: maxVal = item rangeVal = maxVal - minVal print("The Average Distance of this dataset is: ", avgDist/rangeVal) print("The Standard Deviation of this data is: ", stdDev)
src/jupyter_contrib_nbextensions/TestFiles/Control_Unordered Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/cira037/jupy-graph/blob/main/Smart_Home.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="WhwYRzQAMSFT" # INSTALACIJA # + id="J4ZvwWd9ISYG" colab={"base_uri": "https://localhost:8080/"} outputId="83e1faf0-4be3-4b49-b5d2-e18182ae3085" # %pip install git+https://github.com/jupyterhub/binderhub # !apt install libgraphviz-dev # !apt install pkg-config # !apt install python-dev # # !apt install d3-graphviz # # !apt install dash # + colab={"base_uri": "https://localhost:8080/"} id="Tk1rDKYp7pSs" outputId="cf6753a3-06c5-4967-8d90-1f7b37ad5691" # %pip install networkx pydot graphviz # %pip install pydotplus # %pip install pygraphviz # %pip install pydot # %pip install dash # %pip install dash-interactive-graphviz # + [markdown] id="SRf8DEQYaddV" # IMPORTOVANJE # + id="R2D91RaGIw3B" from pprint import pprint as pp import networkx as nx from networkx.drawing.nx_pydot import write_dot import graphviz as gv from graphviz import Digraph as dg import pydot as dot import matplotlib.pyplot as plt import dash_interactive_graphviz as ding # + [markdown] id="Cr1ZCV88UqE4" # IOT SPISAK # + id="0afVJqcpMNsa" # WiFi ruteri = ['r_telekom','r_stan','r_gara','r_cila'] habovi = ['h_stan','h_gara','h_cila'] antene = ['a_A','a_B'] ripiteri = ['ri_gara', 'ri_xiaomi'] # SVETLA sijalice = ['s_stan1', 's_stan2', 's_stan3', 's_gara', 's_cila'] lampe = ['l_stan'] # TAJMERI tajmeri = ['t_gara', 't_cila_TA', 't_cila'] utik = ['u_gara'] # KAMERE kamere = ['k_stan', 'k_gara', 'k_cila'] # PREKIDAČI prekid = ['pr_kocka', 'pr_stan', 'pr_gara'] # SENZORI meteo = ['m_stan', 'mo_stan', 'm_terasa', 'm_gara', 'm_cila1', 
'm_cila2'] # 'm_okrugli' otvor = ['o_stan', 'o_gara', 'o_cila'] vibra = ['v_gara', 'v_cila'] pokret = ['p_gara'] dim = ['d_stan', 'd_cila'] voda = ['vo_stan'] senzori = (*meteo, *otvor, *vibra, *pokret, *dim, *voda) # PO LOKACIJI stan = ['r_telekom', 'r_stan', 'h_stan', 'a_A', 'ri_xiaomi', 's_stan1','s_stan2', 's_stan3', 'l_stan', 'k_stan', 'pr_kocka', 'pr_stan', 'm_stan', 'mo_stan', 'm_terasa', 'o_stan','d_stan', 'vo_stan'] gara = ['r_gara', 'h_gara' ,'a_B', 'ri_gara', 's_gara', 't_gara', 'u_gara', 'k_gara', 'pr_gara', 'm_gara', 'o_gara', 'v_gara', 'p_gara'] cila = ['r_cila', 'h_cila', 's_cila', 't_cila_TA', 't_cila', 'k_cila', 'm_cila1', 'm_cila2', 'o_cila', 'v_cila', 'd_cila'] # + [markdown] id="BYUyGl9pYWYC" # <NAME> # + id="v6_eBMhgJY1U" # GRAFOVI WIFI G_ruter = nx.MultiGraph() G_ruter.add_nodes_from(ruteri, shape="circle", size=10, weight=0.5, color="blue") G_hab = nx.MultiGraph() G_hab.add_nodes_from(habovi, shape="circle", size=10, weight=0.5, color="blue") G_ant = nx.MultiGraph() G_ant.add_nodes_from(antene, shape="circle", size=10, weight=0.5, color="blue") G_ripit = nx.MultiGraph() G_ripit.add_nodes_from(ripiteri, shape="circle", size=10, weight=0.5, color="blue") G_wifi =nx.compose_all([G_ruter, G_hab, G_ant, G_ripit]) # GRAFOVI SVETLA G_sijal = nx.MultiGraph() G_sijal.add_nodes_from(sijalice, shape="circle", size=10, weight=0.5, color="blue") G_lampe = nx.MultiGraph() G_lampe.add_nodes_from(lampe, shape="circle", size=10, weight=0.5, color="blue") G_svetlo =nx.compose_all([G_sijal, G_lampe]) # GRAFOVI TAJMERA G_tajmeri = nx.MultiGraph() G_tajmeri.add_nodes_from(tajmeri, shape="circle", size=10, weight=0.5, color="blue") G_utik = nx.MultiGraph() G_utik.add_nodes_from(utik, shape="circle", size=10, weight=0.5, color="blue") G_tajm = nx.compose_all([G_tajmeri, G_utik]) # GRAFOVI KAMERA G_kam = nx.MultiGraph() G_kam.add_nodes_from(kamere, shape="circle", size=10, weight=0.5, color="blue") # GRAFOVI PREKID G_prek = nx.MultiGraph() 
G_prek.add_nodes_from(prekid, shape="circle", size=10, weight=0.5, color="blue") # GRAFOVI SENZORA G_meteo = nx.MultiGraph() G_meteo.add_nodes_from(meteo, shape="circle", size=10, weight=0.5, color="blue") G_otvor = nx.MultiGraph() G_otvor.add_nodes_from(otvor, shape="triangle", size=12, weight=0.3, color="red") G_vibra = nx.MultiGraph() G_vibra.add_nodes_from(vibra, shape="circle", size=10, weight=0.4, color="green") G_pokret = nx.MultiGraph() G_pokret.add_nodes_from(pokret, shape="circle", size=14, weight=0.5, color="yellow") G_voda = nx.MultiGraph() G_voda.add_nodes_from(voda, shape="circle", size=16, weight=0.6, color="blue") G_senzori = nx.compose_all([G_meteo, G_otvor, G_vibra, G_pokret, G_voda]) # SMART HOME GRAF G_smart = nx.compose_all([G_wifi, G_svetlo, G_tajm, G_kam, G_prek, G_senzori]) # + [markdown] id="vjgC9_nQM48F" # SUBGRAFOVI # + id="M9MpfB47M-6E" colab={"base_uri": "https://localhost:8080/"} outputId="b833b2d1-c3e6-4f6e-b239-ce4f6298c339" # SUBGRAF STAN Sg_stan = G_smart.subgraph(stan) pp(list(Sg_stan.nodes(data=True))) # SUBGRAF GARAŽA Sg_gara = G_smart.subgraph(gara) # SUBGRAF CILA Sg_cila = G_smart.subgraph(cila) # + [markdown] id="84ScgpX5ecAS" # SPAJANJE NODOVA # + colab={"base_uri": "https://localhost:8080/"} id="Rbt64kLaelYK" outputId="f1ddaff5-6a2a-494b-847b-7f8c9cdd95ac" # ruteri G_smart.add_edge('r_telekom','r_stan', color="red", weight=0.8) G_smart.add_edge('r_stan','h_stan', color="red", weight=0.8) G_smart.add_edge('r_stan','a_A', color="red", weight=0.8) G_smart.add_edge('a_A','a_B', color="red", weight=0.8) G_smart.add_edge('a_B','r_gara', color="red", weight=0.8) G_smart.add_edge('r_gara','h_gara', color="red", weight=0.8) G_smart.add_edge('r_cila','h_cila', color="red", weight=0.8) # senzori u stanu G_smart.add_edge('h_stan','m_stan', color="blue", weight=0.6) G_smart.add_edge('h_stan','o_stan', color="red", weight=0.4) G_smart.add_edge('h_stan','m_terasa', color="green", weight=0.8) G_smart.add_edge('h_stan','vo_stan', 
color="black", weight=0.3) G_smart.add_edge('h_stan','d_stan', color="black", weight=0.3) G_smart.add_edge('h_stan','pr_kocka', color="red", weight=0.4) G_smart.add_edge('h_stan','pr_stan', color="red", weight=0.4) # ostalo stan G_smart.add_edge('h_stan','s_stan1', color="green", weight=0.8) G_smart.add_edge('h_stan','s_stan2', color="green", weight=0.8) G_smart.add_edge('h_stan','s_stan3', color="green", weight=0.8) G_smart.add_edge('r_stan','l_stan', color="green", weight=0.8) G_smart.add_edge('r_stan','k_stan', color="green", weight=0.8) # senzori u garaži G_smart.add_edge('h_gara','m_gara', color="blue", weight=0.6) G_smart.add_edge('h_gara','o_gara', color="red", weight=0.4) G_smart.add_edge('h_gara','v_gara', color="green", weight=0.8) G_smart.add_edge('h_gara','p_gara', color="green", weight=0.8) # ostalo garaža G_smart.add_edge('h_gara','pr_gara', color="blue", weight=0.6) G_smart.add_edge('r_gara','k_gara', color="red", weight=0.4) G_smart.add_edge('r_gara','s_gara', color="green", weight=0.8) G_smart.add_edge('r_gara','t_gara', color="green", weight=0.8) G_smart.add_edge('r_gara','u_gara', color="green", weight=0.8) # senzori kod Cile G_smart.add_edge('h_cila','m_cila1', color="blue", weight=0.6) G_smart.add_edge('h_cila','m_cila2', color="red", weight=0.4) G_smart.add_edge('h_cila','o_cila', color="blue", weight=0.6) G_smart.add_edge('h_cila','v_cila', color="red", weight=0.4) G_smart.add_edge('h_cila','d_cila', color="red", weight=0.4) # ostalo cila G_smart.add_edge('r_cila','s_cila', color="blue", weight=0.6) G_smart.add_edge('r_cila','k_cila', color="red", weight=0.4) G_smart.add_edge('r_cila','t_cila_TA', color="blue", weight=0.6) G_smart.add_edge('r_cila','t_cila', color="red", weight=0.4) # + [markdown] id="0FTBp_f8rcJF" # # # # # <NAME> # + colab={"base_uri": "https://localhost:8080/"} id="PjtR5ln2hOVB" outputId="8d54248f-392a-4fb5-cdfb-01bf3a82c5d4" pp(list(G_meteo.nodes(data=True))) print(len(G_meteo)) pp(list(G_otvor.nodes(data=True))) 
print(len(G_otvor)) pp(list(G_vibra.nodes(data=True))) print(len(G_vibra)) pp(list(G_senzori.nodes(data=True))) print(len(G_senzori)) # + [markdown] id="RV1w-fw1MOfm" # # + colab={"base_uri": "https://localhost:8080/"} id="Rwl7wKfB2URI" outputId="118045b7-1521-438d-f532-bf8fdc16dd01" senzori = [*meteo, *otvor, *vibra, *pokret, *voda] # pp(list(G_smart.nodes(data=True))) print(len(G_smart), ' nodova') # pp(list(G_smart.edges(data=True))) print(len(G_smart.edges()), ' veza') # + colab={"base_uri": "https://localhost:8080/", "height": 218} id="GmNgLxoIDsRK" outputId="537dc96b-5e33-41ec-c399-795786327b0f" #Crtanje sa mathplotlib # nx.draw(G_smart) pos = nx.kamada_kawai_layout(G_smart) nx.draw_networkx(G_smart, pos) plt.show() # pos1 = dot(G_smart) # pos1.show(block=False) # pp(dir(dot)) # pos = nx.nx_pydot.graphviz_layout(G_smart) # nx.draw_networkx(G_smart, pos) ## plt.savefig('networkx_graph.png') # + colab={"base_uri": "https://localhost:8080/", "height": 319} id="UMFuc_HN1B-L" outputId="cdcadf3b-a4a5-4f8f-f221-2444dce9accd" # Crtanje sa graphviz pos = nx.nx_agraph.graphviz_layout(G_smart) # pos = nx.nx_agraph.dot_layout(G_smart) nx.draw(G_smart, pos=pos) # write_dot(G_smart, 'file.dot') # + colab={"base_uri": "https://localhost:8080/", "height": 319} id="NBYvWk_O9qdj" outputId="bc9ea988-a23b-4840-ba8e-1e2d45da7a03" # import pydotplus as pdp # pp(list(dir(dot))) pos = nx.nx_pydot.pydot_layout(G_smart, prog="neato") nx.draw(G_smart, pos=pos) # write_dot(G_smart, 'file.dot') # + id="rjHpE9lRZjsB" colab={"base_uri": "https://localhost:8080/"} outputId="d8145fed-3cc7-4202-b765-653f05e64879" pp(list(dir(ding)))
Smart_Home.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="PA9WOB4zJfCu" colab_type="text" # # To install `QMCTorch` copy the code from one of the 3 text cells below in a code cell and run that cell. # + [markdown] id="6KLOhN_dGQ11" colab_type="text" # ## Install QMCTorch from Pypi Package manager # # ``` # # # ! pip install qmctorch # ``` # # # + [markdown] id="Eb9wI3eOGfz1" colab_type="text" # ## Install QMCTorch from GitHub # ``` # from google.colab import drive # drive.mount('/content/gdrive') # % cd gdrive/My Drive/ # # # ! git clone https://github.com/NLESC-JCER/QMCTorch # % cd QMCTorch # # # ! pip install -e . # % cd ../ # ``` # + [markdown] id="_VNw5sAeHC7M" colab_type="text" # ## Pull latest code from Github # ``` # from google.colab import drive # drive.mount('/content/gdrive') # % cd gdrive/My Drive/QMCTorch # # # ! git pull origin master # # # ! pip install -e . # % cd ../ # ``` # + id="khGd1-ewHZWF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 671} outputId="674fbe9e-7817-4e11-cc91-cd508bda8107" executionInfo={"status": "ok", "timestamp": 1588617088336, "user_tz": -120, "elapsed": 9770, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07120063468244602126"}} from google.colab import drive drive.mount('/content/gdrive') % cd gdrive/My Drive/QMCTorch # ! git pull origin master # ! pip install -e . % cd .. 
# + [markdown] id="MGNu_L-OJ-7u" colab_type="text" # # Using QMCTorch # + id="p7qEQTV2HB4h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="52c2bbce-3775-442c-95de-d18bd432c2e3" executionInfo={"status": "ok", "timestamp": 1588617090211, "user_tz": -120, "elapsed": 11632, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07120063468244602126"}} from torch import optim from qmctorch.wavefunction import SlaterJastrow, Molecule from qmctorch.solver import SolverSlaterJastrow from qmctorch.sampler import Metropolis from qmctorch.utils import set_torch_double_precision from qmctorch.utils import plot_energy, plot_data # + id="x-s06JyaHUdN" colab_type="code" colab={} set_torch_double_precision() # + id="7HO4cNaAID-F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="a1f652aa-9bcb-4e9b-c038-833b29da2eae" executionInfo={"status": "ok", "timestamp": 1588617090213, "user_tz": -120, "elapsed": 11608, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07120063468244602126"}} mol = Molecule(atom='H 0 0 0.69; H 0 0 -0.69', unit='bohr', \ calculator='pyscf', basis='sto-3g') # + id="hFZg-XanIOeY" colab_type="code" colab={} wf = SlaterJastrow(mol, configs='cas(2,2)', cuda=True) # + id="Mqlk1N3tIVXN" colab_type="code" colab={} sampler = Metropolis(nwalkers=2000, nstep=2000, step_size=0.2, \ ntherm=-1, ndecor=100, nelec=wf.nelec, \ init=mol.domain('atomic'), \ move={'type':'all-elec', 'proba':'normal'}, cuda=True) # + id="5-I7abLxI5qG" colab_type="code" colab={} lr_dict = [{'params': wf.jastrow.parameters(), 'lr': 3E-3}, {'params': wf.ao.parameters(), 'lr': 1E-6}, {'params': wf.mo.parameters(), 'lr': 1E-3}, {'params': wf.fc.parameters(), 'lr': 2E-3}] opt = optim.Adam(lr_dict, lr=1E-3) # + id="xgXSp8JwJIr9" colab_type="code" colab={} scheduler = optim.lr_scheduler.StepLR(opt, step_size=100, gamma=0.90) # + id="g6TE--nNJL1H" colab_type="code" colab={} solver = SolverSlaterJastrow(wf=wf, 
sampler=sampler, optimizer=opt, scheduler=scheduler) # + id="Y5MPLiv2JTCy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="ef067f86-11b3-48e0-9dac-dc8ff5784905" executionInfo={"status": "ok", "timestamp": 1588617105475, "user_tz": -120, "elapsed": 26805, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07120063468244602126"}} obs = solver.single_point() # + id="yfx4g1Luz9Z-" colab_type="code" colab={} solver.configure(task='wf_opt', freeze=['ao', 'mo']) solver.track_observable(['local_energy']) solver.configure_resampling(mode='update', resample_every=1, nstep_update=50) solver.ortho_mo = False # + id="VtrVSk620A1A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="32ad9904-40e5-4efa-bcee-27145e04cfd4" executionInfo={"status": "ok", "timestamp": 1588617131289, "user_tz": -120, "elapsed": 52595, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07120063468244602126"}} obs = solver.run(50, batchsize=None, loss='energy', grad='manual', clip_loss=False)
notebooks/qmctorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ARMA(p,q) and ARIMA(p,d,q) # # Autoregressive Moving Averages # # For non-stationary datasets with a trend component, ARIMA models apply a differencing coefficient as well. # # <div class="alert alert-info"><h3>Related Functions:</h3> # <tt><strong> # <a href='https://www.statsmodels.org/stable/generated/statsmodels.tsa.arima_model.ARMA.html'>arima_model.ARMA</a></strong><font color=black>(endog, order[, exog, …])</font>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Autoregressive Moving Average ARMA(p,q) model<br> # <strong> # <a href='https://www.statsmodels.org/stable/generated/statsmodels.tsa.arima_model.ARMAResults.html'>arima_model.ARMAResults</a></strong><font color=black>(model, params[, …])</font>&nbsp;&nbsp;&nbsp;Class to hold results from fitting an ARMA model<br> # <strong> # <a href='https://www.statsmodels.org/stable/generated/statsmodels.tsa.arima_model.ARIMA.html'>arima_model.ARIMA</a></strong><font color=black>(endog, order[, exog, …])</font>&nbsp;&nbsp;&nbsp;&nbsp;Autoregressive Integrated Moving Average ARIMA(p,d,q) model<br> # <strong> # <a href='https://www.statsmodels.org/stable/generated/statsmodels.tsa.arima_model.ARIMAResults.html'>arima_model.ARIMAResults</a></strong><font color=black>(model, params[, …])</font>&nbsp;&nbsp;Class to hold results from fitting an ARIMA model<br> # <strong> # <a href='https://www.statsmodels.org/stable/generated/statsmodels.tsa.kalmanf.kalmanfilter.KalmanFilter.html'>kalmanf.kalmanfilter.KalmanFilter</a></strong>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Kalman Filter code intended for use with the ARMA model</tt> # # <h3>For Further Reading:</h3> # <strong> # <a 
href='https://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model'>Wikipedia</a></strong>&nbsp;&nbsp;<font color=black>Autoregressive–moving-average model</font><br> # <strong> # <a href='https://otexts.com/fpp2/non-seasonal-arima.html'>Forecasting: Principles and Practice</a></strong>&nbsp;&nbsp;<font color=black>Non-seasonal ARIMA models</font></div> # ## Perform standard imports and load datasets # + import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline from matplotlib.pylab import rcParams rcParams['figure.figsize']=18,9 # Load specific forecasting tools from statsmodels.tsa.arima_model import ARMA,ARMAResults,ARIMA,ARIMAResults from statsmodels.tsa.seasonal import seasonal_decompose # for determining (p,q) orders from statsmodels.graphics.tsaplots import plot_acf,plot_pacf # for determining ARIMA orders from pmdarima import auto_arima # Ignore harmless warnings import warnings warnings.filterwarnings("ignore") # Load datasets df1 = pd.read_csv('../Data/DailyTotalFemaleBirths.csv',index_col='Date',parse_dates=True) df1.index.freq = 'D' df1 = df1[:120] # we only want the first four months df2 = pd.read_csv('../Data/TradeInventories.csv',index_col='Date',parse_dates=True) df2.index.freq='MS' # - # ## Automate the augmented Dickey-Fuller Test # Since we'll be using it a lot to determine if an incoming time series is stationary, let's write a function that performs the augmented Dickey-Fuller Test. 
# +
from statsmodels.tsa.stattools import adfuller


def adf_test(series, title=''):
    """
    Pass in a time series and an optional title; print an ADF report.

    Runs the augmented Dickey-Fuller test and reports the test statistic,
    p-value, lag count, observation count and critical values, followed by
    a plain-language verdict on stationarity (alpha = 0.05).
    """
    print(f'Augmented Dickey-Fuller Test: {title}\n')
    result = adfuller(series.dropna(), autolag='AIC')  # .dropna() handles differenced data

    labels = ['ADF test statistic', 'p-value', '# lags used', '# observations']
    out = pd.Series(result[0:4], index=labels)

    # result[4] is a dict of critical values keyed by confidence level
    for key, val in result[4].items():
        out[f'critical value ({key})'] = val

    print(out.to_string())  # .to_string() removes the line "dtype: float64"

    if result[1] <= 0.05:
        print("\nStrong evidence against the null hypothesis")
        print("Reject the null hypothesis")
        print("Data has no unit root and is stationary")
    else:
        print("\nWeak evidence against the null hypothesis")
        print("Fail to reject the null hypothesis")
        print("Data has a unit root and is non-stationary")
# -

# ___
# <h1><center>ARMA</center></h1>
# Among ARMA and ARIMA, we will start with <font color='red'> **ARMA** </font>

# ## Autoregressive Moving Average - ARMA(p,q)
# In this first section we'll look at a stationary dataset, determine (p,q) orders, and run a forecasting ARMA model fit to the data. In practice it's rare to find stationary data with no trend or seasonal component, but the first four months of the <em>Daily Total Female Births</em> dataset should work for our purposes.

# ### Plot the source data

df1['Births'].plot(figsize=(12,5));

# ### Run the augmented Dickey-Fuller Test to confirm stationarity

# The series under test is the Daily Total Female Births data, so the
# report title reflects that dataset.
adf_test(df1['Births'], "Daily Total Female Births")

# ### Determine the (p,q) ARMA Orders using <tt>pmdarima.auto_arima</tt>
# This tool should give just $p$ and $q$ value recommendations for this dataset.

auto_arima(df1['Births'], seasonal=False, trace=True).summary()

# ### Split the data into train/test sets
# As a general rule you should set the length of your test set equal to your intended forecast size. For this dataset we'll attempt a 1-month forecast.
# Set one month for testing train = df1.iloc[:90] test = df1.iloc[90:] # ### Fit an ARMA(p,q) Model # If you want you can run <tt>help(ARMA)</tt> to learn what incoming arguments are available/expected, and what's being returned. model = ARMA(train['Births'],order=(2,2)) results = model.fit() results.summary() # ### Obtain a month's worth of predicted values start=len(train) end=len(train)+len(test)-1 predictions = results.predict(start=start, end=end).rename('ARMA(2,2) Predictions') # ### Plot predictions against known values # + title = 'Daily Total Female Births' ylabel='Births' xlabel='' # we don't really need a label here test['Births'].plot(legend=True,figsize=(18,9),title=title) # - # Since our starting dataset exhibited no trend or seasonal component, this prediction makes sense. In the next section we'll take additional steps to evaluate the performance of our predictions, and forecast into the future. print(test.mean()); print(predictions.mean()) # ___ # ___ # <h1><center>ARIMA</center></h1> # ___ # ___ # ## Autoregressive Integrated Moving Average - ARIMA(p,d,q) # ___ # # The steps are the same as for ARMA(p,q), except that we'll apply a differencing component to make the dataset stationary.<br> # First let's take a look at the <em>Real Manufacturing and Trade Inventories</em> dataset. # ### Plot the Source Data # + plt.ylabel("Years") plt.xlabel("Inventories") plt.title("Inventory Dataset") plt.plot(df2["Inventories"]) plt.show(); # - # ### Run an ETS Decomposition (optional) # We probably won't learn a lot from it, but it never hurts to run an ETS Decomposition plot. 
from statsmodels.tsa.seasonal import seasonal_decompose result = seasonal_decompose(df2['Inventories'], model='additive') # model='add' also works result.plot(); # ### Use <tt>pmdarima.auto_arima</tt> to determine ARIMA Orders stepwise_fit = auto_arima(df2['Inventories'], seasonal=False, trace=True) stepwise_fit.summary() # ### \[Optionally, checking out with\] # *Seasonal=<font color="Green">True</font>* stepwise_fit = auto_arima(df2['Inventories'], seasonal=False, trace=True) stepwise_fit.summary() # # Forecasting ARIMA # Looks good from here! Now let's train & test the ARIMA(1,1,1) model, evaluate it, then produce a forecast of future values. # ### Split the data into train/test sets print("#Months in df:",len(df2),"i.e.",len(df2)//12," years") # ## Training ARIMA Model # Since total years=22, we would take 21 years for training and 1 year for testing # Set one year for testing, rest of the years for training train, test = df2.iloc[:252], df2.iloc[252:]; print(len(train)); print(len(test)); # ### Train and Fit an ARIMA(1,1,1) Model model = ARIMA(train['Inventories'],order=(1,1,1)) results = model.fit() results.summary() # ## Making Predictions: # Obtain predicted values start_predections = len(train) end_predections = len(train)+len(test)-1 predictions = results.predict(start=start_predections, end=end_predections, dynamic=False, typ="levels").rename('ARIMA(1,1,1) Predictions') test["Inventories"].plot(figsize=(18,9),legend=True) predictions.plot(legend=True); # ### Its performing good for catching up with trends, but as we said Seasnal=False in Auto-Arima, its performing poorly on catching up with Seasonality # ### If Seasonal=True parameter was supplied, Auto-Arima would have suggested => SARIMAX(0, 1, 0) # ### Evaluate the Model # + from statsmodels.tools.eval_measures import rmse from sklearn.metrics import r2_score, mean_squared_error rmse_error = np.sqrt(mean_squared_error(test['Inventories'], predictions)) print(f'ARIMA(1,1,1) RMSE Error: {rmse_error}') 
print("Test's mean:",test["Inventories"].mean()) print("\nFor a mean value of 2MN, our RMSE is only in 7k, which is a good value\n") r2_result = r2_score(test['Inventories'], predictions) print(f"RSq: {r2_result} which is a quite high value, thus 'good-for-fit' score is a good fit") # - # # Forecast into future model = ARIMA(df2["Inventories"],order=(1,1,1)) results = model.fit() fcast = results.predict(start=len(df2),end=len(df2)+11, typ='levels').rename("ARIMA(1,1,1) Forecast") df2['Inventories'].plot(legend=True,figsize=(18,9)) fcast.plot(legend=True);
tsa/jose/08_General Forecasting Models/64_ARIMA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import copy  # no longer required by rest_edges; kept so existing cells that use it still run


def edges(flights):
    """Build an adjacency map {origin: set(destinations)} from (a, b) flight pairs."""
    ret = {}
    for a, b in flights:
        ret.setdefault(a, set()).add(b)
    return ret


def rest_edges(edges, edge):
    """Return a copy of *edges* with the single edge (a, b) removed.

    Copies each destination set individually instead of deep-copying the
    whole mapping — cheaper, and equivalent here because the values are
    flat sets of strings.
    """
    a, b = edge
    ret = {k: set(v) for k, v in edges.items()}
    ret[a].remove(b)
    return ret


def is_empty(edges):
    """True when no edges remain (every destination set is empty)."""
    return not any(edges.values())


def dfs(edges, start):
    """Find an itinerary from *start* that uses every edge exactly once.

    Returns the list of visited airports, or None when no such itinerary
    exists. Destinations are tried in sorted order so the result is
    deterministic across runs (plain set iteration order over strings
    varies with hash randomization).
    """
    if is_empty(edges):
        return [start]
    if start not in edges:
        # No outgoing edges from here, but unused edges remain elsewhere.
        return None
    for end in sorted(edges[start]):
        rest = dfs(rest_edges(edges, (start, end)), end)
        if rest is not None:
            return [start] + rest
    return None


# +
e = edges([('SFO', 'HKO'), ('YYZ', 'SFO'), ('YUL', 'YYZ'), ('HKO', 'ORD')])
print(dfs(e, 'YUL'))
# -
daily/20191023.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8 # language: python # name: python3.8 # --- # # Distributed TensorFlow # # description: train tensorflow CNN model on mnist data distributed via tensorflow # Train a distributed TensorFlow job using the `tf.distribute.Strategy` API on Azure ML. # # For more information on distributed training with TensorFlow, refer [here](https://www.tensorflow.org/guide/distributed_training). # + from azureml.core import Workspace ws = Workspace.from_config() ws # + import git from pathlib import Path # get root of git repo prefix = Path(git.Repo(".", search_parent_directories=True).working_tree_dir) # training script source_dir = prefix.joinpath( "code", "models", "tensorflow", "mnist-distributed" ) script_name = "train.py" # environment file environment_file = prefix.joinpath("environments", "tf-gpu.yml") # azure ml settings environment_name = "tf-gpu" experiment_name = "tf-mnist-distr-example" cluster_name = "gpu-K80-2" # + tags=[] print(open(source_dir.joinpath(script_name)).read()) # - # ## Create environment # + from azureml.core import Environment env = Environment.from_conda_specification(environment_name, environment_file) # specify a GPU base image env.docker.enabled = True env.docker.base_image = ( "mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04" ) # - # ## Configure and run training job # # Create a `ScriptRunConfig` to specify the training script & arguments, environment, and cluster to run on. # # The training script in this example utilizes multi-worker distributed training of a Keras model using the `tf.distribute.Strategy` API, specifically `tf.distribute.experimental.MultiWorkerMirroredStrategy`. To run a multi-worker TensorFlow job on Azure ML, create a `TensorflowConfiguration`. Specify a `worker_count` corresponding to the number of nodes for your training job. 
# # In TensorFlow, the `TF_CONFIG` environment variable is required for training on multiple machines. Azure ML will configure and set the `TF_CONFIG` variable appropriately for each worker before executing your training script. You can access `TF_CONFIG` from your training script if you need to via `os.environ['TF_CONFIG']`. # + import os from azureml.core import ScriptRunConfig, Experiment from azureml.core.runconfig import TensorflowConfiguration cluster = ws.compute_targets[cluster_name] distr_config = TensorflowConfiguration( worker_count=2, parameter_server_count=0 ) model_path = os.path.join("./outputs", "keras-model") src = ScriptRunConfig( source_directory=source_dir, script=script_name, arguments=["--epochs", 30, "--model-dir", model_path], compute_target=cluster, environment=env, distributed_job_config=distr_config, ) run = Experiment(ws, experiment_name).submit(src) run # + from azureml.widgets import RunDetails RunDetails(run).show() # - run.wait_for_completion(show_output=True)
notebooks/tensorflow/train-mnist-distributed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %load_ext Cython # + magic_args="--annotate" language="cython" # import networkx as nx # import time # G = nx.read_gpickle("test_graph") # # def pd_very_fast(G): # '''G es un Grafo''' # # source = 0 # weight = 'd' # target = G.order()-1 # # Tmin = nx.all_pairs_dijkstra_path_length(G,'t') # M = G.order() # N = M-2 # P = set(range(1,N/2+1)) # D = set(range(N/2+1,N+1)) # # caminos = [] # # S0 = [0 ,0, 0] #[primeros M es la ruta, costo, tiempo] # h = [S0] # estados = 0 # t = 0 # l = 1 # while h: # # ruta = h.pop() # v = ruta[-3] #nodo terminal de la ruta actual # vecinos = set(G[v].keys()) # pickup_posibles = [x for x in P.intersection(vecinos) if x not in ruta[0:-2]] # delivery_posibles = [x for x in D.intersection(vecinos) if (x not in ruta[0:-2] and x-N/2 in ruta[0:-2])] # edges = pickup_posibles + delivery_posibles # # # if delivery_posibles==[] and target in vecinos: # print ' agregando ultimo nodo' # edges.insert(0, target) # # print 'ruta: '+str(ruta) # # print 'vecinos: '+str(vecinos) # # print 'pickup_posible: ' +str(pickup_posibles) # # print 'delivery_posibles: '+str(delivery_posibles) # # print 'edges: '+str(edges) # # for w in edges: # time.sleep(0.2) # c_vw = G[v][w][weight] # t_vw = G[v][w]['t'] # cost = ruta[-2] + c_vw # t = ruta[-1] + t_vw # if (t <= G.node[w]['u']): # if (t <= G.node[w]['l']): # lo puedo agregar! # t = G.node[w]['l'] # rutaN = ruta[0:-2] # rutaN.append(w) # rutaN.append(cost) # rutaN.append(t) # if w == target: # print "ruta al target" # caminos.append(rutaN) # print rutaN # h.append(rutaN) # estados += 1 # # return caminos # # -
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="X0qI6s3pTQPW" outputId="94bac0a9-6350-41b5-e3d6-8f0ca3460927" from google.colab import drive drive.mount('/content/drive') # + id="xQgoOG3GUfbD" import json #changer chemin du fichier with open('/content/drive/My Drive/RAMI/terrasses(1).json') as json_file: data = json.load(json_file) # + colab={"base_uri": "https://localhost:8080/"} id="pq47c_AYG0mk" outputId="e74c736d-3e5f-4a86-8bfc-6460eb8b2727" print(data['terrasses']) # + colab={"base_uri": "https://localhost:8080/"} id="MN9z4AVEWWj-" outputId="45c5d5d9-2628-4687-e9aa-fc1a7f217696" #récupère les adresses liste_adr = [] for d in data['terrasses']: liste_adr.append(d['adresses']) print(liste_adr) # + id="HfgUsY3IFvxS" import json import urllib import urllib.request # + colab={"base_uri": "https://localhost:8080/"} id="PaTf2wo_qUVW" outputId="7e457b0a-c7a6-4ef5-c31a-f3d7572756d2" print(liste_adr[0].split(' ')) # + id="8-bV65pbHxel" colab={"base_uri": "https://localhost:8080/"} outputId="d95d9fab-fb7e-4a8c-f2b2-e4d8ad326fcc" #traduis l'adresse en coordonnées listeCoordinates = [] for adresses in liste_adr: if adresses != '': adr_array = adresses.split(" ") url = "https://api-adresse.data.gouv.fr/search/?q=" for i in range(len(adr_array)-1): ### partie de remplacement des caractères spéciaux, si y en a qui y sont pas les rajouter a = adr_array[i].replace("’","") b = a.replace("è","e") ### url += b+"+" url += adr_array[len(adr_array)-1] req = urllib.request.Request(url) response = urllib.request.urlopen(req) data = response.read() values = json.loads(data) listeCoordinates.append(values['features'][0]['geometry']['coordinates']) print(listeCoordinates) # + id="1ISbgKxGNuz-" #géocodage du json for i in range(len(data['terrasses'])): 
data['terrasses'][i]["locs:"]=listeCoordinates[i] # + colab={"base_uri": "https://localhost:8080/"} id="HvUe8gEwPdED" outputId="23a2ffe7-5191-4134-8da1-63f85367040f" for d in data['terrasses']: print(d) # + id="h-CewvUXP40N" import json #écriture dans le fichier with open('test.json', 'w') as f: json.dump(data, f, indent=4)
geoloc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # name: python3 # --- # ## Preprocessing # + # Import our dependencies from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import pandas as pd import tensorflow as tf # Import and read the charity_data.csv. import pandas as pd application_df = pd.read_csv("./Resources/charity_data.csv") application_df.head() # - # Drop the non-beneficial ID columns, 'EIN' and 'NAME'. application_df = application_df.drop(['EIN', 'NAME'], axis=1) # Determine the number of unique values in each column. application_df.value_counts() # + # Look at APPLICATION_TYPE value counts for binning # YOUR CODE GOES HERE # + # Choose a cutoff value and create a list of application types to be replaced # use the variable name `application_types_to_replace` # YOUR CODE GOES HERE # Replace in dataframe for app in application_types_to_replace: application_df['APPLICATION_TYPE'] = application_df['APPLICATION_TYPE'].replace(app,"Other") # Check to make sure binning was successful application_df['APPLICATION_TYPE'].value_counts() # + # Look at CLASSIFICATION value counts for binning # YOUR CODE GOES HERE # + # You may find it helpful to look at CLASSIFICATION value counts >1 # YOUR CODE GOES HERE # + # Choose a cutoff value and create a list of classifications to be replaced # use the variable name `classifications_to_replace` # YOUR CODE GOES HERE # Replace in dataframe for cls in classifications_to_replace: application_df['CLASSIFICATION'] = application_df['CLASSIFICATION'].replace(cls,"Other") # Check to make sure binning was successful application_df['CLASSIFICATION'].value_counts() # + # Convert categorical data to numeric with `pd.get_dummies` # YOUR CODE GOES HERE # + # Split our preprocessed data into our features and target arrays # YOUR 
CODE GOES HERE # Split the preprocessed data into a training and testing dataset # YOUR CODE GOES HERE # + # Create a StandardScaler instances scaler = StandardScaler() # Fit the StandardScaler X_scaler = scaler.fit(X_train) # Scale the data X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test) # - # ## Compile, Train and Evaluate the Model # + # Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer. # YOUR CODE GOES HERE nn = tf.keras.models.Sequential() # First hidden layer # YOUR CODE GOES HERE # Second hidden layer # YOUR CODE GOES HERE # Output layer # YOUR CODE GOES HERE # Check the structure of the model nn.summary() # + # Compile the model # YOUR CODE GOES HERE # + # Train the model # YOUR CODE GOES HERE # - # Evaluate the model using the test data model_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2) print(f"Loss: {model_loss}, Accuracy: {model_accuracy}") # + # Export our model to HDF5 file # YOUR CODE GOES HERE
Starter_Code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: exatrkx-iml # language: python # name: exatrkx-iml # --- import os os.environ['TRKXINPUTDIR']="/global/cfs/cdirs/m3443/data/trackml-kaggle/train_10evts" os.environ['TRKXOUTPUTDIR']= "/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/layerless_check/n1" # + import pkg_resources import yaml import pprint import random random.seed(1234) import numpy as np import pandas as pd import itertools import matplotlib.pyplot as plt import tqdm from os import listdir from os.path import isfile, join import matplotlib.cm as cm import sys # # %matplotlib widget sys.path.append('/global/homes/c/caditi97/exatrkx-iml2020/exatrkx/src/') # 3rd party import torch import torch.nn.functional as F from torch_geometric.data import Data from trackml.dataset import load_event from pytorch_lightning import Trainer from pytorch_lightning.callbacks import ModelCheckpoint # local import from exatrkx import config_dict # for accessing predefined configuration files from exatrkx import outdir_dict # for accessing predefined output directories from exatrkx.src import utils_dir from exatrkx.src import utils_robust from utils_robust import * # for preprocessing from exatrkx import FeatureStore from exatrkx.src import utils_torch # for embedding from exatrkx import LayerlessEmbedding from exatrkx.src import utils_torch from torch_cluster import radius_graph from utils_torch import build_edges from embedding.embedding_base import * # for filtering from exatrkx import VanillaFilter # for GNN import tensorflow as tf from graph_nets import utils_tf from exatrkx import SegmentClassifier import sonnet as snt # for labeling from exatrkx.scripts.tracks_from_gnn import prepare as prepare_labeling from exatrkx.scripts.tracks_from_gnn import clustering as dbscan_clustering # track efficiency from trackml.score import _analyze_tracks from 
exatrkx.scripts.eval_reco_trkx import make_cmp_plot, pt_configs, eta_configs from functools import partial # + # embed_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/embedding/checkpoints/epoch=10.ckpt' # filter_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/filtering/checkpoints/epoch=92.ckpt' # gnn_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/gnn' # plots_dir = '/global/homes/c/caditi97/exatrkx-iml2020/exatrkx/src/plots/noise1' # needs to change... # ckpt_idx = -1 # which GNN checkpoint to load # dbscan_epsilon, dbscan_minsamples = 0.25, 2 # hyperparameters for DBScan # min_hits = 5 # minimum number of hits associated with a particle to define "reconstructable particles" # frac_reco_matched, frac_truth_matched = 0.5, 0.5 # parameters for track matching # evtid = 1000 # event_file = os.path.join(utils_dir.inputdir, 'event{:09}'.format(evtid)) # + action = 'build' config_file = pkg_resources.resource_filename( "exatrkx", os.path.join('configs', config_dict[action])) with open(config_file) as f: b_config = yaml.load(f, Loader=yaml.FullLoader) pp = pprint.PrettyPrinter(indent=4) # - b_config['pt_min'] = 0 b_config['endcaps'] = True b_config['n_workers'] = 1 b_config['n_files'] = 1 b_config['noise'] = 1 pp.pprint(b_config) # this cell is only needed for the first run to prodcue the dataset preprocess_dm = FeatureStore(b_config) preprocess_dm.prepare_data() def plot_noise_dist(noise_keeps): noise = [] not_noise = [] for i in noise_keeps: data_path = f"/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/layerless_check/n{i}/feature_store/1000" #data = torch.load(f"/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/feature_store_endcaps/n{i}/1000") data = torch.load(data_path) arr = data['pid'] n_count = np.count_nonzero(arr==0) not_n = np.count_nonzero(arr) noise.append(n_count) not_noise.append(not_n) print("-----") print(data) x = np.arange(len(noise_keeps)) # the label locations width = 0.35 # the width of the bars fig, ax = 
plt.subplots(figsize=(10,5)) rects1 = ax.bar(x - width/2, noise, width, label='noise') rects2 = ax.bar(x + width/2, not_noise, width, label='not noise') # Add some text for labels, title and custom x-axis tick labels, etc. ax.set_ylabel('# of hits') ax.set_xlabel('keep') ax.set_xticks(x) labels = noise_keeps ax.set_xticklabels(labels) ax.legend() fig2, ax2 = plt.subplots(figsize=(10,5)) ax2.plot(x,noise, label='noise') ax2.set_ylabel('# of hits') ax2.set_xlabel('keep') ax2.set_xticks(x) labels = noise_keeps ax2.set_xticklabels(labels) ax2.legend() fig3, ax3 = plt.subplots(figsize=(10,5)) ax3.plot(x,not_noise, label='not noise') ax3.set_ylabel('# of hits') ax3.set_xlabel('keep') ax3.set_xticks(x) labels = noise_keeps ax3.set_xticklabels(labels) ax3.legend() noise_keep = ["0", "0.2", "0.4", "0.6", "0.8", "1"] # "0.4", "0.6", "0.8", "1" plot_noise_dist(noise_keep) embed_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/embedding/checkpoints/epoch=10.ckpt' filter_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/filtering/checkpoints/epoch=92.ckpt' gnn_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/gnn' plots_dir = '/global/homes/c/caditi97/exatrkx-iml2020/exatrkx/src/plots/run' # needs to change... 
ckpt_idx = -1 # which GNN checkpoint to load dbscan_epsilon, dbscan_minsamples = 0.25, 2 # hyperparameters for DBScan min_hits = 5 # minimum number of hits associated with a particle to define "reconstructable particles" frac_reco_matched, frac_truth_matched = 0.5, 0.5 # parameters for track matching emb_ckpt = torch.load(embed_ckpt_dir, map_location='cpu') emb_ckpt['hyper_parameters']['train_split'] = [8,1,1] emb_ckpt['hyper_parameters']['clustering'] = 'build_edges' emb_ckpt['hyper_parameters']['knn_val'] = 500 emb_ckpt['hyper_parameters']['r_val'] = 1.7 emb_ckpt['hyper_parameters'] emb_model = LayerlessEmbedding(emb_ckpt['hyper_parameters']) emb_model.load_state_dict(emb_ckpt["state_dict"]) emb_model.eval() # %%time p_all = [] e_all = [] for i in noise_keep: data_path = f'/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/layerless_check/n{i}/feature_store/1000' p, e = get_emb_metrics(data_path, emb_model) p_all.append(p) e_all.append(e) # + x = np.arange(len(noise_keep)) fig1, (ax11,ax22) = plt.subplots(1,2, figsize=(15,7)) fig1.suptitle('Evt 1000') fig1.tight_layout(w_pad = 10) ax11.plot(x, p_all, label = 'True Positive / Positive') ax11.set_xlabel('noise_level') ax11.set_ylabel('Purity') ax11.set_xticks(x) ax11.set_xticklabels(noise_keep) ax11.legend() ax22.plot(x, e_all, label = 'True Positive / True') ax22.set_xlabel('noise_level') ax22.set_ylabel('Efficiency') ax22.set_xticks(x) ax22.set_xticklabels(noise_keep) ax22.legend() # -
notebooks/checks/layerless_check.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Extract Gene Sets # This section will contain functionality to extract gene sets from symbolic linked disease sets. The goal of this is that once gene sets are extracted, monte-carlo's can be performed on a locally run DGIdb to generate histogram's of returned drugs. This will identified gene sets (hopefully) as stastically significant and relevant to diseases. import pandas as pd import re import requests import json import random import plotly.express as px import plotly.io as pio import plotly.graph_objects as go from plotly.subplots import make_subplots # + # Define disease sets disease_sets = ['brainCancer','hereditaryCancer','metabolicDiseases','renalDiseases','cardiacDisorders','cnsCancer','skinDiseases','t-All','wilmsTumor','MDS','epilepsy','pediatricCancer'] # Done: all # Still too long URI: pediatricDisease # NOTE: If returning URITooLong error, have to add code into the DGIdb app to increase max URI length. 
# +
# Open yaml files to get excel names (missing epilepsy & mds & pediatricCancer & pediatricDisease)
excel_file_list = list()
for i in disease_sets:
    with open('../symlinks/panels/'+ i + '.yaml') as f:
        individual_gene_panels = f.read().splitlines()
    excel_file_list.append(individual_gene_panels)
# -

# Functions to get genes
def get_genes(df):
    """Return the gene symbols of *df* as a list.

    Panels store their symbols either in a ``geneSymbol`` column or in a
    ``Gene`` column depending on the source workbook, so fall back from one
    to the other.  A frame with neither column raises ``AttributeError``.
    """
    try:
        # Most panels use the `geneSymbol` column name.
        # BUG FIX: the bare `except:` here also swallowed KeyboardInterrupt /
        # SystemExit; only the missing-column case should trigger the fallback.
        return list(df.geneSymbol)
    except AttributeError:
        # Older panels label the column `Gene` instead.
        return list(df.Gene)

# +
# Open files and retrieve metrics.  For every workbook listed in a panel we
# record (disease, source file, gene list, gene count); unreadable files get
# a placeholder row so the four metadata lists stay aligned.
list_counter = 0
genes_df_list = list()
disease_df_list = list()
source_file_list = list()
genes_length_list = list()
while list_counter < len(excel_file_list):
    for i in excel_file_list[list_counter]:
        try:
            try:
                f = pd.read_excel('../../diseases/' + disease_sets[list_counter] + '/' + str(i),sheet_name='genes',index_col=0)
                genes = get_genes(f)
                genes_df_list.append(genes)
                genes_length_list.append(str(len(genes)))
                disease_df_list.append(disease_sets[list_counter])
                source_file_list.append(i)
            except Exception:
                # Some workbooks keep the gene table on a `data` sheet instead
                # of a `genes` sheet -- retry with that sheet name.
                f = pd.read_excel('../../diseases/' + disease_sets[list_counter] + '/' + str(i),sheet_name='data',index_col=0)
                genes = get_genes(f)
                genes_df_list.append(genes)
                genes_length_list.append(str(len(genes)))
                disease_df_list.append(disease_sets[list_counter])
                source_file_list.append(i)
            print('Opened file: ' + i + ' for disease ' + disease_sets[list_counter])
        except Exception:
            # File unreadable (or neither sheet present): log its path and
            # record a placeholder row.
            print ('../../diseases/' + disease_sets[list_counter] + '/' + str(i))
            genes_df_list.append('None')
            genes_length_list.append('Nan')
            disease_df_list.append(disease_sets[list_counter])
            source_file_list.append(i)
    list_counter += 1
# -

genes_df = pd.DataFrame()
genes_df = genes_df.assign(disease=disease_df_list,source=source_file_list,genes=genes_df_list,number=genes_length_list)
genes_df

# # Define Monte Carlo functionality
# Gene sets have been extracted with appropriate meta data attached in one big dataframe.
Build functionality to perform monte carlo simulations off local DGIdb instance using the length of gene list for sample size. Enable functionality for both source level and disease level? I think disease level is the focus # + # Get gene set as defined by diseases def get_gene_set(disease,df): df = df[df['disease']==disease] all_genes = list() for i in df.genes: if i == 'None': pass else: for j in i: all_genes.append(j) full_gene_set = list(set(all_genes)) return(full_gene_set) # Get drug JSON from locally run DGIdb def get_json(input_genes): input_genes = ','.join(input_genes) r = requests.get('http://localhost:3000/api/v2/interactions.json?genes=' + input_genes + '&fda_approved_drug=true') data = r.json() return(data) # Get number of drugs from JSON (more data points can be added here) def get_monte_carlo_data_points(data): number_of_drugs = 0 for i in data['matchedTerms']: data_point = len(i['interactions']) number_of_drugs = number_of_drugs + data_point return(number_of_drugs) # - lol = get_gene_set('hereditaryCancer',genes_df) len(lol) data = get_json(lol) result = get_monte_carlo_data_points(data) result # # Random sampling functionality # Random sampling functionality for simulations. Use previously made random gene sets. 
# + def get_random_sample(length): with open('allGenes.yaml') as f: all_genes = f.read().splitlines() sample = random.sample(all_genes, length) return(sample) # For random sampling purposes, save json for later retrieval def save_json(data,file_name): with open('json/' + file_name + '.json', 'w') as outfile: json.dump(data, outfile) pass # - random_genes = get_random_sample(len(lol)) random_genes random_data = get_json(random_genes) random_result = get_monte_carlo_data_points(random_data) random_result # # Putting it all together # Do an actual monte carlo simulation with 500 samples (499 + 1) # + def run_monte_carlo_simulation(genes_df,disease): # Putting it all together df = pd.DataFrame() sample_source_list = list() number_of_drugs_list = list() genes_input_list = list() # disease_sets has full list, but random samples length will change depending on gene set length # Primary samples primary_sample = get_gene_set(disease,genes_df) data = get_json(primary_sample) result = get_monte_carlo_data_points(data) sample_source_list.append(disease) number_of_drugs_list.append(str(result)) genes_input_list.append(primary_sample) # Loop loop_counter = 1 while loop_counter < 500: random_sample = get_random_sample(len(primary_sample)) random_data = get_json(random_sample) random_result = get_monte_carlo_data_points(random_data) sample_source_list.append('random_sample') number_of_drugs_list.append(int(random_result)) genes_input_list.append(random_sample) loop_counter += 1 if loop_counter == 1 | 50 | 100 | 200 | 300 | 400 | 500: print(str('On iteration ' + loop_counter + 'for disease state: ' + disease)) df = df.assign(sample_source=sample_source_list, number_of_drugs=number_of_drugs_list, genes_input=genes_input_list) # Write DataFrames to excel files by query input file_name = disease writer = pd.ExcelWriter('simulation_results/' + file_name + '.xlsx') df.to_excel(writer,sheet_name='results') print('Saving simulation result for ' + disease) writer.save() return(df) # - for 
i in disease_sets: print(i) for i in disease_sets: run_monte_carlo_simulation(genes_df,i) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # # Plot resulting histogram # Simulation is ran and data is saved. Plot the results! # + # Get all P values pvalues = pd.DataFrame() pvalues_list = list() disease_list = list() for i in disease_sets: incidence_counter = 0 pval_df = pd.read_excel('simulation_results/' + i + '.xlsx',sheet_name='results',index_col=0) test_value = pval_df['number_of_drugs'][0] incidence_counter += 1 for j in pval_df['number_of_drugs']: if j >= test_value: incidence_counter += 1 else: pass p_value = incidence_counter / len(pval_df['number_of_drugs']) pvalues_list.append(p_value) disease_list.append(i) pvalues = pvalues.assign(pvalues = pvalues_list, diseases = disease_list) pvalues # - # Read data from file filename = 'renalDiseases' monte_carlo_df = pd.read_excel('simulation_results/' + filename + '.xlsx',sheet_name='results',index_col=0) monte_carlo_df # Figure loop for i in disease_sets: monte_carlo_df = pd.read_excel('simulation_results/' + i + '.xlsx',sheet_name='results',index_col=0) fig = fig = px.histogram(monte_carlo_df, x='number_of_drugs', color='sample_source', marginal='box') pio.write_image(fig,'graphs/' + i + '.pdf', width=3*400, height=3*250, scale=3) # # Code Optimization # All code is written, works, and generates data. It takes an insanely long time to run all simulations however (40+ hours). If this analysis is deemed useful, the number of simulations per gene set is likely to increase from 500 to 10000, meaning it will take even longer and be even more computationally expensive. Use tools provided by colleagues to analysis code and find out what can be better optimized. # + import cProfile # %load_ext snakeviz # Two test sets disease_test_set = ['brainCancer'] # - # %%snakeviz for i in disease_test_set: run_monte_carlo_simulation(genes_df,i) # # Wheres the snakeviz? # Snakeviz did not render. 
# Need to refactor test (smaller sample) and run snakeviz again. Perhaps
# single line snakeviz and feed it just a single sample.

# +
import cProfile
# %load_ext snakeviz

# Two test sets
disease_test_set = ['brainCancer']
# -

def run_monte_carlo_simulation_sample(genes_df,disease):
    """Small (50-draw) profiling variant of ``run_monte_carlo_simulation``.

    Identical pipeline -- primary gene set plus random same-size samples
    queried against the local DGIdb -- but only 50 iterations and no Excel
    export, so snakeviz finishes in a reasonable time.
    """
    # Putting it all together
    df = pd.DataFrame()
    sample_source_list = list()
    number_of_drugs_list = list()
    genes_input_list = list()
    # disease_sets has full list, but random samples length will change depending on gene set length
    # Primary samples
    primary_sample = get_gene_set(disease,genes_df)
    data = get_json(primary_sample)
    result = get_monte_carlo_data_points(data)
    sample_source_list.append(disease)
    number_of_drugs_list.append(str(result))
    genes_input_list.append(primary_sample)
    # Loop
    loop_counter = 1
    while loop_counter < 50:
        random_sample = get_random_sample(len(primary_sample))
        random_data = get_json(random_sample)
        random_result = get_monte_carlo_data_points(random_data)
        sample_source_list.append('random_sample')
        number_of_drugs_list.append(int(random_result))
        genes_input_list.append(random_sample)
        loop_counter += 1
        # BUG FIX: `loop_counter == 1 | 50 | ... | 500` bitwise-ORs the
        # constants into 511, so the progress message never fired (and would
        # have raised TypeError by concatenating an int into a str).
        if loop_counter in (1, 50, 100, 200, 300, 400, 500):
            print('On iteration ' + str(loop_counter) + ' for disease state: ' + disease)
    df = df.assign(sample_source=sample_source_list, number_of_drugs=number_of_drugs_list, genes_input=genes_input_list)
    return df

# %%snakeviz
for i in disease_test_set:
    run_monte_carlo_simulation_sample(genes_df,i)

# Try increasing more samples, previous samples = 5. This samples = 50
# %snakeviz run_monte_carlo_simulation_sample(genes_df,'brainCancer')
genesCuration/monte-carlo/extract-gene-sets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={} # # Hold me tight! Influence of discriminative features on deep network boundaries # # **Authors**: <NAME>, <NAME>, <NAME> and <NAME> # + [markdown] pycharm={} # ## Requirements # + [markdown] pycharm={} # For executing the code, please make sure that you meet the following requirements: # # * python (Successfully tested on v3.8.3) # * [PyTorch](https://pytorch.org/get-started/previous-versions/) (Successfully tested on v1.5.0 with CUDA v10.0.130) # * [Torchvision](https://pytorch.org/get-started/previous-versions/) (Successfully tested on v0.6.0 with CUDA v10.0.130) # * [NumPy](https://numpy.org/) (Successfully tested on v1.18.1) # * [torch-dct](https://github.com/zh217/torch-dct) (Successfully tested on v0.1.5) # * [Matplotlib](https://matplotlib.org/) (Successfully tested on v3.1.3) # # In our experiments, every package was installed through a Conda environment. 
Assuming CUDA v10.0.130 and Conda v4.8.1 (installed through [Miniconda3](https://docs.conda.io/en/latest/miniconda.html) on CentOS Linux 7), these are the corresponding commands: # # &nbsp;&nbsp;&nbsp; ```conda create -n hold_me_tight python==3.8.3``` # &nbsp;&nbsp;&nbsp; ```conda activate hold_me_tight``` # &nbsp;&nbsp;&nbsp; ```conda install numpy==1.18.1``` # &nbsp;&nbsp;&nbsp; ```conda install pytorch=1.5.0 torchvision=0.6.0 cudatoolkit=10.1 -c pytorch``` # &nbsp;&nbsp;&nbsp; ```pip install torch-dct==0.1.5``` # &nbsp;&nbsp;&nbsp; ```conda install matplotlib==3.1.3``` # + [markdown] pycharm={} # ## Table of contents # # - [Margin distribution on diagonal of DCT](#margin) # - [Training LeNet on MNIST](#mnist) # - [Evaluating LeNet on MNIST](#margin) # - [LeNet on flipped MNIST](#flipped) # - [Frequency manipulated image examples](#images) # - [Flipped data](#flipped_im) # - [Filtered data](#filtered_im) # + [markdown] pycharm={} # ## <a name=margin>Margin distribution on diagonal of DCT<a/> # # We first give an example of our training procedure and how we obtain the margin distribution of a network. We show this for the LeNet architecture trained on MNIST. # + [markdown] pycharm={} # Let's load the data. Please fix the root path of the project `TREE_ROOT` where all results will be saved. 
# + pycharm={} TREE_ROOT = './' # + [markdown] pycharm={} # Set the default PyTorch device # + pycharm={} import torch DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' # + [markdown] pycharm={} # Create the loaders # + pycharm={} import torchvision import os DATASET_DIR = os.path.join(TREE_ROOT,'Datasets/') os.makedirs(DATASET_DIR, exist_ok=True) BATCH_SIZE = 128 trainset = torchvision.datasets.MNIST(root=DATASET_DIR, download=True, train=True, transform=torchvision.transforms.ToTensor()) testset = torchvision.datasets.MNIST(root=DATASET_DIR, download=True, train=False, transform=torchvision.transforms.ToTensor()) pin_memory = True if DEVICE == 'cuda' else False trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2, pin_memory=pin_memory) testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2, pin_memory=pin_memory) mean = torch.tensor([0.1307], device=DEVICE)[None, :, None, None] std = torch.tensor([0.3081], device=DEVICE)[None, :, None, None] # + [markdown] pycharm={} # Let's create the model and the normalization layer # + pycharm={} from model_classes.mnist import LeNet from model_classes import TransformLayer model = LeNet() model = model.to(DEVICE) trans = TransformLayer(mean=mean, std=std) # + [markdown] pycharm={} # The network will be saved here # + pycharm={} SAVE_TRAIN_DIR = os.path.join(TREE_ROOT, 'Models/Generated/%s/%s/' % ('MNIST', model.__class__.__name__)) os.makedirs(SAVE_TRAIN_DIR, exist_ok=True) # + [markdown] pycharm={} # ### <a name=mnist>Training loop</a> # # We use a standard SGD optimizer with a cyclic learning rate schedule to optimize this network, and a Cross-Entropy loss function. 
Let's setup the hyperparameters # + pycharm={} import torch.nn as nn import numpy as np from utils import train EPOCHS = 30 MAX_LR = 0.21 MOMENTUM = 0.9 WEIGHT_DECAY = 5e-4 opt = torch.optim.SGD(model.parameters(), lr=MAX_LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY) loss_fun = nn.CrossEntropyLoss() lr_schedule = lambda t: np.interp([t], [0, EPOCHS * 2 // 5, EPOCHS], [0, MAX_LR, 0])[0] # + pycharm={} import time t0 = time.time() print('---> Training a LeNet architecture on MNIST') model = train(model, trans, trainloader, testloader, EPOCHS, opt, loss_fun, lr_schedule, SAVE_TRAIN_DIR) print('---> Training is done! Elapsed time: %.5f minutes\n' % ((time.time() - t0) / 60.)) # + [markdown] pycharm={} # ### <a name=margin>Compute margin distribution</a> # Once we have our trained model we can proceed to measure its margin distribution. In order to approximate the distance to the boundary, we will use the subspace-constrained version of DeepFool. # Then, we can use a procedure to perform this measurement on a sequence of subspaces. # # But first, we need to generate a list of the desired subspaces. 
# # + pycharm={} from utils import generate_subspace_list from utils import compute_margin_distribution SUBSPACE_DIM = 8 DIM = 28 SUBSPACE_STEP = 1 subspace_list = generate_subspace_list(SUBSPACE_DIM, DIM, SUBSPACE_STEP, channels=1) # + [markdown] pycharm={} # Let's get the result # + pycharm={} RESULTS_DIR = os.path.join(TREE_ROOT, 'Results/margin_%s/%s/' % ('MNIST', model.__class__.__name__)) os.makedirs(RESULTS_DIR, exist_ok=True) NUM_SAMPLES_EVAL = 100 indices = np.random.choice(len(testset), NUM_SAMPLES_EVAL, replace=False) eval_dataset = torch.utils.data.Subset(testset, indices[:NUM_SAMPLES_EVAL]) eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2, pin_memory=pin_memory) # + pycharm={} margins = compute_margin_distribution(model, trans, eval_loader, subspace_list, RESULTS_DIR + 'margins.npy') # + [markdown] pycharm={} # Finally let's plot the result # + pycharm={} from graphics import swarmplot # %matplotlib inline # + pycharm={} swarmplot(margins, color='tab:blue') # + [markdown] pycharm={} # ## <a name=flipped>Flipped MNIST</a> # # We now repeat the same procedure for the frequency flipped version of MNIST. We start first by flipping the data. # + pycharm={} from utils_dct import dct_flip flip_train_data = dct_flip(trainset.data.type(torch.float32).view([-1, 1, 28, 28]) / 255.) flip_test_data = dct_flip(testset.data.type(torch.float32).view([-1, 1, 28, 28]) / 255.) 
flip_trainset = torch.utils.data.TensorDataset(flip_train_data, trainset.targets) flip_testset = torch.utils.data.TensorDataset(flip_test_data, testset.targets) flip_trainloader = torch.utils.data.DataLoader(flip_trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2, pin_memory=pin_memory) flip_testloader = torch.utils.data.DataLoader(flip_testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2, pin_memory=pin_memory) # + [markdown] pycharm={} # Because we want to simply rotate (flip) the data we feed the network, we need to modify the normalization layer to account for the new representation # + pycharm={} from model_classes import TransformFlippedLayer flip_trans = TransformFlippedLayer(mean, std, [1, 28, 28], DEVICE) # + [markdown] pycharm={} # Let's train again a LeNet but on the data of the new (flipped) representation # + pycharm={} flip_model = LeNet() flip_model = flip_model.to(DEVICE) SAVE_TRAIN_DIR = os.path.join(TREE_ROOT, 'Models/Generated/%s_flipped/%s/' % ('MNIST', model.__class__.__name__)) os.makedirs(SAVE_TRAIN_DIR, exist_ok=True) opt = torch.optim.SGD(flip_model.parameters(), lr=MAX_LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY) loss_fun = nn.CrossEntropyLoss() lr_schedule = lambda t: np.interp([t], [0, EPOCHS * 2 // 5, EPOCHS], [0, MAX_LR, 0])[0] t0 = time.time() print('\n---> Training a LeNet architecture on Flipped MNIST') flip_model = train(flip_model, flip_trans, flip_trainloader, flip_testloader, EPOCHS, opt, loss_fun, lr_schedule, SAVE_TRAIN_DIR) print('---> Training is done! 
Elapsed time: %.5f minutes\n' % ((time.time() - t0) / 60.)) # + [markdown] pycharm={} # And compute the margin distribution of this model for the same data as before, but with a flipped representation # + pycharm={} flip_eval_dataset = torch.utils.data.Subset(flip_testset, indices[:NUM_SAMPLES_EVAL]) flip_eval_loader = torch.utils.data.DataLoader(flip_eval_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2, pin_memory=pin_memory) RESULTS_DIR = os.path.join(TREE_ROOT, 'Results/margin_%s_flipped/%s/' % ('MNIST', model.__class__.__name__)) os.makedirs(RESULTS_DIR, exist_ok=True) flip_margins = compute_margin_distribution(flip_model, flip_trans, flip_eval_loader, subspace_list, RESULTS_DIR + 'margins.npy') # + pycharm={} swarmplot(flip_margins, color='tab:red') # + [markdown] pycharm={} # ## <a name=images>Frequency manipulated image examples</a> # # We now show a few image examples from the modified versions of the standard datasets. # + [markdown] pycharm={} # ### <a name=flipped_im>Flipped images</a> # + [markdown] pycharm={} # Reproduce the flipped ImageNet examples of the Supplementary material # + pycharm={} from PIL import Image import os import matplotlib.pyplot as plt IMAGE_DIR = os.path.join(TREE_ROOT, 'Images/') DATASET = 'ImageNet' for file in os.listdir(IMAGE_DIR + DATASET): im = Image.open(os.path.join(IMAGE_DIR, DATASET, file)) im_flipped = dct_flip(torch.from_numpy(np.asarray(im).transpose((2, 0, 1))).float()) im_flipped = im_flipped - im_flipped.min() im_flipped = im_flipped / im_flipped.max() im_flipped = Image.fromarray((255 * im_flipped.permute(1, 2, 0).numpy()).astype(np.uint8)) cat_im = Image.new(im.mode, (2 * im.size[0], im.size[1])) cat_im.paste(im, (0, 0)) cat_im.paste(im_flipped, (im.size[0], 0)) display(cat_im) # + [markdown] pycharm={} # Reproduce the flipped CIFAR-10 examples of the Supplementary material # + pycharm={} DATASET = 'CIFAR10' for file in os.listdir(IMAGE_DIR + DATASET): im = Image.open(os.path.join(IMAGE_DIR, 
DATASET, file)) im_flipped = dct_flip(torch.from_numpy(np.asarray(im).transpose((2, 0, 1))).float()) im_flipped = im_flipped - im_flipped.min() im_flipped = im_flipped / im_flipped.max() im_flipped = Image.fromarray((255 * im_flipped.permute(1, 2, 0).numpy()).astype(np.uint8)) cat_im = Image.new(im.mode, (2 * im.size[0], im.size[1])) cat_im.paste(im, (0, 0)) cat_im.paste(im_flipped, (im.size[0], 0)) display(cat_im) # + [markdown] pycharm={} # Reproduce the flipped MNIST examples of the Supplementary material # + pycharm={} DATASET = 'MNIST' for file in os.listdir(IMAGE_DIR + DATASET): im = Image.open(os.path.join(IMAGE_DIR, DATASET, file)) im_flipped = dct_flip(torch.from_numpy(np.asarray(im)).float()) im_flipped = im_flipped - im_flipped.min() im_flipped = im_flipped / im_flipped.max() im_flipped = Image.fromarray((255 * im_flipped.numpy()).astype(np.uint8)) cat_im = Image.new(im.mode, (2 * im.size[0], im.size[1])) cat_im.paste(im, (0, 0)) cat_im.paste(im_flipped, (im.size[0], 0)) display(cat_im) # + [markdown] pycharm={} # ### <a name=filtered_im>Filtered images</a> # + pycharm={} from utils_dct import dct_low_pass from utils_dct import dct_high_pass from utils_dct import dct_cutoff_low # + [markdown] pycharm={} # Reproduce the low-pass and high-pass CIFAR-10 examples of the Supplementary material # + pycharm={} DATASET = 'CIFAR10' BANDWIDTH = 16 for file in os.listdir(IMAGE_DIR + DATASET): im = Image.open(os.path.join(IMAGE_DIR, DATASET, file)) im_lp = dct_low_pass(torch.from_numpy(np.asarray(im).transpose((2, 0, 1))).float(), bandwidth=BANDWIDTH) im_lp = im_lp - im_lp.min() im_lp = im_lp / im_lp.max() im_lp = Image.fromarray((255 * im_lp.permute(1, 2, 0).numpy()).astype(np.uint8)) im_hp = dct_high_pass(torch.from_numpy(np.asarray(im).transpose((2, 0, 1))).float(), bandwidth=BANDWIDTH) im_hp = im_hp - im_hp.min() im_hp = im_hp / im_hp.max() im_hp = Image.fromarray((255 * im_hp.permute(1, 2, 0).numpy()).astype(np.uint8)) cat_im = Image.new(im.mode, (3 * 
im.size[0], im.size[1])) cat_im.paste(im, (0, 0)) cat_im.paste(im_lp, (im.size[0], 0)) cat_im.paste(im_hp, (2 * im.size[0], 0)) display(cat_im) # + [markdown] pycharm={} # Reproduce the high-pass MNIST examples of the Supplementary material # + pycharm={} DATASET = 'MNIST' BANDWIDTH = 14 for file in os.listdir(IMAGE_DIR + DATASET): im = Image.open(os.path.join(IMAGE_DIR, DATASET, file)) im_hp = dct_cutoff_low(torch.from_numpy(np.asarray(im)).float(), bandwidth=BANDWIDTH) im_hp = im_hp - im_hp.min() im_hp = im_hp / im_hp.max() im_hp = Image.fromarray((255 * im_hp.numpy()).astype(np.uint8)) cat_im = Image.new(im.mode, (2 * im.size[0], im.size[1])) cat_im.paste(im, (0, 0)) cat_im.paste(im_hp, (im.size[0], 0)) display(cat_im)
Hold_Me_Tight.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![hercules](img/Hercules_sesame.jpg) # # Python & hdf5 # # *goal: Overview of python, being able to use data from an hdf5 file using python* # # resources: https://github.com/silx-kit/silx-training/sesame # # ``` bash # git clone https://github.com/silx-kit/silx-training/sesame # ``` # # or from the web interface: 'clone or download' -> 'download zip' # summary: # # - quick round table # - introduction to python (0_introduction.ipynb & 1_1_python_startup.ipynb) # - a word about numpy (2_numpy.ipynb) # - hdf5 & h5py (3_2_hdf5_h5py.ipynb) # ## round table time # # now you can either: # # * open notebooks from the sesame jupyterhub server: # # http://10.1.27.10:8000 # # You should have been provided with a login / password # # ![sesame jupyterhub](img/sesame_jupyterhub.png) # # * open the first notebook locally with jupyter-lab or jupyter-notebook # # ``` bash # >>> jupyter-lab # ``` # ``` bash # >>> jupyter notebook # ``` # # ![jupyter-lab interface](img/jupyterlab_screenshot.png) # * use [binder](https://mybinder.org/v2/gh/silx-kit/silx-training/main) # # ![binder interface](img/binder_screenshot.png) # # * just follow the tutorial and play with a python console # # ``` bash # >>> python # ``` # # ![python interface](img/python_screenshot.png)
sesame/starter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# # Matplotlib Task

# +
import pandas as pd
import matplotlib.pyplot as plt

# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

plt.style.use('ggplot')

df = pd.read_csv("data/pandas_tasks.csv")
# -

# ## Instruction
# * Using the loaded dataset, make a boxplot, barplot, histogram, and a scatter plot.
# * Ensure to provide appropriate labels, and titles where necessary.
# * In each plot, ensure to use all parameters used in the Matplotlib tutorial.
# * Optionally, add additional parameters for each plot for some extra points.
# * For each of the plot, choose appropriate columns in the dataset. The explanation for each of the variables has been provided in a word document called `Variable_Codebook.docx`.

df.head()

# ${\textbf{Plotting Scatter Plot}}$

fig, ax = plt.subplots(figsize=(12, 7))
plt.scatter(df['Latitude'], df['Longitude'], alpha=0.5)
plt.xlabel('Latitude', fontsize=18, fontweight='bold')
plt.ylabel('Longitude', fontsize=18, fontweight='bold')
plt.title('Latitude Vs Longitude', size=25)
plt.show()

# ${\textbf{Plotting Bar Chart}}$

df['savings'].value_counts().plot.bar(figsize=(12, 7), color=['#671099', '#889921'])
plt.title('Distribution of Savings in the Data', size=22, fontweight='bold')
plt.ylabel('Frequency', fontsize=18)
plt.show()

def plot_bar(data, col):
    """Bar-plot the value counts of categorical column *col* of *data*."""
    data[col].value_counts().plot.bar(figsize=(12, 7), color=['#671099', '#889921'])
    plt.title(f'{col} Distribution', size=22, color='#889222')
    plt.ylabel('Frequency', fontsize=18)
    # FIX: rotation=360 is a full turn, i.e. no visible rotation; use 0.
    plt.xticks(fontsize=14, rotation=0)
    plt.yticks(fontsize=15)
    plt.legend(loc='best', fontsize=15)
    plt.show()

category_columns = [ 'mobile_money', 'savings', 'borrowing', 'insurance']

for cat_col in category_columns:
    plot_bar(df, cat_col)

# +
df['Q3'] = df['Q3'].replace({1:'Married', 2:'Divorced', 3:'Widowed', 4:'Single'})

df['Q3'].value_counts().plot.bar(figsize=(12, 7), color=['#671099', '#889921', 'gray', 'green'])
plt.title('Marital Status Distribution', size=22, fontfamily='sans-serif')
plt.ylabel('Frequency', fontsize=18)
plt.xticks(fontsize=14, rotation=45)
plt.yticks(fontsize=14)
plt.show()

# +
# FIX: corrected the misspelled chart label 'Techmical Trainig'.
l_edu = ['Primary', 'No formal education', 'Some Primary', 'University', 'Secondary', 'Dont Know', 'Technical Training', 'Nill']

df['Q4'] = df['Q4'].replace({3: l_edu[0], 1:l_edu[1], 2:l_edu[2], 6:l_edu[3], 5:l_edu[4], 7:l_edu[5], 4:l_edu[6], 8:l_edu[-1]})

df['Q4'].value_counts().plot.bar(figsize=(12, 7), color=['#671099', '#889921'])
plt.title('Level of Education', size=22, fontfamily='arial')
plt.ylabel('Frequency', fontsize=18)
plt.xticks(fontsize=14, rotation=70)
plt.yticks(fontsize=14)
plt.show()
# -

# ${\textbf{Plotting Histogram}}$

plt.figure(figsize=(12, 7))
plt.hist(df['Q1'], bins=40, color='r', histtype='stepfilled')
plt.title('Histogram Chart of Age', size=22)
plt.xlabel('Age Values', fontsize=16)
plt.ylabel('Frequency', fontsize=16)
plt.show()

# ${\textbf{Plotting Box Plot}}$

plt.figure(figsize=(12, 7))
# FIX: meanline=True (and meanprops below) only take effect when
# showmeans=True is also passed to boxplot.
plt.boxplot(df['Latitude'], vert=True, meanline=True, showmeans=True, showcaps=True, labels=['Latitude'])
plt.title('A Box plot showing the spread of Latitude', size=22)
plt.show()

plt.figure(figsize=(12, 7))
plt.boxplot(df['Longitude'], vert=True, meanline=True, showmeans=True, showcaps=True, labels=['Longitude'], medianprops={'linewidth': 2, 'color': 'green'}, meanprops={'linewidth': 2, 'color': 'red'})
plt.title('A Box plot showing the spread of Longitude', size=22)
plt.show()
Day 4/alaroabubakarolayemi@yahoo.com(Matplotlib Task).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/moraes-ederson/Curso_DominandoDS_Flai/blob/main/3_ML_Regressao_Custos_Seguro_Saude.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="U4AFRq9sTsMq" # # Dados de uma empresa de Seguros # # Identificar qual tipo de cliente representa um maior custo para seguradora. # # Observação: Já considerada a EDA básica da aula com o pacote "sweetviz". # + id="V9bP3U__SVX3" import pandas as pd caminho = '/content/insurance.csv' dados = pd.read_csv(caminho) # + colab={"base_uri": "https://localhost:8080/"} id="H6_nrPjkVARh" outputId="fe7c48cb-7b06-4ea7-9776-82216f93f504" dados.shape # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="u-vnbFJHS0EV" outputId="e8a9bf69-7a06-4407-c948-eca1c8505ace" dados.head() # + [markdown] id="7xEDMOBNTbJ6" # bmi significa "índice de massa corporal" # # charges significa "custos do seguro" # + [markdown] id="L9JYQE40UnSr" # Observado que a pessoa fumante representa um custo muito alto pela baixa idade. # Idade e Fumante mostra ter um peso importante nos custos. # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="rtxE6D7fS3WP" outputId="1ab9469c-5cd4-41e7-fb73-9863606d24da" dados.describe() # + [markdown] id="2XBF4HkIVUiy" # ## Pré-processamento dos dados antes de prosseguir. # # * Transformar a variável "smoker" de categórica (sim, não) para numérica binária (0 = não, 1 = sim). # * Alterar a escala dos dados analisando os custos a cada 1000 dólares. 
# # # + colab={"base_uri": "https://localhost:8080/"} id="Ls373Cp_TUuH" outputId="fc860ba5-48c2-412e-e516-0b0cea22c8b4" dados['smoker'] = dados['smoker'].map({'yes': 1, 'no': 0}) dados['smoker'] # + colab={"base_uri": "https://localhost:8080/"} id="qlMlGfZEWpPG" outputId="f8ccc47c-473f-4f39-b569-1edf5f966218" dados['charges'] = dados['charges']/1000 dados['charges'] # + [markdown] id="pwy-j9lxW-iX" # ## Deixando o dataset apenas com as variáveis desejadas # + id="P5OO1TBZW1RP" dados = dados[['bmi', 'smoker', 'charges']] # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="HGOZ4BIBXbtg" outputId="bd77dea5-d338-4681-8c12-cbddff4dd76c" dados.head() # + [markdown] id="iZwBATeoXu6Q" # ## Gráficos pertinentes # + id="iikQTQLgXn6o" import seaborn as sns # + colab={"base_uri": "https://localhost:8080/", "height": 296} id="ECjmjk3pX3iQ" outputId="db843280-8ae6-4e51-93cf-2c49e4967595" # criando um gráfico de dispersão para analisar relação entre "bmi", "fumantes" e "custos" sns.scatterplot(x = 'bmi', y = 'charges', hue = 'smoker', data = dados) # + [markdown] id="9ILJ0TUpYgWx" # É observado que as pessoas fumantes (laranja) representam os maiores custos. 
# + [markdown] id="500-9t19ZZvb" # # **Preparando para criar modelo de Machine Learning** # + [markdown] id="cLRpd_dIZhnp" # ## Separando a Variável Alvo "y" (Target, Label, Dependente) das Covariáveis "X" (Variáveis Independentes) # + id="adxWt3txYRAH" X = dados[['bmi', 'smoker']] y = dados['charges'] # + colab={"base_uri": "https://localhost:8080/"} id="n0gulIgbnoed" outputId="47bb2c9c-4978-42ed-e3e9-8595410ed861" from sklearn.linear_model import LinearRegression reg = LinearRegression() reg.fit(X, y) # + colab={"base_uri": "https://localhost:8080/"} id="WQm5smJtnuGv" outputId="b95995a0-e926-4daf-91df-69eecbde7fb0" y # + colab={"base_uri": "https://localhost:8080/"} id="r1z8-P0PnwTL" outputId="b4c11af5-d4c3-4033-9b5a-4f8ecece7a6e" reg.predict(X) # + [markdown] id="AkOqAYP5al-8" # ## Método de Validação Holdout # + id="j65Ng3R9alfR" # holdout # separando em dados de teste e treinamento from sklearn.model_selection import train_test_split # + id="gQNHgmG0ac4-" # dados de teste representando 25% dos dados. 
Xtreino, Xteste, ytreino, yteste = train_test_split(X, y, test_size = 0.25) # + colab={"base_uri": "https://localhost:8080/"} id="wbho3wGvihQZ" outputId="63f145a9-1705-4a12-fb30-3572e6236bb8" Xtreino.shape # + colab={"base_uri": "https://localhost:8080/"} id="hvQrAp8-kVVR" outputId="e6e21c8f-1ef1-47d1-90d6-1649784b4707" Xteste.shape # + id="WpoTPxWAkvF5" # Ajuste do modelo de regressão linear com os dados de treino from sklearn.linear_model import LinearRegression reg = LinearRegression() # + colab={"base_uri": "https://localhost:8080/"} id="sZpYguKwlVck" outputId="adfbeede-0d77-45d9-f5dc-bd36e2fc06e3" reg.fit(Xtreino, ytreino) # + colab={"base_uri": "https://localhost:8080/"} id="zACZ10dzlv3d" outputId="f0e84fc5-8439-4530-9538-df133b3b33a8" yteste # + id="wOY83VhinQ4N" colab={"base_uri": "https://localhost:8080/"} outputId="14cdf79a-9679-4ccd-b296-4c6c49a068ea" # Predição utilizando a amostra teste comparando com a variável resposta yteste preditos = reg.predict(Xteste) preditos # + [markdown] id="Xv_okJUTpZuD" # ## <NAME> # + colab={"base_uri": "https://localhost:8080/"} id="NRnxBG7CpVBy" outputId="a64d8515-8742-4d50-b1a8-2334094dccc3" # Calculando através do numpy import numpy as np np.mean((yteste - preditos)**2) # + colab={"base_uri": "https://localhost:8080/"} id="mwYRJE3ZpyXk" outputId="ec8b88af-f288-41e0-9060-31338751a47c" # Calculando através do Scikit Learn from sklearn.metrics import mean_squared_error eqm = mean_squared_error(yteste, preditos) print('EQM: {}'.format(round(eqm, 2))) # + [markdown] id="iPfw6Hqsqvgy" # Nesse cenário, o modelo de regressão linear é dado por # # * Se fumante = sim # # **CUSTOS = b0 + b1 *IMC + b2* fumante** # # * Se fumante = não # # **CUSTOS = b0 + b1 * IMC** # # # # # # # + colab={"base_uri": "https://localhost:8080/"} id="w9XNVQ7rr9KZ" outputId="f9f9be73-2c24-4920-c5a1-163be6f27874" # Coeficientes b0 = reg.intercept_ b1, b2 = reg.coef_ print('Intercepto: \n', reg.intercept_) print('Coeficientes: \n', reg.coef_) # + 
# + [markdown] id="WYmNWwZTtBmv"
# ## Reta Ajustada

# + id="U2dxxWM1tEZw"
import numpy as np

# Fitted regression lines over the observed BMI range:
# y0 = non-smokers (smoker = 0), y1 = smokers (smoker = 1).
x = np.arange(15, 55, 0.01)
y0 = b0 + b1*x + b2*0
y1 = b0 + b1*x + b2*1

# + colab={"base_uri": "https://localhost:8080/"} id="_w2aoXAatfEr" outputId="f88c83ec-d04c-4004-e0c8-2903e8b4fbdd"
import matplotlib.pyplot as plt

sns.scatterplot(x = 'bmi', y = 'charges', hue = 'smoker', data = dados)
plt.plot(x, y0, color = 'blue')
plt.plot(x, y1, color = 'orange')
plt.title('Retas de Regressão')
plt.xlabel('IMC')
plt.ylabel('Custo')
plt.show()

# + [markdown] id="E0HP9Wy1vbcs"
# ## **Exercício**

# + [markdown] id="a5rN0RIWvfnb"
# Faça repetições da técnica Holdout, variando a distribuição de quem vai para amostra de teste e treinamento. Guarde os resultados do EQM para cada repetição e, no final, calcule sua média para obter uma valor mais estável para o EQM.

# + id="pJvmuHjQwB_f"
# Repeated holdout: re-split, refit and record the test MSE (EQM) R times.
R = 100
eqms = []
for i in range(R):
    Xtreino, Xteste, ytreino, yteste = train_test_split(X, y, test_size = 0.25)
    reg = LinearRegression()
    reg.fit(Xtreino, ytreino)
    preditos = reg.predict(Xteste)
    eqm = mean_squared_error(yteste, preditos)
    eqms.append(eqm)

# + id="zvdccPEchUs_" colab={"base_uri": "https://localhost:8080/"} outputId="e4207ef8-faa4-4ce7-c03b-a82783b077ef"
eqms

# + colab={"base_uri": "https://localhost:8080/"} id="qByA9ZVXyd9K" outputId="74ceaa3e-f283-46af-9e35-62a7d8f1c885"
np.mean(eqms)


# + id="MqkWLrT6i-Iu"
# Turning the cell above into a reusable function.
def HouldoutRep(modelo = None, replicas = 100, proporcao = 0.25):
    """Repeated holdout validation.

    Re-splits the module-level ``X``/``y`` `replicas` times, refits `modelo`
    on each training split and prints/returns the average train and test
    MSE (EQM).

    Parameters
    ----------
    modelo : estimator, optional
        A scikit-learn regressor. Defaults to a fresh ``LinearRegression``.
        (The original default ``modelo=LinearRegression()`` created one
        shared instance at definition time — a mutable default argument.)
    replicas : int
        Number of holdout repetitions.
    proporcao : float
        Fraction of the data assigned to the test split.

    Returns
    -------
    tuple of float
        ``(mean train EQM, mean test EQM)``, each rounded to 2 decimals.

    NOTE(review): this function reads the module-level ``X`` and ``y``
    rather than taking the data as parameters — kept for backward
    compatibility with the existing call sites.
    """
    from sklearn.metrics import mean_squared_error
    from sklearn.model_selection import train_test_split
    if modelo is None:
        modelo = LinearRegression()
    eqms_treino = []
    eqms_teste = []
    for i in range(replicas):
        # BUG FIX: the original hard-coded test_size=0.25 here, silently
        # ignoring the `proporcao` argument.
        Xtreino, Xteste, ytreino, yteste = train_test_split(X, y, test_size = proporcao)
        # fit the model on this split
        reg = modelo
        reg.fit(Xtreino, ytreino)
        # training error
        preditos_treino = reg.predict(Xtreino)
        eqm_treino = mean_squared_error(ytreino, preditos_treino)
        eqms_treino.append(eqm_treino)
        # test error
        preditos_teste = reg.predict(Xteste)
        eqm_teste = mean_squared_error(yteste, preditos_teste)
        eqms_teste.append(eqm_teste)
    res_treino = np.array(eqms_treino).mean().round(2)
    res_teste = np.array(eqms_teste).mean().round(2)
    print('Média de EQM de TREINO: {}'.format(res_treino))
    print('Média de EQM de TESTE: {}'.format(res_teste))
    return res_treino, res_teste


# + colab={"base_uri": "https://localhost:8080/"} id="A6qpnSi2mM_d" outputId="3acdfc37-2bf2-4763-eff5-5f906b44a1f9"
HouldoutRep(replicas = 1000)

# + [markdown] id="3UrvVdsXq0Q3"
# # **Outros modelos de regressão**

# + [markdown] id="dwhqnGGPsUwt"
# ## **K-Vizinhos Mais Próximos (K-Nearest Neighbors)**

# + id="ONt-uwFQq5AX"
X = dados[['bmi', 'smoker']]
y = dados['charges']

# + id="jvn-NsrYuUfM"
Xtreino, Xteste, ytreino, yteste = train_test_split(X, y, test_size = 0.25)

# + colab={"base_uri": "https://localhost:8080/"} id="slxXZhw2s7J4" outputId="6e8c8a4d-1631-44bf-b5c5-2bade7882091"
from sklearn.neighbors import KNeighborsRegressor

knnreg = KNeighborsRegressor(n_neighbors = 11)
knnreg.fit(Xtreino, ytreino)

# + colab={"base_uri": "https://localhost:8080/"} id="ayt0Fabft_2V" outputId="5c3ce268-6726-4536-ed6f-6d34743469c3"
from sklearn.metrics import mean_squared_error

preditos_treino = knnreg.predict(Xtreino)
preditos_teste = knnreg.predict(Xteste)
eqm_treino = mean_squared_error(preditos_treino, ytreino)
eqm_teste = mean_squared_error(preditos_teste, yteste)
print('O Erro Quadrático Médio de TREINO foi: {}'.format(round(eqm_treino, 2)))
print('O Erro Quadrático Médio de TESTE foi: {}'.format(round(eqm_teste, 2)))

# + colab={"base_uri": "https://localhost:8080/"} id="edyWriFVuozG" outputId="60475e62-d83d-470d-d0a3-d0198933475c"
HouldoutRep(modelo = KNeighborsRegressor(n_neighbors = 500), replicas = 100)

# + [markdown] id="GPHP7OQfcEpk"
# ## **Exercício**

# + [markdown] id="tPcrZxC6cGGM"
# Encontre uma maneira de descobrir qual é o melhor quantidade de vizinhos para se utilizar no KNN.
# Verifique para os casos quando se usa apenas a variável bmi como preditora e quando se usa as variáveis bmi e smoker.

# + id="8-13nFSXyXjM" colab={"base_uri": "https://localhost:8080/"} outputId="33d71bcc-9a44-484c-dee5-0233c08ceaaf"
# Search for the best number of neighbours via repeated holdout.
K = 100
eqms_knn = []
maior_eqm = 0
menor_eqm = 0
contador_maior = 0
contador_menor = 0
for i in range(K):
    Xtreino, Xteste, ytreino, yteste = train_test_split(X, y, test_size = 0.25)
    from sklearn.neighbors import KNeighborsRegressor
    # BUG FIX: the original built every model with n_neighbors=K (always 100),
    # so all iterations evaluated the same model and nothing was searched;
    # vary the neighbour count with the loop instead.
    knnreg = KNeighborsRegressor(n_neighbors = i + 1)
    knnreg.fit(Xtreino, ytreino)
    preditos_knn = knnreg.predict(Xteste)
    eqm_knn = mean_squared_error(yteste, preditos_knn)
    eqms_knn.append(eqm_knn)
    # BUG FIX: initialise the extremes on the FIRST iteration (i == 0, K = 1);
    # the original tested `if i == 1`, so iteration 0 was compared against the
    # 0-valued initial extremes.
    if i == 0:
        maior_eqm = eqm_knn
        menor_eqm = eqm_knn
        contador_maior = 1
        contador_menor = 1
    else:
        if eqm_knn > maior_eqm:
            maior_eqm = eqm_knn
            contador_maior = i + 1  # store the actual K, not the loop index
        if eqm_knn < menor_eqm:
            menor_eqm = eqm_knn
            contador_menor = i + 1
print(f'O menor EQM foi de {menor_eqm:.2f} com K = {contador_menor}.')
print(f'O maior EQM foi de {maior_eqm:.2f} com K = {contador_maior}.')
print(f'\nPortanto {contador_menor} é a melhor quantidade de vizinhos.')

# + [markdown] id="hZG1JmAsqMLM"
# ## **Árvore de Regressão**

# + id="HoBd7l-gqRcs"
X = dados['bmi'].values.reshape(-1, 1)
y = dados['charges']

# + id="D2vVMJECrKoT"
Xtreino, Xteste, ytreino, yteste = train_test_split(X, y, test_size = 0.25)

# + colab={"base_uri": "https://localhost:8080/"} id="I9nx4NlXrL0t" outputId="601bd911-da9e-4768-e14e-5aa2d16e54ae"
# Training the model
from sklearn.tree import DecisionTreeRegressor

arvore = DecisionTreeRegressor()
arvore.fit(Xtreino, ytreino)

# + id="ScS_BicBrlV9"
preditos_treino = arvore.predict(Xtreino)
preditos_teste = arvore.predict(Xteste)

# + colab={"base_uri": "https://localhost:8080/"} id="ByNBSzYRr5OU" outputId="a820ffc9-00bd-44df-b964-20dd5f2b4e91"
eqm_treino = mean_squared_error(preditos_treino, ytreino)
eqm_teste = mean_squared_error(preditos_teste, yteste)
print(f'O Erro Quadrático Médio no TREINO foi: {eqm_treino:.2f}')
print(f'O Erro Quadrático Médio no TESTE foi: {eqm_teste:.2f}')

# + colab={"base_uri": "https://localhost:8080/"} id="wFagf2EisHqN" outputId="4f764b1b-3c9a-4d9d-df0e-7657f7e49e58"
HouldoutRep(modelo = DecisionTreeRegressor(max_depth = 2), replicas = 100)

# + colab={"base_uri": "https://localhost:8080/", "height": 466} id="0QJEUjEstIni" outputId="c52b05b9-c48a-4ce4-b8d1-1824901ddc91"
from sklearn.tree import plot_tree

arvore = DecisionTreeRegressor(max_depth = 2, min_samples_split = 200)
arvore.fit(Xtreino, ytreino)
plt.figure(figsize = (14, 8))
plot_tree(arvore, filled = True, proportion = False, impurity = False)
plt.show()

# + id="srxlNhR6uBKz" colab={"base_uri": "https://localhost:8080/", "height": 406} outputId="42c621de-9b0a-40df-b74d-4422f5e50861"
# Fit plot: tree predictions against the train and test samples.
profundidade = 7
arvore = DecisionTreeRegressor(max_depth = profundidade, min_samples_split = 5)
arvore.fit(Xtreino, ytreino)
preditos_treino = arvore.predict(Xtreino)
preditos_teste = arvore.predict(Xteste)
eqm_treino = mean_squared_error(preditos_treino, ytreino)
# BUG FIX: the original assigned this to `eqm_treino` again, clobbering the
# training error and leaving `eqm_teste` stale from the earlier cell, so both
# plot titles below showed the wrong EQM.
eqm_teste = mean_squared_error(preditos_teste, yteste)

import numpy as np
x = np.arange(15, 55, 0.01)
y0 = arvore.predict(x.reshape(-1, 1))

plt.figure(figsize = (15, 6))
plt.subplot(1, 2, 1)
sns.scatterplot(x = Xtreino.reshape(-1), y = ytreino)
plt.plot(x, y0, color = 'red', alpha = 0.75)
plt.title('Ajuste Árvore TREINO - EQM: ' + str(round(eqm_treino, 2)))
plt.xlabel('IMC')
plt.ylabel('Custo')
plt.subplot(1, 2, 2)
sns.scatterplot(x = Xteste.reshape(-1), y = yteste)
plt.plot(x, y0, color = 'blue', alpha = 0.75)
plt.title('Ajuste Árvore TESTE - EQM: ' + str(round(eqm_teste, 2)))
plt.xlabel('IMC')
plt.ylabel('Custo')
plt.show()

# + id="kywiXJxk0DJk"
X = dados[['bmi', 'smoker']]
y = dados['charges']

# + id="yw4dZKEX0Eam"
Xtreino, Xteste, ytreino, yteste = train_test_split(X, y, test_size = 0.25)

# + colab={"base_uri": "https://localhost:8080/"} id="Has4Zg4S0LkG" outputId="762e6769-6001-4a28-aae7-05e4912b9c39"
profundidade = 5
modelo = DecisionTreeRegressor(max_depth = profundidade)
modelo.fit(Xtreino, ytreino) # + id="FZ45zJm80iZI" preditos_treino = modelo.predict(Xtreino) preditos_teste = modelo.predict(Xteste) # + id="zszNGsbz073u" eqm_treino = mean_squared_error(preditos_treino, ytreino) eqm_teste = mean_squared_error(preditos_teste, yteste) # + [markdown] id="1YJSSTZFXTB0" # ### **Gráficos** # + colab={"base_uri": "https://localhost:8080/"} id="5pFbPBI41JRu" outputId="e578decb-1968-4688-d037-01233e277f25" # árvore de ressão plt.figure(figsize = (15, 6)) plot_tree(modelo, filled = True, proportion = True, impurity = False) plt.show() # gráfico de dispersão com o comportamento da árvore import numpy as np x = np.arange(15, 55, 0.01) v0 = np.zeros(len(x)) v1 = np.ones(len(x)) c0 = pd.DataFrame([x, v0], index = ['bmi', 'smoker']).T c1 = pd.DataFrame([x, v1], index = ['bmi', 'smoker']).T d0 = modelo.predict(c0) d1 = modelo.predict(c1) plt.figure(figsize = (12, 5)) plt.subplot(1, 2, 1) sns.scatterplot(x = Xtreino['bmi'], y = ytreino, hue = Xtreino['smoker'], alpha = 0.6) plt.plot(x, d0, '-', color = 'blue', linewidth = 2) plt.plot(x, d1, '-', color = 'orangered', linewidth = 2) plt.title('Ajuste Árvore TREINO - EQM: ' + str(round(eqm_treino, 2))) plt.xlabel('IMC') plt.ylabel('Custo') plt.subplot(1, 2, 2) sns.scatterplot(x = Xteste['bmi'], y = yteste, hue = Xteste['smoker'], alpha = 0.6) plt.plot(x, d0, '-', color = 'blue', linewidth = 2) plt.plot(x, d1, '-', color = 'orangered', linewidth = 2) plt.title('Ajuste Árvore TESTE - EQM: ' + str(round(eqm_teste, 2))) plt.xlabel('IMC') plt.ylabel('Custo') plt.show() # + [markdown] id="AipBapIXWDsI" # ## **Exercício** # + [markdown] id="1eJ3wwziWKq_" # Repita o exercício anterior agora no contexto de árvores. Procure pelo melhor valor da profundidade nos dois casos considerados. 
# + colab={"base_uri": "https://localhost:8080/"} id="gwbgYYwtWilv" outputId="c21f62da-8b23-4d1b-d086-6a6bbecd6ec0" profundidade = 20 eqms_arvore = [] for i in range(profundidade): Xtreino, Xteste, ytreino, yteste = train_test_split(X, y, test_size = 0.25) from sklearn.tree import DecisionTreeRegressor arvore = DecisionTreeRegressor(max_depth = profundidade) arvore.fit(Xtreino, ytreino) preditos_arvore = arvore.predict(Xteste) eqm_arvore = mean_squared_error(yteste, preditos_arvore) eqms_arvore.append(eqm_arvore) print(f'O menor EQM foi de {min(eqms_arvore):.2f} com profundidade {eqms_arvore.index(min(eqms_arvore))+1}.') print(f'O maior EQM foi de {max(eqms_arvore):.2f} com profundidade {eqms_arvore.index(max(eqms_arvore))+1}.') print(f'\nPortanto {eqms_arvore.index(min(eqms_arvore))+1} foi a melhor profundidade para esta árvore de regressão.') # + colab={"base_uri": "https://localhost:8080/"} id="LqZdoMBGaN-L" outputId="babbbbc1-68b9-479c-8f7d-1ff15d470ac0" np.array(eqms_arvore) # + colab={"base_uri": "https://localhost:8080/"} id="M_ddQLZba5aD" outputId="02845f56-5433-4a38-a8d6-19969d4e285f" np.sort(eqms_arvore)
3_ML_Regressao_Custos_Seguro_Saude.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: stat-rethink-pymc # language: python # name: stat-rethink-pymc # --- # + # %matplotlib inline import arviz as az import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc as pm import scipy as sp import seaborn as sns sns.set(context='notebook', font_scale=1.2, rc={'figure.figsize': (12, 5)}) plt.style.use(['seaborn-colorblind', 'seaborn-darkgrid']) RANDOM_SEED = 8927 np.random.seed(286) # Helper function def stdz(series: pd.Series): """Standardize the given pandas Series""" return (series - series.mean())/series.std() # - # ### 12E1. # *Which of the following priors will produce more shrinkage in the estimates?* # # - $\alpha_{TANK} \sim Normal(0, 1)$ # - $\alpha_{TANK} \sim Normal(0, 2)$ # # The first option will produce more shrinkage, because the prior is more concentrated: the standard deviation is smaller, so the density piles up more mass around zero and will pull extreme values closer to zero. # ### 12E2. # *Make the following model into a multilevel model:* # # $y_{i} \sim Binomial(1, p_{i})$ # # $logit(p_{i}) = \alpha_{GROUP[i]} + \beta x_{i}$ # # $\alpha_{GROUP} \sim Normal(0, 10)$ # # $\beta \sim Normal(0, 1)$ # # All that is really required to convert the model to a multilevel model is to take the prior for the vector of intercepts, $\alpha_{GROUP}$, and make it adaptive. This means we define parameters for its mean and standard deviation. Then we assign these two new parameters their own priors, *hyperpriors*. This is what it looks like: # # $y_{i} \sim Binomial(1, p_{i})$ # # $logit(p_{i}) = \alpha_{GROUP[i]} + \beta x_{i}$ # # $\alpha_{GROUP} \sim Normal(\mu_{\alpha}, \sigma_{\alpha})$ # # $\beta \sim Normal(0, 1)$ # # $\mu_{\alpha} \sim Normal(0, 10)$ # # $\sigma_{\alpha} \sim HalfCauchy(1)$ # # The exact hyperpriors you assign don’t matter here. 
Since this problem has no data context, it isn’t really possible to say what sensible priors would be. Note also that an exponential prior on $\sigma_{\alpha}$ is just as sensible, absent context, as the half-Cauchy prior. # ### 12E3. # *Make the following model into a multilevel model:* # # $y_{i} \sim Normal(\mu_{i}, \sigma)$ # # $\mu_{i} = \alpha_{GROUP[i]} + \beta x_{i}$ # # $\alpha_{GROUP} \sim Normal(0, 10)$ # # $\beta \sim Normal(0, 1)$ # # $\sigma \sim HalfCauchy(2)$ # # This is very similar to the previous problem. The only trick here is to notice that there is already a standard deviation parameter, σ. But that standard deviation is for the residuals, at the top level. We’ll need yet another standard deviation for the varying intercepts: # # $y_{i} \sim Normal(\mu_{i}, \sigma)$ # # $\mu_{i} = \alpha_{GROUP[i]} + \beta x_{i}$ # # $\alpha_{GROUP} \sim Normal(\mu_{\alpha}, \sigma_{\alpha})$ # # $\beta \sim Normal(0, 1)$ # # $\sigma \sim HalfCauchy(2)$ # # $\mu_{\alpha} \sim Normal(0, 10)$ # # $\sigma_{\alpha} \sim HalfCauchy(1)$ # ### 12E4. # *Write an example mathematical model formula for a Poisson regression with varying intercepts* # # You can just copy the answer from problem 12E2 and swap out the binomial likelihood for a Poisson, taking care to change the link function from logit to log: # # $y_{i} \sim Poisson(\lambda_{i})$ # # $log(\lambda_{i}) = \alpha_{GROUP[i]} + \beta x_{i}$ # # $\alpha_{GROUP} \sim Normal(\mu_{\alpha}, \sigma_{\alpha})$ # # $\beta \sim Normal(0, 1)$ # # $\mu_{\alpha} \sim Normal(0, 10)$ # # $\sigma_{\alpha} \sim HalfCauchy(1)$ # # Under the hood, all multilevel models are alike. It doesn’t matter which likelihood function rests at the top. Take care, however, to reconsider priors. The scale of the data and parameters is likely quite different for a Poisson model. Absent any particular context in this problem, you can’t recommend better priors. 
But in real work, it’s good to think about reasonable values and provide regularizing priors on the relevant scale. # ### 12E5. # *Write an example mathematical model formula for a Poisson regression with two different kinds of varying intercepts - a cross-classified model* # # The cross-classified model adds another varying intercept type. This is no harder than duplicating the original varying intercepts structure. But you have to take care now not to over-parameterize the model by having a hyperprior mean for both intercept types. You can do this by just assigning one of the adaptive priors a mean of zero. Suppose for example that the second cluster type is day: # # $y_{i} \sim Poisson(\lambda_{i})$ # # $log(\lambda_{i}) = \alpha_{GROUP[i]} + \alpha_{DAY[i]} + \beta x_{i}$ # # $\alpha_{GROUP} \sim Normal(\mu_{\alpha}, \sigma_{GROUP})$ # # $\alpha_{DAY} \sim Normal(0, \sigma_{DAY})$ # # $\beta \sim Normal(0, 1)$ # # $\mu_{\alpha} \sim Normal(0, 10)$ # # $\sigma_{GROUP}, \sigma_{DAY} \sim HalfCauchy(1)$ # # Or you can just pull the mean intercept out of both priors and put it in the linear model: # # $y_{i} \sim Poisson(\lambda_{i})$ # # $log(\lambda_{i}) = \alpha + \alpha_{GROUP[i]} + \alpha_{DAY[i]} + \beta x_{i}$ # # $\alpha \sim Normal(0, 10)$ # # $\alpha_{GROUP} \sim Normal(0, \sigma_{GROUP})$ # # $\alpha_{DAY} \sim Normal(0, \sigma_{DAY})$ # # $\beta \sim Normal(0, 1)$ # # $\sigma_{GROUP}, \sigma_{DAY} \sim HalfCauchy(1)$ # # These are exactly the same model. Although as you’ll see later in Chapter 13, these different forms might be more or less efficient in sampling. # ### 12M1. # *Revisit the Reed frog survival data, reedfrogs.csv, and add the $predation$ and $size$ treatment variables to the varying intercepts model. Consider models with either main effect alone, both main effects, as well as a model including both and their interaction. Instead of focusing on inferences about these two predictor variables, focus on the inferred variation across tanks. 
Explain why it changes as it does across models.* # + frogs = pd.read_csv('../Data/reedfrogs.csv', sep=",") # Switch predictors to dummies frogs["size"] = pd.Categorical(frogs["size"]).reorder_categories(["small", "big"]).codes frogs["pred"] = pd.Categorical(frogs["pred"]).codes # make the tank cluster variable tank = np.arange(frogs.shape[0]) print(frogs.shape) frogs.head(8) # - frogs.describe() # + pred = frogs["pred"].values size = frogs["size"].values n_samples, tuning = 1000, 2000 with pm.Model() as m_itcpt: a = pm.Normal('a', 0., 10.) sigma_tank = pm.HalfCauchy('sigma_tank', 1.) a_tank = pm.Normal('a_tank', a, sigma_tank, shape=frogs.shape[0]) p = pm.math.invlogit(a_tank[tank]) surv = pm.Binomial('surv', n=frogs.density, p=p, observed=frogs.surv) trace_itcpt = pm.sample(n_samples, tune=tuning, cores=2) with pm.Model() as m_p: a = pm.Normal('a', 0., 10.) sigma_tank = pm.HalfCauchy('sigma_tank', 1.) a_tank = pm.Normal('a_tank', a, sigma_tank, shape=frogs.shape[0]) bp = pm.Normal('bp', 0., 1.) p = pm.math.invlogit(a_tank[tank] + bp*pred) surv = pm.Binomial('surv', n=frogs.density, p=p, observed=frogs.surv) trace_p = pm.sample(n_samples, tune=tuning, cores=2) with pm.Model() as m_s: a = pm.Normal('a', 0., 10.) sigma_tank = pm.HalfCauchy('sigma_tank', 1.) a_tank = pm.Normal('a_tank', a, sigma_tank, shape=frogs.shape[0]) bs = pm.Normal('bs', 0., 1.) p = pm.math.invlogit(a_tank[tank] + bs*size) surv = pm.Binomial('surv', n=frogs.density, p=p, observed=frogs.surv) trace_s = pm.sample(n_samples, tune=tuning, cores=2) with pm.Model() as m_p_s: a = pm.Normal('a', 0., 10.) sigma_tank = pm.HalfCauchy('sigma_tank', 1.) a_tank = pm.Normal('a_tank', a, sigma_tank, shape=frogs.shape[0]) bp = pm.Normal('bp', 0., 1.) bs = pm.Normal('bs', 0., 1.) p = pm.math.invlogit(a_tank[tank] + bp*pred + bs*size) surv = pm.Binomial('surv', n=frogs.density, p=p, observed=frogs.surv) trace_p_s = pm.sample(n_samples, tune=tuning, cores=2) with pm.Model() as m_p_s_ps: a = pm.Normal('a', 0., 10.) 
sigma_tank = pm.HalfCauchy('sigma_tank', 1.) a_tank = pm.Normal('a_tank', a, sigma_tank, shape=frogs.shape[0]) bp = pm.Normal('bp', 0., 1.) bs = pm.Normal('bs', 0., 1.) bps = pm.Normal('bps', 0., 1.) p = pm.math.invlogit(a_tank[tank] + bp*pred + bs*size + bps*pred*size) surv = pm.Binomial('surv', n=frogs.density, p=p, observed=frogs.surv) trace_p_s_ps = pm.sample(n_samples, tune=tuning, cores=2) # - # Now we’d like to inspect how the estimated variation across tanks changes from model to model. This means comparing posterior distributions for $\sigma_{tank}$ across the models: az.plot_forest([trace_itcpt, trace_p, trace_s, trace_p_s, trace_p_s_ps], model_names=["m_itcpt", "m_p", "m_s", "m_p_s", "m_p_s_ps"], var_names=["sigma_tank"], credible_interval=.89, figsize=(9,4), combined=True); # Note that adding a predictor always decreased the posterior mean variation across tanks. Why? Because the predictors are, well, predicting variation. This leaves less variation for the varying intercepts to mop up. In theory, if we had in the form of predictor variables all of the relevant information that determined the survival outcomes, there would be zero variation across tanks. # # You might also notice that the $size$ treatment variable reduces the variation much less than does $predation$. The predictor $size$, in these models, doesn’t help prediction very much, so accounting for it has minimal impact on the estimated variation across tanks. # ### 12M2. # *Compare the models you fit just above, using WAIC. Can you reconcile the differences in WAIC with the posterior distributions of the models?* az.compare({"m_itcpt": trace_itcpt, "m_p": trace_p, "m_s": trace_s, "m_p_s": trace_p_s, "m_p_s_ps": trace_p_s_ps}, method="pseudo-BMA") # The models are extremely close, but m_s seems to be the last one, suggesting that $size$ accounts for very little. Can we see this in the coefficients? 
def get_coefs(est_summary: pd.DataFrame) -> dict: mean_est = est_summary["mean"].to_dict() coefs = {} coefs['sigma_tank'] = mean_est.get('sigma_tank', np.nan) coefs['bp'] = mean_est.get('bp', np.nan) coefs['bs'] = mean_est.get('bs', np.nan) coefs['bps'] = mean_est.get('bps', np.nan) return coefs pd.DataFrame.from_dict({"m_itcpt": get_coefs(az.summary(trace_itcpt, credible_interval=0.89)), "m_p": get_coefs(az.summary(trace_p, credible_interval=0.89)), "m_s": get_coefs(az.summary(trace_s, credible_interval=0.89)), "m_p_s": get_coefs(az.summary(trace_p_s, credible_interval=0.89)), "m_p_s_ps": get_coefs(az.summary(trace_p_s_ps, credible_interval=0.89))}) # The posterior means for $b_{s}$ are smaller in absolute value than those for $b_{p}$. This is consistent with the WAIC comparison. In fact, the standard deviations on these coefficients are big enough that the $b_{s}$ posterior distributions overlap zero quite a bit. Consider for example the model m_s: az.summary(trace_s, var_names=["a", "bs", "sigma_tank"], credible_interval=0.89) # But before you conclude that tadpole size doesn’t matter, remember that other models, perhaps including additional predictors, might find new life for $size$. Inference is always conditional on the model. # ### 12M3. # *Re-estimate the basic Reed frog varying intercept model, but now using a Cauchy distribution in place of the Gaussian distribution for the varying intercepts. That is, fit this model:* # # $s_{i} \sim Binomial(n_{i}, p_{i})$ # # $logit(p_{i}) = \alpha_{TANK[i]}$ # # $\alpha_{TANK} \sim Cauchy(\alpha, \sigma)$ # # $\alpha \sim Normal(0, 1)$ # # $\sigma \sim HalfCauchy(1)$ # # *Compare the posterior means of the intercepts, $\alpha_{TANK}$, to the posterior means produced in the chapter, using the customary Gaussian prior. Can you explain the pattern of differences?* with pm.Model() as m_itcpt_cauch: a = pm.Normal('a', 0., 1.) sigma_tank = pm.HalfCauchy('sigma_tank', 1.) 
a_tank = pm.Cauchy('a_tank', a, sigma_tank, shape=frogs.shape[0]) p = pm.math.invlogit(a_tank[tank]) surv = pm.Binomial('surv', n=frogs.density, p=p, observed=frogs.surv) trace_itcpt_cauch = pm.sample(3000, tune=3000, cores=2, nuts_kwargs={"target_accept": .99}) # You might have some trouble sampling efficiently from this posterior, on account of the long tails of the Cauchy. This results in the intercepts a_tank being poorly identifed. You saw a simple example of this problem in Chapter 8, when you met MCMC and learned about diagnosing bad chains. To help the sampler explore the space more efficiently, we've increase the target_accept ratio to 0.99. This topic will come up in more detail in Chapter 13. In any event, be sure to check the chains carefully and sample more if you need to. # # The problem asked you to compare the posterior means of the a_tank parameters. Plotting the posterior means will be a lot more meaningful than just looking at the values: # + post_itcpt = pm.trace_to_dataframe(trace_itcpt) a_tank_m = post_itcpt.drop(["a", "sigma_tank"], axis=1).mean() post_itcpt_cauch = pm.trace_to_dataframe(trace_itcpt_cauch) a_tank_mC = post_itcpt_cauch.drop(["a", "sigma_tank"], axis=1).mean() plt.figure(figsize=(10,5)) plt.scatter(x=a_tank_m, y=a_tank_mC) plt.plot([a_tank_m.min()-0.5, a_tank_m.max()+0.5], [a_tank_m.min()-0.5, a_tank_m.max()+0.5], "k--") plt.xlabel("under Gaussian prior") plt.ylabel("under Cauchy prior") plt.title("Posterior mean of each tank's intercept"); # - # The dashed line shows the values for which the intercepts are equal in the two models. You can see that for the majority of tank intercepts, the Cauchy model actually produces posterior means that are essentially the same as those from the Gaussian model. But the large intercepts, under the Gaussian prior, are very much more extreme under the Cauchy prior. # # For those tanks on the righthand side of the plot, all of the tadpoles survived. 
So using only the data from each tank alone, the log-odds of survival are infinite. The adaptive prior applies pooling that shrinks those log-odds inwards from infinity, thankfully. But the Gaussian prior causes more shrinkage of the extreme values than the Cauchy prior does. That is what accounts for those 5 extreme points on the right of the plot above. # ### 12M4. # *Fit the following cross-classified multilevel model to the chimpanzees data:* # # $L_{i} \sim Binomial(1, p_{i})$ # # $logit(p_{i}) = \alpha_{ACTOR[i]} + \alpha_{BLOCK[i]} + (\beta_{P} + \beta_{PC} C_{i}) P_{i}$ # # $\alpha_{ACTOR} \sim Normal(\alpha, \sigma_{ACTOR})$ # # $\alpha_{BLOCK} \sim Normal(\gamma, \sigma_{BLOCK})$ # # $\alpha, \gamma, \beta_{P}, \beta_{PC} \sim Normal(0, 10)$ # # $\sigma_{ACTOR}, \sigma_{BLOCK} \sim HalfCauchy(1)$ # # *Compare the posterior distribution to that produced by the similar cross-classified model from the chapter. Also compare the number of effective samples. Can you explain the differences?* # + chimp = pd.read_csv('../Data/chimpanzees.csv', sep=";") # we change "actor" and "block" to zero-index chimp.actor = (chimp.actor - 1).astype(int) chimp.block = (chimp.block - 1).astype(int) Nactor = len(chimp.actor.unique()) Nblock = len(chimp.block.unique()) chimp.head() # + with pm.Model() as m_chapter: sigma_actor = pm.HalfCauchy('sigma_actor', 1.) sigma_block = pm.HalfCauchy('sigma_block', 1.) a_actor = pm.Normal('a_actor', 0., sigma_actor, shape=Nactor) a_block = pm.Normal('a_block', 0., sigma_block, shape=Nblock) a = pm.Normal('a', 0., 10.) bp = pm.Normal('bp', 0., 10.) bpc = pm.Normal('bpc', 0., 10.) p = pm.math.invlogit(a + a_actor[chimp.actor.values] + a_block[chimp.block.values] + (bp + bpc * chimp.condition) * chimp.prosoc_left) pulled_left = pm.Binomial('pulled_left', 1, p, observed=chimp.pulled_left) trace_chapter= pm.sample(1000, tune=3000, cores=2) with pm.Model() as m_exerc: alpha = pm.Normal("alpha", 0., 10.) gamma = pm.Normal("gamma", 0., 10.) 
sigma_actor = pm.HalfCauchy('sigma_actor', 1.) sigma_block = pm.HalfCauchy('sigma_block', 1.) a_actor = pm.Normal('a_actor', alpha, sigma_actor, shape=Nactor) a_block = pm.Normal('a_block', gamma, sigma_block, shape=Nblock) bp = pm.Normal('bp', 0., 10.) bpc = pm.Normal('bpc', 0., 10.) p = pm.math.invlogit(a_actor[chimp.actor.values] + a_block[chimp.block.values] + (bp + bpc * chimp.condition) * chimp.prosoc_left) pulled_left = pm.Binomial('pulled_left', 1, p, observed=chimp.pulled_left) trace_exerc= pm.sample(1000, tune=3000, cores=2) # - # This is much like the model in the chapter, just with the two varying intercept means inside the two priors, instead of one mean outside both priors (inside the linear model). Since there are two parameters for the means, one inside each adaptive prior, this model is over-parameterized: an infinite number of different values of $\alpha$ and $\gamma$ will produce the same sum $\alpha + \gamma$. In other words, the $\gamma$ parameter is redundant. # # This will produce a poorly-identified posterior. It’s best to avoid specifying a model like this. As a matter of fact, you probably noticed the second model took a lot more time to sample than the first one (about 10x more time), which is usually a sign of a poorly parametrized model. Remember the folk theorem of statistical computing: "*When you have computational problems, often there’s a problem with your model*". # # Now let's look at each model's parameters: az.summary(trace_chapter, var_names=["a", "bp", "bpc", "sigma_actor", "sigma_block"], credible_interval=0.89) az.summary(trace_exerc, var_names=["alpha", "gamma", "bp", "bpc", "sigma_actor", "sigma_block"], credible_interval=0.89) # Look at these awful effective sample sizes (ess) and R-hat values for trace_exerc! In a nutshell, the new model (m_exerc) samples quite poorly. This is what happens when you over-parameterize the intercept. Notice however that the inferences about the slopes are practically identical. 
So even though the over-parameterized model is inefficient, it has identified the slope parameters. # ### 12H1. # *In 1980, a typical Bengali woman could have 5 or more children in her lifetime. By the year 2000, a typical Bengali woman had only 2 or 3 children. You're going to look at a historical set of data, when contraception was widely available but many families chose not to use it. These data reside in bangladesh.csv and come from the 1988 Bangladesh Fertility Survey. Each row is one of 1934 women. There are six variables, but you can focus on three of them for this practice problem:* # # - $district$: ID number of administrative district each woman resided in # - $use.contraception$: An indicator (0/1) of whether the woman was using contraception # - $urban$: An indicator (0/1) of whether the woman lived in a city, as opposed to living in a rural area # # *The first thing to do is ensure that the cluster variable, $district$, is a contiguous set of integers. Recall that these values will be index values inside the model. If there are gaps, you’ll have parameters for which there is no data to inform them. Worse, the model probably won’t run. Let's look at the unique values of the $district$ variable:* d = pd.read_csv('../Data/bangladesh.csv', sep=";") d.head() d.describe() d.district.unique() # District 54 is absent. So $district$ isn’t yet a good index variable, because it’s not contiguous. This is easy to fix. Just make a new variable that is contiguous: d["district_id"], _ = pd.factorize(d.district, sort=True) district_id = d.district_id.values Ndistricts = len(d.district_id.unique()) d.district_id.unique() # Now there are 60 values, contiguous integers 0 to 59. # # Now, focus on predicting $use.contraception$, clustered by district ID. Fit both (1) a traditional fixed-effects model that uses an index variable for district and (2) a multilevel model with varying intercepts for district. 
Plot the predicted proportions of women in each district using contraception, for both the fixed-effects model and the varying-effects model. That is, make a plot in which district_id is on the horizontal axis and expected proportion using contraception is on the vertical. Make one plot for each model, or layer them on the same plot, as you prefer. # # How do the models disagree? Can you explain the pattern of disagreement? In particular, can you explain the most extreme cases of disagreement, both why they happen, where they do and why the models reach different inferences? # + with pm.Model() as m_fixed: a_district = pm.Normal('a_district', 0., 10., shape=Ndistricts) p = pm.math.invlogit(a_district[district_id]) used = pm.Bernoulli('used', p=p, observed=d["use.contraception"]) trace_fixed = pm.sample(1000, tune=2000, cores=2) with pm.Model() as m_varying: a = pm.Normal('a', 0., 10.) sigma_district = pm.Exponential('sigma_district', 1.) a_district = pm.Normal('a_district', 0., sigma_district, shape=Ndistricts) p = pm.math.invlogit(a + a_district[district_id]) used = pm.Bernoulli('used', p=p, observed=d["use.contraception"]) trace_varying = pm.sample(1000, tune=2000, cores=2) # - # Sampling was smooth and quick, so the traces should be ok. We can confirm by plotting them: az.plot_trace(trace_fixed, compact=True); az.plot_trace(trace_varying, compact=True); # The chains are indeed fine. These models have a lot of parameters, so the summary dataframe we are used to is not really convenient here. Let's use forest plots instead: fig, axes = az.plot_forest([trace_fixed, trace_varying], model_names=["Fixed", "Varying"], credible_interval=0.89, combined=True, figsize=(8,35)) axes[0].grid(); # We can already see that some estimates are particularly uncertain in some districts, but only for the fixed-effects model. Chances are these districts are extreme compared to the others, and/or the sample sizes are very small. 
This would be a case where the varying-effects model's estimates would be better and less volatile in those districts, because it is pooling information - information flows across districts thanks to the higher level common distribution of districts. # + post_fixed = pm.trace_to_dataframe(trace_fixed) p_mean_fixed = sp.special.expit(post_fixed.mean()) post_varying = pm.trace_to_dataframe(trace_varying) # add a_district to a (because they are offsets of the global intercept), then convert to probabilities with logistic p_mean_varying = sp.special.expit(post_varying.drop(["a", "sigma_district"], axis=1).add(post_varying["a"], axis="index").mean()) global_a = sp.special.expit(post_varying["a"].mean()) # + plt.figure(figsize=(11,5)) plt.hlines(d["use.contraception"].mean(), -1, Ndistricts, linestyles="dotted", label="Empirical global mean", alpha=.6, lw=2) plt.hlines(global_a, -1, Ndistricts, linestyles="dashed", label="Estimated global mean", alpha=.6, lw=2) plt.plot(np.arange(Ndistricts), p_mean_fixed, "o", ms=6, alpha=.8, label="Fixed-effects estimates") plt.plot(np.arange(Ndistricts), p_mean_varying, "o", fillstyle="none", ms=6, markeredgewidth=1.5, alpha=.8, label="Varying-effects estimates") plt.xlabel("District") plt.ylabel("Probability contraception") plt.legend(ncol=2); # - # The blue points are the fixed-effects estimates, and the open green ones are the varying effects. The dotted line is the observed average proportion of women using contraception, in the entire sample. The dashed line is the average proportion of women using contraception, in the entire sample, *as estimated by the varying effects model*. # # Notice first that the green points are always closer to the dashed line, as was the case with the tadpole example in lecture. This results from shrinkage, which results from pooling information. There are cases with rather extreme disagreements, though. 
The most obvious is district 2, which has a fixed (blue) estimate of 1 but a varying (green) estimate of only 0.44. There are also two districts (10 and 48) for which the fixed estimates are zero, but the varying estimates are 0.18 and 0.30. If you go back to the forest plot above, these are exactly the three districts whose fixed-effects parameters were both far from zero and very uncertain. # # So what’s going on here? As we suspected, these districts presented extreme results: either all sampled women used contraception or none did. As a result, the fixed-effects estimates were silly. The varying-effects model was able to produce more rational estimates, because it pooled information from other districts. # # But note that the intensity of pooling was different for these three extreme districts. As we intuited too, depending upon how many women were sampled in each district, there was more or less shrinkage (pooling) towards the grand mean. So for example in the case of district 2, there were only 2 women in the sample, and so there is a lot of distance between the blue and green points. In contrast, district 10 had 21 women in the sample, and so while pooling pulls the estimate off of zero to 0.18, it doesn’t pull it nearly as far as district 2. # # Another way to think of this phenomenon is to view the same estimates arranged by number of women in the sampled district, on the horizontal axis. Then on the vertical we can plot the distance (absolute value of the difference) between the fixed and varying estimates. Here’s what that looks like: # + nbr_women = d.groupby("district_id").count()["woman"] abs_dist = (p_mean_fixed - p_mean_varying).abs() plt.figure(figsize=(11,5)) plt.plot(nbr_women, abs_dist, 'o', fillstyle="none", ms=7, markeredgewidth=2, alpha=.6) plt.xlabel("Number of women sampled") plt.ylabel("Shrinkage by district"); # - # You can think of the vertical axis as being the amount of shrinkage. 
The districts with fewer women sampled show a lot more shrinkage, because there is less information in them. As a result, they are expected to overfit more, and so they are shrunk more towards the overall mean. # ### 12H2. # *Return to the Trolley data from Chapter 11. Define and fit a varying intercepts model for these data. By this I mean to add an intercept parameter for the individuals to the linear model. Cluster the varying intercepts on individual participants, as indicated by the unique values in the id variable. Include $action$, $intention$, and $contact$ as before. Compare the varying intercepts model and a model that ignores individuals, using both WAIC/LOO and posterior predictions. What is the impact of individual variation in these data?* # # **This will be adressed in a later pull request, as there is currently an issue with PyMC's OrderedLogistic implementation** # ### 12H3. # *The Trolley data are also clustered by $story$, which indicates a unique narrative for each vignette. Define and fit a cross-classified varying intercepts model with both $id$ and $story$. Use the same ordinary terms as in the previous problem. Compare this model to the previous models. What do you infer about the impact of different stories on responses?* # # **This will be adressed in a later pull request, as there is currently an issue with PyMC's OrderedLogistic implementation** # + import platform import sys import IPython import matplotlib import scipy print(f"This notebook was created on a computer {platform.machine()}, using: " f"\nPython {sys.version[:5]}\nIPython {IPython.__version__}\nPyMC {pm.__version__}\nArviz {az.__version__}\nNumPy {np.__version__}" f"\nPandas {pd.__version__}\nSciPy {scipy.__version__}\nMatplotlib {matplotlib.__version__}\n") # -
Rethinking/end-of-chapter-practice-problems/ch-12.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Smart Queue Monitoring System - Retail Scenario # # ## Overview # Now that you have your Python script and job submission script, you're ready to request an **IEI Tank-870** edge node and run inference on the different hardware types (CPU, GPU, VPU, FPGA). # # After the inference is completed, the output video and stats files need to be retrieved and stored in the workspace, which can then be viewed within the Jupyter Notebook. # # ## Objectives # * Submit inference jobs to Intel's DevCloud using the `qsub` command. # * Retrieve and review the results. # * After testing, go back to the proposal doc and update your original proposed hardware device. # ## Step 0: Set Up # # #### IMPORTANT: Set up paths so we can run Dev Cloud utilities # You *must* run this every time you enter a Workspace session. # (Tip: select the cell and use **Shift+Enter** to run the cell.) # %env PATH=/opt/conda/bin:/opt/spark-2.4.3-bin-hadoop2.7/bin:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/intel_devcloud_support import os import sys sys.path.insert(0, os.path.abspath('/opt/intel_devcloud_support')) sys.path.insert(0, os.path.abspath('/opt/intel')) # ### Step 0.1: (Optional-step): Original Video # # If you are curious to see the input video, run the following cell to view the original video stream we'll be using for inference. import videoHtml videoHtml.videoHTML('Retail', ['original_videos/Retail.mp4']) # ## Step 1 : Inference on a Video # # In the next few cells, You'll submit your job using the `qsub` command and retrieving the results for each job. Each of the cells below should submit a job to different edge compute nodes. 
# # The output of the cell is the `JobID` of your job, which you can use to track progress of a job with `liveQStat`. # # You will need to submit a job for each of the following hardware types: # * **CPU** # * **GPU** # * **VPU** # * **FPGA** # # **Note** You will have to submit each job one at a time and retrieve their results. # # After submission, they will go into a queue and run as soon as the requested compute resources become available. # (Tip: **shift+enter** will run the cell and automatically move you to the next cell.) # # If your job successfully runs and completes, once you retrieve your results, it should output a video and a stats text file in the `results/retail/<DEVICE>` directory. # # For example, your **CPU** job should output its files in this directory: # > **results/retail/cpu** # # **Note**: To get the queue labels for the different hardware devices, you can go to [this link](https://devcloud.intel.com/edge/get_started/devcloud/). # # The following arguments should be passed to the job submission script after the `-F` flag: # * Model path - `/data/models/intel/person-detection-retail-0013/<MODEL PRECISION>/`. You will need to adjust this path based on the model precision being using on the hardware. # * Device - `CPU`, `GPU`, `MYRIAD`, `HETERO:FPGA,CPU` # * Manufacturing video path - `/data/resources/retail.mp4` # * Manufacturing queue_param file path - `/data/queue_param/retail.npy` # * Output path - `/output/results/retail/<DEVICE>` This should be adjusted based on the device used in the job. # * Max num of people - This is the max number of people in queue before the system would redirect them to another queue. 
# ## Step 1.1: Submit to an Edge Compute Node with an Intel CPU # In the cell below, write a script to submit a job to an <a # href="https://software.intel.com/en-us/iot/hardware/iei-tank-dev-kit-core">IEI # Tank* 870-Q170</a> edge node with an <a # href="https://ark.intel.com/products/88186/Intel-Core-i5-6500TE-Processor-6M-Cache-up-to-3-30-GHz-">Intel® Core™ i5-6500TE processor</a>. The inference workload should run on the CPU. # + #Submit job to the queue # cpu_job_id = !qsub queue_job.sh -d . -l nodes=1:tank-870:i5-6500te -F "/data/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013 CPU /data/resources/retail.mp4 /data/queue_param/retail.npy /output/results/retail/cpu 3" print(cpu_job_id[0]) # - # #### Check Job Status # # To check on the job that was submitted, use `liveQStat` to check the status of the job. # # Column `S` shows the state of your running jobs. # # For example: # - If `JOB ID`is in Q state, it is in the queue waiting for available resources. # - If `JOB ID` is in R state, it is running. import liveQStat liveQStat.liveQStat() # #### Get Results # # Run the next cell to retrieve your job's results. import get_results get_results.getResults(cpu_job_id[0], filename='output.tgz', blocking=True) # #### Unpack your output files and view stdout.log # !tar zxf output.tgz # !cat stdout.log # #### View stderr.log # This can be used for debugging # !cat stderr.log # #### View Output Video # Run the cell below to view the output video. If inference was successfully run, you should see a video with bounding boxes drawn around each person detected. 
# + import videoHtml videoHtml.videoHTML('Retail CPU', ['results/retail/cpu/output_video.mp4']) # - # ## Step 1.2: Submit to an Edge Compute Node with a CPU and IGPU # In the cell below, write a script to submit a job to an <a # href="https://software.intel.com/en-us/iot/hardware/iei-tank-dev-kit-core">IEI # Tank* 870-Q170</a> edge node with an <a href="https://ark.intel.com/products/88186/Intel-Core-i5-6500TE-Processor-6M-Cache-up-to-3-30-GHz-">Intel® Core i5-6500TE</a>. The inference workload should run on the **Intel® HD Graphics 530** integrated GPU. # + #Submit job to the queue # gpu_job_id = !qsub queue_job.sh -d . -l nodes=1:tank-870:i5-6500te:intel-hd-530 -F "/data/models/intel/person-detection-retail-0013/FP16/person-detection-retail-0013 GPU /data/resources/retail.mp4 /data/queue_param/retail.npy /output/results/retail/gpu 3" print(gpu_job_id[0]) # - # ### Check Job Status # # To check on the job that was submitted, use `liveQStat` to check the status of the job. # # Column `S` shows the state of your running jobs. # # For example: # - If `JOB ID`is in Q state, it is in the queue waiting for available resources. # - If `JOB ID` is in R state, it is running. import liveQStat liveQStat.liveQStat() # #### Get Results # # Run the next cell to retrieve your job's results. import get_results get_results.getResults(gpu_job_id[0], filename='output.tgz', blocking=True) # #### Unpack your output files and view stdout.log # !tar zxf output.tgz # !cat stdout.log # #### View stderr.log # This can be used for debugging # !cat stderr.log # #### View Output Video # Run the cell below to view the output video. If inference was successfully run, you should see a video with bounding boxes drawn around each person detected. 
# + import videoHtml videoHtml.videoHTML('Retail GPU', ['results/retail/gpu/output_video.mp4']) # - # ## Step 1.3: Submit to an Edge Compute Node with an Intel® Neural Compute Stick 2 # In the cell below, write a script to submit a job to an <a # href="https://software.intel.com/en-us/iot/hardware/iei-tank-dev-kit-core">IEI # Tank 870-Q170</a> edge node with an <a href="https://ark.intel.com/products/88186/Intel-Core-i5-6500TE-Processor-6M-Cache-up-to-3-30-GHz-">Intel Core i5-6500te CPU</a>. The inference workload should run on an <a # href="https://software.intel.com/en-us/neural-compute-stick">Intel Neural Compute Stick 2</a> installed in this node. # + #Submit job to the queue # vpu_job_id = !qsub queue_job.sh -d . -l nodes=1:tank-870:i5-6500te:intel-ncs2 -F "/data/models/intel/person-detection-retail-0013/FP16/person-detection-retail-0013 MYRIAD /data/resources/retail.mp4 /data/queue_param/retail.npy /output/results/retail/vpu 3" print(vpu_job_id[0]) # - # ### Check Job Status # # To check on the job that was submitted, use `liveQStat` to check the status of the job. # # Column `S` shows the state of your running jobs. # # For example: # - If `JOB ID`is in Q state, it is in the queue waiting for available resources. # - If `JOB ID` is in R state, it is running. import liveQStat liveQStat.liveQStat() # #### Get Results # # Run the next cell to retrieve your job's results. import get_results get_results.getResults(vpu_job_id[0], filename='output.tgz', blocking=True) # #### Unpack your output files and view stdout.log # !tar zxf output.tgz # !cat stdout.log # #### View stderr.log # This can be used for debugging # !cat stderr.log # #### View Output Video # Run the cell below to view the output video. If inference was successfully run, you should see a video with bounding boxes drawn around each person detected. 
# + import videoHtml videoHtml.videoHTML('Retail VPU', ['results/retail/vpu/output_video.mp4']) # - # ## Step 1.4: Submit to an Edge Compute Node with IEI Mustang-F100-A10 # In the cell below, write a script to submit a job to an <a # href="https://software.intel.com/en-us/iot/hardware/iei-tank-dev-kit-core">IEI # Tank 870-Q170</a> edge node with an <a href="https://ark.intel.com/products/88186/Intel-Core-i5-6500TE-Processor-6M-Cache-up-to-3-30-GHz-">Intel Core™ i5-6500te CPU</a> . The inference workload will run on the <a href="https://www.ieiworld.com/mustang-f100/en/"> IEI Mustang-F100-A10 </a> FPGA card installed in this node. # + #Submit job to the queue # fpga_job_id = !qsub queue_job.sh -d . -l nodes=1:tank-870:i5-6500te:iei-mustang-f100-a10 -F "/data/models/intel/person-detection-retail-0013/FP16/person-detection-retail-0013 HETERO:FPGA,CPU /data/resources/retail.mp4 /data/queue_param/retail.npy /output/results/retail/fpga 3" print(fpga_job_id[0]) # - # ### Check Job Status # # To check on the job that was submitted, use `liveQStat` to check the status of the job. # # Column `S` shows the state of your running jobs. # # For example: # - If `JOB ID`is in Q state, it is in the queue waiting for available resources. # - If `JOB ID` is in R state, it is running. import liveQStat liveQStat.liveQStat() # #### Get Results # # Run the next cell to retrieve your job's results. import get_results get_results.getResults(fpga_job_id[0], filename='output.tgz', blocking=True) # #### Unpack your output files and view stdout.log # !tar zxf output.tgz # !cat stdout.log # #### View stderr.log # This can be used for debugging # !cat stderr.log # #### View Output Video # Run the cell below to view the output video. If inference was successfully run, you should see a video with bounding boxes drawn around each person detected. 
# +
import videoHtml

videoHtml.videoHTML('Retail FPGA', ['results/retail/fpga/output_video.mp4'])
# -

# ***Wait!***
#
# Please wait for all the inference jobs and video rendering to complete before proceeding to the next step.
#
# ## Step 2: Assess Performance
#
# Run the cells below to compare the performance across all 4 devices. The following timings for the model are being compared across all 4 devices:
#
# - Model Loading Time
# - Average Inference Time
# - FPS

# +
import matplotlib.pyplot as plt

device_list = ['cpu', 'gpu', 'fpga', 'vpu']
inference_time = []
fps = []
model_load_time = []

# Each device's stats.txt holds one number per line, in this order:
# total inference time, frames per second, model loading time.
for device in device_list:
    with open('results/retail/' + device + '/stats.txt', 'r') as f:
        inference_time.append(float(f.readline().split("\n")[0]))
        fps.append(float(f.readline().split("\n")[0]))
        model_load_time.append(float(f.readline().split("\n")[0]))
# -

# One bar chart per metric, devices on the x-axis.
for values, ylabel in ((inference_time, "Total Inference Time in Seconds"),
                       (fps, "Frames per Second"),
                       (model_load_time, "Model Loading Time in Seconds")):
    plt.bar(device_list, values)
    plt.xlabel("Device Used")
    plt.ylabel(ylabel)
    plt.show()

# # Step 3: Update Proposal Document
#
# Now that you've completed your hardware testing, you should go back to the proposal document and validate or update your originally proposed hardware. Once you've updated your proposal, you can move onto the next scenario.
Retail_Scenario.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Normalise hackathon shapefiles onto the project CRS (EPSG:2006) by
# reading, reprojecting, and overwriting each layer in place.

import os
import sys

import geopandas as gpd
import pandas as pd

data_folder = "/Users/lena/Dropbox/Hackathon/rv"

# The coastline arrives in WGS84 (EPSG:4326): tag it, reproject to
# EPSG:2006, and write it back over the source file.
coast_path = os.path.join(
    data_folder, 'incoming_data', 'ocean_coast.shp')
files = gpd.read_file(coast_path)
files.crs = {'init': 'epsg:4326'}
files = files.to_crs({'init': 'epsg:2006'})
coast_out = os.path.join(
    data_folder, 'incoming_data', 'ocean_coast.shp')
files.to_file(coast_out)

# ### For developments

# Already tagged EPSG:2006; the reproject is a round-trip that simply
# normalises the on-disk CRS metadata.
dev_path = os.path.join(
    data_folder, 'data', 'developments', 'project_choc.shp')
files = gpd.read_file(dev_path)
files.crs = {'init': 'epsg:2006'}
files = files.to_crs({'init': 'epsg:2006'})
dev_out = os.path.join(
    data_folder, 'data', 'developments', 'project_choc.shp')
files.to_file(dev_out)


def crs_adjust(data_folder, file_id):
    """Read ``incoming_data/<file_id>.shp`` and return it reprojected to EPSG:2006."""
    shp_path = os.path.join(
        data_folder, 'incoming_data', '{}.shp'.format(file_id))
    layer = gpd.read_file(shp_path)
    return layer.to_crs({'init': 'epsg:2006'})


files = ['landarea']

# +
# Port layer gets the same normalise-and-overwrite treatment as the
# developments file above.
port_path = os.path.join(
    data_folder, 'data', 'infrastructure', 'port.shp')
files = gpd.read_file(port_path)
files.crs = {'init': 'epsg:2006'}
files = files.to_crs({'init': 'epsg:2006'})
port_out = os.path.join(
    data_folder, 'data', 'infrastructure', 'port.shp')
files.to_file(port_out)
# -
src/preprocess/preprocess_for_geodatabase.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Egypt serving malware import pandas as pd import numpy as np import functools from matplotlib import pylab as plt # %matplotlib inline # ## Data massage # !tshark -2 -r mads-141.0.174.38.pcap -Tfields \ # -eframe.time_epoch \ # -eip.id -e ip.ttl -e ip.dst \ # -etcp.srcport -etcp.dstport -etcp.len -etcp.stream -etcp.seq -etcp.ack -etcp.flags.str -etcp.window_size \ # -etcp.options.timestamp.tsval -etcp.options.timestamp.tsecr \ # -etcp.analysis.initial_rtt -etcp.analysis.ack_rtt \ # -ehttp.host -ehttp.response.code -ehttp.user_agent -ehttp.location \ # > mads-141.0.174.38.txt data = pd.read_csv('./mads-141.0.174.38.txt', delimiter='\t', names= ('time_epoch id ttl dst srcport dstport len stream seq ack flags window_size ' 'tsval tsecr initial_rtt ack_rtt host code user_agent location').split()) data['intid'] = data.id.apply(functools.partial(int, base=16)) data.time_epoch -= data.time_epoch.min() data.sample(1).T len(data.stream.unique()) data.host.value_counts() # `www.xnxx.com` stream is not really interesting as it's just a single webpage that occasionally hit the dataset. data.location.value_counts() # There are three different sorts of streams in the dataset. # *Good* streams get correct redirect, *bad* streams get injection, *ugly* streams get no redirect at all. 
s_good = set(data[data.location == 'http://www.xnxx.com/'].stream) s_bad = set(data[data.location == 'http://marketing-sv.com/mads.html'].stream) s_ugly = set(data[data.host == 'xnxx.com'].stream) - set(data[~data.location.isnull()].stream) print 'Bad:', sorted(s_bad) print 'Ugly:', sorted(s_ugly) # ## RTT to HTTP response for *good* & *bad* streams data[(data.stream.isin(s_good)) & (~data.code.isnull())].ack_rtt.hist(color='green', normed=True) data[(data.stream.isin(s_bad)) & (~data.code.isnull())].ack_rtt.hist(color='red', normed=True) plt.xlabel('RTT, s'); plt.ylabel('Density'); # There are not so much streams outside of 250ms range, so let's look at high-res histograms at that range. data[(data.stream.isin(s_good)) & (~data.code.isnull())].ack_rtt.hist(bins=np.linspace(0, 0.25, 25), color='green') plt.xlabel('RTT, s'); plt.ylabel('Count'); data[(data.stream.isin(s_bad)) & (~data.code.isnull())].ack_rtt.hist(bins=np.linspace(0, 0.25, 25), color='red') plt.xlabel('RTT, s'); plt.ylabel('Count'); # We have only six samples of injected redirects, but five of these samples have significantly lower RTT than the usual RTT of http response. The ~45ms RTT corresponds well to the latency of last-mile ADSL link that was used during this analysis, so it is close to latency to get a packet from ISP's network. # ## Let the good streams roll d_good = data[data.stream.isin(s_good) & (data.srcport == 80)] d_good[d_good.tsval.isnull()].shape # So, good data from server **always** has TCP timestamp, there are no rows in the slice. d_good.ttl.value_counts() # So, good data from server **always** has `TTL=54`. 
# # Let's look at [IP fragment ID](https://en.wikipedia.org/wiki/IPv4#Identification) field: dsa = d_good[d_good.flags == '*******A**S*'] plt.scatter(dsa.time_epoch, dsa.intid, marker='.') plt.xlabel('Time since 1st packet, s'); plt.ylabel('IP ID') plt.title('IP ID for SYN-ACK packets from server'); # + dsa = d_good[d_good.flags != '*******A**S*'] fig = plt.figure(); fig.set_figwidth(15); fig.set_figheight(3) ax = fig.add_subplot(1, 2, 1) ax.scatter(dsa.time_epoch, dsa.intid, marker='.') ax.set_xlabel('Time since 1st packet, s'); ax.set_ylabel('IP frag. ID') ax.set_title('IP ID for non-SYN-ACK packets from server'); ax = fig.add_subplot(1, 2, 2) dsa.intid.hist(bins=32, ax=ax) ax.set_xlim(0, 2**16) ax.set_xlabel('IP frag. ID'); ax.set_ylabel('Packets') ax.set_title('IP ID for non-SYN-ACK packets from server'); print 'Min/Max IP ID observed for non-SYN-ACK packets:', dsa.intid.min(), dsa.intid.max() # - # Good server replies with `IP-ID=0` in `SYN-ACK` and almost never has `IP-ID=0` in other packets, IP-ID is rather random for other packets. plt.scatter(d_good.time_epoch, d_good.window_size) plt.xlabel('Time since 1st packet, s'); plt.ylabel('TCP Window, bytes'); plt.title('TCP Window announced by server'); # Good server has window size in 25k…31k range (scaling is applied). # ## Bisect bad streams one-by-one print sorted(s_bad) d_bad = data[data.stream.isin(s_bad) & (data.srcport == 80)] d_bad['time_epoch stream id ttl len seq ack flags window_size tsval ack_rtt'.split()].sort_values(by=['stream', 'time_epoch']) # So, all packets that look-like-injected have: # - IP-ID=0x3412 # - IP TTL=59 # - no TCP options # # The server also sends `408 Request timeout` in 120 seconds. It means, that the server has not seen the request at all, so the injector act as an in-band device. # # Also ACK that is confirming FIN-ACK is looks like injected according to IP ID and TTL, but it has **weird RTT** (~98ms, but not ~44ms). 
# ### Stream 287 # That's injected stream that has ~200ms latency. On the other hand, *genuine* SYN-ACK from the server also has larger-than-usual RTT, so it's probably just a temporary Bufferbloat lag. d_bad[d_bad.stream == 287]['time_epoch stream id ttl len seq ack flags window_size tsval ack_rtt'.split()] # The interesting thing about this stream is that ACK confirming FIN-ACK has 18ms ACK_RTT, so it actually means that the packet was likely sent **BEFORE** seeing the FIN from the client as the last-mile RTT is ~38ms according to `mtr` measurements. # # If the statement is actually true, then another question arises: why is ACK-confirming-FIN-ACK usually ~98ms delayed? Is it triggered by some packet from original server? Is it sort of latency camouflage? No further research was done yet to clarify these questions. # ## Ugly streams d_ugly = data[data.stream.isin(s_ugly) & (data.srcport == 80)] d_ugly.groupby(by='stream tsecr'.split()).time_epoch.agg(['count']) # It means, that the remote server has seen `SYN` packet and the first `ACK` after the `SYN`, but the server has never seen the request itself. It suggests that the *ugly* streams are just a sort of *bad* streams those got no redirection packet for some reason. # ## Reunion of the bad and the ugly # It's interesting that only mobile User-agents were redirected to the `…/mads.html`. Our test sent ~33% of requests using mobile User-Agent and 67% of requests using desktop User-Agent. d_goo = data[data.stream.isin(s_bad | s_ugly)] d_goo.user_agent.value_counts() # It explains why OONI dataset sees no redirection. We've seen redirections only for mobile `User-Agent` so probably the DPI targets mobile users. 
# + print 'Redirection happens in %.1f%% cases' % (100.*len(s_bad|s_ugly) / len(set(data.stream))) print 'Redirection happens in %.1f%% of mobile cases' % (100.*len(s_bad|s_ugly) / len(set(data[data.user_agent.str.match('.*(?:Android|RIM|Symbian|Series60|iPhone|BlackBerry|MIDP)', as_indexer=True) == True].stream)))
static/notebooks/eg-serving-malware.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import nltk.tokenize as tk def word_tokenize(text): words = tk.word_tokenize(text) return len(words), words # Call on concatenation of body and title from collections import namedtuple tokenized_row = namedtuple('tokenized_row', 'sent_count sentences word_count words') # + from gensim.models.doc2vec import Doc2Vec import multiprocessing from collections import namedtuple Document = namedtuple('ScoreDocument', 'words tags') filenames = ['combined_train_test.p', 'r_train_so_test.p', 'so_train_r_test.p', 'so_alone.p', 'reddit_alone.p'] cores = multiprocessing.cpu_count() # + import pickle def import_dict(token_dict, titles, bodies, title_sentences, body_sentences): for index in token_dict: title, body = token_dict[index] titles.append(Document(title.words, tags=[index])) bodies.append(Document(body.words, tags=[index])) count = 0 for sentence in title.sentences: title_sentences.append(Document(word_tokenize(sentence)[1], tags=[str(index) + '_' +str(count)])) count += 1 count = 0 for sentence in body.sentences: body_sentences.append(Document(word_tokenize(sentence)[1], tags=[str(index) + '_' +str(count)])) count += 1 return titles, bodies, title_sentences, body_sentences for filename in filenames: directory_name = filename.split('.p')[0] with open(directory_name + "/tokenized_dict.p", 'rb') as pfile: train_token_dict, test_token_dict = pickle.load(pfile) titles = [] title_sentences = [] bodies = [] body_sentences = [] titles, bodies, title_sentences, body_sentences = import_dict(train_token_dict, titles, bodies, title_sentences, body_sentences) titles, bodies, title_sentences, body_sentences = import_dict(test_token_dict, titles, bodies, title_sentences, body_sentences) model = Doc2Vec(titles, size=100, workers=cores, window=8, min_count=5) 
model.save(directory_name + '/titles.doc2vec') model = Doc2Vec(bodies, size=100, workers=cores, window=8, min_count=5) model.save(directory_name + '/bodies.doc2vec') model = Doc2Vec(body_sentences, size=100, workers=cores, window=8, min_count=5) model.save(directory_name + '/body_sentences.doc2vec') model = Doc2Vec(title_sentences, size=100, workers=cores, window=8, min_count=5) model.save(directory_name + '/title_sentences.doc2vec') # -
Vectorize.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build an index CSV (image_id, clothing class, gender) for the
# AFRIFASHION1600 image dataset.

# +
# import relevant libraries
import numpy as np
import pandas as pd
import os
# -

# define path to image dataset
path = './AFRIFASHION1600/'

# get image names
images = os.listdir(path)

# format name to get image class
def get_style(name):
    """Return the clothing class encoded in an image filename.

    Filenames follow '<Class Name>_<index>.<ext>' where the class itself may
    contain spaces (e.g. 'Buba and Trouser_3.png'), so the class is every
    underscore-separated piece except the trailing index, re-joined with
    single spaces. A name with no underscore yields ''.
    (Replaces the original index-probing loop with bare `except:`.)
    """
    return ' '.join(name.split('_')[:-1])

# test function
get_style('Agbada_118.png')

# +
# get image name and class
data = []
for image_id in images:
    data.append({
        'image_id': image_id,
        'clothing': get_style(image_id)
    })
# -

# read data into dataframe
df = pd.DataFrame(data)

# show dataframe
df

# Styler object for rich notebook rendering (note: does NOT show all rows)
df.style

# show unique clothing
df['clothing'].unique()

# define clothing for males
male = ['African Shirts', 'Agbada', 'Buba and Trouser']

# create gender column: Male iff the clothing class is in `male`
df['gender'] = df['clothing'].apply(lambda x: 'Male' if x in male else 'Female')

# show current dataframe
df

# show unique clothing for male
df[df['gender'] == 'Male']['clothing'].unique()

# show count of each gender class
df.groupby('gender').count()

# save df as csv file to directory
df.to_csv('AFRIFASHION1600.csv', index=False)
model_structures/gender identification/data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Settings # %env TF_KERAS = 1 import os sep_local = os.path.sep print(sep_local) print(os.getcwd()) import sys os.chdir('..' + sep_local +'..' + sep_local +'..' + sep_local + '..' + sep_local + '..' + sep_local + '..') # For Linux import print(os.getcwd()) import tensorflow as tf print(tf.__version__) # # Dataset loading dataset_name='CBSD68' images_dir = '.' + sep_local + 'data' + sep_local + '.CBSD68' validation_percentage = 10 valid_format = 'png' images_dir from training.generators.file_image_generator import create_image_lists, get_generators imgs_list = create_image_lists( image_dir=images_dir, validation_pct=validation_percentage, valid_imgae_formats=valid_format ) from PIL import Image trace_image = Image.open(images_dir+sep_local+'original'+sep_local+'{:04d}.png'.format(66)) trace_image image_size_original=(481, 321, 3) scale = 2 image_size = list(map(lambda x: x//scale , image_size_original[:-1])) + [image_size_original[-1]] image_size = (*image_size,) batch_size = 16 latents_dim = 150 intermediate_dim = 50 image_size training_generator, testing_generator = get_generators( images_list=imgs_list, image_dir=images_dir, image_size=image_size, batch_size=batch_size, class_mode=None ) # ## input is half of the output scale=1 inputs_shape = list(map(lambda x: x//scale , image_size[:-1])) + [image_size[-1]] inputs_shape = (*inputs_shape, ) image_size, inputs_shape shrink_fn = lambda image: tf.image.resize(image, inputs_shape[:-1]) enlarge_fn = lambda image: tf.image.resize(image, image_size[:-1]) import numpy as np # + noise_percentage=20/100 noise_image = tf.random.normal(shape=(batch_size, )+image_size) noising_fn = lambda images: tf.clip_by_value((images + noise_image*noise_percentage),0,1) # - import numpy as np # + def 
generator_noiser(generator): while True: batch = next(generator) yield noising_fn(batch), batch # + train_ds = tf.data.Dataset.from_generator( lambda: generator_noiser(training_generator), output_types= (tf.float32, tf.float32), output_shapes=(tf.TensorShape((batch_size, ) + inputs_shape), tf.TensorShape((batch_size, ) + image_size)), ) test_ds = tf.data.Dataset.from_generator( lambda: generator_noiser(testing_generator), output_types= (tf.float32, tf.float32), output_shapes=(tf.TensorShape((batch_size, ) + inputs_shape), tf.TensorShape((batch_size, ) + image_size)), ) # - _instance_scale=1.0 for data in train_ds: _instance_scale = float(data[0].numpy().max()) break _instance_scale import numpy as np from collections.abc import Iterable if isinstance(image_size, Iterable): _outputs_shape = np.prod(image_size) _outputs_shape # # Model's Layers definition kernel_size=3 stride = 2 c = list(map(lambda x: x// (stride*stride), image_size[:-1])) c = (*c, intermediate_dim) c # + enc_lays = [ tf.keras.layers.UpSampling2D(size=(2, 2)), tf.keras.layers.Flatten(), # No activation tf.keras.layers.Dense(latents_dim) ] dec_lays = [ tf.keras.layers.Dense(units=np.product(c), activation=tf.nn.relu), tf.keras.layers.Reshape(target_shape=c), tf.keras.layers.Conv2DTranspose(filters=intermediate_dim, kernel_size=kernel_size, strides=(stride, stride), padding="SAME", activation='relu'), tf.keras.layers.Conv2DTranspose(filters=intermediate_dim, kernel_size=kernel_size, strides=(stride, stride), padding="SAME", activation='relu'), # No activation tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=kernel_size, strides=(1, 1), padding="SAME") ] # - # # Model definition # + model_name = dataset_name+'_Conv_Pre_Upsampling_denoising' #windows #experiments_dir='..' + sep_local + '..' + sep_local +'..' + sep_local + '..' 
+ sep_local + '..'+sep_local+'experiments'+sep_local + model_name #linux experiments_dir=os.getcwd()+ sep_local +'experiments'+sep_local + model_name # - variables_params = \ [ { 'name': 'inference', #'upsampler', 'inputs_shape':inputs_shape, 'outputs_shape':latents_dim, 'layers': enc_lays } , { 'name': 'generative', #'constructive', 'inputs_shape':latents_dim, 'outputs_shape':image_size, 'layers':dec_lays } ] # + pycharm={"name": "#%%\n"} from os.path import abspath from utils.data_and_files.file_utils import create_if_not_exist _restore = os.path.join(experiments_dir, 'var_save_dir') create_if_not_exist(_restore) absolute = abspath(_restore) print("Restore_dir",absolute) absolute = abspath(experiments_dir) print("Recording_dir",absolute) print("Current working dir",os.getcwd()) # - from training.autoencoding_basic.transformative.AE import autoencoder as AE ae = AE( name=model_name, latents_dim=latents_dim, batch_size=batch_size, variables_params=variables_params, filepath=None,#to restore trained model, set filepath=_restore episode_len=1 ) image_size, inputs_shape # + #ae.compile(metrics=None) #ae.compile(metrics=create_metrics()) ae.compile() # - # # Callbacks # + # added for linux warning suppression import logging mpl_logger = logging.getLogger('matplotlib') mpl_logger.setLevel(logging.WARNING) from training.callbacks.trace_image_reconstruction import trace_reconstruction from training.callbacks.save_model import ModelSaver # - es = tf.keras.callbacks.EarlyStopping( monitor='loss', min_delta=1e-12, patience=12, verbose=1, restore_best_weights=False ) ms = ModelSaver(filepath=_restore) csv_dir = os.path.join(experiments_dir, 'csv_dir') create_if_not_exist(csv_dir) csv_dir = os.path.join(csv_dir, model_name+'.csv') csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True) absolute = abspath(csv_dir) print("Csv_dir",absolute) image_reconstuction_dir = os.path.join(experiments_dir, 'image_reconstuction_dir') create_if_not_exist(image_reconstuction_dir) 
absolute = abspath(image_reconstuction_dir) print("image_reconstuction_dir",absolute) image = shrink_fn(np.asarray(trace_image)).numpy()/255.0 image = image + np.clip((image + np.random.normal(loc=0, scale=1, size=image.shape)*noise_percentage),0,1) img_reconst = trace_reconstruction(filepath=image_reconstuction_dir, image=image, gen_freq=5) # + [markdown] pycharm={"name": "#%% md\n"} # # Model Training # - ae.fit( x=train_ds, input_kw=None, steps_per_epoch=int(1e4), epochs=int(1e6), verbose=2, callbacks=[ es, ms, csv_log, img_reconst], workers=-1, use_multiprocessing=True, validation_data=test_ds, validation_steps=int(1e4) )
notebooks/Super_Resolution/denoising/Upsampling/ellwlb/CBSD68/CBSD68_Pre-Upsampling_Convolutional.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Neural networks for road segmentation** # ======================== # Your next task is to train neural network to segment road on images from car cams. # The original dataset is provided by Karlsruhe Institute of Technology (http://www.cvlibs.net/datasets/kitti/eval_road.php). Their images are ±370x1270, but, for simlictiy, we will use 370x370 squares. # One can download the dataset from https://disk.yandex.ru/d/QPOw4hk84-se_w # Here is an example of input data with corresponding ground truth: # + import os import cv2 import numpy as np import torchvision.transforms as transforms from torch.utils.data import Dataset import matplotlib.pyplot as plt # %matplotlib inline # + def get_paths(path): _, _, filenames = next(os.walk(path)) images_paths = [] for filename in sorted(filenames): images_paths.append(os.path.join(path, filename)) return np.stack(images_paths) class RoadDataset(Dataset): """Feel free to rewrite it. For ex. 
cache all images in RAM to increase training speed""" def __init__(self, images, masks, transform, aug=None,): self.images = sorted(images) self.masks = sorted(masks) self.transform = transform self.aug = aug def __len__(self): return len(self.images) def __getitem__(self, idx): im_name = self.images[idx] mask_name = self.masks[idx] image = cv2.imread(im_name)[:,:,::-1] / 255.0 # ::-1 to convert from BGR to RGB mask = cv2.imread(mask_name, cv2.IMREAD_GRAYSCALE) mask = (mask > 0).astype(np.float32) if self.aug: sample = self.aug( image=image, mask=mask, ) else: sample = { 'image': image, 'mask': mask, } sample['image'] = self.transform(sample['image']).float() sample['mask'] = self.transform(sample['mask']).float() return sample # + X_train = get_paths("data/train/images") y_train = get_paths("data/train/gt") transform = transforms.Compose([ transforms.ToTensor(), ]) # - train_dataset = RoadDataset(X_train, y_train, transform) # + sample = train_dataset[110] image = sample['image'] mask = sample['mask'] plt.subplot(1, 2, 1) plt.imshow(image.permute(1, 2, 0)) plt.subplot(1, 2, 2) plt.imshow(mask.permute(1, 2, 0)) # - # Workflow: # --- # * Choose correct loss function, write training loop and code for testing. Feel free to use previous HW for inspiration. # * Train any segmentation neural network from scratch (for example U-Net) and achieve >= 0.75 IoU on test set (40% points). See function to calculate the metric below. # * Use any pretrained model for image classification, convert it for segmentation by adding decoder (don't forget skip-connections) or usign dilated convolutions and achieve >= 0.87 IoU on test set (60% points). # # You're not allowed to do only one thing: train your network on test set. # # ---- # Your final solution will consist of an ipython notebook with code (for final networks training + any experiments with data) and test metric calculation. 
#
# Feel free to ask in Telegram chat if something is not clear :3

# print(f"Dataset length {len(train_dataset)}")

# Dataset is small so actively use data augmentation: rotations, flip, color-change etc. to prevent overfitting.
#
# Most likely you'll have to pad your images to 512x512 (it divides by 2^5=32, like U-Net wants). Use PadIfNeeded from Albumentations and central crop (see below) after prediction to calculate loss/metrics (you don't want to pay attention on padded values).
#
# ----
# There is a hard data class imbalance in dataset, so the network output will be biased toward "zero" class. You can either tune the minimal probability threshold for the "road" class, or add class weights in optimized loss. You also can try to use softIoU or DICE loss.

# Good luck!

def calc_iou(prediction, ground_truth):
    """Dataset-level IoU: intersections and unions are summed over all
    images before dividing (not averaged per image).

    Both arguments are sequences of arrays; pixels > 0 count as positive.
    Returns 1.0 when both masks are entirely empty (perfect agreement)
    instead of dividing by zero.
    """
    n_images = len(prediction)
    intersection, union = 0, 0
    for i in range(n_images):
        intersection += np.logical_and(prediction[i] > 0, ground_truth[i] > 0).astype(np.float32).sum()
        union += np.logical_or(prediction[i] > 0, ground_truth[i] > 0).astype(np.float32).sum()
    if union == 0:
        return 1.0
    return float(intersection) / union

def central_crop(images, size):
    """Return the central `size` x `size` window of an NCHW batch.

    Assumes square spatial dims (H == W).
    Bug fixes vs. original: the crop was computed but never returned
    (function returned None), and an odd `current_size - size` produced a
    (size+1)-wide crop; the slice is now exactly `size` wide.
    """
    current_size = images.size(2)
    border_len = (current_size - size) // 2
    return images[:, :, border_len:border_len + size, border_len:border_len + size]
homework02/homework-part1-segmentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import torch
import torch.nn as nn
import numpy as np
from copy import deepcopy

device = "cuda" if torch.cuda.is_available() else "cpu"

class RBF(nn.Module):
    """One-RBF time-series predictor: activations rbf(0..t-1) weighted by a
    learned lag-weight vector plus a bias."""

    def __init__(self, timelag):
        super(RBF, self).__init__()
        # NOTE(review): shadows the module-level `device`; same value here.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        torch.cuda.manual_seed(0)
        self.cause_clt = self.init_clt()    # RBF center (learned)
        self.cause_std = self.init_std()    # RBF width (learned)
        self.timelag = timelag
        self.init_weight_target = nn.Parameter(torch.rand(self.timelag, device=device))
        self.b = nn.Parameter(torch.rand(1, device=device))

    def init_clt(self):
        return nn.Parameter(torch.rand(1, device=device))

    def init_std(self):
        return nn.Parameter(torch.rand(1, device=device))

    def rbf(self, x, cluster, std):
        # NOTE(review): `/ 2 * (std * std)` multiplies by std**2 rather than
        # dividing by 2*std**2 — likely a precedence slip vs. the standard RBF
        # kernel, and inconsistent with rbf_gradient's `/(std * std)` below.
        # Left as-is to preserve trained behavior; confirm intent.
        return torch.exp(-(x - cluster) * (x - cluster) / 2 * (std * std))

    def rbf_gradient(self, x, clt, std):
        # Analytic gradient of the kernel above (see NOTE in rbf()).
        return (-2 * (x - clt) / (std * std)) * (torch.exp(-(x - clt) * (x - clt) / 2 * (std * std)))

    def rbf_num_grad(self, x):
        # Central finite differences of rbf over integer lags 1..x-2.
        rbf_grad_list = []
        for j in range(x - 2):
            rbf_grad_list.append(self.rbf(j + 2, self.cause_clt, self.cause_std) - self.rbf(j, self.cause_clt, self.cause_std))
        return rbf_grad_list

    def rbf_grad(self, x):
        # Analytic gradients at integer lags 1..x-2 (matches rbf_num_grad's span).
        rbf_grad_list = []
        for j in range(x - 2):
            rbf_grad_list.append(self.rbf_gradient(j + 1, self.cause_clt, self.cause_std))
        return rbf_grad_list

    def forward(self, t):
        # Concatenate rbf(0), rbf(1), ..., rbf(t-1) into one activation vector.
        for i in range(t):
            if i == 0:
                a = self.rbf(i, self.cause_clt, self.cause_std)
            else:
                a = torch.cat([a, self.rbf(i, self.cause_clt, self.cause_std)], dim=0)
        pred = sum(a.T * self.init_weight_target) + self.b
        return a, pred

# +
def restore_parameters(model, best_model):
    '''Move parameter values from best_model to model.'''
    for params, best_params in zip(model.parameters(), best_model.parameters()):
        params.data = best_params

def train_rbf(model, X, Y, lr, epochs, lookback=5, device=device, verbose=True):
    """Fit `model` with Adam on mean-squared error, keeping the best snapshot.

    Args:
        model: RBF module to train (restored in place to its best epoch).
        X: sequence of input windows (one per sample).
        Y: sequence of targets, aligned with X.
        lr: Adam learning rate.
        epochs: maximum number of epochs.
        lookback: early-stopping patience, in epochs without improvement.
        device: torch device string.
        verbose: print per-epoch loss and the early-stop notice.
                 (Fix: `verbose` was referenced but never defined, so early
                 stopping raised NameError; it is now a keyword parameter.)

    Returns:
        (train_loss_list, model, pred_list, a_list): per-epoch mean losses as
        floats, the restored model, and last-epoch predictions / activations.
    """
    model.to(device)
    loss_fn = nn.MSELoss(reduction='mean')
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    train_loss_list = []
    best_it = None
    best_model = None
    best_loss = np.inf

    for epoch in range(epochs):
        loss_ = 0
        a_list = []
        pred_list = []
        for i in range(len(X)):
            # Forward pass on sample i; accumulate prediction loss.
            a, pred = model(len(X[i]))
            a_list.append(a)
            pred_list.append(pred)
            loss_ += loss_fn(pred, Y[i])

        if verbose:
            print("epoch {} loss {} :".format(epoch, loss_ / len(Y)))
            print("------------------------------------------------------")
            print()

        loss_.backward()
        optimizer.step()
        model.zero_grad()

        # Fix: store a detached float, not the live tensor — keeping the
        # tensor retained every epoch's autograd graph in train_loss_list.
        mean_loss = loss_.item() / len(Y)
        train_loss_list.append(mean_loss)

        if mean_loss < best_loss:
            best_loss = mean_loss
            best_it = epoch
            best_model = deepcopy(model)
        elif (epoch - best_it) == lookback:
            if verbose:
                print('Stopping early')
            break

    restore_parameters(model, best_model)
    return train_loss_list, model, pred_list, a_list
# -

import pandas as pd

# Lorenz-96 simulation dump used as toy data (local path).
df = pd.read_csv('C:/Users/chanyoung/Desktop/Neural-GC-master/lorenz_96_10_10_1000.csv')
X2d = df[['a','b']]

def data_split(X, cause, target, timelag, device=device):
    """Slice series `cause` / `target` of frame X into length-`timelag` windows.

    Returns (cause_windows, target_windows, labels) as float tensors on
    `device`. The label for the window starting at i is X[target][i+timelag+1].
    NOTE(review): that label skips one step past the window end (i+timelag
    would be the immediate next value) and indexes the Series by label, which
    assumes a default RangeIndex — confirm both are intended.
    """
    input_cause = []
    input_target = []
    Y = []
    for i in range(len(X) - (timelag + 1)):
        input_cause.append(X[cause].values[i: i + timelag])
        input_target.append(X[target].values[i: i + timelag])
        Y.append([X[target][i + timelag + 1]])
    return torch.tensor(input_cause, device=device).float(), torch.tensor(input_target, device=device).float(), torch.tensor(Y, device=device).float()

input_cause, input_target, Y = data_split(X2d, 'a', 'a', 10)

# --- scratch cells exploring parameter shapes ---
init_weight_target = nn.Parameter(torch.rand(10, device=device))
init_weight_target

0.8207 * 0.8207

# Fix: `b` was originally defined *after* the expression below, which raised
# NameError at import time; define it first.
b = nn.Parameter(torch.rand(1, device=device))
sum(init_weight_target.T * init_weight_target) + b

len(input_cause)

model = RBF(10)
print(model.cause_clt)
print(model.cause_std)

# Fix: train_rbf returns (loss_history, best_model, pred_list, a_list) — the
# original unpacked only three names (ValueError) and passed `device`
# positionally, so it landed in the `lookback` parameter.
loss, best_model, pred, a = train_rbf(model, input_cause, Y, 0.01, 100, device=device)

print(best_model.cause_clt)
print(best_model.cause_std)

# +
import matplotlib.pyplot as plt

# Fix: pred/a are Python lists of tensors, so `pred[:19].cpu()` failed on the
# list slice and `a` was undefined here; stack the per-sample tensors first.
plt.plot(torch.stack(pred[:19]).detach().cpu().numpy())
plt.plot(torch.stack(a[:19]).detach().cpu().numpy())
plt.show()
# -
cs224w/yes_gd_rbf.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from pprint import pprint # 4 Samples of features example_features = [ ['F11','F12','F13','F14'], ['F21','F22','F23','F24'], ['F31','F32','F33','F34'], ['F41','F42','F43','F44']] # 4 Samples of labels example_labels = [ ['L11','L12'], ['L21','L22'], ['L31','L32'], ['L41','L42']] # - import math def batches(batch_size, features, labels): """ Create batches of features and labels :param batch_size: The batch size :param features: List of features :param labels: List of labels :return: Batches of (Features, Labels) """ assert len(features) == len(labels) output_batches = [] sample_size = len(features) for start_i in range(0, sample_size, batch_size): end_i = start_i + batch_size batch = [features[start_i:end_i], labels[start_i:end_i]] output_batches.append(batch) return output_batches # PPrint prints data structures like 2d arrays, so they are easier to read pprint(batches(3, example_features, example_labels)) # + from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf import numpy as np learning_rate = 0.001 n_input = 784 # MNIST data input (img shape: 28*28) n_classes = 10 # MNIST total classes (0-9 digits) # Import MNIST data mnist = input_data.read_data_sets('./datasets/ud730/mnist', one_hot=True) # The features are already scaled and the data is shuffled train_features = mnist.train.images test_features = mnist.test.images train_labels = mnist.train.labels.astype(np.float32) test_labels = mnist.test.labels.astype(np.float32) # Features and Labels features = tf.placeholder(tf.float32, [None, n_input]) labels = tf.placeholder(tf.float32, [None, n_classes]) # Weights & bias weights = tf.Variable(tf.random_normal([n_input, n_classes])) bias = tf.Variable(tf.random_normal([n_classes])) # Logits - xW + b logits = 
tf.add(tf.matmul(features, weights), bias) # Define loss and optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) # Calculate accuracy correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) batch_size = 512 assert batch_size is not None, 'You must set the batch size' init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for batch_features, batch_labels in batches(batch_size, train_features, train_labels): sess.run(optimizer, feed_dict={features: batch_features, labels: batch_labels}) # Calculate accuracy for test dataset test_accuracy = sess.run( accuracy, feed_dict={features: test_features, labels: test_labels}) print('Test Accuracy: {}'.format(test_accuracy)) # -
007 - Batches.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # # SIMPLE CONVOLUTIONAL NEURAL NETWORK # https://github.com/sjchoi86/Tensorflow-101/blob/master/notebooks/cnn_mnist_simple.ipynb import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data # %matplotlib inline print ("PACKAGES LOADED") # # LOAD MNIST mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels print ("MNIST ready") # # SELECT DEVICE TO BE USED #device_type = "/gpu:1" device_type = "" # # DEFINE CNN with tf.device(device_type): # <= This is optional n_input = 784 n_output = 10 weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w, _b): # Reshape input _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1]) # Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Vectorize _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything out = { 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3 , 'pool': _pool, 'dense': _dense, 'out': _out } return out print ("CNN ready") # # DEFINE COMPUTATIONAL GRAPH # tf Graph input x = 
# NOTE(review): this chunk starts mid-statement — the "x = " that receives this
# first placeholder sits on the preceding (unseen) line; x/y are the image and
# one-hot-label feed tensors of a TF1.x graph. TODO confirm against the full file.
tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])

# Training hyper-parameters.
learning_rate = 0.001
training_epochs = 10
batch_size = 100
display_step = 1  # print stats every `display_step` epochs

# Build the graph: prediction, loss, optimizer, accuracy, init op.
with tf.device(device_type): # <= This is optional
    _pred = conv_simple(x, weights, biases)['out']
    # softmax_cross_entropy_with_logits expects raw logits, not probabilities
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=_pred))
    optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects
    accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy
    init = tf.global_variables_initializer()

# Saver: checkpoint every `save_step` epochs, keep at most 3 checkpoints.
save_step = 1; savedir = "nets/"
saver = tf.train.Saver(max_to_keep=3)
print ("Network Ready to Go!")

# # OPTIMIZE

# ## DO TRAIN OR NOT
do_train = 0  # 0 = restore an existing checkpoint, 1 = train from scratch
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(init)

if do_train == 1:
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})
            # Compute average loss
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
            # NOTE(review): training accuracy below is measured on the LAST
            # mini-batch only, not the whole training set.
            train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys})
            print (" Training accuracy: %.3f" % (train_acc))
            test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel})
            print (" Test accuracy: %.3f" % (test_acc))
        # Save Net
        if epoch % save_step == 0:
            saver.save(sess, "nets/cnn_mnist_simple.ckpt-" + str(epoch))
    print ("Optimization Finished.")

# # RESTORE
if do_train == 0:
    # Restore the checkpoint written after the last training epoch.
    epoch = training_epochs-1
    saver.restore(sess, "nets/cnn_mnist_simple.ckpt-" + str(epoch))
    print ("NETWORK RESTORED")

# # LET'S SEE HOW CNN WORKS

# +
# Feed a single training image and fetch every intermediate activation
# exposed by conv_simple's output dict.
with tf.device(device_type):
    conv_out = conv_simple(x, weights, biases)
    input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]})
    conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]})
    conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]})
    conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]})
    pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]})
    dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]})
    out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]})
# -

# # Input

# +
# Let's see 'input_r' — the reshaped input image
print ("Size of 'input_r' is %s" % (input_r.shape,))
label = np.argmax(trainlabel[0, :])
print ("Label is %d" % (label))

# Plot !
plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray'))
plt.title("Label of this image is " + str(label) + "")
plt.colorbar()
plt.show()
# -

# # Conv1 (convolution)

# +
# Let's see 'conv1' — raw convolution output (first 3 feature maps)
print ("Size of 'conv1' is %s" % (conv1.shape,))

# Plot !
for i in range(3):
    plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray'))
    plt.title(str(i) + "th conv1")
    plt.colorbar()
    plt.show()
# -

# # Conv2 (+bias)

# +
# Let's see 'conv2' — convolution after the bias add
print ("Size of 'conv2' is %s" % (conv2.shape,))

# Plot !
for i in range(3):
    plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray'))
    plt.title(str(i) + "th conv2")
    plt.colorbar()
    plt.show()
# -

# # Conv3 (ReLU)

# +
# Let's see 'conv3' — activations after the ReLU nonlinearity
print ("Size of 'conv3' is %s" % (conv3.shape,))

# Plot !
for i in range(3):
    plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray'))
    plt.title(str(i) + "th conv3")
    plt.colorbar()
    plt.show()
# -

# # Pool (max_pool)

# +
# Let's see 'pool' — downsampled activations after max pooling
print ("Size of 'pool' is %s" % (pool.shape,))

# Plot !
for i in range(3):
    plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray'))
    plt.title(str(i) + "th pool")
    plt.colorbar()
    plt.show()
# -

# # Dense

# Let's see 'dense' — the flattened feature vector fed to the classifier
print ("Size of 'dense' is %s" % (dense.shape,))

# Let's see 'out' — the final logits
print ("Size of 'out' is %s" % (out.shape,))

# # Convolution filters

# +
# Let's see weight! Learned first-layer filters (input channel 0).
wc1 = sess.run(weights['wc1'])
print ("Size of 'wc1' is %s" % (wc1.shape,))

# Plot !
for i in range(3):
    plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray'))
    plt.title(str(i) + "th conv filter")
    plt.colorbar()
    plt.show()
# -
code/mnist_04_cnn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] id="MRP-fAQedMTd"
# <h2> 3.6 Featurizing text data with tfidf weighted word-vectors </h2>

# + id="-3IbomL8dMTi" outputId="3fa8eb7c-ddf2-4f98-edee-0c49db6502e8"
import os
import re
import sys
import time
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import normalize
from tqdm import tqdm

# extract word2vec vectors
# https://github.com/explosion/spaCy/issues/1721
# http://landinghub.visualstudio.com/visual-cpp-build-tools
import spacy

warnings.filterwarnings("ignore")

# + id="j5XNgVyLdMT7"
# avoid decoding problems
df = pd.read_csv("train.csv")

# encode questions to unicode
# https://stackoverflow.com/a/6812069
# str() also turns NaN rows into the literal string 'nan', so every row
# is a non-empty string from here on.
df['question1'] = df['question1'].apply(lambda x: str(x))
df['question2'] = df['question2'].apply(lambda x: str(x))

# + id="HbiMFpgRdMUJ" outputId="21c00698-7f2a-4ce4-e665-f7a2feaab6fa"
df.head()

# + id="RU3HqJXwdMUj"
# merge texts: fit one tf-idf vocabulary over BOTH question columns
questions = list(df['question1']) + list(df['question2'])

tfidf = TfidfVectorizer(lowercase=False, )
tfidf.fit_transform(questions)

# dict key:word and value:idf score
# FIX: get_feature_names() was removed in scikit-learn 1.2 — prefer the
# new accessor and fall back for older versions.
if hasattr(tfidf, 'get_feature_names_out'):
    feature_names = tfidf.get_feature_names_out()
else:
    feature_names = tfidf.get_feature_names()
word2tfidf = dict(zip(feature_names, tfidf.idf_))

# + [markdown] id="2JKI2yT4dMUv"
# - After we find TF-IDF scores, we convert each question to a weighted average of word2vec vectors by these scores.
# - here we use a pre-trained GLOVE model which comes free with "Spacy". https://spacy.io/usage/vectors-similarity
# - It is trained on Wikipedia and therefore, it is stronger in terms of word semantics.

# + id="PFS6m8z5dMUz" outputId="3c4fb6fd-7f86-4955-8b8f-762b5969ecce"
# en_vectors_web_lg, which includes over 1 million unique vectors.
# NOTE(review): 'en_core_web_trf' is a transformer pipeline, not the GloVe
# vector model described above — confirm this model choice is intended.
nlp = spacy.load('en_core_web_trf')


def _tfidf_weighted_vec(doc):
    # idf-weighted combination of the token vectors of one spaCy doc.
    # Reproduces the original computation exactly: each token's
    # (vector * idf) is broadcast-added to a (len(doc), dim) matrix whose
    # row-mean is therefore the idf-weighted SUM of the token vectors.
    # Assumes doc is non-empty (doc[0]) — guaranteed here because every
    # question was coerced with str(), so even NaN becomes 'nan'.
    mean_vec = np.zeros([len(doc), len(doc[0].vector)])
    for word in doc:
        # word2vec
        vec = word.vector
        # fetch idf score; words unseen by the vectorizer get weight 0
        try:
            idf = word2tfidf[str(word)]
        except KeyError:
            idf = 0
        # compute final vec
        mean_vec += vec * idf
    return mean_vec.mean(axis=0)


vecs1 = []
# https://github.com/noamraph/tqdm
# tqdm is used to print the progress bar
for qu1 in tqdm(list(df['question1'])):
    vecs1.append(_tfidf_weighted_vec(nlp(qu1)))
df['q1_feats_m'] = list(vecs1)

# + id="62GEF-RbdMVB" outputId="60a4f5f8-5582-4886-befd-2ab6ed99c753"
# BUG FIX: the original loop initialised `mean_vec1` from the stale `doc1`
# left over from the previous cell and then accumulated into an undefined
# `mean_vec2` (NameError on the first token). Both loops now share the
# helper above, which uses the correct doc throughout.
vecs2 = []
for qu2 in tqdm(list(df['question2'])):
    vecs2.append(_tfidf_weighted_vec(nlp(qu2)))
df['q2_feats_m'] = list(vecs2)

# + id="a38GBlGWdMVQ"
#prepro_features_train.csv (Simple Preprocessing Feartures)
#nlp_features_train.csv (NLP Features)
if os.path.isfile('nlp_features_train.csv'):
    dfnlp = pd.read_csv("nlp_features_train.csv",encoding='latin-1')
else:
    print("download nlp_features_train.csv from drive or run previous notebook")

if os.path.isfile('df_fe_without_preprocessing_train.csv'):
    dfppro = pd.read_csv("df_fe_without_preprocessing_train.csv",encoding='latin-1')
else:
    print("download df_fe_without_preprocessing_train.csv from drive or run previous notebook")

# + id="apdRa1kndMVb"
# drop identifier/text columns so only numeric features remain
df1 = dfnlp.drop(['qid1','qid2','question1','question2'],axis=1)
df2 = dfppro.drop(['qid1','qid2','question1','question2','is_duplicate'],axis=1)
df3 = df.drop(['qid1','qid2','question1','question2','is_duplicate'],axis=1)
# expand each per-question vector into one column per dimension
df3_q1 = pd.DataFrame(df3.q1_feats_m.values.tolist(), index= df3.index)
df3_q2 = pd.DataFrame(df3.q2_feats_m.values.tolist(), index= df3.index)

# + id="xzWAqGegdMVp" outputId="2f88eeda-244f-4bbb-a51c-a8680fe8fb92"
# dataframe of nlp features
df1.head()

# + id="N4DQnDtndMV4" outputId="2e288eed-e8fa-4ec3-a9b9-4e4daba52fc1"
# data before preprocessing
df2.head()

# + id="_1YIPtTwdMWC" outputId="510f4c73-0706-4633-d706-e0d348ebfa71"
# Questions 1 tfidf weighted word2vec
df3_q1.head()

# + id="wUMdkJTNdMWL" outputId="69e3e256-cbb8-4fe2-aaf2-9088c3868b29"
# Questions 2 tfidf weighted word2vec
df3_q2.head()

# + id="Ozz83vh4dMWU" outputId="e5b30f77-2849-4b08-9949-0912ec0db418"
print("Number of features in nlp dataframe :", df1.shape[1])
print("Number of features in preprocessed dataframe :", df2.shape[1])
print("Number of features in question1 w2v dataframe :", df3_q1.shape[1])
print("Number of features in question2 w2v dataframe :", df3_q2.shape[1])
print("Number of features in final dataframe :", df1.shape[1]+df2.shape[1]+df3_q1.shape[1]+df3_q2.shape[1])

# + id="HmfZ5Q1zdMWl"
# storing the final features to csv file
if not os.path.isfile('final_features.csv'):
    df3_q1['id']=df1['id']
    df3_q2['id']=df1['id']
    df1 = df1.merge(df2, on='id',how='left')
    df2 = df3_q1.merge(df3_q2, on='id',how='left')
    result = df1.merge(df2, on='id',how='left')
    result.to_csv('final_features.csv')
3.Q_Mean_W2V.ipynb