code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building your first model in keras and getting it ready for mobile use
# > Learning how to build your first tensorflow model using keras and how to get a file that you can use in your mobile device
#
# - toc:true
# - branch: master
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [deep-learning, jupyter, tensorflow, keras, machine_learning]
# +
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
import pathlib
import matplotlib.pyplot as plt
import numpy as np
# -
# We are going to create a basic model of the form y=mx+b
# Inputs: integers -5..9 (15 points is plenty for a one-parameter line)
x = list(range(-5, 10))
x
# Targets follow y = 2x - 1 exactly, so the model should learn m≈2, b≈-1
y = [2 * value - 1 for value in x]
y
# A single Dense unit on a scalar input learns y = m*x + b directly
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=1, input_shape=[1])
])
# SGD + mean squared error is the standard setup for a linear regression fit
model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['mse'])
# + jupyter={"outputs_hidden": true}
# 200 epochs over 15 points; output hidden because of the long per-epoch log
model.fit(
    x=x,
    y=y,
    epochs=200
)
# -
# Now that we have the model we can generate a SavedModel
# Export in TensorFlow's SavedModel format under version directory "1"
export_dir = 'saved_model/1'
tf.saved_model.save(model, export_dir)
# Convert the SavedModel to TFLite
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
tflite_model = converter.convert()
# Persist the flatbuffer so it can be bundled into a mobile app
tflite_model_file = pathlib.Path('model.tflite')
tflite_model_file.write_bytes(tflite_model)
# Load TFLite model and allocate tensors
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
# Get input and output tensors
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test the Tensorflow Lite model on random input data
input_shape = input_details[0]['shape']
inputs, outputs = [], []
for _ in range(100):
    # random_sample draws from [0, 1); cast to float32 to match the TFLite input type
    input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_results = interpreter.get_tensor(output_details[0]['index'])
    # Test the Tensorflow model on random input
    tf_results = model(tf.constant(input_data))
    output_data = np.array(tf_results)
    # NOTE(review): the values plotted below come from the Keras model
    # (tf_results); tflite_results is fetched but never compared or plotted —
    # confirm whether the TFLite outputs were meant to be used here.
    inputs.append(input_data[0][0])
    outputs.append(output_data[0][0])
# Visualize the model
# The points should fall on the learned line y ≈ 2x - 1
plt.plot(inputs, outputs)
plt.show()
| _notebooks/2020-10-13-keras-model-how-to-save-it.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # heading
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ## Cosh, Sinh, Tanh
# starting with y = sqrt(1 - x^2): a quarter of the unit circle (the circular analogue of the rectangular hyperbola)
# Sample y = sqrt(1 - x^2) on [0, 1]: a quarter of the unit circle
x = np.linspace(0, 1, 100)
x2 = x ** 2
y = np.sqrt(1 - x2)
plt.plot(x, y)
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import make_moons

# Two interleaving half-circles, no noise added
X, y = make_moons(noise=0)
len(X)
# Pull the two coordinates apart for scatter plotting
x0 = [point[0] for point in X]
x1 = [point[1] for point in X]
plt.scatter(x0, x1)
# Same dataset with Gaussian noise (std 0.25) on the points
X_quart, y_quart = make_moons(noise=0.25)
x0_quart = [point[0] for point in X_quart]
x1_quart = [point[1] for point in X_quart]
plt.plot(x0_quart, 'rx')
plt.plot(x1_quart, 'go')
| math_jetpack/trigonometry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
# NOTE(review): importing Timestamp from pandas._libs is private API, and it
# appears unused in this notebook — candidate for removal.
from pandas._libs.tslibs.timestamps import Timestamp
import datetime
import random
# Synthetic fixtures produced earlier in this pipeline
clients = pd.read_csv('../datasets/clients.csv')
items = pd.read_csv('../datasets/items.csv')
clients
items
# ## Add trend and seasonality
# +
from timeseries_generator import LinearTrend, Generator, WhiteNoise, RandomFeatureFactor, WeekdayFactor, HolidayFactor

# One year of daily data (MM-DD-YYYY strings)
start="09-01-2020"
end="08-31-2021"
# setting up a linear trend
lt = LinearTrend(coef=10.0, offset=2., col_name="my_linear_trend")
weekday_factor = WeekdayFactor(
    col_name="weekend_boost_factor",
    # Factors keyed by weekday index (presumably 0=Mon, so 4/5/6 = Fri/Sat/Sun
    # — verify against WeekdayFactor docs): Fri and Sun ×1.05, Sat ×1.1.
    factor_values={4: 1.05, 5: 1.1, 6: 1.05}
)
g = Generator(factors={lt, weekday_factor}, features=None, date_range=pd.date_range(start=start, end=end))
g.generate()
# update by adding some white noise to the generator
wn = WhiteNoise(stdev_factor=0.06)
g.update_factor(wn)
g.generate()
g.plot()
# -
# Turn the generated continuous value into an integer per-day transaction count
data = g.generate()
data['rows'] = (2*data.value).astype(int)
data = data[['date', 'rows']]
data.set_index('date').plot()
data
# +
## holiday: dashain, christmas/new year, valentines day
## major promotion campaign with sisan baniya, multiple tech channels, facebook promo etc
##
# -
def nepali_holiday(row):
    """Adjust one day's transaction count for holidays, an outage and growth.

    Expects a row with ``date`` (str()-able to "%Y-%m-%d %H:%M:%S") and an
    integer ``rows`` count. Boosts around Dashain, Christmas/New Year and
    Valentine's day, zeroes an early-January window, and applies a permanent
    +20% from July 2021 onward (post-promotion growth). Returns the adjusted
    count as an int.
    """
    date = datetime.datetime.strptime(str(row.date), "%Y-%m-%d %H:%M:%S")
    num = row.rows
    # (start, end, factor) with exclusive bounds, matching the original
    # strict comparisons; the windows do not overlap, so order is irrelevant.
    windows = [
        (datetime.datetime(2020, 10, 16), datetime.datetime(2020, 10, 25), 1.1),  # Dashain
        (datetime.datetime(2020, 12, 24), datetime.datetime(2021, 1, 2), 1.2),    # Christmas / New Year
        (datetime.datetime(2021, 1, 4), datetime.datetime(2021, 1, 7), 0.0),      # outage: no transactions
        (datetime.datetime(2021, 2, 13), datetime.datetime(2021, 2, 15), 1.1),    # Valentine's day
        (datetime.datetime(2021, 7, 1), datetime.datetime.max, 1.2),              # post-promo growth
    ]
    for start, end, factor in windows:
        if start < date < end:
            num = int(num * factor)
    return num
# Apply the per-day holiday/outage/promo adjustment, then eyeball the result
data['rows'] = data.apply(nepali_holiday, axis=1)
data.set_index('date').plot()
data
def create_list(row):
    """Return the row's date (as a string) repeated once per transaction."""
    stamp = str(row.date)
    return [stamp for _ in range(row.rows)]
# Explode daily counts into one row per transaction, all stamped at midnight
df = pd.DataFrame({'created_at': data.apply(create_list, axis=1).sum()})
def random_times(input_time):
    """Shift a timestamp string by a uniformly random number of hours.

    Parses ``input_time`` ("%Y-%m-%d %H:%M:%S") and adds a random offset of
    up to 24 hours so transactions spread over the whole day.
    (The original drew from uniform(0, 23), which could never produce a time
    in the last hour of the day.)
    """
    random_hour = random.uniform(0, 24)
    base = datetime.datetime.strptime(input_time, "%Y-%m-%d %H:%M:%S")
    return base + datetime.timedelta(hours=random_hour)
# Randomize intra-day times, confirm daily totals still show the trend,
# then persist the raw synthetic transactions
df['created_at'] = df['created_at'].apply(random_times)
df.resample('D', on='created_at').count().plot()
df
df.to_csv('../datasets/transactions_raw.csv', index=False)
| notebooks/3_trend_gen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="OQy6wb1TdpXH" colab_type="code" colab={}
# #!pip install datadotworld
# #!pip install datadotworld[pandas]
# + id="wQlVXZ00gTFk" colab_type="code" colab={}
# #!dw configure
# + id="mDcA5i-3fVQs" colab_type="code" colab={}
from google.colab import drive
import pandas as pd
import numpy as np
import datadotworld as dw
# + id="6dlzRSB-gi3_" colab_type="code" colab={}
#drive.mount('/content/drive')
# + id="fPZ7geYqgv6T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8ec97492-91f7-421a-ffc1-db565914daf1" executionInfo={"status": "ok", "timestamp": 1581546506688, "user_tz": -60, "elapsed": 721, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
# cd "drive/My Drive/Colab Notebooks/Matrix_DW"
# + id="N-y2CkzqhIOW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8d0ac52f-cc4f-4b93-b787-bd8fbbfc6987" executionInfo={"status": "ok", "timestamp": 1581546529410, "user_tz": -60, "elapsed": 2458, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
# ls
# + id="YgILlmZDhNZ5" colab_type="code" colab={}
# !mkdir data
# + id="6ugXlViwhT2U" colab_type="code" colab={}
# !echo 'data' > .gitignore
# + id="dJ8aRnD0iOHM" colab_type="code" colab={}
# !git add .gitignore
# + id="v1H9h4l_iToF" colab_type="code" colab={}
data = dw.load_dataset('datafiniti/mens-shoe-prices')
# + id="ldeT-vPgi5Nx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4e043e00-c156-4981-9b75-f6d18bb40a5f" executionInfo={"status": "ok", "timestamp": 1581547057317, "user_tz": -60, "elapsed": 619, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df = data.dataframes['7004_1']
df.shape
# + id="3cAYy6E6i_qJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 608} outputId="b92c7407-c347-49d0-d613-5d936b0f001b" executionInfo={"status": "ok", "timestamp": 1581547080723, "user_tz": -60, "elapsed": 747, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df.sample(5)
# + id="0fZ3hoasjUNg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 245} outputId="5a38aebf-66a3-4322-95c3-7ea79d05d227" executionInfo={"status": "ok", "timestamp": 1581547100532, "user_tz": -60, "elapsed": 748, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df.columns
# + id="rcOuVcfajZL7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="05b4b7c5-926b-4e27-c470-5afb96d124ff" executionInfo={"status": "ok", "timestamp": 1581547169364, "user_tz": -60, "elapsed": 809, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df.prices_currency.unique()
# + id="q9Tb84Jjjp6Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 260} outputId="6dfe93cd-f1af-402c-888b-1d2b0d61a34b" executionInfo={"status": "ok", "timestamp": 1581547238368, "user_tz": -60, "elapsed": 643, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df.prices_currency.value_counts(normalize=True)
# + id="J3b0aEUqj0jG" colab_type="code" colab={}
df_usd = df[ df.prices_currency == 'USD' ].copy()
# + id="fu4hYIerkFov" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ee3c3312-559b-484f-85ef-8ffcaff18f65" executionInfo={"status": "ok", "timestamp": 1581547360433, "user_tz": -60, "elapsed": 1157, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df_usd.shape
# + id="1DCXWjA4kYg5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="77af1661-a308-42aa-93e6-2f7a36bec210" executionInfo={"status": "ok", "timestamp": 1581548166278, "user_tz": -60, "elapsed": 734, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(np.float)
df_usd['prices_amountmin'].hist()
# + id="eldl3w2Vm7xm" colab_type="code" colab={}
filter_max = np.percentile( df_usd['prices_amountmin'], 99 )
# + id="kJ3QDhW2nsFh" colab_type="code" colab={}
df_usd_filter = df_usd[ df_usd['prices_amountmin'] < filter_max ]
# + id="-L2rolSOn56n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="07a9c334-fa30-4b7e-e93f-f999aec4ac18" executionInfo={"status": "ok", "timestamp": 1581549043300, "user_tz": -60, "elapsed": 967, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
df_usd_filter.prices_amountmin.hist(bins=100)
# + id="Gh8Tw-5MoQzv" colab_type="code" colab={}
df.to_csv('data/shoes_prices.csv', index=False)
# + id="uA15azcNsTfA" colab_type="code" colab={}
# !git add DzienTrzeciMatrix.ipynb
# + id="BQWvITTTs1S6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="9acfa8bd-cfac-4e51-8756-5607c7ee16f9" executionInfo={"status": "ok", "timestamp": 1581549702895, "user_tz": -60, "elapsed": 4429, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13358070497250286950"}}
# !git commit -m "About Men's Shoe Prices"
# + id="u_3QImB7tHNd" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "Beata"
# + id="8eX_DY0ntR45" colab_type="code" colab={}
| DzienTrzeciMatrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="VOsgCkustYcP"
# <a href="https://colab.research.google.com/github/https-deeplearning-ai/tensorflow-1-public/blob/master/C3/W2/ungraded_labs/C3_W2_Lab_2_sarcasm_classifier.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ZfVtdwryvvP6"
# # Ungraded Lab: Training a binary classifier with the Sarcasm Dataset
#
# In this lab, you will revisit the [News Headlines Dataset for Sarcasm Detection](https://www.kaggle.com/rmisra/news-headlines-dataset-for-sarcasm-detection/home) from last week and proceed to build and train a model on it. The steps will be very similar to the previous lab with IMDB Reviews with just some minor modifications. You can tweak the hyperparameters and see how it affects the results. Let's begin!
# + [markdown] id="aWIM6gplHqfx"
# ## Download the dataset
#
# You will first download the JSON file, load it into your workspace and put the sentences and labels into lists.
# + id="BQVuQrZNkPn9" colab={"base_uri": "https://localhost:8080/"} outputId="309b47f4-cb5d-4de8-d504-65fb6d141179"
# Download the dataset
# !wget https://storage.googleapis.com/tensorflow-1-public/course3/sarcasm.json
# + id="oaLaaqhNkUPd"
import json
# Load the JSON file
with open("./sarcasm.json", 'r') as f:
datastore = json.load(f)
# Initialize the lists
sentences = []
labels = []
# Collect sentences and labels into the lists
for item in datastore:
sentences.append(item['headline'])
labels.append(item['is_sarcastic'])
# + [markdown] id="kw1I6oNSfCxa"
# ## Hyperparameters
#
# We placed the hyperparameters in the cell below so you can easily tweak it later:
# + id="wpF4x5olfHX-"
# Number of examples to use for training
training_size = 20000
# Vocabulary size of the tokenizer
vocab_size = 1000
# Maximum length of the padded sequences
max_length = 32
# Output dimensions of the Embedding layer
embedding_dim = 48
# + [markdown] id="dHibcDI0H5Zj"
# ## Split the dataset
#
# Next, you will generate your train and test datasets. You will use the `training_size` value you set above to slice the `sentences` and `labels` lists into two sublists: one for training and another for testing.
# + id="S1sD-7v0kYWk"
# Split the sentences
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
# Split the labels
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
# + [markdown] id="qciTzNR7IHzJ"
# ## Preprocessing the train and test sets
#
# Now you can preprocess the text and labels so it can be consumed by the model. You use the `Tokenizer` class to create the vocabulary and the `pad_sequences` method to generate padded token sequences. You will also need to set the labels to a numpy array so it can be a valid data type for `model.fit()`.
# + id="3u8UB0MCkZ5N"
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Parameters for padding and OOV tokens
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
# Initialize the Tokenizer class
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
# Generate the word index dictionary
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
# Generate and pad the training sequences
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Generate and pad the testing sequences
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Convert the labels lists into numpy arrays
training_labels = np.array(training_labels)
testing_labels = np.array(testing_labels)
# + [markdown] id="AMF4afx2IdHo"
# ## Build and Compile the Model
#
# Next, you will build the model. The architecture is similar to the previous lab but you will use a [GlobalAveragePooling1D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalAveragePooling1D) layer instead of `Flatten` after the Embedding. This adds the task of averaging over the sequence dimension before connecting to the dense layers. See a short demo of how this works using the snippet below. Notice that it gets the average over the 5 arrays (i.e. `(10 + 1 + 1 + 2 + 3) / 5` and `(2 + 3 + 1 + 3 + 4) / 5`) to arrive at the final output.
# + id="7KDCvSc0kFOz" colab={"base_uri": "https://localhost:8080/"} outputId="bec05737-d757-4b10-ead6-d35d0ece4b08"
import tensorflow as tf
# Initialize a GlobalAveragePooling1D (GAP1D) layer
gap1d_layer = tf.keras.layers.GlobalAveragePooling1D()
# Define sample array
sample_array = np.array([[[10,2],[1,3],[1,1],[2,3],[3,4]]])
# Print shape and contents of sample array
print(f'shape of sample_array = {sample_array.shape}')
print(f'sample array: {sample_array}')
# Pass the sample array to the GAP1D layer
output = gap1d_layer(sample_array)
# Print shape and contents of the GAP1D output array
print(f'output shape of gap1d_layer: {output.shape}')
print(f'output array of gap1d_layer: {output.numpy()}')
# + [markdown] id="evlU_kqOshc4"
# This added computation reduces the dimensionality of the model as compared to using `Flatten()` and thus, the number of training parameters will also decrease. See the output of `model.summary()` below and see how it compares if you swap out the pooling layer with a simple `Flatten()`.
# + id="FufaT4vlkiDE" colab={"base_uri": "https://localhost:8080/"} outputId="eb9954b2-8bfa-4f05-bb0e-ee944fc7e845"
# Build the model
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
# Print the model summary
model.summary()
# + [markdown] id="GMxT5NzKtRgr"
# You will use the same loss, optimizer, and metrics from the previous lab.
# + id="XfDt1hmYkiys"
# Compile the model
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
# + [markdown] id="Axtd-WQAJIUK"
# ## Train the Model
#
# Now you will feed in the prepared datasets to train the model. If you used the default hyperparameters, you will get around 99% training accuracy and 80% validation accuracy.
#
# *Tip: You can set the `verbose` parameter of `model.fit()` to `2` to indicate that you want to print just the results per epoch. Setting it to `1` (default) displays a progress bar per epoch, while `0` silences all displays. It doesn't matter much in this Colab but when working in a production environment, you may want to set this to `2` as recommended in the [documentation](https://keras.io/api/models/model_training_apis/#fit-method).*
# + id="2DTKQFf1kkyc" colab={"base_uri": "https://localhost:8080/"} outputId="012c67ad-1ff3-4fed-8917-31c99ed8de22"
num_epochs = 30
# Train the model
history = model.fit(training_padded, training_labels, epochs=num_epochs, validation_data=(testing_padded, testing_labels), verbose=1)
# + [markdown] id="L_bWhGOSJLLm"
# ## Visualize the Results
#
# You can use the cell below to plot the training results. You may notice some overfitting because your validation accuracy is slowly dropping while the training accuracy is still going up. See if you can improve it by tweaking the hyperparameters. Some example values are shown in the lectures.
# + id="2HYfBKXjkmU8" colab={"base_uri": "https://localhost:8080/", "height": 543} outputId="28ae5043-f2ec-471f-d490-62c29b59bbb4"
import matplotlib.pyplot as plt
# Plot utility
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
# Plot the accuracy and loss
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
# + [markdown] id="JN6kaxxcJQgd"
# ## Visualize Word Embeddings
#
# As before, you can visualize the final weights of the embeddings using the [Tensorflow Embedding Projector](https://projector.tensorflow.org/).
# + id="c9MqihtEkzQ9" colab={"base_uri": "https://localhost:8080/"} outputId="809bad05-ec86-4d7c-ca9a-0690a6cbc22a"
# Get the index-word dictionary
reverse_word_index = tokenizer.index_word
# Get the embedding layer from the model (i.e. first layer)
embedding_layer = model.layers[0]
# Get the weights of the embedding layer
embedding_weights = embedding_layer.get_weights()[0]
# Print the shape. Expected is (vocab_size, embedding_dim)
print(embedding_weights.shape)
# + id="LoBXVffknldU"
import io
# Open writeable files
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
# Initialize the loop. Start counting at `1` because `0` is just for the padding
for word_num in range(1, vocab_size):
# Get the word associated at the current index
word_name = reverse_word_index[word_num]
# Get the embedding weights associated with the current index
word_embedding = embedding_weights[word_num]
# Write the word name
out_m.write(word_name + "\n")
# Write the word embedding
out_v.write('\t'.join([str(x) for x in word_embedding]) + "\n")
# Close the files
out_v.close()
out_m.close()
# + id="U4eZ5HtVnnEE" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="51dad27c-e36a-4b94-fc1c-b8e4b4cad617"
# Import files utilities in Colab
try:
from google.colab import files
except ImportError:
pass
# Download the files
else:
files.download('vecs.tsv')
files.download('meta.tsv')
# + [markdown] id="1GierJvdJWMt"
# ## Wrap Up
#
# In this lab, you were able to build a binary classifier to detect sarcasm. You saw some overfitting in the initial attempt and hopefully, you were able to arrive at a better set of hyperparameters.
#
# So far, you've been tokenizing datasets from scratch and you're treating the vocab size as a hyperparameter. Furthermore, you're tokenizing the texts by building a vocabulary of full words. In the next lab, you will make use of a pre-tokenized dataset that uses a vocabulary of *subwords*. For instance, instead of having a unique token for the word `Tensorflow`, it will instead have a token each for `Ten`, `sor`, and `flow`. You will see the motivation and implications of having this design in the next exercise. See you there!
| Natural Language Processing Tensorflow/C3_W2_Lab_2_sarcasm_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "skip"}
# <table>
# <tr align=left><td><img align=left src="./images/CC-BY.png">
# <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) <NAME></td>
# </table>
#
# Note: This material largely follows the text "Numerical Linear Algebra" by Trefethen and Bau (SIAM, 1997) and is meant as a guide and supplement to the material presented there.
# + init_cell=true slideshow={"slide_type": "skip"}
from __future__ import print_function
# %matplotlib inline
import numpy
import matplotlib.pyplot as plt
import warnings
# + [markdown] slideshow={"slide_type": "slide"}
# # Eigenproblems
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Overview
#
# We will now consider eigenproblems of the form
#
# $$
# A \mathbf{x} = \lambda \mathbf{x}
# $$
#
# where $A \in \mathbb C^{m \times m}$, $\mathbf{x} \in \mathbb C^m$ and $\lambda \in \mathbb C$. The vector $\mathbf{x}$ is known as the **eigenvector** and $\lambda$ the **eigenvalue**. The set of all eigenvalues is called the **spectrum** of $A$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### The basics
#
# The eigenproblem
# $$
# A \mathbf{x} = \lambda \mathbf{x}
# $$
# can be rewritten as
#
# $$
# ( A - \lambda I)\mathbf{x} = \mathbf{0}
# $$
#
# which implies that the eigenvectors are in the Null space of $A-\lambda I$.
#
# However for this matrix to have a non-trivial Null space, requires that $A-\lambda I$ is singular.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Characteristic Polynomial
#
# If $A-\lambda I$ is singular, it follows that
#
# $$
# \det( A - \lambda I) = {\cal P}_A(\lambda) = 0
# $$
#
# where ${\cal P}_A(\lambda)$ can be shown to be a $m$th order polynomial in $\lambda$ known as the **characteristic polynomial** of a matrix $A$
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
# We can then state the following theorem regarding the zeros of $\mathcal{P}_A$ and the eigenvalues of $A$:
#
# *Theorem:* $\lambda$ is an eigenvalue of $A$ if and only if $\mathcal{P}_A(\lambda) = 0$.
#
# i.e. the eigenvalues are the roots of ${\cal P}_A(\lambda)$, and therefore there are exactly $m$ eigenvalues.
# + [markdown] slideshow={"slide_type": "skip"}
# *Proof:*
#
# $$\begin{aligned}
# \text{If } \lambda \text{ is an eigenvalue of } A &\Leftrightarrow \text{ there is a non-zero vector } x \text{ s.t. } \lambda x - A x = 0 \\
# &\Leftrightarrow \lambda I - A \text{ is singular (since }x\text{ is a non-trivial vector in the null space of } \lambda I - A) \\
# &\Leftrightarrow \det(\lambda I - A) = 0
# \end{aligned}$$
#
# Note that this theorem implies that even though $A \in \mathbb R^{m \times m}$ that $\lambda \in \mathbb C$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Computing Eigenvalues
#
# In basic linear algebra classes we usually find the eigenvalues by directly calculating the roots of ${\cal P}_A(\lambda)$ which can work for low-degree polynomials. Unfortunately the following theorem (due to Galois) suggests this is not a good way to compute eigenvalues:
#
# **Theorem:** For any $m \geq 5$ there is a polynomial $\mathcal{P}(z)$ of degree $m$ with rational coefficients that has a real root $\mathcal{P}(z_0) = 0$ with the property that $z_0$ cannot be written using any expression involving rational numbers, addition, subtraction, multiplication, division, and $k$th roots.
#
# I.e., there is no way to find the roots of a polynomial of degree $>4$ in a deterministic, fixed number of steps.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Not all is lost however!
#
# We just must use an iterative approach where we construct a sequence that converges to the eigenvalues.
#
# **Some Questions**
# * How does this relate to how we found roots previously?
# * Why will it still be difficult to use our rootfinding routines to find Eigenvalues?
#
# We will return to how we actually find Eigenvalues (and roots of polynomials) after a bit more review
# + [markdown] slideshow={"slide_type": "slide"}
# ### Eigenvalue Factorization and Diagonalization
#
# Given that there are exactly $m$ (possibly repeated) eigenvalues for a $m\times m$ system, the eigenproblem is really more correctly written as
#
# $$
# A\mathbf{x}_i = \lambda_i \mathbf{x}_i, \quad i=1,2,\ldots,m
# $$
#
# Or in Matrix form as
# $$
# AX = X\Lambda
# $$
# where $X$ is the matrix formed by the eigenvectors $x$ as its columns and $\Lambda$ is a diagonal matrix with the eigenvalues along its diagonal.
# + [markdown] slideshow={"slide_type": "subslide"}
# Expanded, $A X = X \Lambda$ looks like:
# $$
# \begin{bmatrix}
# & & & & \\
# & & & & \\
# & & A & & \\
# & & & & \\
# & & & &
# \end{bmatrix}
# \begin{bmatrix}
# & & & & \\
# & & & & \\
# \mathbf{x}_1 & \mathbf{x}_2 & \cdots & \mathbf{x}_{m-1} & \mathbf{x}_m \\
# & & & & \\
# & & & &
# \end{bmatrix} =
# \begin{bmatrix}
# & & & & \\
# & & & & \\
# \mathbf{x}_1 & \mathbf{x}_2 & \cdots & \mathbf{x}_{m-1} & \mathbf{x}_m \\
# & & & & \\
# & & & &
# \end{bmatrix}
# \begin{bmatrix}
# \lambda_1 & & & & \\
# & \lambda_2 & & & \\
# & & \ddots & & \\
# & & & \lambda_{m-1} & \\
# & & & & \lambda_m
# \end{bmatrix}
# $$
#
# Here we note that the eigenpair $(\mathbf{x}_j, \lambda_j)$ are matched as the $j$th column of $X$ and the $j$th element of $\Lambda$ on the diagonal.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Diagonalization
#
# Eigenproblems can always be written as
# $$
# AX = X\Lambda
# $$
#
# However, if there are a linearly independent set of eigenvectors then the matrix $X$ is invertible (why?). Under this condition we can transform $A$ into the diagonal matrix $\Lambda$ by
#
# $$
# \Lambda = X^{-1}A X
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# #### Factorization
#
# or more usefully, we can rewrite $A$ as a product of more useful matrices
#
# $$
# A = X\Lambda X^{-1}
# $$
#
# And use it in similar ways to how we use other factorization such as $A=QR$ and $A=LU$
# + [markdown] slideshow={"slide_type": "subslide"}
# ### The Rules for diagonalizability
#
# Unfortunately, not all matrices can be diagonalized (i.e. don't have a full linearly independent set of eigenvectors that span $\mathbb{R}^m$ or $\mathbb{C}^m$). Here are the rules.
#
# A matrix can be factored as $A = X\Lambda X^{-1}$ if
#
# 1. all eigenvalues are distinct (never repeat). These are known as simple eigenvalues
#
# 2. Eigenvalues repeat, but for every repeated eigenvalue there can be found a linearly independent eigenvector
#
# 3. The matrix is Hermitian ($A^\ast = A$, or if real, then symmetric $A^T = A$). In this case the eigenvalues are always real and the eigenvectors can always be chosen orthonormal. In this special case $X=Q$ and
#
# $$
# A = Q\Lambda Q^\ast
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# The issue of repeated eigenvalues can be made a bit more precise by defining
#
# * **Algebraic multiplicity**: the number of times an eigenvalue is repeated
#
# * **Geometric multiplicity**: the number of linearly independent eigenvectors corresponding to each eigenvalue.
#
# If the algebraic multiplicity is equal to the geometric multiplicity for all $\lambda$ then we can say that there is a full eigenspace and the matrix is diagonalizable.
# + [markdown] slideshow={"slide_type": "skip"}
# #### Example: Computing Multiplicities
#
# Compute the geometric and algebraic multiplicities for the following matrices. What is the relationship between the algebraic and geometric multiplicities?
#
# $$A = \begin{bmatrix}
# 2 & & \\
# & 2 & \\
# & & 2
# \end{bmatrix}$$
#
# $$B = \begin{bmatrix} 2
# & 1 & \\
# & 2 & 1 \\
# & & 2
# \end{bmatrix}$$
# + [markdown] slideshow={"slide_type": "skip"}
# 1. The characteristic polynomial of $A$ is
#
# $$
# \mathcal{P}_A(z) = (2 - z)(2 - z)(2 - z) = (2 - z)^3
# $$
#
# so the eigenvalues are all $\lambda = 2$ so we know the algebraic multiplicity is 3 of this eigenvalue. The geometric multiplicity is determined by the number of linearly independent eigenvectors. For this matrix we have three eigenvectors that are all linearly independent which happen to be the unit vectors in each direction (check!). This means that the geometric multiplicity is also 3.
#
# 1. The characteristic polynomial of $B$ is the same as $A$ so again we know $\lambda = 2$ but now we need to be a bit careful about the eigenvectors. In this case the only eigenvector is a scalar multiple of $e_1$ so the geometric multiplicity is 1.
# + [markdown] slideshow={"slide_type": "skip"}
# ### Interpretations of the Eigenspace
#
# One way to interpret the eigenproblem is that of one that tries to find the subspaces of $\mathbb C^m$ which act like scalar multiplication by $\lambda$. The eigenvectors associated with one eigenvalue then form a subspace of $S \subseteq \mathbb C^m$.
# + [markdown] slideshow={"slide_type": "skip"}
# When an eigenvalue has algebraic multiplicity that equals its geometric then it is called non-defective and otherwise defective. This property is also inherited to the matrix so in the above example $A$ and $B$ are non-defective and defective matrices respectively.
# + [markdown] slideshow={"slide_type": "skip"}
# ### Determinant and Trace
#
# Two important properties of matrices have important relationships with their eigenvalues, namely the determinant and trace. The determinant we have seen, the **trace** is defined as the sum of the elements on the diagonal of a matrix, in other words
# $$
# \text{tr}(A) = \sum^m_{i=1} A_{ii}.
# $$
#
# The relationship between the determinant and the eigenvalues is not difficult to guess due to the nature of the characteristic polynomial. The trace of a diagonal matrix is clear and provides another suggestion to the relationship.
#
# **Theorem:** The determinant $\det(A)$ and trace $\text{trace}(A)$ are equal to the product and sum of the eigenvalues of $A$ respectively counting algebraic multiplicity.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Similarity Transformations
#
# Generally, we say any two matrices $A$ and $B$ are **similar** if they can be related through an invertible matrix $M$ as
#
# $$
# A = M^{-1} B M
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# **Example**
#
# a diagonalizable matrix $A$ is similar to the diagonal matrix $\Lambda$ through the invertible matrix $X$
#
# $$
# A = X\Lambda X^{-1}
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# **Theorem**: If $A$ and $B$ are similar matrices, they have the same eigenvalues and their eigenvectors are related through an invertible matrix $M$
# + [markdown] slideshow={"slide_type": "subslide"}
# **Proof**: Let
#
# $$
# B = M A M^{-1}
# $$
# or
# $$
# BM = MA
# $$
# + [markdown] slideshow={"slide_type": "fragment"}
# if $A\mathbf{x} = \lambda\mathbf{x}$ then
# $$
# BM\mathbf{x} = M A\mathbf{x} = \lambda M\mathbf{x}
# $$
# or
# $$
# B\mathbf{y} = \lambda\mathbf{y}
# $$
#
# which shows that $\lambda$ is also an eigenvalue of $B$ with corresponding eigenvector $\mathbf{y} = M\mathbf{x}$
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Schur Factorization
#
# A **Schur factorization** of a matrix $A$ is defined as
#
# $$
# A = Q T Q^\ast
# $$
#
# where $Q$ is unitary and $T$ is upper-triangular. Because $Q^\ast=Q^{-1}$ (for square unitary matrices), it follows directly that $A$ and $T$ are similar.
#
# * Good News! $T$ is upper triangular so its eigenvalues can just be read off the diagonal
# * Bad News! There is no deterministic way to calculate $T$ as that would violate Galois theory of polynomials
# + [markdown] slideshow={"slide_type": "subslide"}
# **Theorem:** Every matrix $A \in \mathbb C^{m \times m}$ has a Schur factorization.
# + [markdown] slideshow={"slide_type": "subslide"}
# Note that the above results imply the following
# - An eigen-decomposition $A = X \Lambda X^{-1}$ exists if and only if $A$ is non-defective (it has a complete set of eigenvectors)
# - A unitary transformation $A = Q \Lambda Q^\ast$ exists if and only if $A$ is normal ($A^\ast A = A A^\ast$)
# - A Schur factorization always exists
#
# Note that each of these lead to a means for isolating the eigenvalues of a matrix and will be useful when considering algorithms for finding them.
# + [markdown] slideshow={"slide_type": "skip"}
# ## Condition Number of a Simple Eigenvalue
#
# Before we discuss a number of approaches to computing eigenvalues it is good to consider what the condition number of a given eigenproblem is.
#
# Let
# $$
# Ax = \lambda x
# $$
# define the eigenvalue problem in question. Here we will introduce a related problem
# $$
# y^\ast A = \lambda y^\ast
# $$
# where $y$ is the **left eigenvector** and from before $x$ is the **right eigenvector**. These vectors also can be shown to have the relationship $y^\ast x \neq 0$ for a simple eigenvalue.
# + [markdown] slideshow={"slide_type": "skip"}
# Now consider the perturbed problem
# $$
# (A + \delta A) (x + \delta x) = (\lambda + \delta \lambda) (x + \delta x).
# $$
# Expanding this and throwing out quadratic terms and removing the eigenproblem we have
# $$
# \delta A x + A \delta x = \delta \lambda x + \lambda \delta x.
# $$
# + [markdown] slideshow={"slide_type": "skip"}
# Multiply both sides of the above by the left eigenvector and use $y^\ast x \neq 0$ to find
# $$\begin{aligned}
# y^\ast \delta A x + y^\ast A \delta x &= y^\ast \delta \lambda x + y^\ast \lambda \delta x \\
# y^\ast \delta A x &= y^\ast \delta \lambda x
# \end{aligned}$$
# where we again use the slightly different definition of the eigenproblem. We can then solve for $\delta \lambda$ to find
# $$
# \delta \lambda = \frac{y^\ast \delta A x}{y^\ast x}
# $$
# meaning that the ratio between the dot-product of the left and right eigenvectors and the conjugate dot-product of the matrix $\delta A$ then form a form of bound on the expected error in the simple eigenvalue.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Computing Eigenvalues
#
# Almost all useful approaches to computing eigenvalues do so through the computation of the Schur factorization. The Schur factorization, as we have seen, will preserve the eigenvalues. The steps to compute the Schur factorization are usually broken down into two steps
# 1. Directly transform $A$ into a **Hessenberg** matrix, a matrix that contains zeros below its first sub-diagonal, directly using Householder reflections. This is as close to triangular that you can get by direct similarity transformations.
# 1. Use an iterative method to change the sub-diagonal into all zeros
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Hessenberg and Tridiagonal form
#
# What we want to do is construct a sequence of similarity transformations matrices that turns $A$ into a Hessenberg matrix with the same eigenvalues as $A$. We use Householder reflections to do this with the important distinction that we only want to remove zeros below the first sub-diagonal.
# + [markdown] slideshow={"slide_type": "subslide"}
# $$
# \begin{bmatrix}
# \text{x} & \text{x} & \text{x} & \text{x} & \text{x} \\
# \text{x} & \text{x} & \text{x} & \text{x} & \text{x} \\
# \text{x} & \text{x} & \text{x} & \text{x} & \text{x} \\
# \text{x} & \text{x} & \text{x} & \text{x} & \text{x} \\
# \text{x} & \text{x} & \text{x} & \text{x} & \text{x}
# \end{bmatrix} \overset{H_1^\ast A_0 H_1}{\rightarrow}
# \begin{bmatrix}
# \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\
# \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\
# 0 & \text{x} & \text{x}& \text{x} & \text{x} \\
# 0 & \text{x} & \text{x}& \text{x} & \text{x} \\
# 0 & \text{x} & \text{x}& \text{x} & \text{x}
# \end{bmatrix} \overset{H_2^\ast A_1H_2}{\rightarrow}
# \begin{bmatrix}
# \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\
# \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\
# 0 & \text{x} & \text{x}& \text{x} & \text{x} \\
# 0 & 0 & \text{x}& \text{x} & \text{x} \\
# 0 & 0 & \text{x}& \text{x} & \text{x}
# \end{bmatrix} \overset{H_3^\ast A_2H_3}{\rightarrow}
# \begin{bmatrix}
# \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\
# \text{x} & \text{x} & \text{x}& \text{x} & \text{x} \\
# 0 & \text{x} & \text{x}& \text{x} & \text{x} \\
# 0 & 0 & \text{x}& \text{x} & \text{x} \\
# 0 & 0 & 0 & \text{x} & \text{x}
# \end{bmatrix}
# $$
#
# so we have the sequence $H = Q^\ast A Q$ which has the same eigenvalues as the original matrix $A$.
#
# **Question**? Why can't we just use Householder to take $A\rightarrow T$ like we did for the $QR$?
# + [markdown] slideshow={"slide_type": "subslide"}
# One important special case of this sequence of transformations is that if the matrix $A$ is hermitian (the matrix is its own conjugate transpose, $A = A^\ast$, or symmetric in the real case) then the Hessenberg matrix is tridiagonal.
#
# We now will focus on how to formulate the iteration step of the eigenproblem. We will also restrict our attention to symmetric, real matrices. This implies that all eigenvalues will be real and have a complete set of orthogonal eigenvectors. Generalizations can be made of many of the following algorithms but is beyond the scope of this class.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Rayleigh Quotient and Inverse Iteration
#
# There are a number of classical approaches to computing the iterative step above which we will review here. Inverse power iteration in particular is today still the dominant means of finding the eigenvectors once the eigenvalues are known.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Rayleigh Quotient
#
# The **Rayleigh quotient** of a vector $x \in \mathbb R^m$ is the scalar
# $$
# r(x) = \frac{x^T A x}{x^T x}.
# $$
# The importance of the Rayleigh quotient is made clear when we evaluate $r(x)$ at an eigenvector. When this is the case the quotient evaluates to the corresponding eigenvalue.
# + [markdown] slideshow={"slide_type": "subslide"}
# The Rayleigh quotient can be motivated by asking the question, given an eigenvector $x$, what value $\alpha$ acts most like an eigenvalue in an $\ell_2$ sense:
# $$
# \min_\alpha ||A x - \alpha x||_2.
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# This can be reformulated as a least-squares problem noting that $x$ is the "matrix", $\alpha$ is the unknown vector (scalar) and $Ax$ is the right-hand side so we have
# $$
# (x^T x) \alpha = x^T (A x)
# $$
# which can be solved so that
# $$
# \alpha = r(x) = \frac{x^T A x}{x^T x}.
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Power Iteration
#
# Power iteration is a straightforward approach to finding the eigenvector of the largest eigenvalue of $A$. The basic idea is that the sequence
# $$
# \frac{x}{||x||}, \frac{Ax}{||Ax||}, \frac{A^2x}{||A^2x||}, \frac{A^3x}{||A^3x||}, \ldots
# $$
# will converge (although very slowly) to the desired eigenvector.
#
# We implement this method by initializing the algorithm with some vector $v$ with $||v|| = 1$. We then apply the sequence of multiplications.
# + hide_input=false slideshow={"slide_type": "subslide"}
# Demo
# generate a random symmetric matrix
A = numpy.random.rand(3,3)
# symmetrizing the matrix guarantees real eigenvalues and an orthogonal
# set of eigenvectors, which the algorithms below assume
A = 0.5*( A + A.T)
print('A=\n{}'.format(A))
# reference eigenvalues from LAPACK, used to check the iterative methods
lams = numpy.linalg.eigvals(A)
print('\nLambda = {}'.format(lams))
# + hide_input=false slideshow={"slide_type": "subslide"}
#the Rayleigh Quotient
def rayleighq(A, x):
    """Return the Rayleigh quotient r(x) = (x^T A x) / (x^T x)."""
    Ax = A.dot(x)
    return numpy.dot(x.T, Ax) / numpy.dot(x.T, x)
def power_iteration(A, tol=1.e-6):
    """ power_iteration to find the largest eigenvector and corresponding eigenvalue
    parameters:
    -----------
    A:  ndarray (square)
        m x m matrix
    tol: float
        stopping criteria for iteration.
        iteration will cease when the relative change in the Rayleigh
        quotient is below tol or MAX_ITS is exceeded
    returns:
    --------
    x: ndarray
        array of iterates of the eigenvector (one row per iteration)
    lam: ndarray
        Rayleigh-quotient eigenvalue estimate at each iterate
    res: ndarray
        relative change in the eigenvalue estimate at each step
    """
    MAX_ITS = 100
    m = A.shape[0]
    # MAX_ITS iterations plus the initial guess are stored, so MAX_ITS+1
    # rows are needed; the original MAX_ITS-row allocation raised
    # IndexError when the loop reached i == MAX_ITS
    x = numpy.empty((MAX_ITS + 1, m))
    lam = numpy.empty(MAX_ITS + 1)
    res = numpy.empty(MAX_ITS)

    def _rq(v):
        # Rayleigh quotient v^T A v / v^T v (scale invariant), inlined so
        # the routine is self-contained
        return numpy.dot(v, A.dot(v)) / numpy.dot(v, v)

    # generate a random unit vector as the starting guess
    x0 = numpy.random.rand(m)
    x[0, :] = x0 / numpy.linalg.norm(x0, ord=2)
    lam[0] = _rq(x0)
    for i in range(1, MAX_ITS + 1):
        # one multiplication by A followed by normalization
        xi = A.dot(x[i - 1, :])
        x[i, :] = xi / numpy.linalg.norm(xi, ord=2)
        lam[i] = _rq(x[i, :])
        res[i - 1] = numpy.abs(lam[i] - lam[i - 1]) / numpy.abs(lam[i])
        if res[i - 1] < tol:
            break
        if i == MAX_ITS:
            warnings.warn('Maximum iterations exceeded')
    # trim storage down to the iterations actually taken
    x = x[:i + 1]
    lam = lam[:i + 1]
    res = res[:i]
    return x, lam, res
# + slideshow={"slide_type": "subslide"}
# run power iteration and compare its estimate against LAPACK's eigenvalues
x, r, res = power_iteration(A, tol=1.e-8)
print('{} Iterations'.format(len(r)))
print('x = {}'.format(x[-1]))
print('Eigenvalue = {}'.format(r[-1]))
print('eigs(A) = {}'.format(numpy.linalg.eigvals(A)))
# + hide_input=true slideshow={"slide_type": "-"}
# left panel: Rayleigh-quotient iterates approaching the largest eigenvalue
# right panel: relative residual on a log scale (linear convergence)
fig = plt.figure(figsize=(16, 6))
axes = fig.add_subplot(1,2,1)
axes.plot(r,'o',markersize=8)
# dashed reference line at the true largest eigenvalue
lam_max = numpy.max(numpy.linalg.eigvals(A))
axes.plot(lam_max*numpy.ones(r.shape),'k--')
axes.grid()
axes.set_xlabel('Iteration', fontsize=16)
axes.set_ylabel('Rayleigh Quotient',fontsize=16)
axes = fig.add_subplot(1,2,2)
axes.semilogy(res,'o-',markersize=8)
axes.grid()
axes.set_xlabel('Iteration', fontsize=16)
axes.set_ylabel('Residual',fontsize=16)
plt.show()
# + [markdown] hide_input=true slideshow={"slide_type": "subslide"}
# The reason why this works can be seen by considering the initial vector $v$ as a linear combination of the orthonormal eigenvectors (which we have assumed exist) such that
#
# $$
# v^{(0)} = a_1 q_1 + a_2 q_2 + \cdots + a_m q_m.
# $$
#
# Multiplying $v^{(0)}$ by $A$ then leads to
#
# $$\begin{aligned}
# Av^{(0)} = v^{(1)} &= a_1 A q_1 + a_2 A q_2 + \cdots + a_m A q_m \\
# &= c_1 (a_1 \lambda_1 q_1 + a_2 \lambda_2 q_2 + \cdots + a_m \lambda_m q_m) \\
# \end{aligned}$$
#
# here $c_1$ is some constant due to the fact the eigenvectors are not uniquely specified.
# + [markdown] slideshow={"slide_type": "subslide"}
# Repeating this $k$ times we have
#
# $$\begin{aligned}
# Av^{(k-1)} = v^{(k)} &= a_1 A^k q_1 + a_2 A^k q_2 + \cdots + a_m A^k q_m \\
# &= c_k (a_1 \lambda_1^k q_1 + a_2 \lambda_2^k q_2 + \cdots + a_m \lambda_m^k q_m) \\
# &= c_k \lambda_1^k \left(a_1 q_1 + a_2 \frac{\lambda_2^k}{\lambda_1^k} q_2 + \cdots + a_m \frac{\lambda_m^k}{\lambda_1^k} q_m \right)
# \end{aligned}$$
#
# Since $\lambda_1 > \lambda_i$ for all $i \neq 1$ then in the limit the terms $\lambda_2^k / \lambda_1^k$ will approach zero and on normalization $v^{(k)}/||v^{(k)}||\rightarrow \mathbf{q}_1$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Inverse Iteration with shifts
#
# Inverse iteration with shifts uses a similar approach with the difference being that we can use it to find any of the eigenvectors for the matrix $A$.
#
# **Some Preliminaries**: inverse and shift rules of Eigenvalues
#
# Show that if $\mathbf{x}$ is an eigenvector of $A$ with eigenvalue $\lambda$, then
#
# * $\mathbf{x}$ is an eigenvector of $A^{-1}$ with eigenvalue $1/\lambda$
# * $\mathbf{x}$ is an eigenvector of $A -\sigma I$ with eigenvalue $\lambda - \sigma$
# + [markdown] slideshow={"slide_type": "subslide"}
# So...
#
# If we want to find the smallest eigenvalue we can consider the power method on $A^{-1}$,
#
# But we really don't want to find $A^{-1}$ which is expensive, instead we can do the equivalent iteration
#
# ```python
# x[0] = x0
# for i in range(MAX_ITS):
# solve A w[i] = x[i]
# x[i+1] = w[i]/norm(w[i])
# ```
#
# as $\mathbf{w}_i = A^{-1}\mathbf{x}_i$
# + [markdown] slideshow={"slide_type": "subslide"}
# and if we want to find the eigenvalue closest to some number $\mu$ we can apply the power method to
#
# $$
# (A - \mu I)^{-1},
# $$
#
# the eigenvectors of this matrix are
#
# $$
# (\lambda_j - \mu)^{-1}
# $$
#
# where $\lambda_j$ are the eigenvalues of $A$.
#
# If $\mu$ is close to a particular $\lambda_j$, say $\lambda_J$, then
#
# $$
# (\lambda_J - \mu)^{-1}
# $$
#
# will be larger than any of the other $(\lambda_j - \mu)^{-1}$. In this way we effectively have picked out the eigenvalue we want to consider in the power iteration!
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Rayleigh Quotient Iteration
#
# By themselves the above approaches are not particularly useful but combining them we can iterate back and forth to find the eigenvalue, eigenvector pair:
# 1. Compute the Rayleigh quotient and find an estimate for $\lambda_j$
# 1. Compute one step of inverse iteration to approximate $x_j$
# 1. Repeat...
# + hide_input=false slideshow={"slide_type": "subslide"}
def rayleigh_quotient_iteration(A, tol=1.e-10):
    """ rayleigh quotient iteration to find eigenvalues
    parameters:
    -----------
    A:  ndarray (square)
        m x m matrix
    tol: float
        stopping criteria for iteration.
        iteration will cease when the relative change in the Rayleigh
        quotient is below tol or MAX_ITS is exceeded.
        NOTE: the original default was 1.e10, almost certainly a typo for
        1.e-10 -- with it the loop stopped after a single iteration
    returns:
    --------
    x: ndarray
        array of iterates of the eigenvector (one row per iteration)
    lam: ndarray
        Rayleigh-quotient eigenvalue estimate at each iterate
    res: ndarray
        relative change in the eigenvalue estimate at each step
    """
    MAX_ITS = 100
    m = A.shape[0]
    # MAX_ITS iterations plus the initial guess are stored, so MAX_ITS+1
    # rows are needed; the original MAX_ITS-row allocation raised
    # IndexError when the loop reached i == MAX_ITS
    x = numpy.empty((MAX_ITS + 1, m))
    lam = numpy.empty(MAX_ITS + 1)
    res = numpy.empty(MAX_ITS)
    I = numpy.eye(m)

    def _rq(v):
        # Rayleigh quotient v^T A v / v^T v (scale invariant)
        return numpy.dot(v, A.dot(v)) / numpy.dot(v, v)

    # generate a random unit vector as the starting guess
    x0 = numpy.random.rand(m)
    x[0, :] = x0 / numpy.linalg.norm(x0, ord=2)
    lam[0] = _rq(x0)
    for i in range(1, MAX_ITS + 1):
        # one step of shifted inverse iteration with the current Rayleigh
        # quotient as the shift -- this is the only line that differs from
        # plain power iteration
        w = numpy.linalg.solve(A - lam[i - 1] * I, x[i - 1, :])
        x[i, :] = w / numpy.linalg.norm(w, ord=2)
        lam[i] = _rq(x[i, :])
        res[i - 1] = numpy.abs(lam[i] - lam[i - 1]) / numpy.abs(lam[i])
        if res[i - 1] < tol:
            break
        if i == MAX_ITS:
            warnings.warn('Maximum iterations exceeded')
    # trim storage down to the iterations actually taken
    x = x[:i + 1]
    lam = lam[:i + 1]
    res = res[:i]
    return x, lam, res
# + slideshow={"slide_type": "subslide"}
# run Rayleigh quotient iteration and compare with LAPACK's eigenvalues
x, r, res = rayleigh_quotient_iteration(A, tol=1.e-10)
print('{} Iterations'.format(len(r)))
print('x = {}'.format(x[-1]))
print('Rayleighquotient = {}'.format(r[-1]))
print('eigs(A) = {}'.format(numpy.linalg.eigvals(A)))
# + hide_input=true slideshow={"slide_type": "-"}
# left panel: eigenvalue iterates; right panel: residual on a log scale
# (note the cubic convergence compared with plain power iteration)
fig = plt.figure(figsize=(16, 6))
axes = fig.add_subplot(1,2,1)
axes.plot(r,'o',markersize=8)
# dashed line marks the largest eigenvalue; RQI may converge to a
# different eigenvalue depending on the random starting vector
lam_max = numpy.max(numpy.linalg.eigvals(A))
axes.plot(lam_max*numpy.ones(r.shape),'k--')
axes.grid()
axes.set_xlabel('Iteration', fontsize=16)
axes.set_ylabel('Rayleigh Quotient',fontsize=16)
axes = fig.add_subplot(1,2,2)
axes.semilogy(res,'o-',markersize=8)
axes.grid()
axes.set_xlabel('Iteration', fontsize=16)
axes.set_ylabel('Residual',fontsize=16)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## QR/RQ Algorithm
#
# All of the above methods pick out at most a few eigenvalues at a time. However it turns out we can use the $QR$ algorithm, to iterate towards the Schur factorization and find all the eigenvalues simultaneously.
#
# The simplest algorithm just iterates
# ```python
# while not converged:
# Q, R = numpy.linalg.qr(A)
# A = R.dot(Q)
# ```
# calculating the $QR$ factorization of $A$, then forming a new $A=RQ$. This sequence will eventually converge to the Schur decomposition of the matrix $A$.
#
# Code this up and see what happens.
# + slideshow={"slide_type": "subslide"}
# %precision 6
m = 3
A = numpy.array([[2, 1, 1], [1, 3, 1], [1, 1, 4]])
MAX_STEPS = 10
# unshifted QR iteration: factor, then multiply in reverse order.
# Each step is the similarity transform A_k = R Q = Q^T A_{k-1} Q, so the
# eigenvalues are preserved while A converges toward triangular form.
for i in range(MAX_STEPS):
    Q, R = numpy.linalg.qr(A)
    A = numpy.dot(R, Q)
    print()
    print("A(%s) =" % (i))
    print(A)
# + slideshow={"slide_type": "subslide"}
print()
print("True eigenvalues: ")
# A has been overwritten by the iteration, but every iterate is similar to
# the original matrix, so its eigenvalues are unchanged
print(numpy.linalg.eigvals(A))
print()
print("Computed eigenvalues: ")
# for a (nearly) triangular matrix the eigenvalues sit on the diagonal
for i in range(m):
    print(A[i, i])
# + [markdown] slideshow={"slide_type": "subslide"}
# So why does this work? The first step is to find the $QR$ factorization of $A^{(k-1)}$ which is equivalent to finding
#
# $$
# (Q^{(k)})^T A^{(k-1)} = R^{(k)}
# $$
#
# and multiplying on the right leads to
#
# $$
# (Q^{(k)})^T A^{(k-1)} Q^{(k)} = R^{(k)} Q^{(k)}.
# $$
#
# In this way we can see that this is a similarity transformation of the matrix $A^{(k-1)}$ since the $Q^{(k)}$ is an orthogonal matrix ($Q^{-1} = Q^T$). This of course is not a great idea to do directly but works great in this case as we iterate to find the upper triangular matrix $R^{(k)}$ which is exactly where the eigenvalues appear.
# + [markdown] slideshow={"slide_type": "subslide"}
# In practice this basic algorithm is modified to include a few additions:
#
# 1. Before starting the iteration $A$ is reduced to tridiagonal form.
# 1. Motivated by the inverse power iteration we observed we instead consider a shifted matrix $A^{(k)} - \mu^{(k)} I$ for factoring. The $\mu$ picked is related to the estimate given by the Rayleigh quotient. Here we have
#
# $$
# \mu^{(k)} = \frac{(q_m^{(k)})^T A q_m^{(k)}}{(q_m^{(k)})^T q_m^{(k)}} = (q_m^{(k)})^T A q_m^{(k)}.
# $$
#
# 1. Deflation is used to reduce the matrix $A^{(k)}$ into smaller matrices once (or when we are close to) finding an eigenvalue to simplify the problem.
#
# This has been the standard approach until recently for finding eigenvalues of a matrix.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Application: Finding the roots of a polynomial
#
# Numpy has a nice function called roots which returns the $n$ roots of a $n$th degree polynomial
#
# $$
# p(x) = c_0 + c_1 x + c_2 x^2 + \ldots + c_n x^n
# $$
#
# described by a $n+1$ vector of coefficients $\mathbf{c}$
# + slideshow={"slide_type": "subslide"}
c = numpy.array([ -1, -1, 1])
r = numpy.roots(c)
print(r)
# + slideshow={"slide_type": "fragment"}
c = numpy.random.rand(6)
r = numpy.roots(c)
print(r)
# + [markdown] slideshow={"slide_type": "subslide"}
# This routine does not try to find the roots of a high-order polynomial directly; instead it calculates the eigenvalues of a **companion matrix** $C$ whose characteristic polynomial $P_C(\lambda)$ is the **monic** polynomial $p_m(x) = p(x)/c_n$.
#
# It can be shown that this matrix can be constructed as ([see e.g.](https://en.wikipedia.org/wiki/Companion_matrix))
# $$
# C(p)=\begin{bmatrix}
# 0 & 0 & \dots & 0 & -c_0 \\
# 1 & 0 & \dots & 0 & -c_1 \\
# 0 & 1 & \dots & 0 & -c_2 \\
# \vdots & \vdots & \ddots & \vdots & \vdots \\
# 0 & 0 & \dots & 1 & -c_{n-1}
# \end{bmatrix}.
# $$
#
#
# + slideshow={"slide_type": "subslide"}
def myroots(p, verbose=True):
    '''Find the roots of p(x) = p_0 + p_1 x + ... + p_n x^n.

    The coefficient vector is scaled to the monic polynomial p(x)/p_n and
    the roots are computed as the eigenvalues of its companion matrix.
    '''
    # normalize to a monic polynomial (leading coefficient 1)
    monic = p / p[-1]
    if verbose:
        print(monic)
    degree = len(monic) - 1
    # companion matrix: ones on the subdiagonal, negated low-order
    # coefficients in the last column
    companion = numpy.diag(numpy.ones(degree - 1), k=-1)
    companion[:, -1] = -monic[:-1]
    if verbose:
        print('C = \n{}'.format(companion))
    return numpy.linalg.eigvals(companion)
# + slideshow={"slide_type": "subslide"}
# compare numpy.roots against the companion-matrix implementation
c = numpy.array([ -1, -1, 1])
r = numpy.roots(c)
print(r)
mr = myroots(c)
# the bare `print` here was a Python-2 leftover: in Python 3 it just
# references the built-in without calling it and prints nothing
print()
print(mr)
# + slideshow={"slide_type": "fragment"}
# same comparison on a random degree-4 polynomial
c = numpy.random.rand(5)
r = numpy.roots(c)
print(r)
mr = myroots(c)
# bare `print` was a Python-2 leftover no-op in Python 3
print()
print(mr)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Alternatives
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Jacobi
#
# Jacobi iteration employs the idea that we know the eigenvalues of a matrix of size equal to or less than 4 (we know the roots of the characteristic polynomial directly). Jacobi iteration therefore attempts to break the matrix down into at most 4 by 4 matrices along the diagonal via a series of similarity transformations based on only diagonalizing sub-matrices 4 by 4 or smaller.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Bisection
#
# It turns out if you do not want all of the eigenvalues of a matrix that using a bisection method to find some subset of the eigenvalues is often the most efficient way to get these. This avoids the pitfall of trying to find the eigenvalues via other root-finding approaches by only needing evaluations of the function and if a suitable initial guess is provided can find the eigenvalue quickly that is closest to the initial bracket provided.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Divide-and-conquer
#
# This algorithm is actually the one used most often used if both eigenvalues and eigenvectors are needed and performs up to twice as fast as the $QR$ approach. The basic idea is to split the matrix into two pieces at every iteration by introducing zeros on the appropriate off-diagonals which neatly divides the problem into two pieces.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Arnoldi and Lanczos Iteration
#
# Krylov subspace methods (which we will unfortunately not cover) are another approach to finding eigenvalues of a matrix. These methods generally use some piece of the $QR$ approach outlined above and are extremely effective at finding the "extreme" eigenvalues of the matrix.
| 13_LA_eigen_2020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hello, Jupylet!
import sys
import os
# allow running the example from the examples/ directory without
# installing the jupylet package
sys.path.insert(0, os.path.abspath('./..'))
from jupylet.label import Label
from jupylet.app import App
app = App(width=320, height=64)
# start the label at the right edge so it scrolls into view
hello = Label('hello, world', color='cyan', font_size=32, x=app.width, y=22)
# +
@app.event
def render(ct, dt):
    """Draw one frame: clear the window, then paint the label.

    ct/dt are presumably the current time and elapsed time in seconds,
    per jupylet's event signature -- confirm against the jupylet docs.
    """
    app.window.clear()
    hello.draw()
@app.run_me_every(1/24)
def scroll(ct, dt):
    """Move the label left at 48 px/s and wrap it to the right edge
    once it has fully left the screen."""
    hello.x = hello.x - dt * 48
    if hello.right < 0:
        hello.x = app.width
# -
# start the application's event loop
app.run()
| examples/02-hello-jupylet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## NMF = Not Monday night Football !
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.pairwise import cosine_similarity
import seaborn as sns
from random import randint
import numpy as np
from sklearn.decomposition import NMF
import pandas as pd
import random
from matplotlib import pyplot as plt
# %matplotlib inline
import csv
# # User Input
#importing ratings and movies csv files
PATH2 = "ratings.csv"
PATH3 = "movies.csv"
ratings, movies_ind = pd.read_csv(PATH2), pd.read_csv(PATH3)
# create an empty array the length of number of movies in system
# NOTE(review): 9724 is hard-coded -- confirm it matches len(movies_ind)
user_ratings = np.zeros(9724)
#format ratings dataframe: pivot to one row per movie, one column per user
del ratings['timestamp']
ratings.set_index(['userId','movieId'], inplace=True)
ratings = ratings.unstack(0)
ratings_count = ratings.count(axis=1) #count the number of ratings for each movie as a measure of popularity
top = pd.DataFrame(ratings_count.sort_values(ascending = False).head(10)) #create a dataframe of the top 10 most popular movies
top.reset_index(inplace=True)
# look up the titles of the most popular movies to prompt the user with
movies_ind.set_index('movieId',inplace=True)
top_movies_g = movies_ind.loc[top['movieId']]['title'].values
# ## Of the following movies, rate all that you have seen on a scale of 1-5.
# ## If you have not seen a movie, rate 0.
#creates a list of ratings for the prompted movies (0 means "not seen")
user_input = []
for i in range(0,10):
    answer = int(input("How would you rate " + str(top_movies_g[i])))
    user_input.append(answer)
movies_ind.reset_index(inplace=True)
# NOTE(review): this indexes the RangeIndex positionally with raw movieId
# values, which is only correct if each movieId equals its row position --
# verify against the movies file
top_movies_index = movies_ind.index[top['movieId']].values
top_movies_index
# inputs user rating into large array (9,000+ count) at appropriate indexes
for i in range(0,10):
    user_ratings[top_movies_index[i]] = user_input[i]
# # NMF Modeling
# +
# reload the movies table (restores the default RangeIndex that was
# replaced by set_index above)
PATH4 = "movies.csv"
movies_ind = pd.read_csv(PATH4)
# replace missing ratings with 0 and keep only the rating values; after
# the transpose the matrix is oriented users x movies (features = movies)
ratings = ratings.fillna(0)
ratings = ratings["rating"]
ratings = ratings.transpose()
# -
ratings.head(2)
# +
R = pd.DataFrame(ratings)
# model assumes R ~ PQ'
model = NMF(n_components=5, init='random', random_state=10)
model.fit(R)
P = model.components_ # Movie feature
Q = model.transform(R) # User features
# -
# project the new user's ratings into the 5-dimensional latent space and
# reconstruct a predicted rating for every movie
query = user_ratings.reshape(1,-1)
t=model.transform(query)
outcome = np.dot(t,P)
outcome=pd.DataFrame(outcome)
outcome = outcome.transpose()
# NOTE(review): aligning movieId by position assumes `outcome` rows and
# `movies_ind` rows are in the same order -- confirm
outcome['movieId'] = movies_ind['movieId']
outcome = outcome.rename(columns={0:'rating'})
outcome
# keep the 50 movies with the highest predicted rating for this user
top = outcome.sort_values(by='rating',ascending=False).head(50)
# # Selecting a Movie
top_movie_recs = movies_ind.loc[top['movieId']]['title'].values
# NOTE(review): randint(0, 4) only samples from the 5 best of the 50
# recommendations -- confirm whether the narrower pool is intentional
Select = top_movie_recs[randint(0, 4)]
Select
# # Sorting by Genre
#importing genres
PATHG = "movie_genres_years.csv"
movie_genres = pd.read_csv(PATHG)
# list of all movie Ids belonging to certain genres
# (one list per one-hot genre column of the genres file)
adventure_movies = list(movie_genres.loc[movie_genres['Genre_Adventure'] == 1]['movieId'])
animation_movies= list(movie_genres.loc[movie_genres['Genre_Animation'] == 1]['movieId'])
children_movies= list(movie_genres.loc[movie_genres['Genre_Children'] == 1]['movieId'])
comedy_movies= list(movie_genres.loc[movie_genres['Genre_Comedy'] == 1]['movieId'])
fantasy_movies= list(movie_genres.loc[movie_genres['Genre_Fantasy'] == 1]['movieId'])
romance_movies= list(movie_genres.loc[movie_genres['Genre_Romance'] == 1]['movieId'])
drama_movies= list(movie_genres.loc[movie_genres['Genre_Drama'] == 1]['movieId'])
action_movies= list(movie_genres.loc[movie_genres['Genre_Action'] == 1]['movieId'])
crime_movies= list(movie_genres.loc[movie_genres['Genre_Crime'] == 1]['movieId'])
thriller_movies= list(movie_genres.loc[movie_genres['Genre_Thriller'] == 1]['movieId'])
horror_movies= list(movie_genres.loc[movie_genres['Genre_Horror'] == 1]['movieId'])
mystery_movies= list(movie_genres.loc[movie_genres['Genre_Mystery'] == 1]['movieId'])
scifi_movies= list(movie_genres.loc[movie_genres['Genre_Sci-Fi'] == 1]['movieId'])
war_movies= list(movie_genres.loc[movie_genres['Genre_War'] == 1]['movieId'])
musical_movies= list(movie_genres.loc[movie_genres['Genre_Musical'] == 1]['movieId'])
documentary_movies= list(movie_genres.loc[movie_genres['Genre_Documentary'] == 1]['movieId'])
imax_movies= list(movie_genres.loc[movie_genres['Genre_IMAX'] == 1]['movieId'])
western_movies= list(movie_genres.loc[movie_genres['Genre_Western'] == 1]['movieId'])
noir_movies= list(movie_genres.loc[movie_genres['Genre_Film-Noir'] == 1]['movieId'])
# columns 3:22 are assumed to be the one-hot genre columns -- TODO confirm
# against the CSV header
genres = movie_genres.columns.values[3:22]
# initialize the genre -> recommendations mapping; the '' placeholder is
# replaced with a list of matching titles below (NOTE(review): code that
# reads `a` later assumes every value is a list)
a = {}
for x in genres:
    key = x
    value = ''
    a[key] = value
# +
# Collect, for each genre, the titles among the top-50 recommendations
# that belong to that genre.  Driving the work from a genre -> movieId
# table replaces the 19 hand-copied loops of the original with one loop,
# with identical results: each entry of a[genre] is the 1-element title
# array for one matching movie, in the same order as `top`.
genre_to_ids = {
    'Genre_Adventure': adventure_movies,
    'Genre_Animation': animation_movies,
    'Genre_Children': children_movies,
    'Genre_Comedy': comedy_movies,
    'Genre_Fantasy': fantasy_movies,
    'Genre_Romance': romance_movies,
    'Genre_Drama': drama_movies,
    'Genre_Action': action_movies,
    'Genre_Crime': crime_movies,
    'Genre_Thriller': thriller_movies,
    'Genre_Horror': horror_movies,
    'Genre_Mystery': mystery_movies,
    'Genre_Sci-Fi': scifi_movies,
    'Genre_War': war_movies,
    'Genre_Musical': musical_movies,
    'Genre_Documentary': documentary_movies,
    'Genre_IMAX': imax_movies,
    'Genre_Western': western_movies,
    'Genre_Film-Noir': noir_movies,
}
for genre_name, genre_ids in genre_to_ids.items():
    matches = []
    for movie_id in top['movieId']:
        if movie_id in genre_ids:
            matches.append(movies_ind[movies_ind['movieId'] == movie_id]['title'].values)
    a[genre_name] = matches
# -
a
def _pick_genre_rec(genre):
    """Return one random recommended title for `genre`, or None when the
    top-50 recommendations contained no movie of that genre (the original
    code crashed with randint(0, -1) in that case)."""
    options = a[genre]
    if not options:
        return None
    return options[randint(0, len(options) - 1)][0]

adventure_rec = _pick_genre_rec('Genre_Adventure')
animation_rec = _pick_genre_rec('Genre_Animation')
children_rec = _pick_genre_rec('Genre_Children')
comedy_rec = _pick_genre_rec('Genre_Comedy')
fantasy_rec = _pick_genre_rec('Genre_Fantasy')
romance_rec = _pick_genre_rec('Genre_Romance')
drama_rec = _pick_genre_rec('Genre_Drama')
action_rec = _pick_genre_rec('Genre_Action')
crime_rec = _pick_genre_rec('Genre_Crime')
thriller_rec = _pick_genre_rec('Genre_Thriller')
horror_rec = _pick_genre_rec('Genre_Horror')
mystery_rec = _pick_genre_rec('Genre_Mystery')
scifi_rec = _pick_genre_rec('Genre_Sci-Fi')
# the original war_rec was missing the trailing [0] and so returned a
# 1-element array instead of a title string; made consistent here
war_rec = _pick_genre_rec('Genre_War')
musical_rec = _pick_genre_rec('Genre_Musical')
imax_rec = _pick_genre_rec('Genre_IMAX')
western_rec = _pick_genre_rec('Genre_Western')
noir_rec = _pick_genre_rec('Genre_Film-Noir')
western_rec
| No_longer_crap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

# Confirm which TensorFlow version is active.
print(tf.__version__)

# Download (if needed) and load the Fashion-MNIST dataset as numpy arrays.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# Inspect shapes and types of the training arrays.
print(train_images.shape)
print(train_labels.shape)
print(type(train_images))
print(type(train_labels))

# Extract a single sample image (index 1) and display it.
one = train_images[1,:,:]
print(one)
print(one.shape)
print(type(one))
plt.imshow(one)
# +
# Use tf.keras layers: mixing the standalone `keras` package with the
# `tensorflow.keras` objects imported above can cause incompatibilities.
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.models import Sequential

# A minimal fully-connected classifier for 28x28 Fashion-MNIST images.
model = Sequential()
# Flatten each 28x28 image into a 784-dim vector; without this, Dense is
# applied per row and the output shape cannot match the label shape.
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(60))
model.add(Activation("relu"))
# One output unit per class (10 classes), softmax for class probabilities.
model.add(Dense(10))
model.add(Activation("softmax"))
# Labels are integer class ids 0-9, so use sparse categorical cross-entropy;
# binary_crossentropy is only valid for 2-class / multi-label problems.
model.compile(optimizer='rmsprop',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10, batch_size=60)
# -
| ml/frame/tensorflow/keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="SMOq8mDwdHUm"
# # Probability distribution functions
# ## Uniform distribution
# + id="og4_IwBwdHUz"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + id="7JAWW22BdHU0"
# Define the parameters of the uniform distribution: n samples drawn from U(a, b).
a = 1
b = 100
n = 1000000
data = np.random.uniform(a, b, n)
# + id="UEMpJbjvdHU1" outputId="95041c83-06da-4c0e-fe32-5ecea7e1516e"
# %matplotlib inline
plt.hist(data)
# + [markdown] id="lzbRifO7dHU3"
# ## Normal distribution
# + id="LQjtKbrTdHU4"
# Random normally-distributed data, drawn from the standard normal N(0, 1).
data = np.random.randn(1000000)
# + id="fQfjZ4Z4dHU5" outputId="ff2fe953-3b78-4a60-a3b1-40af7245f7a3"
x = range(1,1000001)
plt.plot(x, data)
# + id="NQbcBBEMdHU6" outputId="283577f9-3bdc-49be-fcf8-c5092ad0b4e5"
# %matplotlib inline
plt.hist(data)
# + id="nW9qxMfmdHU7" outputId="9db150f3-72e0-4118-8de0-5255f8927973"
# Sort the data.
# Plotting the sorted values gives the (empirical) cumulative curve.
plt.plot(x,sorted(data))
# + id="Bmg9WzO6dHU7" outputId="d369743f-c51a-44d4-eaba-cec35903bfbe"
# For a normal distribution that is not standard: scale and shift N(0,1) samples.
mu = 5.5
sd = 2.5
Z_10000 = np.random.randn(10000)
data = mu + sd * Z_10000 # Z = (X - mu) / sd -> N(0,1), X = mu + sd * Z
plt.hist(data)
# + id="66QVA6BudHU8" outputId="c2ce810f-1ece-43fb-fcc4-6a03be17152e"
# randn also accepts a shape: here a 2x4 matrix of standard-normal samples.
data = np.random.randn(2,4)
data
# + [markdown] id="vhM6DYcddHU9"
# ## Monte Carlo simulation
# + [markdown] id="JrAn7Js-dHU-"
# * Generate two uniform random numbers x and y between 0 and 1, 1000 times in total.
# * Compute $z = x^2 + y^2$:
#     * If $z < 1 \rightarrow$ we are inside the circle.
#     * If $z \geq 1 \rightarrow$ we are outside the circle.
# * Count the total number of times we land inside the circle and divide it by the total number of trials to obtain an approximation of the probability of falling inside the circle.
# * Use that probability to approximate the value of π.
# * Repeat the experiment a sufficient number of times (for example 100) to obtain (100) different approximations of π.
# * Average the previous 100 experiments to give a final value for π.
#
# + id="3_kOroW5dHU_"
# + id="Lqzpaaq9dHU_"
# + id="PgoeSRqDdHU_"
# + id="LXTEFHkBdHVA"
def pi_montecarlo(n, n_exp):
    """Estimate pi by Monte Carlo sampling.

    Draws `n` uniform points in the unit square per experiment and uses the
    fraction falling inside the quarter circle (x^2 + y^2 <= 1) to estimate
    pi; repeats this `n_exp` times and averages the estimates.

    Prints the averaged estimate, plots the per-experiment estimates, and
    returns a tuple (pi_estimate, figure).
    """
    pi_value_list = []
    for _ in range(n_exp):
        # Vectorised with numpy instead of a Python loop over the n points:
        # sqrt(x^2 + y^2) <= 1 is equivalent to x^2 + y^2 <= 1, so the sqrt
        # of the original is dropped too. Same result, much faster.
        x = np.random.uniform(0, 1, n)
        y = np.random.uniform(0, 1, n)
        inside = np.count_nonzero(x * x + y * y <= 1.0)
        pi_value_list.append(float(inside) * 4 / n)
    pi = sum(pi_value_list) / n_exp
    print(pi)
    fig = plt.plot(pi_value_list)
    return (pi, fig)
# + id="h6RXml9DdHVA" outputId="676175f6-3bfc-409e-d3b7-d58823c86077"
# 200 experiments of 10,000 points each.
pi_montecarlo(10000, 200)
# + [markdown] id="0IRVuOftdHVB"
# ### Dummy Data Sets
# + id="FEmA0_HOdHVC"
# Build a synthetic dataset with three differently-distributed columns:
# A ~ N(0,1), B ~ N(1.5, 2.5), C ~ U(5, 32).
n = 1000000
data = pd.DataFrame(
    {
        'A' : np.random.randn(n),
        'B' : 1.5 + 2.5 * np.random.randn(n),
        'C' : np.random.uniform(5, 32, n)
    }
)
# + id="nUM_QckgdHVC" outputId="28b14f10-3412-42b3-d755-78aced27c238"
# Summary statistics should match the distribution parameters above.
data.describe()
# + id="WJ4DFIo3dHVD" outputId="8c5b9a6d-7aaf-4954-ad37-12daa8875d35"
plt.hist(data["A"])
# + id="57A5_NlkdHVD" outputId="aef0a160-69e2-45fd-82c4-47e737aaa5ad"
plt.hist(data["B"])
# + id="Ic1K6FiAdHVE" outputId="6ebd8235-5a35-4892-d07c-1d7686dacdb3"
plt.hist(data["C"])
# + id="THiLwhTydHVE"
data = pd.read_csv("../datasets/customer-churn-model/Customer Churn Model.txt")
# + id="_9UbabLYdHVF" outputId="717e7cea-75f6-4ffe-b41f-10b47778e1cf"
data.head()
# + id="Re1yE9G3dHVF"
colum_names = data.columns.values.tolist()
# + id="fRQNX0JCdHVG" outputId="b6ecfa25-b7eb-4114-9582-501c4ccbf5c3"
a = len(colum_names)
a
# + id="CDlZjfX4dHVG"
# One row per column of the churn dataset, with random A/B values and a
# custom integer index starting at 42.
new_data = pd.DataFrame(
    {
        'Column Name': colum_names,
        'A' : np.random.randn(a),
        'B' : np.random.uniform(0,1,a)
    }, index = range(42, 42 + a)
)
# + id="qHUB6jhSdHVH" outputId="e45f980a-3d7d-4113-af5c-99e13da5f7cd"
new_data
# + id="plfbTLBDdHVH"
| notebooks/T2 - 2 - Data Cleaning - Funciones de distribución de probabilidad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../../../images/qiskit_header.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" align="middle">
# # Visualizing a Quantum Circuit
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
# ## Drawing a Quantum Circuit
#
# When building a quantum circuit, it often helps to draw the circuit. This is supported natively by a `QuantumCircuit` object. You can either call `print()` on the circuit, or call the `draw()` method on the object. This will render a [ASCII art version](https://en.wikipedia.org/wiki/ASCII_art) of the circuit diagram.
# +
# Build a quantum circuit
# 3 qubits and 3 classical bits to hold the measurement results.
circuit = QuantumCircuit(3, 3)
circuit.x(1)
circuit.h(range(3))
circuit.cx(0, 1)
# Trailing semicolon suppresses the cell's return-value echo in Jupyter.
circuit.measure(range(3), range(3));
# -
# Render the ASCII-art diagram, either via print() or via draw().
print(circuit)
circuit.draw()
# ## Alternative Renderers for Circuits
#
# A text output is useful for quickly seeing the output while developing a circuit, but it doesn't provide the most flexibility in its output. There are two alternative output renderers for the quantum circuit. One uses [matplotlib](https://matplotlib.org/), and the other uses [LaTeX](https://www.latex-project.org/), which leverages the [qcircuit package](https://github.com/CQuIC/qcircuit). These can be specified by using `mpl` and `latex` values for the `output` kwarg on the draw() method.
# Matplotlib Drawing
circuit.draw(output='mpl')
# Latex Drawing
circuit.draw(output='latex')
# ## Controlling output from circuit.draw()
#
# By default, the draw method returns the rendered image as an object and does not output anything. The exact class returned depends on the output specified: `'text'`(the default) returns a `TextDrawer` object, `'mpl'` returns a `matplotlib.Figure` object, and `latex` returns a `PIL.Image` object. Having the return types enables modifying or directly interacting with the rendered output from the drawers. Jupyter notebooks understand these return types and render them for us in this tutorial, but when running outside of Jupyter, you do not have this feature automatically. However, the `draw()` method has optional arguments to display or save the output. When specified, the `filename` kwarg takes a path to which it saves the rendered output. Alternatively, if you're using the `mpl` or `latex` outputs, you can leverage the `interactive` kwarg to open the image in a new window (this will not always work from within a notebook but will be demonstrated anyway).
# ## Customizing the output
#
# Depending on the output, there are also options to customize the circuit diagram rendered by the circuit.
#
# ### Disable Plot Barriers and Reversing Bit Order
# The first two options are shared among all three backends. They allow you to configure both the bit orders and whether or not you draw barriers. These can be set by the `reverse_bits` kwarg and `plot_barriers` kwarg, respectively. The examples below will work with any output backend; `latex` is used here for brevity.
# +
# Draw a new circuit with barriers and more registers
q_a = QuantumRegister(3, name='qa')
q_b = QuantumRegister(5, name='qb')
c_a = ClassicalRegister(3)
c_b = ClassicalRegister(5)

circuit = QuantumCircuit(q_a, q_b, c_a, c_b)
# Flip a few qubits to |1> so the rendered diagram is non-trivial.
circuit.x(q_a[1])
circuit.x(q_b[1])
circuit.x(q_b[2])
circuit.x(q_b[4])
circuit.barrier()
circuit.h(q_a)
circuit.barrier(q_a)
circuit.h(q_b)
# Controlled-SWAP gates acting within register qb.
circuit.cswap(q_b[0], q_b[1], q_b[2])
circuit.cswap(q_b[2], q_b[3], q_b[4])
circuit.cswap(q_b[3], q_b[4], q_b[0])
circuit.barrier(q_b)
circuit.measure(q_a, c_a)
circuit.measure(q_b, c_b);
# -
# Draw the circuit
circuit.draw(output='latex')
# Draw the circuit with reversed bit order
circuit.draw(output='latex', reverse_bits=True)
# Draw the circuit without barriers
circuit.draw(output='latex', plot_barriers=False)
# Draw the circuit without barriers and reverse bit order
circuit.draw(output='latex', plot_barriers=False, reverse_bits=True)
# ### Backend-specific customizations
#
# Some available customizing options are specific to a backend. The `line_length` kwarg for the `text` backend can be used to set a maximum width for the output. When a diagram is wider than the maximum, it will wrap the diagram below. The `mpl` backend has the `style` kwarg, which is used to customize the output. The `scale` option is used by the `mpl` and `latex` backends to scale the size of the output image with a multiplicative adjustment factor. The `style` kwarg takes in a `dict` with multiple options, providing a high level of flexibility for changing colors, changing rendered text for different types of gates, different line styles, etc. Available options are:
#
# - **textcolor** (str): The color code to use for text. Defaults to `'#000000'`
# - **subtextcolor** (str): The color code to use for subtext. Defaults to `'#000000'`
# - **linecolor** (str): The color code to use for lines. Defaults to `'#000000'`
# - **creglinecolor** (str): The color code to use for classical register lines `'#778899'`
# - **gatetextcolor** (str): The color code to use for gate text `'#000000'`
# - **gatefacecolor** (str): The color code to use for gates. Defaults to `'#ffffff'`
# - **barrierfacecolor** (str): The color code to use for barriers. Defaults to `'#bdbdbd'`
# - **backgroundcolor** (str): The color code to use for the background. Defaults to `'#ffffff'`
# - **fontsize** (int): The font size to use for text. Defaults to 13
# - **subfontsize** (int): The font size to use for subtext. Defaults to 8
# - **displaytext** (dict): A dictionary of the text to use for each element
# type in the output visualization. The default values are:
#
#
# 'id': 'id',
# 'u0': 'U_0',
# 'u1': 'U_1',
# 'u2': 'U_2',
# 'u3': 'U_3',
# 'x': 'X',
# 'y': 'Y',
# 'z': 'Z',
# 'h': 'H',
# 's': 'S',
# 'sdg': 'S^\\dagger',
# 't': 'T',
# 'tdg': 'T^\\dagger',
# 'rx': 'R_x',
# 'ry': 'R_y',
# 'rz': 'R_z',
# 'reset': '\\left|0\\right\\rangle'
#
#
# You must specify all the necessary values if using this. There is
# no provision for an incomplete dict passed in.
# - **displaycolor** (dict): The color codes to use for each circuit element.
# By default, all values default to the value of `gatefacecolor` and
# the keys are the same as `displaytext`. Also, just like
# `displaytext`, there is no provision for an incomplete dict passed
# in.
# - **latexdrawerstyle** (bool): When set to True, enable LaTeX mode, which will
# draw gates like the `latex` output modes.
# - **usepiformat** (bool): When set to True, use radians for output.
# - **fold** (int): The number of circuit elements at which to fold the circuit.
# Defaults to 20
# - **cregbundle** (bool): If set True, bundle classical registers.
# - **showindex** (bool): If set True, draw an index.
# - **compress** (bool): If set True, draw a compressed circuit.
# - **figwidth** (int): The maximum width (in inches) for the output figure.
# - **dpi** (int): The DPI to use for the output image. Defaults to 150.
# - **creglinestyle** (str): The style of line to use for classical registers.
# Choices are `'solid'`, `'doublet'`, or any valid matplotlib
# `linestyle` kwarg value. Defaults to `doublet`.
# Set line length to 80 for above circuit
# (text-backend-only option; wraps the diagram beyond this width).
circuit.draw(output='text', line_length=80)
# +
# Change the background color in mpl
# (the style dict is an mpl-backend-only option).
style = {'backgroundcolor': 'lightgreen'}
circuit.draw(output='mpl', style=style)
# -
# Scale the mpl output to 1/2 the normal size
circuit.draw(output='mpl', scale=0.5)
# Scale the latex output to 1/2 the normal size
circuit.draw(output='latex', scale=0.5)
#
# ## LaTeX Source
#
# One additional option available with the `latex` output type is to return the raw LaTeX source code instead of rendering an image for it. This enables easy integration with a separate LaTeX document. To use this, set the `output` kwarg to `'latex_source'`. You can also use the `filename` kwarg to write this output directly to a file (and still return the string) instead of returning just a string.
# Print the latex source for the visualization
print(circuit.draw(output='latex_source'))
# Save the latex source to a file
# (the trailing semicolon suppresses the returned string in the notebook).
circuit.draw(output='latex_source', filename='./circuit.tex');
# ## circuit_drawer() as function
#
# If you have an application where you prefer to draw a circuit with a self-contained function instead of as a method of a circuit object, you can directly use the `circuit_drawer()` function, which is part of the public stable interface from `qiskit.tools.visualization`. The function behaves identically to the `circuit.draw()` method, except that it takes in a circuit object as required argument.
#
# <div class="alert alert-block alert-info">
# <b>Note:</b> In Qiskit Terra <b> <= 0.7, </b> the default behavior for the circuit_drawer() function is to use the <i>latex</i> output backend, and in <b>0.6.x</b> that includes a fallback to <i>mpl</i> if <i>latex</i> fails for any reason. Starting with release <b> > 0.7, </b>the default changes to the <i>text</i> output.
# </div>
# Use the standalone circuit_drawer() function instead of the draw() method;
# it takes the circuit object as its required first argument.
from qiskit.tools.visualization import circuit_drawer
circuit_drawer(circuit, output='mpl', plot_barriers=False)
# Report installed Qiskit component versions and the copyright notice.
import qiskit.tools.jupyter
# %qiskit_version_table
# %qiskit_copyright
| qiskit-tutorials/13_advanced_circuit_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np

df = pd.read_csv('../data/movies_metadata.csv')

#Print all the features (or columns) of the DataFrame
df.columns
# +
#Only keep those features that we require
df = df[['title','genres', 'release_date', 'runtime', 'vote_average', 'vote_count']]

df.head()
# +
#Convert release_date into pandas datetime format
df['release_date'] = pd.to_datetime(df['release_date'], errors='coerce')

#Extract year from the datetime
# BUG FIX: the original guard was `if x != np.nan`, which is always True
# because NaN never compares equal to anything (so NaT dates fell through to
# str() conversion). Use pd.notnull() to detect missing dates properly.
df['year'] = df['release_date'].apply(lambda x: str(x).split('-')[0] if pd.notnull(x) else np.nan)
# -
#Helper function to convert NaT to 0 and all other years to integers.
def convert_int(x):
    """Convert x to int, returning 0 for anything unconvertible (e.g. NaT/NaN/None)."""
    try:
        return int(x)
    # Narrowed from a bare `except:` so unrelated errors (KeyboardInterrupt,
    # SystemExit, ...) are not swallowed; int() failures raise only these two.
    except (ValueError, TypeError):
        return 0
#Apply convert_int to the year feature
df['year'] = df['year'].apply(convert_int)
# +
#Drop the release_date column
df = df.drop('release_date', axis=1)

#Display the dataframe
df.head()
# -
#Print genres of the first movie
df.iloc[0]['genres']
# +
#Import the literal_eval function from ast
from ast import literal_eval

#Define a stringified list and output its type
a = "[1,2,3]"
print(type(a))

#Apply literal_eval and output type
b = literal_eval(a)
print(type(b))
b
# +
#Convert all NaN into stringified empty lists
df['genres'] = df['genres'].fillna('[]')

#Apply literal_eval to convert stringified empty lists to the list object
df['genres'] = df['genres'].apply(literal_eval)

#Convert list of dictionaries to a list of strings
df['genres'] = df['genres'].apply(lambda x: [i['name'].lower() for i in x] if isinstance(x, list) else [])
# -
df.head()
# +
#Create a new feature by exploding genres
# stack() yields one row per (movie, genre) pair; the inner level of the
# resulting MultiIndex is dropped so each row keeps the movie's index.
s = df.apply(lambda x: pd.Series(x['genres']),axis=1).stack().reset_index(level=1, drop=True)

#Name the new feature as 'genre'
s.name = 'genre'
# -
# Joining on the shared index duplicates each movie once per genre.
gen_df3 = df.join(s)
gen_df3.head()
# +
#Create a new dataframe gen_df which by dropping the old 'genres' feature and adding the new 'genre'.
gen_df = df.drop('genres', axis=1).join(s)

#Print the head of the new gen_df
gen_df.head()
# -
# -
def build_chart(gen_df, percentile=0.8):
    """Interactively build a top-movies chart.

    Prompts the user (via stdin) for a genre, a runtime range and a
    release-year range, filters `gen_df` accordingly, then ranks the
    surviving movies with the IMDB weighted-rating formula. Only movies
    whose vote count reaches the given quantile (`percentile`) of the
    filtered set are kept. Returns the chart sorted by descending score.
    """
    # Gather the user's preferences, in the same prompt order as always.
    print("Input preferred genre")
    genre = input()
    print("Input shortest duration")
    min_runtime = int(input())
    print("Input longest duration")
    max_runtime = int(input())
    print("Input earliest year")
    min_year = int(input())
    print("Input latest year")
    max_year = int(input())

    # Work on a copy so the caller's DataFrame is left untouched.
    selection = gen_df.copy()
    mask = (
        (selection['genre'] == genre)
        & (selection['runtime'] >= min_runtime)
        & (selection['runtime'] <= max_runtime)
        & (selection['year'] >= min_year)
        & (selection['year'] <= max_year)
    )
    selection = selection[mask]

    # IMDB weighted rating: C is the mean vote over the filtered movies and
    # m is the vote-count cutoff at the requested quantile.
    C = selection['vote_average'].mean()
    m = selection['vote_count'].quantile(percentile)

    # Keep only movies with at least m votes.
    chart = selection.copy().loc[selection['vote_count'] >= m]

    # score = (v/(v+m))*R + (m/(m+v))*C, computed vectorised over the rows.
    v = chart['vote_count']
    R = chart['vote_average']
    chart['score'] = v / (v + m) * R + m / (m + v) * C

    return chart.sort_values('score', ascending=False)
#Generate a chart interactively (genre, duration and year range are read
#from stdin by build_chart) and display the top 5 entries.
build_chart(gen_df).head()
#Convert the cleaned (non-exploded) dataframe df into a CSV file and save it in the data folder
#Set parameter index to False as the index of the DataFrame has no inherent meaning.
df.to_csv('../data/metadata_clean.csv', index=False)
# Number of rows in the cleaned dataframe.
len(df)
| Chapter3/Knowledge Recommender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
# # DocTable Schemas
# Your database table column names and types come from a schema class defined using the `@doctable.schema` decorator. In addition to providing a schema definition, this class can be used to encapsulate data when inserting or retrieving from the database.
#
# At its most basic, your schema class operates like a [dataclass](https://realpython.com/python-data-classes/) that uses slots for efficiency and allows for custom methods that will not affect the database schema.
# +
from datetime import datetime
from pprint import pprint
import pandas as pd

import sys
# Make the doctable package importable when running from the examples/ folder.
sys.path.append('..')
import doctable
# -
# # Introduction
#
# This is an example of a basic doctable schema. Note the use of the decorator `@doctable.schema`, the inclusion of `__slots__ = []`, and the type hints of the member variables - I will explain each of these later in this document.
#
# This class represents a database schema that includes two columns: `name` (an `int`) and `age` (a `str`).
@doctable.schema
class Record:
    """Minimal schema class: two columns, types inferred from the hints."""
    __slots__ = []
    # name -> VARCHAR, age -> INTEGER (inferred by the schema decorator).
    name: str
    age: int
# The schema class definition is then provided to the doctable constructor to create the database table. Here we create an in-memory sqlite table and show the schema resulting from our custom class. Note that doctable automatically inferred that `name` should be a `VARCHAR` and `age` should be an `INTEGER` based on the provided type hints.
# the schema that would result from this dataclass:
table = doctable.DocTable(target=':memory:', schema=Record)
table.schema_table()
# We can also use the schema class to insert data into our `DocTable`. We simply create a new `Record` and pass it to the `DocTable.insert()` method. Using `.head()`, we see the contents of the database so far. Note that you may also pass a dictionary to insert data - this is just one way of inserting data.
# Insert a Record instance directly; a plain dict would also be accepted.
new_record = Record(name='<NAME>', age=30)
print(new_record)
table.insert(new_record)
table.head()
# And perhaps more usefully, we can use it to encapsulate results from `.select()` queries. Note that the returned object is exactly the same as the one we put in. Slot classes are more memory-efficient than dictionaries for storing data, but there is cpu time overhead from inserting that data into the slots.
# select_first() wraps the retrieved row back in a Record instance.
first_record = table.select_first()
print(first_record)
# But, of course, the data can be returned in its raw format by passing the parameter `as_dataclass=False`.
# Opt out of the dataclass wrapper to get the raw row data instead.
first_record = table.select_first(as_dataclass=False)
print(first_record)
# # The `doctable.schema` Decorator
#
# The `@doctable.schema` decorator does the work to convert your custom class into a schema class. It transforms your schema class in three ways:
#
# 1. **create slots**: First, [slot](https://docs.python.org/3/reference/datamodel.html#slots) variable names will be added to `__slots__` automatically based on the fields in your class definition. This is why the default functionality requires you to add `__slots__ = []` with no variable names. You may also turn slots off by passing `require_slots=False` to the decorator (i.e. `@doctable.schema(require_slots=False)`), otherwise an exception will be raised.
#
# 2. **convert to dataclass**: Second, your schema class will be converted to a [dataclass](https://realpython.com/python-data-classes/) that generates `__init__`, `__repr__`, and other boilerplate methods meant for classes that primarily store data. Any keyword arguments passed to the `schema` decorator, with the exception of `require_slots`, will be passed directly to the `@dataclasses.dataclass` decorator so you have control over the dataclass definition.
#
# 3. **inherit from `DocTableSchema`**: Lastly, your schema class will inherit from `doctable.DocTableSchema`, which provides additional accessors that are used for storage in a `DocTable` and fine-grained control over retreived data. More on this later.
#
#
# Column names and types will be inferred from the type hints in your schema class definition. Because `DocTable` is built on [sqlalchemy core](https://docs.sqlalchemy.org/en/14/core/), all fields will eventually be converted to [`sqlalchemy` column objects](https://docs.sqlalchemy.org/en/13/core/type_basics.html) and added to the DocTable metadata. This table shows the type mappings implemented in doctable:
# Mapping of Python type hints to sqlalchemy column types
# (the attribute name is spelled "slqlchemy" in the library itself).
doctable.python_to_slqlchemy_type
# For example, see this example of the most basic possible schema class that can be used to create a doctable. We use static defaulted parameters and type hints including `str`, `int`, `datetime`, and `Any`, which you can see are converted to `VARCHAR`, `INTEGER`, `DATETIME`, and `BLOB` column types, respectively. `BLOB` was used because the provided type hint `Any` has no entry in the above table.
# +
from typing import Any
import datetime

@doctable.schema
class Record:
    __slots__ = []
    # Type hints drive the column types (str -> VARCHAR, int -> INTEGER,
    # datetime -> DATETIME, Any -> BLOB). The `= None` values are dataclass
    # constructor defaults, NOT database-side defaults.
    name: str = None
    age: int = None
    time: datetime.datetime = None
    friends: Any = None

# the schema that would result from this dataclass:
doctable.DocTable(target=':memory:', schema=Record).schema_table()
# -
# You can see that this class operates much like a regular dataclass with slots. Thus, these defaulted parameters are applied in the constructor of the schema class, and _NOT_ as the default value in the database schema.
Record('<NAME>', 30)
# # Use `doctable.Col` For More Control Over Schema Creation
#
# Using `doctable.Col()` as a default value in the schema class definition can give you more control over schema definitions.
#
# Firstly, this function returns a dataclass [`field`](https://docs.python.org/3/library/dataclasses.html#dataclasses.field) object that can be used to set parameters like `default_factory` or `compare` as used by the dataclass. Pass arguments meant for `field` through the `Col` parameter `field_kwargs=dict(..)`. Other data passed to `Col` will be used to create the `DocTable` schema, which is stored as metadata inside the `field`.
#
# This example shows how `Col` can be used to set some parameters meant for `field`. These will affect your schema class behavior without affecting the produced DocTable schema.
# +
@doctable.schema
class Record:
    __slots__ = []
    name: str = doctable.Col()
    # field_kwargs are forwarded to dataclasses.field(); they affect the
    # dataclass behaviour only, not the resulting DocTable schema.
    age: int = doctable.Col(field_kwargs=dict(default_factory=list, compare=True))
Record()
# -
# -
# `Col` also allows you to explicitly specify a column type using a string, sqlalchemy type definition, or sqlalchemy instance passed to `column_type`. You can then pass arguments meant for the sqlalchemy type constructor through `type_kwargs`. You may also use `type_kwargs` with the column type inferred from the type hint.
# +
import sqlalchemy

@doctable.schema
class Record:
    __slots__ = []
    # providing only the type as first argument
    age: int = doctable.Col(sqlalchemy.BigInteger)

    # these are all equivalent ways of declaring a VARCHAR(100) column:
    name1: str = doctable.Col(type_kwargs=dict(length=100)) # infers type from type hint
    name2: str = doctable.Col(sqlalchemy.String, type_kwargs=dict(length=100)) # accepts provided type sqlalchemy.String, pass parameters through type_kwargs
    name3: str = doctable.Col(sqlalchemy.String(length=100)) # accepts type instance (no need for type_kwargs this way)
    name4: str = doctable.Col('string', type_kwargs=dict(length=100))

# the schema that would result from this dataclass:
doctable.DocTable(target=':memory:', schema=Record).schema_table()
# -
# -
# A full list of string -> sqlalchemy type mappings is shown below:
# Full mapping of string aliases (e.g. 'string', 'integer') to sqlalchemy types.
doctable.string_to_sqlalchemy_type
# Finally, `Col` allows you to pass keyword arguments directly to the sqlalchemy `Column` constructor. This includes flags like `primary_key` or `default`, which are both used to construct the database schema but do not affect the python dataclass. Note that I recreated the classic `id` column below.
# +
@doctable.schema
class Record:
    __slots__ = []
    # These keyword args go straight to sqlalchemy's Column constructor, so
    # they shape the database schema without affecting the dataclass itself.
    id: int = doctable.Col(primary_key=True, autoincrement=True)
    age: int = doctable.Col(nullable=False)
    name: str = doctable.Col(default='MISSING_NAME')

# the schema that would result from this dataclass:
doctable.DocTable(target=':memory:', schema=Record).schema_table()
# -
# -
# I also included some shortcut `Col` functions like `IDCol`, `AddedCol`, and `UpdatedCol` - see below.
# +
import datetime

# Shortcut column constructors for common bookkeeping columns.
@doctable.schema
class Record:
    __slots__ = []
    id: int = doctable.IDCol() # auto-increment primary key
    added: datetime.datetime = doctable.AddedCol() # record when row was added
    updated: datetime.datetime = doctable.UpdatedCol() # record when row was updated

doctable.DocTable(target=':memory:', schema=Record).schema_table()
# -
# -
# In this way, `Col` allows you to give fine-grained control to both the schema class behavior and the sql schema definition.
# # Working With Schema Objects
#
# Using `Col` default parameters also has some additional side effects, primarily due to the inherited class `DocTableSchema`. Among other things, the `Col` method defines the default dataclass value to be a `doctable.EmptyValue()` object, which is essentially a placeholder for data that was not inserted into the class upon construction. The `__repr__` defined in `DocTableSchema` dictates that member objects containing this value not appear when printing the class, and furthermore, member variables with the value `EmptyValue()` will not be provided in the database insertion. This means that the database schema is allowed to use its own default value - an effect which is most obviously useful when inserting an object that does not have an `id` or other automatically provided values.
#
# The example below shows the `new_record.id` contains `EmptyValue()` as a default, and that the `id` column is not included in the insert query - only `name`.
# +
@doctable.schema
class Record:
    __slots__ = []
    id: int = doctable.IDCol()
    name: str = doctable.Col()

# id is left unset (EmptyValue()), so it is omitted from the INSERT and the
# database supplies its own auto-increment value instead.
new_record = Record(name='<NAME>')
print(new_record, new_record.id)

table = doctable.DocTable(target=':memory:', schema=Record, verbose=True)
table.insert(new_record)
table.head()
# -
# Yet when we go to retrieve the inserted data, we can see that the value has been replaced by the defaulted value in the database. This is a useful feature if your pipeline involves the insertion of schema objects directly (as opposed to inserting dictionaries for each row).
table.select_first(verbose=False)
# The `EmptyValue()` feature is also useful when issuing select queries involving only a subset of columns. See here we run a select query where we just retrieve the name data, yet the result is still stored in a `Record` object.
# Selecting a subset of columns still wraps the result in a Record; the
# unselected columns hold EmptyValue().
returned_record = table.select_first(['name'], verbose=False)
print(returned_record, returned_record.id)
# To avoid working with `EmptyValue()` objects directly, it is recommended that you use the `__getitem__` string subscripting to access column data. When using this subscript, the schema object will raise an exception if the returned value is an `EmptyValue()`.
# Subscript access raises KeyError for columns that hold EmptyValue().
try:
    returned_record['id']
except KeyError as e:
    print(e)
# # Indices and Constraints
#
# Indices and constraints are provided to the `DocTable` constructor or definition, as it is not part of the schema class. Here I create custom schema and table definitions where the table has some defined indices and constraints. `doctable.Index` is really just a direct reference to `sqlalchemy.Index`, and `doctable.Constraint` is a mapping to an sqlalchemy constraint type, with the first argument indicating which one.
# +
@doctable.schema
class Record:
    __slots__ = []
    id: int = doctable.IDCol()
    name: str = doctable.Col()
    age: int = doctable.Col()

# Indices and constraints belong to the table definition, not the schema class.
class RecordTable(doctable.DocTable):
    _tabname_ = 'records'
    _schema_ = Record

    # table indices
    _indices_ = (
        doctable.Index('name_index', 'name'),
        doctable.Index('name_age_index', 'name', 'age', unique=True),
    )

    # table constraints
    _constraints_ = (
        doctable.Constraint('unique', 'name', 'age', name='name_age_constraint'),
        doctable.Constraint('check', 'age > 0', name='check_age'),
    )

table = RecordTable(target=':memory:')
# -
# And we can see that the constraints are working when we try to insert a record where age is less than 1.
# Violating the CHECK constraint (age > 0) raises an IntegrityError.
try:
    table.insert(Record(age=-1))
except sqlalchemy.exc.IntegrityError as e:
    print(e)
# This is a full list of the mappings between constraint names and the associated sqlalchemy objects.
doctable.constraint_lookup
# # Conclusions
#
# In this guide, I tried to show some exmaples and give explanations for the ways that schema classes can be used to create doctables. The design is fairly efficent and flexible, and brings a more object-focused approach compared to raw sql queries without the overhead of ORM.
| examples/doctable_schema.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Running a forward pass through LeNet using MNIST and Joey
# In this example, we will construct LeNet using Joey, set it up with pretrained parameters and run a forward pass through it with test data from MNIST. The results will be compared to the PyTorch ones to confirm Joey's numerical correctness.
# Firstly, let's import all the prerequisites:
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
import joey as ml
from joey.activation import ReLU
# We'll define `imshow()` to quickly have a look at the MNIST data we'll use for the forward pass.
def imshow(img):
    """Display a (C, H, W) image tensor, undoing the Normalize(0.5, 0.5) transform."""
    # Map pixel values from [-1, 1] back to [0, 1] before plotting.
    unnormalized = img / 2 + 0.5
    # matplotlib expects channels last: (H, W, C).
    pixels = np.transpose(unnormalized.numpy(), (1, 2, 0))
    plt.imshow(pixels)
    plt.show()
# Before we start working with Joey, we have to download the images and convert them to NumPy arrays with `dtype=np.float64`. This is because Joey supports only NumPy arrays (rather than PyTorch tensors) and it currently works with double floating-point numbers.
#
# In our case, a batch will consist of 4 elements.
# +
# Normalize MNIST images into [-1, 1] and resize them to LeNet's 32x32 input.
transform = transforms.Compose(
    [transforms.Resize((32, 32)),
     transforms.ToTensor(),
     transforms.Normalize(0.5, 0.5)])
testset = torchvision.datasets.MNIST(root='./mnist', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
dataiter = iter(testloader)
# Use the builtin next(): the iterator's .next() method was removed in
# recent PyTorch releases (and was never the Python 3 iterator protocol).
images, labels = next(dataiter)
# Joey works with NumPy float64 arrays rather than PyTorch tensors.
input_data = images.double().numpy()
# -
# Let's have a look at what we've downloaded:
imshow(torchvision.utils.make_grid(images))
# Now, we'll define `forward_pass()`. It creates LeNet using the `Net` class in Joey along with appropriate layer classes (here: `Conv`, `MaxPooling`, `Flat` and `FullyConnected`). Afterwards, by accessing the `kernel` and `bias` properties of each relevant layer, it inserts the pretrained weights saved in `.npy` files inside `resources/`.
#
# Note that we have to disable a strict stride check in `layer4`. If we didn't do that, we would get an error saying the stride is incompatible with the provided kernel and input sizes.
# +
def forward_pass(input_data):
    """Build LeNet in Joey, load the pretrained weights and run one forward pass.

    Returns the seven trainable/pooling layers so callers can inspect
    intermediate results (the network output is ``layer7.result.data``).
    """
    parameters = get_parameters()
    batch_size = 4
    # Six 3x3 filters, activation RELU
    layer1 = ml.Conv(kernel_size=(6, 3, 3),
                     input_size=(batch_size, 1, 32, 32),
                     activation=ReLU())
    # 2x2 max pooling
    layer2 = ml.MaxPooling(kernel_size=(2, 2),
                           input_size=(batch_size, 6, 30, 30),
                           stride=(2, 2))
    # Sixteen 3x3 filters, activation RELU
    layer3 = ml.Conv(kernel_size=(16, 3, 3),
                     input_size=(batch_size, 6, 15, 15),
                     activation=ReLU())
    # 2x2 max pooling
    layer4 = ml.MaxPooling(kernel_size=(2, 2),
                           input_size=(batch_size, 16, 13, 13),
                           stride=(2, 2),
                           # 13 is odd, so the strict stride/size check must be off.
                           strict_stride_check=False)
    # Full connection (16 * 6 * 6 -> 120), activation RELU
    layer5 = ml.FullyConnected(weight_size=(120, 576),
                               input_size=(576, batch_size),
                               activation=ReLU())
    # Full connection (120 -> 84), activation RELU
    layer6 = ml.FullyConnected(weight_size=(84, 120),
                               input_size=(120, batch_size),
                               activation=ReLU())
    # Full connection (84 -> 10), output layer
    layer7 = ml.FullyConnected(weight_size=(10, 84),
                               input_size=(84, batch_size))
    # Flattening layer necessary between layer 4 and 5
    layer_flat = ml.Flat(input_size=(batch_size, 16, 6, 6))
    layers = [layer1, layer2, layer3, layer4,
              layer_flat, layer5, layer6, layer7]
    net = ml.Net(layers)
    # Setting up the pretrained parameters.
    # parameters alternate weight/bias pairs; see get_parameters() for the file layout.
    layer1.kernel.data[:] = parameters[0]
    layer1.bias.data[:] = parameters[1]
    layer3.kernel.data[:] = parameters[2]
    layer3.bias.data[:] = parameters[3]
    layer5.kernel.data[:] = parameters[4]
    layer5.bias.data[:] = parameters[5]
    layer6.kernel.data[:] = parameters[6]
    layer6.bias.data[:] = parameters[7]
    layer7.kernel.data[:] = parameters[8]
    layer7.bias.data[:] = parameters[9]
    net.forward(input_data)
    return (layer1, layer2, layer3, layer4, layer5, layer6, layer7)
def get_parameters():
    """Load the pretrained LeNet parameters from resources/1.npy ... resources/10.npy.

    The files alternate weight/bias pairs for the trainable layers:
    1/2 -> layer 1, 3/4 -> layer 3, 5/6 -> layer 5, 7/8 -> layer 6,
    9/10 -> layer 7 (layer numbering as in forward_pass()).
    """
    path = './resources/'
    return [np.load(path + str(n) + '.npy') for n in range(1, 11)]
# -
# At this point, we're ready to run the forward pass!
layer1, layer2, layer3, layer4, layer5, layer6, layer7 = forward_pass(input_data)
# After the pass is finished, we can access its output by checking the `result` property of the last layer.
# Note the layout: rows are the 10 classes, columns are the 4 batch elements.
output = layer7.result.data
print(output)
# The results look promising: for each batch element (arranged in columns rather than rows), the highest number corresponds to the expected class, i.e. '7' has been recognised as 7, '2' has been recognised as 2, '1' has been recognised as 1 and '0' has been recognised as 0.
#
# For reference, we'll construct the same network with the same weights in PyTorch, run the pass there and compare the outputs.
class Net(nn.Module):
    """PyTorch reference implementation of the same LeNet variant built with Joey."""

    def __init__(self):
        super(Net, self).__init__()
        # Two 3x3 convolutions followed by a three-layer classifier head.
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.conv2 = nn.Conv2d(6, 16, 3)
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # conv -> ReLU -> 2x2 max pool, twice.
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # Flatten everything except the batch dimension.
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

    def num_flat_features(self, x):
        """Number of features per sample when x is flattened (batch dim excluded)."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
# +
net = Net()
net.double()
# Copy Joey's pretrained parameters into the PyTorch model so both networks
# compute exactly the same function (no_grad: plain in-place tensor writes,
# not tracked autograd operations).
with torch.no_grad():
    net.conv1.weight[:] = torch.from_numpy(layer1.kernel.data)
    net.conv1.bias[:] = torch.from_numpy(layer1.bias.data)
    net.conv2.weight[:] = torch.from_numpy(layer3.kernel.data)
    net.conv2.bias[:] = torch.from_numpy(layer3.bias.data)
    net.fc1.weight[:] = torch.from_numpy(layer5.kernel.data)
    net.fc1.bias[:] = torch.from_numpy(layer5.bias.data)
    net.fc2.weight[:] = torch.from_numpy(layer6.kernel.data)
    net.fc2.bias[:] = torch.from_numpy(layer6.bias.data)
    net.fc3.weight[:] = torch.from_numpy(layer7.kernel.data)
    net.fc3.bias[:] = torch.from_numpy(layer7.bias.data)
# Transpose so batch elements are columns, matching Joey's output layout.
pytorch_output = np.transpose(net(images.double()).detach().numpy())
# -
# After creating and running the network in PyTorch, we'll calculate a relative error matrix as shown below. The maximum value in that matrix will be obtained as well.
# +
# Element-wise relative error between Joey's output and PyTorch's.
error = abs(output - pytorch_output) / abs(pytorch_output)
print(error)
# nanmax skips NaN entries (0/0 where both outputs are exactly zero).
print(np.nanmax(error))
# -
# As we can see, the maximum error is low enough (given the floating-point calculation accuracy) for the Joey results to be considered numerically correct.
| examples/lenet_forward_pass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:biobombe]
# language: python
# name: conda-env-biobombe-py
# ---
# # Download TARGET Pan Cancer Data for Compression
#
# The notebook downloads gene expression and clinical data from The TARGET project. The data is downloaded from [UCSC Xena](https://xenabrowser.net/datapages/?dataset=target_RSEM_gene_fpkm&host=https%3A%2F%2Ftoil.xenahubs.net).
#
# The data is in `log2(FPKM)` RSEM transformed.
import os
from urllib.request import urlretrieve
# +
# Get Gene Expression Data
url = 'https://toil.xenahubs.net/download/'
name = 'target_RSEM_gene_fpkm.gz'
# Make sure the target directory exists before downloading into it;
# urlretrieve() raises FileNotFoundError if 'download/' is missing.
os.makedirs('download', exist_ok=True)
path = os.path.join('download', name)
# -
urlretrieve('{}{}'.format(url, name), path)
# ! sha256sum 'download/target_RSEM_gene_fpkm.gz'
# +
# Get Probe Mappings
name = 'gencode.v23.annotation.gene.probemap'
path = os.path.join('download', name)
# -
# Probe map lives under the hub's probeMap/ prefix.
urlretrieve('{}probeMap/{}'.format(url, name), path)
# ! sha256sum 'download/gencode.v23.annotation.gene.probemap'
# +
# Get Sample Identifiers
name = 'TARGET_phenotype.gz'
path = os.path.join('download', name)
# -
urlretrieve('{}{}'.format(url, name), path)
# ! sha256sum 'download/TARGET_phenotype.gz'
| 0.expression-download/1B.download-target-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Last.FM Recommendation System - An Introduction to Collaborative Filtering
#
# * The dataset contains information about users, gender, age, and which artists they have listened to on Last.FM. In this notebook, we use only Germany's data and transform the data into a frequency matrix.
#
# We are going to implement 2 types of collaborative filtering:
#
# 1. Item based: Which takes similarities between items' consumption histories
# 2. User based: Which considers similarities between user consumption histories and item similarities
# +
import pandas as pd
from scipy.spatial.distance import cosine
# Disable jedi autocompleter
# %config Completer.use_jedi = False
# -
df = pd.read_csv('../Datasets/lastfm-matrix-germany.csv')
df.sample(5)
df.info()
# downcast the datatypes of all column, in order to save some memory
cols = df.columns
df[cols] = df[cols].apply(pd.to_numeric, downcast='unsigned')
df.info()
# ## Item Based Collaborative Filtering
#
# In item based collaborative filtering we do not care about the user column. So let's drop it
df_de = df.drop('user', axis=1)
df_de.info()
df_de.head()
# Before we calculate the similarities we need to create a placeholder as a pandas DF
ibcf = pd.DataFrame(index=df_de.columns, columns=df_de.columns)
# Now we can start filling in the similarities. We will use the `cosine` similarities from `scipy`
# +
# Fill the placeholder with pairwise cosine similarities between song columns.
# Cosine similarity is symmetric, so each pair is computed once and mirrored,
# halving the number of (slow, per-pair) scipy calls versus the naive
# double loop over all (i, j).
for i in range(ibcf.shape[1]):
    for j in range(i, ibcf.shape[1]):
        sim = 1 - cosine(df_de.iloc[:, i], df_de.iloc[:, j])
        ibcf.iloc[i, j] = sim
        ibcf.iloc[j, i] = sim
# -
ibcf.head()
# With our similarity matrix filled out we can sort each columns separately and save the names of the top 10 songs of each column in a new DF
# Create a placeholder with `top` rows and the same columns as ibcf
top = 10
top10 = pd.DataFrame(index=range(top), columns=ibcf.columns)
for c in ibcf.columns:
    # Skip index 0 of the sorted column: a song is always most similar to
    # itself. Use `top` rather than a hard-coded 11 so that changing `top`
    # keeps the placeholder and the slice consistent.
    top10[c] = ibcf[c].sort_values(ascending=False).index[1:top + 1]
# Show top 10 similarities of the first 10 songs
top10.iloc[:,:9]
# ## User Based Collaborative Filtering
#
# The steps for creating a user based recommendation system are the following:
#
# 1. Generate an item based recommendation system
# 2. Check what products the user has consumed
# 3. For each item the user has consumed, get the top X neighbours
# 4. Get the consumption record of the user for each neighbour
# 5. Calculate a similarity score
# 6. Recommend the items with the highest score
#
# We first need a formula to compute a similarity score. We use the sum of the product between purchase history and item similarity. We then divide that figure by the sum of the similarities:
# Helper function to get similarity scores
def getScore(history, similarities):
    """Similarity-weighted average of the user's consumption history."""
    weighted = history * similarities
    return sum(weighted) / sum(similarities)
# Now we just have to apply this function to the data frames.
# Place holder
df_sim = pd.DataFrame(index=df.index, columns=df.columns)
df_sim.iloc[:, :1] = df.iloc[:, :1]
# We now loop through the rows and columns filling in empty spaces with similarity scores.
# Note that we score items that the user has already consumed as 0, because there is no point recommending it again.
# Loop through all rows, skip the user column, and fill with similarity scores
for i in range(0, len(df_sim.index)):
    for j in range(1, len(df_sim.columns)):
        user = df_sim.index[i]
        product = df_sim.columns[j]
        if df.iloc[i, j] == 1:
            # Already consumed: never recommend it again.
            df_sim.iloc[i, j] = 0
        else:
            # Top-10 most similar songs (and their similarity scores) for this product.
            product_top_names = top10[product]
            product_top_sims = ibcf.loc[product].sort_values(ascending=False)[1:11]
            user_purchases = df_de.loc[user, product_top_names]
            # BUG FIX: single .iloc[i, j] assignment. The original chained
            # indexing (df_sim.iloc[i][j] = ...) writes into a temporary row
            # copy and may silently leave df_sim unmodified.
            df_sim.iloc[i, j] = getScore(user_purchases, product_top_sims)
df_sim.head()
# Instead of having the matrix filled with similarity scores, however, it would be nice to see the song names.
# +
# We can now produce a matrix of User Based recommendations as follows:
recommendations = pd.DataFrame(index=df_sim.index, columns=['user','1','2','3','4','5','6'])
# Carry the user identifiers over from the score matrix.
recommendations.iloc[0:,0] = df_sim.iloc[:,0]
# For each user, take the names of the six highest-scoring products
# (positions 1-6 of the sorted row; position 0 appears to be the user-id
# column, which sorts first -- TODO confirm).
for i in range(len(df_sim.index)):
    recommendations.iloc[i,1:] = df_sim.iloc[i,:].sort_values(ascending=False).iloc[1:7,].index.T
# -
recommendations.head()
# ## Reference
#
# * <NAME>, [Collaborative Filtering with Python](http://www.salemmarafi.com/code/collaborative-filtering-with-python/)
| Recommendation_Systems/lastfm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/logo.png" width="140" align="left">
#
# <center> <h1>Python for Engineers</h1> </center>
# <center> <h2>Introduction to the Tuple object</h2> </center>
#
# ### Before you begin
# * If you are not familiar with Python variables and operators checkout my video on [YouTube](https://youtu.be/PHQmhXCxv78) and the accompanying jupyter notebook on [Github](https://github.com/endlesseng/python-for-engineers/blob/master/notebooks/Intro-to-Variables-and-Operators.ipynb)
# * The concepts presented here are related to concepts for a python List, checkout my [Intro to Python Lists](https://youtu.be/qD1MmgPQFSs) video on YouTube, and find the accompanying jupyter notebook on [Github](https://github.com/endlesseng/python-for-engineers/blob/master/notebooks/Intro-to-Lists.ipynb)
# ## What is covered?
# 1. What is a tuple in Python?
# 2. Creating a tuple
# 3. Accessing tuple items
# 4. Can we modify a tuple?
# 5. Useful tuple operations
# ## [What is a tuple in Python](https://docs.python.org/3.8/tutorial/datastructures.html#tuples-and-sequences)
# In Python a __tuple__ is an immutable sequence or ordered collection of elements (items). So the order in which elements appear in the tuple is conserved. Tuples are typically defined using parentheses __`( )`__.
# ## Creating tuples
#
# +
# this is a tuple with 4 items
my_first_tuple = ("cat", "1.2", 1.2, "dog")
print("my first tuple: ", my_first_tuple)
# +
# this is an empty tuple
empty_tuple = ()
print("an empty tuple: " , empty_tuple)
# +
# tuple of one element
one_element_tuple = (0.1, )
print("tuple with one element: " , one_element_tuple)
# -
type(one_element_tuple)
# +
# creating a tuple without parentheses
tuple_no_paren = 3, 54, 1
print("tuple created without using parentheses: ", tuple_no_paren)
# -
type(tuple_no_paren)
# <img src="images/basis_vec.png" width="300" align="right">
#
# __Example__ Cartesian coordinate system basis vectors.
# <br/><br/>
#
#
# \begin{equation}
# i = [1, 0, 0]
# \end{equation}
#
# \begin{equation}
# j = [0, 1, 0]
# \end{equation}
#
# \begin{equation}
# k= [0, 0, 1]
# \end{equation}
# <br/><br/>
#
# \begin{equation}
# a = a_x i + a_y j + a_z k
# \end{equation}
# +
# Cartesian basis
i = (1, 0, 0)
j = (0, 1, 0)
k = (0, 0, 1)
print("The basis of a Cartesian coordinate system are: ", "\ni: ", i, "\nj: ", j, "\nk: ", k)
# +
# tuples can contain other tuples and objects
tuple_of_obj = ([1, 2, 3], (3, 5, 6), False, ([2, 3], [4, 5]))
print("tuple of objects: " , tuple_of_obj)
# -
# ### Accessing tuple items
# Items in a tuple can be accessed by indexing or slicing.
# +
# first element
first_elem = tuple_of_obj[0]
print("the first element of the object tuple is: ", first_elem)
# +
# last element
last_elem = tuple_of_obj[-1]
print("the last element of the object tuple is: ", last_elem)
# +
# slicing
tuple_to_slice = ('apple', 'banana', 'grape', 'hello', 'python')
first_3_elements = tuple_to_slice[0:3]
last_2_elements = tuple_to_slice[-2:]
elemets_1_to_3 = tuple_to_slice[1:4]
print("the first three elemets: ", first_3_elements)
print("the last two elemets: ", last_2_elements)
print("elements 1 to 3: ", elemets_1_to_3)
# -
# ### Can we modify a tuple?
# No. A tuple is an immutable object, which means it cannot be modified once instantiated.
# +
# modifying a tuple
immutable_tuple = (1, 2, 3, 4, 5, 6)
# the following is not allowed
# immutable_tuple[1] = 0
# immutable_tuple[-2] = "hello"
# -
# ### Useful operations on tuples
#
# +
tup_1 = (1, 2, 4)
tup_2 = (7, 8, 15, 6)
print("tup_1: ", tup_1)
print("tup_2: ", tup_2)
print("\nOperations on tuples: ")
print("-------------------------")
print("tup1 + tup2 = ", tup_1 + tup_2)
print("tup_1 * 3 = ", tup_1 * 3)
print("7 in tup_2: ", 7 in tup_2)
print("7 in tup_1: ", 7 in tup_1)
| notebooks/Intro-to-tuples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] heading_collapsed=true
# # LAD vs. Skl
# + [markdown] hidden=true
# ## Init
# + hidden=true
from sklearn import linear_model
from f_data_prep import *
from f_drosophila_infer import *
from f_train import *
# + hidden=true
data_all = np.loadtxt('../data_complete.txt')
median = np.median(data_all, axis=0)
# data_all = data_all - median
all_bin = np.vsplit(data_all, 6)
all_init = np.vstack([all_bin[i] for i in range(5)])
all_diff = np.vstack([all_bin[i+1]-all_bin[i] for i in range(5)])
complete_all = ([int(x) - 1 for x in open('../indices_complete.txt','r').readline().split()])
comp_ind = list(map(int, list((np.array(complete_all)[::6]-3)/6)))
data_comp = np.copy(data_all[:, comp_ind])
comp_bin = np.vsplit(data_comp, 6)
comp_init = np.vstack([comp_bin[i] for i in range(5)])
comp_diff = np.vstack([comp_bin[i+1] - comp_bin[i] for i in range(5)])
all_init, all_diff, comp_init, comp_diff = shuffle(all_init, all_diff, comp_init, comp_diff)
# + [markdown] hidden=true
# ## LAD def
# + code_folding=[0, 78, 156, 166] hidden=true
def LAD_cv_quad(X, y, gene_comp=comp_ind, kf=10):
    """K-fold cross-validated LAD (least absolute deviations) fit on a
    quadratic feature expansion of X.

    Parameters
    ----------
    X : (cells, genes) array of initial expression values.
    y : (cells, genes) array of expression differences to predict.
    gene_comp : column indices used to build the quadratic interaction
        terms; pass [] to use every column of X.
    kf : number of cross-validation folds.

    Returns a dict with per-fold training/test fractional errors, per-gene
    and per-cell error summaries, fitted weights/biases and test predictions.
    """
    quad = np.copy(X)
    # NOTE: shuffle=False with a random_state raises a ValueError in
    # scikit-learn >= 0.24 (the seed would have no effect), so it is omitted;
    # the fold assignment is unchanged.
    kfold = KFold(n_splits=kf, shuffle=False)
    train_ferror_list = []
    ferror_list = []
    error_list_gene = []
    error1_list_cell = []
    error2_list_cell = []
    error22_list_cell = []
    w_list = []
    bias_list = []
    yp_list = []
    # Append pairwise interaction terms x_i * x_j to the linear features.
    if len(gene_comp) == 0:
        for i in range(X.shape[1] - 1):
            for j in range(i + 1, X.shape[1]):
                quad = np.hstack((quad, (X[:, i] * X[:, j])[:, None]))
    else:
        # BUG FIX: honour the gene_comp argument instead of the module-level
        # comp_ind global (identical behavior for the existing calls, which
        # all pass gene_comp=comp_ind).
        for i in range(len(gene_comp) - 1):
            for j in range(i + 1, len(gene_comp)):
                quad = np.hstack((quad, (X[:, gene_comp[i]] * X[:, gene_comp[j]])[:, None]))
    for (tr, te) in kfold.split(y):
        X_tr, quad_tr, y_tr = X[tr], quad[tr], y[tr]
        X_te, quad_te, y_te = X[te], quad[te], y[te]
        # When predicting only the complete genes, compare against those columns.
        if y.shape[1] == len(gene_comp):
            X_init = np.copy(X_te[:, gene_comp])
            train_X_init = np.copy(X_tr[:, gene_comp])
        else:
            X_init = np.copy(X_te)
            train_X_init = np.copy(X_tr)
        w, bias = infer_LAD(quad_tr, y_tr)
        w_list.append(w)
        bias_list.append(bias)
        # Training fractional error (L1 norm, matching the LAD objective).
        train_ya = train_X_init + y_tr
        train_yp = train_X_init + bias + quad_tr.dot(w)
        error = np.sum(np.abs(train_yp - train_ya), axis=0) / np.sum(np.abs(train_ya), axis=0)
        train_ferror_list.append(error)
        # Test predictions and the various error summaries.
        ya = X_init + y_te
        yp = X_init + (bias + quad_te.dot(w))
        yp_list.append(yp)
        error = np.sum(np.abs(yp - ya), axis=0) / np.sum(np.abs(ya), axis=0)
        ferror_list.append(error)
        error = np.sum(np.abs(yp - ya), axis=0)
        error_list_gene.append(error)
        error = np.sum(np.abs(yp - ya), axis=1)
        error1_list_cell.append(error)
        error = (np.sum(np.abs(yp - ya) ** 2, axis=1)) ** (1 / 2)
        error2_list_cell.append(error)
        error = np.sum(np.abs(yp - ya) ** 2, axis=1)
        error22_list_cell.append(error)
    dic = {
        'train_ferror': train_ferror_list,
        'ferror': ferror_list,
        'error_gene': error_list_gene,
        'error_cell_l1': error1_list_cell,
        'error_cell_l2': error2_list_cell,
        'error_cell_l22': error22_list_cell,
        'w': w_list,
        'bias': bias_list,
        'y_pred': yp_list
    }
    return dic
def skl_cv_quad(X, y, gene_comp=comp_ind, kf=10):
    """K-fold cross-validated ordinary least squares (scikit-learn
    LinearRegression) on a quadratic feature expansion of X.

    Mirrors LAD_cv_quad(): same inputs and returned error summaries, but the
    fractional errors use the L2 norm (matching the least-squares objective)
    and the fitted model is stored under 'coef'/'intercept' instead of
    'w'/'bias'.
    """
    quad = np.copy(X)
    # NOTE: shuffle=False with a random_state raises a ValueError in
    # scikit-learn >= 0.24, so the (ineffective) seed is omitted;
    # the fold assignment is unchanged.
    kfold = KFold(n_splits=kf, shuffle=False)
    train_ferror_list = []
    ferror_list = []
    error_list_gene = []
    error1_list_cell = []
    error2_list_cell = []
    error22_list_cell = []
    coef = []
    intercept = []
    yp_list = []
    # Append pairwise interaction terms x_i * x_j to the linear features.
    if len(gene_comp) == 0:
        for i in range(X.shape[1] - 1):
            for j in range(i + 1, X.shape[1]):
                quad = np.hstack((quad, (X[:, i] * X[:, j])[:, None]))
    else:
        # BUG FIX: honour the gene_comp argument instead of the module-level
        # comp_ind global (identical behavior for the existing calls).
        for i in range(len(gene_comp) - 1):
            for j in range(i + 1, len(gene_comp)):
                quad = np.hstack((quad, (X[:, gene_comp[i]] * X[:, gene_comp[j]])[:, None]))
    for (tr, te) in kfold.split(y):
        X_tr, quad_tr, y_tr = X[tr], quad[tr], y[tr]
        X_te, quad_te, y_te = X[te], quad[te], y[te]
        # When predicting only the complete genes, compare against those columns.
        if y.shape[1] == len(gene_comp):
            X_init = np.copy(X_te[:, gene_comp])
            train_X_init = np.copy(X_tr[:, gene_comp])
        else:
            X_init = np.copy(X_te)
            train_X_init = np.copy(X_tr)
        regr = linear_model.LinearRegression()
        regr.fit(quad_tr, y_tr)
        coef.append(regr.coef_)
        intercept.append(regr.intercept_)
        # Training fractional error (L2 norm, matching least squares).
        train_ya = train_X_init + y_tr
        train_yp = train_X_init + regr.predict(quad_tr)
        error = (np.sum(np.abs(train_yp - train_ya) ** 2, axis=0) / np.sum(np.abs(train_ya) ** 2, axis=0)) ** (1 / 2)
        train_ferror_list.append(error)
        # Test predictions and the various error summaries.
        ya = X_init + y_te
        yp = X_init + regr.predict(quad_te)
        yp_list.append(yp)
        error = (np.sum(np.abs(yp - ya) ** 2, axis=0) / np.sum(np.abs(ya) ** 2, axis=0)) ** (1 / 2)
        ferror_list.append(error)
        error = (np.sum(np.abs(yp - ya) ** 2, axis=0)) ** (1 / 2)
        error_list_gene.append(error)
        error = np.sum(np.abs(yp - ya), axis=1)
        error1_list_cell.append(error)
        error = (np.sum(np.abs(yp - ya) ** 2, axis=1)) ** (1 / 2)
        error2_list_cell.append(error)
        error = np.sum(np.abs(yp - ya) ** 2, axis=1)
        error22_list_cell.append(error)
    dic = {
        'train_ferror': train_ferror_list,
        'ferror': ferror_list,
        'error_gene': error_list_gene,
        'error_cell_l1': error1_list_cell,
        'error_cell_l2': error2_list_cell,
        'error_cell_l22': error22_list_cell,
        'coef': coef,
        'intercept': intercept,
        'y_pred': yp_list
    }
    return dic
def infer_all_LAD(X_all, X_comp, y_all, y_comp):
    """Run the three LAD experiments and return their result dicts in order.

    The configurations correspond (in order) to the '(99,27) to 99',
    '(99,27) to 27' and '(27,27) to 27' settings used in the plots below.
    """
    runs = [
        (X_all, y_all, comp_ind),
        (X_all, y_comp, comp_ind),
        (X_comp, y_comp, []),
    ]
    return [LAD_cv_quad(X, y, gene_comp=g, kf=10) for (X, y, g) in runs]
def infer_all_skl(X_all, X_comp, y_all, y_comp):
    """Run the three least-squares experiments, mirroring infer_all_LAD()."""
    runs = [
        (X_all, y_all, comp_ind),
        (X_all, y_comp, comp_ind),
        (X_comp, y_comp, []),
    ]
    return [skl_cv_quad(X, y, gene_comp=g, kf=10) for (X, y, g) in runs]
# + [markdown] hidden=true
# ## Run & Save
# + hidden=true
# LAD_quad = infer_all_LAD(all_init, comp_init, all_diff, comp_diff)
# skl_quad = infer_all_skl(all_init, comp_init, all_diff, comp_diff)
# + hidden=true
# with open('./pickles/LAD_quad.pkl', 'wb') as f:
# pickle.dump(LAD_quad, f)
# with open('./pickles/skl_quad.pkl', 'wb') as f:
# pickle.dump(skl_quad, f)
# + code_folding=[] hidden=true
with open('./pickles/LAD_quad.pkl', 'rb') as f:
LAD_quad = pickle.load(f)
with open('./pickles/skl_quad.pkl', 'rb') as f:
skl_quad = pickle.load(f)
# + [markdown] hidden=true
# ## Results
# + [markdown] hidden=true
# ### LAD vs. Skl: L1 error and L2 error
# # + the Kolmogorov-Smirnov (KS) test
# + hidden=true
import seaborn as sns
matplotlib.rcParams.update({'font.size': 22})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[0]['error_cell_l1']), label='LAD')
sns.distplot(np.hstack(skl_quad[0]['error_cell_l1']), label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.title('absolute error (L-1 norm)')
plt.show()
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[0]['error_cell_l2']), label='LAD')
sns.distplot(np.hstack(skl_quad[0]['error_cell_l2']), label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.title('absolute error (L-2 norm)')
plt.show()
import scipy.stats as stats
print(stats.ks_2samp(np.hstack(LAD_quad[0]['error_cell_l1']), np.hstack(skl_quad[0]['error_cell_l1'])))
print(stats.ks_2samp(np.hstack(LAD_quad[0]['error_cell_l2']), np.hstack(skl_quad[0]['error_cell_l2'])))
# + hidden=true
# fig = plt.figure(figsize=(8,12))
# ax1=fig.add_subplot(2,1,1)
# sns.distplot(np.hstack(LAD_quad[0][2]), bins=100, label='LAD')
# sns.distplot(np.hstack(skl_quad[0][3]), bins=100, label='skl')
# ax1.legend(bbox_to_anchor=(1,0.5))
# ax1.set_title('absolute error (L-1 norm)')
# ax2=fig.add_subplot(2,1,2)
# sns.distplot(np.hstack(LAD_quad[0][3]), bins=100, label='LAD')
# sns.distplot(np.hstack(skl_quad[0][2]), bins=100, label='skl')
# ax2.legend(bbox_to_anchor=(1,0.5))
# ax2.set_title('absolute error (L-2 norm)')
# # plt.show()
# plt.savefig('LAD vs. skl.pdf')
# + [markdown] hidden=true
# ### LAD vs. skl: fractional errors
# + code_folding=[] hidden=true
% matplotlib inline
matplotlib.rcParams.update({'font.size': 22})
labels=['(99,27) to 99', '(99,27) to 27', '(27,27) to 27']
for i in range(3):
plt.figure(figsize=(12,6))
if i == 0:
plt.plot(comp_ind, np.mean([LAD_quad[i]['ferror'][j][comp_ind] for j in range(10)], axis=0), 'o--', label='LAD')
plt.plot(comp_ind, np.mean([skl_quad[i]['ferror'][j][comp_ind] for j in range(10)], axis=0), 'o--', label='skl')
else:
plt.plot(comp_ind, np.mean(LAD_quad[i]['ferror'], axis=0), 'o--', label='LAD')
plt.plot(comp_ind, np.mean(skl_quad[i]['ferror'], axis=0), 'o--', label='skl')
plt.title(labels[i])
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('gene')
plt.ylabel('fractional error')
# plt.ylim(0,1)
plt.show()
plt.figure(figsize=(12,6))
plt.plot(range(99), np.mean(LAD_quad[0]['ferror'], axis=0), 'o--', label='LAD')
plt.plot(range(99), np.mean(skl_quad[0]['ferror'], axis=0), 'o--', label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.title('(99,27) to 99')
plt.xlabel('gene')
plt.ylabel('fractional error')
# plt.ylim(0,1)
plt.show()
# + [markdown] hidden=true
# ### LAD (99,27) to 27 vs. (27,27) to 27: fractional errors
# + hidden=true
% matplotlib inline
matplotlib.rcParams.update({'font.size': 28})
labels=['(99,27) to 99', '(99,27) to 27', '(27,27) to 27']
plt.figure(figsize=(12,6))
# plt.plot(comp_ind, np.mean([LAD_quad[0]['ferror'][j][comp_ind] for j in range(10)], axis=0), 'o--', label=labels[0])
plt.plot(comp_ind, np.mean(LAD_quad[1]['ferror'], axis=0), 'o--', label=labels[1])
plt.plot(comp_ind, np.mean(LAD_quad[2]['ferror'], axis=0), 'o--', label=labels[2])
plt.legend(loc='best')
plt.xlabel('gene')
plt.ylabel('fractional error')
plt.ylim(0,0.58)
# plt.ylim(0,1)
plt.show()
# plt.savefig('99vs27LAD.pdf', bbox_inches='tight')
# + hidden=true
% matplotlib inline
matplotlib.rcParams.update({'font.size': 28})
labels=['(99,27) to 99', '(99,27) to 27', '(27,27) to 27']
plt.figure(figsize=(12,6))
# plt.plot(comp_ind, np.mean([skl_quad[0]['ferror'][j][comp_ind] for j in range(10)], axis=0), 'o--', label=labels[0])
plt.plot(comp_ind, np.mean(skl_quad[1]['ferror'], axis=0), 'o--', linewidth=3, label=labels[1])
# plt.plot(comp_ind, np.mean(skl_quad[2]['ferror'], axis=0), 'o--', linewidth=3, label=labels[2])
plt.legend(bbox_to_anchor=(0.47,1))
plt.xlabel('gene')
plt.ylabel('fractional error')
plt.ylim(0,0.63)
plt.show()
# plt.savefig('99skl.pdf', bbox_inches='tight')
# + hidden=true
% matplotlib inline
matplotlib.rcParams.update({'font.size': 28})
labels=['(99,27) to 99', '(99,27) to 27', '(27,27) to 27']
plt.figure(figsize=(12,6))
# plt.plot(comp_ind, np.mean([skl_quad[0]['ferror'][j][comp_ind] for j in range(10)], axis=0), 'o--', label=labels[0])
plt.plot(comp_ind, np.mean(skl_quad[1]['ferror'], axis=0), 'o--', linewidth=3, label=labels[1])
plt.plot(comp_ind, np.mean(skl_quad[2]['ferror'], axis=0), 'o--', linewidth=3, label=labels[2])
plt.legend(bbox_to_anchor=(0.47,1))
plt.xlabel('gene')
plt.ylabel('fractional error')
plt.ylim(0,0.63)
plt.show()
# plt.savefig('99vs27skl.pdf', bbox_inches='tight')
# + [markdown] hidden=true
# ### (99,27) to 99 LAD vs. skl: $L_{1}$ and $L_{2}^{2}$
# + hidden=true
matplotlib.rcParams.update({'font.size': 22})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[0]['error_cell_l1']), bins=100, label='LAD')
sns.distplot(np.hstack(skl_quad[0]['error_cell_l1']), bins=100, label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Absolute errors')
plt.show()
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[0]['error_cell_l22']), label='LAD')
sns.distplot(np.hstack(skl_quad[0]['error_cell_l22']), label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Squares of the errors')
plt.show()
import scipy.stats as stats
print(stats.ks_2samp(np.hstack(LAD_quad[0]['error_cell_l1']), np.hstack(skl_quad[0]['error_cell_l1'])))
print(stats.ks_2samp(np.hstack(LAD_quad[0]['error_cell_l22']), np.hstack(skl_quad[0]['error_cell_l22'])))
print(stats.ttest_ind(np.hstack(LAD_quad[0]['error_cell_l1']), np.hstack(skl_quad[0]['error_cell_l1']), equal_var=False))
print(stats.ttest_ind(np.hstack(LAD_quad[0]['error_cell_l22']), np.hstack(skl_quad[0]['error_cell_l22']), equal_var=False))
print(stats.mannwhitneyu(np.hstack(LAD_quad[0]['error_cell_l1']), np.hstack(skl_quad[0]['error_cell_l1'])))
print(stats.mannwhitneyu(np.hstack(LAD_quad[0]['error_cell_l22']), np.hstack(skl_quad[0]['error_cell_l22'])))
# + [markdown] hidden=true
# ### (99,27) to 27 LAD vs. skl: $L_{1}$ and $L_{2}^{2}$
# + hidden=true
matplotlib.rcParams.update({'font.size': 28})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[1]['error_cell_l1']), bins=100, label='LAD')
sns.distplot(np.hstack(skl_quad[1]['error_cell_l1']), bins=100, label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Sums of Absolute Errors')
plt.show()
# plt.savefig('(99,27)to27_L1.pdf', bbox_inches='tight')
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[1]['error_cell_l22']), label='LAD')
sns.distplot(np.hstack(skl_quad[1]['error_cell_l22']), label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Sums of Squares of Errors')
plt.show()
# plt.savefig('(99,27)to27_L22.pdf', bbox_inches='tight')
import scipy.stats as stats
print(stats.ks_2samp(np.hstack(LAD_quad[1]['error_cell_l1']), np.hstack(skl_quad[1]['error_cell_l1'])))
# Tail of the "(99,27) to 27" comparison: KS test on squared-L2 errors,
# Welch t-tests, one-sided Mann-Whitney U tests ("LAD < skl"), and
# median/mean summaries of the per-cell error distributions.
print(stats.ks_2samp(np.hstack(LAD_quad[1]['error_cell_l22']), np.hstack(skl_quad[1]['error_cell_l22'])))
print(stats.ttest_ind(np.hstack(LAD_quad[1]['error_cell_l1']), np.hstack(skl_quad[1]['error_cell_l1']), equal_var=False))
print(stats.ttest_ind(np.hstack(LAD_quad[1]['error_cell_l22']), np.hstack(skl_quad[1]['error_cell_l22']), equal_var=False))
print(stats.mannwhitneyu(np.hstack(LAD_quad[1]['error_cell_l1']), np.hstack(skl_quad[1]['error_cell_l1']), alternative='less'))
print(stats.mannwhitneyu(np.hstack(LAD_quad[1]['error_cell_l22']), np.hstack(skl_quad[1]['error_cell_l22']), alternative='less'))
print(np.median(np.hstack(LAD_quad[1]['error_cell_l1'])), np.median(np.hstack(skl_quad[1]['error_cell_l1'])))
print(np.median(np.hstack(LAD_quad[1]['error_cell_l22'])), np.median(np.hstack(skl_quad[1]['error_cell_l22'])))
print(np.mean(np.hstack(LAD_quad[1]['error_cell_l1'])), np.mean(np.hstack(skl_quad[1]['error_cell_l1'])))
print(np.mean(np.hstack(LAD_quad[1]['error_cell_l22'])), np.mean(np.hstack(skl_quad[1]['error_cell_l22'])))
# + [markdown] hidden=true
# ### (27,27) to 27 LAD vs. skl: $L_{1}$ and $L_{2}^{2}$
# + hidden=true
# Per-cell L1 and squared-L2 error distributions for configuration [2]
# ("(27,27) to 27"), LAD vs. sklearn least squares.
matplotlib.rcParams.update({'font.size': 28})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[2]['error_cell_l1']), bins=100, label='LAD', kde_kws=dict(linewidth=3))
sns.distplot(np.hstack(skl_quad[2]['error_cell_l1']), bins=100, label='skl', kde_kws=dict(linewidth=3))
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Sums of Absolute Errors')
plt.show()
# plt.savefig('(27,27)to27_L1.pdf', bbox_inches='tight')
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[2]['error_cell_l22']), label='LAD', kde_kws=dict(linewidth=3))
sns.distplot(np.hstack(skl_quad[2]['error_cell_l22']), label='skl', kde_kws=dict(linewidth=3))
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Sums of Squares of Errors')
plt.show()
# plt.savefig('(27,27)to27_L22.pdf', bbox_inches='tight')
import scipy.stats as stats
print(stats.ks_2samp(np.hstack(LAD_quad[2]['error_cell_l1']), np.hstack(skl_quad[2]['error_cell_l1'])))
print(stats.ks_2samp(np.hstack(LAD_quad[2]['error_cell_l22']), np.hstack(skl_quad[2]['error_cell_l22'])))
print(stats.ttest_ind(np.hstack(LAD_quad[2]['error_cell_l1']), np.hstack(skl_quad[2]['error_cell_l1']), equal_var=False))
print(stats.ttest_ind(np.hstack(LAD_quad[2]['error_cell_l22']), np.hstack(skl_quad[2]['error_cell_l22']), equal_var=False))
print(stats.mannwhitneyu(np.hstack(LAD_quad[2]['error_cell_l1']), np.hstack(skl_quad[2]['error_cell_l1']), alternative='less'))
print(stats.mannwhitneyu(np.hstack(LAD_quad[2]['error_cell_l22']), np.hstack(skl_quad[2]['error_cell_l22']), alternative='less'))
# FIX: the four summary lines below previously re-printed the [1]
# ("(99,27) to 27") results; this cell is about configuration [2].
print(np.median(np.hstack(LAD_quad[2]['error_cell_l1'])), np.median(np.hstack(skl_quad[2]['error_cell_l1'])))
print(np.median(np.hstack(LAD_quad[2]['error_cell_l22'])), np.median(np.hstack(skl_quad[2]['error_cell_l22'])))
print(np.mean(np.hstack(LAD_quad[2]['error_cell_l1'])), np.mean(np.hstack(skl_quad[2]['error_cell_l1'])))
print(np.mean(np.hstack(LAD_quad[2]['error_cell_l22'])), np.mean(np.hstack(skl_quad[2]['error_cell_l22'])))
# -
# # Cell_select LAD vs. Skl
# - 10 fold crossvalidation
# - 607 (608) cells were excluded for each training, and used as testing cells
# ### Init
# +
from sklearn import linear_model
from f_data_prep import *
from f_drosophila_infer import *
from f_train import *
import seaborn as sns
# +
# Load the expression dataset: rows are cells stacked over 6 time bins
# (np.vsplit below), columns are genes — presumably 99 total with a
# 27-gene "complete" subset, per the (99,27)/(27,27) labels used later;
# TODO confirm against data_complete.txt.
data_all = np.loadtxt('../data_complete.txt')
median = np.median(data_all, axis=0)
# data_all = data_all - median
# Inputs are bins 0-4; targets are the bin-to-bin differences.
all_bin = np.vsplit(data_all, 6)
all_init = np.vstack([all_bin[i] for i in range(5)])
all_diff = np.vstack([all_bin[i+1]-all_bin[i] for i in range(5)])
# Column indices of the "complete" genes. The [::6]-3)/6 arithmetic maps the
# raw 1-based indices in indices_complete.txt to column positions —
# NOTE(review): file-format specific; verify against indices_complete.txt.
complete_all = ([int(x) - 1 for x in open('../indices_complete.txt','r').readline().split()])
comp_ind = list(map(int, list((np.array(complete_all)[::6]-3)/6)))
data_comp = np.copy(data_all[:, comp_ind])
comp_bin = np.vsplit(data_comp, 6)
comp_init = np.vstack([comp_bin[i] for i in range(5)])
comp_diff = np.vstack([comp_bin[i+1] - comp_bin[i] for i in range(5)])
# -
# ## Def
# +
def LAD_cv_quad_cellselect(X, y, gene_comp=comp_ind, kf=10, n_cells=6078):
    """Cell-held-out k-fold CV of the LAD (least absolute deviations) quadratic model.

    Each fold excludes a contiguous block of cells from *every* time bin,
    fits on the remaining rows, and evaluates on the held-out cells.

    Parameters
    ----------
    X : array, shape (n_cells * n_bins, n_genes) — initial expression values.
    y : array of expression differences to predict. If ``y`` has as many
        columns as ``gene_comp``, predictions are reconstructed on the
        ``gene_comp`` subset of ``X``; otherwise on all of ``X``.
    gene_comp : column indices used to build the quadratic (pairwise-product)
        features; pass ``[]`` to use every column of ``X``.
    kf : number of cross-validation folds over cells.
    n_cells : cells per time bin (6078 for this dataset; now a parameter
        instead of a hard-coded constant).

    Returns
    -------
    dict with per-fold train/test fractional errors, per-gene and per-cell
    error summaries, fitted weights/biases, and predictions.
    """
    # Quadratic feature matrix: X augmented with all pairwise products of
    # the selected columns.
    # FIX: the else-branch previously ignored the ``gene_comp`` parameter
    # and used the module-level ``comp_ind`` directly.
    cols = list(range(X.shape[1])) if len(gene_comp) == 0 else list(gene_comp)
    quad = np.copy(X)
    for a in range(len(cols) - 1):
        for b in range(a + 1, len(cols)):
            quad = np.hstack((quad, (X[:, cols[a]] * X[:, cols[b]])[:, None]))
    kfold = KFold(n_splits=kf, shuffle=False)
    n_bins = X.shape[0] // n_cells  # number of stacked time-bin copies (5 here)
    train_ferror_list = []
    ferror_list = []
    error_list_gene = []
    error1_list_cell = []
    error2_list_cell = []
    error22_list_cell = []
    w_list = []
    bias_list = []
    yp_list = []
    for (cell_in, cell_ex) in kfold.split(range(n_cells)):
        # Hold out the same cells in every time bin.
        te = np.hstack([cell_ex + (n_cells * i) for i in range(n_bins)])
        tr = np.delete(np.arange(X.shape[0]), te)
        X_tr, quad_tr, y_tr = X[tr], quad[tr], y[tr]
        X_te, quad_te, y_te = X[te], quad[te], y[te]
        # The model predicts differences; reconstruct absolute expression by
        # adding back the initial values of the genes y refers to.
        if y.shape[1] == len(gene_comp):
            X_init = np.copy(X_te[:, gene_comp])
            train_X_init = np.copy(X_tr[:, gene_comp])
        else:
            X_init = np.copy(X_te)
            train_X_init = np.copy(X_tr)
        w, bias = infer_LAD(quad_tr, y_tr)
        w_list.append(w)
        bias_list.append(bias)
        # Training fractional error (L1-based).
        train_yp = train_X_init + (bias + quad_tr.dot(w))
        train_ya = train_X_init + y_tr
        train_ferror_list.append(
            np.sum(np.abs(train_yp - train_ya), axis=0) / np.sum(np.abs(train_ya), axis=0))
        # Test errors.
        ya = X_init + y_te
        yp = X_init + (bias + quad_te.dot(w))
        yp_list.append(yp)
        ferror_list.append(np.sum(np.abs(yp - ya), axis=0) / np.sum(np.abs(ya), axis=0))
        error_list_gene.append(np.sum(np.abs(yp - ya), axis=0))             # per-gene L1
        error1_list_cell.append(np.sum(np.abs(yp - ya), axis=1))            # per-cell L1
        error2_list_cell.append(np.sum(np.abs(yp - ya)**2, axis=1)**(1/2))  # per-cell L2
        error22_list_cell.append(np.sum(np.abs(yp - ya)**2, axis=1))        # per-cell squared L2
    return {
        'train_ferror': train_ferror_list,
        'ferror': ferror_list,
        'error_gene': error_list_gene,
        'error_cell_l1': error1_list_cell,
        'error_cell_l2': error2_list_cell,
        'error_cell_l22': error22_list_cell,
        'w': w_list,
        'bias': bias_list,
        'y_pred': yp_list
    }
def skl_cv_quad_cellselect(X, y, gene_comp=comp_ind, kf=10, n_cells=6078):
    """Cell-held-out k-fold CV of an ordinary least-squares quadratic model.

    Same protocol as ``LAD_cv_quad_cellselect`` but fits
    ``sklearn.linear_model.LinearRegression`` and reports L2-based
    fractional / per-gene errors.

    Parameters
    ----------
    X : array, shape (n_cells * n_bins, n_genes) — initial expression values.
    y : array of expression differences to predict (see the LAD twin).
    gene_comp : column indices used for the quadratic features; ``[]`` means
        all columns of ``X``.
    kf : number of folds over cells.
    n_cells : cells per time bin (6078 for this dataset).

    Returns
    -------
    dict mirroring ``LAD_cv_quad_cellselect`` ('w' holds coef_, 'bias'
    holds intercept_).
    """
    # FIX: ``random_state=1`` with ``shuffle=False`` is rejected by
    # scikit-learn >= 0.24 (ValueError) and had no effect before; dropped so
    # the folds also match the LAD version exactly.
    kfold = KFold(n_splits=kf, shuffle=False)
    # FIX: use the ``gene_comp`` parameter (previously the global
    # ``comp_ind`` was hard-coded in the else-branch and X_init slices).
    cols = list(range(X.shape[1])) if len(gene_comp) == 0 else list(gene_comp)
    quad = np.copy(X)
    for a in range(len(cols) - 1):
        for b in range(a + 1, len(cols)):
            quad = np.hstack((quad, (X[:, cols[a]] * X[:, cols[b]])[:, None]))
    n_bins = X.shape[0] // n_cells
    train_ferror_list = []
    ferror_list = []
    error_list_gene = []
    error1_list_cell = []
    error2_list_cell = []
    error22_list_cell = []
    coef = []
    intercept = []
    yp_list = []
    for (cell_in, cell_ex) in kfold.split(range(n_cells)):
        te = np.hstack([cell_ex + (n_cells * i) for i in range(n_bins)])
        tr = np.delete(np.arange(X.shape[0]), te)
        X_tr, quad_tr, y_tr = X[tr], quad[tr], y[tr]
        X_te, quad_te, y_te = X[te], quad[te], y[te]
        if y.shape[1] == len(gene_comp):
            X_init = np.copy(X_te[:, gene_comp])
            train_X_init = np.copy(X_tr[:, gene_comp])
        else:
            X_init = np.copy(X_te)
            train_X_init = np.copy(X_tr)
        regr = linear_model.LinearRegression()
        regr.fit(quad_tr, y_tr)
        coef.append(regr.coef_)
        intercept.append(regr.intercept_)
        # Training fractional error (L2-based, unlike the LAD version).
        train_ya = train_X_init + y_tr
        train_yp = train_X_init + regr.predict(quad_tr)
        train_ferror_list.append(
            (np.sum(np.abs(train_yp - train_ya)**2, axis=0) / np.sum(np.abs(train_ya)**2, axis=0))**(1/2))
        # Test errors.
        ya = X_init + y_te
        yp = X_init + regr.predict(quad_te)
        yp_list.append(yp)
        ferror_list.append(
            (np.sum(np.abs(yp - ya)**2, axis=0) / np.sum(np.abs(ya)**2, axis=0))**(1/2))
        error_list_gene.append(np.sum(np.abs(yp - ya)**2, axis=0)**(1/2))   # per-gene L2
        error1_list_cell.append(np.sum(np.abs(yp - ya), axis=1))            # per-cell L1
        error2_list_cell.append(np.sum(np.abs(yp - ya)**2, axis=1)**(1/2))  # per-cell L2
        error22_list_cell.append(np.sum(np.abs(yp - ya)**2, axis=1))        # per-cell squared L2
    return {
        'train_ferror': train_ferror_list,
        'ferror': ferror_list,
        'error_gene': error_list_gene,
        'error_cell_l1': error1_list_cell,
        'error_cell_l2': error2_list_cell,
        'error_cell_l22': error22_list_cell,
        'w': coef,
        'bias': intercept,
        'y_pred': yp_list
    }
def infer_all_LAD(X_all, X_comp, y_all, y_comp):
    """Run the LAD cell-select CV for the three configurations:
    (99,27)->99, (99,27)->27 and (27,27)->27, in that order."""
    runs = (
        (X_all, y_all, comp_ind),
        (X_all, y_comp, comp_ind),
        (X_comp, y_comp, []),
    )
    return [LAD_cv_quad_cellselect(Xi, yi, gene_comp=gc, kf=10)
            for Xi, yi, gc in runs]
def infer_all_skl(X_all, X_comp, y_all, y_comp):
    """Run the sklearn cell-select CV for the three configurations:
    (99,27)->99, (99,27)->27 and (27,27)->27, in that order."""
    runs = (
        (X_all, y_all, comp_ind),
        (X_all, y_comp, comp_ind),
        (X_comp, y_comp, []),
    )
    return [skl_cv_quad_cellselect(Xi, yi, gene_comp=gc, kf=10)
            for Xi, yi, gc in runs]
# -
# ## Run & Save
# +
# Running the full CV is slow; the calls are commented out and results are
# loaded from pickles below.
# LAD_quad_cellselect = infer_all_LAD(all_init, comp_init, all_diff, comp_diff)
# skl_quad_cellselect = infer_all_skl(all_init, comp_init, all_diff, comp_diff)
# +
# with open('./pickles/LAD_quad_cellselect.pkl', 'wb') as f:
#     pickle.dump(LAD_quad_cellselect, f)
# with open('./pickles/skl_quad_cellselect.pkl', 'wb') as f:
#     pickle.dump(skl_quad_cellselect, f)
# +
# Load precomputed results; each is a 3-element list ordered
# [(99,27)->99, (99,27)->27, (27,27)->27].
with open('./pickles/LAD_quad_cellselect.pkl', 'rb') as f:
    LAD_quad = pickle.load(f)
with open('./pickles/skl_quad_cellselect.pkl', 'rb') as f:
    skl_quad = pickle.load(f)
# -
# ## Results
# Mean per-gene fractional error across folds, LAD, for the two
# 27-gene-output configurations.
matplotlib.rcParams.update({'font.size': 22})
labels=['(99,27) to 99', '(99,27) to 27', '(27,27) to 27']
plt.figure(figsize=(12,6))
plt.plot(comp_ind, np.mean(LAD_quad[1]['ferror'], axis=0), 'o--', label=labels[1])
plt.plot(comp_ind, np.mean(LAD_quad[2]['ferror'], axis=0), 'o--', label=labels[2])
plt.legend(loc='best')
plt.xlabel('Gene')
plt.ylabel('Fractional error')
plt.show()
# ### (99,27) to 99 LAD vs. skl: $L_{1}$ and $L_{2}^{2}$
# +
# Per-cell error distributions and two-sample tests for configuration [0].
matplotlib.rcParams.update({'font.size': 22})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[0]['error_cell_l1']), bins=100, label='LAD')
sns.distplot(np.hstack(skl_quad[0]['error_cell_l1']), bins=100, label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Absolute errors')
plt.show()
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[0]['error_cell_l22']), label='LAD')
sns.distplot(np.hstack(skl_quad[0]['error_cell_l22']), label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Squares of the errors')
plt.show()
import scipy.stats as stats
print(stats.ks_2samp(np.hstack(LAD_quad[0]['error_cell_l1']), np.hstack(skl_quad[0]['error_cell_l1'])))
print(stats.ks_2samp(np.hstack(LAD_quad[0]['error_cell_l22']), np.hstack(skl_quad[0]['error_cell_l22'])))
print(stats.ttest_ind(np.hstack(LAD_quad[0]['error_cell_l1']), np.hstack(skl_quad[0]['error_cell_l1']), equal_var=False))
print(stats.ttest_ind(np.hstack(LAD_quad[0]['error_cell_l22']), np.hstack(skl_quad[0]['error_cell_l22']), equal_var=False))
# NOTE(review): no `alternative` is passed here, unlike the earlier cells that
# use alternative='less'; the scipy-version-dependent default applies —
# confirm which alternative is intended.
print(stats.mannwhitneyu(np.hstack(LAD_quad[0]['error_cell_l1']), np.hstack(skl_quad[0]['error_cell_l1'])))
print(stats.mannwhitneyu(np.hstack(LAD_quad[0]['error_cell_l22']), np.hstack(skl_quad[0]['error_cell_l22'])))
# -
# ### (99,27) to 27, LAD vs. skl: $L_{1}$ and $L_{2}^{2}$
# +
# Same plots/tests for configuration [1].
matplotlib.rcParams.update({'font.size': 22})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[1]['error_cell_l1']), bins=100, label='LAD')
sns.distplot(np.hstack(skl_quad[1]['error_cell_l1']), bins=100, label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Absolute errors')
plt.show()
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[1]['error_cell_l22']), label='LAD')
sns.distplot(np.hstack(skl_quad[1]['error_cell_l22']), label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Squares of the errors')
plt.show()
import scipy.stats as stats
print(stats.ks_2samp(np.hstack(LAD_quad[1]['error_cell_l1']), np.hstack(skl_quad[1]['error_cell_l1'])))
print(stats.ks_2samp(np.hstack(LAD_quad[1]['error_cell_l22']), np.hstack(skl_quad[1]['error_cell_l22'])))
print(stats.ttest_ind(np.hstack(LAD_quad[1]['error_cell_l1']), np.hstack(skl_quad[1]['error_cell_l1']), equal_var=False))
print(stats.ttest_ind(np.hstack(LAD_quad[1]['error_cell_l22']), np.hstack(skl_quad[1]['error_cell_l22']), equal_var=False))
print(stats.mannwhitneyu(np.hstack(LAD_quad[1]['error_cell_l1']), np.hstack(skl_quad[1]['error_cell_l1'])))
print(stats.mannwhitneyu(np.hstack(LAD_quad[1]['error_cell_l22']), np.hstack(skl_quad[1]['error_cell_l22'])))
# -
# ### (27,27) to 27 LAD vs. skl: $L_{1}$ and $L_{2}^{2}$
# +
# Same plots/tests for configuration [2].
matplotlib.rcParams.update({'font.size': 24})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[2]['error_cell_l1']), bins=100, label='LAD')
sns.distplot(np.hstack(skl_quad[2]['error_cell_l1']), bins=100, label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Absolute errors')
plt.show()
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[2]['error_cell_l22']), label='LAD')
sns.distplot(np.hstack(skl_quad[2]['error_cell_l22']), label='skl')
plt.legend(bbox_to_anchor=(1,0.5))
plt.xlabel('error')
plt.ylabel('probability density')
plt.title('Squares of the errors')
plt.show()
import scipy.stats as stats
print(stats.ks_2samp(np.hstack(LAD_quad[2]['error_cell_l1']), np.hstack(skl_quad[2]['error_cell_l1'])))
print(stats.ks_2samp(np.hstack(LAD_quad[2]['error_cell_l22']), np.hstack(skl_quad[2]['error_cell_l22'])))
print(stats.ttest_ind(np.hstack(LAD_quad[2]['error_cell_l1']), np.hstack(skl_quad[2]['error_cell_l1']), equal_var=False))
print(stats.ttest_ind(np.hstack(LAD_quad[2]['error_cell_l22']), np.hstack(skl_quad[2]['error_cell_l22']), equal_var=False))
print(stats.mannwhitneyu(np.hstack(LAD_quad[2]['error_cell_l1']), np.hstack(skl_quad[2]['error_cell_l1'])))
print(stats.mannwhitneyu(np.hstack(LAD_quad[2]['error_cell_l22']), np.hstack(skl_quad[2]['error_cell_l22'])))
# -
# ### for .pdf
# - (99,27) to 99 LAD vs. skl: $L_{1}$ and $L_{2}^{2}$
# +
# matplotlib.rcParams.update({'font.size': 22})
# fig = plt.figure(figsize=(10,6))
# ax1=fig.add_subplot(1,2,1)
# sns.distplot(np.hstack(LAD_quad[0]['error_cell_l1']), bins=100, label='LAD')
# sns.distplot(np.hstack(skl_quad[0]['error_cell_l1']), bins=100, label='skl')
# ax1.legend(bbox_to_anchor=(1,1))
# ax1.set_title('L1 norm of the errors')
# ax2=fig.add_subplot(1,2,2)
# sns.distplot(np.hstack(LAD_quad[0]['error_cell_l22']), label='LAD')
# sns.distplot(np.hstack(skl_quad[0]['error_cell_l22']), label='skl')
# ax2.legend(bbox_to_anchor=(1,1))
# ax2.set_title('L2 norm of the errors')
# # plt.show()
# plt.savefig('LAD vs. skl cell_ex.pdf', bbox_inches='tight')
# -
# ### LAD (99,27) to 27 vs. (27,27) to 27: $L_{1}$
# LAD per-cell L1 error distributions for the two 27-gene-output configurations.
matplotlib.rcParams.update({'font.size': 22})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(LAD_quad[1]['error_cell_l1']), label='LAD, (99,27) to 27')
sns.distplot(np.hstack(LAD_quad[2]['error_cell_l1']), label='LAD, (27,27) to 27')
plt.legend(bbox_to_anchor=(1,0.5))
# FIX: the data plotted are 'error_cell_l1'; the title previously said "L-2 norm".
plt.title('absolute error (L-1 norm)')
plt.show()
# ### skl (99,27) to 27 vs. (27,27) to 27: $L_{1}$
# sklearn per-cell L1 error distributions for the two 27-gene-output configurations.
matplotlib.rcParams.update({'font.size': 22})
plt.figure(figsize=(8,6))
sns.distplot(np.hstack(skl_quad[1]['error_cell_l1']), label='skl, (99,27) to 27')
sns.distplot(np.hstack(skl_quad[2]['error_cell_l1']), label='skl, (27,27) to 27')
plt.legend(bbox_to_anchor=(1,0.5))
# FIX: the data plotted are 'error_cell_l1'; the title previously said "L-2 norm".
plt.title('absolute error (L-1 norm)')
plt.show()
| LAD vs. skl cellselect +quad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Put these at the top of every notebook, to get automatic reloading and inline plotting
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai.conv_learner import *
PATH = "data/cifar10/"
os.makedirs(PATH,exist_ok=True)
# CIFAR-10 class labels.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Per-channel (mean, std) used for input normalization — presumably the
# standard CIFAR-10 channel statistics; TODO confirm.
stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))
def get_data(sz, bs):
    """Build a CIFAR-10 data object for image size ``sz`` and batch size ``bs``."""
    transforms = tfms_from_stats(stats, sz, aug_tfms=[RandomFlip()], pad=sz//8)
    return ImageClassifierData.from_paths(PATH, val_name='test', tfms=transforms, bs=bs)
bs=128
# NOTE(review): batch size 4 here (not bs) — presumably a tiny batch just for
# visualizing a few denormalized samples below; confirm this is intended.
data = get_data(32,4)
x,y=next(iter(data.trn_dl))
plt.imshow(data.trn_ds.denorm(x)[0]);
plt.imshow(data.trn_ds.denorm(x)[1]);
# +
# ResNeXt-29 (8x64d) model wrapped for fastai; requires a CUDA device.
from fastai.models.cifar10.resnext import resnext29_8_64
m = resnext29_8_64()
bm = BasicModel(m.cuda(), name='cifar10_rn29_8_64')
# -
# Progressive resizing: train first at 8x8 images with a larger batch.
data = get_data(8,bs*4)
learn = ConvLearner(data, bm)
learn.unfreeze()
lr=1e-2; wd=5e-4
# Learning-rate finder, then SGDR-style cycles.
learn.lr_find()
learn.sched.plot()
# %time learn.fit(lr, 1)
learn.fit(lr, 2, cycle_len=1)
learn.fit(lr, 3, cycle_len=1, cycle_mult=2, wds=wd)
learn.save('8x8_8')
learn.load('8x8_8')
# Continue at 16x16 resolution.
learn.set_data(get_data(16,bs*2))
# %time learn.fit(1e-3, 1, wds=wd)
learn.unfreeze()
learn.lr_find()
learn.sched.plot()
lr=1e-2
learn.fit(lr, 2, cycle_len=1, wds=wd)
learn.fit(lr, 3, cycle_len=1, cycle_mult=2, wds=wd)
learn.save('16x16_8')
learn.load('16x16_8')
| courses/nik/VisualizeConvNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/csaybar/EEwPython/blob/master/dnn_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="osShnOZVo8Bw"
# <!--COURSE_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="https://sitejerk.com/images/google-earth-logo-png-5.png" width=5% >
# <img align="right" style="padding-left:10px;" src="https://colab.research.google.com/img/colab_favicon_256px.png" width=6% >
#
#
# >> *This notebook is part of the free course [EEwPython](https://colab.research.google.com/github/csaybar/EEwPython/blob/master/index.ipynb); the content is available [on GitHub](https://github.com/csaybar/EEwPython)* and released under the [Apache 2.0 License](https://www.gnu.org/licenses/gpl-3.0.en.html).
# + [markdown] colab_type="text" id="fKwh6Dqxo_Ll"
# <!--NAVIGATION-->
# < [Climate Change - CMIP5](cmip5.ipynb) | [Contents](index.ipynb) | [Crop Area Prediction Using a Convolutional Neural Network](CNN_demo.ipynb)>
#
# <a href="https://colab.research.google.com/github/csaybar/EEwPython/blob/master/dnn_demo.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
# + [markdown] colab_type="text" id="Su_tIldcds-V"
# <center>
# <h1>Google Earth Engine with Python </h1>
# <h2> Integrating Earth Engine with Tensorflow I - DNN </h2>
# </center>
#
# This notebook has been inspired by the [Chris Brown & <NAME> EarthEngine + Tensorflow presentation](https://www.youtube.com/watch?v=w-1xfF0IaeU). It shows the step by step how to integrate Google Earth Engine and TensorFlow 2.0 in the same pipeline (EE->Tensorflow->EE).
#
# ```
# FILE: dnn_demo.ipynb
# AUTHOR: <NAME>
# EMAIL: <EMAIL>
# ORGANIZATION: n/a
# MODIFIED BY: n/a
# CREATION DATE: 23 May. 2019
# LAST MOD DATE: N/A
# DEPENDENCIES: earthengine-api, os, pandas, urllib, plotly, numpy, folium
# PURPOSE: Introduction to tensorflow & GEE integration
# ```
#
# <center>
# <img src="https://raw.githubusercontent.com/csaybar/EEwPython/master/images/colab_ee_integration.png">
# </center>
#
# ## Topics
#
# 1. Create a training/testing dataset (in a TFRecord format) using Earth Engine.
# 2. Create functions for parse data (TFRecord -> tf.data.Dataset).
# 3. Training and Test a simple Vanilla Deep Neural Network using tensorflow 2.0.
# 3. Making predictions on image data exported from Earth Engine in TFRecord format.
# 4. Upload your results to Earth Engine (asset).
#
#
#
#
# ## 1. Introduction
#
# Deep learning has dramatically improved the state-of-the-art in various science domains. For remote sensing, its potential has not been thoroughly explored. This could be related to the problematic incorporation of spectral & spatial features into a regular deep learning classification scheme or the huge pre-processing that satellite images could need it. Hence, this post aims to teach you how to create a painless deep learning workflow integrating [Google Earth engine](https://earthengine.google.com/) for acquiring spectral & spatial data and [tensorflow](https://www.tensorflow.org/?hl=en) for train and test the model and make predictions.
#
#
# ## 2. What is Google Earth Engine (GEE)?
#
# In a nutshell, is a platform that combines a multi-petabyte [catalog of satellite imagery](https://developers.google.com/earth-engine/datasets/) with planetary-scale analysis capabilities. There are several ways to interact with GEE:
#
# - [Explorer](https://explorer.earthengine.google.com/)
# - [Code Editor](https://code.earthengine.google.com/)
# - [Javascript wrapper library](https://github.com/google/earthengine-api/tree/master/javascript)
# - [**Python wrapper library**](https://github.com/google/earthengine-api/tree/master/python)
#
# In my opinion, the **Python wrapper library** (used in this post) is the best choice to interact with GEE for the following reasons:
#
# - Easy to share code.
# - Easy transition to a web application.
# - Possibility to integrate with ML/DL frameworks.
# - Many plotting options (folium, plotly, matplotlib, seaborn ,etc.).
#
# One more thing!, it's possible to run the Earth Engine Python API in a cloud environment for free. See the introduction of the [EEcourse](https://colab.research.google.com/github/csaybar/EEwPython/blob/dev/1_Introduction.ipynb) for more details.
#
# ## 3. Deep Neural Network (DNN)
#
# <center>
# <image src="https://www.electronicdesign.com/sites/electronicdesign.com/files/MachineLearning_WTD_Fig3.png">
# </center>
#
# A DNN is simply a neural network with more than two layers. The main steps for building a DNN are:
#
# 1. Initialize the model's parameters.
# 2. Loop:
# - Calculate current loss (forward propagation) :
# - Calculate current gradient (backward propagation)
# - Update parameters (gradient descent)
#
# The second step could be a little intimidating, but don't worry about it!. **tf.keras**, the TensorFlow's high-level API, only need that you define the forward propagation correctly and all the steps further down will make automatically. This post does not intend to introduce the algorithm, check out this [repo](https://github.com/csaybar/DLcoursera) for a from zero (numpy) implementation.
#
# ## 4. Crop Area estimation in Camana Valley (DEMO)
#
# Agriculture is part of the backbone Peruvian economy, contributing about 7.6% of the Gross Domestic Product (GDP), being more critical in rural areas where the contribution of GDP increase until 50%. In terms of people, this activity act for the primary source of income for 2.3 million families, representing 34% of Peruvian households. Despite agriculture importance in Peruvian family lives, today no exist a cropping system either at a national or regional scale that monitoring the extension, state, or crop type. Considering this problematic, in this section **you** will create a straightforward methodology to **predict the crop area** in Camana (Arequipa) Valley using a Deep Neural Network.
#
# <center>
# <img src='https://st.depositphotos.com/1171712/3974/i/950/depositphotos_39741899-stock-photo-camana-valley.jpg'>
# </center>
#
# + cellView="form" colab={} colab_type="code" id="03au8DXeuIlH"
#@title Mapdisplay: Display ee.Features and ee.Images using folium.
def Mapdisplay(center, dicc, Tiles="OpenStreetMap", zoom_start=10):
    '''
    Display ee.Features and ee.Images on a folium map.

    :param center: Center of the map (Latitude and Longitude).
    :param dicc: dict mapping layer name -> Earth Engine geometry (GeoJSON
                 dict) or map-id dict returned by ``getMapId``.
    :param Tiles: folium base tileset name, e.g. "OpenStreetMap",
                  "Stamen Terrain", "Stamen Toner", "cartodbpositron".
    :zoom_start: Initial zoom level for the map.
    :return: A folium.Map object.
    '''
    # FIX: the default tileset was misspelled "OpensTreetMap", which folium
    # rejects (ValueError) whenever the default is used.
    mapViz = folium.Map(location=center, tiles=Tiles, zoom_start=zoom_start)
    for k, v in dicc.items():
        # Map-id dicts (from getMapId) contain ee.image.Image values;
        # plain GeoJSON geometry dicts do not.
        if ee.image.Image in [type(x) for x in v.values()]:
            folium.TileLayer(
                tiles = v["tile_fetcher"].url_format,
                attr = 'Google Earth Engine',
                overlay =True,
                name = k
            ).add_to(mapViz)
        else:
            folium.GeoJson(
                data = v,
                name = k
            ).add_to(mapViz)
    mapViz.add_child(folium.LayerControl())
    return mapViz
# + [markdown] colab_type="text" id="4YIUU4Xzfm-N"
# ### 4.1. Installing
#
# Before coding, do not forget to install and load the following packages, and remember that you can communicate with the bash console by prepending an ! to the code.
#
#
#
# + colab={} colab_type="code" id="nrZboinybQto"
# This tensorflow version is necessary just for tensorboard support.
# developer version, on future it maybe breaks.
# !pip install tf-nightly-2.0-preview==2.0.0.dev20190606
# !pip install earthengine-api==0.1.175 #earthengine API
# Load the TensorBoard notebook extension
# %load_ext tensorboard
# + [markdown] colab_type="text" id="uWnZhQhWgPjE"
# ### 4.2. Authentication
#
# This tutorial needs to interact with some Google services. To accomplish this task, it is necessary to authenticate (as yourself). The code below shows you how to do it.
# + [markdown] colab_type="text" id="fFixOP706rwY"
# #### Google Cloud
#
# Google Cloud Storage bucket will serve as a bridge between GEE and Colab.
#
# + colab={} colab_type="code" id="6EcG3BElgRa7"
# Colab-only helper: opens the Google account OAuth flow.
from google.colab import auth
auth.authenticate_user()
# + [markdown] colab_type="text" id="DAuPWosHgswN"
# #### Google Earth Engine
# + colab={} colab_type="code" id="bxad71Q6gTXF"
# !earthengine authenticate
# + [markdown] colab_type="text" id="q6_5QeN_gZh0"
# ### 4.3. Initialize and testing the software setup
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="bEP-G9o7gYq0" outputId="dbb2dab6-9ee2-4842-e313-f7e793be2809"
# Earth Engine Python API
import ee
ee.Initialize()
import tensorflow as tf
print('Tensorflow version: ' + tf.__version__)
import folium
print('Folium version: ' + folium.__version__)
# Define the URL format used for Earth Engine generated map tiles.
EE_TILES = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'
# + [markdown] colab_type="text" id="FAfl6ABygYVr"
# ### 4.4. Prepare the Dataset
#
# Firstly, we define our prediction area (Camana Valley) and passing to GEE. For moving a vector to GEE, you will use the `ee.Geometry.*` module. The [GeoJSON](https://geojson.org/) spec describes in detail the type of geometries supported by GEE, including `Point` (a list of coordinates in some projection), `LineString` (a list of points), `LinearRing` (a closed LineString), and `Polygon` (a list of LinearRings where the first is a shell and subsequent rings are holes). GEE also supports **MultiPoint**, **MultiLineString**, and **MultiPolygon**. The [GeoJSON](https://geojson.org/) GeometryCollection is also supported, although it has the name **MultiGeometry** within GEE.
# + colab={"base_uri": "https://localhost:8080/", "height": 750} colab_type="code" id="5dXR0HQbhVle" outputId="a66bca30-9f4a-45be-e031-2f4e113b4827"
# 2.4.1 Prediction Area
xmin,ymin,xmax,ymax = [-72.778645, -16.621663, -72.66865, -16.57553]
# Passing a rectangle (prediction area) to Earth Engine
Camana_valley = ee.Geometry.Rectangle([xmin,ymin,xmax,ymax])
# GeoJSON coordinates are (lon, lat); reverse to (lat, lon) for folium.
center = Camana_valley.centroid().getInfo()['coordinates']
center.reverse()
Mapdisplay(center,{'Camana Valley':Camana_valley.getInfo()},zoom_start=12)
# + [markdown] colab_type="text" id="S4AEJa6sDWel"
# Next, you will read and create a visualization of the train/test dataset. I've already generated some points with the label agriculture/non-agriculture.
#
# - Train dataset (550 points):
#   - 275 labeled as "agriculture"
#   - 275 labeled as "non agriculture"
#
# - Test dataset (100 points):
#   - 50 labeled as "agriculture"
#   - 50 labeled as "non agriculture"
# + colab={"base_uri": "https://localhost:8080/", "height": 750} colab_type="code" id="cTcFXVBRQ8RN" outputId="4dc42fd1-7a84-46e6-f26a-8fa47edf0e17"
# 2.4.2 Importing the train/test dataset
train_agriculture = ee.FeatureCollection('users/csaybar/DLdemos/train_set')
test_agriculture = ee.FeatureCollection('users/csaybar/DLdemos/test_set')
# Display the train/test dataset
db_crop = train_agriculture.merge(test_agriculture)
# Center the map on the merged collection (reverse to (lat, lon) for folium).
center = db_crop.geometry().centroid().getInfo()['coordinates']
center.reverse()
dicc = {'train': train_agriculture.draw(**{'color': 'FF0000', 'strokeWidth': 5}).getMapId(),
'test' : test_agriculture.draw(**{'color': '0000FF', 'strokeWidth': 5}).getMapId(),
'CamanaValley':Camana_valley.getInfo()
}
Mapdisplay(center,dicc,zoom_start=8)
# + [markdown] colab_type="text" id="NXoIwQ4nTaxg"
# In this part, you will obtain the input data for mapping the **Camana crop area** using [Landsat 8 OLI/TIRS (L8)](https://developers.google.com/earth-engine/datasets/catalog/LANDSAT_LC08_C01_T1_SR). GEE provides L8 images with radiometric and geometry correction. Additionally, **cloud mask information** is provided by means of the bit image `pixel_qa`. The following function allows putting NA to the TOA reflectance values of clouds.
#
# + colab={} colab_type="code" id="U01lsbmchIgC"
def maskS2clouds(img):
    '''
    Mask clouds and cloud shadows using the pixel_qa band of Landsat 8 SR data. See:
    https://developers.google.com/earth-engine/datasets/catalog/LANDSAT_LC08_C01_T1_SR
    Params:
    -------
    - img: image input Landsat 8 SR image
    Return:
    -------
    cloudmasked Landsat 8 image
    '''
    shadow_bit = 1 << 3  # cloud-shadow flag in pixel_qa
    cloud_bit = 1 << 5   # cloud flag in pixel_qa
    # Keep only pixels where both flags are zero (clear conditions).
    qa_band = img.select('pixel_qa')
    clear_of_shadow = qa_band.bitwiseAnd(shadow_bit).eq(0)
    clear_of_cloud = qa_band.bitwiseAnd(cloud_bit).eq(0)
    return img.updateMask(clear_of_shadow.And(clear_of_cloud))
# + [markdown] colab_type="text" id="R1LIVmdsKg4E"
# Now you will filter and reduce the entire Landsat-8 dataset, considering the following:
#
# 1. Select just bands **[R, G, B, NIR]**.
#
# 2. Filter considering the cloud pixel percentage by scene (< 20%).
#
# 3. Filter considering a date (we just selecting 1 years)
#
# 4. Apply **mask2cloud** to each image.
#
# 5. Get the median of the ImageCollection.
#
# 6. Clip the image considering study area.
#
# **NOTE:** To apply a function on all the elements of specified **`ImageCollection`** or **`FeatureCollection`**, you can use the **`map()`** function.
# + colab={} colab_type="code" id="Y12K-bnbKhO9"
# 2.4.3 Prepare the satellite image (Landsat-8)
RGB_bands = ['B4','B3','B2'] #RGB
NDVI_bands = ['B5','B4'] #NIR
# Median composite of 2018 scenes with <20% cloud cover, cloud-masked.
# The 0.0001 multiplier rescales the integer SR values — presumably the
# documented Landsat SR scale factor; TODO confirm.
l8 = ee.ImageCollection("LANDSAT/LC08/C01/T1_SR")\
.filterBounds(db_crop)\
.filterDate('2018-01-01', '2018-12-31')\
.filter(ee.Filter.lt('CLOUD_COVER', 20))\
.map(maskS2clouds)\
.median()\
.multiply(0.0001)
# NDVI from (NIR, Red), plus renamed RGB bands, combined into one image.
l8_ndvi = l8.normalizedDifference(NDVI_bands).rename(['NDVI'])
l8_rgb = l8.select(RGB_bands).rename(['R','G','B'])
l8 = l8_rgb.addBands(l8_ndvi)
# + colab={"base_uri": "https://localhost:8080/", "height": 750} colab_type="code" id="a4ixJRVEVMjj" outputId="af8892f4-a095-4c2d-bb4f-59d9e91276f4"
from collections import OrderedDict
# Create a visualization with folium
visParams_l8 = {
'bands': ['R', 'G', 'B'],
'min': 0,
'max': 0.5,
'gamma': 1.4,
}
l8Mapid = l8.getMapId(visParams_l8)
dicc['Landsat8'] = l8Mapid
# Changing the order of the dictionary so the basemap layer is added first.
key_order = ['Landsat8','CamanaValley','train','test']
dicc = OrderedDict((k, dicc[k]) for k in key_order)
Mapdisplay(center,dicc,zoom_start=8)
# + [markdown] colab_type="text" id="JAiq7cTHJQsX"
# For the collocation of the train/test datasets to each L8 grid cell value you will use the `ee.Image.sampleRegions` function. Note that geometries will be snapped to pixel centers (nearest center approximation).
#
#
#
# + colab={} colab_type="code" id="cWpTWAvvbXh0"
# 2.4.3 Extract pixels values considering train/test dataset
# scale=30 matches the Landsat 8 pixel size in meters.
train_db = l8.sampleRegions(collection=train_agriculture, properties=['class'], scale=30)
test_db = l8.sampleRegions(collection=test_agriculture, properties=['class'], scale=30)
# + [markdown] colab_type="text" id="-Zuyg3ndS0_R"
# Unfortunately, you cannot use Tensorflow directly in Earth Engine. To overcome this problem, you will save the train/test dataset into a Google **Cloud Storage Bucket (GCS)** (you could use Google Drive instead), since both GEE and GCS and Tensorflow can access to them. More details about how to export data in GEE you can find them in the next [link](10_Export.ipynb) or into the [Official Exporting data guide](https://developers.google.com/earth-engine/exporting).
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="e6HzmpE7bXXz" outputId="3b262c15-3f13-4f09-c992-372eaf8810cb"
# 2.4.4 Save results in Google Cloud Storage
outputBucket = 'bag_csaybar' # Replace with your Cloud Storage bucket
# Make sure the bucket exists.
print('Found Cloud Storage bucket.' if tf.io.gfile.exists('gs://' + outputBucket)
else 'Output Cloud Storage bucket does not exist.')
trainFilePrefix = 'TrainingAgriculture_'
testFilePrefix = 'TestAgriculture_'
# Create the tasks for passing of GEE to Google storage
trainingTask = ee.batch.Export.table.toCloudStorage(
collection=train_db,
description='Training Export',
fileNamePrefix=trainFilePrefix,
bucket=outputBucket,
fileFormat='TFRecord')
testingTask = ee.batch.Export.table.toCloudStorage(
collection=test_db,
description='Testing Export',
fileNamePrefix=testFilePrefix,
bucket=outputBucket,
fileFormat='TFRecord')
# Exports run asynchronously on GEE servers; see the polling cell below.
trainingTask.start()
testingTask.start()
# + colab={"base_uri": "https://localhost:8080/", "height": 125} colab_type="code" id="MJRMmCaDcItW" outputId="8d8c4a57-5028-4f49-e088-66680c7139b1"
# Monitor task progress
# Code Extracted here:
# https://github.com/google/earthengine-api/blob/master/python/examples/ipynb/TF_demo1_keras.ipynb
import time
# Poll every 5 seconds until both export tasks finish.
while trainingTask.active():
  print('Polling for task (id: {}).'.format(trainingTask.id))
  time.sleep(5)
while testingTask.active():
  print('Polling for task (id: {}).'.format(testingTask.id))
  time.sleep(5)
print('Done!')
# + [markdown] colab_type="text" id="BeFmmhoKdngc"
# ### 4.5. Creating a tf.data.Dataset from a TFRecord file
#
# Read data from the TFRecord file into a tf.data.Dataset. Pre-process the dataset to get it into a suitable format for input to the DNN model.
# For getting more details about `tf.data.Dataset`see the next [TFdoc](https://www.tensorflow.org/guide/premade_estimators#create_input_functions).
#
#
#
#
#
#
#
# + colab={} colab_type="code" id="8szRwsIxf7dc"
# Fullname train/test db
# GEE table exports append this fixed suffix to the fileNamePrefix given above.
fileNameSuffix = 'ee_export.tfrecord.gz'
trainFilePath = 'gs://' + outputBucket + '/' + trainFilePrefix + fileNameSuffix
testFilePath = 'gs://' + outputBucket + '/' + testFilePrefix + fileNameSuffix
# + colab={} colab_type="code" id="RxN8c3C4f7LX"
def input_fn(fileNames, numEpochs=None, shuffle=True, batchSize=16):
    """Build a tf.data.Dataset of (features, label) batches from gzip TFRecords.

    fileNames: path or list of paths to the exported TFRecord file(s).
    numEpochs: how many times to repeat the data (None = repeat forever).
    shuffle:   whether to shuffle records before batching.
    batchSize: number of records per batch.
    """
    # Fixed-length scalar features written by the GEE export, plus the label.
    feature_spec = {
        name: tf.io.FixedLenFeature([], dtype=tf.float32)
        for name in ('R', 'G', 'B', 'NDVI', 'class')
    }

    def _parse(serialized):
        # Decode one serialized Example and split off the integer class label.
        features = tf.io.parse_single_example(serialized, feature_spec)
        labels = features.pop('class')
        return features, tf.cast(labels, tf.int32)

    ds = tf.data.TFRecordDataset(fileNames, compression_type='GZIP')
    ds = ds.map(_parse, num_parallel_calls=5)
    if shuffle:
        ds = ds.shuffle(buffer_size=batchSize * 10)
    return ds.batch(batchSize).repeat(numEpochs)
# + colab={} colab_type="code" id="DU3l1Lj9ls7d"
# Training stream: 100 epochs, shuffled, batches of 32.
train_dba = input_fn(trainFilePath,100,True,32)
# Evaluation stream: a single unshuffled pass, one record per batch.
test_dba = input_fn(testFilePath, numEpochs=1, batchSize=1, shuffle=False)
# + [markdown] colab_type="text" id="Koi4qoKSrH79"
# ### 4.6. Create a DNN model with keras
#
# Here you will create a deep neural network model with:
# - 2 layers (10x10 nodes).
# - 1 dropout layer.
# - 1 output layer.
#
# Additionally, Early Stopping, Tensorboard, and best model callback were added. A callback is a set of functions to be applied at given stages of the training procedure. You can found more details [here](https://keras.io/callbacks/).
#
# + colab={"base_uri": "https://localhost:8080/", "height": 667} colab_type="code" id="eiKMSYnhO1g3" outputId="305a4bc8-aeeb-4c22-ed17-b985ff75dfbf"
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow import feature_column
import os
import datetime
# Input bands used as model features (must match the keys parsed in input_fn).
bands = ['R','G','B','NDVI']
# Create a dense `Tensor` based on given `feature_columns`.
feature_columns = [feature_column.numeric_column(x) for x in bands]
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
# Initialize the DNN model
# He-uniform initialization pairs well with the ReLU activations below.
he_init = tf.keras.initializers.he_uniform(seed=None)
# Define the layers in the model: 2 hidden layers of 10 units, one dropout
# layer, and a sigmoid output for binary (crop / no-crop) classification.
model = tf.keras.Sequential([
  feature_layer,
  layers.Dense(10, activation='relu',kernel_initializer=he_init),
  tf.keras.layers.Dropout(0.2),
  layers.Dense(10, activation='relu',kernel_initializer=he_init),
  layers.Dense(1, activation='sigmoid',kernel_initializer=he_init)
])
# Callbacks time
# TensorBoard logs go to a timestamped directory so runs don't overwrite each other.
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
# Stop after 10 epochs without val_loss improvement; keep the best weights on disk.
es = EarlyStopping(monitor='val_loss', patience=10)
mcp = ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)
# Compile the model with the specified loss function.
model.compile(optimizer=keras.optimizers.SGD(momentum=0.01, nesterov=True),
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Fit the model to the training data.
model.fit(x=train_dba,
          epochs=15,
          steps_per_epoch=100,
          callbacks=[tensorboard_callback,es,mcp],
          validation_data=test_dba)
# + colab={} colab_type="code" id="es8Y2pVpYXM8"
# %tensorboard --logdir logs
# #!kill 607
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="0jJtpiYtVfwz" outputId="f5761065-802d-4646-9beb-7c8ba0842f6d"
# Restore the best weights saved by ModelCheckpoint, then score on the test set.
model.load_weights("best_model.h5")
model.evaluate(x = test_dba)
# + [markdown] colab_type="text" id="l08s8WgjhIEj"
# ### 4.7. Prediction
#
# You will prepare the L8 imagery, likewise, you made it for the train/test dataset.
# + colab={} colab_type="code" id="iKe0xaMchKO_"
# Build an annual cloud-masked median composite and scale reflectance to [0, 1].
l8 = ee.ImageCollection("LANDSAT/LC08/C01/T1_SR")\
  .filterBounds(Camana_valley)\
  .filterDate('2018-01-01', '2018-12-31')\
  .filter(ee.Filter.lt('CLOUD_COVER', 20))\
  .map(maskS2clouds)\
  .median()\
  .multiply(0.0001)\
  .clip(Camana_valley)
# Rebuild the same R/G/B/NDVI stack the model was trained on.
l8_ndvi = l8.normalizedDifference(NDVI_bands).rename(['NDVI'])
l8_rgb = l8.select(RGB_bands).rename(['R','G','B'])
l8 = l8_rgb.addBands(l8_ndvi)
# + [markdown] colab_type="text" id="sv4kCrptYos1"
# For export the results to the Google Cloud Storage, it's preferred defines the following `formatOptions` parameters to save memory:
#
# - **patchDimensions**: Patch dimensions tiled over the export area, covering every pixel in the bounding box exactly once (except when the patch dimensions do not evenly divide the bounding box in which case the lower and right sides are trimmed).
#
# - **compressed**: If true, compresses the .tfrecord files with gzip and appends the ".gz" suffix
#
# See all the paramerters [here](https://developers.google.com/earth-engine/exporting#configuration-parameters).
# + colab={} colab_type="code" id="unxNYJeAkT0V"
imageFilePrefix = 'CamanaValleyCrop'
# Specify patch and file dimensions.
# 64x64 patches must match the patch size assumed later by predict_input_fn.
imageExportFormatOptions = {
  'patchDimensions': [64, 64],
  'maxFileSize': 100000000,
  'compressed': True
}
# Setup the task.
imageTask = ee.batch.Export.image.toCloudStorage(
  image=l8,
  description='Image Export',
  fileNamePrefix=imageFilePrefix,
  bucket=outputBucket,
  scale=30,
  fileFormat='TFRecord',
  region=Camana_valley.getInfo()['coordinates'],
  formatOptions=imageExportFormatOptions,
)
imageTask.start()
# + colab={"base_uri": "https://localhost:8080/", "height": 226} colab_type="code" id="JmXlRNgCl7YS" outputId="242ab342-3fcb-408f-da3b-02895ed3cd5a"
import time
# Block until the image export finishes (poll every 5 seconds).
while imageTask.active():
  print('Polling for task (id: {}).'.format(imageTask.id))
  time.sleep(5)
# + [markdown] colab_type="text" id="p3KgV33i0wvR"
# Now it's time to classify the image that was exported from GEE to GCS using Tensorflow. If the exported image is large (it not your case), it will be split into multiple TFRecord files in its destination folder. There will also be a JSON sidecar file called **"the mixer"** that describes the format and georeferencing of the image. Here we will find the image files and the mixer file, getting some info out of the mixer that will be useful during model inference.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="Dnsh-wb8mAyT" outputId="50b7ce80-7454-4433-d301-4d8a703b0660"
# filesList = !gsutil ls 'gs://'{outputBucket}
# NOTE(review): `filesList` comes from the `!gsutil` cell magic above, so this
# cell only runs inside IPython/Colab with gcloud configured.
exportFilesList = [s for s in filesList if imageFilePrefix in s]
# Get the list of image files and the JSON mixer file.
imageFilesList = []
jsonFile = None
for f in exportFilesList:
  if f.endswith('.tfrecord.gz'):
    imageFilesList.append(f)
  elif f.endswith('.json'):
    jsonFile = f
# Make sure the files are in the right order.
print(jsonFile)
# + [markdown] colab_type="text" id="n53qwrAfbpq8"
# The mixer contains metadata and georeferencing information for the exported patches, each of which is in a different file. Read the mixer to get some information needed for prediction.
# + colab={"base_uri": "https://localhost:8080/", "height": 197} colab_type="code" id="XNnZ6W-9m50c" outputId="6cbb3508-5f8d-4d4d-d161-0c7eb5152230"
import json
from pprint import pprint
# Load the contents of the mixer file to a JSON object.
# jsonText = !gsutil cat {jsonFile}
# Get a single string w/ newlines from the IPython.utils.text.SList
mixer = json.loads(jsonText.nlstr)
pprint(mixer)
# + [markdown] colab_type="text" id="PlSm0_2DdC5g"
# The next function is slightly different from the to the `input_fn` (see Section 4.5). Mainly, this is because the pixels are written into records as patches, we need to read the patches in as one big tensor (one patch for each band), then flatten them into lots of little tensors. Once the `predict_input_fn` is defined, that can handle the shape of the image data, all you need to do is feed it directly to the trained model to make predictions.
#
# + colab={} colab_type="code" id="b0DR0wlim83X"
def predict_input_fn(fileNames,patch,bands):
    """Build a tf.data.Dataset of per-pixel feature dicts from exported image patches.

    fileNames: TFRecord file path(s) produced by the GEE image export.
    patch:     [width, height] of each exported patch (must match the export).
    bands:     band names; each is stored as one flat tensor per patch.
    """
    width, height = patch
    pixels_per_patch = width * height
    # Each band arrives as a whole patch: a [width*height, 1] float tensor.
    spec = {band: tf.io.FixedLenFeature(shape=[pixels_per_patch, 1], dtype=tf.float32)
            for band in bands}

    def _decode(serialized):
        return tf.io.parse_single_example(serialized, spec)

    ds = tf.data.TFRecordDataset(fileNames, compression_type='GZIP')
    ds = ds.map(_decode, num_parallel_calls=4)
    # Slice the patch-sized tensors into one record per pixel, then re-batch so
    # each batch corresponds to exactly one patch.
    # https://stackoverflow.com/questions/50530806/using-flat-map-in-tensorflows-dataset-api
    ds = ds.flat_map(tf.data.Dataset.from_tensor_slices)
    return ds.batch(pixels_per_patch)
# + colab={} colab_type="code" id="WnRFSIpCfjD4"
# Score every pixel of the exported image with the trained model.
predict_db = predict_input_fn(fileNames=imageFilesList,patch=[64,64],bands=['R', 'G', 'B', 'NDVI'])
predictions = model.predict(predict_db)
# + [markdown] colab_type="text" id="bmHyVSkSlrtY"
# Now that there's a `np.array` of probabilities in "predictions", it's time to write them back into a file. You will write directly from TensorFlow to a file in the output Cloud Storage bucket.
#
# Iterate over the list and write the probabilities in patches. Specifically, we need to write the pixels into the file as patches in the same order they came out. The records are written as serialized tf.train.Example protos. This might take a while.
# + colab={} colab_type="code" id="SdepemIOVnD-"
# Instantiate the writer.
PATCH_WIDTH , PATCH_HEIGHT = [64,64]
outputImageFile = 'gs://' + outputBucket + '/CamanaValleyCrop.TFRecord'
writer = tf.io.TFRecordWriter(outputImageFile)
# Every patch-worth of predictions we'll dump an example into the output
# file with a single feature that holds our predictions. Since our predictions
# are already in the order of the exported data, the patches we create here
# will also be in the right order.
patch = []
curPatch = 1
for prediction in predictions:
  patch.append(prediction)
  # Flush one Example per complete patch (64*64 = 4096 probabilities).
  if (len(patch) == PATCH_WIDTH * PATCH_HEIGHT):
    print('Done with patch ' + str(curPatch) + '...')
    # Create an example
    example = tf.train.Example(
      features=tf.train.Features(
        feature={
          'crop_prob': tf.train.Feature(
            float_list=tf.train.FloatList(
              value=patch))
        }
      )
    )
    writer.write(example.SerializeToString())
    patch = []
    curPatch += 1
writer.close()
# + [markdown] colab_type="text" id="rkO1ubuBpWYH"
# ### 4.8 Upload the classifications to an Earth Engine asset
#
# At this stage, there should be a predictions TFRecord file sitting in the output Cloud Storage bucket. Use the gsutil command to verify that the predictions image (and associated mixer JSON) exist and have non-zero size.
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="vlgDx1cGp0WM" outputId="e260dfaf-c1a1-4e15-c5e1-cf1405b58dc2"
# !gsutil ls -l {outputImageFile}
# + [markdown] colab_type="text" id="V_fFjHiup86J"
# Upload the image to Earth Engine directly from the Cloud Storage bucket with the [earthengine command](https://developers.google.com/earth-engine/command_line#upload). Provide both the image TFRecord file and the JSON file as arguments to earthengine upload.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="mqB8MS6oWixT" outputId="7df05ee0-fb31-4c4a-b2dc-d35f4afd7485"
# REPLACE WITH YOUR USERNAME:
USER_NAME = 'csaybar'
outputAssetID = 'users/' + USER_NAME + '/CamanaCrop'
print('Writing to ' + outputAssetID)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="2LLazXQgWn85" outputId="a96ccdeb-02c8-4cd3-c04d-fc1fff75874a"
# Start the upload. It step might take a while.
# !earthengine upload image --asset_id={outputAssetID} {outputImageFile} {jsonFile}
# + [markdown] colab_type="text" id="ZHrFVwo7qr7Y"
# Display the Results using Folium!
# + colab={"base_uri": "https://localhost:8080/", "height": 750} colab_type="code" id="jbfPqdqLbfig" outputId="61819c3d-320a-4f1a-b970-a66f245914f6"
# Threshold the uploaded probability image at ~0.5 to get a crop/no-crop mask.
ProbsImage = ee.Image(outputAssetID)
predictionsImage = ee.Image(outputAssetID).gte(0.505)
dicc = {'CropProbability':ProbsImage.getMapId(),
        'Crop':predictionsImage.getMapId()}
# GEE returns [lon, lat]; folium expects [lat, lon], hence the reverse().
center = Camana_valley.centroid().getInfo()['coordinates']
center.reverse()
Mapdisplay(center=center,dicc=dicc,zoom_start=13)
# + [markdown] colab_type="text" id="hdKdlFI0DhuF"
# ### That's all for this time!, the next post is about semantic segmentation and Earth Engine.
| dnn_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Train
# ### Library setups and imports
import os
import numpy as np
import tensorflow as tf
import tensorflowjs as tfjs
import matplotlib.pyplot as plt
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv1D, MaxPooling1D
# ### Parameters
USE_DOWNLOADED_SET = True
DOWNLOADED_SET_URL = 'https://hkinsley.com/static/downloads/bci/model_data_v2.7z'
DATA_DIR = 'data/mask-iv-openbci' if USE_DOWNLOADED_SET else 'data/personal'
# One-hot targets for the three motor-imagery classes.
LABELS = {"left": [1, 0, 0], "none": [0, 1, 0], "right": [0, 0, 1]}
# (samples, channels, fft_bins) — presumably 8 EEG channels x 60 FFT bins; confirm against data.
INPUT_SHAPE=(-1, 8, 60)
# ### Data loading
def generate_dataset(is_validation_data):
    """Load FFT samples and their labels from disk and return them shuffled.

    is_validation_data: when True read from 'validation_data/', else 'data/'.
    Returns a list of [fft_channels, one_hot_label] pairs, keeping only the
    first 8 channels of each sample.
    """
    dataset = []
    path = os.path.join(DATA_DIR)
    subdir = 'validation_data/' if is_validation_data else 'data/'
    for label in LABELS:
        label_dir = os.path.join(path, subdir, label)
        for FILE in os.listdir(label_dir):
            if USE_DOWNLOADED_SET:
                ffts = np.load(os.path.join(DATA_DIR, subdir, label, FILE))
                for fft_data in ffts:
                    dataset.append([fft_data[:8], LABELS[label]])
            else:
                # BUG FIX: files are listed from label_dir, so they must be loaded
                # from label_dir too (old code looked in DATA_DIR directly).
                fft_data = np.load(os.path.join(label_dir, FILE))
                # BUG FIX: use the one-hot vector like the downloaded-set branch does —
                # categorical_crossentropy downstream cannot train on the raw label string.
                dataset.append([fft_data[:8], LABELS[label]])
    random.shuffle(dataset)
    return dataset
# +
training = generate_dataset(False)
validation = generate_dataset(True)
# Split the shuffled [x, y] pairs into parallel feature/label lists.
train_x = []
train_y = []
for x, y in training:
    train_x.append(x)
    train_y.append(y)
val_x = []
val_y = []
for x, y in validation:
    val_x.append(x)
    val_y.append(y)
# Reshape features to (samples, channels, fft_bins) for the Conv1D stack below.
train_x = np.array(train_x).reshape(INPUT_SHAPE)
val_x = np.array(val_x).reshape(INPUT_SHAPE)
train_y = np.array(train_y)
val_y = np.array(val_y)
# -
# ### Neural network definition
# Conv1D stack over the channel/FFT data, ending in a 3-way softmax
# (left / none / right) to match the one-hot LABELS.
model = Sequential([
    Conv1D(32, (2), activation='relu', input_shape=train_x.shape[1:], padding='same'),
    MaxPooling1D(pool_size=(2)),
    Conv1D(128, (2), activation='relu', padding='same'),
    MaxPooling1D(pool_size=(2)),
    Conv1D(64, (4), activation='relu', padding='same'),
    MaxPooling1D(pool_size=(2)),
    Dropout(0.4),
    Flatten(),
    Dense(128, activation='relu', kernel_regularizer='l1'),
    Dense(3, activation=tf.nn.softmax)
])
# Checkpoint the full model whenever validation accuracy improves.
filepath="tmp/weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=filepath,
    save_weights_only=False,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True,
    verbose=1)
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
              metrics=['accuracy'])
model.summary()
# Forced onto CPU — presumably to avoid GPU memory issues with the 1024 batch; confirm.
with tf.device('/CPU:0'):
    history = model.fit(train_x, train_y, epochs=30, batch_size=1024, validation_data=(val_x, val_y), callbacks=[model_checkpoint_callback])
# +
# Plot training vs validation loss over epochs.
loss = plt.figure(1)
history_loss = history.history['loss']
history_val_loss = history.history['val_loss']
epochs = range(len(history_loss))
plt.plot(epochs, history_loss, 'ko', label='Training loss')
plt.plot(epochs, history_val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
# Plot training vs validation accuracy over epochs.
loss = plt.figure(2)
history_acc = history.history['accuracy']
history_val_acc = history.history['val_accuracy']
epochs = range(len(history_loss))
plt.plot(epochs, history_acc, 'ko', label='Training acc')
plt.plot(epochs, history_val_acc, 'b', label='Validation acc')
# BUG FIX: this figure shows accuracy — the title said "loss" (copy-paste slip).
plt.title('Training and validation accuracy')
plt.legend()
plt.show()
# -
# Save in TF SavedModel format and as a TensorFlow.js model for browser use.
model.save('models/tf')
tfjs.converters.save_keras_model(model, 'models/tfjs')
| thought-of-direction/train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
# BUG FIX: the conventional `plt` alias is matplotlib.pyplot, not the top-level
# matplotlib package — plotting calls like plt.plot() would fail otherwise.
import matplotlib.pyplot as plt
# %matplotlib inline
# Raw settlement file: ';'-separated, no header row, every column read as string
# (dtype='object'); columns 2/8/25 are parsed as dates.
df = pd.read_csv('dados/IR-20170731-000000360.txt', header=None, delimiter=';', dtype='object', parse_dates=[2,8,25])
df.dtypes
# +
# Rename columns (originally: "Renomeando colunas")
#df.columns = ['Id', '1', 'DataArquivo', '3', '4', 'ProdutoPag', 'TipoProduto', 'NumCartao','DataVenda','9', '10', '11','12','13','14', '15','16','17','18', '19','20','21', '22', '23',
#              'NomeEspetaculo','DataSessao', '26', '27','GatewayPag','CodPedido']
df.columns = ['NSR', 'NSA', 'DataArquivo', 'Loja', 'Terminal', 'Bandeira', 'TipoVenda', 'NumeroCartao', 'DataVenda', 'Pedido',
              'Cupom', 'NSU', 'CodigoAutorizacao', 'TID', 'LoteAdquirente', 'DocumentoCliente', 'NomeCliente', 'QtdeParcelas', 'ValorParcela',
              'ValorVenda', 'ValorAdicional1', 'ValorAdicional2', 'TipoTransacao', 'idEvento', 'dsEvento', 'dtApresentacao', 'QtdIngresso', 'Troca',
              'PaymentGateway', 'PSP']
# +
# Convert count columns to int (originally: "Convert para int"); they were read as strings above.
df['NSR'] = df['NSR'].astype(int)
df['QtdeParcelas'] = df['QtdeParcelas'].astype(int)
df['QtdIngresso'] = df['QtdIngresso'].astype(int)
# +
#Convert para decimal
def ToDecimal(string):
    """Convert a centavos string (e.g. '12345') to a float of reais (123.45)."""
    whole, cents = string[:-2], string[-2:]
    return float(whole + "." + cents)
# BUG FIX: fillna() returns a new Series — the old call discarded the result, so
# NaNs survived and ToDecimal crashed on them. Fill with the STRING '0' (not int 0)
# because ToDecimal slices its argument as a string ('0' -> 0.0).
# NOTE(review): ValorAdicional2 may need the same treatment — confirm against the data.
df['ValorAdicional1'] = df['ValorAdicional1'].fillna('0')
df['ValorParcela'] = list(map(ToDecimal, df['ValorParcela']))
df['ValorVenda'] = list(map(ToDecimal, df['ValorVenda']))
df['ValorAdicional1'] = list(map(ToDecimal, df['ValorAdicional1']))
df['ValorAdicional2'] = list(map(ToDecimal, df['ValorAdicional2']))
# +
df['ValorParcela']
#df['ValorParcela'][0][:-2] + "." + df['ValorParcela'][0][-2:]
#[df['ValorParcela'][0][:-2] + "." + df['ValorParcela'][0][-2:] for x in df['ValorParcela']]
# -
# Grouping (originally: "Agrupamento") — transaction counts per card brand.
df.groupby('Bandeira').count()[['NSR']]
# Filter (originally: "Filtro")
print(df['PaymentGateway'].unique())
# Replace null values (originally: "Transformando valores null")
#df[df['TipoProduto'].str.contains('CREDITO')]
# NOTE(review): 'TipoProduto' is one of the OLD (commented-out) column names; after
# the rename above this raises KeyError — 'TipoVenda' is the likely target; confirm.
df['TipoProduto'].fillna('Nao Informado', inplace=True)
#df.groupby(by='TipoProduto').count()[['Id']]
# Scratchpad (originally: "Rascunho")
# NOTE(review): '13' and 'Id' are also stale pre-rename names ('TID' / 'NSR' after
# the rename); the lines below raise KeyError as written.
df['13']
#df['13'].fillna('Não Informado', inplace=True)
df[df['TipoProduto'].str.contains('CREDITO')].head(5)
df.to_csv('dados/VendasIR_v3.csv')
df['Id'].astype(int)
df.groupby('13').count()['Id']
| Notebooks/AnaliseConciliacao.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overview
#
# > The goal of this notebook is to show how we could create a list of Medication* resources mapped to a single medication resource type.
#
# Specifically, we'll create a `Bundle` after mapping `MedicationAdministration`, `MedicationDispense` and `MedicationRequest` to `MedicationStatement` - but we could use the same approach to map to any target type.
#
# [](https://colab.research.google.com/github/pete88b/smart-on-fhir-client-py-demo/blob/main/vulcan_medication_bundle_getting_started.ipynb)
#
# This notebook also tries to show some of the exploration and reasoning behind the choices made along the way. Feel free to skip to the "Transform per-subject bundle of medication resources" section at the end if you don't need these details (o:
#
# ## Quick Links
#
# - https://www.hl7.org/fhir/bundle.html
# - https://www.hl7.org/fhir/list.html
#
# ## Next steps
#
# Might we want to
# - create one bundle per subject from bundles containing resources for multiple subjects
# - define some kind of order of entries in the bundle
# - think about how we handle resources that fail validation
#
# ## Why are we not using `List`
#
# We need a container that allows us to include complete resources.
#
# The following JSON shows how a MedicationStatement can be part of a bundle entry;
#
# ```
# {'entry': [
# {'resource':
# {'id': '1733210',
# 'subject': {'reference': 'Patient/1732972'},
# 'resourceType': 'MedicationStatement'}}],
# 'resourceType': 'Bundle'}
# ```
#
# but `List` entries must be references;
#
# ```
# {'entry': [
# {'item': {'reference': 'MedicationStatement/1733210'}}],
# 'resourceType': 'List'}
# ```
#
# and we won't have valid references for mapped/transformed resources (i.e. for MedicationDispense mapped to MedicationStatement)
# Detect Colab so the FHIR client can be pip-installed on the fly.
# NOTE(review): as a plain .py script the `if` body is only a comment (the !pip
# magic), which is an IndentationError — this only runs via jupytext/notebook.
IN_COLAB = 'google.colab' in str(get_ipython())
if IN_COLAB:
    # !pip install -Uqq git+https://github.com/smart-on-fhir/client-py.git
import IPython, json, requests
from datetime import datetime, timezone
from collections import Counter
from pathlib import Path
from fhirclient import client
from fhirclient.models.annotation import Annotation
from fhirclient.models.bundle import Bundle, BundleEntry
from fhirclient.models.dosage import Dosage
from fhirclient.models.fhirreference import FHIRReference
from fhirclient.models.humanname import HumanName
from fhirclient.models.medication import Medication
from fhirclient.models.medicationadministration import MedicationAdministration
from fhirclient.models.medicationdispense import MedicationDispense
from fhirclient.models.medicationrequest import MedicationRequest
from fhirclient.models.medicationstatement import MedicationStatement
from fhirclient.models.patient import Patient
from fhirclient.models.fhirdate import FHIRDate
from fhirclient.models.list import List, ListEntry
# Public HAPI R4 test server — its data is shared and mutable, so resource IDs
# used later in the notebook may disappear over time.
settings = {
    'app_id': 'my_web_app',
    'api_base': 'http://hapi.fhir.org/baseR4'
}
smart = client.FHIRClient(settings=settings)
# ### Why are we reading FHIR resources as raw JSON?
#
# Lots of the data in the test servers fails validation, making it really hard to find examples that work with the FHIR py classes.
#
# ```
# search = MedicationRequest.where(struct={})
# print(search.construct())
# resources = search.perform_resources(smart.server)
# ```
#
# the `perform_resources` call ↑ fails with validation errors ↓ and we can't access the data )o:
#
# ```
# FHIRValidationError: {root}:
# entry.1:
# resource:
# 'Non-optional property "subject" on <fhirclient.models.medicationrequest.MedicationRequest object at 0x00000281E4249790> is missing'
# 'Non-optional property "status" on <fhirclient.models.medicationrequest.MedicationRequest object at 0x00000281E4249790> is missing'
# 'Non-optional property "medication" on <fhirclient.models.medicationrequest.MedicationRequest object at 0x00000281E4249790> is missing'
# 'Non-optional property "intent" on <fhirclient.models.medicationrequest.MedicationRequest object at 0x00000281E4249790> is missing'
# ```
# +
def get_bundle_as_raw_json(api_base, resource_type, url_suffix=None):
    """GET a bundle of resources of `resource_type` and return the raw JSON dict."""
    parts = [api_base, '/', resource_type]
    if url_suffix is not None:
        parts.append(url_suffix)
    url = ''.join(parts)
    print('GET', url)
    response = requests.get(url)
    return response.json()
def get_next_bundle_as_raw_json(json_response):
    """GET the next page of results, or return None when there is no next page.

    BUG FIX: the old code blindly used link[1], which (a) raises IndexError when
    only a 'self' link is present and (b) is not guaranteed by FHIR to be the
    'next' relation — so search the link array for relation == 'next' instead.
    """
    for link in json_response.get('link', []):
        if link.get('relation') == 'next':
            url = link['url']
            print('GET', url)
            return requests.get(url).json()
    return None
# -
# ### Save some bundles to file
#
# this will make repeatable runs of this notebook possible if resources on the http://hapi.fhir.org/baseR4 server change
def resource_bundle_to_file(api_base, resource_type, url_suffix=None):
    """Fetch one bundle of `resource_type` and save it to data/<type>_bundle_0.json."""
    json_response = get_bundle_as_raw_json(api_base, resource_type, url_suffix)
    file_number = 0
    with open(f'data/{resource_type}_bundle_{file_number}.json','w') as f:
        json.dump(json_response, f, indent=2)
    # follow "next" links - this might makes lots of requests depending on how much data the server has
    # NOTE(review): the commented code below increments `file_number` but names the
    # file with an undefined `file_count` — fix before re-enabling.
    # json_response = get_next_bundle_as_raw_json(json_response)
    # while json_response is not None:
    #     file_number += 1
    #     with open(f'data/{resource_type}_bundle_{file_count}.json','w') as f:
    #         json.dump(json_response, f, indent=2)
# Snapshot a bundle of each resource type we care about (skipped in Colab).
if not IN_COLAB: # remove this line if you want to save files in colab
    Path('data').mkdir(exist_ok=True)
    for resource_type, url_suffix in [
        ['Patient','?name=RWD-Vulcan'],
        ['MedicationRequest',None],
        ['MedicationDispense',None],
        ['MedicationAdministration',None],
        ['MedicationStatement',None]]:
        resource_bundle_to_file(settings['api_base'], resource_type, url_suffix)
# +
def load_from_json(resource_type, file_number=0):
    """Read a previously saved bundle file and return it as a plain dict."""
    path = f'data/{resource_type}_bundle_{file_number}.json'
    with open(path) as fh:
        return json.load(fh)
def load_bundle(resource_type, file_number=0):
    """Load a saved bundle file and parse it into a fhirclient Bundle model."""
    return Bundle(load_from_json(resource_type, file_number))
def convert_json_bundle_to_list_of_resources(json_bundle, resource_type):
    """Instantiate `resource_type` (non-strict) for each entry in a JSON bundle.

    BUG FIX: the old code called `result.append(resource_type(entry), False)` —
    list.append only takes one argument, so this raised TypeError. The False
    (strict=False) belongs to the fhirclient model constructor, and the model is
    built from the entry's `resource` payload rather than the entry wrapper
    (entries also carry fullUrl etc.). TODO(review): confirm every entry we feed
    in has a 'resource' key.
    """
    result = []
    for entry in json_bundle['entry']:
        result.append(resource_type(entry['resource'], False))
    return result
# -
# The following cells show how the functions above work and/or fail with validation errors
# +
# convert_json_bundle_to_list_of_resources(load_from_json('MedicationRequest'), MedicationRequest)
# +
# load_from_json('MedicationRequest') # return py dict
# +
# load_bundle('MedicationRequest').entry[0].resource.as_json() # probably fails validation
# -
# ### Can we use the "RWD-Vulcan" data?
#
# It would be great if we had examples of patients with all kinds of Medication* resources but ...
#
# it looks like we have patients with MedicationStatement ↓ (i.e. no other Medication* resources)
search=Patient.where({'name': 'RWD-Vulcan'})
# Reverse-include MedicationStatements whose subject is each matched patient.
search = search.include('subject', MedicationStatement, reverse=True)
# search = search.include('subject', MedicationRequest, reverse=True) # -> Counter({'Patient': 10})
list_of_resources = search.perform_resources(smart.server)
# Tally the resource types returned, to see what the test patients actually have.
Counter([r.__class__.__name__ for r in list_of_resources])
# ### Patient bundle will all medication types
#
# I created a single patient bundle with one of each medication type to help testing/demo later https://raw.githubusercontent.com/pete88b/smart-on-fhir-client-py-demo/main/patient-bundle-with-all-med-types.json
#
# This data probable doesn't make sense together as the request, dispense and administration resources are copied from different subjects.
# ### Should we create FHIR bundles with the python client?
#
# ↓ this is problematic as lots of resources fail validation ...
# Demonstrate building a Bundle with the fhirclient model classes.
bundle = Bundle()
# not sure it makes sense to populate id or identifier as we're just using list as a container
bundle.type = 'collection'
bundle.timestamp = FHIRDate(datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'))
bundle.entry = []
# lots of MedicationStatement resources pass validation
bundle.entry.append(BundleEntry())
bundle.entry[-1].resource = MedicationStatement.read('1733210', smart.server)
# but ... most MedicationRequest resources fail
# for resource in MedicationRequest.where({}).perform_resources(smart.server):
#     bundle.entry.append(BundleEntry())
#     bundle.entry[-1].resource = resource
bundle.as_json()
# + [markdown] colab_type="text" id="l08s8WgjhIEj"
# ## What would it look like if we work with py dictionaries instead ...
#
# if our taget type is the same as the source type, we could copy the entry list into the target ... but we might want to remove `fullUrl` and just keep resources
# Same bundle as a plain dict — sidesteps fhirclient validation entirely.
bundle = dict(resourceType='Bundle',
              type='collection',
              timestamp=datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
              entry=[])
bundle['entry'].extend(get_bundle_as_raw_json(settings['api_base'],'MedicationRequest')['entry'])
# uncomment and run the following cell if you want to see the bundle we just created
# +
# bundle
# -
# ### Helper functions to map between different resource types
def pull_attr(resouce,attr_path):
    """Walk `resouce` along a dotted path and return the value found.

    `attr_path` may list alternatives separated by ' OR '; the first path that
    fully resolves wins. Returns None when no alternative resolves.
    (Parameter name kept as-is — sic — for backward compatibility.)
    """
    for candidate in attr_path.split(' OR '):
        node, resolved = resouce, True
        for key in candidate.split('.'):
            if key in node:
                node = node[key]
            else:
                resolved = False
                break
        if resolved:
            return node
def transform(resource,mapping):
    """Create a new dict by pulling each mapped attribute out of `resource`.

    `mapping` maps target attribute names to source attr_path expressions
    understood by `pull_attr`; attributes that resolve to None are omitted.
    """
    pulled = ((target, pull_attr(resource, source)) for target, source in mapping.items())
    return {target: value for target, value in pulled if value is not None}
# ## Map medication request to medication statement
#
# The keys of `medication_request_to_medication_statement` are medication statement attributes, its values are medication request attributes.
# Field mapping: keys are MedicationStatement attributes, values are attr_path
# expressions (see pull_attr) evaluated against a MedicationRequest dict.
medication_request_to_medication_statement=dict(
    id='id', meta='meta', implicitRules='implicitRules', language='language', text='text', contained='contained', extension='extension', modifierExtension='modifierExtension',
    identifier='identifier',
    # basedOn='id', #TODO: make this a reference
    partOf='partOf',
    status='status',
    statusReason='statusReason',
    category='category',
    medicationCodeableConcept='medicationCodeableConcept',
    medicationReference='medicationReference',
    subject='subject',
    context='encounter',
    # effectiveDateTime # might be better to leave this blank as we have dosage
    effectivePeriod='dosageInstruction.timing', # might be better to leave this blank as we have dosage
    dateAsserted='authoredOn',
    informationSource='requester',
    # derivedFrom='id', #TODO: make this a reference
    reasonCode='reasonCode',
    reasonReference='reasonReference',
    note='note',
    dosage='dosageInstruction'
)
def transform_medication_request_to_medication_statement(resource):
    """Map a MedicationRequest dict onto a MedicationStatement-shaped dict."""
    statement = transform(resource, medication_request_to_medication_statement)
    # TODO: use an absolute URL; with a full bundle entry (not just a medication
    # request) we could use fullUrl instead.
    request_ref = [dict(reference=f'MedicationRequest/{resource["id"]}')]
    statement['basedOn'] = request_ref
    statement['derivedFrom'] = request_ref
    statement['resourceType'] = 'MedicationStatement'
    return statement
# Read a MedicationRequest that we can test with ↓
# we can pull a request out of a bundle ...
# medication_request = get_bundle_as_raw_json(settings['api_base'],'MedicationRequest')['entry'][1]['resource']
# ... or read a request by ID
# NOTE(review): hard-coded ID on a shared public test server — it may disappear.
medication_request = MedicationRequest.read('1465875', smart.server).as_json()
# ### Show how `transform` works
#
# We can create a `MedicationStatement` like instance from a `MedicationRequest` by copying fields from request to statement using `medication_request_to_medication_statement`
#
# Note: the result of this basic transform will be missing `derivedFrom` etc
# Basic mapping only — no derivedFrom/basedOn/resourceType yet.
tfm = transform(medication_request, medication_request_to_medication_statement)
tfm['subject']
# To populate all fields, we need to transform using `transform_medication_request_to_medication_statement`
tfm = transform_medication_request_to_medication_statement(medication_request)
tfm['subject']
# We should be able to use this dictionary to create a MedicationStatement that passes validation.
#
# Note: Calling `as_json` on the `MedicationStatement` causes validation to be run
str(MedicationStatement(tfm).as_json())[:100]
# If you'd like to see the full JSON of the test medication request of the medication statement that we created, uncomment and run the following cells.
#
# Note: `tfm` and `MedicationStatement(tfm).as_json()` should show us the same content - but fields might be ordered differently.
# +
# medication_request
# +
# tfm
# MedicationStatement(tfm).as_json()
# -
# ## Convert a bundle of MedicationRequest to a bundle of MedicationStatement
#
# we need a few more helper functions ↓
# +
def new_bundle():
    """Return a new, empty FHIR `Bundle` (as a dict) of type `collection`.

    The timestamp is the current UTC time rendered in the ISO-8601 "instant"
    form FHIR expects, e.g. ``2021-05-21T16:10:30Z``.
    """
    # local import: the notebook's import cell is elsewhere in the file
    from datetime import timezone
    # datetime.utcnow() returns a naive datetime and is deprecated in newer
    # Pythons; an aware UTC datetime renders to the exact same string here
    return dict(resourceType='Bundle',
                type='collection',
                timestamp=datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ'),
                entry=[])
def add_entry(bundle, resource, full_url=None):
    """Append *resource* to ``bundle['entry']``, optionally recording its fullUrl."""
    new_entry = {'resource': resource}
    if full_url is not None:
        new_entry['fullUrl'] = full_url
    bundle['entry'].append(new_entry)
# -
def pull_resources_from_bundle(bundle):
    """Return the resource held by each entry of *bundle*, as a list."""
    resources = []
    for bundle_entry in bundle['entry']:
        resources.append(bundle_entry['resource'])
    return resources
# now we can create a bundle of `MedicationStatement`s from a bundle of `MedicationRequest`s
bundle = new_bundle()
# fetch every MedicationRequest from the server, transform each one to a
# MedicationStatement, then add them all to the new bundle
resources = pull_resources_from_bundle(get_bundle_as_raw_json(settings['api_base'], 'MedicationRequest'))
for r in [transform_medication_request_to_medication_statement(r) for r in resources]:
    add_entry(bundle, r)
# uncomment and run the following cell if you want to see the bundle we just created
# +
# bundle
# -
# ## Map medication dispense to medication statement
#
# keys are MedicationStatement field names; values are the (dotted) paths of the
# MedicationDispense fields they are copied from
medication_dispense_to_medication_statement=dict(
    id='id', meta='meta', implicitRules='implicitRules', language='language', text='text', contained='contained', extension='extension', modifierExtension='modifierExtension',
    identifier='identifier',
    basedOn='authorizingPrescription',
    partOf='partOf',
    status='status',
    statusReason='statusReason',
    category='category',
    medicationCodeableConcept='medicationCodeableConcept',
    medicationReference='medicationReference',
    subject='subject',
    context='context',
    # effectiveDateTime # might be better to leave this blank as we have dosage
    effectivePeriod='dosageInstruction.timing', # might be better to leave this blank as we have dosage
    # dateAsserted # pull from event history?
    informationSource='performer',
    # derivedFrom='id', #TODO: make this a reference
    # reasonCode
    # reasonReference
    note='note',
    dosage='dosageInstruction'
)
def transform_medication_dispense_to_medication_statement(resource):
    """Build a MedicationStatement-shaped dict from a MedicationDispense dict.

    Copies mapped fields via `transform`, adds a `derivedFrom` reference back
    to the source dispense, wraps `basedOn` in a list if needed, and sets the
    `resourceType`.
    """
    statement = transform(resource, medication_dispense_to_medication_statement)
    # TODO: use an absolute URL; if we had a bundle entry (not just a
    # medication dispense) we could take the reference from fullUrl instead
    statement['derivedFrom'] = [dict(reference=f'MedicationDispense/{resource["id"]}')]
    if 'basedOn' in statement and not isinstance(statement['basedOn'], list):
        # basedOn is expected to be a list of references
        statement['basedOn'] = [statement['basedOn']]
    statement['resourceType'] = 'MedicationStatement'
    return statement
# ## Map medication administration dosage to dosage
#
# field map used to convert a MedicationAdministrationDosage to a Dosage;
# note `doseQuantity` is copied from the administration dosage's `dose` field
medication_administration_dose_to_dose=dict(
    text='text',
    site='site',
    route='route',
    method='method',
    doseQuantity='dose',
    rateRatio='rateRatio',
    rateQuantity='rateQuantity'
)
# ## Map medication administration to medication statement
#
# keys are MedicationStatement field names; values are the MedicationAdministration
# fields they are copied from (dosage is intentionally absent - see the last line)
medication_administration_to_medication_statement=dict(
    id='id', meta='meta', implicitRules='implicitRules', language='language', text='text', contained='contained', extension='extension', modifierExtension='modifierExtension',
    identifier='identifier',
    basedOn='request',
    partOf='partOf',
    status='status',
    statusReason='statusReason',
    category='category',
    medicationCodeableConcept='medicationCodeableConcept',
    medicationReference='medicationReference',
    subject='subject',
    context='context',
    effectiveDateTime='effectiveDateTime',
    effectivePeriod='effectivePeriod',
    # dateAsserted # pull from event history?
    informationSource='performer',
    # derivedFrom='id', #TODO: make this a reference
    # reasonCode
    reasonReference='reasonReference',
    note='note',
    # dosage='dosage' # need to map MedicationAdministrationDosage to Dosage
)
def transform_medication_administration_to_medication_statement(resource):
    """Build a MedicationStatement-shaped dict from a MedicationAdministration dict.

    Copies mapped fields via `transform`, converts the administration's dosage
    to a Dosage, adds a `derivedFrom` reference back to the source resource,
    wraps `basedOn` in a list if needed, and sets the `resourceType`.
    """
    result = transform(resource, medication_administration_to_medication_statement)
    # The field map deliberately skips `dosage` because a
    # MedicationAdministrationDosage is not a valid Dosage - map it here.
    # BUGFIX: check `resource`, not `result`; the field map never copies
    # `dosage`, so the old `'dosage' in result` test could never be true
    # and the dose conversion was dead code.
    if 'dosage' in resource:
        result['dosage'] = [transform(resource['dosage'], medication_administration_dose_to_dose)]
    # TODO: use absolute URL
    # if we had a bundle entry (not just a medication administration) we could use fullUrl
    result['derivedFrom'] = [dict(reference=f'MedicationAdministration/{resource["id"]}')]
    if 'basedOn' in result and not isinstance(result['basedOn'], list):
        # basedOn is expected to be a list of references
        result['basedOn'] = [result['basedOn']]
    result['resourceType'] = 'MedicationStatement'
    return result
# put all of the transform functions into a dictionary ↓ to simplify creation of the next bundle
to_medication_statement_functions = dict(
    MedicationAdministration=transform_medication_administration_to_medication_statement,
    MedicationDispense=transform_medication_dispense_to_medication_statement,
    MedicationRequest=transform_medication_request_to_medication_statement,
    MedicationStatement=lambda r: r # do nothing for statement - it's already the target type
)
# # Create a `Bundle` after mapping `MedicationAdministration`, `MedicationDispense` and `MedicationRequest` to `MedicationStatement`
#
# The following cell
# - reads all 4 Medication* resources,
# - converts them to `MedicationStatement` and
# - puts them all in the same bundle
#
# Note: We're reading/combining medication resources for multiple subjects to
# - show that we could do this if we wanted to
# - and to explore a random-ish selection of data - to help understand the kind of resource validation errors we might have to deal with
bundle = new_bundle()
for resource_type in to_medication_statement_functions:
    resources = pull_resources_from_bundle(get_bundle_as_raw_json(settings['api_base'], resource_type))
    for r in [to_medication_statement_functions[resource_type](r) for r in resources]:
        add_entry(bundle, r)
# uncomment and run the following cell if you want to see the bundle we just created
# +
# bundle
# -
# ## Have we made a valid bundle?
#
# We can use the python client to validate the bundle we just created with: `Bundle(bundle)` but ...
#
# we have entries with missing mandatory fields (like medication* and subject) - the next cell removes these invalid resources from the bundle
# +
# keep only entries that carry one of the mandatory medication[x] fields ...
bundle['entry']=[e for e in bundle['entry'] if
                 'medicationCodeableConcept' in e['resource'] or
                 'medicationReference' in e['resource']]
# ... and that have a subject
bundle['entry']=[e for e in bundle['entry'] if
                 'subject' in e['resource']]
# +
# we can use this kind of code to find and look at invalid resources
# for i, e in enumerate(bundle['entry']):
#     r = e['resource']
#     if 'medicationCodeableConcept' not in r and 'medicationReference' not in r:
#         print(i, r)
# -
# now that the invalid resources are removed, we should see just a few warnings
# (the False argument makes validation non-strict, so issues surface as warnings)
Bundle(bundle, False)
# here's the informationSource part of the resource that caused the warnings above
bundle['entry'][10]['resource']['informationSource']
# # Transform per-subject bundle of medication resources
#
# The following function
# - takes a bundle optionally containing a subject and any number of Medication* resources and
# - returns a bundle containing the same entries after transforming all Medication* resources to MedicationStatement
def transform_bundle_to_medication_statement(bundle):
    """Return a new bundle in which every Medication* resource of *bundle*
    has been converted to a MedicationStatement.

    Resources of any other type (including existing MedicationStatements)
    are carried over unchanged.
    """
    # only admin, dispense and request need converting - everything else
    # passes through untouched
    converters = {
        'MedicationAdministration': transform_medication_administration_to_medication_statement,
        'MedicationDispense': transform_medication_dispense_to_medication_statement,
        'MedicationRequest': transform_medication_request_to_medication_statement,
    }
    out_bundle = new_bundle()
    for resource in pull_resources_from_bundle(bundle):
        convert = converters.get(resource['resourceType'])
        if convert is None:
            add_entry(out_bundle, resource)
        else:
            add_entry(out_bundle, convert(resource))
    return out_bundle
# If we can query a subject and get back all medication resources, we could do something like ↓
bundle_in = get_bundle_as_raw_json(settings['api_base'], 'Patient', '?_id=1732972&_revinclude=%2A')
# bundle_in should give us a bundle containing one patient and some MedicationStatements
# so we're not really doing any transformations
transformed_bundle = transform_bundle_to_medication_statement(bundle_in)
# uncomment and run the following cell if you want to see the bundle we just created
# +
# transformed_bundle
# -
# the following example uses the patient bundle with all medication types so we will need to do some transformations
url = 'https://raw.githubusercontent.com/pete88b/smart-on-fhir-client-py-demo/main/patient-bundle-with-all-med-types.json'
bundle_in = requests.get(url).json()
# if you already have the file locally, read it instead of downloading:
# with open('patient-bundle-with-all-med-types.json') as f:
#     bundle_in = json.load(f)
transformed_bundle = transform_bundle_to_medication_statement(bundle_in)
# save the bundle we just created to file
#
# Note: you can download this file via the file explorer in colab
Path('data').mkdir(exist_ok=True)
with open('data/transformed-patient-bundle-with-all-med-types.json', 'w') as f:
    json.dump(transformed_bundle, f, indent=2)
# uncomment and run the following cell if you want to see the bundle we just created
transformed_bundle
| vulcan_medication_bundle_getting_started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#      
#      
#      
#      
#      
#    
# [Home Page](../../START_HERE.ipynb)
#
#
# # Challenge - Gene Expression Classification
#
#
# ### Introduction
#
# This notebook walks through an end-to-end GPU machine learning workflow where cuDF is used for processing the data and cuML is used to train machine learning models on it.
#
# After completing this exercise, you will be able to use cuDF to load data from disk, combine tables, scale features, use one-hot encoding and even write your own GPU kernels to efficiently transform feature columns. Additionally you will learn how to pass this data to cuML, and how to train ML models on it. The trained model is saved and it will be used for prediction.
#
# It is not required that the user is familiar with cuDF or cuML. Since our aim is to go from ETL to ML training, a detailed introduction is out of scope for this notebook. We recommend [Introduction to cuDF](../../CuDF/01-Intro_to_cuDF.ipynb) for additional information.
#
# ### Problem Statement:
# We are trying to classify patients with acute myeloid leukemia (AML) and acute lymphoblastic leukemia (ALL) using machine learning (classification) algorithms. This dataset comes from a proof-of-concept study published in 1999 by Golub et al. It showed how new cases of cancer could be classified by gene expression monitoring (via DNA microarray) and thereby provided a general approach for identifying new cancer classes and assigning tumors to known classes.
#
# Here is the dataset link: https://www.kaggle.com/crawford/gene-expression.
# ## Here is the list of exercises and modules to work on in the lab:
#
# - Convert the serial Pandas computations to CuDF operations.
# - Utilize CuML to accelerate the machine learning models.
# - Experiment with Dask to create a cluster and distribute the data and scale the operations.
#
# You will start writing code from <a href='#dask1'>here</a>, but make sure you execute the data processing blocks to understand the dataset.
#
#
# ### 1. Data Processing
#
# The first step is downloading the dataset and putting it in the data directory, for using in this tutorial. Download the dataset here, and place it in (host/data) folder. Now we will import the necessary libraries.
import numpy as np; print('NumPy Version:', np.__version__)
import pandas as pd
import sys
import sklearn; print('Scikit-Learn Version:', sklearn.__version__)
from sklearn import preprocessing
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, auc
from sklearn.preprocessing import OrdinalEncoder, StandardScaler
import cudf
import cupy
# import for model building
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error
from cuml.metrics.regression import r2_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn import linear_model
from sklearn.metrics import accuracy_score
from sklearn import model_selection, datasets
from cuml.dask.common import utils as dask_utils
from dask.distributed import Client, wait
from dask_cuda import LocalCUDACluster
import dask_cudf
from cuml.dask.ensemble import RandomForestClassifier as cumlDaskRF
from sklearn.ensemble import RandomForestClassifier as sklRF
# We'll read the dataframe into y from the csv file, view its dimensions and observe the first 5 rows of the dataframe.
# %%time
y = pd.read_csv('../../../data/actual.csv')
print(y.shape)
y.head()
# Let's convert our target variable categories to numbers.
y['cancer'].value_counts()
# Recode label to numeric: ALL -> 0, AML -> 1
y = y.replace({'ALL':0,'AML':1})
labels = ['ALL', 'AML'] # for plotting convenience later on
# Read the training and test data provided in the challenge from the data folder. View their dimensions.
# +
# Import training data
df_train = pd.read_csv('../../../data/data_set_ALL_AML_train.csv')
print(df_train.shape)
# Import testing data
df_test = pd.read_csv('../../../data/data_set_ALL_AML_independent.csv')
print(df_test.shape)
# -
# Observe the first few rows of the train dataframe and the data format.
df_train.head()
# Observe the first few rows of the test dataframe and the data format.
df_test.head()
# As we can see, the data set has categorical values but only for the columns starting with "call". We won't use the columns having categorical values, but remove them.
# +
# Remove "call" columns from training and testing data by keeping every other column
train_to_keep = [col for col in df_train.columns if "call" not in col]
test_to_keep = [col for col in df_test.columns if "call" not in col]
X_train_tr = df_train[train_to_keep]
X_test_tr = df_test[test_to_keep]
# -
# Rename the columns and reindex for formatting purposes and ease in reading the data.
# +
# training set holds patients 1-38
train_columns_titles = ['Gene Description', 'Gene Accession Number', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
                        '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25',
                        '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38']
X_train_tr = X_train_tr.reindex(columns=train_columns_titles)
# +
# test set holds patients 39-72
test_columns_titles = ['Gene Description', 'Gene Accession Number','39', '40', '41', '42', '43', '44', '45', '46',
                       '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59',
                       '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72']
X_test_tr = X_test_tr.reindex(columns=test_columns_titles)
# -
# We will take the transpose of the dataframe so that each row is a patient and each column is a gene.
# +
X_train = X_train_tr.T
X_test = X_test_tr.T
print(X_train.shape)
X_train.head()
# -
# Just cleaning the data, removing extra columns and converting to numerical values.
# +
# Clean up the column names for training data
X_train.columns = X_train.iloc[1]
X_train = X_train.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric)
# Clean up the column names for testing data
X_test.columns = X_test.iloc[1]
X_test = X_test.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric)
print(X_train.shape)
print(X_test.shape)
X_train.head()
# -
# We have the 38 patients as rows in the training set, and the other 34 as rows in the testing set. Each of those datasets has 7129 gene expression features. But we haven't yet associated the target labels with the right patients. You will recall that all the labels are stored in a single dataframe. Let's split the data so that the patients and labels match up across the training and testing dataframes. We are now splitting the data into train and test sets. We will subset the first 38 patients' cancer types.
# +
X_train = X_train.reset_index(drop=True)
y_train = y[y.patient <= 38].reset_index(drop=True)
# Subset the rest for testing
X_test = X_test.reset_index(drop=True)
y_test = y[y.patient > 38].reset_index(drop=True)
# -
# Generate descriptive statistics to analyse the data further.
X_train.describe()
# Clearly there is some variation in the scales across the different features. Many machine learning models work much better with data that's on the same scale, so let's create a scaled version of the dataset.
# +
# NOTE(review): astype(float, 64) passes 64 as DataFrame.astype's `copy`
# argument, not as a precision; astype(float) alone already gives float64 -
# TODO confirm and simplify
X_train_fl = X_train.astype(float, 64)
X_test_fl = X_test.astype(float, 64)
# Apply the same scaling to both datasets
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train_fl)
X_test = scaler.transform(X_test_fl) # note that we transform rather than fit_transform
# -
# <a id='dask1'></a>
#
# ### 2. Conversion to CuDF Dataframe
# Convert the pandas dataframes to CuDF dataframes to carry out the further CuML tasks.
# +
# Modify the code in this cell (exercise): the DataFrames below are left empty
# on purpose - fill them from the pandas data prepared above
# (hint: cudf.DataFrame.from_pandas(...) / cudf.Series(...))
# %%time
X_cudf_train = cudf.DataFrame() #Pass X train dataframe here
X_cudf_test = cudf.DataFrame() #Pass X test dataframe here
y_cudf_train = cudf.DataFrame() #Pass y train dataframe here
#y_cudf_test = cudf.Series(y_test.values) #Pass y test dataframe here
# -
# ### 3. Model Building
# #### Dask Integration
#
# We will try using the Random Forests Classifier and implement using CuML and Dask.
# #### Start Dask cluster
# +
# Modify the code in this cell (exercise): wire the cluster into the client
# This will use all GPUs on the local host by default
cluster = LocalCUDACluster() #Set 1 thread per worker using arguments to cluster
c = Client() #Pass the cluster as an argument to Client
# Query the client for all connected workers
workers = c.has_what().keys()
n_workers = len(workers)
n_streams = 8 # Performance optimization
# -
# #### Define Parameters
#
# In addition to the number of examples, random forest fitting performance depends heavily on the number of columns in a dataset and (especially) on the maximum depth to which trees are allowed to grow. Lower `max_depth` values can greatly speed up fitting, though going too low may reduce accuracy.
# Random Forest building parameters
max_depth = 12
n_bins = 16
n_trees = 1000
# #### Distribute data to worker GPUs
# use 32-bit dtypes - what the GPU random forest implementation works with
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
# +
# one data partition per worker
n_partitions = n_workers
def distribute(X, y):
    """Move (X, y) onto the Dask workers as persisted dask-cuDF collections.

    Converts the host data to cuDF, splits each collection into
    ``n_partitions`` partitions (one per worker) and persists them so the
    data stays cached in active memory.
    """
    # first convert to cudf (with real data, you would likely load in cuDF format to start)
    features_gpu = cudf.DataFrame.from_pandas(pd.DataFrame(X))
    targets_gpu = cudf.Series(y)
    # partition with Dask - each worker will train on a 1/n_partitions fraction of the data
    features_dask = dask_cudf.from_cudf(features_gpu, npartitions=n_partitions)
    targets_dask = dask_cudf.from_cudf(targets_gpu, npartitions=n_partitions)
    # persist to cache the data in active memory
    features_dask, targets_dask = dask_utils.persist_across_workers(
        c, [features_dask, targets_dask], workers=workers)
    return features_dask, targets_dask
# +
# Modify the code in this cell (exercise): pass the train/test arrays to distribute()
X_train_dask, y_train_dask = distribute() #Pass train data as arguments here
X_test_dask, y_test_dask = distribute() #Pass test data as arguments here
# -
# #### Create the Scikit-learn model
#
# For comparison we train a single-node scikit-learn random forest on the CPU.
# +
# %%time
# Use all available CPU cores
skl_model = sklRF(max_depth=max_depth, n_estimators=n_trees, n_jobs=-1)
skl_model.fit(X_train, y_train.iloc[:,1])
# -
# #### Train the distributed cuML model
# +
# Modify the code in this cell (exercise): fit on the distributed training data
# %%time
cuml_model = cumlDaskRF(max_depth=max_depth, n_estimators=n_trees, n_bins=n_bins, n_streams=n_streams)
cuml_model.fit() # Pass X and y train dask data here
wait(cuml_model.rfs) # Allow asynchronous training tasks to finish
# -
# #### Predict and check accuracy
# +
# Modify the code in this cell (exercise): predict on the distributed test data
skl_y_pred = skl_model.predict(X_test)
cuml_y_pred = cuml_model.predict().compute().to_array() #Pass the X test dask data as argument here
# Due to randomness in the algorithm, you may see slight variation in accuracies
print("SKLearn accuracy: ", accuracy_score(y_test.iloc[:,1], skl_y_pred))
print("CuML accuracy: ", accuracy_score()) #Pass the y test dask data and predicted values from CuML model as argument here
# -
# -
# <a id='ex4'></a><br>
#
# ### 4. CONCLUSION
# Let's compare the performance of our solution!
# | Algorithm | Implementation | Accuracy | Time | Algorithm | Implementation | Accuracy | Time |
# | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- |
#
# Write down your observations and compare the CuML and Scikit learn scores. They should be approximately equal. We hope that you found this exercise exciting and beneficial in understanding RAPIDS better. Share your highest accuracy and try to use the unique features of RAPIDS for accelerating your data science pipelines. Don't restrict yourself to the previously explained concepts, but use the documentation to apply more models and functions and achieve the best results. Jump over to the next notebook for our sample solution.
#
# ### 5. References
#
#
#
# <p xmlns:dct="http://purl.org/dc/terms/">
# <a rel="license"
# href="http://creativecommons.org/publicdomain/zero/1.0/">
# <center><img src="http://i.creativecommons.org/p/zero/1.0/88x31.png" style="border-style: none;" alt="CC0" /></center>
# </a>
#
# </p>
#
#
# - The dataset is licensed under a CC0: Public Domain license.
#
# - Molecular Classification of Cancer: Class Discovery and Class Prediction by Gene Expression. Science 286:531-537. (1999). Published: 1999.10.14. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
#
# ## Licensing
#
# This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
#      
#      
#      
#      
#      
#    
# [Home Page](../../START_HERE.ipynb)
#
| ai/RAPIDS/English/Python/jupyter_notebook/Challenge/Gene-Expression-Classification/Challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="V0cRyhp8YWl2" executionInfo={"status": "ok", "timestamp": 1621615830328, "user_tz": 240, "elapsed": 12187, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgfcDr2KHdiGwdZp-DCPyR3RvG0VfxRFktQ-0JlgQ=s64", "userId": "13669639583442617912"}} outputId="0b9549cd-37b6-4903-bc6a-aecad807d762"
# !pip install neuron
# !pip install netpyne
# !pip install inspyred
import matplotlib
# + id="QzblvwEt9Ovm" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1621615833377, "user_tz": 240, "elapsed": 205, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgfcDr2KHdiGwdZp-DCPyR3RvG0VfxRFktQ-0JlgQ=s64", "userId": "13669639583442617912"}} outputId="53bb3ac1-a128-40a6-a25c-d05037dd94b2"
# rm -r netpyne-course-2021
# + colab={"base_uri": "https://localhost:8080/"} id="2RjvKVYDyuXM" executionInfo={"status": "ok", "timestamp": 1621615835288, "user_tz": 240, "elapsed": 796, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgfcDr2KHdiGwdZp-DCPyR3RvG0VfxRFktQ-0JlgQ=s64", "userId": "13669639583442617912"}} outputId="7a689579-6381-4b97-91e4-fb31e3228bee"
# !git clone --single-branch --branch evol https://github.com/suny-downstate-medical-center/netpyne-course-2021.git
# + colab={"base_uri": "https://localhost:8080/"} id="xAt-SfNS8rQt" executionInfo={"status": "ok", "timestamp": 1621615838144, "user_tz": 240, "elapsed": 99, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgfcDr2KHdiGwdZp-DCPyR3RvG0VfxRFktQ-0JlgQ=s64", "userId": "13669639583442617912"}} outputId="8f15b253-0cdf-4a4d-add1-1e5666938673"
# cd netpyne-course-2021
# + colab={"base_uri": "https://localhost:8080/"} id="v8BUc8UtTxI2" executionInfo={"status": "ok", "timestamp": 1621615839665, "user_tz": 240, "elapsed": 168, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgfcDr2KHdiGwdZp-DCPyR3RvG0VfxRFktQ-0JlgQ=s64", "userId": "13669639583442617912"}} outputId="804d5188-e5f4-43ef-de55-481f75fa5030"
# ls
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="f0P--qg5YUT6" executionInfo={"status": "error", "timestamp": 1621615972335, "user_tz": 240, "elapsed": 131343, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgfcDr2KHdiGwdZp-DCPyR3RvG0VfxRFktQ-0JlgQ=s64", "userId": "13669639583442617912"}} outputId="e63a68dd-05ae-4c36-f46a-eaf7b6550e02"
from netpyne import specs, sim
# %matplotlib inline
from netpyne import specs
from netpyne.batch import Batch
'''Example of evolutionary algorithm optimization of a network using NetPyNE.

Two example configurations are provided: 'simple' and 'complex'.
In 'simple', 3 parameters are optimized to match target firing rates in 2 populations.
In 'complex', 6 parameters are optimized to match target firing rates in 6 populations.
To run use: mpiexec -np [num_cores] nrniv -mpi batchRun.py
'''
def batchEvol(networkType):
    """Run an evolutionary-algorithm parameter search over a NetPyNE network.

    Parameters
    ----------
    networkType : str
        'simple'  - optimize 3 parameters to match target rates in 2 populations
        'complex' - optimize 6 parameters to match target rates in 6 populations

    Raises
    ------
    ValueError
        If `networkType` is neither 'simple' nor 'complex'.
    """
    # parameter space to explore: each entry is a [min, max] search range
    if networkType == 'simple':
        ## simple net
        params = specs.ODict()
        params['prob'] = [0.01, 0.5]
        params['weight'] = [0.001, 0.1]
        params['delay'] = [1, 20]
        # per-population fitness targets: 'target' firing rate (Hz), 'width'
        # scales the penalty, rates below 'min' are assigned maxFitness
        pops = {}
        pops['S'] = {'target': 5, 'width': 2, 'min': 2}
        pops['M'] = {'target': 15, 'width': 2, 'min': 0.2}
    elif networkType == 'complex':
        # complex net
        params = specs.ODict()
        params['probEall'] = [0.05, 0.2] # 0.1
        params['weightEall'] = [0.0025, 0.0075] #5.0
        params['probIE'] = [0.2, 0.6] #0.4
        params['weightIE'] = [0.0005, 0.002]
        params['probLengthConst'] = [100,200]
        params['stimWeight'] = [0.05, 0.2]
        pops = {}
        pops['E2'] = {'target': 5, 'width': 2, 'min': 1}
        pops['I2'] = {'target': 10, 'width': 5, 'min': 2}
        pops['E4'] = {'target': 30, 'width': 10, 'min': 1}
        pops['I4'] = {'target': 10, 'width': 3, 'min': 2}
        pops['E5'] = {'target': 40, 'width': 4, 'min': 1}
        pops['I5'] = {'target': 25, 'width': 5, 'min': 2}
    else:
        # fail fast instead of hitting a NameError on `params` further down
        raise ValueError(f"networkType must be 'simple' or 'complex', got {networkType!r}")

    # arguments passed through to the fitness function on every evaluation
    fitnessFuncArgs = {}
    fitnessFuncArgs['pops'] = pops
    fitnessFuncArgs['maxFitness'] = 1000

    def fitnessFunc(simData, **kwargs):
        """Mean exponential penalty of each population's distance from its target rate."""
        import numpy as np
        pops = kwargs['pops']
        maxFitness = kwargs['maxFitness']
        # penalty grows exponentially with distance from target (scaled by
        # width) and is capped at maxFitness; populations firing at or below
        # their 'min' rate get the maximum penalty
        popFitness = [min(np.exp( abs(v['target'] - simData['popRates'][k]) / v['width']), maxFitness)
                if simData["popRates"][k]>v['min'] else maxFitness for k,v in pops.items()]
        fitness = np.mean(popFitness)
        popInfo = '; '.join(['%s rate=%.1f fit=%1.f'%(p,r,f) for p,r,f in zip(list(simData['popRates'].keys()), list(simData['popRates'].values()), popFitness)])
        print(' '+popInfo)
        return fitness

    # create Batch object with parameters to modify, and specifying files to use
    b = Batch(params=params)

    # Set output folder, evolutionary method, and run configuration
    # BUGFIX: the label was hard-coded to 'simple', which sent 'complex' runs
    # to the './simple' folder; use the network type so each run is labelled
    # and saved separately
    b.batchLabel = networkType
    b.saveFolder = './'+b.batchLabel
    b.method = 'evol'
    b.runCfg = {
        'type': 'mpi_bulletin',#'hpc_slurm',
        'script': 'init.py',
        # options required only for hpc
        'mpiCommand': 'mpirun',
        'nodes': 1,
        'coresPerNode': 2,
        'allocation': 'default',
        'email': '<EMAIL>',
        'reservation': None,
        'folder': '/home/salvadord/evol'
        #'custom': 'export LD_LIBRARY_PATH="$HOME/.openmpi/lib"' # only for conda users
    }
    b.evolCfg = {
        'evolAlgorithm': 'custom',
        'fitnessFunc': fitnessFunc, # fitness expression (should read simData)
        'fitnessFuncArgs': fitnessFuncArgs,
        'pop_size': 6,
        'num_elites': 1, # keep this number of parents for next generation if they are fitter than children
        'mutation_rate': 0.4,
        'crossover': 0.5,
        'maximize': False, # maximize fitness function?
        'max_generations': 4,
        'time_sleep': 5, # wait this time before checking again if sim is completed (for each generation)
        'maxiter_wait': 40, # max number of times to check if sim is completed (for each generation)
        'defaultFitness': 1000 # set fitness value in case simulation time is over
    }
    # Run batch simulations
    b.run()
# Main code: start the evolutionary batch when run as a script
if __name__ == '__main__':
    batchEvol('simple') # 'simple' or 'complex'
# + id="aij9ZSq0UnLd"
| netpyne/tutorials/netpyne-course-2021/netpyne_batch_evol.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
>>> # Find the Least Common Multiple of Two Divisors
>>> # i.e. the smallest positive integer that both divisors divide evenly
>>> counting = True
>>> first_divisor = 24
>>> second_divisor = 36
>>> i = 1
>>> # count upward from 1; the first i divisible by both divisors is the LCM
>>> while counting:
>>> if i % first_divisor == 0 and i % second_divisor == 0:
>>> print('The Least Common Multiple of', first_divisor,
 'and', second_divisor, 'is', i, '.')
>>> break
>>> i += 1
| 01_Workshop-master/Chapter01/Activity04/Activity04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
# + active=""
# iter: 0 , chi= 36048.3 , Lambda= 0.001
# iter: 1 , chi= 30015.5 , Lambda= 699.051
# iter: 2 , chi= 13421.2 , Lambda= 1864.14
# iter: 3 , chi= 7273.96 , Lambda= 1242.76
# iter: 4 , chi= 269.255 , Lambda= 414.252
# iter: 5 , chi= 105.473 , Lambda= 138.084
# iter: 6 , chi= 100.845 , Lambda= 46.028
# iter: 7 , chi= 95.9439 , Lambda= 15.3427
# iter: 8 , chi= 92.3017 , Lambda= 5.11423
# iter: 9 , chi= 91.442 , Lambda= 1.70474
# iter: 10 , chi= 91.3963 , Lambda= 0.568247
# iter: 11 , chi= 91.3959 , Lambda= 0.378832
#
# -
# Lambda values from the Levenberg-Marquardt log above, in iteration order
lm_lambda_history = [0.001, 699.051, 1864.14, 1242.76, 414.252, 138.084,
                     46.028, 15.3427, 5.11423, 1.70474, 0.568247, 0.378832]
plt.plot(lm_lambda_history)
plt.ylabel('Lambda')
plt.show()
| Week3-LM-Optimization/Plot_Lambda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/satyajitghana/ProjektDepth/blob/master/notebooks/06_DepthModel_ModelZoo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="QjK5_PAvML5V" colab_type="text"
# # DepthModel - Model Zoo
# + id="4fkKn9GbFE_c" colab_type="code" outputId="e2c0d51a-96f9-45f2-95da-a2c57fabc942" colab={"base_uri": "https://localhost:8080/", "height": 34}
# ! pip install funcy
# + id="vlS79vV4FKIc" colab_type="code" colab={}
import torch
import torch.nn as nn
import funcy
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
def summary(model, input_size, batch_size=-1, device="cuda"):
    """Print a Keras-style per-layer summary of a PyTorch model.

    Registers a forward hook on every leaf module, runs one dummy forward
    pass, then tabulates each layer's output shape and parameter count,
    plus rough memory estimates (assuming 4 bytes per element).

    Args:
        model: the nn.Module to inspect (must already live on `device`).
        input_size: one shape tuple (C, H, W), or a list of them for a
            multi-input model; the batch dimension is excluded.
        batch_size: batch size to display in the table (-1 = unspecified).
        device: "cuda" or "cpu"; controls the dtype of the dummy input.
    """
    def register_hook(module):
        # Hook that records this module's I/O shapes and parameter count
        # into the enclosing `summary` dict, keyed "ClassName-idx".
        def hook(module, input, output):
            class_name = str(module.__class__).split(".")[-1].split("'")[0]
            module_idx = len(summary)
            m_key = "%s-%i" % (class_name, module_idx + 1)
            summary[m_key] = OrderedDict()
            summary[m_key]["input_shape"] = list(input[0].size())
            summary[m_key]["input_shape"][0] = batch_size
            if isinstance(output, (list, tuple)):
                # Multi-output modules: record one shape per output tensor.
                summary[m_key]["output_shape"] = [
                    [-1] + list(o.size())[1:] for o in output
                ]
            else:
                summary[m_key]["output_shape"] = list(output.size())
                summary[m_key]["output_shape"][0] = batch_size
            # Count weight + bias elements; "trainable" reflects the weight only.
            params = 0
            if hasattr(module, "weight") and hasattr(module.weight, "size"):
                params += torch.prod(torch.LongTensor(list(module.weight.size())))
                summary[m_key]["trainable"] = module.weight.requires_grad
            if hasattr(module, "bias") and hasattr(module.bias, "size"):
                params += torch.prod(torch.LongTensor(list(module.bias.size())))
            summary[m_key]["nb_params"] = params
        # Only hook leaf modules (skip containers and the model itself).
        if (
            not isinstance(module, nn.Sequential)
            and not isinstance(module, nn.ModuleList)
            and not (module == model)
        ):
            hooks.append(module.register_forward_hook(hook))
    device = device.lower()
    assert device in [
        "cuda",
        "cpu",
    ], "Input device is not valid, please specify 'cuda' or 'cpu'"
    if device == "cuda" and torch.cuda.is_available():
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor
    # multiple inputs to the network
    if isinstance(input_size, tuple):
        input_size = [input_size]
    # batch_size of 2 for batchnorm
    x = [torch.rand(2, *in_size).type(dtype) for in_size in input_size]
    # print(type(x[0]))
    # create properties
    summary = OrderedDict()
    hooks = []
    # register hook
    model.apply(register_hook)
    # make a forward pass
    # print(x.shape)
    model(*x)
    # remove these hooks
    for h in hooks:
        h.remove()
    print("----------------------------------------------------------------")
    line_new = "{:>20} {:>25} {:>15}".format("Layer (type)", "Output Shape", "Param #")
    print(line_new)
    print("================================================================")
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total_output = 0
    for layer in summary:
        # input_shape, output_shape, trainable, nb_params
        line_new = "{:>20} {:>25} {:>15}".format(
            layer,
            str(summary[layer]["output_shape"]),
            "{0:,}".format(summary[layer]["nb_params"]),
        )
        # Accumulate total activation count (funcy.flatten handles the
        # nested lists produced by multi-output modules).
        total_output += np.prod(list(funcy.flatten(summary[layer]["output_shape"])))
        print(line_new)
    # assume 4 bytes/number (float on cuda).
    total_input_size = abs(sum([np.prod(input_item) for input_item in input_size]) * batch_size * 4. / (1024 ** 2.))
    total_output_size = abs(2. * total_output * 4. / (1024 ** 2.))  # x2 for gradients
    total_params_size = abs(total_params * 4. / (1024 ** 2.))
    total_size = total_params_size + total_output_size + total_input_size
    print("================================================================")
    print("Total params: {0:,}".format(total_params))
    print("Trainable params: {0:,}".format(trainable_params))
    print("Non-trainable params: {0:,}".format(total_params - trainable_params))
    print("----------------------------------------------------------------")
    print("Input size (MB): %0.2f" % total_input_size)
    print("Forward/backward pass size (MB): %0.2f" % total_output_size)
    print("Params size (MB): %0.2f" % total_params_size)
    print("Estimated Total Size (MB): %0.2f" % total_size)
    print("----------------------------------------------------------------")
    # return summary
# + id="3q_prQlwBq0Z" colab_type="code" outputId="d2e1b480-c269-46ec-bb8a-7eb6f3b3944e" colab={"base_uri": "https://localhost:8080/", "height": 306}
# ! nvidia-smi
# + [markdown] id="R84_NKeqMOOx" colab_type="text"
# Here we make all the different models we could use with our dataset
# + id="k8X4wrrws05q" colab_type="code" colab={}
import torch
import torch.nn as nn
from tqdm.auto import tqdm
import gc
# + id="CY0PbvvJB8I7" colab_type="code" colab={}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# + [markdown] id="aNG0GjXL2CH5" colab_type="text"
# # Custom Unet - ResNet Backbone
# + [markdown] id="lrqqVBLpXuiC" colab_type="text"
# Note: All the ResNet blocks use the ResNetV2 architecture, i.e. pre-activated blocks
# + id="gJ520rBca7al" colab_type="code" colab={}
class ResDoubleConv(nn.Module):
    """Pre-activated (BN -> ReLU -> Conv3x3) x2 block, as used in ResNetV2."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        stages = [
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        # Both pre-activated convolutions applied in sequence.
        out = self.double_conv(x)
        return out
# + id="MouqhIgG8K-m" colab_type="code" outputId="fda2d4d5-b2c7-4136-ae35-e2e94083678e" colab={"base_uri": "https://localhost:8080/", "height": 340}
summary(ResDoubleConv(512, 1024).to(device), (512, 12, 12))
# + id="pkQoood2XV-L" colab_type="code" colab={}
class ResDownBlock(nn.Module):
    """Encoder stage of the ResNetV2-style UNet.

    Returns both the pooled feature map (input to the next stage) and the
    pre-pool residual output (used later as a skip connection).
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.double_conv = ResDoubleConv(in_channels, out_channels)
        # 1x1 projection so the identity path matches the residual's channels.
        self.proj_layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.down_sample = nn.MaxPool2d(2)

    def forward(self, input):
        shortcut = self.proj_layer(input)
        residual = self.double_conv(input) + shortcut
        # Halved-resolution tensor first, full-resolution skip second.
        return self.down_sample(residual), residual
# + id="aw_wwtDx8a-Z" colab_type="code" outputId="b17ad768-21d3-49e3-a4b0-0986cc0be885" colab={"base_uri": "https://localhost:8080/", "height": 408}
summary(ResDownBlock(256, 512).to(device), (256, 24, 24))
# + id="04MXRmtDXmz8" colab_type="code" colab={}
class ResUpBlock(nn.Module):
    """Decoder stage: PixelShuffle upsampling + residual double conv.

    Four parallel PixelShuffle(2) ops each map C channels to C/4 at 2x
    spatial size, so concatenating the four restores the channel count.
    The result is concatenated with the encoder skip (and optionally a
    depth-decoder skip) before the residual double conv.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.upsample_1 = nn.PixelShuffle(2)
        self.upsample_2 = nn.PixelShuffle(2)
        self.upsample_3 = nn.PixelShuffle(2)
        self.upsample_4 = nn.PixelShuffle(2)
        # NOTE(review): self.upscale is never used in forward; kept so the
        # module's attribute layout stays identical.
        self.upscale = nn.Upsample(scale_factor=2, mode='bilinear')
        self.double_conv = ResDoubleConv(in_channels, out_channels)
        self.proj_layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, down_input, skip_input, decoder_input=None):
        shuffles = (self.upsample_1, self.upsample_2, self.upsample_3, self.upsample_4)
        x = torch.cat([shuffle(down_input) for shuffle in shuffles], dim=1)
        x = torch.cat([x, skip_input], dim=1)
        if decoder_input is not None:
            x = torch.cat([x, decoder_input], dim=1)
        shortcut = self.proj_layer(x)
        return self.double_conv(x) + shortcut
# + id="sM8oY9nP8ITK" colab_type="code" outputId="b4b426c4-4dae-4441-e11b-ab045a6c5be8" colab={"base_uri": "https://localhost:8080/", "height": 459}
summary(ResUpBlock(512 + 256, 256).to(device), [(512, 24, 24), (256, 48, 48)])
# + id="x6SKnfrTKxAJ" colab_type="code" colab={}
class ResUNet(nn.Module):
    """Two-headed UNet (depth + segmentation) on a shared ResNetV2 encoder.

    Input is 6-channel (presumably two stacked RGB images -- TODO confirm
    against the dataset); each head emits a single-channel 192x192 map.
    The depth decoder's intermediate outputs (dru1..dru4) are reused as
    extra skip connections by the segmentation decoder.
    """
    def __init__(self):
        super().__init__()
        # Encoder
        self.res_down1 = ResDownBlock(6, 64)  # H / 2 ; input = 192x192x6 ; output = 96x96x64 ; skip1 = 192x192x64
        self.res_down2 = ResDownBlock(64, 128)  # H / 4 ; input = 96x96x64 ; output = 48x48x128 ; skip2 = 96x96x128
        self.res_down3 = ResDownBlock(128, 256)  # H / 8 ; input = 48x48x128 ; output = 24x24x256 ; skip3 = 48x48x256
        self.res_down4 = ResDownBlock(256, 512)  # H / 16 ; input = 24x24x256 ; output = 12x12x512 ; skip4 = 24x24x512
        # Bridge
        self.bridge = ResDoubleConv(512, 512)
        # Depth Decoder
        self.d_res_up4 = ResUpBlock(512 + 512, 512)  # H / 8 ; input = 24x24x1024(upscaled) 24x24x512(skip4) ; output = 24x24x512(dskip4)
        self.d_res_up3 = ResUpBlock(512 + 256, 256)  # H / 4 ; input = 48x48x512(upscaled) 48x48x256(skip3) ; output = 48x48x256(dskip3)
        self.d_res_up2 = ResUpBlock(256 + 128, 128)  # H / 2 ; input = 96x96x256(upscaled) 96x96x128(skip2) ; output = 96x96x128(dskip2)
        self.d_res_up1 = ResUpBlock(128 + 64, 64)  # H / 1 ; input = 192x192x128(upscaled) 192x192x64(skip1) ; output = 192x192x64(dskip1)
        # Depth Output
        self.depth_output = nn.Conv2d(64, 1, kernel_size=1, stride=1, bias=False)  # output = 192x192x1
        # Segmentation Decoder (consumes encoder skips AND depth-decoder skips)
        self.s_res_up4 = ResUpBlock(512 + 512 + 512, 512)  # H / 8 ; input = 24x24x1024(upscaled) 24x24x512(dskip4) 24x24x512(skip4) ; output = 24x24x512
        self.s_res_up3 = ResUpBlock(512 + 256 + 256, 256)  # H / 4 ; input = 48x48x512(upscaled) 48x48x256(dskip3) 48x48x256(skip3) ; output = 48x48x256
        self.s_res_up2 = ResUpBlock(256 + 128 + 128, 128)  # H / 2 ; input = 96x96x256(upscaled) 96x96x128(dskip2) 96x96x128(skip2) ; output = 96x96x128
        self.s_res_up1 = ResUpBlock(128 + 64 + 64, 64)  # H / 1 ; input = 192x192x128(upscaled) 192x192x64(dskip1) 192x192x64(skip1) ; output = 192x192x64
        # Segmentation Output
        self.segment_output = nn.Conv2d(64, 1, kernel_size=1, stride=1, bias=False)  # output = 192x192x1
    def forward(self, input):
        # Encoder: each stage returns (downsampled features, full-res skip).
        rd1, skip1_out = self.res_down1(input)
        rd2, skip2_out = self.res_down2(rd1)
        rd3, skip3_out = self.res_down3(rd2)
        rd4, skip4_out = self.res_down4(rd3)
        # Bridge
        bridge = self.bridge(rd4)
        # Depth Decoder
        dru4 = self.d_res_up4(bridge, skip4_out)
        dru3 = self.d_res_up3(dru4, skip3_out)
        dru2 = self.d_res_up2(dru3, skip2_out)
        dru1 = self.d_res_up1(dru2, skip1_out)
        d_out = self.depth_output(dru1)
        # Segmentation Decoder: third argument is the depth-decoder skip.
        sru4 = self.s_res_up4(bridge, skip4_out, dru4)
        sru3 = self.s_res_up3(sru4, skip3_out, dru3)
        sru2 = self.s_res_up2(sru3, skip2_out, dru2)
        sru1 = self.s_res_up1(sru2, skip1_out, dru1)
        s_out = self.segment_output(sru1)
        return d_out, s_out
# + id="htwlSExZ6rGm" colab_type="code" colab={}
model = ResUNet().to(device)
# + id="osL4kDmV7ZMn" colab_type="code" outputId="0cf61750-a4ee-4b35-df08-fd295baa49f0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
summary(model, (6, 192, 192))
# + id="TpmBYafZFp_e" colab_type="code" outputId="06d2968b-166a-49fb-f652-9289c878b3ba" colab={"base_uri": "https://localhost:8080/", "height": 1000}
summary(model, (6, 192, 192))
# + [markdown] id="Mljk_p522IEB" colab_type="text"
# # Custom UNet - ResNeXt Backbone
# + [markdown] id="3nBRycl_KkEr" colab_type="text"
# The only change is the DoubleConv Backbone, everything else remains the same
# + id="GGkbRlE_Xdua" colab_type="code" colab={}
class ResDoubleConv(nn.Module):
    """Pre-activated grouped-convolution bottleneck, as used in ResNeXt.

    1x1 reduce -> 3x3 grouped conv -> 1x1 expand, each preceded by
    BatchNorm + ReLU.  The bottleneck width scales with the requested
    output channel count.
    """

    def __init__(self, in_channels, out_channels):
        super(ResDoubleConv, self).__init__()
        n_groups = 32        # ResNeXt "cardinality"
        widen = 6
        group_base_width = 64
        # Bottleneck width grows proportionally to out_channels.
        ratio = out_channels / (widen * 64.)
        inner = n_groups * int(group_base_width * ratio)
        self.double_conv = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, inner, kernel_size=1, bias=False),
            nn.BatchNorm2d(inner),
            nn.ReLU(inplace=True),
            nn.Conv2d(inner, inner, kernel_size=3, padding=1, groups=n_groups, bias=False),
            nn.BatchNorm2d(inner),
            nn.ReLU(inplace=True),
            nn.Conv2d(inner, out_channels, kernel_size=1, bias=False),
        )

    def forward(self, x):
        return self.double_conv(x)
# + id="yIEQ6zTzJI9R" colab_type="code" outputId="514cadf4-cc57-43d6-f9f7-82c2a67e521c" colab={"base_uri": "https://localhost:8080/", "height": 391}
summary(ResDoubleConv(512, 1024).to(device), (512, 12, 12))
# + colab_type="code" id="X14IzW7-LQ_f" colab={}
model = ResUNet().to(device)
# + colab_type="code" outputId="9a95592c-537c-45a2-f4c8-d6a4373ca4eb" id="8aB1jAFiLQ_q" colab={"base_uri": "https://localhost:8080/", "height": 1000}
summary(model, (6, 192, 192))
# + id="COZ_qjaOLTpm" colab_type="code" outputId="50b15177-455e-4fdc-914e-5453f0254876" colab={"base_uri": "https://localhost:8080/", "height": 348, "referenced_widgets": ["9f314f899dc84aa993eada03b0e42571", "b4a107cd2d874413bb430bce4c61d856", "b4b72decdee14c38b056499138511151", "b445416362c140aaa536d41b59f0a6a0", "ab6c33f78dcc44d198d2c4e273cf543f", "85a55a0e00ca4813976dbff5d9fcc508", "08ad2f6aa44746af945e434172841b93", "99aca92e6e5a47908cacaed3286b4271"]}
# Throughput/memory stress test: push 6250 random batches through the net,
# freeing the input and the CUDA cache each iteration.
# NOTE(review): loop-body indentation was lost in conversion; the del /
# empty_cache are assumed to sit inside the loop — confirm against the
# original notebook.
model = ResUNet()
model.to(device)
for i in tqdm(range(6250)):
    input = torch.randn(64, 6, 96, 96)
    input = input.to(device)
    model(input)
    del input
    torch.cuda.empty_cache()
# + id="UDbRO7loQbou" colab_type="code" colab={}
gc.collect()
torch.cuda.empty_cache()
# + id="Q0mWkqrpPTlQ" colab_type="code" outputId="24963e5c-1d6a-4e6e-f3d8-7d5ab2e22232" colab={"base_uri": "https://localhost:8080/", "height": 34}
torch.cuda.memory_allocated()
# + id="hTIrvF3FG42t" colab_type="code" colab={}
import torch
import torch.nn as nn
from torchvision import models
def convrelu(in_channels, out_channels, kernel, padding):
    """Conv2d followed by an in-place ReLU, packaged as one module."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class ResNetUNet(nn.Module):
    """UNet with a pretrained torchvision ResNet-18 encoder.

    Encoder stages supply skip connections at 1/2..1/32 of the input
    resolution; the decoder bilinearly upsamples, concatenates the matching
    skip, and fuses with conv+ReLU blocks.  Expects a 3-channel input.
    """
    def __init__(self, n_class):
        super().__init__()
        # NOTE(review): downloads ImageNet weights on first instantiation.
        self.base_model = models.resnet18(pretrained=True)
        self.base_layers = list(self.base_model.children())
        self.layer0 = nn.Sequential(*self.base_layers[:3])  # size=(N, 64, x.H/2, x.W/2)
        self.layer0_1x1 = convrelu(64, 64, 1, 0)
        self.layer1 = nn.Sequential(*self.base_layers[3:5])  # size=(N, 64, x.H/4, x.W/4)
        self.layer1_1x1 = convrelu(64, 64, 1, 0)
        self.layer2 = self.base_layers[5]  # size=(N, 128, x.H/8, x.W/8)
        self.layer2_1x1 = convrelu(128, 128, 1, 0)
        self.layer3 = self.base_layers[6]  # size=(N, 256, x.H/16, x.W/16)
        self.layer3_1x1 = convrelu(256, 256, 1, 0)
        self.layer4 = self.base_layers[7]  # size=(N, 512, x.H/32, x.W/32)
        self.layer4_1x1 = convrelu(512, 512, 1, 0)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
        self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
        self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
        self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
        # Full-resolution path applied to the raw input, fused at the end.
        self.conv_original_size0 = convrelu(3, 64, 3, 1)
        self.conv_original_size1 = convrelu(64, 64, 3, 1)
        self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
        self.conv_last = nn.Conv2d(64, n_class, 1)
    def forward(self, input):
        x_original = self.conv_original_size0(input)
        x_original = self.conv_original_size1(x_original)
        # Encoder
        layer0 = self.layer0(input)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        # Decoder: upsample, 1x1-project the matching skip, concat, fuse.
        layer4 = self.layer4_1x1(layer4)
        x = self.upsample(layer4)
        layer3 = self.layer3_1x1(layer3)
        x = torch.cat([x, layer3], dim=1)
        x = self.conv_up3(x)
        x = self.upsample(x)
        layer2 = self.layer2_1x1(layer2)
        x = torch.cat([x, layer2], dim=1)
        x = self.conv_up2(x)
        x = self.upsample(x)
        layer1 = self.layer1_1x1(layer1)
        x = torch.cat([x, layer1], dim=1)
        x = self.conv_up1(x)
        x = self.upsample(x)
        layer0 = self.layer0_1x1(layer0)
        x = torch.cat([x, layer0], dim=1)
        x = self.conv_up0(x)
        x = self.upsample(x)
        # Merge with the full-resolution path before the final classifier.
        x = torch.cat([x, x_original], dim=1)
        x = self.conv_original_size2(x)
        out = self.conv_last(x)
        return out
# + id="NRWiCwRnG5N5" colab_type="code" outputId="f4f6bcd5-3ca1-4fa0-a96d-c6fc0a34d63d" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["4e836802abf842ec8f09f7840e5523a8", "e8c557c5d4b34558b82a7a65c495b371", "d14b91c924064e19ad99baf90aaa8912", "e45f5706eb594834b7e449f20272c48e", "<KEY>", "<KEY>", "4e9b313612e14c1ab0412877b167e86c", "<KEY>"]}
summary(ResNetUNet(10).to(device), (3, 192, 192))
# + id="0KQl8ZmCHDHG" colab_type="code" colab={}
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import importlib
class conv(nn.Module):
    """'Same'-padded Conv2d + BatchNorm + ELU with explicit symmetric padding."""

    def __init__(self, num_in_layers, num_out_layers, kernel_size, stride):
        super(conv, self).__init__()
        self.kernel_size = kernel_size
        self.conv_base = nn.Conv2d(num_in_layers, num_out_layers,
                                   kernel_size=kernel_size, stride=stride)
        self.normalize = nn.BatchNorm2d(num_out_layers)

    def forward(self, x):
        # Symmetric pad of (k-1)//2 keeps spatial size for stride 1.
        pad = (self.kernel_size - 1) // 2
        out = self.conv_base(F.pad(x, (pad, pad, pad, pad)))
        out = self.normalize(out)
        return F.elu(out, inplace=True)
class convblock(nn.Module):
    """Two stacked `conv` layers; the second downsamples with stride 2."""

    def __init__(self, num_in_layers, num_out_layers, kernel_size):
        super(convblock, self).__init__()
        self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1)
        self.conv2 = conv(num_out_layers, num_out_layers, kernel_size, 2)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class maxpool(nn.Module):
    """Stride-2 max pooling with 'same'-style symmetric padding."""

    def __init__(self, kernel_size):
        super(maxpool, self).__init__()
        self.kernel_size = kernel_size

    def forward(self, x):
        pad = (self.kernel_size - 1) // 2
        padded = F.pad(x, (pad, pad, pad, pad))
        return F.max_pool2d(padded, self.kernel_size, stride=2)
class resconv(nn.Module):
    """Bottleneck residual block (ResNet-50 style) with ELU activation.

    Main path: 1x1 -> 3x3 (strided) -> 1x1 expanding to 4*num_out_layers.
    The shortcut is always projected through a strided 1x1 conv so it
    matches the main path's shape.
    """

    def __init__(self, num_in_layers, num_out_layers, stride):
        super(resconv, self).__init__()
        self.num_out_layers = num_out_layers
        self.stride = stride
        self.conv1 = conv(num_in_layers, num_out_layers, 1, 1)
        self.conv2 = conv(num_out_layers, num_out_layers, 3, stride)
        self.conv3 = nn.Conv2d(num_out_layers, 4*num_out_layers, kernel_size=1, stride=1)
        self.conv4 = nn.Conv2d(num_in_layers, 4*num_out_layers, kernel_size=1, stride=stride)
        self.normalize = nn.BatchNorm2d(4*num_out_layers)

    def forward(self, x):
        # The original guarded the projection with a hard-coded
        # `do_proj = True`, making the identity branch unreachable; the
        # dead branch (and unused `shortcut = []`) are removed here.
        x_out = self.conv1(x)
        x_out = self.conv2(x_out)
        x_out = self.conv3(x_out)
        shortcut = self.conv4(x)
        return F.elu(self.normalize(x_out + shortcut), inplace=True)
class resconv_basic(nn.Module):
    """Basic residual block (ResNet-18 style) with ELU activation."""
    # for resnet18

    def __init__(self, num_in_layers, num_out_layers, stride):
        super(resconv_basic, self).__init__()
        self.num_out_layers = num_out_layers
        self.stride = stride
        self.conv1 = conv(num_in_layers, num_out_layers, 3, stride)
        self.conv2 = conv(num_out_layers, num_out_layers, 3, 1)
        self.conv3 = nn.Conv2d(num_in_layers, num_out_layers, kernel_size=1, stride=stride)
        self.normalize = nn.BatchNorm2d(num_out_layers)

    def forward(self, x):
        # Projection shortcut is unconditional: the original's `do_proj`
        # flag was hard-coded True, so its identity branch could never run.
        # The dead branch and unused `shortcut = []` are removed.
        x_out = self.conv1(x)
        x_out = self.conv2(x_out)
        shortcut = self.conv3(x)
        return F.elu(self.normalize(x_out + shortcut), inplace=True)
def resblock(num_in_layers, num_out_layers, num_blocks, stride):
    """Stack of bottleneck residual blocks; only the first may stride.

    NOTE(review): for num_blocks == 1 this still emits 2 blocks (the loop
    is empty but the trailing append always runs) — preserved as-is.
    """
    blocks = [resconv(num_in_layers, num_out_layers, stride)]
    # Subsequent blocks consume the expanded (4x) channel count at stride 1.
    for _ in range(1, num_blocks - 1):
        blocks.append(resconv(4 * num_out_layers, num_out_layers, 1))
    blocks.append(resconv(4 * num_out_layers, num_out_layers, 1))
    return nn.Sequential(*blocks)
def resblock_basic(num_in_layers, num_out_layers, num_blocks, stride):
    """Stack of basic residual blocks; only the first may change stride/width."""
    blocks = [resconv_basic(num_in_layers, num_out_layers, stride)]
    for _ in range(1, num_blocks):
        blocks.append(resconv_basic(num_out_layers, num_out_layers, 1))
    return nn.Sequential(*blocks)
class upconv(nn.Module):
    """Bilinear upsampling by `scale`, followed by a `conv` layer."""

    def __init__(self, num_in_layers, num_out_layers, kernel_size, scale):
        super(upconv, self).__init__()
        self.scale = scale
        self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1)

    def forward(self, x):
        upsampled = nn.functional.interpolate(
            x, scale_factor=self.scale, mode='bilinear', align_corners=True)
        return self.conv1(upsampled)
class get_disp(nn.Module):
    """Produce a 2-channel disparity map with values in (0, 0.3)."""

    def __init__(self, num_in_layers):
        super(get_disp, self).__init__()
        self.conv1 = nn.Conv2d(num_in_layers, 2, kernel_size=3, stride=1)
        self.normalize = nn.BatchNorm2d(2)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        # Pad by 1 on every side so the 3x3 conv preserves spatial size.
        out = self.conv1(F.pad(x, (1, 1, 1, 1)))
        out = self.normalize(out)
        # Sigmoid bounds the output; 0.3 is the maximum disparity.
        return 0.3 * self.sigmoid(out)
class Resnet50_md(nn.Module):
    """Monodepth-style disparity network with a ResNet-50-like encoder.

    forward() returns four disparity maps (disp1..disp4) at increasing
    resolution; each coarse disparity is upsampled and fed into the next
    decoder stage.  NOTE(review): the disparities are also stored on
    `self` (self.disp1..4, self.udisp2..4) as forward-pass side effects.
    """
    def __init__(self, num_in_layers):
        super(Resnet50_md, self).__init__()
        # encoder
        self.conv1 = conv(num_in_layers, 64, 7, 2)  # H/2 - 64D
        self.pool1 = maxpool(3)  # H/4 - 64D
        self.conv2 = resblock(64, 64, 3, 2)  # H/8 - 256D
        self.conv3 = resblock(256, 128, 4, 2)  # H/16 - 512D
        self.conv4 = resblock(512, 256, 6, 2)  # H/32 - 1024D
        self.conv5 = resblock(1024, 512, 3, 2)  # H/64 - 2048D
        # decoder
        self.upconv6 = upconv(2048, 512, 3, 2)
        self.iconv6 = conv(1024 + 512, 512, 3, 1)
        self.upconv5 = upconv(512, 256, 3, 2)
        self.iconv5 = conv(512+256, 256, 3, 1)
        self.upconv4 = upconv(256, 128, 3, 2)
        self.iconv4 = conv(256+128, 128, 3, 1)
        self.disp4_layer = get_disp(128)
        self.upconv3 = upconv(128, 64, 3, 2)
        # +2 input channels: the upsampled coarser disparity is concatenated.
        self.iconv3 = conv(64+64+2, 64, 3, 1)
        self.disp3_layer = get_disp(64)
        self.upconv2 = upconv(64, 32, 3, 2)
        self.iconv2 = conv(32+64+2, 32, 3, 1)
        self.disp2_layer = get_disp(32)
        self.upconv1 = upconv(32, 16, 3, 2)
        self.iconv1 = conv(16+2, 16, 3, 1)
        self.disp1_layer = get_disp(16)
        # Xavier init for every conv in the network.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
    def forward(self, x):
        # encoder
        x1 = self.conv1(x)
        x_pool1 = self.pool1(x1)
        x2 = self.conv2(x_pool1)
        x3 = self.conv3(x2)
        x4 = self.conv4(x3)
        x5 = self.conv5(x4)
        # skips
        skip1 = x1
        skip2 = x_pool1
        skip3 = x2
        skip4 = x3
        skip5 = x4
        # decoder
        upconv6 = self.upconv6(x5)
        concat6 = torch.cat((upconv6, skip5), 1)
        iconv6 = self.iconv6(concat6)
        upconv5 = self.upconv5(iconv6)
        concat5 = torch.cat((upconv5, skip4), 1)
        iconv5 = self.iconv5(concat5)
        upconv4 = self.upconv4(iconv5)
        concat4 = torch.cat((upconv4, skip3), 1)
        iconv4 = self.iconv4(concat4)
        self.disp4 = self.disp4_layer(iconv4)
        # Upsample the coarse disparity so finer stages can condition on it.
        self.udisp4 = nn.functional.interpolate(self.disp4, scale_factor=2, mode='bilinear', align_corners=True)
        upconv3 = self.upconv3(iconv4)
        concat3 = torch.cat((upconv3, skip2, self.udisp4), 1)
        iconv3 = self.iconv3(concat3)
        self.disp3 = self.disp3_layer(iconv3)
        self.udisp3 = nn.functional.interpolate(self.disp3, scale_factor=2, mode='bilinear', align_corners=True)
        upconv2 = self.upconv2(iconv3)
        concat2 = torch.cat((upconv2, skip1, self.udisp3), 1)
        iconv2 = self.iconv2(concat2)
        self.disp2 = self.disp2_layer(iconv2)
        self.udisp2 = nn.functional.interpolate(self.disp2, scale_factor=2, mode='bilinear', align_corners=True)
        upconv1 = self.upconv1(iconv2)
        concat1 = torch.cat((upconv1, self.udisp2), 1)
        iconv1 = self.iconv1(concat1)
        self.disp1 = self.disp1_layer(iconv1)
        return self.disp1, self.disp2, self.disp3, self.disp4
# + id="32d7FCVUWsAT" colab_type="code" outputId="8abd8b66-96a3-4d32-8118-12d5d9fb1704" colab={"base_uri": "https://localhost:8080/", "height": 1000}
summary(Resnet50_md(3).to(device), (3, 192, 192))
# + id="eaMeH5iSW1DM" colab_type="code" colab={}
model = Resnet50_md(3)
model = model.to(device)
# + id="Rglqcc4se35O" colab_type="code" colab={}
# Throughput/memory stress test for the monodepth net: 1000 random batches,
# freeing the input and the CUDA cache each iteration.
# NOTE(review): loop-body indentation was lost in conversion; the del /
# empty_cache are assumed to sit inside the loop — confirm against the
# original notebook.
model = Resnet50_md(3)
model = model.to(device)
model.train()
for i in tqdm(range(1000)):
    input = torch.randn(128, 3, 192, 192)
    input = input.to(device)
    model(input)
    del input
    torch.cuda.empty_cache()
# + id="1FENShq9YJbi" colab_type="code" outputId="81e710ef-50b4-492b-ea21-00acb442759f" colab={"base_uri": "https://localhost:8080/", "height": 34}
torch.cuda.memory_allocated()
# + id="mxvC0IXbYP52" colab_type="code" outputId="33c603a0-52b4-4b89-8563-e80cc49821c7" colab={"base_uri": "https://localhost:8080/", "height": 163}
del input
# + id="GU9LVLaEYS0Y" colab_type="code" colab={}
gc.collect()
torch.cuda.empty_cache()
# + id="nznY2KSwYq7Y" colab_type="code" colab={}
import torch
import torch.nn as nn
class ResDoubleConv(nn.Module):
    """Pre-activated (BN -> ReLU -> Conv3x3) x2 block from ResNetV2."""

    def __init__(self, in_channels, out_channels):
        super(ResDoubleConv, self).__init__()
        stages = [
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, out_channels,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels,
                      kernel_size=3, padding=1, bias=False),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.double_conv(x)
class ResDownBlock(nn.Module):
    """Encoder stage: residual double conv, then 2x max-pool downsampling.

    Returns (pooled features, pre-pool residual) — the second element is
    consumed later as a skip connection.
    """

    def __init__(self, in_channels, out_channels):
        super(ResDownBlock, self).__init__()
        self.double_conv = ResDoubleConv(in_channels, out_channels)
        # 1x1 projection so the identity path matches the residual's channels.
        self.proj_layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels,
                      kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.down_sample = nn.MaxPool2d(2)

    def forward(self, input):
        shortcut = self.proj_layer(input)
        residual = self.double_conv(input) + shortcut
        return self.down_sample(residual), residual
class ResUpBlock(nn.Module):
    """Decoder stage: PixelShuffle upsampling + residual double conv.

    Four parallel PixelShuffle(2) ops each map C channels to C/4 at 2x
    spatial size; concatenating all four restores the channel count.
    The result is concatenated with the encoder skip (and optionally a
    depth-decoder skip) before the residual double conv.
    """

    def __init__(self, in_channels, out_channels):
        super(ResUpBlock, self).__init__()
        self.upsample_1 = nn.PixelShuffle(2)
        self.upsample_2 = nn.PixelShuffle(2)
        self.upsample_3 = nn.PixelShuffle(2)
        self.upsample_4 = nn.PixelShuffle(2)
        self.double_conv = ResDoubleConv(in_channels, out_channels)
        self.proj_layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels,
                      kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, down_input, skip_input, decoder_input=None):
        shuffles = (self.upsample_1, self.upsample_2, self.upsample_3, self.upsample_4)
        x = torch.cat([shuffle(down_input) for shuffle in shuffles], dim=1)
        x = torch.cat([x, skip_input], dim=1)
        if decoder_input is not None:
            x = torch.cat([x, decoder_input], dim=1)
        shortcut = self.proj_layer(x)
        return self.double_conv(x) + shortcut
class ResUNet(nn.Module):
    """Two-headed UNet (depth + segmentation) on a shared ResNetV2 encoder.

    Input is 6-channel (presumably two stacked RGB images -- TODO confirm
    against the dataset); each head emits a single-channel 192x192 map.
    The depth decoder's intermediate outputs (dru1..dru4) are reused as
    extra skip connections by the segmentation decoder.
    """
    def __init__(self):
        super(ResUNet, self).__init__()
        # Encoder
        # H / 2 ; input = 192x192x6 ; output = 96x96x64 ; skip1 = 192x192x64
        self.res_down1 = ResDownBlock(6, 64)
        # H / 4 ; input = 96x96x64 ; output = 48x48x128 ; skip2 = 96x96x128
        self.res_down2 = ResDownBlock(64, 128)
        # H / 8 ; input = 48x48x128 ; output = 24x24x256 ; skip3 = 48x48x256
        self.res_down3 = ResDownBlock(128, 256)
        # H / 16 ; input = 24x24x256 ; output = 12x12x512 ; skip4 = 24x24x512
        self.res_down4 = ResDownBlock(256, 512)
        # Bridge
        self.bridge = ResDoubleConv(512, 512)
        # Depth Decoder
        # H / 8 ; input = 24x24x1024(upscaled) 24x24x512(skip4) ; output = 24x24x512(dskip4)
        self.d_res_up4 = ResUpBlock(512 + 512, 512)
        # H / 4 ; input = 48x48x512(upscaled) 48x48x256(skip3) ; output = 48x48x256(dskip3)
        self.d_res_up3 = ResUpBlock(512 + 256, 256)
        # H / 2 ; input = 96x96x256(upscaled) 96x96x128(skip2) ; output = 96x96x128(dskip2)
        self.d_res_up2 = ResUpBlock(256 + 128, 128)
        # H / 1 ; input = 192x192x128(upscaled) 192x192x64(skip1) ; output = 192x192x64(dskip1)
        self.d_res_up1 = ResUpBlock(128 + 64, 64)
        # Depth Output
        self.depth_output = nn.Conv2d(
            64, 1, kernel_size=1, stride=1, bias=False)  # output = 192x192x1
        # Segmentation Decoder (consumes encoder skips AND depth-decoder skips)
        # H / 8 ; input = 24x24x1024(upscaled) 24x24x512(dskip4) 24x24x512(skip4) ; output = 24x24x512
        self.s_res_up4 = ResUpBlock(512 + 512 + 512, 512)
        # H / 4 ; input = 48x48x512(upscaled) 48x48x256(dskip3) 48x48x256(skip3) ; output = 48x48x256
        self.s_res_up3 = ResUpBlock(512 + 256 + 256, 256)
        # H / 2 ; input = 96x96x256(upscaled) 96x96x128(dskip2) 96x96x128(skip2) ; output = 96x96x128
        self.s_res_up2 = ResUpBlock(256 + 128 + 128, 128)
        # H / 1 ; input = 192x192x128(upscaled) 192x192x64(dskip1) 192x192x64(skip1) ; output = 192x192x64
        self.s_res_up1 = ResUpBlock(128 + 64 + 64, 64)
        # Segmentation Output
        self.segment_output = nn.Conv2d(
            64, 1, kernel_size=1, stride=1, bias=False)  # output = 192x192x1
    def forward(self, input):
        # Encoder: each stage returns (downsampled features, full-res skip).
        rd1, skip1_out = self.res_down1(input)
        rd2, skip2_out = self.res_down2(rd1)
        rd3, skip3_out = self.res_down3(rd2)
        rd4, skip4_out = self.res_down4(rd3)
        # Bridge
        bridge = self.bridge(rd4)
        # Depth Decoder
        dru4 = self.d_res_up4(bridge, skip4_out)
        dru3 = self.d_res_up3(dru4, skip3_out)
        dru2 = self.d_res_up2(dru3, skip2_out)
        dru1 = self.d_res_up1(dru2, skip1_out)
        d_out = self.depth_output(dru1)
        # Segmentation Decoder: third argument is the depth-decoder skip.
        sru4 = self.s_res_up4(bridge, skip4_out, dru4)
        sru3 = self.s_res_up3(sru4, skip3_out, dru3)
        sru2 = self.s_res_up2(sru3, skip2_out, dru2)
        sru1 = self.s_res_up1(sru2, skip1_out, dru1)
        s_out = self.segment_output(sru1)
        return d_out, s_out
# + id="0zSe24oPZVdb" colab_type="code" colab={}
model = ResUNet().to(device)
# + id="Ua7scQqIZZgs" colab_type="code" outputId="04e1007b-c2f0-4b71-f2f6-703a017269ad" colab={"base_uri": "https://localhost:8080/", "height": 1000}
summary(model, (6, 192, 192))
# + id="DiwIxYl-ZbVx" colab_type="code" outputId="d69520aa-3493-4b17-dd9a-ee9b236c352f" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(sum(p.numel() for p in model.parameters()))
# + id="f-qJgFgla6Pw" colab_type="code" outputId="b0e59a0b-4d40-4832-86ac-b58375f2b261" colab={"base_uri": "https://localhost:8080/", "height": 34}
34997388 * 4 / 1024/1024
# + id="cxDj3DR3bFt9" colab_type="code" colab={}
| notebooks/06_DepthModel_ModelZoo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Object Oriented Programming Concepts
# <ul>
# <li>Python is an object-oriented programming language. Unlike procedure-oriented programming, where the main emphasis is on functions, object-oriented programming stresses on objects.</li><li>
#
# An object is simply a collection of data (variables) and methods (functions) that act on those data. Similarly, a class is a blueprint for that object.</li><li>
#
# We can think of a class as a sketch (prototype) of a house. It contains all the details about the floors, doors, windows, etc. Based on these descriptions we build the house. House is the object.</li><li>
#
# As many houses can be made from a house's blueprint, we can create many objects from a class. </li><li>Object-oriented Programming (OOPs) is a programming paradigm that uses objects and classes in programming.</li>
# <li>
# It aims to implement real-world entities like inheritance, polymorphisms, encapsulation, etc. in the programming.</li>
# <li>
# The main concept of OOPs is to bind the data and the functions that work on that together as a single unit so that no other part of the code can access this data. </li></ul>
# # Main Concepts of OOP
# <ul><li> Class</li>
# <li>
# Objects</li>
# <li>
# Polymorphism</li>
# <li>
# Encapsulation</li>
# <li>
# Inheritance</li></ul>
# ## Class
# <ul><li>A class is a user-defined blueprint for creating objects.</li><li> It bundles the attributes and the methods that every object created from it will share. </li><li>It is a logical entity, so no memory is allocated until an object is instantiated. </li></ul>
#
# Let's take an example:<br><br>
# <img src="images/class.jpg" align="left">
# A parrot is an object, as it has the following properties:
#
# <ul><li>name, age, color as attributes</li><li>
# singing, dancing as behavior</li></ul>
#
# ### Some points on Python class:
#
# <ul><li>Classes are created by keyword class.</li><li>
# Attributes are the variables that belong to a class.</li><li>
# Attributes are always public and can be accessed using the dot (.) operator. Eg.: Myclass.Myattribute</li></ul>
# The concept of OOP in Python focuses on creating reusable code. This concept is also known as DRY (Don't Repeat Yourself).
# ### Creating an empty class
# +
# A Python program to
# demonstrate defining
# a class
class Parrot():
    """Empty class: a minimal blueprint with no attributes or methods yet."""
    pass
# -
# ## Objects
# <ul><li>The object is an entity that has a state and behavior associated with it.</li><li> It may be any real-world object like a mouse, keyboard, chair, table, pen, etc.</li><li> Integers, strings, floating-point numbers, even arrays, and dictionaries, are all objects.</li><li> You’ve been using objects all along and may not even realize it.</li></ul>
# ### An object consists of :
#
# <img src="images/objects.jpg" width="300" height="360" align="left">
#
# <ul><li><b>State:</b> It is represented by the attributes of an object. It also reflects the properties of an object.</li><li>
# <b>Behavior:</b> It is represented by the methods of an object. It also reflects the response of an object to other objects.</li><li>
# <b>Identity:</b> It gives a unique name to an object and enables one object to interact with other objects.</li></ul>
# ### Creating an object
# <ul><li>An object (instance) is an instantiation of a class.</li><li> When class is defined, only the description for the object is defined. Therefore, no memory or storage is allocated.</li></ul>
obj = Parrot()
# ### Declaring an object
# +
# Python program to
# demonstrate instantiating
# a class
class Dog():
    """Demo class: two class attributes plus a method that prints them."""

    # Class attributes, shared by every Dog instance.
    attr1 = "mammal"
    attr2 = "dog"

    def fun(self):
        """Print what this dog is, reading the class attributes via self."""
        print("I'm a", self.attr1)
        print("I'm a", self.attr2)
# Object instantiation
Rodger = Dog()
# Accessing class attributes
# and method through objects
print(Rodger.attr1)
Rodger.fun()
# -
# ### Understanding some basic keywords
#
# #### The self
# <ul><li><b>self</b> represents the instance of the class. By using the <b>“self”</b> we can access the attributes and methods of the class in python. It binds the attributes with the given arguments.</li><li> Python decided to do methods in a way that makes the instance to which the method belongs be passed automatically, but not received automatically: the first parameter of methods is the instance the method is called on.</li><li>
# Class methods must have an extra first parameter in the method definition. We do not give a value for this parameter when we call the method, Python provides it.</li><li>
# If we have a method that takes no arguments, then we still have to have one argument.</li><li>
# This is similar to this pointer in C++ and this reference in Java.</li></ul>
# ### Characteristics of SELF
# #### <ul><li>Self is always pointing to Current Object.</li></ul>
# +
# It is clearly seen that self and obj is referring to the same object
class check_():
    """Shows that ``self`` inside a method and the variable the new
    instance is bound to refer to the very same object (same ``id``)."""

    def __init__(self):
        print("Address of self = ", id(self))


obj = check_()
print("Address of class object = ", id(obj))
# +
# Another example using SELF
class car():
    """A car described by a model name and a color."""

    def __init__(self, model, color):
        """Store the constructor arguments as instance attributes."""
        self.model = model
        self.color = color

    def show(self):
        """Print this car's model and color."""
        print("Model is", self.model)
        print("color is", self.color)


# Each instance keeps its own state; the method call binds ``self``
# to the instance it is invoked on.
toyota = car("<NAME> 2009", "blue")
kia = car("<NAME> 2020", "green")
toyota.show()  # same output as car.show(toyota)
kia.show()     # same output as car.show(kia)
# -
# #### <ul><li>Self is the first argument to be passed in Constructor and Instance Method.</li></ul>
# +
# Self is always required as the first argument
class check:
    """Demonstrates that ``self`` is the (implicit) first argument of the
    constructor: Python supplies it automatically on instantiation."""

    def __init__(self):
        print("This is Constructor")


# Fix: renamed the variable from ``object`` to ``obj`` so the ``object``
# builtin is no longer shadowed at module level.
obj = check()
print("Worked fine")
# -
# #### <ul><li>Self is a convention and not a Python keyword.</li></ul>
# +
# Write Python3 code here
class this_is_class:
    """Shows that ``self`` is only a naming convention, not a keyword:
    any identifier may be used for a method's first parameter."""

    def __init__(in_place_of_self):
        print("we have used another "
              "parameter name in place of self")


# Fix: renamed the variable from ``object`` to ``instance`` so the
# ``object`` builtin is no longer shadowed at module level.
instance = this_is_class()
# -
# ### The __ __init__ __ method
# <ul><li>The __init__ method is similar to constructors in C++ and Java. </li><li>It is run as soon as an object of a class is instantiated. </li><li>The method is useful to do any initialization you want to do with your object. </li>
# <li>Constructors are used to initialize the object’s state. The task of constructors is to initialize(assign values) to the data members of the class when an object of class is created. Like methods, a constructor also contains collection of statements(i.e. instructions) that are executed at time of Object creation. It is run as soon as an object of a class is instantiated. The method is useful to do any initialization you want to do with your object.</li></ul>
# +
# A class with init method
class Person:
    """A person identified by name, initialised through ``__init__``."""

    def __init__(self, name):
        self.name = name

    def say_hi(self):
        """Introduce this person by name."""
        print('Hello, my name is', self.name)


p = Person('<NAME>')
p.say_hi()
# +
# Another class with init method
class Person:
    """A person identified by name; every instance keeps its own name."""

    def __init__(self, name):
        self.name = name

    def say_hi(self):
        """Introduce this person by name."""
        print('Hello, my name is', self.name)


# Three distinct objects created from the same class, each carrying
# its own ``name`` attribute.
p1 = Person('Angel')
p2 = Person('Tabitha')
p3 = Person('Ugonna')
p1.say_hi()
p2.say_hi()
p3.say_hi()
# -
# ### Constructors in Python
# <ul><li>Constructors are generally used for instantiating an object.</li><li>The task of constructors is to initialize(assign values) to the data members of the class when an object of the class is created.</li><li> In Python the __init__() method is called the constructor and is always called when an object is created.</li></ul>
# ### Types of constructors :
#
# <ul><li><b>default constructor:</b> The default constructor is a simple constructor which doesn’t accept any arguments. Its definition has only one argument which is a reference to the instance being constructed.</li><li>
# <b>parameterized constructor:</b> constructor with parameters is known as parameterized constructor. The parameterized constructor takes its first argument as a reference to the instance being constructed known as self and the rest of the arguments are provided by the programmer.</li></ul>
# #### Default Constructor
# +
class Program:
    """Holds a course title and can print it back."""

    def __init__(self, course):
        self.course = course

    def print_Course(self):
        """Print the stored course title."""
        print(self.course)


# Instantiate, then call the instance method through the object.
obj = Program("CSC 102 - Introduction to Problem Solving")
obj.print_Course()
# -
# #### Parameterized Constructor
# +
class Addition:
    """Adds two numbers supplied through the parameterized constructor."""

    # Class-level defaults; shadowed per instance by __init__/calculate.
    first = 0
    second = 0
    answer = 0

    def __init__(self, f, s):
        self.first = f
        self.second = s

    def calculate(self):
        """Store the sum of the two operands in ``self.answer``."""
        self.answer = self.first + self.second

    def display(self):
        """Print both operands and the computed sum."""
        print("First number = " + str(self.first))
        print("Second number = " + str(self.second))
        print("Addition of two numbers = " + str(self.answer))


# Read the operands, then invoke the parameterized constructor with them.
num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
obj = Addition(num1, num2)
obj.calculate()  # perform the addition
obj.display()    # show the result
# -
# ### Creating a class and object with class and instance attributes
# +
class Dogs:
    """Demonstrates a class attribute alongside an instance attribute."""

    # Class attribute: shared by every instance.
    attr1 = "mammal"

    def __init__(self, name):
        # Instance attribute: unique to each object.
        self.name = name


dog1 = Dogs("Oscar")
dog2 = Dogs("Peaches")

# Class attribute reached through each instance's class.
print("Oscar is a {}".format(dog1.__class__.attr1))
print("Peaches is also a {}".format(dog2.__class__.attr1))

# Instance attributes differ per object.
print("My name is {}".format(dog1.name))
print("My name is {}".format(dog2.name))
# -
# ### Creating Class and objects with methods
# +
class SST:
    """A student in SST, identified by name, enrolled in a shared programme."""

    # Class attribute shared by all students.
    prog1 = "Computer Science"

    def __init__(self, name):
        self.name = name

    def speak(self):
        """Print this student's name and programme.

        Bug fix: the original read ``stud1.__class__.prog1``, so every
        instance reported through the global ``stud1`` (and the method
        broke with NameError when ``stud1`` did not exist). It now uses
        ``self`` as intended.
        """
        print("My name is {}".format(self.name))
        print("I'm studying {}".format(self.__class__.prog1))


# Object instantiation
stud1 = SST("<NAME>")
stud2 = SST("Eboseta ")
# Accessing class methods
stud1.speak()
stud2.speak()
# -
# ### Class and Instance Variables
# <ul><li>Instance variables are for data unique to each instance and class variables are for attributes and methods shared by all instances of the class.</li><li> Instance variables are variables whose value is assigned inside a constructor or method with self, whereas class variables are variables whose value is assigned in the class.</li></ul>
# +
# A Python program to show that the variables with a value
# assigned in the class declaration, are class variables and
# variables inside methods and constructors are instance
# variables.
# Class for Dog
class Dog:
    """Illustrates the difference between class and instance variables."""

    # Class variable: a single value shared by the whole class.
    animal = 'dog'

    def __init__(self, breed, color):
        # Instance variables: one value per object.
        self.breed = breed
        self.color = color


Rodger = Dog("Pug", "brown")
Buzo = Dog("Bulldog", "black")

print('Rodger details:')
print('Rodger is a', Rodger.animal)
print('Breed: ', Rodger.breed)
print('Color: ', Rodger.color)

print('\nBuzo details:')
print('Buzo is a', Buzo.animal)
print('Breed: ', Buzo.breed)
print('Color: ', Buzo.color)

# Class variables are also reachable directly through the class.
print("\nAccessing class variable using class name")
print(Dog.animal)
# -
# ### Defining instance variable using the normal method.
# +
# Python program to show that we can create
# instance variables inside methods
# Class for Dog
class Dog:
    """Shows that instance variables can also be created inside ordinary
    methods, not only in the constructor."""

    # Class variable.
    animal = 'dog'

    def __init__(self, breed):
        # Instance variable set by the constructor.
        self.breed = breed

    def setColor(self, color):
        """Create (or overwrite) the ``color`` instance variable."""
        self.color = color

    def getColor(self):
        """Return the color previously stored by ``setColor``."""
        return self.color


Rodger = Dog("pug")
Rodger.setColor("brown")
print(Rodger.getColor())
# -
# ## Class Project I
# One of the key issues that has threatened organizations and institutions is that of logistics. To solve it, employers now use biometric software and computer-vision-enabled tools to verify the identities of their employees and take attendance. Mrs. Jane runs a delivery business with 15 employees, and she would like a way to identify whether a user is one of her employees, take attendance, and assign a task to the employee for the day.
#
# Employees = "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>" , "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>".
#
# Tasks = "Loading", "Transporting", "Reveiwing Orders", "Customer Service", "Delivering Items"
#
# Your mission, should you choose to accept it, is to develop a Python program, using your knowledge of OOP (classes and objects), that takes a user's name and checks whether he/she exists in the list of employees, takes attendance for the day and assigns a task to the employee … otherwise, you politely refuse access to the system.
#
# <b>Hint:</b>
#
# <ol><li>Build a class <b>Employee()</b> that has four methods in the class; <b>check_employee()</b>, <b>take_attendance()</b>, <b>assign_task()</b> and <b>refuse_access()</b>.</li><li>
# You can <b>import random</b> module and use the <b>random.randint()</b> method to randomly select task from the list. </li></ol>
#
# ## Class Project II
# You run a delivery service, and charge people based on their location and weight of their package. The following are some of the things you consider.
#
# You charge N2000, whenever you are delivering a package with weight of 10kg and above to PAU, and N1500 when it is less.
# However, you charge N5000 whenever you deliver to Epe, a package with weight of 10kg and above, and N4000 when it is less.
#
# Develop the python program using your knowledge in OOP, that tells a user how much to pay, based on their location, and package weight.
#
#
| WEEK9/.ipynb_checkpoints/Week 9 - Object Oriented Programming Practice-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kili Tutorial: AutoML for faster labeling with Kili Technology
# In this tutorial, we will show how to use [automated machine learning](https://en.wikipedia.org/wiki/Automated_machine_learning) (AutoML) to accelerate labeling in Kili Technology. We will apply it in the context of text classification: given a tweet, I want to classify whether it is about a real disaster or not (as introduced in [Kaggle NLP starter kit](https://www.kaggle.com/c/nlp-getting-started)).
#
# Why would you want to label more data when Kaggle often provides a fully annotated training set and a testing set?
#
# - Annotate the testing set in order to have more training data once you fine-tuned an algorithm (once you are sure you do not overfit). More data almost always means better scores in machine learning.
# - As a data scientist, annotate data in order to get a feel of what data looks like and what ambiguities are.
#
# But annotating data is a time-consuming task. So we would like to help you annotate faster by fully automating machine learning models thanks to AutoML. Here is what it looks like in Kili:
#
# <img src="./img/automl.gif" alt="Drawing" style="width: 600px;"/>
#
# Additionally:
#
# For an overview of Kili, visit [kili-technology.com](https://kili-technology.com). You can also check out [Kili documentation](https://kili-technology.github.io/kili-docs).
#
# The tutorial is divided into three parts:
#
# 1. AutoML
# 2. Integrate AutoML scikit-learn pipelines
# 3. Automating labeling in Kili Technology
#
# ## 1. AutoML
# Automated machine learning (AutoML) is described as the process of automating both the choice and training of a machine learning algorithm by automatically optimizing its hyperparameters.
#
# There already exist many AutoML framework:
#
# - [H2O](http://docs.h2o.ai/h2o/latest-stable/h2o-docs/automl.html) provides with an AutoML solution with both Python and R bindings
# - [autosklearn](https://automl.github.io/auto-sklearn/master/) can be used for SKLearn pipelines
# - [TPOT](http://epistasislab.github.io/tpot) uses genetic algorithms to automatically tune your algorithms
# - [fasttext](https://fasttext.cc) has [its own AutoML module](https://fasttext.cc/docs/en/autotune.html) to find the best hyperparameters
#
# We will cover the use of `autosklearn` for automated text classification. `autosklearn` explores the hyperparameters grid as defined by SKLearn as a human would [do it manually](https://scikit-learn.org/stable/modules/grid_search.html). Jobs can be run in parallel in order to speed up the exploration process. `autosklearn` can use either [SMAC](http://ml.informatik.uni-freiburg.de/papers/11-LION5-SMAC.pdf) (Sequential Model-based Algorithm Configuration) or [random search](http://www.jmlr.org/papers/volume13/bergstra12a/bergstra12a.pdf) to select the next set of hyperparameters to test at each time.
#
# Once AutoML automatically chose and trained a classifier, we can use this classifier to make predictions. Predictions can then be inserted into Kili Technology. When labeling, labelers first see predictions before labeling. For complex tasks, this can considerably speed up the labeling.
#
# For instance, when annotating voice for [automatic speech recognition](https://en.wikipedia.org/wiki/Speech_recognition), if you use a model that pre-annotates by transcribing speeches, you more than double annotation productivity:
#
# <img src="./img/efficiency_comparison_with_without_model.png" alt="Drawing" style="width: 500px;"/>
# ## 2. Integrate AutoML scikit-learn pipelines
#
# Specifically for text classification, the following pipeline retrieves labeled and unlabeled data from Kili, builds a classifier using AutoML and then enriches back Kili's training set:
#
# <img src="./img/automl_pipeline.png" alt="Drawing" style="width: 900px;"/>
# After retrieving data, [TFIDF](https://en.wikipedia.org/wiki/Tf%E2%80%93idf) pre-processes text data by filtering out common words (such as `the`, `a`, etc) in order to make most important features stand out. These pre-processed features will be fed to a classifier.
#
# *Note:* `autosklearn` runs [better](https://automl.github.io/auto-sklearn/master/installation.html#windows-osx-compatibility) on Linux, so we recommend running code snippets inside a [Docker image](https://hub.docker.com/r/mfeurer/auto-sklearn/):
#
# ```
# docker run --rm -it -p 10000:8888 -v /local/path/to/notebook/folder:/home/kili --entrypoint "/bin/bash" mfeurer/auto-sklearn
# # # cd /home/kili && jupyter notebook --ip=0.0.0.0 --port=8888 --allow-root
# ```
# +
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import accuracy_score

# TF-IDF / feature-selection hyperparameters.
MIN_DOC_FREQ = 2      # drop terms appearing in fewer than 2 documents
NGRAM_RANGE = (1, 2)  # unigrams and bigrams
TOP_K = 20000         # keep at most this many features
TOKEN_MODE = 'word'   # tokenise at the word level


def ngram_vectorize(train_texts, train_labels, val_texts):
    """Vectorise texts with TF-IDF and keep the TOP_K best features.

    The vocabulary and the feature selector are fitted on the training
    texts only, then applied unchanged to the validation texts.
    Returns the (train, validation) feature matrices as float32.
    """
    vectorizer = TfidfVectorizer(
        ngram_range=NGRAM_RANGE,
        dtype='int32',
        strip_accents='unicode',
        decode_error='replace',
        analyzer=TOKEN_MODE,
        min_df=MIN_DOC_FREQ,
    )
    x_train = vectorizer.fit_transform(train_texts)
    x_val = vectorizer.transform(val_texts)

    # Univariate feature selection scored by the ANOVA F-value.
    selector = SelectKBest(f_classif, k=min(TOP_K, x_train.shape[1]))
    selector.fit(x_train, train_labels)

    return (selector.transform(x_train).astype('float32'),
            selector.transform(x_val).astype('float32'))
# -
# Labeled data is split in train and test sets for validation. Then, `autosklearn` classifier is chosen and trained in a limited time.
# +
from tempfile import TemporaryDirectory

# Uncomment these lines if you are not running inside the autosklearn container
# # !conda install gxx_linux-64 gcc_linux-64 swig==3.0.12 --yes
# # !pip install auto-sklearn
import autosklearn
import autosklearn.classification
from sklearn.model_selection import train_test_split


def automl_train_and_predict(X, y, X_to_predict):
    """Fit an AutoML classifier on (X, y) and predict labels for X_to_predict.

    Texts are TF-IDF vectorised, the labelled data is split 80/20 for a
    quick accuracy report, and autosklearn selects and tunes the model
    within the configured time budget.
    """
    x, x_to_predict = ngram_vectorize(X, y, X_to_predict)
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=42)

    # Model choice and hyperparameter search are handled by autosklearn.
    cls = autosklearn.classification.AutoSklearnClassifier(
        time_left_for_this_task=200,
        per_run_time_limit=20,
        seed=10)
    cls.fit(x_train, y_train)

    # Both matrices must share the same fitted feature space.
    assert x_train.shape[1] == x_to_predict.shape[1]

    # Report hold-out accuracy before predicting the unlabelled set.
    predictions_test = cls.predict(x_test)
    print('Accuracy: {}'.format(accuracy_score(y_test, predictions_test)))

    return cls.predict(x_to_predict)
# -
# ## 3. Automating labeling in Kili Technology
# Let's now feed Kili data to the AutoML pipeline. For that you will need to [create a new](https://cloud.kili-technology.com/label/projects/create-project) `Text classification` project. Assets are taken from Kaggle challenge `Real or Not? NLP with Disaster Tweets`. You can download them [here](https://www.kaggle.com/c/nlp-getting-started/data).
# Connect to Kili Technology using `kili-playground` (Kili's official [Python SDK](https://github.com/kili-technology/kili-playground) to interact with Kili API):
# +
# !pip install kili
from kili.authentication import KiliAuth
from kili.playground import Playground

# Kili credentials and target project -- fill these in before running.
email = 'YOUR EMAIL'
password = '<PASSWORD>'
project_id = 'YOUR PROJECT ID'
api_endpoint = 'https://cloud.kili-technology.com/api/label/graphql'

# Authenticate once; `playground` is the client used by all later API calls.
kauth = KiliAuth(email=email, password=password, api_endpoint=api_endpoint)
playground = Playground(kauth)
# -
# Let's insert all assets into Kili. You can download the original unannotated `test.csv` directly [on Kaggle](https://www.kaggle.com/c/nlp-getting-started/data).
# +
import pandas as pd

# Kaggle test set: one tweet per row in the 'text' column.
df = pd.read_csv('./dataset/test.csv')
content_array = []
external_id_array = []
for index, row in df.iterrows():
    # External ids must be unique; derive them from the row index.
    external_id_array.append(f'tweet_{index}')
    content_array.append(row['text'])

# Upload every tweet as an unlabelled TODO asset (no honeypots, no metadata).
playground.append_many_to_dataset(project_id=project_id,
                                  content_array=content_array,
                                  external_id_array=external_id_array,
                                  is_honeypot_array=[False for _ in content_array],
                                  status_array=['TODO' for _ in content_array],
                                  json_metadata_array=[{} for asset in content_array])
# -
# Retrieve the categories of the first job that you defined in Kili interface. Learn [here](https://kili-technology.github.io/kili-docs/docs/projects/customize-interfaces) what interfaces and jobs are in Kili.
# +
# Read the project's labelling interface to discover the single
# classification job and its category names.
project = playground.get_project(project_id=project_id)
assert 'jsonInterface' in project
json_interface = project['jsonInterface']
jobs = json_interface['jobs']
jobs_list = list(jobs.keys())
assert len(jobs_list) == 1, 'More than one job was defined in the interface'
job_name = jobs_list[0]
job = jobs[job_name]
# The category names labelers can choose from for this job.
categories = list(job['content']['categories'].keys())
print(f'Categories are: {categories}')
# -
# We continuously fetch assets from Kili Technology and apply AutoML pipeline. You can launch the next cell and go to Kili in order to label. After labeling a few assets, you'll see predictions automatically pop up in Kili!
#
# Go [here](https://github.com/kili-technology/kili-playground/blob/master/recipes/import_predictions.ipynb) to learn in more details how to insert predictions into Kili.
# +
import os
import time
import warnings
# !pip install tqdm
from tqdm import tqdm

warnings.filterwarnings('ignore')

SECONDS_BETWEEN_TRAININGS = 60


def extract_train_for_auto_ml(job_name, assets, categories, train_test_threshold=0.8):
    """Split Kili assets into labelled training data and unlabelled data.

    Returns ``(X, y, X_to_predict, ids_X_to_predict)``: texts and
    category indices of labelled assets, plus the texts and external
    ids of assets still waiting for a label.
    """
    X = []
    y = []
    X_to_predict = []
    ids_X_to_predict = []
    for asset in assets:
        x = asset['content']
        # Only human labels count as training signal.
        labels = [l for l in asset['labels'] if l['labelType'] in ['DEFAULT', 'REVIEWED']]
        # If no label, queue the asset for prediction.
        if len(labels) == 0:
            X_to_predict.append(x)
            ids_X_to_predict.append(asset['externalId'])
        # Otherwise add each usable label as a training example.
        for label in labels:
            jsonResponse = label['jsonResponse'][job_name]
            is_empty_label = 'categories' not in jsonResponse or len(
                jsonResponse['categories']) != 1 or 'name' not in jsonResponse['categories'][0]
            if is_empty_label:
                continue
            X.append(x)
            y.append(categories.index(
                jsonResponse['categories'][0]['name']))
    return X, y, X_to_predict, ids_X_to_predict


# Bug fix: the model version counter must live OUTSIDE the polling loop.
# It was previously reset to 0 on every iteration, so all predictions
# were uploaded under the same model name 'automl-0'.
version = 0
while True:
    print('Export assets and labels...')
    # NOTE(review): first=100 only fetches the first 100 assets -- raise it
    # (or paginate with skip) for larger projects.
    assets = playground.get_assets(project_id=project_id, first=100, skip=0)
    X, y, X_to_predict, ids_X_to_predict = extract_train_for_auto_ml(job_name, assets, categories)
    if len(X) > 5:
        print('AutoML is on its way...')
        predictions = automl_train_and_predict(X, y, X_to_predict)
        print('Inserting predictions to Kili...')
        external_id_array = []
        json_response_array = []
        for i, prediction in enumerate(tqdm(predictions)):
            json_response = {
                job_name: {
                    'categories': [{
                        'name': categories[prediction],
                        'confidence': 100
                    }]
                }
            }
            external_id_array.append(ids_X_to_predict[i])
            json_response_array.append(json_response)
        # Good practice: version your model so you know the result of every model
        playground.create_predictions(project_id=project_id,
                                      external_id_array=external_id_array,
                                      model_name_array=[f'automl-{version}']*len(external_id_array),
                                      json_response_array=json_response_array)
        print('Done.\n')
        version += 1
    # Sleep at loop level so we never busy-poll the API, even while
    # fewer than 6 labels exist.
    time.sleep(SECONDS_BETWEEN_TRAININGS)
# -
#
# ## Summary
# In this tutorial, we accomplished the following:
#
# We introduced the concept of AutoML as well as several of the most-used frameworks for AutoML. We demonstrated how to leverage AutoML to automatically create predictions in Kili. If you enjoyed this tutorial, check out the other Recipes for other tutorials that you may find interesting, including demonstrations of how to use Kili.
#
# You can also visit the Kili website or Kili documentation for more info!
| recipes/automl_text_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="-AOqpKzpMl_Z" executionInfo={"status": "ok", "timestamp": 1611360171527, "user_tz": 480, "elapsed": 2237, "user": {"displayName": "", "photoUrl": "", "userId": ""}} outputId="ad4e9a77-3777-4db7-ae90-cb1a60bc11e1"
# !git clone https://github.com/sbooeshaghi/colosseum.git
# + id="I15SPYZiM0D0" executionInfo={"status": "ok", "timestamp": 1611360175871, "user_tz": 480, "elapsed": 1263, "user": {"displayName": "", "photoUrl": "", "userId": ""}}
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import matplotlib.patches as mpatches
import math
from matplotlib.ticker import MaxNLocator
import matplotlib.ticker as mtick

# Larger default font for every figure in this notebook.
plt.rcParams.update({'font.size': 20})
# %config InlineBackend.figure_format = 'retina'
# + id="j1bjs5j_M1RL" executionInfo={"status": "ok", "timestamp": 1611360609003, "user_tz": 480, "elapsed": 284, "user": {"displayName": "", "photoUrl": "", "userId": ""}}
# Fraction-collector dwell-time experiment: tube weights before/after filling.
df = pd.read_csv("/content/colosseum/data/experiment_2.csv")

# Rename the spreadsheet headers to short snake_case column names.
columns = {"Dwell time (s)": "time",
           "Trial": "experiment",
           "Before (g)": "empty_weight",
           "After (g)": "total_weight",
           "Tube number": "tube_number",
           "Difference (g)": "fluid_weight",
           "Expected (g)": "expected",
           "Percent error (%)": "error"}
df = df.rename(columns=columns)

# Convert fluid weight (g) to volume (mL); the divisor 1 assumes a
# density of 1 g/mL (water) -- kept explicit for clarity.
df["fluid_vol"] = df["fluid_weight"]/1
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="dv6LQnXBM3vi" executionInfo={"status": "ok", "timestamp": 1611360609678, "user_tz": 480, "elapsed": 287, "user": {"displayName": "", "photoUrl": "", "userId": ""}} outputId="c6241985-ba63-4298-b7c1-454483368e9c"
df.head()
# + id="jJD4IOCDM4vq" executionInfo={"status": "ok", "timestamp": 1611360807652, "user_tz": 480, "elapsed": 294, "user": {"displayName": "", "photoUrl": "", "userId": ""}}
# Replicate labels, the dwell-time grid (12 s steps up to 240 s), and the
# mean collected volume at each dwell time averaged across replicates.
experiments = np.unique(df["experiment"].values)
tpts = np.arange(12, 252, 12)
means = df.groupby(["time"])["fluid_vol"].mean()
# + id="ZA4F_5tAkwaY" executionInfo={"status": "ok", "timestamp": 1611360809024, "user_tz": 480, "elapsed": 299, "user": {"displayName": "", "photoUrl": "", "userId": ""}} outputId="8133878e-4c91-4d1e-d770-49825bf48867" colab={"base_uri": "https://localhost:8080/"}
means
# + id="Sco6by0_ljW5" executionInfo={"status": "ok", "timestamp": 1611360813364, "user_tz": 480, "elapsed": 239, "user": {"displayName": "", "photoUrl": "", "userId": ""}} outputId="a032a61a-6967-4b99-804a-8791afdfb3b8" colab={"base_uri": "https://localhost:8080/"}
tpts
# + id="PkpzW_XIM6_A" executionInfo={"status": "ok", "timestamp": 1611360821531, "user_tz": 480, "elapsed": 253, "user": {"displayName": "", "photoUrl": "", "userId": ""}}
slope, intercept, r_value, p_value, std_err = stats.linregress(tpts, means)
# + colab={"base_uri": "https://localhost:8080/", "height": 627} id="LZWQTcJlM8RK" executionInfo={"status": "ok", "timestamp": 1611360833076, "user_tz": 480, "elapsed": 2184, "user": {"displayName": "", "photoUrl": "", "userId": ""}} outputId="aeecd8b5-66d4-4c26-cfcc-f0c32d7209aa"
fig, ax = plt.subplots(figsize=(10, 10))
# Regression line, labelled with the coefficient of determination.
ax.plot(tpts, intercept + slope*tpts, 'black', label='r$^2$ = {:.3f}'.format(r_value**2), linewidth=3)
# One marker shape / grey level per replicate.
shapes = ["o", "^", "s"]
colors = ["dimgrey", "silver", "gainsboro"]
for idx, exp in enumerate(experiments):
    tmp = df[df["experiment"] == exp]
    # NOTE(review): cumsum is computed but never used below -- candidate for removal.
    cumsum = tmp["fluid_vol"].values.cumsum()
    ax.plot(tmp["time"].values, tmp["fluid_vol"].values, label="Replicate {}".format(idx+1), linestyle='None', marker=shapes[idx], color=colors[idx], markeredgecolor='black', markersize=12)
ax.set(**{
    "xlabel": "Fraction dwell time [s]",
    "ylabel": "Fraction size [mL]"
})
ax.legend()
# Save a publication-quality copy before showing it inline.
fig.savefig("fraction_dwelltime.png", bbox_inches='tight', dpi=300)
plt.show()
# + id="rCNPcbj3M-hy"
| analysis/constant_flowrate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CoNSeP
import numpy as np
import matplotlib.pyplot as plt

# One pre-extracted CoNSeP training patch (540x540 window, 164x164 stride)
# produced by hover_net's patch-extraction step.
file = r"C:\Users\Tasli\Desktop\Python_Folder_New\ECCE635\hover_net-master\dataset\training_data\consep\train\540x540_164x164\train_2_000.npy"
file
data = np.load(file)
data.shape
# Channels 0-2 hold the RGB image.
rgb = data[:, :, 0:3]
rgb.shape
plt.imshow(rgb)
plt.show()
ch4 = data[:, :, 3]
ch4.shape
ch5 = data[:, :, 4]
# Bug fix: the original inspected ch4.shape a second time here;
# ch5.shape was clearly intended.
ch5.shape
print(ch4)  # CHANNEL 4 -> Instance Map
print(np.amax(ch4))
print(np.amax(ch5))  # CHANNEL 5 -> Type Map
# # PanNuke
# Fold 2 of the PanNuke dataset: images, tissue-type labels, and masks.
images_file = r".\dataset\PanNuke\Fold 2\images\fold2\images.npy"
data_images = np.load(images_file)
data_images.shape
types_file = r".\dataset\PanNuke\Fold 2\images\fold2\types.npy"
data_types = np.load(types_file)
data_types.shape
data_types
masks_file = r".\dataset\PanNuke\Fold 2\masks\fold2\masks.npy"
data_masks = np.load(masks_file)
data_masks.shape
# Inspect each of the six mask channels of the first image:
# print its shape and the maximum value it contains.
ch1 = data_masks[0, :, :, 0]
print(ch1.shape)
print(np.amax(ch1))
ch2 = data_masks[0, :, :, 1]
print(ch2.shape)
print(np.amax(ch2))
ch3 = data_masks[0, :, :, 2]
print(ch3.shape)
print(np.amax(ch3))
ch4 = data_masks[0, :, :, 3]
print(ch4.shape)
print(np.amax(ch4))
ch5 = data_masks[0, :, :, 4]
print(ch5.shape)
print(np.amax(ch5))
ch6 = data_masks[0, :, :, 5]
print(ch6.shape)
print(np.amax(ch6))
| ExploringInputFiles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re
import pandas as pd

# Scraped petition data; the 'count' column is still a raw string here.
df = pd.read_csv("../petition_data_all_no.csv")
df
# ## 숫자가 포함된 문자열을 받으면 숫자로 바꾸기
def find_int(count):
    """Extract the integer embedded in a string such as '1,234 명'.

    All digit runs in *count* are concatenated (so thousands separators
    are ignored) and parsed as a single integer. Raises ValueError when
    the string contains no digits, matching the original behaviour.
    """
    # re.findall already returns strings, so the original str() mapping
    # over the matches was redundant and has been removed.
    return int(''.join(re.findall(r'\d+', count)))
# ## map을 이용하여 전체 변경
df["count"] = df["count"].map(find_int)
df
find_int(df["count"][0])
df.to_csv("../petition_data_all.csv", index=None)
df = pd.read_csv("../../petition_data_all.csv")
df
# ## progress가 브리핑 내용의 형식이 약간 달라서 정부에서 브리핑한 청원 추가
datas = df[df["progress"]=="브리핑 "]["num"]
from urllib.request import urlopen
from bs4 import BeautifulSoup
import urllib
import csv
import re
def FindPetition(num):
    """Scrape petition *num* and append its fields to the global CSV ``writer``.

    On success returns the petition overview text; on a fetch or parse
    error the error is printed and None is returned.
    """
    # NOTE: the original declared `global kr, other`, but neither name is
    # defined or used anywhere, so the declaration was dropped.
    url = "https://www1.president.go.kr/petitions/" + str(num)
    try:
        html = urlopen(url)
    except urllib.error.HTTPError as e:
        print(e)
        print("=============error %d =============" % num)
    else:
        try:
            bsObj = BeautifulSoup(html.read(), "html.parser")
        except Exception as e:
            # Bug fix: the original caught `urllib.exceptions.SSLError`,
            # which does not exist (urllib has no `exceptions` submodule),
            # so the handler itself raised AttributeError on any failure.
            # Catch broadly and report, as originally intended.
            print(e)
            print("=============error %d =============" % num)
        else:
            # Pull the petition fields out of the page.
            progress = bsObj.find("div", {"class": "petitionsView_progress"}).get_text()
            title = bsObj.find("h3", {"class": "petitionsView_title"}).get_text()
            count = bsObj.find("h2", {"class": "petitionsView_count"}).get_text()
            petition_all = bsObj.find("div", {"class": "petitionsView_write"}).get_text()
            # The body text holds both the overview and the answer,
            # delimited by the Korean headers 청원개요 / 답변원고.
            petition_overview = petition_all[petition_all.find("청원개요")+4:petition_all.find("답변원고")]
            petition_answer = petition_all[petition_all.find("답변원고")+4:]
            days = bsObj.find("ul", {"class": "petitionsView_info_list"}).get_text()
            days = days.split("\n")
            category = days[1]
            sdays = days[2]
            edays = days[3]
            person = days[4]
            # Progress indicator: report every 1000th petition.
            if(num%1000==0):
                print(num)
            try:
                # Strip the Korean field labels off each value.
                days = str(days[4:])
                progress = str(progress[1:])
                title = str(title)
                count = str(count)
                petition_overview = str(petition_overview)
                petition_answer = str(petition_answer)
                category = str(category[4:])
                sdays = str(sdays[4:])
                edays = str(edays[4:])
                person = str(person[3:])
            except Exception:
                # Bug fix: narrowed the original bare `except:` so that
                # KeyboardInterrupt/SystemExit still propagate.
                print("=============error %d =============" % num)
            else:
                csvRow = []
                csvRow.extend([num, category, sdays, edays, person, progress, title, count, petition_overview, petition_answer])
                writer.writerow(csvRow)
            return petition_overview
# Re-scrape every briefed petition and collect it into its own CSV.
csvFile = open("briefing.csv", 'w', encoding='UTF-8')
writer = csv.writer(csvFile)
# Header row must match the column order FindPetition writes.
writer.writerow(["num","category","start-days","end-days","person","progress","title","count","petition_overview","petition_answer"])
for i in datas:
    FindPetition(i)
csvFile.close()
# Reload the freshly written file to verify its contents.
df = pd.read_csv("briefing.csv")
df
| Data/Filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Environment Check -- Deactivate on a working host
import sys

# Report exactly which interpreter is executing this notebook.
for detail in (sys.executable, sys.version, sys.version_info):
    print(detail)
# # Numerical Methods - Single Variable Newtons Method
# The application of fundamental principles of modeling and mechanics often leads to an
# algebraic or transcendental equation that cannot be easily solved and represented in a closed
# form. In these cases a numerical method is required to obtain an estimate of the root or
# roots of the expression.
#
# Newton's method is an iterative technique that can produce good estimates of solutions to
# such equations. The method is employed by rewriting the equation in the form `f(x) = 0`,
# then successively manipulating guesses for `x` until the function evaluates to a value close
# enough to zero for the modeler to accept.
#
# 
#
# The figure above is a graph of some function whose intercept with the `x-axis` is unknown. The goal
# of Newton's method is to find this intersection (root) from a realistic first guess. Suppose
# the first guess is `x1`, shown on the figure as the right-most specific value of `x`. The value
# of the function at this location is `f(x1)`. Because `x1` is supposed to be a root the difference
# from the value zero represents an error in the estimate. Newton's method simply provides a
# recipe for corrections to this error.
#
# Provided `x1` is not near a minimum or maximum (slope of the function is not zero) then a
# better estimate of the root can be obtained by extending a tangent line from `x1, f(x1)` to
# the `x-axis`. The intersection of this line with the axis represents a better estimate of the
# root. This new estimate is `x2`. A formula for `x2` can be derived from the geometry of the triangle
# `x2,f(x1),x1`. Recall from calculus that the tangent to a function at a particular point is
# the first derivative of the function. Therefore, from the geometry of the triangle and the
# definition of tangent we can write,
#
# \begin{equation}
# tan(\theta)=\frac{df}{dx}\Biggr\vert_{x_1} = \frac{f(x_1)}{x_1 - x_2}
# \end{equation}
#
# Solving the equation for `x`2 results in a formula that expresses `x2` in terms of the first guess
# plus a correction term.
#
# \begin{equation}
# x_2=x_1 - \frac{f(x_1)}{\frac{df}{dx}\vert_{x_1}}
# \end{equation}
#
# The second term on the right hand side is the correction term to the estimate on the right
# hand side. Once `x2` is calculated we can repeat the formula substituting `x2` for `x1` and `x3` for
# `x2` in the formula. Repeated application usually leads to one of three outcomes:
# 1. a root;
# 2. divergence to +/- $\inf$ ; or
# 3. cycling.
#
# These three outcomes are discussed below in various subsections along with some remedies.
# The generalized formula is
#
# \begin{equation}
# x_{k+1}=x_{k} - \frac{ f(x_{k}) }{ \frac{df}{dx}\rvert_{x_k} }
# \label{eqn:NewtonFormula}
# \end{equation}
#
# If the derivative is evaluated using analytical derivatives the method is called Newton's
# method, if approximations to the derivative are used, it is called a quasi-Newton method.
#
# ## Newton's Method --- Using analytical derivatives
# This subsection is an example in Python of implementing Newton's method with analytical derivatives.
#
# The recipe itself is:
# 1) Write the function in proper form, and code it into a computer.
# 2) Write the derivative in proper form and code it into a computer.
# 3) Make an initial guess of the solution (0 and 1 are always convenient guesses).
# 4) Evaluate the function, evaluate the derivative, calculate their ratio.
# 5) Subtract the ratio from the current guess and save the result as the update.
# 6) Test for stopping:
# i) Did the update stay the same value? Yes, then stop, probably have a solution.
# ii) Is the function nearly zero? Yes, then stop we probably have a solution.
# iii) Have we tried too many updates? Yes, then stop the process is probably cycling, stop.
# 7) If stopping is indicated proceed to next step, otherwise proceed back to step 4.
# 8) Stopping indicated, report last update as the result (or report failure to find solution), and related information about the status of the numerical method.
#
#
# The following example illustrates these step as well as an ipython implementation of Newton's method.
#
# Suppose we wish to find a root (value of `x`) that satisfies:
#
# \begin{equation}
# f(x) = e^x - 10 cos(x) -100
# \end{equation}
#
# Then we will need to code it into a script. Here is a code fragment that will generate the prototype function
# import built in function for e^x, cosine
from math import exp, cos, sin
# Define the function
def func(x):
    """Evaluate f(x) = e^x - 10*cos(x) - 100, the equation whose root we seek."""
    return exp(x) - 10 * cos(x) - 100
# Notice in the code fragment we import three built-in functions from the Python math package, specifically $\exp()$, $\sin()$, and $\cos ()$.
# The next step is to code the derivative. In this case the derivative is
#
# \begin{equation}
# \frac{df}{dx}\vert{(x)} = e^x + 10 \sin(x)
# \end{equation}
#
# and the prototype function is coded as
def dfdx(x):
    """Analytical derivative of func: f'(x) = e^x + 10*sin(x)."""
    return exp(x) + 10 * sin(x)
# Next we will need script to read in an initial guess, and ask us how many trials we will use to try to find a solution, as well as how close to zero we should be before we declare victory.
# Now for the Newton Method Implementation
# Read the three user inputs (initial guess, iteration cap, tolerance)
# with a single error-trapped prompt helper instead of three copy-pasted
# while-loops (the throwaway `yes` flag is no longer needed).
def _prompt_value(message, cast):
    """Prompt repeatedly until the reply parses with *cast*; return the value.

    cast is float or int.  The bare `except:` of the original is narrowed
    to ValueError so Ctrl-C / EOF still interrupt instead of looping forever.
    """
    while True:
        reply = input(message)
        try:
            return cast(reply)
        except ValueError:
            print ("Value should be numeric, try again \n")

# Get initial guess for the root
xnow = _prompt_value("Enter an initial guess for Newton method \n", float)
# Get maximum number of Newton iterations
HowMany = _prompt_value("Enter iteration maximum \n", int)
# Get stopping criterion (zero tolerance used by both stopping tests)
HowSmall = _prompt_value("Enter a solution tolerance (e.g. 1e-06) \n", float)
# The use of `HowSmall` is called a zero tolerance.
# We will use the same numerical value for two tolerance tests.
# Also notice how we are using error traps to force numeric input.
# Probably overkill for this example, but we already wrote the code in an earlier essay, so might as well reuse the code.
# Professional codes do a lot of error checking before launching into the actual processing - especially if the processing part is time consuming, its worth the time to check for obvious errors before running for a few hours then at some point failing because of an input value error that was predictable.
#
# Now back to the tolerance tests.
# The first test is to determine if the update has changed or not.
# If it has not, we may not have a correct answer, but there is no point continuing because the update is unlikely to move further.
# The test is something like
#
# \begin{equation}
# \text{IF}~\lvert x_{k+1} - x_{k} \rvert < \text{Tol.~ THEN Exit and Report Results}
# \end{equation}
#
# The second test is if the function value is close to zero.
# The structure of the test is similar, just a different argument. The second test is something like
#
# \begin{equation}
# \text{IF}~\lvert f(x_{k+1}) \rvert < \text{Tol.~ THEN Exit and Report Results}
# \end{equation}
#
# One can see from the nature of the two tests that a programmer might want to make the tolerance values different.
# This modification is left as a reader exercise.
#
# Checking for maximum iterations is relatively easy, we just include code that checks for normal exit the loop.
#
# Here is code fragment that implements the method, makes the various tests, and reports results.
# now we begin the process
count = 0
for i in range(0, HowMany, 1):
    # One Newton update: x_{k+1} = x_k - f(x_k)/f'(x_k)
    xnew = xnow - func(xnow)/dfdx(xnow)
    # stopping criteria -- update not changing
    if abs(xnew - xnow) < HowSmall:
        print ("Update not changing \n")
        print("Function value =",func(xnew))
        print(" Root value =",xnew)
        break
    # stopping criteria -- function close to zero
    # BUG FIX: in the original, the first if/else always break'ed or
    # continue'd, so this second test was unreachable dead code.
    if abs( func(xnew) ) < HowSmall:
        print ("Function value close to zero \n")
        print("Function value =",func(xnew))
        print(" Root value =",xnew)
        break
    # Neither criterion met: accept the update and iterate again.
    xnow = xnew
    count = count + 1
# next step, then have either broken from the loop or iteration counted out
# (guard HowMany > 0 so func(xnew) is never evaluated before any update exists)
if count == HowMany and HowMany > 0:
    print(" Iteration Limit Reached ")
    print("Function value =",func(xnew))
    print(" Root value =",xnew)
print("End of NewtonMethod.py ")
# Now we simply connect the three fragments, and we would have a working Python script that implements Newton's method for the example equation. The example is specific to the particular function provided, but the programmer could move the two functions `func` and `dfdx` into a user specified module, and then load that module in the program to make it even more generic. The next section will use such an approach to illustrate the ability to build a generalized Newton method and <em>only have to program the function itself</em>
# ## Newton's Method --- Using finite-differences to estimate derivatives
# A practical difficulty in using Newton's method is determining the value of the derivative in cases where differentiation is difficult.
# In these cases we can replace the derivative by a finite difference equation and then proceed as in Newton's method.
#
# Recall from calculus that the derivative was defined as the limit of the difference quotient:
#
# \begin{equation}
# \frac{df}{dx}\vert_{x} = \lim_{\Delta x \rightarrow 0}\frac{f(x + \Delta x) - f(x) }{\Delta x}
# \end{equation}
#
# A good approximation to the derivative should be possible by using this formula with a small, but non-zero value for $\Delta x$.
#
# \begin{equation}
# \frac{df}{dx}\vert_{x} \approx \frac{f(x + \Delta x) - f(x) }{\Delta x}
# \end{equation}
#
# When one replaces the derivative with the difference formula the root finding method the resulting update formula is
#
# \begin{equation}
# x_{k+1}=x_k - \frac{f(x_k) \Delta x}{f(x_k + \Delta x)-f(x_k)}
# \end{equation}
#
# This root-finding method is called a quasi-Newton method.
#
# Here is the code fragment that we change by commenting out the analytical derivative and replacing it with a first-order finite difference approximation of the derivative. The numerical value $1e-06$ is called the step size ($\Delta x$) and should be an input value (rather than built-in to the code as shown here) like the tolerance test values, and be passed to the function as another argument.
# reset the notebook
# %reset
# import built in function for e^x, cosine
from math import exp, cos, sin
# Define the function
def func(x):
    """f(x) = e^x - 10*cos(x) - 100; root-finding target for the quasi-Newton demo."""
    value = exp(x) - 10 * cos(x) - 100
    return value
def dfdx(x, dx=1e-06):
    """Forward-difference approximation of df/dx at x.

    dx is the finite-difference step size.  It defaults to the 1e-06 that
    was previously hard-coded, so existing callers are unchanged, but it
    can now be passed explicitly as the surrounding text recommends.
    """
    # dfdx = exp(x) + 10*sin(x)   # analytical derivative, kept for reference
    return (func(x + dx) - func(x)) / dx
# Now for the Newton Method Implementation
# Read the three user inputs (initial guess, iteration cap, tolerance)
# with a single error-trapped prompt helper instead of three copy-pasted
# while-loops (the throwaway `yes` flag is no longer needed).
def _prompt_value(message, cast):
    """Prompt repeatedly until the reply parses with *cast*; return the value.

    cast is float or int.  The bare `except:` of the original is narrowed
    to ValueError so Ctrl-C / EOF still interrupt instead of looping forever.
    """
    while True:
        reply = input(message)
        try:
            return cast(reply)
        except ValueError:
            print ("Value should be numeric, try again \n")

# Get initial guess for the root
xnow = _prompt_value("Enter an initial guess for Newton method \n", float)
# Get maximum number of quasi-Newton iterations
HowMany = _prompt_value("Enter iteration maximum \n", int)
# Get stopping criterion (zero tolerance used by both stopping tests)
HowSmall = _prompt_value("Enter a solution tolerance (e.g. 1e-06) \n", float)
# now we begin the process
count = 0
for i in range(0, HowMany, 1):
    # One quasi-Newton update using the finite-difference derivative.
    xnew = xnow - func(xnow)/dfdx(xnow)
    # stopping criteria -- update not changing
    if abs(xnew - xnow) < HowSmall:
        print ("Update not changing \n")
        print("Function value =",func(xnew))
        print(" Root value =",xnew)
        break
    # stopping criteria -- function close to zero
    # BUG FIX: in the original, the first if/else always break'ed or
    # continue'd, so this second test was unreachable dead code.
    if abs( func(xnew) ) < HowSmall:
        print ("Function value close to zero \n")
        print("Function value =",func(xnew))
        print(" Root value =",xnew)
        break
    # Neither criterion met: accept the update and iterate again.
    xnow = xnew
    count = count + 1
# next step, then have either broken from the loop or iteration counted out
# (guard HowMany > 0 so func(xnew) is never evaluated before any update exists)
if count == HowMany and HowMany > 0:
    print(" Iteration Limit Reached ")
    print("Function value =",func(xnew))
    print(" Root value =",xnew)
print("End of NewtonMethod.py ")
# Pretty much the same result, but now we don't have to determine the analytical derivative.
| 9-MyJupyterNotebooks/10-NewtonsMethod/NewtonsMethod.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feature Selection
#
# - 모델을 구성하는 주요 피처들을 선택
# 불 필요한 다수의 피처들로 인해 모델 성능을 떨어뜨릴 가능성 제거
# 설명 가능한 모델이 될 수 있도록 피처들을 선별
#
# - feature selection 유형
# 피처값의 분포, null, 피처 간 높은 상관도, 결정값과의 독립성등을 고려
# 모델의 피처 중요도 기반
#
# # sklearn Feature Selection 지원
#
# - RFE(Recursive Feature Elimination)
# 모델 최초 학습 후 Feature 중요도 선정
# feature 중요도가 낮은 속성들을 차례로 제거해 가면서 반복적으로 학습/평가를 수행하여
# 최적의 feature 추출
# 수행시간이 오래 걸리고, 낮은 속성들을 제거해 나가는 메커니즘이 정확한 Feature Selection을 찾는
# 목표에 정확히 부합하지 않을 수 있음
# - SelectFromModel
# 모델 최초 학습 후 선정된 Feature 중요도에 따라 평균/중앙값의 특정 비율 이상인 feature들을 선택
# +
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV, RFE
from sklearn.datasets import make_classification

# Generate 1000 samples with 25 features for classification;
# only 3 features are informative and 2 are redundant.
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)

# Choose a linear-kernel SVC; its coefficients rank feature importance.
svc = SVC(kernel='linear')

# RFECV repeatedly removes the weakest features and re-fits/re-scores.
# step=1 removes one feature per round (step=2 would go 25 -> 23 -> 21 ...).
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
              scoring='accuracy', verbose=2)
rfecv.fit(X, y)
print('Optimal number of features : %d' % rfecv.n_features_)

# Plot number of features VS. cross-validation scores
# NOTE(review): RFECV.grid_scores_ was removed in scikit-learn 1.2; on
# newer versions use rfecv.cv_results_['mean_test_score'] instead.
plt.figure()
plt.xlabel('Number of features selected')
plt.ylabel('Cross validation score (nb of correct classifications)')
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
# -
# Build a classification task using 3 informative features
# (same generator as above, but with 10x more samples).
X, y = make_classification(n_samples=10000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)
# +
# Select the SVC classifier (linear kernel, coefficients rank features).
svc = SVC(kernel='linear')

# Re-run RFECV on the larger sample: recursively drop the weakest feature
# and re-fit/score each round.
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
              scoring='accuracy', verbose=2)
rfecv.fit(X, y)
print('Optimal number of features:%d' % rfecv.n_features_)

# Plot number of features VS. cross-validation scores
# NOTE(review): grid_scores_ was removed in scikit-learn 1.2; use
# cv_results_['mean_test_score'] on newer versions.
plt.figure()
plt.xlabel('Number of features selected')
plt.ylabel('Cross validation score (nb of correct classifications)')
plt.plot(range(1, len(rfecv.grid_scores_)+1), rfecv.grid_scores_)
plt.show()
# -
# SelectFromModel
# +
from sklearn.datasets import load_diabetes

# Load the diabetes regression dataset (10 features, continuous target).
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
print(diabetes.DESCR)
# +
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LassoCV

# Fit a cross-validated Lasso; |coefficient| serves as the importance score.
lasso = LassoCV().fit(X, y)
importance = np.abs(lasso.coef_)
feature_names = np.array(diabetes.feature_names)
plt.bar(height=importance, x=feature_names)
# BUG FIX: title typo -- was 'Feature importances iva coefficients'
plt.title('Feature importances via coefficients')
plt.show()
# +
from sklearn.feature_selection import SelectFromModel
from time import time

# Threshold just above the 3rd-largest importance, so exactly the top two
# features clear it and are kept by SelectFromModel.
threshold = np.sort(importance)[-3] + 0.01
print('threshold', threshold)
sfm = SelectFromModel(lasso, threshold=threshold).fit(X, y)
print(f'Features selected by SelectFromModel:{feature_names[sfm.get_support()]}')
# -
# # Permutation importance
#
# - 특정 피처들의 값을 완전히 변조했을 때 모델 성능이 얼마나 저하되는지를 기준으로 해당 피처의 중요도를 산정
# - 학습 데이터를 제거하거나/변조하면 다시 재 학습을 수행해야 하므로 수행 시간이 오래 걸림
# - 일반적으로 테스트 데이터(검증 데이터)에 특정 피처들을 반복적으로 변조한 뒤 해당 피처의 중요도를 평균적으로 산정
#
# - Permutation importance 프로세스
# 1. 원본 모델의 기준 평가 성능을 설정.
# 2. 개별 feature 별로 아래 수행.
# 1. 설정된 iteration 값 별로 아래 수행
# a. 해당 feature로 shuffle
# b. 모델 성능 평가
# 2. 기준 평가 성능에서 모델 성능이 얼마나 저하되었는지 평가
#
# +
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score

# Baseline: ridge regression on the diabetes data, scored on a held-out split.
diabetes = load_diabetes()
X_train, X_val, y_train, y_val = train_test_split(
    diabetes.data, diabetes.target, random_state=0)

# train, predict, R2 score evaluation
model = Ridge(alpha=1e-2).fit(X_train, y_train)
y_pred = model.predict(X_val)
print('r2 score:', r2_score(y_val, y_pred))
# +
from sklearn.inspection import permutation_importance

# Shuffle each feature 30 times on the validation set; the average score
# drop is that feature's permutation importance.
r = permutation_importance(model, X_val, y_val, n_repeats=30, random_state=0)

# Sort by mean permutation importance (descending) and print mean +/- std
# for features whose importance is significantly positive (mean - 2*std > 0).
for i in r.importances_mean.argsort()[::-1]:
    if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
        print(diabetes.feature_names[i], ' ', np.round(r.importances_mean[i], 4),
              ' +/-', np.round(r.importances_std[i], 5))
# -
# # 왜 feature importance는 절대적인 feature selection 기준이 될 수 없는가?
#
# - Feature importance는 최적 tree 구조를 만들기 위한 피처들의 impurity가 중요 기준임. 결정 값과 관련이 없어도 feature importance가 높아 질 수 있음.
# - Feature importance는 학습 데이터를 기반으로 생성됨. 테스트 데이터에서는 달라질 수 있음.
# - Feature importance는 number 형의 높은 cardinality feature에 biased 되어 있음.
#
# +
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
# +
# titanic dataset load (requires network access to openml.org)
X, y = fetch_openml('titanic', version=1, as_frame=True, return_X_y=True)
rng = np.random.RandomState(seed=42)

# Add two pure-noise columns to probe feature-importance bias:
# random_cat -- low-cardinality categorical noise (3 distinct values)
X['random_cat'] = rng.randint(3, size=X.shape[0])
# random_num -- high-cardinality numeric noise (essentially unique per row)
X['random_num'] = rng.randn(X.shape[0])

categorical_columns = ['pclass', 'sex', 'embarked', 'random_cat']
numerical_columns = ['age', 'sibsp', 'parch', 'fare', 'random_num']

X = X[categorical_columns + numerical_columns]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)

# Impute nulls and one-hot encode categoricals; mean-impute numericals.
categorical_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
numerical_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='mean'))
])

preprocessing = ColumnTransformer(
    [('cat', categorical_pipe, categorical_columns),
     ('num', numerical_pipe, numerical_columns)
    ])

# Full model: preprocessing followed by a random forest classifier.
rf = Pipeline([
    ('preprocess', preprocessing),
    ('classifier', RandomForestClassifier(random_state=42))
])
rf.fit(X_train, y_train)
# -
# A large train/test gap suggests the forest overfits the noise columns.
print('RF train accuracy: %0.3f' % rf.score(X_train, y_train))
print('RF test accuracy: %0.3f' % rf.score(X_test, y_test))
# +
# Pull the fitted OneHotEncoder out of the pipeline to recover feature names.
ohe = (rf.named_steps['preprocess'].named_transformers_['cat'].named_steps['onehot'])
# NOTE(review): get_feature_names was removed in scikit-learn 1.2; on newer
# versions use ohe.get_feature_names_out(categorical_columns) instead.
feature_names = ohe.get_feature_names(input_features=categorical_columns)
feature_names = np.r_[feature_names, numerical_columns]

# Impurity-based (MDI) importances from the fitted forest, weakest first.
tree_feature_importances = (
    rf.named_steps['classifier'].feature_importances_)
sorted_idx = tree_feature_importances.argsort()

y_ticks = np.arange(0, len(feature_names))
fig, ax = plt.subplots()
ax.barh(y_ticks, tree_feature_importances[sorted_idx])
# BUG FIX: tick positions must be set before tick labels; the original
# called set_yticklabels first and then set_yticks, which can drop or
# misalign the custom labels.
ax.set_yticks(y_ticks)
ax.set_yticklabels(feature_names[sorted_idx])
ax.set_title('random forest feature importances (MDI)')
fig.tight_layout()
plt.show()
# +
# Permutation importance measured on the held-out test split.
result = permutation_importance(rf, X_test, y_test,
                                n_repeats=10, random_state=42, n_jobs=2)
sorted_idx = result.importances_mean.argsort()

# One box per feature (the n_repeats shuffles), weakest at the bottom.
fig, ax = plt.subplots()
box_data = result.importances[sorted_idx].T
ax.boxplot(box_data, vert=False, labels=X_test.columns[sorted_idx])
ax.set_title('Permutation Importances (test set)')
fig.tight_layout()
plt.show()
# +
# Same measurement on the training split, for comparison with the test set.
result = permutation_importance(rf, X_train, y_train,
                                n_repeats=10, random_state=42, n_jobs=2)
sorted_idx = result.importances_mean.argsort()

fig, ax = plt.subplots()
box_data = result.importances[sorted_idx].T
ax.boxplot(box_data, vert=False, labels=X_train.columns[sorted_idx])
ax.set_title('Permutation importances (train set)')
fig.tight_layout()
plt.show()
| ml/sklearn/Feature selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jonra1993/Python-for-Data-Science-IBM/blob/master/Module2_PythonDataStructures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="EQYyp7XkRV5N" colab_type="text"
# #**Data Structures**
# + [markdown] id="BL5hyfoqRD-l" colab_type="text"
# ## **1. Lists and Tuples**
#
#
# + [markdown] id="jHbwL2Un4mjw" colab_type="text"
# ### **Tuples**
# + [markdown] id="9bZgpCRe1hh1" colab_type="text"
# **Tuples** are ordered sequences. They are immutable, so they cannot be changed
# + id="PQwkIKHARUiN" colab_type="code" colab={}
# Example of tuple
ratings = (1, 2, 3, 4, 5)

# + id="D8cbl_PaSGeJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f8c9ab28-a468-4085-9722-d6fdd7c7af81"
# Type of tuples are always tuple, regardless of the element types mixed inside
others = ('Hello', 2, 12.1)
print(type(others))

# + id="CEpfFQ-C2Hij" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9b944cec-796d-4b95-cf06-b8ff0be8c010"
# Extract tuple content by index (indexing starts at 0, so [2] is 12.1)
content = ('Hello', 2, 12.1)
print(content[2])

# + [markdown] id="bb-mgxvj2lQV" colab_type="text"
# They can be sliced similarly to lists

# + id="P3pzkcq72X81" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bc24ba7c-af1b-4042-b55f-793329646245"
# [::2] takes every second element: ('Hello', 12.1)
content = ('Hello', 2, 12.1)
print(content[::2])

# + id="Ifa3H09o2skL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2e2e261a-b7de-4d26-caae-cc576f8b8e07"
# length of a tuple
content = ('Hello', 2, 12.1)
print(len(content))

# + id="yZW5yGzd23pJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="27eb1109-4973-4430-dc2d-0b5f342fdd96"
# sort a tuple -- sorted() returns a new *list*; the tuple itself is immutable
ratings = (10, 2, 3, 7, 0)
sorted_ratings = sorted(ratings)
print(sorted_ratings)

# + [markdown] id="XECXiIPV3hjw" colab_type="text"
# Tuples can also be nested, so they can contain other tuples.

# + id="o0PgR8Mh3btl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="792f8108-6e40-4d17-c6bb-d04b038a12ae"
# Nesting: chained indexing drills in -- ratings[1] is the inner tuple,
# [3] its nested tuple, [1] the value 2.21
ratings = (10, (2, 'Hi', 2.1, (2, 2.21)), 0)
print('tuple: ', ratings)
print('index: ', ratings[1][3][1])
# + [markdown] colab_type="text" id="8v4hU5314etn"
# ### **Lists**
# + [markdown] colab_type="text" id="CY_u0_dR44FP"
# **Lists** are ordered sequences. They are mutable so they can be changed
# + id="_RuImr2h3q3h" colab_type="code" colab={}
# Example of list
ratings = [1, 2, 3, 4, 5]

# + id="NNWXy-L05DED" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a7113a0e-d976-4697-abb0-43fca2ab3c6e"
# Type of List are always list, regardless of the element types mixed inside
others = ['Hello', 2, 12.1]
print(type(others))

# + id="nB-siu8o5Is6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="f9598b0c-7a01-4da2-c8cc-d4d5cf811a2a"
# Nesting: a list holding a tuple that holds a list
# (the 'tuple: ' label is carried over from the tuple example above)
ratings = [10, (2, 'Hi', 2.1, [2, 2.21]), 0]
print('tuple: ', ratings)
print('index: ', ratings[1][3][1])

# + id="KkvsDTOR5TCX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a0df09b9-38c0-489f-91bc-66e7bb778a32"
# Slicing: every second element
ratings = [10, (2, 'Hi', 2.1, [2, 2.21]), 0]
print('slices: ', ratings[::2])

# + id="xQrGpqhl5qtO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f312f39c-d2f9-4917-99ad-0c228259e606"
# Extend, add new elements to list -- each element of the argument is
# appended individually ('pop' and 2 become two separate entries)
ratings = [10, (2, 'Hi', 2.1, [2, 2.21]), 0]
ratings.extend(['pop',2])
print(ratings)

# + id="Zfqz8OMh7EHS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="44d5350b-27ea-4bd0-9ec6-7892cd88fd60"
# Append, add just a new element to list -- the whole ['pop', 2] list
# becomes one nested entry (contrast with extend above)
ratings = [10, (2, 'Hi', 2.1, [2, 2.21]), 0]
ratings.append(['pop',2])
print(ratings)

# + id="5SVV4BzK7fMi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e1fcf813-355f-401f-99a3-69deb01a2b3d"
# Change elements in place -- lists are mutable
ratings = [10, (2, 'Hi', 2.1, [2, 2.21]), 0]
ratings[0]= 'Jonathan'
print(ratings)

# + id="_gKPqXNC7uhT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8810bae1-df24-4e35-9677-1b9a3059773c"
# Delete elements by index (negative indexes count from the end)
ratings = [10, (2, 'Hi', 2.1, [2, 2.21]), 0]
del(ratings[0])
del(ratings[-1])
print(ratings)

# + id="90q1duwS77i0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="0fdddbb6-02b0-4a5c-8f4e-dbfaeb950fef"
# Convert string into list -- split() with no argument splits on whitespace,
# so a single word yields a one-element list
name = 'Jonathan'
name_list = name.split()
print(name_list)
print(type(name_list))

# + id="xIIomnP-8R8y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1b9a825f-7a28-4537-8efe-7278cf3bd092"
# Separate string using delimiter
name = 'J,o,n,a,t,h,a,n'
name_list = name.split(',')
print(name_list)

# + id="BjJpmYaf8e_Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="286ad061-206d-45d6-cadf-e74921c6bd99"
# Aliasing (name and other_name are referencing to same object),
# so mutating through one alias is visible through the other
name = ['Jonathan', 1, 3]
other_name = name
other_name[0] = 'yo'
print(name)

# + id="tcYK_aBZ8xTN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7b921ff2-e2ea-4519-cf3c-ea50fbb988eb"
# Clone list -- both [:] and .copy() make a (shallow) copy,
# so mutating the copies leaves the original unchanged
name = ['Jonathan', 1, 3]
other_name = name[:]
new_name = name.copy()
new_name[0] = 'h'
other_name[0] = 'yo'
print(name)

# + id="Ka1wXdPt9Jed" colab_type="code" colab={}
# Show the built-in documentation for the list object
help(name)
# + [markdown] id="njdu541L_DIT" colab_type="text"
# ## **2. Sets**
# + [markdown] id="5iOp5ZESBeA_" colab_type="text"
# They are a type of collection. Unlike tuples and lists, they are unordered; so they do not record element position.
#
# They only have unique elements
#
# + id="PMOsf0sd_Fp8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e2449c98-a270-4ed8-e110-941a097c4f98"
# Create a set -- duplicate 'is' entries collapse to a single element
set1 = {'rock', 'is', 'music', 'is', 'is'}
print(set1)

# + id="7m1UiaaNCAeU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="5a3c73fd-7536-437c-a9f0-9793f9f3370b"
# Create a set from list (or from a tuple) with the set() constructor
set1 = set(['rock', 'is', 'music', 'is', 'is'])
set2 = set(('rock', 'is', 'music', 'is', 'is'))
print('set from list:', set1)
print('set from tuple:', set2)
print(type(set1))

# + [markdown] id="CT8A6tXyT_2P" colab_type="text"
# Set Operations

# + id="TdDwyf0FCWGJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b4d0b6c4-8cac-4e0c-af9f-57a2f754d7fc"
# Add items -- add() inserts a single element (no effect if already present)
set1 = {'rock', 'is', 'music'}
set1.add('house')
print('set from list:', set1)
print(type(set1))

# + id="7_y7ICc0UODQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="027f8aba-85c2-4443-a6cc-bb94191a521f"
# Remove items -- remove() raises KeyError if the element is absent
set1.remove('house')
print('set from list:', set1)
print(type(set1))

# + id="-iga_NorUcV1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="6cff3af5-eb41-4f15-dbcf-b8f887b9695f"
# Check if items is in set -- membership test with `in` is O(1)
print('rock' in set1)
print('house' in set1)

# + id="jDnCtrZbUxSH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0fbd67a6-1756-4309-dd47-16b4e75e19e6"
# Intersections of sets -- the & operator keeps elements common to both
set1 = {'rock', 'is', 'music'}
set2 = {'ro', 'is', 'mu'}
print(set1 & set2)

# + id="wv8WDQ0JXEMy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ed77e74f-bf6a-4764-804c-dc594235a60b"
# Difference of sets -- elements in set1 that are not in set2
set1 = {'rock', 'is', 'music'}
set2 = {'ro', 'is', 'mu'}
print(set1.difference(set2))

# + id="Wejc_nlzVZ-z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ae40227c-9777-4592-b50f-72952f4f94bb"
# Union of sets -- all elements of both, without duplicates
set1 = {'rock', 'is', 'music'}
set2 = {'ro', 'is', 'mu'}
print(set1.union(set2))

# + id="fsw5-aw-VwMs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="aacd13d9-a11f-47ad-b06c-0b9674e0df90"
# Subset items -- True because every element of set2 is in set1
set1 = {'rock', 'is', 'music'}
set2 = {'rock', 'is'}
print(set2.issubset(set1))

# + id="-9VVv8nvXebM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a7cbe867-2162-4938-b99e-5d47a271674f"
# Superset items -- the mirror test of issubset
set1 = {'rock', 'is', 'music'}
set2 = {'rock', 'is'}
print(set1.issuperset(set2))
# + [markdown] id="iAb-EZ7nWxHY" colab_type="text"
# ## **3. Dictionaries**
# + [markdown] id="E2Q-Aw5cYps7" colab_type="text"
# They are a type of collection. They have keys and values. The key is analogous to an index
# + id="qS3wLuGWW00n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="a40f231d-7707-44c0-96ca-4dd8b91d787d"
# Create a dictionary -- each entry maps a key to a value
dict1 = {'rock': 2,
         'electronic': 3,
         'regueton': 'music'}
print(dict1)
print(type(dict1))

# + id="uQZ7syg3ZUoZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="dc061e6f-1ba5-442f-b74b-40370b1ceea0"
# Extract data from a dictionary by key (raises KeyError if absent)
dict1['regueton']

# + id="HFepkYC7ZsSI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9290ad53-4cbe-4808-e2d7-c84af6aa60d2"
# Add new entry -- assigning to a missing key creates it
dict1['pop'] ='Selena'
print(dict1)

# + id="SdY4PFX8aFG5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="a5c10476-0cc5-4688-dcab-1c96d1ea4cd3"
# Remove entry, then confirm the key is gone with `in`
del(dict1['pop'])
print(dict1)
print('pop' in dict1)

# + id="IFb4N5_6aPtI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="2fad641f-221a-4ade-b331-4a1339dbc7ea"
# View objects over all keys and all values
print('keys: ', dict1.keys())
print('values: ', dict1.values())

# + id="vG1n1RITa7KT" colab_type="code" colab={}
# Values can be of any type: numbers, strings, lists, tuples, ...
Dict={"A":1,"B":"2","C":[3,3,3],"D":(4,4,4),'E':5,'F':6}
| Python for Data Science/Module2_PythonDataStructures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# <a href="https://colab.research.google.com/github/aviadr1/learn-advanced-python/blob/master/content/04_writing_our_own_container_types/exercise/questions.ipynb" target="_blank">
# <img src="https://colab.research.google.com/assets/colab-badge.svg"
# title="Open this file in Google Colab" alt="Colab"/>
# </a>
#
# # supporting slices in MyRange class
#
# note that the MyRange class in lecture 04 does not support slices
#
# ```
# range10 = MyRange(10)
# range10[::2]
# ---------------------------------------------------------------------------
# TypeError Traceback (most recent call last)
# <ipython-input-5-2886954a11eb> in <module>
# 8
# 9 range10 = MyRange(0, 10)
# ---> 10 list(range10[::2])
#
# <ipython-input-2-25399ac18a9d> in __getitem__(self, offset)
# 13
# 14 def __getitem__(self, offset):
# ---> 15 if self.__n <= offset:
# 16 raise IndexError('range object index out of range')
# 17
#
# TypeError: '<=' not supported between instances of 'int' and 'slice'
# ```
#
# create a new class `MyRange2` that inherits from MyRange and adds support of slices
#
# HINT: use the class `islice` from the module `itertools`
#
# expected output:
#
# >>> range10 = MyRange2(0, 10)
# >>> list(range10[::2])
# [0, 2, 4, 6, 8]
#
### useful starting point
import collections.abc
import math
class MyRange(collections.abc.Sequence):
    """An immutable, range-like sequence of evenly spaced values.

    Mirrors the built-in range(start, stop, step): supports len(),
    integer indexing, iteration and membership via the Sequence mixins.
    Integer-only __getitem__ (no slices) is intentional -- adding slice
    support is the exercise above.
    """

    def __init__(self, start, stop, step=1):
        self.__start = start
        self.__stop = stop
        self.__step = step
        # Element count: ceil((stop-start)/step), clamped at zero for
        # empty or backwards ranges.
        span = stop - start
        self.__n = max(0, math.ceil(span / step))
        super().__init__()

    def __len__(self):
        return self.__n

    def __getitem__(self, offset):
        # Raising IndexError past the end also terminates iteration
        # supplied by the Sequence protocol.
        if offset >= self.__n:
            raise IndexError('range object index out of range')
        return self.__start + offset * self.__step

    def __repr__(self):
        return f"{type(self).__name__}({self.__start},{self.__stop},{self.__step})"
# # 3x3 matrix
#
# Write a class called Matrix that represents a simple 3x3 matrix.
#
# namely it should support the following operations:
# you do not need to derive from any ABC
#
# ```
# >>> m = Matrix()
# >>> print(m)
# [0, 0, 0]
# [0, 0, 0]
# [0, 0, 0]
#
# >>> m[0,0] = 10
# >>> print(m)
# [10, 0, 0]
# [0, 0, 0]
# [0, 0, 0]
#
# >>> m[1,1] = 100
# >>> m[1,1]
# 100
# ```
#
#
#
#
#
# ```{toctree}
# :hidden:
# :titlesonly:
#
#
# solutions
# ```
#
| content/_build/jupyter_execute/04_writing_our_own_container_types/exercise/questions.ipynb |
# +
# Plot residuals from a 1-d linear regression fit to noisy quadratic data.
# Based on https://github.com/probml/pmtk3/blob/master/demos/linregResiduals.m
import numpy as np
import matplotlib.pyplot as plt
import os
# probml_utils provides the savefig helper; install on the fly if missing.
try:
    import probml_utils as pml
except ModuleNotFoundError:
    # %pip install git+https://github.com/probml/probml-utils.git
    import probml_utils as pml
np.random.seed(0)  # reproducible noise
N = 21
x = np.linspace(0.0, 20, N)
X0 = x.reshape(N, 1)
# Design matrix with a bias column of ones: X = [1, x].
X = np.c_[np.ones((N, 1)), X0]
w = np.array([-1.5, 1 / 9.0])
# True signal is quadratic in x, so a straight line cannot fit it exactly.
y = w[0] * x + w[1] * np.square(x)
y = y + np.random.normal(0, 1, N) * 2
# Least-squares straight-line fit; note w is reused for the fitted weights.
w = np.linalg.lstsq(X, y, rcond=None)[0]
# print(w)
y_estim = np.dot(X, w)
plt.plot(X[:, 1], y, "o")
plt.plot(X[:, 1], y_estim, "-")
pml.savefig("linregResidualsNoBars.pdf")
plt.show()
# Draw a vertical bar from each observation to its fitted value (residual).
for x0, y0, y_hat in zip(X[:, 1], y, y_estim):
    plt.plot([x0, x0], [y0, y_hat], "k-")
plt.plot(X[:, 1], y, "o")
plt.plot(X[:, 1], y_estim, "-")
plt.plot(X[:, 1], y_estim, "x", color="r", markersize=12)
pml.savefig("linregResidualsBars.pdf")
plt.show()
| notebooks/book1/01/linreg_residuals_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import birdwatcher as bw
from birdwatcher.plotting import imshow_frame
from birdwatcher.frameprocessing import FrameIterator
from birdwatcher.ffmpeg import arraytovideo
import matplotlib.pyplot as plt
# %matplotlib inline
vf = bw.testvideosmall()
# Detect movement with knn algorithm. It returns 3 arrays:
# - cd, coordinate data: which pixels were above threshold in each frame
# - cc, coordinate count: sum of pixels that were above threshold in each frame
# - cm, coordinate mean: the spatial mean of pixels that were above threshold in each frame
#
# These arrays are disk-based. I.e. they are written to file in the same directory, and can be accessed directly using the library Darr.
cd, cc, cm = bw.detect_movementknn(vf.filepath, ignore_firstnframes=60, History=5)
# The darr arrays can be indexed, which will return a numpy array
plt.plot(cc[:])
plt.title('number of pixels above treshold')
plt.xlabel('framenumber')
plt.ylabel('number of pixels')
plt.plot(cm[:])
plt.title('coordinates of pixels above treshold')
plt.xlabel('x coordinate')
plt.ylabel('y coordinate')
plt.legend(['left-right', 'top-bottom'])
# The raw coordinate data is more complex: index the frame you want to get an
# array of pixel coordinates; the number of pixels varies per frame.
cd_200 = cd[200]
print(cd_200)
# If you want to image the pixels, this can be done conveniently as so
frame = cd.get_frame(215)
imshow_frame(frame)
# let's look at the original frame
# NOTE(review): frame 200 is shown here while frame 215 was imaged above —
# confirm whether these were meant to match.
imshow_frame(vf.get_framebynumber(200))
| notebooks/movementdetection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Show, Don't Tell: Image Search
# *<NAME>*
#
# In this notebook I will demonstrate image search. By this I mean I will write an algorithm that accepts an image as input and will return a list of "similar" images as output.
#
# Search, in this case, means finding items that are "similar" to some specified item. In practice "similarity" needs to be defined, and different definitions may be useful for different applications. In this notebook, I define "similar" to mean that the color distribution of the pixels in the images are similar. In fact, I will refine the definition and say that similar images have pixels of similar color distributions in common regions of the image.
#
# I have a collection of images through which my algorithm will search. The algorithm, when given an image, will compute how similar each image in the collection is to the input image. This may not be the most efficient approach to search; my objective is not efficiency but to demonstrate how we can find "similar" images.
#
# We also need a way to describe the color distribution of an image. Here I bin RGB values (viewed as separate and independent channels) and use the discretized distributions to describe the colors in the image. When we have two of these distributions for an image, we then compute a metric known as the $\chi^2$-distance:
#
# $$\sum_{k = 1}^K \frac{(x_k - y_k)^2}{x_k + y_k}$$
#
# where $K$ is the number of bins, $x_k$ is the (normalized) count of the $k^{\text{th}}$ bin of one image, and $y_k$ is the equivalent number for the other image.
#
# An image is divided up into a 3x3 grid and these $\chi^2$-distances are computed for each cell of the resulting grid. The sum of these distances (across all cells) is then used as the numeric descriptor of the similarity of the two images.
#
# Our first task is to load in the image data.
import cv2
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import sys, os
import pandas as pd
# %matplotlib inline
matplotlib.rcParams['figure.figsize'] = (18, 16)
im_dir = "images"
# OpenCV loads images as BGR; convert to RGB so matplotlib shows true colors.
im0 = cv2.cvtColor(cv2.imread(im_dir + "/ocean0.jpg"), cv2.COLOR_BGR2RGB)
plt.imshow(im0)
im1 = cv2.cvtColor(cv2.imread(im_dir + "/ocean1.jpg"), cv2.COLOR_BGR2RGB)
plt.imshow(im1)
# Let's now prepare tools for creating color histograms.
# +
def col_256_bins(bincount=10):
    """Return bincount + 1 equally spaced histogram bin edges spanning [0, 256].

    Edges are truncated to uint16, suitable for binning 8-bit channel values.
    """
    edges = np.arange(bincount + 1) * (256 / bincount)
    return edges.astype(np.uint16)
col_256_bins()
# -
# Histogram of the red channel: raw counts, then normalized (density=True).
np.histogram(im0[:, :, 0].flatten(), bins=col_256_bins())
np.histogram(im0[:, :, 0].flatten(), bins=col_256_bins(), density=True)
plt.hist(im0[:, :, 0].flatten(), bins=col_256_bins())
# Red
plt.hist(im0[:, :, 1].flatten(), bins=col_256_bins()) # Green
plt.hist(im0[:, :, 2].flatten(), bins=col_256_bins()) # Blue
# Now I write tools that compute a distance between two images based on the histograms computed from the images.
def chisq_dist(x, y):
    """Compute the chi-square distance between two histograms.

    Accepts either the ``(counts, bin_edges)`` tuples returned by
    ``np.histogram`` or bare count arrays.  Bins where both histograms are
    zero produce nan (0/0) and are ignored via ``np.nansum``.

    Bug fix: the original indexed ``x[0]``/``y[0]`` unconditionally, which is
    correct for histogram tuples but silently reduced bare arrays — as passed
    by ``image_dist`` — to their first bin only.
    """
    hx = np.asarray(x[0] if isinstance(x, tuple) else x, dtype=float)
    hy = np.asarray(y[0] if isinstance(y, tuple) else y, dtype=float)
    binscore = (hx - hy) ** 2 / (hx + hy)
    return np.nansum(binscore)
histbins = col_256_bins()
# Distance between the red channels of two differently sized crops.
chisq_dist(np.histogram(im0[0:266, 0:400, 0].flatten(), histbins, density=True),
           np.histogram(im1[0:180, 0:320, 0].flatten(), histbins, density=True))
def image_dist(x, y, bins=col_256_bins()):
    """Compute the "distance" between images x and y.

    Each image is split into a 3x3 grid; for every grid cell and every RGB
    channel a normalized histogram is computed and the chi-square distances
    are summed over all cells and channels.

    Bug fix: the original passed bare count arrays (``np.histogram(...)[0]``)
    to ``chisq_dist``, which indexes ``[0]`` expecting histogram tuples — so
    only the first histogram bin was ever compared.  Passing the full tuples
    restores the intended all-bin comparison.
    """
    hx, wx, _ = x.shape
    hy, wy, _ = y.shape
    div = 3   # Number of divisions; a div x div grid
    dist = 0  # Eventual distance measure
    # Iterate through the grid
    for i in range(div):
        for j in range(div):
            # Pixel bounds of cell (i, j) in each image (images may differ in size).
            hdim_x = (int((hx / div) * i), int((hx / div) * (i + 1)))
            wdim_x = (int((wx / div) * j), int((wx / div) * (j + 1)))
            hdim_y = (int((hy / div) * i), int((hy / div) * (i + 1)))
            wdim_y = (int((wy / div) * j), int((wy / div) * (j + 1)))
            subimage_x = x[hdim_x[0]:hdim_x[1], wdim_x[0]:wdim_x[1], :]
            subimage_y = y[hdim_y[0]:hdim_y[1], wdim_y[0]:wdim_y[1], :]
            # Compare the R, G and B channels of the two cells.
            for d in range(3):
                chan_x = subimage_x[:, :, d].flatten()
                chan_y = subimage_y[:, :, d].flatten()
                hist_x = np.histogram(chan_x, bins, density=True)
                hist_y = np.histogram(chan_y, bins, density=True)
                dist += chisq_dist(hist_x, hist_y)
    return dist
image_dist(im0, im1)
# Let's now prepare to search images in folders to find "similar" images to an input image.
imfiles = os.listdir("images/")
imfiles
# +
# Load every image in the folder (RGB) and keep a name -> image mapping.
image_list = [cv2.cvtColor(cv2.imread(im_dir + "/" + i), cv2.COLOR_BGR2RGB) for i in imfiles]
image_dict = dict(zip(imfiles, image_list))
def image_dist_list(image, imlist):
    """Return an array of image_dist values between `image` and each image in `imlist`."""
    return np.array([image_dist(image, candidate) for candidate in imlist])
# -
# Let's test the algorithm.
# Score every image in the collection against im0; lower score = more similar.
im0_scores = pd.Series(image_dist_list(im0, image_list), index=imfiles)
im0_scores
im0_scores.sort_values()
# Visually inspect the best (and some worse) matches.
plt.imshow(image_dict['ocean0.jpg'])
plt.imshow(image_dict['ocean3.jpg'])
plt.imshow(image_dict['ocean4.jpg'])
plt.imshow(image_dict['city1.jpg'])
plt.imshow(image_dict['city7.jpeg'])
plt.imshow(image_dict['forest2.jpeg'])
plt.imshow(image_dict['forest8.jpg'])
# Based on this test it looks like our algorithm is doing a fair job of finding "similar" images according to our criteria. Two ocean images were matched, along with some "similar" city scapes; after that the images don't seem to bear a strong resemblance.
#
# This system could of course be improved, but this should give the basic idea of what is involved in an image search system, and it seems this simple approach already produces decent results.
| ImageSearch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Utils
#
# Common utilities for data loading and preparation
#
# +
import numpy as np
import pandas as pd
import scipy.io as sio
import os
from scipy.sparse import coo_matrix
from collections import namedtuple
import scipy.sparse as sp
SparseMatrix = namedtuple("SparseMatrix", "indices values dense_shape")
def GetInput(mat, lab, batch=1, grafi=None):
    """Build batched GNN inputs from an edge list and node labels.

    Args:
        mat: edge list, rows of [id_1, id_2] node-index pairs.
        lab: node-label matrix, one row per node.
        batch: number of graphs per batch.
        grafi: vector with same cardinality as nodes, denoting to which graph
            each node belongs.

    Returns:
        (data_batch, arcnode_batch, nodegraph_batch, node_in): per batch, the
        edge/label input matrix, the arc->node SparseMatrix, the dense
        node->graph membership matrix, and the node counts per graph.
    """
    # number of batches
    batch_number = grafi.max() // batch # if only one graph => grafi.max() is 0 => batch_number == 0
    # dataframe containing adjacency matrix
    dmat = pd.DataFrame(mat, columns=["id_1", "id_2"])
    # dataframe containing labels of each node
    dlab = pd.DataFrame(lab, columns=["lab" + str(i) for i in range(0, lab.shape[1])])
    # darch=pd.DataFrame(arc, columns=["arch"+str(i) for i in range(0,arc.shape[1])])
    # dataframe denoting the graph each node belongs to
    dgr = pd.DataFrame(grafi, columns=["graph"])
    # creating input : id_p, id_c, label_p, label_c, graph_belong
    dresult = dmat
    dresult = pd.merge(dresult, dlab, left_on="id_1", right_index=True, how='left')
    dresult = pd.merge(dresult, dlab, left_on="id_2", right_index=True, how='left')
    # dresult=pd.concat([dresult, darch], axis=1)
    dresult = pd.merge(dresult, dgr, left_on="id_1", right_index=True, how='left')
    data_batch = []
    arcnode_batch = []
    nodegraph_batch = []
    node_in = []
    # creating batch data => for each batch, redefining the ids so that they start from 0 index
    for i in range(0, batch_number + 1):
        # graph-id range [grafo_indexMin, grafo_indexMax) of the current batch
        grafo_indexMin = (i * batch)
        grafo_indexMax = (i * batch) + batch
        adj = dresult.loc[(dresult["graph"] >= grafo_indexMin) & (dresult["graph"] < grafo_indexMax)]
        min_id = adj[["id_1", "id_2"]].min(axis=0).min()
        # start node ids from 0 for the new batch
        adj["id_1"] = adj["id_1"] - min_id
        adj["id_2"] = adj["id_2"] - min_id
        min_gr = adj["graph"].min()
        adj["graph"] = adj["graph"] - min_gr
        # append batch input: all columns except the trailing graph id
        data_batch.append(adj.values[:, :-1])
        # arcnode matrix creation
        # max node id / max graph id in the current batch
        max_id = int(adj[["id_1", "id_2"]].max(axis=0).max())
        max_gr = int(adj["graph"].max())
        # getting ids of nodes (p and c)
        mt = adj[["id_1", "id_2"]].values
        # arcnode matrix : first shape same as arcs, second same as nodes in the batch
        arcnode = np.zeros((mt.shape[0], max_id + 1))
        # arcnode: state of parent node = sum (h(state of all the neighbors ,..) (of the parent node)
        # => sum contributes of all the arcs involving the parent
        # in j-th arc (row) => put one in the position corresponding to the parent node's column
        # => found in the adjacency matrix in j-th row, 1st position
        # for j in range(0, mt.shape[0]):
        #     arcnode[j][mt[j][0]] = 1
        # Sparse equivalent of the commented dense loop above (note: transposed).
        arcnode = SparseMatrix(indices=np.stack((mt[:, 0], np.arange(len(mt))), axis=1), values=np.ones([len(mt)]),
                               dense_shape=[max_id + 1, len(mt)])
        arcnode_batch.append(arcnode)
        # nodegraph: dense node -> graph membership matrix
        nodegraph = np.zeros((max_id + 1, max_gr + 1))
        for t in range(0, max_id + 1):
            val = adj[["graph"]].loc[(adj["id_1"] == t) | (adj["id_2"] == t)].values[0]
            nodegraph[t][val] = 1
        nodegraph_batch.append(nodegraph)
        # node number in each graph
        grbtc = dgr.loc[(dgr["graph"] >= grafo_indexMin) & (dgr["graph"] < grafo_indexMax)]
        # counting number of nodes in the current batch, per graph
        node_in.append(grbtc.groupby(["graph"]).size().values)
    return data_batch, arcnode_batch, nodegraph_batch, node_in
def set_load_subgraph(data_path, set_type):
    """Load one split ("train"/"valid"/"test") of the subgraph dataset.

    Reads conmat/nodelab/tar .mat files from data_path and returns
    (inp, arcnode, nodegraph, nodein, labels, lab) ready for the GNN.
    On any failure the error is printed and the process exits with code 1.
    """
    try:
        if set_type not in ("train", "valid", "test"):
            raise NameError('Wrong set name!')
        # Adjacency matrix -> sparse COO -> (row, col) edge list.
        conmat = sio.loadmat(os.path.join(data_path, 'conmat{}.mat'.format(set_type)))
        sparse_adj = coo_matrix(conmat["conmat_{}set".format(set_type)].T)
        adj = np.array([sparse_adj.row, sparse_adj.col]).T
        # Per-node label matrix.
        nodelab = sio.loadmat(os.path.join(data_path, "nodelab{}.mat".format(set_type)))
        lab = np.asarray(nodelab["nodelab_{}set".format(set_type)]).T
        # Targets, flattened and one-hot encoded.
        tarmat = sio.loadmat(os.path.join(data_path, "tar{}.mat".format(set_type)))
        target = np.asarray(tarmat["target_{}set".format(set_type)]).T
        labels = pd.get_dummies(pd.Series(target.reshape(-1))).values
        # All nodes belong to a single graph (graph vector of zeros).
        inp, arcnode, nodegraph, nodein = GetInput(adj, lab, 1, np.zeros(len(labels), dtype=int))
        return inp, arcnode, nodegraph, nodein, labels, lab
    except Exception as e:
        print("Caught exception: ", e)
        exit(1)
def set_load_clique(data_path, set_type):
    """Load one split of the clique dataset from cliquedataset.mat.

    Returns (inp, arcnode, nodegraph, nodein, labels); prints the error and
    exits with code 1 if the split name or the data is invalid.
    """
    import load as ld

    # NOTE: loading happens before the try-block, so I/O errors propagate.
    dataset = ld.loadmat(os.path.join(data_path, "cliquedataset.mat"))["dataSet"]
    try:
        if set_type not in ("train", "validation", "test"):
            raise NameError('Wrong set name!')
        split = dataset['{}Set'.format(set_type)]
        # Edge list from the (transposed) connection matrix.
        sparse_adj = coo_matrix(split['connMatrix'].T)
        adj = np.array([sparse_adj.row, sparse_adj.col]).T
        # Node labels and one-hot encoded targets.
        lab = np.asarray(split['nodeLabels']).T
        target = np.asarray(split['targets']).T
        labels = pd.get_dummies(pd.Series(target)).values
        # Reshape node labels to a column vector.
        get_lab = lab.reshape(lab.shape[0], 1) if set_type == "train" else lab.reshape(len(labels), 1)
        # All nodes belong to a single graph (graph vector of zeros).
        inp, arcnode, nodegraph, nodein = GetInput(adj, get_lab, 1,
                                                   np.zeros(len(labels), dtype=int))
        return inp, arcnode, nodegraph, nodein, labels
    except Exception as e:
        print("Caught exception: ", e)
        exit(1)
def set_load_mutag(set_type, train):
    """Load one split of the MUTAG dataset from an already-loaded .mat dict.

    Args:
        set_type: one of "train", "validation", "test".
        train: dict with '<set>Set' entries holding connMatrix, nodeLabels,
            maskMatrix and targets.

    Returns:
        (inp, arcnode, nodegraph, nodein, labels); prints the error and
        exits with code 1 on failure.
    """
    types = ["train", "validation", "test"]
    try:
        if set_type not in types:
            raise NameError('Wrong set name!')
        ############ training set #############
        # take adjacency list
        adj = coo_matrix(train['{}Set'.format(set_type)]['connMatrix'])
        adj = np.array([adj.row, adj.col]).T
        # take node labels
        lab = np.asarray(train['{}Set'.format(set_type)]['nodeLabels']).T
        mask = coo_matrix(train['{}Set'.format(set_type)]["maskMatrix"])
        # take target, generate output for each graph, and convert to one-hot encoding
        target = np.asarray(train['{}Set'.format(set_type)]['targets']).T
        v = mask.col
        target = np.asarray([target[x] for x in v])
        # target = target[target != 0] # equivalent code
        labels = pd.get_dummies(pd.Series(target))
        labels = labels.values
        # build graph indices: mask.col appears to hold the first node index of
        # each graph, so nodes between consecutive entries share a graph id
        # (TODO confirm against the dataset layout).
        gr = np.array(mask.col)
        indicator = []
        for j in range(0, len(gr) - 1):
            for i in range(gr[j], gr[j + 1]):
                indicator.append(j)
        # remaining nodes (after the last start index) belong to the last graph
        for i in range(gr[-1], adj.max() + 1):
            indicator.append(len(gr) - 1)
        indicator = np.asarray(indicator)
        # take input, arcnode matrix, nodegraph matrix
        inp, arcnode, nodegraph, nodein = GetInput(adj, lab, indicator.max() + 1, indicator)
        return inp, arcnode, nodegraph, nodein, labels
    except Exception as e:
        print("Caught exception: ", e)
        exit(1)
def set_load_general(data_path, set_type, set_name="sub_30_15"):
    """Load one split of a generic dataset stored as <set_name>.mat.

    Returns (inp, arcnode, nodegraph, nodein, labels, lab); prints the error
    and exits with code 1 if the split name or the data is invalid.
    """
    import load as ld

    # NOTE: loading happens before the try-block, so I/O errors propagate.
    dataset = ld.loadmat(os.path.join(data_path, "{}.mat".format(set_name)))["dataSet"]
    try:
        if set_type not in ("train", "validation", "test"):
            raise NameError('Wrong set name!')
        split = dataset['{}Set'.format(set_type)]
        # Edge list from the (transposed) connection matrix.
        sparse_adj = coo_matrix(split['connMatrix'].T)
        adj = np.array([sparse_adj.row, sparse_adj.col]).T
        # Node labels; clique-style datasets store them with a single dimension.
        lab = np.asarray(split['nodeLabels']).T
        if len(lab.shape) < 2:
            lab = lab.reshape(lab.shape[0], 1)
        # One-hot encoded graph targets.
        target = np.asarray(split['targets']).T
        labels = pd.get_dummies(pd.Series(target)).values
        # All nodes belong to a single graph (graph vector of zeros).
        inp, arcnode, nodegraph, nodein = GetInput(adj, lab, 1,
                                                   np.zeros(len(labels), dtype=int))
        return inp, arcnode, nodegraph, nodein, labels, lab
    except Exception as e:
        print("Caught exception: ", e)
        exit(1)
def load_karate(path="data/karate-club/"):
    """Load karate club dataset"""
    print('Loading karate club dataset...')
    # Edges are stored 1-based on disk; shift to 0-based and sort by (row, col).
    raw_edges = np.loadtxt("{}edges.txt".format(path), dtype=np.int32) - 1
    order = np.lexsort((raw_edges[:, 1], raw_edges[:, 0]))
    sorted_edges = raw_edges[order]
    # One-hot node features: identity matrix over all node ids.
    one_hot_feats = sp.eye(np.max(sorted_edges + 1), dtype=np.float32).tocsr()
    # Cluster assignments, sorted by node id, then one-hot encoded.
    clusters = np.loadtxt("{}mod-based-clusters.txt".format(path), dtype=np.int32)
    clusters = clusters[clusters[:, 0].argsort()]
    labels = np.eye(max(clusters[:, 1]) + 1, dtype=np.int32)[clusters[:, 1]]
    # Append a graph-id column of zeros (single graph) to edges and features.
    edge_graph_col = np.zeros((len(sorted_edges), 1), dtype=np.int32)
    E = np.concatenate((sorted_edges, edge_graph_col), axis=1)
    node_graph_col = np.zeros((one_hot_feats.shape[0], 1), dtype=np.int32)
    N = np.concatenate((one_hot_feats.toarray(), node_graph_col), axis=1)
    return E, N, labels
def from_EN_to_GNN(E, N):
    """Convert an edge matrix E and node matrix N into GNN inputs.

    :param E: edge matrix: rows of [id_p, id_c, graph_id]
    :param N: node matrix: rows of [node_features..., graph_id]
    :return: (inp, arcnode, graphnode) — input rows [id_c, label_p, label_c],
             the sparse arc->node incidence, and the sparse node->graph
             membership matrix.
    """
    N_full = N
    node_feats = N[:, :-1]  # drop the trailing graph_id column
    endpoints = E[:, :2]    # (id_p, id_c) per arc
    # Gather both endpoints' feature rows per arc: (n_arcs, 2, label_dim),
    # then flatten to (n_arcs, 2*label_dim): [label_p, label_c] per row.
    pair_feats = np.take(node_feats, endpoints, axis=0)
    flat_feats = np.reshape(pair_feats, [len(E), -1])
    # GNN input rows: [id_c, label_p, label_c].
    inp = np.concatenate((E[:, 1:2], flat_feats), axis=1)
    # Arc->node incidence stored transposed as a SparseMatrix: one 1 per arc
    # column, placed at the row of that arc's parent node, e.g.
    #   1 1 0 0 0 0 0
    #   0 0 1 1 0 0 0
    #   0 0 0 0 1 1 1
    arcnode = SparseMatrix(indices=np.stack((E[:, 0], np.arange(len(E))), axis=1),
                           values=np.ones([len(E)]).astype(np.float32),
                           dense_shape=[len(N), len(E)])
    # Node->graph membership in the same sparse layout, keyed by graph_id.
    graph_ids = N_full[:, -1].astype(np.int32)
    num_graphs = int(max(N_full[:, -1]) + 1)
    graphnode = SparseMatrix(indices=np.stack((graph_ids, np.arange(len(graph_ids))), axis=1),
                             values=np.ones([len(graph_ids)]).astype(np.float32),
                             dense_shape=[num_graphs, len(N)])
    return inp, arcnode, graphnode
| _downloads/84c3cca87362258f9c9281fcdcbd6837/gnn_utils.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # At the start of this video
# 1. We were able to use threading in our Python code.
from threading import Thread, Lock
# Module-level lock guarding my_global_string across threads.
thread_lock = Lock()
# +
# Not a good practice to use global variables
my_global_string = "Hello World"
def add_prefix(prefix_to_add):
    """Prepend prefix_to_add (plus a space) to the shared global string."""
    global my_global_string
    # Hold the shared lock while mutating the shared data.
    thread_lock.acquire()
    my_global_string = f"{prefix_to_add} {my_global_string}"
    thread_lock.release()
# -
def add_suffix(suffix_to_add):
    """Append suffix_to_add (preceded by a space) to the shared global string."""
    global my_global_string
    # Hold the shared lock while mutating the shared data.
    thread_lock.acquire()
    my_global_string = f"{my_global_string} {suffix_to_add}"
    thread_lock.release()
def do_threading():
    """Run add_prefix and add_suffix concurrently, then print the result."""
    workers = [
        Thread(target=add_prefix, args=("YOLO",)),
        Thread(target=add_suffix, args=("BYE!!",)),
    ]
    # Start both workers, then wait for both to finish.
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    global my_global_string
    print("Final string is {}".format(my_global_string))
do_threading()
# # Re-entrant Lock
from threading import RLock
# A re-entrant lock may be acquired multiple times by the same thread.
my_re_entrant_lock = RLock()
# +
my_re_entrant_lock.acquire()
my_global_string = "yolo swag"
# If this was a conventional Lock, then this would be a blocking call for the
# thread, even though the same thread is trying to acquire it again.
my_re_entrant_lock.acquire()
my_global_string += " ok bye !"
# Each acquire must be balanced by a release before other threads can enter.
my_re_entrant_lock.release()
my_re_entrant_lock.release()
# -
# # By the end of this video
# 1. We will be able to use various types of locks for threading in Python.
# 2. We will understand how the different locks work.
# 3. We will be able to implement Re-entrant locks in Python.
| Section 2/notebooks/2. 2 Using Locks with Threads.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit ('3.8.5')
# name: python385jvsc74a57bd038b4776f74ef7846326f679ebb3c9e296e2ab09fe7ffa3e73b643f9a7922e0a6
# ---
# +
import pandas as pd
import seaborn as sns
url = 'https://gist.githubusercontent.com/guilhermesilveira/1b7d5475863c15f484ac495bd70975cf/raw/16aff7a0aee67e7c100a2a48b676a2d2d142f646/projects.csv'
data = pd.read_csv(url)
# Recode the 'unfinished' flag into its complement, 'finished'.
change_unfinished_to_finished = {
    1: 0,
    0: 1
}
data['finished'] = data.unfinished.map(change_unfinished_to_finished)
x = data[['expected_hours', 'price']]
y = data['finished']
sns.scatterplot(x='expected_hours', y='price', hue='finished', data=data)
# +
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
SEED = 5
# Stratified 75/25 split, fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, stratify=y, random_state=SEED)
print('Training with %d items & testing with %d items' % (len(x_train), len(x_test)))
# Baseline: linear SVM on the raw (unscaled) features.
model = LinearSVC(dual=False)
model.fit(x_train, np.ravel(y_train))
predictions = model.predict(x_test)
accuracy = accuracy_score(y_test, predictions) * 100
print('Accuracy is %.2f%%' % accuracy)
# -
sns.scatterplot(x="expected_hours", y="price", hue=y_test, data=x_test)
# +
# Build a 100x100 grid over the test-data range to visualize the decision boundary.
x_min = x_test.expected_hours.min()
x_max = x_test.expected_hours.max()
y_min = x_test.price.min()
y_max = x_test.price.max()
pixels = 100
x_axis = np.arange(x_min, x_max, (x_max - x_min) / pixels)
y_axis = np.arange(y_min, y_max, (y_max - y_min) / pixels)
xx, yy = np.meshgrid(x_axis, y_axis)
points = np.c_[xx.ravel(), yy.ravel()]
# +
import matplotlib.pyplot as plt
# Predict the class of every grid point and shade the regions.
Z = model.predict(points)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.3)
plt.scatter(x_test.expected_hours, x_test.price, c=y_test, s=1)
# +
from sklearn.svm import SVC
SEED = 5
np.random.seed(SEED)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, stratify=y, random_state=SEED)
print('Training with %d items & testing with %d items' % (len(x_train), len(x_test)))
# Kernel SVM (RBF by default) on the same raw features.
model = SVC()
model.fit(x_train, np.ravel(y_train))
predictions = model.predict(x_test)
accuracy = accuracy_score(y_test, predictions) * 100
print('Accuracy is %.2f%%' % accuracy)
# +
from sklearn.preprocessing import StandardScaler
SEED = 5
np.random.seed(SEED)
raw_x_train, raw_x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, stratify=y, random_state=SEED)
# NOTE(review): this print uses the previous cell's x_train/x_test; the split
# sizes are identical, but len(raw_x_train)/len(raw_x_test) would be clearer.
print('Training with %d items & testing with %d items' % (len(x_train), len(x_test)))
# Standardize features (fit on train only) — kernel SVMs are scale-sensitive.
scaler = StandardScaler()
scaler.fit(raw_x_train)
x_train = scaler.transform(raw_x_train)
x_test = scaler.transform(raw_x_test)
model = SVC()
model.fit(x_train, np.ravel(y_train))
predictions = model.predict(x_test)
accuracy = accuracy_score(y_test, predictions) * 100
print('Accuracy is %.2f%%' % accuracy)
# +
# x_test is now a plain ndarray (scaler output), so use positional columns.
x_data = x_test[:,0]
y_data = x_test[:,1]
x_min = x_data.min()
x_max = x_data.max()
y_min = y_data.min()
y_max = y_data.max()
pixels = 100
x_axis = np.arange(x_min, x_max, (x_max - x_min) / pixels)
y_axis = np.arange(y_min, y_max, (y_max - y_min) / pixels)
xx, yy = np.meshgrid(x_axis, y_axis)
points = np.c_[xx.ravel(), yy.ravel()]
# Shade the decision regions of the scaled-feature model.
Z = model.predict(points)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.3)
plt.scatter(x_data, y_data, c=y_test, s=1)
| site_projects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is for reproducing Fig.3 in Rep. Prog. Phys. 76 (2013) 036502.
# %matplotlib inline
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
#
np.set_printoptions(precision=3)
# Mathematical constants
pi = np.pi
tpi = 2.0*pi
fpi = 4.0*pi
# Physical constants (atomic units unless noted)
sol = 137.0 # speed of light
aB = 0.05292 # Bohr radius, nanometer
Hartree = 27.21 # eV
Atomtime = 0.02419 # fs
Atomfield = Hartree/aB # V/nm
Atomvolume = aB**3 # nm^3
ch = 1240.0 # eV * nm
chbar = 197.3 # eV * nm
halfepsc = 3.509e16 # W/cm^2 \frac{1}{2}*\epsilon_0 * c
Atomfluence = halfepsc*Atomtime*1.0e-15 # J/cm^2 ,W/cm^2 * fs = femto J/cm^2
from Keldysh_funcs.functions import *
# +
# Default values
delta = 6.50 #gap in eV
m = 0.50 #effective mass
omega = 1.55 #photon energy in eV
# Unit conversion
delta = delta/Hartree #Convert to atomic unit
omega = omega/Hartree #Convert to atomic unit
print ('++++++++')
print ('Material properties')
print ('Effective mass: m ='+str(m)+' [electron mass]')
print ('Band gap: delta ='+str(delta)+' [a.u.] ='+str(delta*Hartree)+' [eV]')
print ('Frequency: omega ='+str(omega)+' [a.u.] ='+str(omega*Hartree)+' [eV]')
print ('++++++++')
# +
# Sweep the field strength logarithmically and evaluate the full Keldysh rate
# plus its multiphoton (MP) and tunneling (TUN) limits from Keldysh_funcs.
NF = 1000
Fmin = 1.0/Atomfield
Fmax = 100.0/Atomfield
r = (Fmax/Fmin)**(1.0/float(NF-1))
F = np.zeros(NF,dtype=float)
W = np.zeros(NF,dtype=float)
WMP = np.zeros(NF,dtype=float)
WTUN = np.zeros(NF,dtype=float)
WTUNYS = np.zeros(NF,dtype=float)
for iF in range (NF):
    F[iF] = Fmin*r**iF
    W[iF] = get_W(m,delta,omega,F[iF])
    WMP[iF] = get_WMP(m,delta,omega,F[iF])
    WTUN[iF] = get_WTUN(m,delta,omega,F[iF])
# -
# Reproduce Fig. 3: rates converted back to SI units (V/m and /m^3/s).
plt.figure()
plt.title('Rep. Prog. Phys. 76 (2013) 036502 based on m='+str(m)+' assumption')
plt.xscale('log')
plt.yscale('log')
plt.xlim(1.0e9,1.0e11)
plt.xlabel('E [V/m]')
plt.ylim(1.0e34,1.0e46)
plt.ylabel('W [/m^3/s]')
plt.plot(F*Atomfield*1.0e9,W/Atomvolume/Atomtime*1.0e42,label='full')
plt.plot(F*Atomfield*1.0e9,WTUN/Atomvolume/Atomtime*1.0e42,label='TUN')
plt.plot(F*Atomfield*1.0e9,WMP/Atomvolume/Atomtime*1.0e42,label='MP')
plt.grid()
plt.legend()
plt.show()
| Sample1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="qtgSE8mbSLmo"
# # Introduction to Deep Learning. Practice 1
# -
# ## Neural networks in PyTorch
import torch
# ### Basics
# +
# Create scalar tensors that track gradients.
x = torch.tensor(1., requires_grad=True)
w = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)
# Build a computational graph.
y = w * x + b    # y = 2 * x + 3
# Compute gradients of y w.r.t. every requires_grad leaf.
y.backward()
# Print out the gradients (dy/dx = w, dy/dw = x, dy/db = 1).
print(x.grad)    # x.grad = 2
print(w.grad)    # w.grad = 1
print(b.grad)    # b.grad = 1
# -
# ### Linear layer
# +
# Create random input/target tensors of shape (10, 3) and (10, 2).
x = torch.randn(10, 3)
y = torch.randn(10, 2)
# Build a fully connected layer mapping 3 features to 2 outputs.
linear = torch.nn.Linear(3, 2)
print ('w: ', linear.weight)
print ('b: ', linear.bias)
# Build loss function and optimizer.
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
# Forward pass.
pred = linear(x)
# Compute loss.
loss = criterion(pred, y)
print('loss: ', loss.item())
# Backward pass: populates .grad on the layer's parameters.
loss.backward()
# Print out the gradients.
print ('dL/dw: ', linear.weight.grad)
print ('dL/db: ', linear.bias.grad)
# 1-step gradient descent.
optimizer.step()
# You can also perform gradient descent at the low level:
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)
# Print out the loss after 1-step gradient descent.
pred = linear(x)
loss = criterion(pred, y)
print('loss after 1 step optimization: ', loss.item())
# -
# ### Pytorch Tensors and Numpy
import numpy as np
# +
# Create a numpy array.
x = np.array([[1, 2], [3, 4]])
# Convert the numpy array to a torch tensor (shares memory with x).
y = torch.from_numpy(x)
# Convert the torch tensor back to a numpy array.
z = y.numpy()
print("Original numpy: ", x, sep="\n")
print("PyTorch tensor: ", y, sep="\n")
print("Converted numpy: ", z, sep="\n")
# -
# ### Datasets
# #### Use external
import torchvision
# +
# Download and construct MNIST dataset.
train_dataset = torchvision.datasets.MNIST(root='data/',
                                           train=True,
                                           transform=torchvision.transforms.ToTensor(),
                                           download=True)
# Fetch one data pair (read data from disk).
image, label = train_dataset[0]
print(image.size())
print(label)
# -
import matplotlib.pyplot as plt
# Display the tensor as a PIL image; bicubic interpolation smooths the pixels.
plt.imshow(torchvision.transforms.ToPILImage()(image))
plt.imshow(torchvision.transforms.ToPILImage()(image), interpolation="bicubic")
# +
# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=64,
                                           shuffle=True)
# When iteration starts, queue and thread start to load data from files.
data_iter = iter(train_loader)
# Mini-batch images and labels.
# Fix: Python 3 iterators have no .next() method (and torch removed the
# legacy .next alias in 1.13); use the builtin next() instead.
images, labels = next(data_iter)
# Actual usage of the data loader is as below.
for images, labels in train_loader:
    # Training code should be written here.
    pass
# -
# #### Create your own
# +
# You should build your custom dataset as below.
class CustomDataset(torch.utils.data.Dataset):
    # Skeleton dataset: fill in the three methods for your own data.
    def __init__(self):
        # TODO
        # 1. Initialize file paths or a list of file names.
        pass
    def __getitem__(self, index):
        # TODO
        # 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open).
        # 2. Preprocess the data (e.g. torchvision.Transform).
        # 3. Return a data pair (e.g. image and label).
        pass
    def __len__(self):
        # You should change 0 to the total size of your dataset.
        # Left at 0 here, so iterating the loader yields nothing until filled in.
        return 0
# You can then use the prebuilt data loader.
custom_dataset = CustomDataset()
train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                           batch_size=64,
                                           shuffle=True)
# -
# ### Models
# #### ResNet18
# <div style="width:image width px;
# font-size:80%;
# text-align:center;
# float: left; padding-left-right-top-bottom:0.5em;
# border-style: solid; border-color: rgba(211, 211, 211, 0.000);
# background-color: rgba(0,0, 0, 0.000;">
# <img src="./pics/resnet18.png"
# alt="alternate text"
# width=800
# style="padding-bottom:0.5em;"/>
# <div style="padding: 3px;
# width: 400px;
# word-wrap: break-word;
# text-align:justify;">
# Illustration of ResNet18 architecture by <NAME>. <br>
# <a href="https://www.pluralsight.com/guides/introduction-to-resnet"
# style="float: left;">
# Source
# </a>
# </div>
# </div>
# +
# Download and load the pretrained ResNet-18.
resnet = torchvision.models.resnet18(pretrained=True)
# If you want to finetune only the top layer of the model, set as below.
for param in resnet.parameters():
param.requires_grad = False
# Replace the top layer for finetuning.
resnet.fc = torch.nn.Linear(resnet.fc.in_features, 100) # 100 is an example.
# Forward pass.
images = torch.randn(64, 3, 224, 224)
outputs = resnet(images)
print (outputs.size()) # (64, 100)
# -
print(resnet)
# +
from torchviz import make_dot
resnet = torchvision.models.resnet18(pretrained=True)
images = torch.randn(64, 3, 224, 224)
# Render the autograd graph of one forward pass (requires the torchviz
# package plus a system Graphviz install).
make_dot(resnet(images), params=dict(list(resnet.named_parameters())))
# -
# ### Save model
# #### Pytorch format
# +
# Save and load the entire model.
# NOTE(review): saving the whole module pickles the class by reference, so
# loading breaks if the class definition moves or changes — prefer the
# state_dict approach below.
torch.save(resnet, 'model.ckpt')
model = torch.load('model.ckpt')
# Save and load only the model parameters (recommended).
torch.save(resnet.state_dict(), 'params.ckpt')
resnet.load_state_dict(torch.load('params.ckpt'))
# -
# #### ONNX
resnet = torchvision.models.resnet18(pretrained=True)
images = torch.randn(64, 3, 224, 224)
# Export by tracing the model with the dummy input tensor.
torch.onnx.export(resnet, images, "model.onnx")
# Look at the model: https://netron.app/ (sources: https://github.com/lutzroeder/netron)
# ## Feedforward Neural Network
#
# Based on https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/feedforward_neural_network/main.py
# +
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
input_size = 784        # 28*28 flattened MNIST image
hidden_size = 500
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# MNIST dataset
# Downloaded to ./data on first run.
train_dataset = torchvision.datasets.MNIST(root='data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='data',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader
# Shuffle only the training split; evaluation order is irrelevant.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    """Two-layer fully connected classifier: Linear -> ReLU -> Linear."""
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        # Attribute names are kept identical so state_dict keys stay stable.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)
    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, num_classes) logits."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()  # expects raw logits, applies softmax internally
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device
        # Flatten each 28x28 image into a 784-vector (matches input_size).
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        outputs = model(images)
        # argmax over the class dimension gives the predicted label
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
# -
# ## Convolutional Neural Network
# ### Structure
# <div style="width:image width px;
# font-size:80%;
# text-align:center;
# float: left; padding-left-right-top-bottom:0.5em;
# border-style: solid; border-color: rgba(211, 211, 211, 0.000);
# background-color: rgba(0,0, 0, 0.000;">
# <img src="./pics/cnn.jpeg"
# alt="alternate text"
# width=800
# style="padding-bottom:0.5em;"/>
# <div style="padding: 3px;
# width: 400px;
# word-wrap: break-word;
# text-align:justify;">
# Illustration of CNN architecture example by <NAME>. <br>
# <a href="https://medium.com/towards-artificial-intelligence/whirlwind-tour-of-rnns-a11effb7808f"
# style="float: left;">
# Source
# </a>
# </div>
# </div>
# ### Convolution
# <div style="width:image width px;
# font-size:80%;
# text-align:center;
# float: left; padding-left-right-top-bottom:0.5em;
# border-style: solid; border-color: rgba(211, 211, 211, 0.000);
# background-color: rgba(0,0, 0, 0.000;">
# <img src="./pics/conv.gif"
# alt="alternate text"
# width=800
# style="padding-bottom:0.5em;"/>
# <div style="padding: 3px;
# width: 400px;
# word-wrap: break-word;
# text-align:justify;">
# Illustration of convolution operation on a MxNx3 image matrix with a 3x3x3 Kernel by <NAME>. <br>
# <a href="https://medium.com/towards-artificial-intelligence/whirlwind-tour-of-rnns-a11effb7808f"
# style="float: left;">
# Source
# </a>
# </div>
# </div>
# ### Pooling
# <div style="width:image width px;
# font-size:80%;
# text-align:center;
# float: left; padding-left-right-top-bottom:0.5em;
# border-style: solid; border-color: rgba(211, 211, 211, 0.000);
# background-color: rgba(0,0, 0, 0.000;">
# <img src="./pics/pooling.gif"
# alt="alternate text"
# width=800
# style="padding-bottom:0.5em;"/>
# <div style="padding: 3px;
# width: 400px;
# word-wrap: break-word;
# text-align:justify;">
# Illustration of 3x3 pooling over 5x5 convolved feature by <NAME>. <br>
# <a href="https://medium.com/towards-artificial-intelligence/whirlwind-tour-of-rnns-a11effb7808f"
# style="float: left;">
# Source
# </a>
# </div>
# </div>
# #### Types of pooling
# <div style="width:image width px;
# font-size:80%;
# text-align:center;
# float: left; padding-left-right-top-bottom:0.5em;
# border-style: solid; border-color: rgba(211, 211, 211, 0.000);
# background-color: rgba(0,0, 0, 0.000;">
# <img src="./pics/pooling_types.png"
# alt="alternate text"
# width=800
# style="padding-bottom:0.5em;"/>
# <div style="padding: 3px;
# width: 400px;
# word-wrap: break-word;
# text-align:justify;">
# Illustration of types of Pooling by <NAME>. <br>
# <a href="https://medium.com/towards-artificial-intelligence/whirlwind-tour-of-rnns-a11effb7808f"
# style="float: left;">
# Source
# </a>
# </div>
# </div>
# ### Example
#
# Based on https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-intermediate/convolutional_neural_network/main.py
# +
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Hyper parameters
num_epochs = 5
num_classes = 10
batch_size = 100
learning_rate = 0.001
# MNIST dataset
# Downloaded to ./data on first run.
train_dataset = torchvision.datasets.MNIST(root='data/',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='data/',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader
# Shuffle only the training split; evaluation order is irrelevant.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    """Two conv blocks (Conv -> BN -> ReLU -> MaxPool) plus a linear classifier."""
    @staticmethod
    def _conv_block(in_channels, out_channels):
        """5x5 conv (padding preserves size) + batch norm + ReLU + 2x2 max pool."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        # Attribute names kept identical so saved checkpoints remain loadable.
        self.layer1 = self._conv_block(1, 16)
        self.layer2 = self._conv_block(16, 32)
        # Two 2x2 poolings shrink a 28x28 input to 7x7 with 32 channels.
        self.fc = nn.Linear(7*7*32, num_classes)
    def forward(self, x):
        """Map (batch, 1, 28, 28) images to (batch, num_classes) logits."""
        features = self.layer2(self.layer1(x))
        flat = features.reshape(features.size(0), -1)
        return self.fc(flat)
model = ConvNet(num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()  # expects raw logits
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # argmax over the class dimension gives the predicted label
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
# -
# ## Recurrent Neural Network
# <div style="width:image width px;
# font-size:80%;
# text-align:center;
# float: left; padding-left-right-top-bottom:0.5em;
# border-style: solid; border-color: rgba(211, 211, 211, 0.000);
# background-color: rgba(0,0, 0, 0.000;">
# <img src="./pics/rnn.png"
# alt="alternate text"
# width=800
# style="padding-bottom:0.5em;"/>
# <div style="padding: 3px;
# width: 400px;
# word-wrap: break-word;
# text-align:justify;">
# Illustration of The architecture of an RNN by <NAME>. <br>
# <a href="https://medium.com/towards-artificial-intelligence/whirlwind-tour-of-rnns-a11effb7808f"
# style="float: left;">
# Source
# </a>
# </div>
# </div>
# ### LSTM
#
# **(Long-short term memory)**
#
# A recurrent neural network is a network that maintains some kind of state. For example, its output could be used as part of the next input, so that information can propagate along as the network passes over the sequence.
#
# In the case of an LSTM, for each element in the sequence, there is a corresponding hidden state, which in principle can contain information from arbitrary points earlier in the sequence. We can use the hidden state to predict words in a language model, part-of-speech tags, and a myriad of other things.
#
# 
# ### Example
#
# Based on https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-intermediate/recurrent_neural_network/main.py
# +
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
# Each 28x28 MNIST image is treated as a sequence of 28 rows of 28 pixels.
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
# MNIST dataset
# Downloaded to ./data on first run.
train_dataset = torchvision.datasets.MNIST(root='data/',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='data/',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Recurrent neural network (many-to-one)
class RNN(nn.Module):
    """Many-to-one LSTM classifier: last time step's hidden state feeds a linear head."""
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)
    def forward(self, x):
        """Map a (batch, seq_len, input_size) tensor to (batch, num_classes) logits."""
        # Zero-initialized hidden and cell states, one per LSTM layer.
        # NOTE: relies on a module-level `device` being defined before the first call.
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        h0 = torch.zeros(*state_shape).to(device)
        c0 = torch.zeros(*state_shape).to(device)
        # sequence_out: (batch_size, seq_length, hidden_size)
        sequence_out, _ = self.lstm(x, (h0, c0))
        # Decode only the final time step.
        return self.fc(sequence_out[:, -1, :])
model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # View each image as a (sequence_length, input_size) sequence of rows.
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)
        outputs = model(images)
        # argmax over the class dimension gives the predicted label
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
# -
# %reload_ext watermark
# %watermark -d -u -v -iv
# + [markdown] id="O7omYYd6TtOI"
# ## References
#
# 1. https://github.com/yunjey/pytorch-tutorial/
# 2. https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53
| 07_dl_basics_practiceA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#This cell must always on the top of the Notebook!
# %config Completer.use_jedi = False
# # How to enable Autocompletion in Jupyter Notebook application
#
# ## Overview
#
# With the rise of Data Scientists, Financial coders, Casual Developers, or Traders (aka Citizen Developers), and the rapid growth of [Jupyter](https://jupyter.org/) application, [Jupyter Notebook](https://jupyter-notebook.readthedocs.io/en/stable/) and its next-generation, [JupyterLab](https://jupyterlab.readthedocs.io/en/latest/getting_started/overview.html) is a de-facto tool of those Citizen Developers. Thanks to the [IPython kernel](https://ipython.org/), Notebook provides a powerful interactive shell, interactive data visualization, embedded documentation, and supports various programming languages such as Python (native supported), [R](https://developers.refinitiv.com/en/article-catalog/article/setup-jupyter-notebook-r), [Julia](https://datatofish.com/add-julia-to-jupyter/), and much more.
#
# The Notebook application is easier to use when comparing to other traditional IDEs (Integrated development environment) or Editors. Citizen Developers do not need much programming language or technology experience to use the Notebook application. However, The Jupyter Notebook also provides Coding and [*Magic*](https://ipython.readthedocs.io/en/stable/interactive/magics.html) features which make Notebook a friendly development environment for both Citizen and Professional Developers.
#
# This article demonstrates how to enable Python/Jupyter application coding autocompletion feature. The demo application uses content from the [Refinitiv Data Platform (RDP)](https://developers.refinitiv.com/refinitiv-data-platform/refinitiv-data-platform-apis) as an example library.
# Import RDP Libraries - Python edition
import refinitiv.dataplatform as rdp
# ## Introduction to Refinitiv Data Platform (RDP) Libraries
#
# Refinitiv provides a wide range of contents and data which require multiple technologies, delivery mechanisms, data formats, and the multiple APIs to access each content. The [RDP Libraries](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries) are a suite of ease-of-use interfaces providing unified access to streaming and non-streaming data services offered within the [Refinitiv Data Platform (RDP)](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis). The Libraries simplified how to access data to various delivery modes such as Request-Response, Streaming, Bulk File, and Queues via a single library.
#
# Depending on the level of service and data requirements, developers can easily choose how to access data services using the appropriate access channel defined within the library.
#
# 
#
# Using the library developers can access content from all 3 of the access points - all from within the same application if required. The RDP Libraries are available in the following programming languages:
# - Refinitiv Supported Editions: Python and TypeScript/JavaScript (coming soon)
# - Community-based Edition: C#
#
# For more deep detail regarding the RDP Libraries, please refer to the following articles and tutorials:
# - [Developer Article: Discover our Refinitiv Data Platform Library part 1](https://developers.refinitiv.com/en/article-catalog/article/discover-our-refinitiv-data-platform-library-part-1).
# - [Developer Article: Discover our Refinitiv Data Platform Library part 2](https://developers.refinitiv.com/en/article-catalog/article/discover-our-refinitiv-data-platform-library-part-2).
# - [Refinitiv Data Platform Libraries Document: An Introduction page](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries/documentation#refinitiv-data-platform-libraries-an-introduction).
#
# ### Disclaimer
#
# As this notebook is based on alpha version **1.0.0a7.post7** of the Python library, the method signatures, data formats, etc. are subject to change.
# Import other dependencies
import configparser as cp
import numpy as np
import pandas as pd
# You should save a text file with **filename** `rdp.cfg` having the following contents:
#
# [rdp]
# username = YOUR_RDP_EMAIL_USERNAME
# password = <PASSWORD>
# app_key = YOUR_RDP_APP_KEY
#
# This file should be readily available (e.g. in the current working directory) for the next steps.
# Parse the RDP credential file created above. ConfigParser.read() silently
# skips a missing file, so a bad path only surfaces later as a KeyError on
# cfg['rdp'].
cfg = cp.ConfigParser()
cfg.read('rdp.cfg')
# ## Python built-in help
#
# The Python programming language comes with a built-in [help() function](https://docs.python.org/3/library/functions.html#help) to display documentation of modules, functions, classes, objects, etc with the following syntax:
#
# ```
# help([object])
# ```
#
# However, the result is not always easy to read as the following examples:
#
# 
#
# or
#
# 
# +
# You can try it with yourself by running this cell
help(rdp.get_historical_price_events)
# -
# For a library that keeps evolving like the RDP Libraries, this is inconvenient even though the RDP Library itself is easy to use.
#
# ## IPython Autocompletion
#
# Fortunately, the JupyterLab and Jupyter Notebook comes with the [IPython magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-config) ```%config Completer.use_jedi = False``` that enables the library/function autocompletion (like *IntelliSense* in Microsoft development environment term).
#
# You need to set the magic command ```%config Completer.use_jedi = False``` at the **top cell** of the Notebook application (like this Notebook).
#
# 
#
# Once you have enabled and run the ```%config Completer.use_jedi = False``` magic command, you can trigger the code autocompletion by pressing the ```tab``` button after the ```"."``` character.
#
# 
#
# Let's try it yourself, please select the space just after the ```"."``` character below and hit the ```tab``` button.
rdp.
# The magic ```%config Completer.use_jedi = False``` command also enables the ```"shift + tab"``` buttons to show the function help and tooltip information.
#
# 
#
# Let's try it yourself, please select the space just after the ```"()"``` character below and hit ```shift + tab``` buttons.
rdp.get_historical_price_events()
# You can also use ```"?"``` at the end of the function (without ```()```) to display documentation for objects and methods in IPython environment.
# +
# rdp.open_platform_session?
# -
# Please note that all these autocompletion shortcuts also support the library properties/attributes as well.
#
# 
#
# Let's try it yourself, please select the space just after the ```"."``` character below and hit the ```tab``` button.
rdp.Intervals.
# The RDP Libraries let application consumes data from the following platforms
# - DesktopSession (Eikon/Refinitiv Workspace)
# - PlatformSession (RDP, Refinitiv Real-Time Optimized)
# - DeployedPlatformSession (deployed Refinitiv Real-Time/ADS)
#
# This Jupyter Notebook is focusing on the *PlatformSession* only.
# Open RDP Platform Session
# Authenticate against the Refinitiv Data Platform using the app key and
# the username/password grant loaded from rdp.cfg.
session = rdp.open_platform_session(
    cfg['rdp']['app_key'],
    rdp.GrantPassword(
        username = cfg['rdp']['username'],
        password = cfg['rdp']['password']
    )
)
# Reports the session state; should indicate "open" after a successful login.
session.get_open_state()
# We will demonstrate with use RDP Libraries *Function Layer* to request time-series data as a [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) object. We use "?" magic to pull up documentation of the method.
# +
# rdp.get_historical_price_events?
# -
# Request Historical Data of Tesla Inc.
# Returns historical price events for RIC 'TSLA.O' as a pandas DataFrame.
df_tesla = rdp.get_historical_price_events(universe = 'TSLA.O')
# Please note that all IPython helpers and autocompletion magic support all Python libraries in the Notebook environment, not limit to only RDP Libraries.
#
# 
# +
# df_tesla.head?
# -
df_tesla.head(3)
# ### Close RDP Session
# +
# -- Close Session, just calls close_session() function
rdp.close_session()
# After closing, the session state should no longer be "open".
print(session.get_open_state())
# -
# ## Refinitiv Workspace CodeBook
#
# If you are [Refinitiv Workspace](https://www.refinitiv.com/en/products/refinitiv-workspace) or [Eikon](https://www.refinitiv.com/en/products/eikon-trading-software) user, you can access [*CodeBook*](https://www.refinitiv.com/en/products/codebook), the cloud-hosted Jupyter Notebook development environment for Python scripting from the application. The CodeBook is natively available in Refinitiv Workspace and Eikon as an app (**no installation required!!**), providing access to Refinitiv APIs that are already pre-installed on the cloud.
#
# One benefit of using the CodeBook app is the environment has already enabled Jupyter Notebook magics for you. You can use all code autocompletion shortcuts (```tab```, ```shift + tab```, and ```?```) without setting any IPython Magic in your Notebook.
#
# 
#
# Please see more detail regarding the CodeBook app in this [Use Eikon Data API or RDP Library in Python in CodeBook on Web Browser article](https://developers.refinitiv.com/en/article-catalog/article/use-eikon-data-api-or-rdp-library-python-codebook-web-browser).
# ## Conclusion and Next Step
#
# Some Professional Developers may think Jupyter Notebook (and JupyterLab) is just a learning tool for Casual Developers like Data Scientists. However, the tool provides enough coding/development features for a wide range of Developers. The IPython Code Autocompletion feature (and other Magics) helps developers implement the code logic and navigate through the libraries/APIs in the same experience as the traditional IDEs.
#
# There is some new development project that tries to merge the benefits/features of the traditional IDEs and Jupyter Notebook like [Netflix's Polynote](https://polynote.org/). While the JupyterLab also keeps evolving to make a better development environment and easier to use.
#
# At the same time, the [Refinitiv Data Platform (RDP) Libraries](https://developers.refinitiv.com/refinitiv-data-platform/refinitiv-data-platform-libraries) let developers rapidly access Refinitiv Platform content with a few lines of code that easy to understand and maintain. Developers can focus on implement the business logic or analysis data without worry about the connection, authentication detail with the Refinitiv Platforms.
#
#
# ## References
#
# You can find more details regarding the Refinitiv Data Platform Libraries and Jupyter Notebook from the following resources:
# * [Refinitiv Data Platform (RDP) Libraries](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries) on the [Refinitiv Developer Community](https://developers.refinitiv.com/) web site.
# * [Jupyter Project Official page](https://jupyter.org/).
# * [Jupyter Notebook Shortcuts](https://towardsdatascience.com/jypyter-notebook-shortcuts-bf0101a98330).
# * [IPython Built-in magic commands](https://ipython.readthedocs.io/en/stable/interactive/magics.html).
# * [Jupyter Notebook: An Introduction](https://realpython.com/jupyter-notebook-introduction/).
# * [Jupyter Lab: Evolution of the Jupyter Notebook](https://towardsdatascience.com/jupyter-lab-evolution-of-the-jupyter-notebook-5297cacde6b).
# * [10 Compelling Reasons you Should Use JupyterLab for Data Science Coding](https://www.analyticsvidhya.com/blog/2020/06/10-powerful-reasons-jupyterlab-data-science/).
# * [RDP Libraries Quick Start Guide page](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries/quick-start).
# * [RDP Libraries Tutorial page](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-libraries/tutorials).
# * [Discover our Refinitiv Data Platform Library (part 1)](https://developers.refinitiv.com/en/article-catalog/article/discover-our-refinitiv-data-platform-library-part-1).
# * [Discover our Refinitiv Data Platform Library (part 2)](https://developers.refinitiv.com/en/article-catalog/article/discover-our-refinitiv-data-platform-library-part-2).
# * [Use Eikon Data API or RDP Library in Python in CodeBook on Web Browser article](https://developers.refinitiv.com/en/article-catalog/article/use-eikon-data-api-or-rdp-library-python-codebook-web-browser).
#
# For any questions related to this article or Refinitiv Data Platform Libraries, please use the Developers Community [Q&A Forum](https://community.developers.refinitiv.com/spaces/321/refinitiv-data-platform-libraries.html).
| notebook/rdp_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#23rd march
# Basic list operations walkthrough (each bare expression echoes its value
# in the notebook).
my_list=[1,2,3]
# Lists are heterogeneous: the same name can be rebound to mixed element types.
my_list=['string',100,23.2]
len(my_list)
my_list[1:]
len(my_list)
mylist =["one","two","three"]
mylist[0]
mylist[1:]
mylist
another_list=["four","five"]
# + builds a new concatenated list; neither operand is modified.
new_list=mylist + another_list
mylist
another_list
new_list
# Lists are mutable: items can be reassigned in place.
new_list[0]='ONE ALL CAPS'
new_list
new_list.append('six') # append adds an element to the end of the list
new_list
# +
# pop is actually going to pop off an item from the end of list
# -
new_list.pop()
new_list
# pop() returns the removed element.
popped_item=new_list.pop()
popped_item
new_list
new_list.pop(0) # index of the element to remove
new_list
# +
new_list=['a','b','c','d','e']
num_list=['4','1','8','3']
# -
new_list.sort() # sort() sorts in place and returns None
new_list
my_sorted_list=new_list.sort()
type(my_sorted_list)
None # no value
new_list.sort()
my_sorted_list=new_list
my_sorted_list
num_list
# Note: the elements are strings, so they sort lexicographically.
num_list.sort()
num_list
num_list.reverse()
num_list
def myfunc(*args):
    """Return a list of the even numbers among the positional arguments."""
    # Comprehension form of the original append-in-a-loop filter.
    return [n for n in args if n % 2 == 0]
myfunc(5,6,7,8)
def myfunc(string):
    """Alternate character case: characters at even 0-based positions are
    lowercased, characters at odd positions are uppercased.

    Fixes the original version, which never initialized ``new_string``
    (NameError on the first iteration), misspelled ``string`` as ``syring``
    in the else branch, and built an unused ``mylist``.
    """
    new_string = ""
    for x in range(len(string)):
        if (x + 1) % 2 == 0:
            new_string += string[x].upper()
        else:
            new_string += string[x].lower()
    return new_string
| Python/List.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # rio-tiler-mosaic custom pixel selection
#
# Examples using defaults `pixel_selection` methods and how to create customs ones
# %pylab inline
# +
from rio_tiler.landsat8 import tile as tiler
from rio_tiler_mosaic.mosaic import mosaic_tiler
from rio_tiler_mosaic.methods import defaults
from rio_tiler_mosaic.methods.base import MosaicMethodBase
from rio_tiler.utils import array_to_image, get_colormap, linear_rescale, expression as expressionTiler
from rio_tiler.profiles import img_profiles
from rio_color.operations import parse_operations
from rio_color.utils import scale_dtype, to_math_type
from rasterio.plot import reshape_as_image
# -
# ### We use landsat 8 product ID and rio_tiler.landsat8.tile handler
# Two overlapping Landsat-8 scene IDs to mosaic together.
assets = ["LC08_L1TP_014028_20190704_20190705_01_RT", "LC08_L1TP_015028_20190609_20190619_01_T1"]
# Web-mercator (XYZ) tile coordinates to render.
x = 151
y = 183
z = 9
# rio-color operations applied to each rendered tile for display.
color_formula = "gamma RGB 3.5, saturation 1.7, sigmoidal RGB 15 0.35"
# ### FirstMethod: Fill with the first value available
# +
# Default pixel selection: the first asset with valid data wins per pixel.
tile, mask = mosaic_tiler(
    assets,
    x,
    y,
    z,
    tiler,
)
# Apply the rio-color formula and rescale back to uint8 for display.
for ops in parse_operations(color_formula):
    tile = scale_dtype(ops(to_math_type(tile)), np.uint8)
# Show the rendered tile next to its validity mask.
fig = figure(figsize=(30, 10))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(reshape_as_image(tile))
ax = fig.add_subplot(1, 2, 2)
ax.imshow(mask)
# -
# ### MeanMethod: Get the mean from all the stack of data
# +
# MeanMethod: each output pixel is the mean over the stack of assets.
tile, mask = mosaic_tiler(
    assets,
    x,
    y,
    z,
    tiler,
    pixel_selection=defaults.MeanMethod(),
)
# Averaging may yield a floating-point array; cast before the color ops.
tile = tile.astype(np.uint16)
for ops in parse_operations(color_formula):
    tile = scale_dtype(ops(to_math_type(tile)), np.uint8)
fig = figure(figsize=(30, 10))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(reshape_as_image(tile))
ax = fig.add_subplot(1, 2, 2)
ax.imshow(mask)
# -
# ## Create Custom pixel_selection class
#
# ### 1. Using a 4th band for decision: highest NDVI
# +
class CustomFourthBandH(MosaicMethodBase):
    """Keep, per pixel, the asset whose last ("4th") band value is highest.

    Intended for an expression whose last band is NDVI, so the mosaic keeps
    the highest-NDVI observation for every pixel. (The original docstring,
    copied from MeanMethod, was incorrect.)
    """
    @property
    def data(self):
        """Return (data, mask), dropping the decision band from the output."""
        if self.tile is not None:
            # data[:-1] strips the decision band; the boolean mask is
            # inverted and scaled to the 0/255 convention used downstream.
            return self.tile.data[:-1], ~self.tile.mask[0] * 255
        else:
            return None, None
    def feed(self, tile):
        """Merge a new asset tile into the running mosaic, pixel by pixel."""
        if self.tile is None:
            self.tile = tile
            return
        # Take the new tile wherever its decision band is larger (and the
        # new pixel is valid), or wherever the current mosaic pixel is masked.
        pidex = (
            numpy.bitwise_and(tile.data[-1] > self.tile.data[-1], ~tile.mask) | self.tile.mask
        )
        mask = numpy.where(pidex, tile.mask, self.tile.mask)
        self.tile = numpy.ma.where(pidex, tile, self.tile)
        self.tile.mask = mask
# Here we use rio-tiler special expression handler to pass bands names and expression
# rio-tiler.expression will produce a 4band array (b4, b3, b2, ndvi)
expr = "b4,b3,b2,(b5-b4)/(b5+b4)"
tile, mask = mosaic_tiler(
    assets,
    x,
    y,
    z,
    expressionTiler,
    pixel_selection=CustomFourthBandH(),
    expr=expr
)
# Because ndvi is a floating point value
# (the output from expressionTiler and thus mosaic_tiler)
# we need to cast the values to uint16
tile = tile.astype(np.uint16)
for ops in parse_operations(color_formula):
    tile = scale_dtype(ops(to_math_type(tile)), np.uint8)
# Rendered tile on the left, validity mask on the right.
fig = figure(figsize=(30, 10))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(reshape_as_image(tile))
ax = fig.add_subplot(1, 2, 2)
ax.imshow(mask)
# -
# ### 2. Using a 4th band for decision: lowest Band 9 (Cirrus)
# +
class CustomFourthBandL(MosaicMethodBase):
    """Keep, per pixel, the asset whose last ("4th") band value is lowest.

    Used below with Landsat band 9 (cirrus) as the decision band, preferring
    the least cirrus-affected observation per pixel. (The original docstring,
    copied from MeanMethod, was incorrect.)
    """
    @property
    def data(self):
        """Return (data, mask), dropping the decision band from the output."""
        if self.tile is not None:
            # We remove the 4th band
            return self.tile.data[:-1], ~self.tile.mask[0] * 255
        else:
            return None, None
    def feed(self, tile):
        """Merge a new asset tile into the running mosaic, pixel by pixel."""
        if self.tile is None:
            self.tile = tile
            return
        # Take the new tile wherever its decision band is smaller (and the
        # new pixel is valid), or wherever the current mosaic pixel is masked.
        pidex = (
            numpy.bitwise_and(tile.data[-1] < self.tile.data[-1], ~tile.mask) | self.tile.mask
        )
        mask = numpy.where(pidex, tile.mask, self.tile.mask)
        self.tile = numpy.ma.where(pidex, tile, self.tile)
        self.tile.mask = mask
# Band 9 (cirrus) is requested as the 4th band; CustomFourthBandL keeps the
# pixel with the lowest cirrus value, i.e. the least cloud-affected one.
tile, mask = mosaic_tiler(
    assets,
    x,
    y,
    z,
    tiler,
    pixel_selection=CustomFourthBandL(),
    bands=("4", "3", "2", "9"),
)
tile = tile.astype(np.uint16)
for ops in parse_operations(color_formula):
    tile = scale_dtype(ops(to_math_type(tile)), np.uint8)
fig = figure(figsize=(30, 10))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(reshape_as_image(tile))
ax = fig.add_subplot(1, 2, 2)
ax.imshow(mask)
# -
| example/rio-tiler-mosaic-Custom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
'''
Initial code was copied from:
https://github.com/jason71995/Keras-GAN-Library
'''
# Loading MHAD data for action 1, all persons and all repetitions of each person
from utils.data_loader import data_loader
data_object= data_loader(matlab_action_path='../gan/')
# Returns the normalized data plus the min/max used for scaling — presumably
# scaled into the tanh range [-1, 1]; confirm in utils.data_loader.
myData, mymin, mymax = data_object.actions_normalised([1], twoD_true_or_threeD_false=False)
myData.shape
# +
import tensorflow as tf
import keras.backend as K
from keras.models import Sequential
from keras.layers import Conv2D,GlobalAveragePooling2D,LeakyReLU,Conv2DTranspose,Activation,BatchNormalization
from keras.optimizers import Adam
from keras.layers import Dense
from keras import initializers
from keras.layers.core import Dropout
noise_dim = 3


def build_generator(input_shape):
    """Generator network: noise_dim-dimensional noise -> 105-dim pose.

    One Dense(256) block with batch normalisation and LeakyReLU, followed
    by a tanh output layer so generated poses lie in [-1, 1], matching the
    normalised training data.
    """
    net = Sequential()
    net.add(Dense(256, input_dim=noise_dim))
    net.add(BatchNormalization())
    net.add(LeakyReLU(0.2))
    net.add(Dense(105, activation='tanh'))
    return net
def build_discriminator(input_shape):
    """Discriminator network: 105-dim pose -> probability that it is real.

    One Dense(256) block with LeakyReLU and dropout, then a sigmoid output.
    """
    net = Sequential()
    net.add(Dense(256, input_dim=105))
    net.add(LeakyReLU(0.2))
    net.add(Dropout(0.3))
    net.add(Dense(1, activation='sigmoid'))
    return net
def build_functions(batch_size, noise_size, image_size, generator, discriminator):
    """Compile Keras-backend training functions for one GAN step.

    Returns ``(d_train, g_train)``; each is called as
    ``fn([real_images, learning_phase])`` and returns a one-element list
    holding the scalar loss.
    """
    # Fresh standard-normal noise is sampled inside the graph on every call.
    noise = K.random_normal((batch_size,) + noise_size,0.0,1.0,"float32")
    real_image = K.placeholder((batch_size,) + image_size)
    fake_image = generator(noise)
    # Run the discriminator once on [real; fake] stacked along the batch
    # axis, then split the predictions back apart.
    d_input = K.concatenate([real_image, fake_image], axis=0)
    pred_real, pred_fake = tf.split(discriminator(d_input), num_or_size_splits = 2, axis = 0)
    # Clip predictions away from 0/1 to keep the logs finite.
    pred_real = K.clip(pred_real,K.epsilon(),1-K.epsilon())
    pred_fake = K.clip(pred_fake,K.epsilon(),1-K.epsilon())
    # Standard GAN discriminator loss and non-saturating generator loss.
    d_loss = -(K.mean(K.log(pred_real)) + K.mean(K.log(1-pred_fake)))
    g_loss = -K.mean(K.log(pred_fake))
    # get updates of mean and variance in batch normalization layers
    d_updates = discriminator.get_updates_for([d_input])
    g_updates = generator.get_updates_for([noise])
    d_training_updates = Adam(lr=0.0001, beta_1=0.0, beta_2=0.9).get_updates(d_loss, discriminator.trainable_weights)
    d_train = K.function([real_image, K.learning_phase()], [d_loss],d_updates + d_training_updates)
    g_training_updates = Adam(lr=0.0001, beta_1=0.0, beta_2=0.9).get_updates(g_loss, generator.trainable_weights)
    g_train = K.function([real_image, K.learning_phase()], [g_loss], g_updates + g_training_updates)
    return d_train,g_train
# +
import keras
#from gan_libs.DCGAN import build_generator, build_discriminator, build_functions
# from gan_libs.LSGAN import build_generator, build_discriminator, build_functions
# from gan_libs.SNGAN import build_generator, build_discriminator, build_functions
# from gan_libs.WGAN_GP import build_generator, build_discriminator, build_functions
from utils.common import set_gpu_config, predict_images
from utils.draw_pose import draw_pose
import numpy as np
#set_gpu_config("0",0.5)

# ---- Training configuration (3-D latent experiment) ----
epoch = 50 + 1
image_size = (1,1,105)
noise_size = (1,1,3)
batch_size = 16

x_train = myData
np.random.shuffle(x_train)

generator = build_generator(noise_size)
print('gen params:', generator.count_params())
discriminator = build_discriminator(image_size)
print('dis params:', discriminator.count_params())
d_train, g_train = build_functions(batch_size, noise_size, image_size, generator, discriminator)
# generator.load_weights("e30_generator.h5".format(e))
# discriminator.load_weights("e30_discriminator.h5".format(e))

number_of_all_data = x_train.shape[0]
number_of_batches = int(number_of_all_data/batch_size)
print('Number of Batches passed in each epoch: ', number_of_batches)

import time
start_time = time.time()
is_first_epoch = True
for e in range(epoch):
    index = 0
    for batch in range(number_of_batches):
        real_images = x_train[index:index+batch_size]
        # BUG FIX: was `index =+ batch_size` (parsed as `index = +batch_size`),
        # so every batch after the first trained on the same 16 samples.
        index += batch_size
        real_images.shape = (batch_size,1,1,105)
        d_loss, = d_train([real_images, 1])
        g_loss, = g_train([real_images, 1])
        # Roughly once per 100 batches: log losses and render a sample pose
        # generated from the latent-space origin.
        if np.random.randint(low=0, high=100) == 1:
            print("[{0}/{1}] d_loss: {2:.4}, g_loss: {3:.4}".format(e, epoch, d_loss, g_loss))
            # generating a sample
            image = generator.predict(np.zeros(shape=(1,3)))
            image = np.array(image)
            draw_pose(image.reshape(105), 'output', "_ThreeLatents_e{0}_batch{1}".format(e, batch))
    if is_first_epoch:
        elapsed_time = time.time() - start_time
        print('\n\nTime Taken for single epoch:')
        print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
        is_first_epoch = False
    # Checkpoint weights every 50 epochs (skipping epoch 0).
    if e % 50 == 0 and e > 0:
        generator.save_weights("_ThreeLatents_e{0}_generator.h5".format(e))
        discriminator.save_weights("_ThreeLatents_e{0}_discriminator.h5".format(e))

# just monitoring:
##########################################################################################
elapsed_time = time.time() - start_time
print('\n\n\n\nNumber of parameter for the Generator and discriminator respectively:\n')
print(generator.count_params())
print('')
print(discriminator.count_params())
print('\n\nNumber of Epochs and steps for each epoch:\n')
print('epochs: ', epoch, ' batches: ', number_of_batches)
print('\n\nTime Taken:')
print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
##########################################################################################
# NOTE: previously each epoch iterated 1000 steps; in each step the data was permuted
# and a batch was chosen. Now the data is shuffled once and number_of_data/batch_size
# steps are taken per epoch, so the total number of samples seen stays balanced.
# -
# +
import tensorflow as tf
import keras.backend as K
from keras.models import Sequential
from keras.layers import Conv2D,GlobalAveragePooling2D,LeakyReLU,Conv2DTranspose,Activation,BatchNormalization
from keras.optimizers import Adam
from keras.layers import Dense
from keras import initializers
from keras.layers.core import Dropout
# Latent dimensionality for this experiment: a 2-D noise vector.
noise_dim = 2
def build_generator(input_shape):
    """Generator: noise_dim-D noise -> Dense(256)+BN+LeakyReLU -> 105-dim tanh pose."""
    generator = Sequential()
    generator.add(Dense(256,
                        input_dim = noise_dim))
    generator.add(BatchNormalization())
    generator.add(LeakyReLU(0.2))
    #     generator.add(Dense(256))
    #     generator.add(BatchNormalization())
    #     generator.add(LeakyReLU(0.2))
    generator.add(Dense(105, activation='tanh'))
    return generator
def build_discriminator(input_shape):
    """Discriminator: 105-dim pose -> Dense(256)+LeakyReLU+Dropout -> sigmoid real/fake."""
    discriminator = Sequential()
    discriminator.add(Dense(256,
                            input_dim=105))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    #     discriminator.add(Dense(256))
    #     discriminator.add(LeakyReLU(0.2))
    #     discriminator.add(Dropout(0.3))
    discriminator.add(Dense(1, activation='sigmoid'))
    return discriminator
def build_functions(batch_size, noise_size, image_size, generator, discriminator):
    """Compile backend train functions; returns (d_train, g_train), each
    called as fn([real_images, learning_phase]) -> [loss]."""
    # Standard-normal noise sampled in-graph on every call.
    noise = K.random_normal((batch_size,) + noise_size,0.0,1.0,"float32")
    real_image = K.placeholder((batch_size,) + image_size)
    fake_image = generator(noise)
    # One discriminator pass over [real; fake], split back into two halves.
    d_input = K.concatenate([real_image, fake_image], axis=0)
    pred_real, pred_fake = tf.split(discriminator(d_input), num_or_size_splits = 2, axis = 0)
    # Clip away from 0/1 to keep the logs finite.
    pred_real = K.clip(pred_real,K.epsilon(),1-K.epsilon())
    pred_fake = K.clip(pred_fake,K.epsilon(),1-K.epsilon())
    # Standard GAN discriminator loss and non-saturating generator loss.
    d_loss = -(K.mean(K.log(pred_real)) + K.mean(K.log(1-pred_fake)))
    g_loss = -K.mean(K.log(pred_fake))
    # get updates of mean and variance in batch normalization layers
    d_updates = discriminator.get_updates_for([d_input])
    g_updates = generator.get_updates_for([noise])
    d_training_updates = Adam(lr=0.0001, beta_1=0.0, beta_2=0.9).get_updates(d_loss, discriminator.trainable_weights)
    d_train = K.function([real_image, K.learning_phase()], [d_loss],d_updates + d_training_updates)
    g_training_updates = Adam(lr=0.0001, beta_1=0.0, beta_2=0.9).get_updates(g_loss, generator.trainable_weights)
    g_train = K.function([real_image, K.learning_phase()], [g_loss], g_updates + g_training_updates)
    return d_train,g_train
# +
import keras
#from gan_libs.DCGAN import build_generator, build_discriminator, build_functions
# from gan_libs.LSGAN import build_generator, build_discriminator, build_functions
# from gan_libs.SNGAN import build_generator, build_discriminator, build_functions
# from gan_libs.WGAN_GP import build_generator, build_discriminator, build_functions
from utils.common import set_gpu_config, predict_images
from utils.draw_pose import draw_pose
import numpy as np
#set_gpu_config("0",0.5)

# ---- Training configuration (2-D latent experiment) ----
epoch = 50 + 1
image_size = (1,1,105)
noise_size = (1,1,2)
batch_size = 16

x_train = myData
np.random.shuffle(x_train)

generator = build_generator(noise_size)
print('gen params:', generator.count_params())
discriminator = build_discriminator(image_size)
print('dis params:', discriminator.count_params())
d_train, g_train = build_functions(batch_size, noise_size, image_size, generator, discriminator)
# generator.load_weights("e30_generator.h5".format(e))
# discriminator.load_weights("e30_discriminator.h5".format(e))

number_of_all_data = x_train.shape[0]
number_of_batches = int(number_of_all_data/batch_size)
print('Number of Batches passed in each epoch: ', number_of_batches)

import time
start_time = time.time()
is_first_epoch = True
for e in range(epoch):
    index = 0
    for batch in range(number_of_batches):
        real_images = x_train[index:index+batch_size]
        # BUG FIX: was `index =+ batch_size` (parsed as `index = +batch_size`),
        # so every batch after the first trained on the same 16 samples.
        index += batch_size
        real_images.shape = (batch_size,1,1,105)
        d_loss, = d_train([real_images, 1])
        g_loss, = g_train([real_images, 1])
        # Roughly once per 100 batches: log losses and render a sample pose.
        if np.random.randint(low=0, high=100) == 1:
            print("[{0}/{1}] d_loss: {2:.4}, g_loss: {3:.4}".format(e, epoch, d_loss, g_loss))
            # generating a sample
            image = generator.predict(np.zeros(shape=(1,2)))
            image = np.array(image)
            draw_pose(image.reshape(105), 'output', "_TwoLatents_e{0}_batch{1}".format(e, batch))
    if is_first_epoch:
        elapsed_time = time.time() - start_time
        print('\n\nTime Taken for single epoch:')
        print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
        is_first_epoch = False
    # Checkpoint weights every 50 epochs (skipping epoch 0).
    if e % 50 == 0 and e > 0:
        generator.save_weights("_TwoLatents_e{0}_generator.h5".format(e))
        discriminator.save_weights("_TwoLatents_e{0}_discriminator.h5".format(e))

# just monitoring:
##########################################################################################
elapsed_time = time.time() - start_time
print('\n\n\n\nNumber of parameter for the Generator and discriminator respectively:\n')
print(generator.count_params())
print('')
print(discriminator.count_params())
print('\n\nNumber of Epochs and steps for each epoch:\n')
print('epochs: ', epoch, ' batches: ', number_of_batches)
print('\n\nTime Taken:')
print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
##########################################################################################
# NOTE: previously each epoch iterated 1000 steps; in each step the data was permuted
# and a batch was chosen. Now the data is shuffled once and number_of_data/batch_size
# steps are taken per epoch, so the total number of samples seen stays balanced.
# -
# +
import tensorflow as tf
import keras.backend as K
from keras.models import Sequential
from keras.layers import Conv2D,GlobalAveragePooling2D,LeakyReLU,Conv2DTranspose,Activation,BatchNormalization
from keras.optimizers import Adam
from keras.layers import Dense
from keras import initializers
from keras.layers.core import Dropout
# Latent dimensionality for this experiment: a single scalar noise value.
noise_dim = 1
def build_generator(input_shape):
    """Generator: noise_dim-D noise -> Dense(256)+BN+LeakyReLU -> 105-dim tanh pose."""
    generator = Sequential()
    generator.add(Dense(256,
                        input_dim = noise_dim))
    generator.add(BatchNormalization())
    generator.add(LeakyReLU(0.2))
    #     generator.add(Dense(256))
    #     generator.add(BatchNormalization())
    #     generator.add(LeakyReLU(0.2))
    generator.add(Dense(105, activation='tanh'))
    return generator
def build_discriminator(input_shape):
    """Discriminator: 105-dim pose -> Dense(256)+LeakyReLU+Dropout -> sigmoid real/fake."""
    discriminator = Sequential()
    discriminator.add(Dense(256,
                            input_dim=105))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    #     discriminator.add(Dense(256))
    #     discriminator.add(LeakyReLU(0.2))
    #     discriminator.add(Dropout(0.3))
    discriminator.add(Dense(1, activation='sigmoid'))
    return discriminator
def build_functions(batch_size, noise_size, image_size, generator, discriminator):
    """Compile backend train functions; returns (d_train, g_train), each
    called as fn([real_images, learning_phase]) -> [loss]."""
    # Standard-normal noise sampled in-graph on every call.
    noise = K.random_normal((batch_size,) + noise_size,0.0,1.0,"float32")
    real_image = K.placeholder((batch_size,) + image_size)
    fake_image = generator(noise)
    # One discriminator pass over [real; fake], split back into two halves.
    d_input = K.concatenate([real_image, fake_image], axis=0)
    pred_real, pred_fake = tf.split(discriminator(d_input), num_or_size_splits = 2, axis = 0)
    # Clip away from 0/1 to keep the logs finite.
    pred_real = K.clip(pred_real,K.epsilon(),1-K.epsilon())
    pred_fake = K.clip(pred_fake,K.epsilon(),1-K.epsilon())
    # Standard GAN discriminator loss and non-saturating generator loss.
    d_loss = -(K.mean(K.log(pred_real)) + K.mean(K.log(1-pred_fake)))
    g_loss = -K.mean(K.log(pred_fake))
    # get updates of mean and variance in batch normalization layers
    d_updates = discriminator.get_updates_for([d_input])
    g_updates = generator.get_updates_for([noise])
    d_training_updates = Adam(lr=0.0001, beta_1=0.0, beta_2=0.9).get_updates(d_loss, discriminator.trainable_weights)
    d_train = K.function([real_image, K.learning_phase()], [d_loss],d_updates + d_training_updates)
    g_training_updates = Adam(lr=0.0001, beta_1=0.0, beta_2=0.9).get_updates(g_loss, generator.trainable_weights)
    g_train = K.function([real_image, K.learning_phase()], [g_loss], g_updates + g_training_updates)
    return d_train,g_train
# +
import keras
#from gan_libs.DCGAN import build_generator, build_discriminator, build_functions
# from gan_libs.LSGAN import build_generator, build_discriminator, build_functions
# from gan_libs.SNGAN import build_generator, build_discriminator, build_functions
# from gan_libs.WGAN_GP import build_generator, build_discriminator, build_functions
from utils.common import set_gpu_config, predict_images
from utils.draw_pose import draw_pose
import numpy as np
#set_gpu_config("0",0.5)

# ---- Training configuration (1-D latent, noise std 1.0) ----
epoch = 50 + 1
image_size = (1,1,105)
noise_size = (1,1,1)
batch_size = 16

x_train = myData
np.random.shuffle(x_train)

generator = build_generator(noise_size)
print('gen params:', generator.count_params())
discriminator = build_discriminator(image_size)
print('dis params:', discriminator.count_params())
d_train, g_train = build_functions(batch_size, noise_size, image_size, generator, discriminator)
# generator.load_weights("e30_generator.h5".format(e))
# discriminator.load_weights("e30_discriminator.h5".format(e))

number_of_all_data = x_train.shape[0]
number_of_batches = int(number_of_all_data/batch_size)
print('Number of Batches passed in each epoch: ', number_of_batches)

import time
start_time = time.time()
is_first_epoch = True
for e in range(epoch):
    index = 0
    for batch in range(number_of_batches):
        real_images = x_train[index:index+batch_size]
        # BUG FIX: was `index =+ batch_size` (parsed as `index = +batch_size`),
        # so every batch after the first trained on the same 16 samples.
        index += batch_size
        real_images.shape = (batch_size,1,1,105)
        d_loss, = d_train([real_images, 1])
        g_loss, = g_train([real_images, 1])
        # Roughly once per 100 batches: log losses and render a sample pose.
        if np.random.randint(low=0, high=100) == 1:
            print("[{0}/{1}] d_loss: {2:.4}, g_loss: {3:.4}".format(e, epoch, d_loss, g_loss))
            # generating a sample
            image = generator.predict(np.zeros(shape=(1,1)))
            image = np.array(image)
            draw_pose(image.reshape(105), 'output', "_OneLatent_1_e{0}_batch{1}".format(e, batch))
    if is_first_epoch:
        elapsed_time = time.time() - start_time
        print('\n\nTime Taken for single epoch:')
        print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
        is_first_epoch = False
    # Checkpoint weights every 50 epochs (skipping epoch 0).
    if e % 50 == 0 and e > 0:
        generator.save_weights("_OneLatent_1_e{0}_generator.h5".format(e))
        discriminator.save_weights("_OneLatent_1_e{0}_discriminator.h5".format(e))

# just monitoring:
##########################################################################################
elapsed_time = time.time() - start_time
print('\n\n\n\nNumber of parameter for the Generator and discriminator respectively:\n')
print(generator.count_params())
print('')
print(discriminator.count_params())
print('\n\nNumber of Epochs and steps for each epoch:\n')
print('epochs: ', epoch, ' batches: ', number_of_batches)
print('\n\nTime Taken:')
print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
##########################################################################################
# NOTE: previously each epoch iterated 1000 steps; in each step the data was permuted
# and a batch was chosen. Now the data is shuffled once and number_of_data/batch_size
# steps are taken per epoch, so the total number of samples seen stays balanced.
# -
# +
import tensorflow as tf
import keras.backend as K
from keras.models import Sequential
from keras.layers import Conv2D,GlobalAveragePooling2D,LeakyReLU,Conv2DTranspose,Activation,BatchNormalization
from keras.optimizers import Adam
from keras.layers import Dense
from keras import initializers
from keras.layers.core import Dropout
# Latent dimensionality for this experiment: a single scalar noise value.
noise_dim = 1
def build_generator(input_shape):
    """Generator: noise_dim-D noise -> Dense(256)+BN+LeakyReLU -> 105-dim tanh pose."""
    generator = Sequential()
    generator.add(Dense(256,
                        input_dim = noise_dim))
    generator.add(BatchNormalization())
    generator.add(LeakyReLU(0.2))
    #     generator.add(Dense(256))
    #     generator.add(BatchNormalization())
    #     generator.add(LeakyReLU(0.2))
    generator.add(Dense(105, activation='tanh'))
    return generator
def build_discriminator(input_shape):
    """Discriminator: 105-dim pose -> Dense(256)+LeakyReLU+Dropout -> sigmoid real/fake."""
    discriminator = Sequential()
    discriminator.add(Dense(256,
                            input_dim=105))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    #     discriminator.add(Dense(256))
    #     discriminator.add(LeakyReLU(0.2))
    #     discriminator.add(Dropout(0.3))
    discriminator.add(Dense(1, activation='sigmoid'))
    return discriminator
def build_functions(batch_size, noise_size, image_size, generator, discriminator):
    """Compile backend train functions; returns (d_train, g_train), each
    called as fn([real_images, learning_phase]) -> [loss]."""
    # NOTE: the noise std-dev is 3.0 in this experiment (the other three
    # copies of this function use 1.0) — a wider 1-D latent space.
    noise = K.random_normal((batch_size,) + noise_size,0.0,3.0,"float32")
    real_image = K.placeholder((batch_size,) + image_size)
    fake_image = generator(noise)
    # One discriminator pass over [real; fake], split back into two halves.
    d_input = K.concatenate([real_image, fake_image], axis=0)
    pred_real, pred_fake = tf.split(discriminator(d_input), num_or_size_splits = 2, axis = 0)
    # Clip away from 0/1 to keep the logs finite.
    pred_real = K.clip(pred_real,K.epsilon(),1-K.epsilon())
    pred_fake = K.clip(pred_fake,K.epsilon(),1-K.epsilon())
    # Standard GAN discriminator loss and non-saturating generator loss.
    d_loss = -(K.mean(K.log(pred_real)) + K.mean(K.log(1-pred_fake)))
    g_loss = -K.mean(K.log(pred_fake))
    # get updates of mean and variance in batch normalization layers
    d_updates = discriminator.get_updates_for([d_input])
    g_updates = generator.get_updates_for([noise])
    d_training_updates = Adam(lr=0.0001, beta_1=0.0, beta_2=0.9).get_updates(d_loss, discriminator.trainable_weights)
    d_train = K.function([real_image, K.learning_phase()], [d_loss],d_updates + d_training_updates)
    g_training_updates = Adam(lr=0.0001, beta_1=0.0, beta_2=0.9).get_updates(g_loss, generator.trainable_weights)
    g_train = K.function([real_image, K.learning_phase()], [g_loss], g_updates + g_training_updates)
    return d_train,g_train
# +
import keras
#from gan_libs.DCGAN import build_generator, build_discriminator, build_functions
# from gan_libs.LSGAN import build_generator, build_discriminator, build_functions
# from gan_libs.SNGAN import build_generator, build_discriminator, build_functions
# from gan_libs.WGAN_GP import build_generator, build_discriminator, build_functions
from utils.common import set_gpu_config, predict_images
from utils.draw_pose import draw_pose
import numpy as np
#set_gpu_config("0",0.5)

# ---- Training configuration (1-D latent, noise std 3.0) ----
epoch = 50 + 1
image_size = (1,1,105)
noise_size = (1,1,1)
batch_size = 16

x_train = myData
np.random.shuffle(x_train)

generator = build_generator(noise_size)
print('gen params:', generator.count_params())
discriminator = build_discriminator(image_size)
print('dis params:', discriminator.count_params())
d_train, g_train = build_functions(batch_size, noise_size, image_size, generator, discriminator)
# generator.load_weights("e30_generator.h5".format(e))
# discriminator.load_weights("e30_discriminator.h5".format(e))

number_of_all_data = x_train.shape[0]
number_of_batches = int(number_of_all_data/batch_size)
print('Number of Batches passed in each epoch: ', number_of_batches)

import time
start_time = time.time()
is_first_epoch = True
for e in range(epoch):
    index = 0
    for batch in range(number_of_batches):
        real_images = x_train[index:index+batch_size]
        # BUG FIX: was `index =+ batch_size` (parsed as `index = +batch_size`),
        # so every batch after the first trained on the same 16 samples.
        index += batch_size
        real_images.shape = (batch_size,1,1,105)
        d_loss, = d_train([real_images, 1])
        g_loss, = g_train([real_images, 1])
        # Roughly once per 100 batches: log losses and render a sample pose.
        if np.random.randint(low=0, high=100) == 1:
            print("[{0}/{1}] d_loss: {2:.4}, g_loss: {3:.4}".format(e, epoch, d_loss, g_loss))
            # generating a sample
            image = generator.predict(np.zeros(shape=(1,1)))
            image = np.array(image)
            draw_pose(image.reshape(105), 'output', "_OneLatent_3_e{0}_batch{1}".format(e, batch))
    if is_first_epoch:
        elapsed_time = time.time() - start_time
        print('\n\nTime Taken for single epoch:')
        print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
        is_first_epoch = False
    # Checkpoint weights every 50 epochs (skipping epoch 0).
    if e % 50 == 0 and e > 0:
        generator.save_weights("_OneLatent_3_e{0}_generator.h5".format(e))
        discriminator.save_weights("_OneLatent_3_e{0}_discriminator.h5".format(e))

# just monitoring:
##########################################################################################
elapsed_time = time.time() - start_time
print('\n\n\n\nNumber of parameter for the Generator and discriminator respectively:\n')
print(generator.count_params())
print('')
print(discriminator.count_params())
print('\n\nNumber of Epochs and steps for each epoch:\n')
print('epochs: ', epoch, ' batches: ', number_of_batches)
print('\n\nTime Taken:')
print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
##########################################################################################
# NOTE: previously each epoch iterated 1000 steps; in each step the data was permuted
# and a batch was chosen. Now the data is shuffled once and number_of_data/batch_size
# steps are taken per epoch, so the total number of samples seen stays balanced.
# -
# +
#gpu details:
################----------------------------
# Query current GPU utilisation via gpustat and print the formatted report.
from six.moves import cStringIO as StringIO
import gpustat
gpustats = gpustat.new_query()
# Render the stats into an in-memory buffer instead of straight to stdout.
fp = StringIO()
gpustats.print_formatted(
    fp=fp, no_color=False, show_user=False,
    show_cmd=False, show_pid=False, show_power=False, show_fan_speed=False)
result = fp.getvalue()
print('\n\n')
print(result)
################----------------------------
# -
# Load previously trained weights. The filenames contain no format
# placeholders, so the original `.format(e)` calls were no-ops (and would
# raise NameError if `e` was undefined); they have been removed.
generator.load_weights("e400_generator.h5")
discriminator.load_weights("e400_discriminator.h5")

# Sweep the latent space and render one pose image per step.
# NOTE(review): this feeds a (1, 5) latent vector, but the generators
# defined above take 1-3 inputs — presumably these weights come from an
# earlier 5-D latent model; confirm before running.
for z, i in enumerate(np.linspace(-3, 3, 100)):
    latent = np.array((-i, 0, 0, i, 0)).reshape(1, 5)
    image = np.array(generator.predict(latent))
    draw_pose(image.reshape(105), 'output', "e{0}".format(z))
| 3d-pose-gan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
import os
import re
import gzip
# +
from itertools import zip_longest
def grouper(infile, n, fillvalue=None):
    """Collect items from *infile* into fixed-length tuples of *n*.

    The final chunk is padded with *fillvalue* if the iterable runs out
    mid-chunk (standard itertools "grouper" recipe).
    """
    chunks = [iter(infile)] * n
    return zip_longest(*chunks, fillvalue=fillvalue)
# Trim every read in a gzipped FASTQ file down to its first 19 bases.
infile=os.path.join('../input_files_NETseq/wt-mmc.fastq.gz')
outfile=os.path.join('../input_files_NETseq/Trimmed_wt-mmc.fastq.gz')
with gzip.open(infile,'rt') as infile, gzip.open(outfile,'wt') as outfile:
    # A FASTQ record is 4 consecutive lines: header, sequence, '+' sign, quality.
    # NOTE(review): fillvalue ',' pads a truncated trailing record — presumably
    # input files always contain complete 4-line records; confirm.
    fastq_per_read = grouper(infile, 4, ',')
    for read in fastq_per_read:
        header, seq, sign, quality = read
        # BUG FIX: strip the newline before slicing. Previously, reads of 19
        # bases or fewer kept their '\n' inside the slice, so the extra '\n'
        # appended below produced blank lines in the output (which a later
        # cell then had to scrub out).
        trimmed_seq = seq.rstrip('\n')[:19]
        trimmed_quality = quality.rstrip('\n')[:19]
        outfile.write(header + trimmed_seq + '\n' + sign + trimmed_quality + '\n')
# -
# Remove blank lines that the trimming step can leave behind, writing a
# cleaned copy of the gzipped FASTQ file.
infile=os.path.join('../input_files_NETseq/Trimmed_wt-mmc.fastq.gz')
outfile=os.path.join('../input_files_NETseq/clean_Trimmed_wt-mmc.fastq.gz')
with gzip.open(infile,'rt') as infile:
    lines = infile.readlines()
with gzip.open(outfile, 'wt') as outfile:
    # Keep only lines with non-whitespace content.
    lines = filter(lambda x: x.strip(), lines)
    outfile.writelines(lines)
# +
# next, run bowtie 1.0 with 'clean_Trimmed_wt-mmc.fastq.gz', using the following command:
# ./bowtie -m 1 -v 0 U000963_index/U000963_index --suppress 1,3,6,7,8 U000963_reads/clean_Trimmed_wt-mmc.fastq.gz > wt_mmc_NET.map
#use the .map file as input for the next jupyter notebook
| examples/code_Nudler_git_NET_get_map_files_from_fastq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:dm] *
# language: python
# name: conda-env-dm-py
# ---
# +
import numpy as np
import pandas as pd
import warnings
import os
from tqdm import tqdm
from sklearn import preprocessing, metrics
import lightgbm as lgb
import matplotlib.pyplot as plt
import seaborn as sns
from joblib import Parallel, delayed
# %matplotlib inline
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
warnings.filterwarnings('ignore')
# -
# Load the precomputed "next action" labels.
next_action = pd.read_csv('./temp/next_action.csv')
next_action.head()
# Base features built in an earlier notebook.
df_feature = pd.read_pickle('./temp/base_feature.plk')
print(df_feature.shape)
# Left-join keeps every labelled row; the row count printed below should
# therefore match next_action's.
df_feature = next_action.merge(df_feature, how='left')
print(df_feature.shape)
df_feature['type'].value_counts()
df_feature.head()
df_feature.to_pickle('./temp/part2_feature.plk')
| part2_feature.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def amount(p0,rate,t,comps):
    """Return the balance after compound growth.

    Starts from principal ``p0`` and grows at annual ``rate``,
    compounded ``comps`` times per year, for ``t`` years.
    """
    balance = p0
    for _ in range(int(t * comps)):
        balance += balance * rate / comps
    return balance
def average(a,b):
    """Return the arithmetic mean of *a* and *b*."""
    return 0.5 * (a + b)
def bin_search(f,lower,upper,target,p0=1000,rate=0.08,comps=365):
    """Binary-search [lower, upper] for the time t at which
    f(p0, rate, t, comps) reaches *target*.

    ``f`` must be monotonically increasing in its time argument over the
    interval. Runs a fixed 20 halvings, so the answer is accurate to
    (upper - lower) / 2**20.

    The principal, rate, and compounding frequency used to be hard-coded
    inside the loop (hence the old "Be sure to change this line" comment);
    they are now keyword parameters defaulting to the original values, so
    existing calls behave identically.

    NOTE: on an exact hit this returns *guess* (the target amount), not
    the time — preserved from the original behaviour.
    """
    avg = lower
    for i in range(20):
        # Midpoint of the current bracket (inlined arithmetic mean).
        avg = (lower + upper) / 2
        guess = f(p0, rate, avg, comps)
        if guess == target:
            return guess
        if guess > target:
            upper = avg
        else:
            lower = avg
    return avg
# Preview growth of $1,000 at 8% compounded daily over several horizons.
for i in [10,20,30,40,50]:
    print(i,amount(1000,0.08,i,365))
# -
amount(1000,0.08,100,365)
# Find when $1,000 at 8% daily compounding reaches $1,000,000.
print(bin_search(amount,50,100,1000000))
def bin_search(f,lower,upper,target,p0=10000,rate=0.08,comps=365):
    """Binary-search [lower, upper] for the time t at which
    f(p0, rate, t, comps) reaches *target*.

    Redefinition of ``bin_search`` for the $10,000-principal scenario.
    The previously hard-coded principal/rate/compounding are now keyword
    parameters with the original values as defaults, so existing calls
    behave identically and no line needs hand-editing between scenarios.

    NOTE: on an exact hit this returns *guess* (the target amount), not
    the time — preserved from the original behaviour.
    """
    avg = lower
    for i in range(20):
        # Midpoint of the current bracket (inlined arithmetic mean).
        avg = (lower + upper) / 2
        guess = f(p0, rate, avg, comps)
        if guess == target:
            return guess
        if guess > target:
            upper = avg
        else:
            lower = avg
    return avg
# Preview growth of $10,000 at 8% compounded daily, then binary-search for
# the time at which the balance reaches $1,000,000.
for i in [10,15,20,30,40,50,60]:
    print(i,amount(10000,0.08,i,365))
print(bin_search(amount,50,60,1000000))
| Exercise05/Becoming_a_Millionaire.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Working with Pandas DataFrames
# ===========================
#
# <div class="overview">
# <p class="overview-title">Overview</p>
# <p>Questions</p>
# <ul>
# <li>How can I use pandas to examine data?
# </ul>
# <p>Objectives:</p>
# <ul>
# <li>Load a csv file into a pandas data frame.</li>
# <li>Learn the different ways to access information in a dataframe.</li>
# <li>Learn how to perform calculations on a column of data.</li>
# <li>Learn about built-in plotting in a dataframe</li>
# </ul>
# <p>Keypoints:</p>
# <ul>
# <li>Pandas is used to examine, process, and analyze tabular data.</li>
# <li>Pandas has many useful features allowing easy data manipulation and extraction.</li>
# </ul>
# </div>
# Pandas is a Python library used for data analysis and manipulation. Within the world of data science, it is a ubiquitous and widely used library. If you are learning how to analyze data in Python, it will be almost impossible to avoid pandas.
#
# For this workshop, we will be using two types of data structures - NumPy arrays and pandas DataFrames. Pandas dataframes are specifically for data which is two dimensional (rows and columns). NumPy arrays, while similar in some ways, are meant to work with higher dimensional data. We'll see this in a future lesson.
#
# Pandas is very powerful. In this session, we'll be learning how to access information in pandas dataframes and how to do some basic manipulation and analysis. The first step in using pandas is importing it. Typically when pandas is imported, it is shortened to `pd`. We are also importing `os` here to build a file path.
import os
import pandas as pd
# The first data set we are looking at in this class is solubility data. This is a data set which gives molecular descriptors along with solubility calculated using the ESOL method. The data was obtained from [this website](http://moleculenet.ai/datasets-1) and you can read more about the method [here](https://www.moreisdifferent.com/assets/DIY_drug_discovery/Delaney_paper.pdf). The dataset has been downloaded and placed in your `data` folder.
#
# Later in the workshop, we'll be seeing how we can fit this model ourselves using Python, but for now we are just going to examine the data using pandas. The data is saved in a csv file. CSV stands for "comma separated values". This is a file format where commas separate values. You can also typically read files like this into Excel. In fact, initially in this session you may not yet see what advantages pandas offers over Excel. However, functionalities in pandas will be more useful once we start cleaning and fitting data.
# Build the path with os.path.join so it is OS-independent, then load the
# CSV into a pandas DataFrame.
file_path = os.path.join("data", "delaney-processed.csv")
df = pd.read_csv(file_path)
# ## Examining the data
#
# Now that we have used the pandas `read_csv` function, our data is in a variable called `df`. This variable is a **pandas dataframe**. This means it has lots of special functionalities thanks to pandas. For example, you can preview the first 5 rows using the `.head` function.
df.head()
# The `.info` function will give information about the columns and the data type of those columns. The data type will become very important later as we work with more data.
df.info()
# Pandas assigns data types to columns, and will do its best to decide the data type for each column based on what is in the column. You will see that this dataframe has `1128` values in each column.
#
# We see that the column `Minimum Degree` has the data type of `int64`. Here, `int` means `integer` and `64` means `64 bit`. The `64 bit` refers to the amount of computer memory the variable can occupy. It won't really be important for us. Similarly, `float64` means `64 bit floating point`. These are decimal numbers.
#
# The other column names which read `object` are not numeric. They might be strings or they might be something else. We'll discuss more later.
#
# The `describe` function can be used on a dataframe to quickly see statistics about columns with numerical data. If you look at the columns that statistics are computed for and compare to the data type shown from `info`, you will see that we only get statistics for columns which had `int64` or `float64` data types.
df.describe()
# ## Accessing Data
#
# One great thing about pandas dataframes is how easy it is to access information. Pandas allows you to access information in a dataframe using both data indexes and names.
#
# Pandas dataframes have rows and columns, you can see how many rows and columns using `.shape`. This will return the shape as `(num_rows, num_columns)`.
df.shape
# ### Accessing with rows and column numbers
#
# #### The `.iloc` function
# Access on a particular row and column using `.iloc` followed by square brackets and the row and column numbers you want to access. If you only put one number, it will be assumed to be the row number you want.
#
# The following gets row number 35.
df.iloc[35]
# We can also specify a number of columns by adding a second number to the slicing.
df.iloc[35, :3]
# ### Accessing with names
#
# Each dataframe has an index (rows) and columns. The rows and columns have names. For the columns, these are indicated in `.head` by the bold row at the top of the dataframe. The row names are similarly listed in bold in the left of the dataframe. The index can be named, but by default it is usually just numbered.
#
# You can see information about the index or the columns using `df.index` or `df.columns`.
df.index
df.columns
# To access part of a dataframe using the index or column names, you would use the `loc` function.
#
# Because our index names are just numbers, this doesn't look that different than `iloc` when we are accessing rows.
#
# #### The `.loc` function
df.loc[35]
# However, we can now use column names
df.loc[35, "smiles"]
# #### Using column names
#
# You can pull a column or several columns of information using the syntax
#
# ```python
# df[column_name]
# ```
#
# To pull several columns, do
#
# ```python
# df[[colname1, colname2]]
# ```
#
# For example
df["Compound"]
df[["Compound", "smiles"]]
# ## Filtering Data
#
# There are two ways filtering in dataframes are typically done. Both are shown here for completeness, and because you will see both when looking at code others have written.
#
# In the first method, the strategy is that you first establish where something is true within a dataframe, then use that to filter the dataframe.
df["Polar Surface Area"] >17
# This gives us a list of values with either `True` or `False` that can be used to index into the dataframe.
df[df["Polar Surface Area"] > 17]
# This can also be accomplished using the pandas `query` function. When using the `query` function, you write your query as a string. If your column name has a space, you must surround it with backticks.
df.query("`Polar Surface Area` > 17")
# <div class="exercise-this-is-a-title exercise">
# <p class="exercise-title">Check your understanding</p>
# <p>Rewrite the following query to use the mask syntax for filtering data.</p>
#
# ```python
# df.query("`# of H-Bond Donors`>3")
# ```
#
# ```{admonition} Solution
# :class: dropdown
#
# ```python
# df[df["# of H-Bond Donors"] > 3]
# ```
# </div>
df.query("smiles.str.contains('=')")
# ## Sorting Data
#
# Pandas allows you to easily sort data using the `sort_values` method. Inside of the function call you list the column you would like to sort by. By default, the values will be sorted from lowest to highest (or `ascending`). This method will `return` a sorted dataframe.
df.sort_values("# of H-Bond Donors")
# If you include more than one column name, the dataframe will be sorted by multiple columns. First, it will be sorted by the first column indicated, then that sort will be sorted by the second. Consider the following examples. We first sort by `# of H-Bond Donors`. Within that sort, we sort by `Molecular Weight`.
df_sorted = df.sort_values(["# of H-Bond Donors", "Molecular Weight"])
df_sorted.head()
# Note the bolded indices to the left of the dataframe. After we perform our sort, these are no longer in order. In the sorted dataframe, `loc` and `iloc` do not return the same values using the same number.
df_sorted.loc[934]
df_sorted.iloc[934]
# ## Performing Operations on Multiple Cells
#
# ### Broadcasting
#
# Pandas dataframes have the convenient feature that they use something called `broadcasting`. This means that if you are doing something like subtracting a number, multiplying, etc to a column or dataframe of information, it can be done all at once instead of with a `for` loop. Consider if we wanted to express the molecular surface area in $nm^2$ instead of Å$^2$. To do this, we would need to divide each value by 100.
#
# Instead of writing a `for` loop that does this, we can just write the following code. This will return a pandas Series (one dimensional dataframe).
df["Polar Surface Area"] / 100
# To save it as a new column, we need to capture the output in a column. You can create a new column in a dataframe using the following syntax.
df["Polar Surface Area (nm^2)"] = df["Polar Surface Area"] / 100
# You can also add, subtract, or multiply two columns with one another. For example, we might want to calculate the difference between the predicted and observed values for this solubility model.
df["ESOL logP (mol/L)"] - df["measured logP (mol/L)"]
# <div class="exercise-this-is-a-title exercise">
# <p class="exercise-title">Check your understanding</p>
# <p> Save the difference between the predicted and measured solubility in a column named "difference" </p>
#
# ```{admonition} Solution
# :class: dropdown
#
# ```python
# df["difference"] = df["ESOL logP (mol/L)"] - df["measured logP (mol/L)"]
# ```
# </div>
# ### The `.applymap` method
#
# Sometimes the operations you want to do on a dataframe can't be achieved with broadcasting. One might think to use a `for` loop in times like these. However, many modern Python libraries try to make it so that you will rarely need to use a loop.
#
# In pandas, if you would like to perform an operation on every cell of a dataframe, you can use the `applymap` command. For example, if we wanted to know the number of letters in a compound name, we would use the python function `len`. The length function only works on strings, so let's see how we would use it on two columns which are strings.
df[["smiles", "Compound"]].applymap(len)
# You can also do this with custom functions. Consider this function which counts the number of C's in a string. For a SMILE string we'll consider that it counts the number of carbons.
def count_carbons(smiles):
    """Return the number of carbon atoms in a SMILES string.

    Both upper-case (aliphatic) and lower-case (aromatic) ``c`` symbols
    are counted, case-insensitively, one character at a time.
    """
    return sum(1 for symbol in smiles if symbol.lower() == "c")
df[["smiles", "Compound"]].applymap(count_carbons)
# ### The `.apply` method - for rows and columns
#
# The apply method is used to apply a function to either the rows or columns of a dataframe. If you use this on a single column, the function will be applied to every value.
df["Compound"].apply(len)
df[["smiles", "Compound"]].apply(len)
# ## Axes and Shapes
#
# We've already discussed the rows and columns in a dataframe. Also relevant here is the idea of a dataframe axis. Most functions in pandas can be used on either the rows or the columns, and can be specified by using the keyword `axis`. Typically, by default this axis will be set to 0 (rows).
#
# Axis 0 runs down the rows, while axis 1 runs across the columns. When thinking about this, consider the `iloc` function for indexing. You specify the index for the row in element 0 of this function (axis 0) and the index for the column in element 1.
#
# The `apply` method works on either rows or elements. Consider the result of what happens if we apply this function to axis 1, or the columns. Can you explain why we are seeing this answer?
df[["smiles", "Compound"]].apply(len, axis=1)
# For example, when we calculate the mean, you will get an average for every column. You do not get the average of all values. This is because the function is applied to axis 0, which runs down the dataframe.
df.mean()
# We can switch this to giving an average for each row by setting the axis to 1.
df.mean(axis=1)
# You'll notice that when we run a function on axis 0, our returned value has the same number of values as the number of columns in our original dataframe. The converse is true for applying across axis 1.
# If we select a column, the mean of all of the values are returned. This makes sense intuitively - it is how we might expect the function to behave. However, the function behaves this way because the function is acting on axis 0.
df["Polar Surface Area"].mean()
df["Polar Surface Area"].shape
# For this example, there is only one number here describing the shape of the dataframe. This is because selecting a single column of data in a DataFrame results in a slightly different data type. Instead of a Pandas DataFrame, we have a **Pandas Series**. Knowing and understanding the difference between a DataFrame and a Series will be important as you work with Pandas. There are many functions which are shared between the two data types, but there are also some that are specific to one data type or another.
#
# The concepts covered in this chapter are helpful for data exploration, and will become essential as we continue to work with pandas.
| book/1-pandas-dataframes-esol.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Кортежи (```tuple```)
#
# Помимо списков и словарей в Python реализовано еще несколько стандартных коллекций. Одной из таких коллекций являются кортежи. Кортежи - это неизменяемая коллекция, которая по своему подобию очень похожа на список. Кортежи можно создавать с помощью литерала круглых скобок ```()``` или с помощью одноименной функции ```tuple()```. Кортежи имеют одноименный тип ```tuple```.
a = ()
b = tuple()
print(f'{type(a) = }')
print(f'{type(b) = }')
# Непустой кортеж можно создать, записывая в круглых скобках элементы кортежа и разделяя их запятой. Одно из отличий от списков заключается в создании коллекции из одного элемента. Кортеж из одного элемента нельзя создать с помощью выражения ```(42)```, необходимо обязательно ставить запятую, т. е. ```(42, )```. Скобки можно опускать и записывать сразу элементы через запятую, в этом случае интерпретатор тоже создаст кортеж. Для большей читаемости кода не следует опускать скобки в одноэлементных кортежах.
a = (1, 2, 3)
b = (42, )
c = 1, 2, 3
d = 0,
print(f'{a = }')
print(f'{b = }')
print(f'{c = }')
print(f'{d = }')
# В кортежах, как и в списках, можно хранить разные типы данных, в том числе изменяемые, например, списки, словари и др.
# A tuple can mix arbitrary types, including mutable ones; the inline
# comments annotate the type of each element.
foo = (
    'parker_square', # str
    True, False, # bool
    None, # NoneType
    (42, 0.196, 1+2j), # tuple[int, float, complex]
    [1, 2, 3], # list[int]
    {
        'monty': 'python',
        '42': 'The Ultimate Question of Life, the Universe, and Everything',
    }, # dict
    print, # function
)
# Кортежи - это неизменяемый тип данных. Они хранят ссылки на объекты. Поэтому в кортежах можно хранить ссылку на изменяемые типы данных. При этом объекты изменяемых типов, хранимые в кортежах, по-прежнему можно изменять. Если в программе не планируется изменять какой-либо набор значений, то стоит выбрать кортеж для их хранения.
#
# Работа изменяемых и неизменяемых типов данных будет рассмотрена позднее.
a = (1, 2, [42])
a[-1].append(0)
print(f'{a = }')
# Однако, изменять сами элементы кортежа не разрешено. Попытка произвести эту операцию вызывает исключение ```TypeError```.
a = (1, 2, [42])
a[-1] = 0 # TypeError
# ## Операции с кортежами
#
# Кортежи - это неизменяемая коллекция. Поэтому они не поддерживают операции добавления, удаления и изменения элементов. Все остальные операции, аналогичные спискам, доступны:
# - индексирование;
# - срезы;
# - вычисление длины;
# - конкатенация;
# - умножение на константу;
# - подсчет всех вхождений элемента;
# - поиск первого вхождения элемента;
# - и другие.
# +
# Demonstrate the read-only tuple operations listed above.
a = (1, 2, 3, 1)
b = 3, 4
print(f'Длина: {len(a) = }')
print(f'Конкатенация: {a + b = }')
print(f'Умножение на целочисленную константу: {3 * a = }')
print(f'Индексация: {a[0] = }')
print(f'Индексация: {a[-2] = }')
# Fixed: use the same self-documenting `=` format as every other line.
print(f'Срез: {a[1::-1] = }')
print(f'Индекс первого вхождения: {a.index(1) = }')
# Fixed typo in the printed label: "вхлждения" -> "вхождений".
print(f'Подсчет всех вхождений: {a.count(1) = }')
# -
# Одним из применений кортежей является замена объектов и их имен местами. В Python выражения оцениваются слева направо (см. [документацию](https://docs.python.org/3/reference/expressions.html#evaluation-order)). Выражение ```a, b = b, a``` будет выполняться в следующем порядке. Сначала выполнится правая часть, т. е. будет создан кортеж из двух элементов со ссылками на объекты, на которые ссылаются ```b``` и ```a``` соответственно. Затем кортеж будет связан с левой частью. Так как в левой части стоят два имени, то он будет распакован. Таким образом первый элемент кортежа, который хранит ссылку на объект из ```b```, будет связан с именем ```a```, затем то же самое произойдет со вторым элементом кортежа, он будет связан с именем ```b```. Про замену значений читайте [обсуждение](https://stackoverflow.com/questions/14836228/is-there-a-standardized-method-to-swap-two-variables-in-python) на stackoverflow.
a, b = 42, 0
b, a = a, b
print(f'{a = }')
print(f'{b = }')
# В Python есть специальный модуль ```dis``` для просмотра байт-кода. Это специальное представление программы, которое непосредственно выполняется интерпретатором.
#
# Убедиться в порядке выполнения этого выражения можно с помощью модуля ```dis``` и его функции ```dis```, которая принимает строку Python кода.
# +
import dis
code = """
a, b = 1, 2
a, b = b, a
"""
dis.dis(code)
# -
# Блок с номером 2 отвечает за выполнение строки ```a, b = 1, 2```, а с номером 3 за строку ```a, b = b, a```. Здесь можно увидеть, что в первой строке сначала создается константа в виде кортежа из двух элементов 1 и 2.
#
# ```0 LOAD_CONST 0 ((1, 2))```
#
# Затем выполняется операция распаковки. Эта операция связывает каждый элемент из кортежа с соответствующим именем в порядке следования.
# # Кортежи vs списки
#
# Для ясного понимания в каких ситуациях стоит использовать списки, а в каких кортежи, нужно подробнее разобрать достоинства и недостатки обоих типов данных. Основным отличием этих типов является изменяемость. Кортежи -- это неизменяемый тип данных, а списки, наоборот, изменяемый. Это означает, что при попытке, например, добавить к кортежу новый элемент, произойдет создание абсолютно нового объекта. Поведение списков в этом плане совершенно другое. При добавлении или удалении элементов списка пересоздание объекта происходить не будет. Не стоит забывать, что из-за неизменяемости кортежей у них нет специализированных методов для операций добавления, удаления и вставки. Это означает, что добавить элемент в кортеж можно только конкатенацией его с другим кортежем.
a = (1, 2, 3)
b = [1, 2, 3]
id_a = id(a)
id_b = id(b)
a += (196, )
b += [196]
print(f'{id_a == id(a) = }')
print(f'{id_b == id(b) = }')
# Изменять элементы кортежа также не разрешено.
a = (1, 2, 3)
a[0] = 0 # TypeError
# Поскольку размер кортежей фиксирован и у них отсутствуют методы добавления и удаления элементов, то в памяти они хранятся несколько более эффективно, чем списки. Это связано с тем, что в списках заранее выделяется несколько больший размер памяти, чем требуется для хранения элементов, для обеспечения большей эффективности добавления новых элементов.
a = list(range(10))
b = tuple(a)
print(f'Размер списка a: {a.__sizeof__()} байт')
print(f'Размер кортежа b: {b.__sizeof__()} байт')
# Быстродействие разных операций с кортежами (создание, распаковка, доступ по индексу) также отличается от быстродействия этих операций со списками. Для измерения быстродействия воспользуемся модулем ```timeit```, подробнее о нем см. в [документации](https://docs.python.org/3/library/timeit.html). Если список или кортеж состоят из значений неизменяемых типов данных, кортеж сильно выигрывает в быстродействии операций создания и распаковки. Ситуация изменяется, когда в кортеже необходимо хранить объекты изменяемых типов. В этом случае различия в быстродействии становятся не такими значительными. Операция доступа по индексу осуществляется за сравнимое время у обоих типов коллекций.
# +
import timeit
print(f'Создание списка (1): {timeit.timeit("[1, 2, 3]")}')
print(f'Создание кортежа (1): {timeit.timeit("(1, 2, 3)")}')
print(f'Распаковка списка (1): {timeit.timeit("x, y, z = [1, 2, 3]")}')
print(f'Распаковка кортежа (1): {timeit.timeit("x, y, z = (1, 2, 3)")}')
print('-' * 50)
print(f'Создание списка (2): {timeit.timeit("[1, 2, [42]]")}')
print(f'Создание кортежа (2): {timeit.timeit("(1, 2, [42])")}')
print(f'Распаковка списка (2): {timeit.timeit("x, y, z = [1, 2, [42]]")}')
print(f'Распаковка кортежа (2): {timeit.timeit("x, y, z = (1, 2, [42])")}')
print('-' * 50)
print(f'Индексация списка: {timeit.timeit("a[1]", setup="a = [1, 2, 3]", number=10_000_000)}')
print(f'Индексация кортежа: {timeit.timeit("a[1]", setup="a = (1, 2, 3)", number=10_000_000)}')
# -
# Различия в производительности заключаются в оптимизациях, которые выполняются над кортежами. Ниже приведен пример байт-кода создания списка и кортежа, сгенерированный с помощью модуля ```dis```. В приведенном байт-коде видно, что при создании списка поочередно создаются объекты, которые в него входят. С созданием кортежа дело обстоит иначе. Он создается за одну операцию. Здесь используется оптимизация, называемая [сверткой констант](https://ru.wikipedia.org/wiki/%D0%A1%D0%B2%D1%91%D1%80%D1%82%D0%BA%D0%B0_%D0%BA%D0%BE%D0%BD%D1%81%D1%82%D0%B0%D0%BD%D1%82). Если интерпретатору доступны все значения, которые будут храниться в кортеже, то он их вычисляет заранее.
# +
import dis
dis.dis('[1, 2, 3]')
# -
dis.dis('(1, 2, 3)')
# Подводя итог, различия списков и кортежей можно представить в виде таблицы.
#
# | Критерий | Список | Кортеж |
# |-------------------------|:----------------------------------:|:--------------------------:|
# | *Литералы* | ```[]``` | ```()``` |
# | *Функции* | ```list``` | ```tuple``` |
# | *Изменяемый тип* | ✔ | ✖ |
# | *Сворачивание констант* | ✖ | ✔ |
# | *Хранимые типы данных* | Любые, желательно однородного типа | Любые, разнородного типа |
# | *Копирование* | Поверхностная и глубокая копия | Глубокая копия при наличии |
# | | | вложенных изменяемых типов |
# | *Потребление памяти* | Высокое, за счет дополнительных | Ниже чем у списков |
# | | накладных расходов | |
# | *Быстродействие* | Низкое для создания, распаковки | Высокое |
#
# Стоит обратить особое внимание на то, что списки предназначены для хранения последовательностей из однородных объектов. В списке индекс означает порядок следования элемента в последовательности. Кортежи, наоборот, предназначены для хранения разнородных данных. Кортежи обладают структурой, т. е. индекс элемента не просто указывает на порядок следования элемента в последовательности, но и имеет определенный смысл. Например, с помощью модуля ```time``` можно узнать текущее время и результат можно представить в виде кортежа чисел, где на первом месте всегда идет год, затем месяц и т. д. Другим примером являются координаты точек в трехмерном пространстве, представимые в виде ```(x, y, z)```, или, что то же самое, координаты геопозиционирования с широтой, долготой и высотой. Упорядочивать или сортировать такие последовательности не имеет смысла, иначе потеряется исходная структура данных и они станут бесполезными. Поэтому, если необходимо хранить данные, которые обладают некоторым смыслом, например данные о человеке (имя, фамилию, возраст и т. д.), то стоит выбирать кортеж.
# +
import time
date = time.localtime()
print(f'Год: {date[0]}')
print(f'Месяц: {date[1]}')
print(f'День: {date[2]}')
# -
# # Полезные ссылки
#
# - [Кортежи более эффективны, чем списки в Python? (здесь можно посмотреть как кортежи и списки представлены в виде Си структур)](https://stackoverflow.com/questions/68630/are-tuples-more-efficient-than-lists-in-python)
# - [В чем разница между списками и кортежами? (здесь хорошо описано, в каких случаях стоит применять кортежи)](https://stackoverflow.com/questions/626759/whats-the-difference-between-lists-and-tuples)
| python_pd/03_collections/basic_collections/04_tuple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Dabbling with Deep Learning, part I
#
# ### The MNIST dataset and logistic regression
#
# This is a shameless plug of the http://deeplearning.net/tutorial/logreg.html#logreg Deep Learning tutorial. I just keep it in my Github to be able to access it later and build upon it. Thus, the code from that site is here under their license which can be found in the LICENSE file of this repo.
#
#
# +
from __future__ import print_function
__docformat__ = 'restructedtext en'
import six.moves.cPickle as pickle
import gzip
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
import matplotlib
# %matplotlib inline
# -
class LogisticRegression(object):
    """Multi-class Logistic Regression Class

    The logistic regression is fully described by a weight matrix :math:`W`
    and bias vector :math:`b`. Classification is done by projecting data
    points onto a set of hyperplanes, the distance to which is used to
    determine a class membership probability.
    """

    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie
        """
        # start-snippet-1
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        # NOTE(review): borrow=True lets Theano alias the numpy buffer
        # instead of copying it — safe here because the array is not
        # referenced anywhere else.
        self.W = theano.shared(
            value=numpy.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(
            value=numpy.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )
        # symbolic expression for computing the matrix of class-membership
        # probabilities
        # Where:
        # W is a matrix where column-k represents the separation hyperplane
        # for class-k
        # x is a matrix where row-j represents input training sample-j
        # b is a vector where element-k represents the free parameter of
        # hyperplane-k
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
        # symbolic description of how to compute prediction as class whose
        # probability is maximal
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # end-snippet-1
        # parameters of the model (updated during gradient descent)
        self.params = [self.W, self.b]
        # keep track of model input so a predictor function can be compiled
        # later from a loaded (unpickled) instance
        self.input = input

    def negative_log_likelihood(self, y):
        r"""Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
            \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
            \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
        the learning rate is less dependent on the batch size
        """
        # start-snippet-2
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
        # end-snippet-2

    def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
def load_data(dataset):
    ''' Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)
    '''

    #############
    # LOAD DATA #
    #############

    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        from six.moves import urllib
        origin = (
            'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
        )
        print('Downloading data from %s' % origin)
        urllib.request.urlretrieve(origin, dataset)

    print('... loading data')

    # Load the dataset
    with gzip.open(dataset, 'rb') as f:
        try:
            train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
        except TypeError:
            # Python 2's pickle.load() does not accept the ``encoding``
            # keyword and raises TypeError during argument parsing, before
            # any bytes are consumed, so the stream is still at the start
            # for the fallback call.  The original bare ``except:`` also
            # swallowed genuine unpickling failures (corrupt file,
            # KeyboardInterrupt, ...); catching only TypeError keeps the
            # Py2/Py3 compatibility shim without hiding real errors.
            train_set, valid_set, test_set = pickle.load(f)
    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix)
    # where each row corresponds to an example. target is a
    # numpy.ndarray of 1 dimension (vector) that has the same length as
    # the number of rows in the input. It should give the target
    # to the example with the same index in the input.

    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
        return shared_x, T.cast(shared_y, 'int32')

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    # Returned as [(train), (valid), (test)] pairs of (inputs, labels).
    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
# Load (downloading if necessary) the MNIST dataset as Theano shared
# variables: [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)].
dta = load_data("mnist.pkl.gz")

# SGD training hyper-parameters.
n_epochs=50  # maximum number of passes over the training set
batch_size=20  # examples per minibatch
learning_rate = 0.1  # gradient-descent step size
# +
datasets = dta
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
# -
n_train_batches
# generate symbolic variables for input (x and y represent a
# minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# Each MNIST image has size 28*28
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
classifier
type(y)
# +
print('... building the model')
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a
# minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# Each MNIST image has size 28*28
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by
# the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# +
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# +
###############
# TRAIN MODEL #
###############
print('... training the model')
# early-stopping parameters
patience = 5000  # look at this many examples regardless
patience_increase = 2  # wait this much longer when a new best is
                       # found
improvement_threshold = 0.995  # a relative improvement of this much is
                               # considered significant
validation_frequency = min(n_train_batches, patience // 2)
                              # go through this many
                              # minibatches before checking the network
                              # on the validation set; in this case we
                              # check every epoch

best_validation_loss = numpy.inf
test_score = 0.
start_time = timeit.default_timer()

done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
    epoch = epoch + 1
    for minibatch_index in range(n_train_batches):

        minibatch_avg_cost = train_model(minibatch_index)
        # iteration number (NOTE(review): `iter` shadows the builtin)
        iter = (epoch - 1) * n_train_batches + minibatch_index

        if (iter + 1) % validation_frequency == 0:
            # compute zero-one loss on validation set
            validation_losses = [validate_model(i)
                                 for i in range(n_valid_batches)]
            this_validation_loss = numpy.mean(validation_losses)

            print(
                'epoch %i, minibatch %i/%i, validation error %f %%' %
                (
                    epoch,
                    minibatch_index + 1,
                    n_train_batches,
                    this_validation_loss * 100.
                )
            )

            # if we got the best validation score until now
            if this_validation_loss < best_validation_loss:
                # improve patience if loss improvement is good enough
                if this_validation_loss < best_validation_loss * \
                improvement_threshold:
                    patience = max(patience, iter * patience_increase)

                best_validation_loss = this_validation_loss

                # test it on the test set
                test_losses = [test_model(i)
                               for i in range(n_test_batches)]
                test_score = numpy.mean(test_losses)

                print(
                    (
                        ' epoch %i, minibatch %i/%i, test error of'
                        ' best model %f %%'
                    ) %
                    (
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        test_score * 100.
                    )
                )

                # save the best model (pickled whole classifier instance)
                with open('best_model.pkl', 'wb') as f:
                    pickle.dump(classifier, f)

        if patience <= iter:
            done_looping = True
            break

end_time = timeit.default_timer()
print(
    (
        'Optimization complete with best validation score of %f %%,'
        'with test performance %f %%'
    )
    % (best_validation_loss * 100., test_score * 100.)
)
# NOTE(review): "The code run for" in the message below is a typo in a
# runtime string; left untouched here to keep this a comments-only change.
print('The code run for %d epochs, with %f epochs/sec' % (
    epoch, 1. * epoch / (end_time - start_time)))
print(('The code ran for %.1fs' % ((end_time - start_time))), file=sys.stderr)
# -
def predict():
    """
    Load the trained model saved as ``best_model.pkl`` and use it to
    predict labels for the first 20 MNIST test images.
    """
    # load the saved model
    # BUG FIX: the pickle must be opened in binary mode ('rb'); the
    # original text-mode open fails under Python 3, and the handle was
    # never closed.  `with` handles both.
    with open('best_model.pkl', 'rb') as f:
        classifier = pickle.load(f)
    # compile a predictor function mapping input images to predicted labels
    predict_model = theano.function(
        inputs=[classifier.input],
        outputs=classifier.y_pred)
    # We can test it on some examples from test test
    dataset = 'mnist.pkl.gz'
    datasets = load_data(dataset)
    test_set_x, test_set_y = datasets[2]   # datasets = [train, valid, test]
    test_set_x = test_set_x.get_value()
    predicted_values = predict_model(test_set_x[:20])
    print("Predicted values for the first 20 examples in test set:")
    print(predicted_values)
predict()
# <img src="digits.png">
| lesson_1/dabbling_theano.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/woswos/bottomless-pit/blob/master/Python_Graphing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="qyoiLqAHETC_" colab_type="code" outputId="c97c59cc-a753-43d2-d4ce-020d27601893" colab={"base_uri": "https://localhost:8080/", "height": 660}
###############################################
# Alkaline and Nickel–metal hydride battery #
###############################################
# I vs Vext >>> slope should be R
# power consumption >> percent comparison Rint & Rext
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('classic')
# #%matplotlib inline
fig = plt.figure(figsize=(12, 7), dpi=100)
fig.patch.set_facecolor('white')
plt.rcParams["font.family"] = "monospace"
plt.rcParams["font.size"] = 14
# Measured percent of total power dissipated at R_int, alkaline cells
x1 = [5.1, 10.0, 15.1]
y1 = [8.5449, 4.5849, 3.3846]
# plot the alkaline measurements as points only
plt.plot(x1, y1, marker='o', color='red', linestyle=' ', label = 'Alkaline Battery')
x3 = np.linspace(0, 20, 100)
# Fit: 100 * R_int / (R_ext + R_int) with R_int = 0.4953 ohm.
# BUG FIX: the original `y3 = y = ...` chained assignment leaked a stray
# global `y`; only `y3` is needed.
y3 = (100 * 0.4953)/(x3 + 0.4953)
plt.plot(x3, y3, color='green', label = r'Alkaline Battery Fit ($y=\frac{0.49}{x + 0.49} \times 100$)')
# Measured values for the NiMH cells
x2 = [5.1, 10.0, 15.1]
y2 = [5.5233, 2.9176, 1.9651]
plt.plot(x2, y2, marker='o', color='blue', linestyle=' ', label = 'Nickel–metal hydride Battery')
x4 = np.linspace(0, 20, 100)
# Fit with R_int = 0.3005 ohm (same stray-`y` fix as above)
y4 = (100 * 0.3005)/(x4 + 0.3005)
plt.plot(x4, y4, color='orange', label = r'Nickel–metal hydride Battery Fit ($y=\frac{0.30}{x + 0.30} \times 100$)')
plt.axis([0, 16, 0, 100])
# naming the x axis
plt.xlabel(r'$\mathregular{R_{ext}}$ (Ω)')
# naming the y axis
plt.ylabel(r'% Power Dissipated at $R_{int}$(W)')
# show a legend on the plot
plt.legend(loc='best')
# function to show the plot
plt.show()
#fig.savefig('plot.png')
# + id="pHpot6nj-NcQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 660} outputId="cce88cce-0c0c-4c99-b68a-0369831cf09c"
##############################################
# Alkaline and Nickel–metal hydride battery #
###############################################
# I vs Vext >>> slope should be R
# power consumption >> percent comparison Rint & Rext
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('classic')
# #%matplotlib inline
fig = plt.figure(figsize=(12, 7), dpi=100)
fig.patch.set_facecolor('white')
plt.rcParams["font.family"] = "monospace"
plt.rcParams["font.size"] = 14
# Measured percent of total power dissipated at R_ext, NiMH cells
x6 = [5.1, 10.0, 15.1]
y6 = [91.455, 95.415, 96.615]
plt.plot(x6, y6, marker='o', color='blue', linestyle=' ', label = 'Nickel–metal hydride Battery')
x5 = np.linspace(0, 20, 100)
# Fit: 100 - 100 * R_int / (R_ext + R_int) with R_int = 0.5 ohm
y5 = 100-((100 * 0.5)/(x5 + 0.5))
plt.plot(x5, y5, color='orange', label = r'Nickel–metal hydride Battery Fit')
# Measured values for the alkaline cells
x8 = [5.1, 10.0, 15.1]
y8 = [94.477, 96.800, 98.035]
plt.plot(x8, y8, marker='o', color='red', linestyle=' ', label = 'Alkaline Battery')
x7 = np.linspace(0, 20, 100)
# Fit with R_int = 0.3 ohm.
# BUG FIX: the original evaluated this on x5 instead of x7 (harmless only
# because the two linspaces happen to be identical).
y7 = 100-((100 * 0.3)/(x7 + 0.3))
plt.plot(x7, y7, color='green', label = r'Alkaline Battery Fit')
plt.axis([0, 16, 0, 100])
# naming the x axis
plt.xlabel(r'$\mathregular{R_{ext}}$ (Ω)')
# naming the y axis
plt.ylabel(r'% Power Dissipated at $R_{ext}$(W)')
# show a legend on the plot
plt.legend(loc='lower right')
# function to show the plot
plt.show()
#fig.savefig('plot.png')
# + id="NdS_J_F5SBMD" colab_type="code" outputId="f162f2c8-fd7e-41af-f0de-ca8727e54f36" colab={"base_uri": "https://localhost:8080/", "height": 657}
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('classic')
# #%matplotlib inline
fig = plt.figure(figsize=(12, 7), dpi=100)
fig.patch.set_facecolor('white')
plt.rcParams["font.family"] = "monospace"
plt.rcParams["font.size"] = 14
# Terminal voltage vs. current for both battery types, with the linear
# fits V = EMF - R_int * I obtained from the measurements.
alk_current = [0.29, 0.154, 0.104]
alk_voltage = [1.477, 1.54, 1.57]
plt.plot(alk_current, alk_voltage, marker='o', color='red', linestyle=' ',
         label='Alkaline Battery')
fit_current = np.linspace(0, 0.4, 100)
alk_fit = 1.618 - 0.495 * fit_current
plt.plot(fit_current, alk_fit, color='green',
         label=r'Alkaline Battery Fit ($y=1.62 - 0.49 x$)')
nimh_current = [0.255, 0.133, 0.089]
nimh_voltage = [1.3, 1.331, 1.347]
plt.plot(nimh_current, nimh_voltage, marker='o', color='blue', linestyle=' ',
         label='Nickel–metal hydride Battery')
nimh_fit = 1.375 - 0.310 * fit_current
plt.plot(fit_current, nimh_fit, color='orange',
         label=r'Nickel–metal hydride Battery Fit ($y=1.38 - 0.31 x$)')
plt.axis([0, 0.4, 1, 2])
plt.xlabel('Current (A)')
plt.ylabel(r'Voltage drop across $R_{ext}$ (V)')
plt.legend(loc='best')
plt.show()
#fig.savefig('plot.png')
| python graphing/Python_Graphing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Baseline Classification Model: Random Forest on Individual Data
# This file is composed of a random forest classification model to evaluate a general accuracy level of traditional ML methods in classifying our HAR data based on activity.
# __INPUT: .csv files containing the individual sensor data (plain_data.csv)__
# __OUTPUT: Random Forest Multi-Classification Model (F1 Score=)__
# ## Imports
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import plot_confusion_matrix
# ## Read in Data
df = pd.read_csv("../../10_code/40_usable_data_for_models/41_Duke_Data/plain_data.csv")
df.sample(5)
df.shape
# ## Initial RF Model
# + tags=[]
rands = np.random.choice(df.Subject_ID.unique(),11, replace=False)
print(rands)
# -
# ### Split Subjects into Test and Train Sets (n=44,11)
test = df[df['Subject_ID'].isin(rands)]
train = df[-df['Subject_ID'].isin(rands)]
# ### Feature Selection
# BUG FIX: the original column list contained 'EDA' twice, silently
# duplicating that feature in both the train and test sets.
feature_cols = ['ACC1', 'ACC2', 'ACC3', 'TEMP', 'EDA', 'BVP', 'HR', 'Magnitude', 'Activity']
train = train[feature_cols]
test = test[feature_cols]
# ### Test-train Split
X_train = train.iloc[:, :-1].values
y_train = train.iloc[:, -1].values
X_test = test.iloc[:, :-1].values
y_test = test.iloc[:, -1].values
# ### Standardize Data
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# ### Random Forest Classifier Initialization
# BUG FIX: `clf` was never constructed before use, so `clf.fit` below
# raised a NameError.  Initialize the model under its header.
clf = RandomForestClassifier(n_estimators=100, random_state=42)
# ### Fit and Predict using RF model
# + tags=[]
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# -
# ### Evaluate Feature Importances
# +
importances = clf.feature_importances_
# std of each feature's importance across the ensemble's trees
std = np.std([tree.feature_importances_ for tree in clf.estimators_],
             axis=0)
# feature indices sorted by decreasing importance
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
    # BUG FIX: the original printed train.columns[f] next to
    # importances[indices[f]], mismatching feature names and scores.
    print(str(f+1) + ". " + train.columns[indices[f]]
          + " (Importance: " + str(importances[indices[f]]) + ")")
# -
# ### Confusion Matrix
# + tags=[]
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(clf, X_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
# -
# ### Evaluate Accuracy & F1 Score
scores = pd.DataFrame([accuracy_score(y_test, y_pred),f1_score(y_test, y_pred,average='weighted')], columns = ['Score'], index = ['Accuracy', 'F1'])
scores
| DigitalBiomarkers-HumanActivityRecognition/10_code/60_random_forests/62_rf_individual.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Jupyter Notebooks
# ==================
#
# A notebook consists in a set of cells. These cells are interpreted either as text instruction (i.e. **markdown**) or as Python **code**.
#
# * Each cell can be edited using ``[Enter]`` key (i.e. *edit mode*). To return to the *navigation mode*, use the ``[Esc]`` key.
#
# * To switch change a cell to a Python code cell, go in navigation mode and the press ``[y]`` key. Similary, to change a cell to a markdown cell, press the ``[m]`` key.
#
# * You can run a cell by pressing ``[shift] + [Enter]`` or by pressing the "play" button in the menu.
#
# 
#
# * You can get help on a function or object by pressing ``[shift] + [tab]`` after the opening parenthesis ``function(``
#
# 
#
# * You can also get help by executing ``function?``
#
# 
#
# Some useful keyboard shortcut (*you need to be in navigation mode*):
#
# * Add a cell above the current cell -> press ``[a]``
# * Add a cell below the current cell -> press ``[b]``
# * Select several cells -> hold ``[Shift]`` + arrow ``[up]`` or ``[down]``
# * Remove the selected cell(s) -> press ``[dd]``
# * Copy the selected cell(s) -> press ``[c]``
# * Cut the selected cell(s) -> press ``[x]``
# * Paste the copy/cut cell(s) -> press ``[v]``
# # Introduction to Python
# We introduce here the Python language. Only the bare minimum
# necessary for getting started with Numpy and Scipy is addressed here.
# To learn more about the language, consider going through the
# excellent tutorial http://www.scipy-lectures.org/intro/index.html. Dedicated books
# are also available, such as http://www.diveintopython.net/.
#
# Python is a **programming language**, as are C, Fortran, BASIC, PHP,
# etc. Some specific features of Python are as follows:
#
# * an *interpreted* (as opposed to *compiled*) language. Contrary to e.g.
# C or Fortran, one does not compile Python code before executing it. In
# addition, Python can be used **interactively**: many Python
# interpreters are available, from which commands and scripts can be
# executed.
#
# * a free software released under an **open-source** license: Python can
# be used and distributed free of charge, even for building commercial
# software.
#
# * **multi-platform**: Python is available for all major operating
# systems, Windows, Linux/Unix, MacOS X, most likely your mobile phone
# OS, etc.
#
# * a very readable language with clear non-verbose syntax
#
# * a language for which a large variety of high-quality packages are
# available for various applications, from web frameworks to scientific
# computing.
#
# * a language very easy to interface with other languages, in particular C
# and C++.
#
# * Some other features of the language are illustrated just below. For
# example, Python is an object-oriented language, with dynamic typing
# (the same variable can contain objects of different types during the
# course of a program).
#
#
# See https://www.python.org/about/ for more information about
# distinguishing features of Python.
# ## 0. Hello world
#
# you type in the cells, execute commands with shift + Enter
#
# you can add whatever you want
print("Hello world")
# # 1. Basic types
#
# ## 1.1. Integers
1+1
# We can assign values to variables with `=`
a = 4
type(a)
# Note that one does not declare the type of a variable before assigning its value.
# In C, conversely, one should write:
# ```C
# int a = 4;
# ```
# ## 1.2 Floats
#
# There exists a floating point type that is created when the variable has decimal values:
c = 2.1
c
type(c)
# This gives a different result when using `Python 2` and `Python 3`
1 / 2, 1. /2
# NB: this uses an object that we'll discover later, the `tuple`
t = 2*3, 4.
type(t)
# ## 1.3. Boolean
# Similarly, boolean types are created from a comparison:
3 > 4
test = (3 > 4)
type(test)
# NB: A Python shell can therefore replace your pocket calculator, with the basic arithmetic operations ``+``, ``-``, ``*``, ``/``, ``%`` (modulo) natively implemented
# ## 1.4. Type conversion (casting)
a = 1
type(a)
b = float(a)
type(b)
a, b
a == b
(1.0 + 0.1 - 1.0) == (1.0 - 1.0 + 0.1)
abs((1.0 + 0.1 - 1.0) - (1.0 - 1.0 + 0.1)) < 1e-10
# # 2. Containers
#
# Python provides many efficient types of containers, in which collections of objects can be stored. The main ones are `list`, `tuple`, `set` and `dict`
#
#
# ## 2.1. Tuples
tt = ('truc', 3.14)
tt
tt_list = ['truc', 3.14]
tt_list
tt_list.sort()
tt[0]
# You can't change a tuple, we say that it's *immutable*
tt[0] = 1
a = 2
a_list = [ a, 3]
a_list
a = 3
a_list
# ## 2.1. Lists
#
# A list is an ordered collection of objects, that may have different types. For example:
colors = ['red', 'blue', 'green', 'black', 'white']
type(colors)
# Indexing: accessing individual objects contained in the list::
colors[2]
# WARNING: **Indexing starts at 0** (as in C), not at 1 (as in Fortran or Matlab).
#
# Counting from the end with negative indices:
colors[-2]
colors[10]
len(colors)
colors[1] = 'purple'
# Afficher la liste
# Afficher la liste
colors
colors.sort()
colors
colors2 = colors
colors2.sort(reverse = True)
colors2
colors
colors2 = colors.copy()
colors.sort()
colors
colors2
colors3 = list(colors2)
colors3.sort()
colors2
# ## 2.2. Slicing: obtaining sublists of regularly-spaced elements
#
colors
colors[1:5:2]
# **Slicing syntax**: ``colors[start:stop:stride]``
#
# NB: All slicing parameters are optional
colors
colors[3:]
colors[:3]
colors[::2]
colors[2:0:-1]
colors[7:8]
# ## 2.3. Strings
#
# Different string syntaxes (simple, double or triple quotes):
s = 'asdfdfdf'
type(s)
s = "asdfdfd"
s = """asdfadsf
asdfdf
asdfdf
"""
s
print(s)
len(s)
list(s)
s.strip().split()
s_list = s.strip().split("\n")
s2_list = s_list
s_list[2]
s_list[2:3]
s[3:10]
s + ' coucou'
s.title()
# ## 2.4 Dictionaries
# A dictionary is basically an efficient table that **maps keys to values**. It is an **unordered** container
tel = {'emmanuelle': 5752, 'sebastian': 5578}
tel
tel['emmanuelle']
tel['francis']
tel['francis'] = 5919
tel
tel.keys()
tel.values()
'francis' in tel
# It can be used to conveniently store and retrieve values
# associated with a name (a string for a date, a name, etc.). See
# https://docs.python.org/tutorial/datastructures.html#dictionaries
# for more information.
#
# NB: A dictionary can have keys (resp. values) with different types:
d = {'a':1, 'b':2, 3: 'asdf'}
d
del d['a']
tel = {'emanuelle': 5752, 'sebastian' : 5578, 'francis' : 1234}
tel
tel['emanuelle'], tel['sebastian'] = tel['sebastian'], tel['emanuelle']
tel
# ## 2.5 Sets
#
# A set contain is an unordered container, containing unique elements
s = 'truc truc bidule truc'
set(s)
len(set(s))
set([1, 5, 2, 1, 1]).union(set([1, 2, 3]))
# You can use together all types together
dd = {'truc': [1, 2, 3],
5: (1, 4, 2),
(1, 3): set(['hello', 'world'])}
dd
# NB: any immutable type can be used as a key in a dict (such as a tuple)
dd[[2, 3]] = 4
# ## 2.6. Assigment
#
# Assigment operator `=` in python does not do a copy.
# It actually works as name binding (when object are mutable).
l1 = [4, 1, 3, 2, 2, 2]
l2 = l1
l1[0] = 123
l2
l2 = l1.copy()
l1[0] = -1
l2
l1.append(5)
l1
l1.pop()
l1.insert(1, 0)
l1
# # 3. Control Flow
#
# Test, loops, etc.
if 2 ** 2 == 4:
print('Obvious')
print('YES')
# ## 3.1. Blocks are delimited by indentation!
a = 2
if a == 1:
print(2)
elif a == 2:
print(1\
)
else:
print(a)
# ## 3.2. for/range
#
# Iteration with an index, with a list, with many things !
for i in range(4):
print(i + 1)
print('-')
for i in range(1, 5):
print(i)
print('-')
for i in range(1, 10, 3):
print(i)
s
for c in s.split(" "):
print(c)
for word in ['green', 'blue', 'yellow']:
print('best color is', word)
# ## 3.3. List Comprehensions
[i ** 2 for i in range(6)]
[2 ** i for i in range(9)]
# ## 3.4 While
a = 10
b = 1
while b < a:
b = b + 1
print(b)
# Compute the decimals of Pi using the Wallis formula:
# $$
# \pi = 2 \prod_{i=1}^{100} \frac{4i^2}{4i^2 - 1}
# $$
# Wallis product: pi = 2 * prod_i 4i^2 / (4i^2 - 1).
# Multiply terms until the per-step increment drops to eps or below.
pi = 2
eps = 1e-10
i = 1
while True:
    previous = pi
    term = 4 * i ** 2
    pi = pi * (term / (term - 1))
    i += 1
    if pi - previous <= eps:
        break
pi
# ## 3.4 zip / dict comprehension / itertools / enumerate
# +
s = "salut tintin"
{i: c for i, c in enumerate(s)}
# -
enumerate(s)
dict(enumerate(s))
s1 = 'machinee'; s2 = 'magiques'
zip(s1, s2)
list(zip(s1, s2))
dict(zip(s1, s2))
# # 4. Defining functions
#
# ## 4.1. Function definition
#
# Warning: Function blocks must be indented as other control-flow blocks.
def test():
print('in test function')
test()
# ## 4.2. Return statement
#
# Functions can *optionally* return values.
# Note: By default, functions return ``None``.
#
# The syntax to define a function:
#
# * the ``def`` keyword;
#
# * is followed by the function's **name**, then
#
# * the arguments of the function are given between parentheses followed
# by a colon.
#
# * the function body;
#
# * and ``return object`` for optionally returning values.
def f(x):
return x + 10
f(20)
# A function that returns several elements return a `tuple`
# +
def f():
return 1, 4
f()
# -
type(f())
# ## 4.3. Parameters
#
# Mandatory parameters (positional arguments)
#
# +
def double_it(x):
return x * 2
double_it(3)
# -
double_it()
# +
def double_it(x = 2):
return x * 2
double_it()
# -
double_it(3)
def f(x, y=2, * , z=10):
print(x, '+', y, '+', z, '=',
x + y + z)
f(5, 3, 7)
f(2)
f(5, -2)
f(5, z = -2)
f(5, z = -2, y = 3)
dd = {'y': 10, 'z': -5}
f(3, **dd)
# Prototype of all Python's functions is
def f(*args, **kwargs):
print('args=', args)
print('kwargs=', kwargs)
f(1, 3)
f(3, -2, y='truc')
# # 5. Object-oriented programming (OOP)
#
# Python supports object-oriented programming (OOP). The goals of OOP are:
#
# - to organize the code, and
# - to re-use code in similar contexts.
#
# Here is a small example: we create a `Student` class, which is an object
# gathering several custom functions (called *methods*) and variables
# (called *attributes*).
s = 'truc'
s.title()
# +
class Student(object):
    """A student with a name, an age and a major subject."""

    def __init__(self, name, age, major='computer science'):
        self.name = name
        self.age = age
        self.major = major

    def to_string(self):
        """Return 'name=..., age=..., major=...'."""
        parts = []
        for attr in ('name', 'age', 'major'):
            parts.append(attr + '=' + str(getattr(self, attr)))
        return ', '.join(parts)

    def show(self):
        """Print the string representation."""
        print(self.to_string())

anna = Student('anna', 23)
anna.show()
# -
# **Inheritance**: MasterStudent is a Student with extra mandatory `Internship` attribute
class MasterStudent(Student):
    """A Student with a mandatory internship.

    NOTE(review): the attribute is spelled 'intership' (sic); kept
    as-is because renaming it would break existing callers.
    """
    def __init__(self, name, age, intership,
                 major='computer science'):
        # initialize the Student part, then record the internship
        Student.__init__(self, name, age, major)
        self.intership = intership
    def to_string(self):
        # extend the parent's representation with the internship field
        s = Student.to_string(self)
        return str.join(', ', [s, 'intership=' + self.intership])
djalil = MasterStudent('djalil', 22, 'pwc')
djalil.show()
| 00_python/0_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_amazonei_mxnet_p36
# language: python
# name: conda_amazonei_mxnet_p36
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import sagemaker
from sagemaker.estimator import Estimator
from sagemaker.predictor import RealTimePredictor
from sagemaker.predictor import csv_serializer
# *Defining Hyperparameters*
interval='D' # Daily
lag=20 # The time of context
horizon=10 #The time for which it can forecast
dateformat='%Y-%m-%d'
target_stock='BMW'
covariate_stocks='CON,DAI,PAH3,VOW3'
target_column='EndPrice'
covariate_columns='StartPrice,MinPrice,MaxPrice'
num_epochs=1000
batch_size=4096
dropout_ratio=0.1
percent_train=0.85
# +
hyperparameters = {
"interval": interval,
"lag": str(lag),
"horizon": str(horizon),
"num_epochs": str(num_epochs),
"batch_size": str(batch_size),
"percent_train": str(percent_train),
# "num_units": str(num_units),
"target_stock": target_stock,
"covariate_stocks": covariate_stocks,
"target_column": target_column,
"covariate_columns": covariate_columns,
"dropout_ratio": str(dropout_ratio)
}
# -
# * TO get the stock data from s3 to notebook *
# +
train_data_location='s3://sagemaker-us-west-2-886035371869/Capstone Project/KSE - 100 Index.csv'
df=pd.read_csv(train_data_location,index_col='Date')
# +
numeric_df=pd.DataFrame(index=df.index,columns=df.columns)
'''
'''
numeric_df.head()
# +
# Strip the index labels down to digits/dashes and the column labels
# down to letters.
# BUG FIX: in Python 3 `filter` returns a lazy iterator, so the original
# appended filter objects instead of cleaned strings; join the surviving
# characters back into a string.
indices = []
for raw_index in df.index:
    indices.append(''.join(ch for ch in raw_index if ch.isdigit() or ch == '-'))
column_names = []
for raw_col in df.columns:
    column_names.append(''.join(ch for ch in raw_col if ch.isalpha()))
# -
df.set
for index in indices:
for column in column_names:
x=filter(lambda x:x.isdigit(),df[index][column])
print(x)
numeric_df.head()
# * Now we are going to check if we are having any null values
df.isna().sum()
# # Now we are going to predict the closing values, so we are making Closing Values df
#
closing_values_df=df['Close'].copy()
closing_values_df.head()
# * Now we have to make new df which contains only numeric values,
# currently the closing values are strings and they contain a lot of empty strings in start
# Keep only the digit characters of each closing-value string.
# BUG FIX: `filter` returns an iterator in Python 3, so the original
# stored filter objects; join the digits into a string instead.
values = []
for raw in closing_values_df.values:
    values.append(''.join(ch for ch in raw if ch.isdigit()))
df_values=pd.Series(values)
#df_values=df_values.astype(int)
#pd.to_numeric(df_values,downcast='float')
#df_values.reindex_like(df)
'''
plt.figure(figsize=(12,6))
df_values=df_values.astype(float)
df_values.plot(title='Closing Values',color='blue')
plt.show()
'''
df_values['Index']=closing_values_df.index
#df_values.set_index(df_values['Index'],inplace=True,drop=True)
df_values.head(-10)
df_values=pd.to_numeric(df_values,downcast='float',errors='coerce')
df_values
plt.figure(figsize=(12,6))
df_values=df_values.astype(float)
df_values.plot(title='Closing Values',color='blue')
plt.show()
df_values.drop(labels='Index',axis=1,inplace=True)
df_values.head()
df_values.index.name='Date'
print(df_values.index.name)
df_values.head()
pd.to_numeric(df_values,downcast='float')
df_values.head()
df_values.squeeze(axis=1)
df_values.shape
df_values=df_values.astype(float)
plt.figure(figsize=(12,6))
df_values.plot(title='Closing Values',color='blue')
plt.show()
| Stock_Price_Prediction_Capstone.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:wells]
# language: python
# name: conda-env-wells-py
# ---
# # Striplog from CSV
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import striplog
striplog.__version__
# -
from striplog import Lexicon, Decor, Component, Legend, Interval, Striplog
# ## Make legend
#
# Most of the stuff in the dicts you made were about **display** — so they are going to make `Decor` objects. A collection of `Decor`s makes a `Legend`. A `Legend` determines how a striplog is displayed.
#
# First I'll make the components, since those are easy. I'll move `'train'` into there too, since it is to do with the rocks, not the display. If it seems weird having `'train'` in the `Component` (which is really supposed to be about direct descriptions of the rock, but the idea is that it's always the same for all specimens of that rock so it does fit here) then you could put it in `data` instead.
# +
# define your abbreviations here, this is what the CSV will read in
facies = {
's': Component({'lithology': 'sandstone', 'train':'y'}),
'os': Component({'lithology': 'oilstained', 'train':'y'}),
'is': Component({'lithology': 'clay_prone_sandstone', 'train':'y'}),
'ih': Component({'lithology': 'sandy_mudstone', 'train':'y'}),
'sh': Component({'lithology': 'mudstone', 'train':'y'}),
'bs': Component({'lithology': 'sandstone', 'train': 'n'}),
't': Component({'lithology': 'turbidite', 'train':'y'}),
'nc': Component({'lithology': 'none', 'train':'n'}),
}
# -
facies
# +
# pick how your facies will look in the log, see striplog for specfics
sandstone = Decor({
'component': facies['s'],
'colour': 'yellow',
'hatch': '.',
'width': '4',
})
oilstained = Decor({
'component': facies['os'],
'colour': 'brown',
'hatch': '/',
'width': '3',
})
clay_prone_sandstone = Decor({
'component': facies['is'],
'colour': 'greenyellow',
'hatch': '--',
'width': '3',
})
sandy_mudstone = Decor({
'component': facies['ih'],
'colour': 'darkseagreen',
'hatch': '---',
'width': '2',
})
mudstone = Decor({
'component': facies['sh'],
'colour': 'darkgray',
'hatch': '-',
'width': '1',
})
# Not sure about the best way to do this, probably better
# just to omit those intervals completely.
badsand = Decor({
'component': facies['bs'],
'colour': 'orange',
'hatch': '.',
'width': '4',
})
nocore = Decor({
'component': facies['nc'],
'colour': 'white',
'hatch': '/',
'width': '5',
})
turbidite = Decor({
'component': facies['t'],
'colour': 'green',
'hatch': 'xxx',
'width': '3',
})
# -
legend = Legend([sandstone, oilstained, badsand, clay_prone_sandstone, sandy_mudstone, mudstone, turbidite, nocore])
legend
# ## Read CSV into striplog
strip = Striplog.from_csv('test2.csv') # literally reading in the CSV file
strip[0] # quick test
# ## Deal with lithology
#
# The lithology has been turned into a component, but it's using the abbreviation... I can't figure out an elegant way to deal with this so, for now, we'll just loop over the striplog and fix it. We read the `data` item's lithology (`'s'` in the top layer), then look up the correct lithology name in our abbreviation dictionary, then add the new component in the proper place. Finally, we delete the `data` we had.
s.data
# Replace each interval's abbreviation (carried in `data`) with the full
# Component looked up in the `facies` dict, then clear the temporary data.
for s in strip:
    lith = s.data['lithology']   # abbreviation from the CSV, e.g. 's', 'os'
    s.components = [facies[lith]]
    s.data = {}
strip[0]
# That's better!
strip.plot(legend)
# ## Remove non-training layers
strip
strip_train = Striplog([s for s in strip if s.primary['train'] == 'y'])
# +
strip_train
strip_train.plot(legend)
# +
# plottin' & exportin'
fig, (ax0, ax1) = plt.subplots(1, 2, sharey=True)
fig.set_size_inches(7, 11) # Set the figure size
ax0 = strip.plot(legend, ax=ax0)
ax1 = strip_train.plot(legend, ax=ax1)
ax0.set_title('Picks')
ax1.set_title('Training')
ax0.set_ylabel('Depth (m)', size=16)
plt.show()
fig.savefig('Picks_v_training.pdf', dpi=300)
# -
print(strip.to_csv())
| CSV2Striplog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="L4SdLmyrHVrx"
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import roc_auc_score
from sklearn.feature_extraction.text import TfidfVectorizer
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="0nyuiZuTHVr3" outputId="250608bc-3bed-4f47-914d-b4cb9857c17a"
df = pd.read_csv('Amazon_Unlocked_Mobile.csv')
df.head(10)
# + id="5fiMZ2EEHVsJ"
df.dropna(inplace=True)
# + id="3DiYCQRbHVsN"
df = df[df['Rating'] != 3]
# + id="GmQsxmzPHVsQ"
df['Positively Rated'] = np.where(df['Rating'] > 3, 1, 0)
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="6yHk4AE5HVsT" outputId="11640fdf-a316-45d2-ce93-72e100812302"
df.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="Luf0a8JYHVsV" outputId="d34e4118-f178-4205-bd17-a68162db4699"
df['Positively Rated'].mean()
# + id="_qAoDM9HHVsY"
X_train, X_test, y_train, y_test = train_test_split(df['Reviews'],
df['Positively Rated'],
random_state=0)
# + colab={"base_uri": "https://localhost:8080/"} id="VqLnA_-GHVsb" outputId="7d36f8c8-a2ca-486b-dcc3-a8bb0f89505d"
print('X_train first entry:\n\n', X_train.iloc[0])
print('\n\nX_train shape: ', X_train.shape)
# + id="3Gqkef8tHVsf"
# Fit a bag-of-words vocabulary on the training reviews only (no test leakage).
vect = CountVectorizer().fit(X_train)
# + colab={"base_uri": "https://localhost:8080/"} id="YCd-d7A7HVsh" outputId="85f9053c-626d-4f7f-c1dd-b310b979854c"
# Sample every 2000th vocabulary term to eyeball the tokenization.
# get_feature_names() was deprecated in scikit-learn 1.0 and removed in 1.2;
# get_feature_names_out() is the supported replacement.
vect.get_feature_names_out()[::2000]
# + colab={"base_uri": "https://localhost:8080/"} id="NfZoMk23HVsj" outputId="5c3f3f22-5dd1-4a44-cd80-db78a93e003f"
# Vocabulary size.
len(vect.get_feature_names_out())
# + colab={"base_uri": "https://localhost:8080/"} id="kW8_Rs3uHVsk" outputId="b6be3d09-bdff-4332-ee23-6821034f69d1"
# Sparse document-term matrix: rows = reviews, columns = vocabulary terms.
X_train_vectorized = vect.transform(X_train)
X_train_vectorized
# + colab={"base_uri": "https://localhost:8080/"} id="Ikn--xsgHVsm" outputId="d040886a-7003-4b30-8c8a-e4522c503a72"
model = LogisticRegression(max_iter = 1000)
#model = MultinomialNB()
model.fit(X_train_vectorized, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="Ms7szefeHVsn" outputId="fc3360a6-9b83-4a74-bfde-94192e257b7b"
# NOTE(review): AUC is computed on hard 0/1 predictions here; using
# model.predict_proba(...)[:, 1] would give a proper ranking-based AUC.
# Left as-is to preserve the notebook's reported numbers — confirm intent.
predictions = model.predict(vect.transform(X_test))
print('AUC: ', roc_auc_score(y_test, predictions))
# + colab={"base_uri": "https://localhost:8080/"} id="ik_miAfhHVso" outputId="4708777f-23e4-41cf-a5bf-ab38ad21e3f9"
# Sort terms by their logistic-regression weight: most negative and most
# positive coefficients show which words drive each class.
feature_names = np.array(vect.get_feature_names_out())
sorted_coef_index = model.coef_[0].argsort()
print('Smallest Coefs:\n{}\n'.format(feature_names[sorted_coef_index[:10]]))
print('Largest Coefs: \n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))
# + colab={"base_uri": "https://localhost:8080/"} id="gxnpU5f3HVsp" outputId="929dd445-68a5-4b08-f26e-54ac60aa59b5"
# Re-vectorize with tf-idf weighting; min_df=5 drops terms seen in <5 documents.
vect = TfidfVectorizer(min_df=5).fit(X_train)
# get_feature_names() was removed in scikit-learn 1.2; use get_feature_names_out().
len(vect.get_feature_names_out())
# + colab={"base_uri": "https://localhost:8080/"} id="9mZox91hHVsp" outputId="a6ac3531-34de-46f3-bdc7-09e5b3d162e7"
X_train_vectorized = vect.transform(X_train)
model = LogisticRegression(max_iter = 1000)
#model = MultinomialNB()
model.fit(X_train_vectorized, y_train)
predictions = model.predict(vect.transform(X_test))
print('AUC: ', roc_auc_score(y_test, predictions))
# + colab={"base_uri": "https://localhost:8080/"} id="rwww6fwUHVsr" outputId="7cf9b064-e183-4dbe-ba6e-b135a0069cf3"
# Rank terms by their maximum tf-idf score across all training documents:
# low max-tf-idf terms are uniformly common, high ones are distinctive.
feature_names = np.array(vect.get_feature_names_out())
sorted_tfidf_index = X_train_vectorized.max(0).toarray()[0].argsort()
print('Smallest tfidf:\n{}\n'.format(feature_names[sorted_tfidf_index[:10]]))
print('Largest tfidf: \n{}'.format(feature_names[sorted_tfidf_index[:-11:-1]]))
# + colab={"base_uri": "https://localhost:8080/"} id="BCdVOBRQHVsr" outputId="deddb568-7254-4631-e7da-034d211dfb97"
# Most negative / most positive model coefficients for the tf-idf features.
sorted_coef_index = model.coef_[0].argsort()
print('Smallest Coefs:\n{}\n'.format(feature_names[sorted_coef_index[:10]]))
print('Largest Coefs: \n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))
# + colab={"base_uri": "https://localhost:8080/"} id="WRywOiDVHVss" outputId="cfd372ba-ac69-4ec3-a4aa-fafbdbd31426"
# Sanity check: unigram models cannot distinguish these two sentences, which
# contain the same words in different order ("not" attaches differently).
print(model.predict(vect.transform(['not an issue, phone is working',
                                    'an issue, phone is not working'])))
# + colab={"base_uri": "https://localhost:8080/"} id="K5HHIXuVHVst" outputId="5e8ebf19-b956-4514-d999-1fe416b515a5"
#extracting 1-grams and 2-grams
vect = CountVectorizer(min_df=5, ngram_range=(1,2)).fit(X_train)
X_train_vectorized = vect.transform(X_train)
# get_feature_names() was removed in scikit-learn 1.2; use get_feature_names_out().
len(vect.get_feature_names_out())
# + colab={"base_uri": "https://localhost:8080/"} id="o-Itp57vHVsu" outputId="2f5bca29-570f-4e76-f8b4-80f7ed8ec7c0"
model = LogisticRegression(max_iter = 1000)
#model = MultinomialNB()
model.fit(X_train_vectorized, y_train)
predictions = model.predict(vect.transform(X_test))
print('AUC: ', roc_auc_score(y_test, predictions))
# + colab={"base_uri": "https://localhost:8080/"} id="lLFUO5hhHVsu" outputId="f1396540-a26d-4ade-caef-ea4f1f881fa0"
# With bigrams, phrases like "not working" get their own weights.
feature_names = np.array(vect.get_feature_names_out())
sorted_coef_index = model.coef_[0].argsort()
print('Smallest Coefs:\n{}\n'.format(feature_names[sorted_coef_index[:10]]))
print('Largest Coefs: \n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))
# + colab={"base_uri": "https://localhost:8080/"} id="JbpbS33oHVsv" outputId="a3d99bb7-7a9a-4e6b-ac0b-92c693c337e7"
# Re-run the sanity check: the bigram model can now tell the sentences apart.
print(model.predict(vect.transform(['not an issue, phone is working',
                                    'an issue, phone is not working'])))
| Amazon_Reviews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
import numpy as np
import matplotlib.pyplot as plt
# +
# Unicode symbols to test as plot markers (hexagon, smiley, squares, etc.).
symbols = [u'\u2B21', u'\u263A', u'\u29C6', u'\u2B14', u'\u2B1A', u'\u25A6', u'\u229E', u'\u22A0', u'\u22A1', u'\u20DF']
x = np.arange(10.)
y = np.arange(10.)
# Approach 1: draw each symbol with plt.text at every data point.
plt.figure()
for i, symbol in enumerate(symbols):
    # Offset each symbol's line vertically by 4 units so they don't overlap.
    y2 = y + 4*i
    plt.plot(x, y2, 'g')
    for x0, y0 in zip(x, y2):
        # STIXGeneral covers these glyphs; clip_on keeps text inside the axes.
        plt.text(x0, y0, symbol, fontname='STIXGeneral', size=30, va='center', ha='center', clip_on=True)
plt.show()
# -
# Approach 2: use the symbol as a mathtext marker ("$...$") directly in plot().
plt.figure()
for i, symbol in enumerate(symbols):
    y2 = y + 4*i
    plt.plot(x, y2, 'g')
    marker = "$%s$" % symbol
    plt.plot(x, y2, 'k', marker=marker, markersize=30)
| MarkerMagic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# + [markdown] hideCode=false hidePrompt=false
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# + [markdown] hideCode=false hidePrompt=false
# 
# + [markdown] hideCode=false hidePrompt=false
# # Automated Machine Learning
# **Beer Production Forecasting**
#
# ## Contents
# 1. [Introduction](#Introduction)
# 1. [Setup](#Setup)
# 1. [Data](#Data)
# 1. [Train](#Train)
# 1. [Evaluate](#Evaluate)
# + [markdown] hideCode=false hidePrompt=false
# ## Introduction
# This notebook demonstrates demand forecasting for Beer Production Dataset using AutoML.
#
# AutoML highlights here include using Deep Learning forecasts, Arima, Prophet, Remote Execution and Remote Inferencing, and working with the `forecast` function. Please also look at the additional forecasting notebooks, which document lagging, rolling windows, forecast quantiles, other ways to use the forecast function, and forecaster deployment.
#
# Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.
#
# Notebook synopsis:
#
# 1. Creating an Experiment in an existing Workspace
# 2. Configuration and remote run of AutoML for a time-series model exploring Regression learners, Arima, Prophet and DNNs
# 4. Evaluating the fitted model using a rolling test
# + [markdown] hideCode=false hidePrompt=false
# ## Setup
#
# + hideCode=false hidePrompt=false
import os
import azureml.core
import pandas as pd
import numpy as np
import logging
import warnings
from pandas.tseries.frequencies import to_offset
# Squash warning messages for cleaner output in the notebook
warnings.showwarning = lambda *args, **kwargs: None
from azureml.core.workspace import Workspace
from azureml.core.experiment import Experiment
from azureml.train.automl import AutoMLConfig
from matplotlib import pyplot as plt
from sklearn.metrics import mean_absolute_error, mean_squared_error
from azureml.train.estimator import Estimator
# -
# This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
print("This notebook was created using version 1.33.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
# + [markdown] hideCode=false hidePrompt=false
# As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem.
# + hideCode=false hidePrompt=false
# Load the workspace from the local config.json written by the configuration notebook.
ws = Workspace.from_config()

# choose a name for the run history container in the workspace
experiment_name = 'beer-remote-cpu'
experiment = Experiment(ws, experiment_name)

# Summarize the workspace/experiment settings in a one-row transposed frame.
output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Run History Name'] = experiment_name
# Show full (untruncated) cell contents. Passing -1 was deprecated in pandas 1.0
# and raises in pandas 2.x; None is the documented "no limit" value.
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
# + [markdown] hideCode=false hidePrompt=false
# ### Using AmlCompute
# You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for your AutoML run. In this tutorial, you use `AmlCompute` as your training compute resource.
#
# > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.
# + hideCode=false hidePrompt=false
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cpu_cluster_name = "beer-cluster"
# Verify that cluster does not exist already
try:
    # Attach to the cluster if it already exists in this workspace (EAFP).
    compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    # Cluster not found: provision a new autoscaling AmlCompute cluster (up to 4 nodes).
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',
                                                           max_nodes=4)
    compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
# Block until provisioning finishes, printing progress.
compute_target.wait_for_completion(show_output=True)
# + [markdown] hideCode=false hidePrompt=false
# ## Data
# Read Beer demand data from file, and preview data.
# + [markdown] hideCode=false hidePrompt=false
# Let's set up what we know about the dataset.
#
# **Target column** is what we want to forecast.
#
# **Time column** is the time axis along which to predict.
#
# **Time series identifier columns** are identified by values of the columns listed `time_series_id_column_names`, for example "store" and "item" if your data has multiple time series of sales, one series for each combination of store and item sold.
#
# **Forecast frequency (freq)** This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.
#
# This dataset has only one time series. Please see the [orange juice notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales) for an example of a multi-time series dataset.
# + hideCode=false hidePrompt=false
import pandas as pd
from pandas import DataFrame
from pandas import Grouper
from pandas import concat
from pandas.plotting import register_matplotlib_converters
# Register pandas datetime converters with matplotlib (needed for date axes).
register_matplotlib_converters()
plt.figure(figsize=(20, 10))
plt.tight_layout()
plt.subplot(2, 1, 1)
plt.title('Beer Production By Year')
# Load train/test CSVs with DATE as a parsed datetime index; the 'grain'
# column is a single-series identifier and is not needed here.
df = pd.read_csv("Beer_no_valid_split_train.csv", parse_dates=True, index_col= 'DATE').drop(columns='grain')
test_df = pd.read_csv("Beer_no_valid_split_test.csv", parse_dates=True, index_col= 'DATE').drop(columns='grain')
plt.plot(df)
plt.subplot(2, 1, 2)
plt.title('Beer Production By Month')
# Group observations by calendar month (1-12) across all years, then box-plot
# each month's distribution to show seasonality.
groups = df.groupby(df.index.month)
months = concat([DataFrame(x[1].values) for x in groups], axis=1)
months = DataFrame(months)
# Label the 12 columns with month numbers; assumes every month 1-12 is present.
months.columns = range(1,13)
months.boxplot()
plt.show()
# + hideCode=false hidePrompt=false
# Forecasting task configuration: what to predict, along which time axis.
target_column_name = 'BeerProduction'
time_column_name = 'DATE'
time_series_id_column_names = []
freq = 'M' #Monthly data
# -
# ### Split Training data into Train and Validation set and Upload to Datastores
# + hideCode=false hidePrompt=false
from helper import split_fraction_by_grain
from helper import split_full_for_forecasting
# Chronological split of the training data into train/validation (project helper).
train, valid = split_full_for_forecasting(df, time_column_name)
train.to_csv("train.csv")
valid.to_csv("valid.csv")
test_df.to_csv("test.csv")
# Upload the three CSVs to the workspace's default datastore so the remote
# compute cluster can read them.
datastore = ws.get_default_datastore()
datastore.upload_files(files = ['./train.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)
datastore.upload_files(files = ['./valid.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)
datastore.upload_files(files = ['./test.csv'], target_path = 'beer-dataset/tabular/', overwrite = True,show_progress = True)
from azureml.core import Dataset
# Register the uploaded files as TabularDatasets for AutoML consumption.
train_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/train.csv')])
valid_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/valid.csv')])
test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])
# + [markdown] hideCode=false hidePrompt=false
# ### Setting forecaster maximum horizon
#
# The forecast horizon is the number of periods into the future that the model should predict. Here, we set the horizon to 12 periods (i.e. 12 months). Notice that this is much shorter than the number of months in the test set; we will need to use a rolling test to evaluate the performance on the whole test set. For more discussion of forecast horizons and guiding principles for setting them, please see the [energy demand notebook](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand).
# + hideCode=false hidePrompt=false
forecast_horizon = 12
# + [markdown] hideCode=false hidePrompt=false
# ## Train
#
# Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment.
#
# |Property|Description|
# |-|-|
# |**task**|forecasting|
# |**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>
# |**iteration_timeout_minutes**|Time limit in minutes for each iteration.|
# |**training_data**|Input dataset, containing both features and label column.|
# |**label_column_name**|The name of the label column.|
# |**enable_dnn**|Enable Forecasting DNNs|
#
# + hideCode=false hidePrompt=false
from azureml.automl.core.forecasting_parameters import ForecastingParameters
# Time-series settings: predict 12 months ahead at month-start frequency.
forecasting_parameters = ForecastingParameters(
    time_column_name=time_column_name,
    forecast_horizon=forecast_horizon,
    freq='MS' # Set the forecast frequency to be monthly (start of the month)
)
# We will disable the enable_early_stopping flag to ensure the DNN model is recommended for demonstration purpose.
automl_config = AutoMLConfig(task='forecasting',
                             primary_metric='normalized_root_mean_squared_error',
                             experiment_timeout_hours = 1,
                             training_data=train_dataset,
                             label_column_name=target_column_name,
                             validation_data=valid_dataset,
                             verbosity=logging.INFO,
                             compute_target=compute_target,
                             max_concurrent_iterations=4,
                             max_cores_per_iteration=-1,
                             enable_dnn=True,
                             enable_early_stopping=False,
                             forecasting_parameters=forecasting_parameters)
# + [markdown] hideCode=false hidePrompt=false
# We will now run the experiment, starting with 10 iterations of model search. The experiment can be continued for more iterations if more accurate results are required. Validation errors and current status will be shown when setting `show_output=True` and the execution will be synchronous.
# + hideCode=false hidePrompt=false
# Submit to the remote cluster and stream status until completion (synchronous).
remote_run = experiment.submit(automl_config, show_output= True)
# + hideCode=false hidePrompt=false
# If you need to retrieve a run that already started, use the following code
# from azureml.train.automl.run import AutoMLRun
# remote_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>')
# + [markdown] hideCode=false hidePrompt=false
# Displaying the run objects gives you links to the visual tools in the Azure Portal. Go try them!
# + [markdown] hideCode=false hidePrompt=false
# ### Retrieve the Best Model for Each Algorithm
# Below we select the best pipeline from our iterations. The get_output method on automl_classifier returns the best run and the fitted model for the last fit invocation. There are overloads on get_output that allow you to retrieve the best run and fitted model for any logged metric or a particular iteration.
# + hideCode=false hidePrompt=false
from helper import get_result_df
# Per-algorithm summary of the AutoML run, indexed by model name (project helper).
summary_df = get_result_df(remote_run)
summary_df
# + hideCode=false hidePrompt=false
from azureml.core.run import Run
from azureml.widgets import RunDetails
# Prefer the TCNForecaster DNN; older SDKs registered it as 'ForecastTCN'.
# Membership on a pandas Series tests the INDEX (here: model names), which
# matches the label lookup below. `x not in y` is the idiomatic form of
# the original `not x in y`.
forecast_model = 'TCNForecaster'
if forecast_model not in summary_df['run_id']:
    forecast_model = 'ForecastTCN'
best_dnn_run_id = summary_df['run_id'][forecast_model]
best_dnn_run = Run(experiment, best_dnn_run_id)
# + hideCode=false hidePrompt=false
# Show the parent (overall AutoML) run widget.
best_dnn_run.parent
RunDetails(best_dnn_run.parent).show()
# + hideCode=false hidePrompt=false
# Show the selected child (best DNN) run widget.
best_dnn_run
RunDetails(best_dnn_run).show()
# + [markdown] hideCode=false hidePrompt=false
# ## Evaluate on Test Data
# + [markdown] hideCode=false hidePrompt=false
# We now use the best fitted model from the AutoML Run to make forecasts for the test set.
#
# We always score on the original dataset whose schema matches the training set schema.
# + hideCode=false hidePrompt=false
from azureml.core import Dataset
test_dataset = Dataset.Tabular.from_delimited_files(path = [(datastore, 'beer-dataset/tabular/test.csv')])
# preview the first 5 rows of the dataset
test_dataset.take(5).to_pandas_dataframe()
# -
# NOTE(review): hardcodes the cluster name; cpu_cluster_name defined earlier is
# the same value — consider reusing that variable.
compute_target = ws.compute_targets['beer-cluster']
test_experiment = Experiment(ws, experiment_name + "_test")
# + hideCode=false hidePrompt=false
import os
import shutil
# Stage the scoring script into ./inference for remote submission.
script_folder = os.path.join(os.getcwd(), 'inference')
os.makedirs(script_folder, exist_ok=True)
shutil.copy('infer.py', script_folder)
# +
from helper import run_inference
# Rolling-forecast inference for the best DNN run on the test set (project helper).
test_run = run_inference(test_experiment, compute_target, script_folder, best_dnn_run, test_dataset, valid_dataset, forecast_horizon,
                         target_column_name, time_column_name, freq)
# -
RunDetails(test_run).show()
# +
from helper import run_multiple_inferences
# Kick off inference runs for every model in the summary (project helper).
summary_df = run_multiple_inferences(summary_df, experiment, test_experiment, compute_target, script_folder, test_dataset,
                                     valid_dataset, forecast_horizon, target_column_name, time_column_name, freq)
# + hideCode=false hidePrompt=false
# Wait for each inference run and copy its primary-metric score into the summary.
for run_name, run_summary in summary_df.iterrows():
    print(run_name)
    print(run_summary)
    run_id = run_summary.run_id
    test_run_id = run_summary.test_run_id
    test_run = Run(test_experiment, test_run_id)
    test_run.wait_for_completion()
    test_score = test_run.get_metrics()[run_summary.primary_metric]
    summary_df.loc[summary_df.run_id == run_id, 'Test Score'] = test_score
    print("Test Score: ", test_score)
# + hideCode=false hidePrompt=false
summary_df
# -
| how-to-use-azureml/automated-machine-learning/forecasting-beer-remote/auto-ml-forecasting-beer-remote.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Python](https://www.kaggle.com/learn/python) course. You can reference the tutorial at [this link](https://www.kaggle.com/colinmorris/booleans-and-conditionals).**
#
# ---
#
# In this exercise, you'll put to work what you have learned about booleans and conditionals.
#
# To get started, **run the setup code below** before writing your own code (and if you leave this notebook and come back later, don't forget to run the setup code again).
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex3 import *
print('Setup complete.')
# # 1.
#
# Many programming languages have [`sign`](https://en.wikipedia.org/wiki/Sign_function) available as a built-in function. Python doesn't, but we can define our own!
#
# In the cell below, define a function called `sign` which takes a numerical argument and returns -1 if it's negative, 1 if it's positive, and 0 if it's 0.
# Your code goes here. Define a function called 'sign'
def sign(number):
    """Return -1, 0, or 1 according to the sign of the numeric argument."""
    # Comparison booleans subtract as ints: True-False -> 1, etc.
    return (number > 0) - (number < 0)
# Check your answer
q1.check()
# +
#q1.solution()
# -
# # 2.
#
# We've decided to add "logging" to our `to_smash` function from the previous exercise.
# +
def to_smash(total_candies):
    """Return the number of leftover candies that must be smashed after distributing
    the given number of candies evenly between 3 friends.

    >>> to_smash(91)
    1
    """
    # Log how many candies are being split, then report the remainder mod 3.
    leftover = total_candies % 3
    print("Splitting", total_candies, "candies")
    return leftover

to_smash(91)
# -
# What happens if we call it with `total_candies = 1`?
to_smash(1)
# That isn't great grammar!
#
# Modify the definition in the cell below to correct the grammar of our print statement. (If there's only one candy, we should use the singular "candy" instead of the plural "candies")
def to_smash(total_candies):
    """Return the number of leftover candies that must be smashed after distributing
    the given number of candies evenly between 3 friends.

    >>> to_smash(91)
    1
    """
    # Guard the singular case first so the message uses correct grammar.
    if total_candies == 1:
        print("There is only", total_candies,"candy")
    elif total_candies > 1:
        print("Splitting", total_candies, "candies")
    return total_candies % 3

to_smash(91)
to_smash(1)
# To get credit for completing this problem, and to see the official answer, run the code cell below.
# +
# Check your answer (Run this code cell to receive credit!)
#q2.solution()
# -
# # 3. <span title="A bit spicy" style="color: darkgreen ">🌶️</span>
#
# In the tutorial, we talked about deciding whether we're prepared for the weather. I said that I'm safe from today's weather if...
# - I have an umbrella...
# - or if the rain isn't too heavy and I have a hood...
# - otherwise, I'm still fine unless it's raining *and* it's a workday
#
# The function below uses our first attempt at turning this logic into a Python expression. I claimed that there was a bug in that code. Can you find it?
#
# To prove that `prepared_for_weather` is buggy, come up with a set of inputs where either:
# - the function returns `False` (but should have returned `True`), or
# - the function returned `True` (but should have returned `False`).
#
# To get credit for completing this question, your code should return a <font color='#33cc33'>Correct</font> result.
# +
def prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday):
    # Don't change this code. Our goal is just to find the bug, not fix it!
    # The intended logic is: umbrella, OR (light rain AND hood), OR
    # NOT (raining AND workday). Due to operator precedence the last clause
    # actually parses as (not (rain_level > 0)) and is_workday, i.e.
    # "it's dry AND it's a workday" — the exercise's intentional bug.
    return have_umbrella or rain_level < 5 and have_hood or not rain_level > 0 and is_workday

# Change the values of these inputs so they represent a case where prepared_for_weather
# returns the wrong answer.
# Counterexample: a dry non-workday should be safe (True), but the buggy
# expression evaluates to False here.
have_umbrella = False
rain_level = 0.0
have_hood = False
is_workday = False

# Check what the function returns given the current values of the variables above
actual = prepared_for_weather(have_umbrella, rain_level, have_hood, is_workday)
print(actual)

# Check your answer
q3.check()
# +
#q3.hint()
#q3.solution()
# -
# # 4.
#
# The function `is_negative` below is implemented correctly - it returns True if the given number is negative and False otherwise.
#
# However, it's more verbose than it needs to be. We can actually reduce the number of lines of code in this function by *75%* while keeping the same behaviour.
#
# See if you can come up with an equivalent body that uses just **one line** of code, and put it in the function `concise_is_negative`. (HINT: you don't even need Python's ternary syntax)
# +
def is_negative(number):
    """Verbose form: explicit True/False branches (kept long on purpose
    for comparison with the one-line version below)."""
    if number >= 0:
        return False
    else:
        return True

def concise_is_negative(number):
    """One-line equivalent of is_negative: a comparison already yields a bool."""
    return number < 0
# Check your answer
q4.check()
# +
#q4.hint()
#q4.solution()
# -
# # 5a.
#
# The boolean variables `ketchup`, `mustard` and `onion` represent whether a customer wants a particular topping on their hot dog. We want to implement a number of boolean functions that correspond to some yes-or-no questions about the customer's order. For example:
def onionless(ketchup, mustard, onion):
    """Return whether the customer doesn't want onions."""
    # Conditional expression form of `not onion`.
    return False if onion else True
# +
def wants_all_toppings(ketchup, mustard, onion):
    """Return whether the customer wants "the works" (all 3 toppings)."""
    # all() over the three booleans is equivalent to chaining them with `and`.
    return all((ketchup, mustard, onion))
# Check your answer
q5.a.check()
# +
#q5.a.hint()
#q5.a.solution()
# -
# # 5b.
#
# For the next function, fill in the body to match the English description in the docstring.
# +
def wants_plain_hotdog(ketchup, mustard, onion):
    """Return whether the customer wants a plain hot dog with no toppings."""
    # De Morgan: (not a) and (not b) and (not c) == not (a or b or c).
    return not (ketchup or mustard or onion)
# Check your answer
q5.b.check()
# +
#q5.b.hint()
#q5.b.solution()
# -
# # 5c.
#
# You know what to do: for the next function, fill in the body to match the English description in the docstring.
# +
def exactly_one_sauce(ketchup, mustard, onion):
    """Return whether the customer wants either ketchup or mustard, but not both.
    (You may be familiar with this operation under the name "exclusive or")
    """
    # For booleans, inequality IS exclusive-or.
    return ketchup != mustard
# Check your answer
q5.c.check()
# +
#q5.c.hint()
#q5.c.solution()
# -
# # 6. <span title="A bit spicy" style="color: darkgreen ">🌶️</span>
#
# We’ve seen that calling `bool()` on an integer returns `False` if it’s equal to 0 and `True` otherwise. What happens if we call `int()` on a bool? Try it out in the notebook cell below.
#
# Can you take advantage of this to write a succinct function that corresponds to the English sentence "does the customer want exactly one topping?"?
# +
def exactly_one_topping(ketchup, mustard, onion):
    """Return whether the customer wants exactly one of the three available toppings
    on their hot dog.
    """
    # Booleans sum as 0/1, so the int() casts are implicit here.
    return (ketchup + mustard + onion) == 1
# Check your answer
q6.check()
# +
#q6.hint()
#q6.solution()
# -
# # 7. <span title="A bit spicy" style="color: darkgreen ">🌶️</span> (Optional)
#
# In this problem we'll be working with a simplified version of [blackjack](https://en.wikipedia.org/wiki/Blackjack) (aka twenty-one). In this version there is one player (who you'll control) and a dealer. Play proceeds as follows:
#
# - The player is dealt two face-up cards. The dealer is dealt one face-up card.
# - The player may ask to be dealt another card ('hit') as many times as they wish. If the sum of their cards exceeds 21, they lose the round immediately.
# - The dealer then deals additional cards to himself until either:
# - the sum of the dealer's cards exceeds 21, in which case the player wins the round
# - the sum of the dealer's cards is greater than or equal to 17. If the player's total is greater than the dealer's, the player wins. Otherwise, the dealer wins (even in case of a tie).
#
# When calculating the sum of cards, Jack, Queen, and King count for 10. Aces can count as 1 or 11 (when referring to a player's "total" above, we mean the largest total that can be made without exceeding 21. So e.g. A+8 = 19, A+8+8 = 17)
#
# For this problem, you'll write a function representing the player's decision-making strategy in this game. We've provided a very unintelligent implementation below:
def should_hit(dealer_total, player_total, player_low_aces, player_high_aces):
    """Decide whether the player should request another card.

    Returns True to hit, False to stay. Aces count as high (11) where that
    keeps the total at 21 or below, otherwise low (1); e.g. a hand of
    {A, A, A, 7} gives player_total=20, player_low_aces=2, player_high_aces=1.
    """
    # A total of 11 or less can never bust on the next card, so always hit.
    return player_total <= 11
# This very conservative agent *always* sticks with the hand of two cards that they're dealt.
#
# We'll be simulating games between your player agent and our own dealer agent by calling your function.
#
# Try running the function below to see an example of a simulated game:
q7.simulate_one_game()
# The real test of your agent's mettle is their average win rate over many games. Try calling the function below to simulate 50000 games of blackjack (it may take a couple seconds):
q7.simulate(n_games=50000)
# Our dumb agent that completely ignores the game state still manages to win shockingly often!
#
# Try adding some more smarts to the `should_hit` function and see how it affects the results.
# +
def should_hit(dealer_total, player_total, player_low_aces, player_high_aces):
    """Decide whether the player should request another card (True = hit).

    When calculating a hand's total value, aces count as "high" (value 11) if
    doing so doesn't bring the total above 21, otherwise as low (value 1).
    For example, a hand of {A, A, A, 7} counts as 11 + 1 + 1 + 7, giving
    player_total=20, player_low_aces=2, player_high_aces=1.

    Improvement over the previous always-conservative version (the exercise
    asks for "more smarts"): besides the free hits at <=11, hit soft hands
    and hit stiff hands when the dealer shows a strong up-card.
    """
    # Totals of 11 or less can never bust on the next card: always hit.
    if player_total <= 11:
        return True
    # "Soft" hand: a high ace can drop to 1 if we draw big, so hitting is cheap.
    if player_high_aces > 0 and player_total <= 17:
        return True
    # Stiff hand (12-16): hit only against a strong dealer up-card (7+),
    # since the dealer is then likely to finish with 17 or more.
    if player_total <= 16 and dealer_total >= 7:
        return True
    # Otherwise stand and let the dealer risk busting.
    return False
q7.simulate(n_games=50000)
# -
# # Keep Going
#
# Learn about **[lists and tuples](https://www.kaggle.com/colinmorris/lists)** to handle multiple items of data in a systematic way.
# ---
#
#
#
#
# *Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/python/discussion) to chat with other learners.*
| Kaggle Course/Course Codes/exercise-booleans-and-conditionals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap
def plot_examples(cms):
    """Draw the same random 30x30 field under each of two colormaps, side by side.

    cms: a pair of colormap objects to compare.
    """
    # Fixed seed so both calls in this notebook render the identical field.
    np.random.seed(19680801)
    sample = np.random.randn(30, 30)
    fig, axes = plt.subplots(1, 2, figsize=(6, 3), constrained_layout=True)
    for axis, colormap in zip(axes, cms):
        # Shared vmin/vmax so the two panels are directly comparable.
        quad = axis.pcolormesh(sample, cmap=colormap, rasterized=True, vmin=-4, vmax=4)
        fig.colorbar(quad, ax=axis)
    plt.show()
# # Segment of a prestablished cmap
# +
# Take a 512-entry 'copper' map and keep only the first 75% of its range.
# NOTE(review): cm.get_cmap is deprecated since matplotlib 3.7
# (matplotlib.colormaps[...] is the replacement) — confirm target version.
GeneralCmap = cm.get_cmap('copper', 512)
newcmp = ListedColormap(GeneralCmap(np.linspace(0.0, 0.75, 256)))
plot_examples([GeneralCmap, newcmp])
# -
# # Concatenation of two cmaps
# +
# Stack 256 colors from 'copper' on top of 256 from reversed 'ocean' to form
# a single 512-color map.
Cmap1 = cm.get_cmap('copper', 512)
Cmap2 = cm.get_cmap('ocean_r', 512)
colors = np.vstack((Cmap1(np.linspace(0., .80, 256)),
                    Cmap2(np.linspace(0.20, .90, 256))
                    ))
newcmp = ListedColormap(colors)
# Compare against GeneralCmap from the previous cell (also 'copper', 512).
plot_examples([GeneralCmap, newcmp])
# -
| PythonCodes/PlottingTools/ColormapTester.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# %matplotlib inline
# Load the MNIST CSV: column 0 is the digit label, the rest are 784 pixels.
data=pd.read_csv("E:/data/mnist/mnist.csv")
data.head(10)
# Checking the pixel values of two sample rows in the dataset.
a=data.iloc[0,1:].values
b=data.iloc[4,1:].values
# Reshape the flat 784-pixel vectors into 28x28 images for display.
a=a.reshape(28,28).astype('uint8')
b=b.reshape(28,28).astype('uint8')
plt.imshow(a)
plt.imshow(b)
# +
# Split features x (pixel columns) from target y (label column).
x=data.iloc[:,1:]
y=data.iloc[:,0]
# +
# 80/20 train/test split with a fixed seed for reproducibility.
xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.2,random_state=4)
# -
ytrain.head(3)
# +
# Random forest with 100 trees (the sklearn default in recent versions).
rf=RandomForestClassifier(n_estimators=100)
# -
# Fit the model on the training pixels/labels.
rf.fit(xtrain,ytrain)
# Predict labels for the held-out test set.
pred=rf.predict(xtest)
pred
ytest.head(3)
# +
# Manual accuracy check: count how many predictions match the true labels.
k=ytest.values
# Calculate no. of correctly predicted values.
count=0
for i in range(len(pred)):
    if pred[i]==k[i]:
        count=count+1
# -
count
# Number of test samples the predictions were made for.
len(pred)
# Hand-computed accuracy from one earlier run (count/len(pred) snapshot);
# the accuracy_score call below is the general-purpose equivalent.
11626/12000
from sklearn.metrics import accuracy_score #using accuracy_score
accuracy_score(pred,ytest)
| Image classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Exceptions
# Exception - also known as errors - are special objects that are said to be thrown when your script cannot run as intended. Exceptions can be raised accidentally (for instance, by dividing a number by 0), or intentionally, as we will see later below
# + [markdown] deletable=true editable=true
# ## 1. Known exceptions
# If you've made to this part of the course, you'll like have encountered some of the below exceptions.
# -
#trying to use a variable which was never created
filename
#dividing by 0
1/0
#trying to do math on two objects of different types
"3.14" + 8
#indexing beyond the length of a list
primes = [2,3,5,7,11,13]
primes[10]
#passing an inappropriate type to a function
price = input("What's the price?")
print(round(price))
#Accessing an inexistent key in a dictionary
commodities = {"CL":"Crude", "SB":"Sugar"}
print(commodities["GC"])
#Trying to change an immutable object, like a tuple
countries = ("France", "UK", "Netherlands")
countries[1] = "Spain"
# ## 2. Catching errors
# As you will see above, errors have a type. Sometimes, its a `KeyError`, at other times it is a `TypeError`. Sometimes, having errors is great: it tells you that what you wrote isn't correct, and that is much better than having a program run silently. At other times, you will want to catch errors as they occur and handle them. Exception handling takes the below form:
# ```python
# try:
# "try to execute this block"
# except ("exception type" as e):
# "do something when there is an error of that type"
# "the part in brackets is optional if you want to catch all errors"
# finally:
# "do something in any case"
# "this block is optional"
# ```
try:
kids = ["Kate", "Will", "Don"]
print("{} is my favourite kid".format(kids[4]))
except:
print("You cannot have a favourite child!")
try:
answer = 1/0
except ZeroDivisionError:
print("You tried to divide by 0")
answer = "infinity"
finally:
print("The answer is therefore", answer)
#You can silence an exception using the pass keyword
try:
answer = 1/0
except:
pass #essentially means: do nothing about it
# ## 3. Raising your own errors
# While raising errors doesn't sound that appealing, it is in reality a great way of making your code more robust. You simple use the following syntax:
# ```python
# raise Exception("Some message")
# ```
#
# You can use different exception types, `Exception` being the most generic one. Other exception types include:
# - `KeyError`, used when you are trying to access a key that does not exist in a mapping
# - `ValueError`, used when an argument has an unexpected value
# - `TypeError`, used when the type of a variable is not of the expected type(s)
#
# +
#You can check arguments passed to a function
def iseven(number):
    """Return True if *number* is an even integer, False otherwise.

    Raises:
        TypeError: if *number* is not an int, so callers get a clear
            error instead of a confusing one further down the line.
    """
    if not isinstance(number, int):
        # The original message claimed a "positive integer" was required,
        # but the check only enforces the type, not the sign.
        raise TypeError('The iseven function can only accept an integer')
    return number % 2 == 0
print(iseven(4))
print(iseven("Hi!"))
# -
# ## 4. Assertions
# If you ever want to make sure some statement is True in your code, you can `assert` a statement. If the statement is True, nothing will happen. If however the statement is False, the code will lift an `AssertionError`. This could be useful at times, if only during debugging.
# +
def isprime(number):
    """Return True when *number* is prime (asserts number > 1 first)."""
    # The assertion deliberately aborts on inputs <= 1 -- this function is
    # the tutorial's demonstration of `assert`.
    assert number > 1
    return all(number % candidate != 0 for candidate in range(2, number))
print(isprime(-10))
# -
# Compared with raising exceptions however, assertions are relatively more limited: you can't provide a message, and it is generally more rigorous to raise Exceptions than to simply assert a statement.
| 16 - Exceptions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# +
# default_exp utils.utils
# +
# export
import sys
import os
import soundfile as sf
import pandas as pd
import numpy as np
import soundfile as sf
import librosa
from torch.nn import functional as F
def load_filepaths_and_text(filename: str, split: str = "|"):
    """Read *filename* and return one list per line, split on *split*."""
    with open(filename, encoding="utf-8") as handle:
        return [row.strip().split(split) for row in handle]
# +
# export
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(
    window,
    n_frames,
    hop_length=200,
    win_length=800,
    n_fft=800,
    dtype=np.float32,
    norm=None,
):
    """
    # from librosa 0.6
    Compute the sum-square envelope of a window function at a given hop length.
    This is used to estimate modulation effects induced by windowing
    observations in short-time fourier transforms.
    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`
    n_frames : int > 0
        The number of analysis frames
    hop_length : int > 0
        The number of samples to advance between frames
    win_length : [optional]
        The length of the window function.  By default, this matches `n_fft`.
    n_fft : int > 0
        The length of each analysis frame.
    dtype : np.dtype
        The data type of the output
    Returns
    -------
    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
        The sum-squared envelope of the window function
    """
    if win_length is None:
        win_length = n_fft
    n = n_fft + hop_length * (n_frames - 1)
    x = np.zeros(n, dtype=dtype)
    # Compute the squared window at the desired length
    win_sq = get_window(window, win_length, fftbins=True)
    win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2
    # Pass `size` by keyword: it is keyword-only in librosa >= 0.10, and the
    # keyword form also works on older releases.
    win_sq = librosa_util.pad_center(win_sq, size=n_fft)
    # Fill the envelope by accumulating the squared window at each hop,
    # clipped to the output buffer at the right edge.
    for i in range(n_frames):
        sample = i * hop_length
        x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
    return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
    """
    PARAMS
    ------
    magnitudes: spectrogram magnitudes
    stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
    """
    # Start from uniformly random phases on the unit circle.
    random_phase = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
    phase = torch.autograd.Variable(torch.from_numpy(random_phase.astype(np.float32)))
    signal = stft_fn.inverse(magnitudes, phase).squeeze(1)
    # Alternate between time and frequency domains, keeping the known
    # magnitudes and refining only the phase estimate each round trip.
    for _ in range(n_iters):
        _, phase = stft_fn.transform(signal)
        signal = stft_fn.inverse(magnitudes, phase).squeeze(1)
    return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Log-compress *x*: log(max(x, clip_val) * C).

    PARAMS
    ------
    C: compression factor
    clip_val: floor applied before the log to avoid log(0)
    """
    clamped = torch.clamp(x, min=clip_val)
    return torch.log(clamped * C)
def dynamic_range_decompression(x, C=1):
    """Invert dynamic_range_compression: exp(x) / C.

    PARAMS
    ------
    C: compression factor used to compress
    """
    expanded = torch.exp(x)
    return expanded / C
# -
# export
def to_gpu(x):
    """Return *x* as a contiguous Variable, moved to the GPU when available."""
    x = x.contiguous()
    if not torch.cuda.is_available():
        return torch.autograd.Variable(x)
    return torch.autograd.Variable(x.cuda(non_blocking=True))
to_gpu(torch.randn(10, 10))
# +
# export
def get_mask_from_lengths(lengths: torch.Tensor, max_len: int = 0):
    """Return a (batch, max_len) bool mask; unmasked entries are True.

    When *max_len* is 0, the longest length in the batch is used.
    """
    if max_len == 0:
        max_len = int(lengths.max().item())
    positions = torch.arange(max_len, device=lengths.device, dtype=torch.long)
    return positions.unsqueeze(0) < lengths.unsqueeze(1)
# +
import torch
assert (
get_mask_from_lengths(torch.LongTensor([1, 3, 2, 1]))
== torch.Tensor(
[
[True, False, False],
[True, True, True],
[True, True, False],
[True, False, False],
]
)
).all()
# +
# export
import torch.distributed as dist
def reduce_tensor(tensor, n_gpus):
    """Average *tensor* across all distributed workers.

    Clones first so the caller's tensor is not modified by the in-place
    all_reduce.  Assumes torch.distributed is initialised and that each of
    the *n_gpus* workers contributes one tensor (hence the division).
    """
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= n_gpus
    return rt
# -
# export
def subsequent_mask(length):
    """Lower-triangular causal mask of shape (1, 1, length, length)."""
    causal = torch.ones(length, length).tril()
    return causal[None, None]
assert (subsequent_mask(2) == torch.tensor([[[1, 0], [1, 1]]])).all()
# export
def convert_pad_shape(pad_shape):
    """Reverse the outer list, then flatten one level (F.pad ordering)."""
    flattened = []
    for inner in reversed(pad_shape):
        flattened.extend(inner)
    return flattened
convert_pad_shape([[1, 2], [3, 4], [5, 6, 7]]) == [5, 6, 7, 3, 4, 1, 2]
# export
def sequence_mask(length, max_length=None):
    """Length mask (same contract as get_mask_from_lengths): True = valid."""
    if max_length is None:
        max_length = length.max()
    steps = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return steps[None, :] < length[:, None]
assert (
sequence_mask(torch.tensor([1, 3, 2, 1]))
== torch.Tensor(
[
[True, False, False],
[True, True, True],
[True, True, False],
[True, False, False],
]
)
).all()
assert (
sequence_mask(torch.tensor([1, 3, 2, 1]), 4)
== torch.Tensor(
[
[True, False, False, False],
[True, True, True, False],
[True, True, False, False],
[True, False, False, False],
]
)
).all()
# export
def generate_path(duration, mask):
    """Expand per-token durations into a monotonic alignment path.

    duration: [b, 1, t_x] -- frame count assigned to each input token
    mask: [b, 1, t_y, t_x] -- valid (frame, token) positions

    Returns a 0/1 tensor of shape [b, 1, t_y, t_x] marking, for each output
    frame, the token it is assigned to.
    """
    device = duration.device  # NOTE(review): unused local
    b, _, t_y, t_x = mask.shape
    # Cumulative durations give, per token, the index of its last frame.
    cum_duration = torch.cumsum(duration, -1)
    cum_duration_flat = cum_duration.view(b * t_x)
    # Each cumulative end becomes a prefix of ones over the t_y frames ...
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # ... and subtracting the previous token's prefix leaves only the frames
    # belonging to the current token.
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path
# +
# export
def slice_segments(x, ids_str, segment_size=4):
    """Gather, per batch element, *segment_size* frames starting at ids_str[i]."""
    ret = torch.zeros_like(x[:, :, :segment_size])
    for batch_idx in range(x.size(0)):
        start = ids_str[batch_idx]
        ret[batch_idx] = x[batch_idx, :, start : start + segment_size]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    """Slice a random window per batch element; returns (slices, start ids)."""
    batch, _, total_frames = x.size()
    if x_lengths is None:
        x_lengths = total_frames
    ids_str_max = x_lengths - segment_size
    ids_str = (torch.rand([batch]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    return slice_segments(x, ids_str, segment_size), ids_str
# -
# export
def init_weights(m, mean=0.0, std=0.01):
    """Initialise conv-layer weights in place from N(mean, std); no-op otherwise."""
    if "Conv" in m.__class__.__name__:
        m.weight.data.normal_(mean, std)
# export
def get_padding(kernel_size, dilation=1):
    """Padding that keeps a dilated convolution the same length ("same")."""
    span = dilation * (kernel_size - 1)
    return int(span / 2)
# export
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """Gated activation: tanh of the first n_channels channels of (a + b)
    multiplied by sigmoid of the remaining channels.

    n_channels is a 1-element int tensor (TorchScript-friendly scalar).
    Inputs are expected as (batch, channels, time); the channel axis is
    split at n_channels[0].
    """
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts
# export
def clip_grad_value_(parameters, clip_value, norm_type=2):
    """Clamp each gradient to [-clip_value, clip_value] in place and return
    the total *norm_type*-norm of the (pre-clamp) gradients.

    clip_value=None skips clamping and only computes the norm.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    with_grad = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)
    total_norm = 0
    for p in with_grad:
        # Norm is measured before clamping, matching the original order.
        total_norm += p.grad.data.norm(norm_type).item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    return total_norm ** (1.0 / norm_type)
# +
# export
def intersperse(lst, item):
    """Return lst with *item* inserted before, between, and after every element."""
    out = [item] * (2 * len(lst) + 1)
    for pos, value in enumerate(lst):
        out[2 * pos + 1] = value
    return out
def intersperse_emphases(emphases):
    """Remap [start, end] emphasis index pairs onto interspersed coordinates.

    Mutates the nested lists in place and returns the same object, mirroring
    the index shift produced by intersperse().
    """
    for span in emphases:
        span[0] = 2 * span[0]
        span[1] = 2 * span[1] + 1
    return emphases
# -
intersperse([1, 2, 3, 4], 0) == [0, 1, 0, 2, 0, 3, 0, 4, 0]
| nbs/utils.utils.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# language: python
# name: python37664bitbaseconda288461fb93b04d8d9a775fc77866514b
# ---
# # Setup
# +
# ## !git clone https://github.com/sqrx-mckl/driver_behaviour.git
# # %cd driver_behaviour
# from google.colab import drive
# drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 28447, "status": "ok", "timestamp": 1594165721796, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="kRwaHz8uoZcF" outputId="c4c3bf20-7cdd-4995-f8b7-d01bff95734a" tags=[]
import sys
from tensorflow.python.keras import __package__ as keras_package
print (sys.modules[keras_package].__version__)
# + [markdown] colab_type="text" id="-zH6E9gHoZcL"
# # Clustering
# I have the data sequenced from the previous event cutting algorithm
# I have some statistical analysis applied on the
#
# Now I will:
# * Apply GAN Bi-GRU on those sequences
# * get a categorisation by the GAN
# * plot in a new T-SNE / UMAP each sequences projected from the new encoding
#
# ## Source
# This is heavily inspired (with copy/paste) from the following article:
# [Data Exploration with Adversarial Autoencoders](https://towardsdatascience.com/data-exploration-with-adversarial-autoencoders-311a4e1f271b)
#
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 29974, "status": "ok", "timestamp": 1594165723331, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="9gi2Qx_YoZcM" outputId="fa6ac142-2deb-4bca-c0bf-7b44be92fc9c"
import pandas as pd
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
import numpy as np
from pprint import pprint
from IPython.display import display
from typing import Dict, Union, List, Callable, Tuple
from pathlib import Path
import lib.analysis
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
from tqdm.notebook import tqdm
tqdm.pandas()
# %matplotlib inline
sns.set_style('darkgrid')
sns.set_context('notebook')
pd.set_option('max_columns', None)
# + colab={} colab_type="code" executionInfo={"elapsed": 29970, "status": "ok", "timestamp": 1594165723332, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="k9a_dTWToZcQ"
folderpath = Path('data/UAH-DRIVESET-v1/')
# folderpath = Path('/content/drive/My Drive/Data/driver_behaviour/')
# + [markdown] colab_type="text" id="rqnQbmW5oZcT"
# # Model/data preparation and training
# + [markdown] colab_type="text" id="yEwZu7hyoZcU"
# ## Prepare the data
#
# Here I would like to focus on Tensorflow first and foremost, the Dask usage can be managed later.
#
# Hence I will only retrieve the data I need.
# + colab={} colab_type="code" executionInfo={"elapsed": 31409, "status": "ok", "timestamp": 1594165724776, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="pQWknWkPoZcV"
df = pd.read_parquet(Path(folderpath, 'data_ready.parquet'))
df = df.sort_values(['corner_select', 'timestamp'])
# + colab={} colab_type="code" executionInfo={"elapsed": 31407, "status": "ok", "timestamp": 1594165724779, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="7GDVWql7oZcY"
split_by = 'corner_select'
col = [
'gx',
'gy',
'yr',
'speed',
'distx',
'disty',
'time_event',
]
feat = [
'gx',
'gy',
'yr',
'speed',
]
# + [markdown] colab_type="text" id="eKFHusA6oZca"
# ## Split Train/Test
#
# Those plots are good to check the good repartition between train and test data.
# - "lib.analysis.plot_gx_gy()"
# - "lib.analysis.plot_gy_speed()"
#
# There is no real test data, as the test data here is fake data created to confuse the discriminators
# + colab={} colab_type="code" executionInfo={"elapsed": 31407, "status": "ok", "timestamp": 1594165724784, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="phr7aWupoZcd"
from sklearn.model_selection import train_test_split
# train_ix, test_ix = train_test_split(df[split_by].unique(), test_size=0.30)
train_ix = df[split_by].unique()
test_ix = df[split_by].unique()
df_train = df[[*feat,split_by]].where(lambda x: x[split_by].isin(train_ix)).dropna()
df_test = df[[*feat,split_by]].where(lambda x: x[split_by].isin(test_ix)).dropna()
df = df[[*col,split_by]]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 31401, "status": "ok", "timestamp": 1594165724786, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="N5PvkXfLoZcf" outputId="0dc002ef-fd87-4eae-bb15-a766a152c64a"
display(f'{df_train[split_by].unique().shape[0]} / {df_test[split_by].unique().shape[0]} / {df[split_by].unique().shape[0]} (train/test/total) event')
# + [markdown] colab_type="text" id="15p2DlSDoZcj"
# ## Make the Batch sequences
# + colab={} colab_type="code" executionInfo={"elapsed": 36084, "status": "ok", "timestamp": 1594165729479, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="41UB3kxZoZcj"
train_mean = df_train.mean()
train_std = df_train.std()
train_max = df_train.max()
def normalize_data(df, mean=train_mean, std=train_std, split_by=split_by) -> pd.DataFrame:
    """Z-score the numeric columns of *df* (excluding the grouping column),
    one-hot any non-numeric columns, and re-attach the grouping column.

    NOTE(review): the defaults are bound at definition time to the
    module-level training statistics (train_mean/train_std/split_by), so
    redefining those later will not change this function's defaults.
    """
    df_num = df.drop(columns=split_by).select_dtypes('number')
    df_obj = df.drop(columns=split_by).select_dtypes(exclude='number')
    df_num = (df_num - mean) / std
    # Some specific case - mainly time: time_event is rescaled to [0, 1] by
    # its own max (from the raw df), overriding the z-score above.
    if df_num.columns.isin(['time_event']).any():
        df_num['time_event'] = df['time_event'] / df['time_event'].max()
    if df_obj.empty:
        df_norm = df_num
    else:
        df_norm = df_num.join(pd.get_dummies(df_obj))
    return df_norm.join(df[split_by])
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" executionInfo={"elapsed": 36075, "status": "ok", "timestamp": 1594165729480, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="_uzjCM19oZcm" outputId="9396d9b1-daa2-4989-869a-113283460766"
df_train = normalize_data(df_train)
df_test = normalize_data(df_test)
data = normalize_data(df)
display(df_train.head())
# + [markdown] colab_type="text" id="-TeAcYMGoZcp"
# ## Pad sequences
# Unfortunately, our sequences are not all of the same size, as seen in the graph below
# + colab={"base_uri": "https://localhost:8080/", "height": 568} colab_type="code" executionInfo={"elapsed": 36401, "status": "ok", "timestamp": 1594165729818, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="EEbBe314oZcp" outputId="727e45c2-ef3f-4abb-8680-5a34af35d36d"
plt.figure()
df_train.groupby(split_by)[split_by].count().plot(kind='hist')
plt.title('event lengths');
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["203bce43e0ee4233bd7820771f7f45f4", "adabb13d56914beb9ea0b69e8cdaadf1", "<KEY>", "0fc27f83b7bb4fdaad22e91110026e17", "01d1233aaa4d4fbcb9a6aeef585a2f43", "26a05a47a3084079878e7d2bde8f020e", "c14787e5f39e4a8cbb6a935daab01e34", "cd1be1dec2774281a0220e76f6a3c9cc", "5e5fee0ec481488aa90fbaa9c56568aa", "<KEY>", "<KEY>", "ebec7387353a4941a917df83d1db73dd", "9ad91cb4e4ea46ee9fa9f4882416e28f", "ee9714b903fb41f9b99de8fa645d30fe", "c67a7ec1732340d09b127c9ccc337bc4", "e73f38ec09c44269931deb012d82e905", "<KEY>", "4f7a23be599a44abae5d6e175334a34f", "<KEY>", "e2cc81ee11b24d01a04f0ac9d08bbd2c", "d8aba30e115846dd8aab212a92505b03", "1a60126dc30145e1831e9866877c062d", "45a0b03abe844c3a9f814ffb54db1b16", "<KEY>"]} colab_type="code" executionInfo={"elapsed": 37052, "status": "ok", "timestamp": 1594165730478, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="3iRAgnx9oZcs" outputId="45897118-7142-4ddb-d72c-6eeda563a896" tags=[]
from tensorflow.python.keras.preprocessing import sequence
def sequence_data(df, split_by=split_by) -> Tuple[pd.Series, np.ndarray]:
    """Group *df* by *split_by* and zero-pad each group into one 3-D array.

    Returns:
        dataset: pandas Series with one 2-D value array per event
        x: float64 array of shape (events, max_event_len, n_features),
           post-padded with zeros
    """
    dataset = df.copy()
    # One entry per event: the raw feature matrix without the grouping column.
    dataset = dataset.groupby(split_by).progress_apply(
        lambda grp: grp.drop(columns=[split_by]).values
    )
    # Zero-pad every event to the longest one so they can be batched.
    # (The original kept an unused `dataset_idx` local and annotated the
    # return as Tuple[pd.Index, np.array], but actually returns the Series.)
    x = sequence.pad_sequences(dataset.values, padding='post', value=0, dtype='float64')
    return dataset, x
train_dataset, train_x = sequence_data(df_train)
test_dataset, test_x = sequence_data(df_test)
dataset, data_x = sequence_data(data)
display(train_dataset.shape)
display(train_x.shape)
display(train_x.shape)
# + [markdown] colab_type="text" id="xqIaXqG3oZcu"
# ## Setup the Model
# -
latent_dim = 32
cat_dim = 4
# + colab={} colab_type="code" executionInfo={"elapsed": 37046, "status": "ok", "timestamp": 1594165730479, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="dqP9D1xtoZcv"
from tensorflow.python.keras.models import Sequential, Input, Model
from tensorflow.python.keras.layers import (
Bidirectional, Dropout, TimeDistributed,
BatchNormalization, PReLU, ELU,
Concatenate, RepeatVector, Subtract,
LSTM, Dense
)
# from tensorflow_core.python.keras.optimizers import adam
# + colab={} colab_type="code" executionInfo={"elapsed": 48631, "status": "ok", "timestamp": 1594165742071, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="As6iydiFoZcx"
window_size = train_x.shape[1]
input_dim = train_x.shape[2]
prior_discriminator = lib.analysis.demo_create_discriminator(latent_dim)
prior_discriminator.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
prior_discriminator.trainable = False
cat_discriminator = lib.analysis.demo_create_discriminator(cat_dim)
cat_discriminator.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
cat_discriminator.trainable = False
encoder = lib.analysis.demo_create_encoder(latent_dim, cat_dim, window_size, input_dim)
signal_in = Input(shape=(window_size, input_dim))
reconstructed_signal, encoded_repr, category, _ = encoder(signal_in)
is_real_prior = prior_discriminator(encoded_repr)
is_real_cat = cat_discriminator(category)
autoencoder = Model(signal_in, [reconstructed_signal, is_real_prior, is_real_cat])
autoencoder.compile(
loss=['mse', 'binary_crossentropy', 'binary_crossentropy'],
loss_weights=[0.99, 0.005, 0.005],
optimizer='adam'
)
# + colab={"base_uri": "https://localhost:8080/", "height": 731} colab_type="code" executionInfo={"elapsed": 48623, "status": "ok", "timestamp": 1594165742072, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="OEmvso2CoZc0" outputId="ffeaf0d2-fc37-445b-9d88-67a55eb142e3" tags=[]
display(prior_discriminator.summary())
display(cat_discriminator.summary())
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 48611, "status": "ok", "timestamp": 1594165742072, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="3CcJnBL4oZc2" outputId="b3f10687-500e-4317-c20a-9f461b375768" tags=[]
display(autoencoder.summary())
display([layer.summary() for layer in autoencoder.layers if isinstance(layer, Model)])
# + colab={} colab_type="code" executionInfo={"elapsed": 48602, "status": "ok", "timestamp": 1594165742073, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="7IOrHDhloZc4"
batches = train_x.shape[0]
batch_size=train_x.shape[1]
losses_disc = []
losses_disc_cat = []
losses_ae = []
losses_val = []
real = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
# + colab={} colab_type="code" executionInfo={"elapsed": 48596, "status": "ok", "timestamp": 1594165742074, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="LrS41f5RoZc6"
def discriminator_training(discriminator, real, fake):
    """Build a closure that trains *discriminator* on one real batch and one
    fake batch, returning the averaged loss/metric values.

    *real* and *fake* are the fixed label arrays (ones / zeros).
    """
    def train(real_samples, fake_samples):
        # Unfreeze only for this update, so the combined autoencoder step
        # does not also move the discriminator's weights.
        discriminator.trainable = True
        loss_real = discriminator.train_on_batch(real_samples, real)
        loss_fake = discriminator.train_on_batch(fake_samples, fake)
        discriminator.trainable = False
        return np.add(loss_real, loss_fake) * 0.5
    return train
train_prior_discriminator = discriminator_training(prior_discriminator, real, fake)
train_cat_discriminator = discriminator_training(cat_discriminator, real, fake)
# + [markdown] colab_type="text" id="pougOrIdoZc9"
# ## Train the Model
# + colab={"base_uri": "https://localhost:8080/", "height": 511, "referenced_widgets": ["e7aee98c14764ba18b21a5585c6b9973", "123e64f0ea8146e28049ad2458cc0fc8", "8ac13167c269406aa9fe66af1fb40264", "fa34d8e6b85a4707a45da630dd168740", "<KEY>", "ad13c32c664e465da1e6a7a69b1a3b3e", "<KEY>", "0b1467808ba3441d8fd1a422587ebb30"]} colab_type="code" executionInfo={"elapsed": 81387, "status": "error", "timestamp": 1594165774874, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="3lK9d875oZc9" outputId="8fe093d4-7107-4fe5-f389-005f4706b733" tags=[]
pbar = tqdm(range(batches))
for _ in pbar:
ids = np.random.randint(0, train_x.shape[0], batch_size)
signals = train_x[ids]
_, latent_fake, category_fake, _ = encoder.predict(signals)
latent_real = lib.analysis.demo_sample_normal(latent_dim, batch_size)
category_real = lib.analysis.demo_sample_categories(cat_dim, batch_size)
prior_loss = train_prior_discriminator(latent_real, latent_fake)
cat_loss = train_cat_discriminator(category_real, category_fake)
losses_disc.append(prior_loss)
losses_disc_cat.append(cat_loss)
encoder_loss = autoencoder.train_on_batch(signals, [signals, real, real])
losses_ae.append(encoder_loss)
val_loss = autoencoder.test_on_batch(signals, [signals, real, real])
losses_val.append(val_loss)
pbar.set_description("[Acc. Prior/Cat: %.2f%% / %.2f%%] [MSE train/val: %f / %f]"
% (100*prior_loss[1], 100*cat_loss[1], encoder_loss[1], val_loss[1]))
# + colab={} colab_type="code" executionInfo={"elapsed": 80974, "status": "aborted", "timestamp": 1594165774470, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="Gh02stZIoZdA"
autoencoder.save_weights(str(Path(folderpath, 'dl_gan', 'autoencoder_weights.hdf')))
prior_discriminator.save_weights(str(Path(folderpath, 'dl_gan', 'prior_discriminator_weights.hdf')))
cat_discriminator.save_weights(str(Path(folderpath, 'dl_gan', 'cat_discriminator_weights.hdf')))
df_train.to_parquet(Path(folderpath, 'dl_gan', 'train_dataframe.parquet'))
np.save(Path(folderpath, 'dl_gan', 'train_array.npy'), train_x)
# +
# autoencoder.load_weights(str(Path(folderpath, 'dl_gan', 'autoencoder_weights.hdf')))
# prior_discriminator.load_weights(str(Path(folderpath, 'dl_gan', 'prior_discriminator_weights.hdf')))
# cat_discriminator.load_weights(str(Path(folderpath, 'dl_gan', 'cat_discriminator_weights.hdf')))
# + [markdown] colab_type="text" id="JAdAWiunoZdC"
# # Model performance
# + colab={} colab_type="code" executionInfo={"elapsed": 80965, "status": "aborted", "timestamp": 1594165774471, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="HX-rUzHEoZdC"
fig, axes = plt.subplots(nrows=1, ncols=3)
fig.set_size_inches(30, 6)
axes[0].plot([loss[1] for loss in losses_disc])
axes[1].plot([loss[1] for loss in losses_disc_cat])
axes[2].plot([loss[1] for loss in losses_ae])
axes[2].plot([loss[1] for loss in losses_val])
axes[0].set_title('error: prior discriminator')
axes[1].set_title('error: category discriminator')
axes[2].set_title('error: auto-encoder')
fig.show()
# + [markdown] colab_type="text" id="mFpG0NDRoZdF"
# ## Example of GAN output
#
# Here we show what the model does inside with on from left to right:
#
# 1. the input
# 2. the encoded time-serie
# 3. the cluster labeling
# 4. the decoded output
# 5. the error between decoded and input
# + colab={} colab_type="code" executionInfo={"elapsed": 81340, "status": "aborted", "timestamp": 1594165774859, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="tuHpZTl_oZdF"
test_batch = 5
fig, axes = plt.subplots(nrows=test_batch, ncols=5)
fig.set_size_inches(20, 3 * test_batch)
random_batch = np.random.randint(len(test_x), size=test_batch)
(dec, rep, cat, error) = encoder.predict(test_x[random_batch])
for i in range(test_batch):
axes[i,0].plot(test_x[i])
axes[i,1].imshow(rep[i].reshape(input_dim,int(latent_dim/input_dim)))
axes[i,2].imshow(cat[i].reshape(cat_dim, 1))
axes[i,3].plot(dec[i])
axes[i,4].plot(error[i])
fig.show()
# -
# # Data post-processing
# +
# NOTE(review): the original notebook saved `dfout` *before* the cell that
# defines it (a stale cell-execution-order artifact); the save now runs
# after the definition. `categories` is not defined anywhere in this
# notebook -- presumably a cluster assignment derived from `cat`; confirm
# before running.
dfout = pd.concat([
    pd.DataFrame(rep, index=dataset.index).add_prefix('encode_'),
    pd.DataFrame(cat, index=dataset.index).add_prefix('cluster_'),
    pd.DataFrame(categories, index=dataset.index, columns=['cluster']),
], axis=1)
display(dfout.head())
dfout.to_parquet(Path(folderpath, 'dl_gan', 'output.parquet'))
| notebooks/4_clustering_gan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Load-signal" data-toc-modified-id="Load-signal-1"><span class="toc-item-num">1 </span>Load signal</a></span></li><li><span><a href="#Compute-loudness" data-toc-modified-id="Compute-loudness-2"><span class="toc-item-num">2 </span>Compute loudness</a></span></li></ul></div>
# -
#
#
# # How to compute acoustic Loudness according to Zwicker method for time-varying signals
# This tutorial explains how to use MOSQITO to compute the acoustic loudness of a signal according to the Zwicker method for time-varying signals (as per ISO 532-1 section 6). For more information on the implementation and validation of the metric, you can refer to the [documentation](../docs/loudness_zwtv.md).
#
# The following commands are used to import the necessary functions.
# +
# Add MOSQITO to the Python path
import sys
sys.path.append('..')
# Import numpy
import numpy as np
# Import plot function
import matplotlib.pyplot as plt
# Import mosqito functions
from mosqito.utils import load
from mosqito.sq_metrics import loudness_zwtv
# Import MOSQITO color sheme [Optional]
from mosqito import COLORS
# To get inline plots (specific to Jupyter notebook)
# %matplotlib notebook
# -
# ## Load signal
# In this tutorial, the signal is imported from a .wav file. The tutorial [Audio signal basic operations](./tuto_signal_basic_operations.ipynb) gives more information about the syntax of the import and the other supported file types. You can use any .wav file to perform the tutorial or you can download the [woodpecker audio recording](../validations/sq_metrics/loudness_zwtv/input/ISO_532-1/Annex%20B.5/Test%20signal%2024%20%28woodpecker%29.wav) from MOSQITO that is used in the following.
# Define path to the .wav file
# To be replaced by your own path
path = "../validations/sq_metrics/loudness_zwtv/input/ISO_532-1/Annex B.5/Test signal 24 (woodpecker).wav"
# load signal
sig, fs = load(path, wav_calib=2 * 2 **0.5)
# plot signal
t = np.linspace(0, (len(sig) - 1) / fs, len(sig))
plt.figure(1)
plt.plot(t, sig, color=COLORS[0])
plt.xlabel('Time [s]')
plt.ylabel('Acoustic pressure [Pa]')
# ## Compute loudness
# The acoustic loudness is computed by using the following command line. In addition to the signal (as ndarray) and the sampling frequency, the function takes one input argument, "field_type", that can be set to "free" or "diffuse" depending on the environment of the audio signal recording.
N, N_spec, bark_axis, time_axis = loudness_zwtv(sig, fs, field_type="free")
# The loudness of the audio signal is computed as a function of time :
plt.figure(2)
plt.plot(time_axis, N, color=COLORS[0])
plt.xlabel('Time [s]')
plt.ylabel('N_zwtv [sone]')
# The specific loudness over time and critical band rate is also computed. It can be plotted over critical band rate for a specific time or over time for a specific critical band rate with the following commands:
i_time = 300
plt.figure(3)
plt.plot(bark_axis, N_spec[:, i_time], color=COLORS[0])
plt.xlabel('Critical band rate [Bark]')
plt.ylabel("N'_zwtv [sone/Bark]")
plt.title("t = {:.1f} s".format(time_axis[i_time]))
i_bark = 120
plt.figure(4)
plt.plot(time_axis, N_spec[i_bark, :], color=COLORS[0])
plt.xlabel('Time [s]')
plt.ylabel("N'_zwtv [sone/Bark]")
plt.title("Critical band rate = {:.1f} Bark".format(bark_axis[i_bark]))
# ---
from datetime import date
print("Tutorial generation date:", date.today().strftime("%B %d, %Y"))
| tutorials/tuto_loudness_zwtv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimizer tweaks
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
#export
from exp.nb_09 import *
# ## Imagenette data
path = datasets.untar_data(datasets.URLs.IMAGENETTE_160)
# +
tfms = [make_rgb, ResizeFixed(128), to_byte_tensor, to_float_tensor]
il = ImageItemList.from_files(path, tfms=tfms)
sd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='val'))
ll = label_by_func(sd, parent_labeler)
bs=64
train_dl,valid_dl = get_dls(ll.train,ll.valid,bs, num_workers=4)
# -
nfs = [32,64,128,256,512]
cbfs = [partial(AvgStatsCallback,accuracy),
CudaCallback,
partial(BatchTransformXCallback, norm_imagenette)]
data = DataBunch(train_dl, valid_dl, 3, 10)
nfs = [32]*4
# Having a Runner is great but not essential when the `Learner` already has everything needed in its state. We implement everything inside it directly instead of building a second object.
#export
class Learner():
    """Own the whole training loop: model, data, loss, optimizer and callbacks
    (the former Runner's responsibilities folded into the Learner itself).

    Callbacks are dispatched by name via ``self('event_name')``; the
    Cancel{Batch,Epoch,Train}Exception hierarchy lets a callback abort a
    batch, an epoch, or the whole fit.
    """
    def __init__(self, model, data, loss_func, opt=None, opt_func=optim.SGD, lr=None,
                 cbs=None, cb_funcs=None):
        self.model,self.data,self.loss_func = model,data,loss_func
        # Caller must supply either a ready-made optimizer or an lr to build one.
        assert opt or lr
        if not opt: opt = opt_func(model.parameters(), lr=lr)
        self.opt = opt
        cbs = listify(cbs)
        for cbf in listify(cb_funcs):
            cb = cbf()
            # Expose each callback as an attribute, e.g. self.recorder.
            setattr(self, cb.name, cb)
            cbs.append(cb)
        self.stop,self.cbs = False,[TrainEvalCallback()]+cbs

    def one_batch(self, xb, yb):
        """Forward pass and loss for one batch; backward/step only in training."""
        try:
            self.xb,self.yb = xb,yb
            self('begin_batch')
            self.pred = self.model(self.xb)
            self('after_pred')
            self.loss = self.loss_func(self.pred, self.yb)
            self('after_loss')
            if not self.in_train: return  # validation: stop before the gradient step
            self.loss.backward()
            self('after_backward')
            self.opt.step()
            self('after_step')
            self.opt.zero_grad()
        except CancelBatchException: self('after_cancel_batch')
        finally: self('after_batch')

    def all_batches(self, dl):
        """Run one_batch over every batch in dl; callbacks may cancel the epoch."""
        self.iters = len(dl)
        try:
            for xb,yb in dl: self.one_batch(xb, yb)
        except CancelEpochException: self('after_cancel_epoch')

    def fit(self, epochs):
        """Train for `epochs` epochs, validating under no_grad after each."""
        self.epochs,self.loss = epochs,tensor(0.)
        try:
            for cb in self.cbs: cb.set_runner(self)  # give callbacks a back-pointer
            self('begin_fit')
            for epoch in range(epochs):
                self.epoch = epoch
                if not self('begin_epoch'): self.all_batches(self.data.train_dl)
                with torch.no_grad():
                    if not self('begin_validate'): self.all_batches(self.data.valid_dl)
                self('after_epoch')
        except CancelTrainException: self('after_cancel_train')
        finally: self('after_fit')

    def __call__(self, cb_name):
        """Dispatch `cb_name` to every callback, sorted by `_order`.

        NOTE(review): with res initialised to False, ``res = cb(...) and res``
        can never become True, so this always returns False and the
        ``if not self(...)`` guards in fit() always run their body — presumably
        ``or`` was intended. Confirm before relying on a truthy return here.
        """
        res = False
        for cb in sorted(self.cbs, key=lambda x: x._order): res = cb(cb_name) and res
        return res
def get_learner(nfs, data, lr, layer, loss_func=F.cross_entropy,
                cb_funcs=None, opt_func=optim.SGD, **kwargs):
    """Build a CNN from the filter sizes `nfs`, initialise its weights,
    and wrap it in a Learner ready to fit."""
    net = get_cnn_model(data, nfs, layer, **kwargs)
    init_cnn(net)
    return Learner(net, data, loss_func, lr=lr, cb_funcs=cb_funcs, opt_func=opt_func)
learn = get_learner(nfs, data, 0.4, conv_layer, cb_funcs=cbfs)
# %time learn.fit(1)
# ## Check everything works
# Combined LR schedule: cosine ramp 0.3->0.6 for the first 30% of training,
# then cosine decay 0.6->0.2 for the remaining 70%.
sched = combine_scheds([0.3, 0.7], [sched_cos(0.3, 0.6), sched_cos(0.6, 0.2)])
cbfs += [Recorder,
         partial(ParamScheduler, 'lr', sched)]
learn = get_learner(nfs, data, 0.4, conv_layer, cb_funcs=cbfs)
learn.fit(1)
learn.recorder.plot_lr()
learn.recorder.plot_loss()
# ## Export
# !./notebook2script.py 09b_learner.ipynb
| dev_course/dl2/09b_learner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time Series Analysis and Stationarity
import warnings
warnings.filterwarnings('ignore')  # silence statsmodels/pandas deprecation noise
# +
# %matplotlib inline
import pandas_datareader.data as web
import numpy as np
import statsmodels.tsa.api as tsa
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.stattools import acf, q_stat, adfuller
from scipy.stats import probplot, moment
import matplotlib.pyplot as plt
import seaborn as sns
# -
sns.set_style('whitegrid')
def plot_correlogram(x, lags=None, title=None):
    """Plot a 2x2 grid of residual diagnostics for a time series.

    Panels: the series with a 21-period rolling mean (annotated with the
    max Ljung-Box p-value and the ADF p-value), a normal Q-Q plot with
    moment statistics, and ACF/PACF correlograms.

    Args:
        x: pandas Series of (residual) observations.
        lags: lags for the ACF/PACF and Q-statistic; defaults to
            min(10, len(x)//5).
        title: figure suptitle.
    """
    lags = min(10, int(len(x)/5)) if lags is None else lags
    with sns.axes_style('whitegrid'):
        fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 8))
        x.plot(ax=axes[0][0], title='Residuals')
        x.rolling(21).mean().plot(ax=axes[0][0], c='k', lw=1)
        # Largest Ljung-Box p-value across the tested lags.
        q_p = np.max(q_stat(acf(x, nlags=lags), len(x))[1])
        stats = f'Q-Stat: {q_p:>8.2f}\nADF: {adfuller(x)[1]:>11.2f}'
        axes[0][0].text(x=.02, y=.85, s=stats, transform=axes[0][0].transAxes)
        probplot(x, plot=axes[0][1])
        # BUG FIX: scipy.stats.moment computes *central* moments, so the first
        # moment is identically ~0 and the annotation displayed "Mean: 0.00"
        # regardless of the data. Compute the mean directly and standardise
        # the third and fourth moments.
        mean = x.mean()
        var, m3, m4 = moment(x, moment=[2, 3, 4])
        skew = m3 / var ** 1.5    # standardised skewness
        kurtosis = m4 / var ** 2  # standardised (non-excess) kurtosis
        s = f'Mean: {mean:>12.2f}\nSD: {np.sqrt(var):>16.2f}\nSkew: {skew:12.2f}\nKurtosis:{kurtosis:9.2f}'
        axes[0][1].text(x=.02, y=.75, s=s, transform=axes[0][1].transAxes)
        plot_acf(x=x, lags=lags, zero=False, ax=axes[1][0])
        plot_pacf(x, lags=lags, zero=False, ax=axes[1][1])
        axes[1][0].set_xlabel('Lag')
        axes[1][1].set_xlabel('Lag')
        fig.suptitle(title, fontsize=14)
        sns.despine()
        fig.tight_layout()
        fig.subplots_adjust(top=.9)
# ## Download Series
# Load monthly industrial production and daily NASDAQ stock market index:
# Monthly manufacturing industrial-production index and daily NASDAQ Composite
# from FRED; squeeze() turns the single-column frames into Series.
industrial_production = web.DataReader('IPGMFN', 'fred', '1988', '2017-12').squeeze().dropna()
nasdaq = web.DataReader('NASDAQCOM', 'fred', '1990', '2017-12-31').squeeze().dropna()
# ## Additive Decomposition
# Time series data typically contains a mix of various patterns that can be decomposed into several components, each representing an underlying pattern category. In particular, time series often consist of the systematic components trend, seasonality and cycles, and unsystematic noise. These components can be combined in an additive, linear model, in particular when fluctuations do not depend on the level of the series, or in a non-linear, multiplicative model.
# These components can be split up automatically. statsmodels includes a simple method to split the time series into a trend, seasonal, and residual component using moving averages. We can apply it to monthly data on industrial manufacturing production with both a strong trend and seasonality component, as follows:
components = tsa.seasonal_decompose(industrial_production, model='additive')
# Collect the original series and its three decomposition components
# side by side for plotting.
ts = (industrial_production.to_frame('Original')
      .assign(Trend=components.trend)
      .assign(Seasonality=components.seasonal)
      .assign(Residual=components.resid))
with sns.axes_style('white'):
    ts.plot(subplots=True, figsize=(14, 8), title=['Original Series', 'Trend Component', 'Seasonal Component','Residuals'], legend=False)
    plt.suptitle('Seasonal Decomposition', fontsize=14)
    sns.despine()
    plt.tight_layout()
    plt.subplots_adjust(top=.91);
# ## Time Series Stationarity
# The statistical properties, such as the mean, variance, or autocorrelation, of a stationary time series are independent of the period, that is, they don't change over time. Hence, stationarity implies that a time series does not have a trend or seasonal effects and that descriptive statistics, such as the mean or the standard deviation, when computed for different rolling windows, are constant or do not change much over time. It reverts to its mean, and the deviations have constant amplitude, while short-term movements always look the same in the statistical sense.
#
# More formally, strict stationarity requires the joint distribution of any subset of time series observations to be independent of time with respect to all moments. So, in addition to the mean and variance, higher moments such as skew and kurtosis, also need to be constant, irrespective of the lag between different observations. In most applications, we limit stationarity to first and second moments so that the time series is covariance stationary with constant mean, variance, and autocorrelation.
# Note that we specifically allow for dependence between observations at different lags, just like we want the input data for linear regression to be correlated with the outcome. Stationarity implies that these relationships are stable, which facilitates prediction as the model can focus on learning systematic patterns that take place within stable statistical properties. It is important because classical statistical models assume that the time series input data is stationary.
# To satisfy the stationarity assumption of linear time series models, we need to transform the original time series, often in several steps. Common transformations include the application of the (natural) logarithm to convert an exponential growth pattern into a linear trend and stabilize the variance. Deflation implies dividing a time series by another series that causes trending behavior, for example dividing a nominal series by a price index to convert it into a real measure.
# ### Log Transformation
# Double-check for zero values (log(0) is undefined)
(nasdaq == 0).any(), (industrial_production==0).any()
nasdaq_log = np.log(nasdaq)
industrial_production_log = np.log(industrial_production)
# ### Differencing
# In many cases, de-trending is not sufficient to make the series stationary. Instead, we need to transform the original data into a series of period-to-period and/or season-to-season differences. In other words, we use the result of subtracting neighboring data points or values at seasonal lags from each other. Note that when such differencing is applied to a log-transformed series, the results represent instantaneous growth rates or returns in a financial context.
# If a univariate series becomes stationary after differencing d times, it is said to be integrated of the order of d, or simply integrated if d=1. This behavior is due to so-called unit roots.
# Differencing of log series produces instantaneous returns.
# +
nasdaq_log_diff = nasdaq_log.diff().dropna()
# seasonal differencing (lag 12) => year-over-year instantaneous returns
industrial_production_log_diff = industrial_production_log.diff(12).dropna()
# -
# ### Plot Series
# The following chart shows time series for the NASDAQ stock index and industrial production for the 30 years through 2017 in original form, as well as the transformed versions after applying the logarithm and subsequently applying first and seasonal differences (at lag 12), respectively. The charts also display the ADF p-value, which allows us to reject the hypothesis of unit-root non-stationarity after all transformations in both cases:
with sns.axes_style('dark'):
    fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(14, 8))
    # Left column: NASDAQ in levels, logs, and log first differences.
    # Right column: industrial production in levels, logs, seasonal log diffs.
    # Each panel is annotated with its ADF unit-root test p-value.
    nasdaq.plot(ax=axes[0][0],
                title='NASDAQ Composite Index')
    axes[0][0].text(x=.03,
                    y=.85,
                    s=f'ADF: {tsa.adfuller(nasdaq.dropna())[1]:.4f}',
                    transform=axes[0][0].transAxes)
    axes[0][0].set_ylabel('Index')
    nasdaq_log.plot(ax=axes[1][0],
                    sharex=axes[0][0])
    # BUG FIX: annotation label read 'ADFl:' (stray 'l'); corrected to 'ADF:'.
    axes[1][0].text(x=.03, y=.85,
                    s=f'ADF: {tsa.adfuller(nasdaq_log.dropna())[1]:.4f}',
                    transform=axes[1][0].transAxes)
    axes[1][0].set_ylabel('Log')
    nasdaq_log_diff.plot(ax=axes[2][0],
                         sharex=axes[0][0])
    axes[2][0].text(x=.03, y=.85,
                    s=f'ADF: {tsa.adfuller(nasdaq_log_diff.dropna())[1]:.4f}',
                    transform=axes[2][0].transAxes)
    axes[2][0].set_ylabel('Log, Diff')
    industrial_production.plot(ax=axes[0][1],
                               title='Industrial Production: Manufacturing')
    axes[0][1].text(x=.03, y=.85,
                    s=f'ADF: {tsa.adfuller(industrial_production)[1]:.4f}',
                    transform=axes[0][1].transAxes)
    axes[0][1].set_ylabel('Index')
    industrial_production_log.plot(ax=axes[1][1],
                                   sharex=axes[0][1])
    axes[1][1].text(x=.03, y=.85,
                    s=f'ADF: {tsa.adfuller(industrial_production_log.dropna())[1]:.4f}',
                    transform=axes[1][1].transAxes)
    axes[1][1].set_ylabel('Log')
    industrial_production_log_diff.plot(ax=axes[2][1],
                                        sharex=axes[0][1])
    # NOTE(review): x=.83 places this annotation at the right edge, unlike the
    # other panels' x=.03 — confirm whether the offset is intentional.
    axes[2][1].text(x=.83, y=.85,
                    s=f'ADF: {tsa.adfuller(industrial_production_log_diff.dropna())[1]:.4f}',
                    transform=axes[2][1].transAxes)
    axes[2][1].set_ylabel('Log, Seasonal Diff')
    sns.despine()
    fig.tight_layout()
    fig.align_ylabels(axes)
# ## Correlogram
# Autocorrelation (also called serial correlation) adapts the concept of correlation to the time series context: just as the correlation coefficient measures the strength of a linear relationship between two variables, the autocorrelation coefficient, $\rho_k$, measures the extent of a linear relationship between time series values separated by a given lag, $k$.
# Hence, we can calculate one autocorrelation coefficient for each of the T-1 lags in a time series; T is the length of the series. The autocorrelation function (ACF) computes the correlation coefficients as a function of the lag.
# The autocorrelation for a lag larger than 1 (that is, between observations more than one time step apart) reflects both the direct correlation between these observations and the indirect influence of the intervening data points. The partial autocorrelation removes this influence and only measures the linear dependence between data points at the given lag distance. The partial autocorrelation function (PACF) provides all the correlations that result once the effects of a correlation at shorter lags have been removed.
# There are algorithms that estimate the partial autocorrelation from the sample autocorrelation based on the exact theoretical relationship between the PACF and the ACF.
# A correlogram is simply a plot of the ACF or PACF for sequential lags, k=0,1,...,n. It allows us to inspect the correlation structure across lags at one glance. The main usage of correlograms is to detect any autocorrelation after the removal of the effects of deterministic trend or seasonality. Both the ACF and the PACF are key diagnostic tools for the design of linear time series models and we will review examples of ACF and PACF plots in the following section on time series transformations.
# ### NASDAQ (log, diff)
# We can further analyze the relevant time series characteristics for the transformed series using a Q-Q plot that compares the quantiles of the distribution of the time series observation to the quantiles of the normal distribution and the correlograms based on the ACF and PACF.
#
# For the NASDAQ plot, we notice that while there is no trend, the variance is not constant but rather shows clustered spikes around periods of market turmoil in the late 1980s, 2001, and 2008. The Q-Q plot highlights the fat tails of the distribution with extreme values more frequent than the normal distribution would suggest. The ACF and the PACF show similar patterns with autocorrelation at several lags appearing significant:
plot_correlogram(nasdaq_log_diff, lags=100, title='NASDAQ Composite (Log, Diff)')
# ### Industrial Production (log, seasonal diff)
# For the monthly time series on industrial manufacturing production, we notice a large negative outlier following the 2008 crisis as well as the corresponding skew in the Q-Q plot. The autocorrelation is much higher than for the NASDAQ returns and declines smoothly. The PACF shows distinct positive autocorrelation patterns at lag 1 and 13, and significant negative coefficients at lags 3 and 4:
plot_correlogram(industrial_production_log_diff, title='Industrial Production (Seasonal Diff)')
| 09_time_series_models/01_tsa_and_stationarity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Let's discuss a few features of sorting in python. This is something we will treat as a "black box" for now; that is, we'll just use it as a tool given to us without understanding the implementation details. But later we will go over the details of how these algorithms work.
#
# First, let's create a list of 50 random numbers between 0 and 99 using numpy
import numpy as np
np.random.seed(0)  # fixed seed so the example is reproducible
arr = np.random.randint(0, 100, 50)  # 50 random ints in [0, 100)
print(arr)
# Sorting them in python is as simple as saying
arr_sorted = sorted(arr)  # returns a new sorted list; arr itself is unchanged
print(arr_sorted)
# So what's the big deal? Well, things can get a little fancier if we are sorting a list of objects. For example, suppose we had an object which encapsulated both a string and a number. To keep things simple, we'll make each name a 3-character string with only the characters "a" "b" and "c". I'll go ahead and make a random list of such objects now
class NumStr:
    """Toy object pairing a random number with a random 3-letter name,
    used to demonstrate sorting with key functions."""

    def __init__(self):
        """Randomly initialise a number in [0, 1000) and a 3-character
        name drawn from the letters 'a', 'b', 'c'."""
        self.num = np.random.randint(1000)
        letters = "abc"
        self.name = "".join(letters[i] for i in np.random.randint(0, 3, 3))

    def __str__(self):
        """Analogous to Java's toString(): render as 'num:name'."""
        return "{}:{}".format(self.num, self.name)
np.random.seed(0)  # reproducible objects
arr = [NumStr() for i in range(100)]
# For example, here are the first two elements
print(arr[0], arr[1])
# But what happens when we try to sort these?
# NOTE: this raises a TypeError on purpose — NumStr defines no '<' ordering.
sorted(arr)
# Uh oh! The issue is, we haven't defined how to sort objects of type "NumStr". What we have to do is define a special function to say how to sort. This function should return something about the object which is comparable (e.g. ints by value or strings in alphabetical order). Let's define two functions for our object: one that returns the name and one that returns the number
# +
def get_name(obj):
    """Sort key: return the object's ``name`` attribute."""
    key = obj.name
    return key
def get_num(obj):
    """Sort key: return the object's ``num`` attribute."""
    key = obj.num
    return key
# -
# Now, we can pass along these functions *as parameters* to the <code>sorted</code> function. For example, let's sort them by number and print out the first 10.
arr_sorted = sorted(arr, key=get_num)  # order by the numeric field
for x in arr_sorted[0:10]:
    print(x)
# Let's take this result and now sort it by name
arr_sorted2 = sorted(arr_sorted, key=get_name)  # stable: ties keep num order
for x in arr_sorted2[0:10]:
    print(x)
# Sharp eyes will notice that within the ties, each chunk is sorted by number (e.g. for everything with a name aaa, they come out 347, 593, 606, and 964). This is because python's sorted uses what's known as a <b>stable sort</b>. We won't immediately need this, but it's a good thing to know and keep in our back pocket
# One more thing to mention is that the methods we passed to <code>sorted</code> were incredibly simple. There's actually a short hand for these types of one line method definitions in python known as an <a href = "https://www.programiz.com/python-programming/anonymous-function">anonymous function</a>. Below is how you could use this to sort by number again
arr_sorted = sorted(arr, key=lambda x: x.num)
| ClassExercises/Week2_Sorting/Sorting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %pylab inline
import warnings
warnings.filterwarnings("ignore")
import nolearn
from nolearn.lasagne import NeuralNet
import readdata
import cdb
from cdb import *
import lasagne
from lasagne import layers
from sklearn import metrics
import detectobjects as det
import os.path
from scipy import misc
import cv2
from progress_bar import ProgressBar
import shapefeatures
from sklearn import ensemble
# Configuration: data locations, detection thresholds and patch geometry.
opts = {'img_dir': '/media/anson/Windows/ANSON_CEN16002_MAIN_PJT/data/images_IP/',
        #'models_dir': '../models/',
        'annotation_dir': '/media/anson/Windows/ANSON_CEN16002_MAIN_PJT/data/annotation_IP/',
        'detection_probability_threshold': 0.9,
        'detection_overlap_threshold': 0.3,
        'gauss': 1,
        'patch_size': (300,300),
        'image_downsample' : 5,
        'detection_step': 5,
        'patch_creation_step': 120,
        'object_class': None,
        'negative_training_discard_rate': .9
        }
# Stride between training patches: 25% of the patch edge (75% overlap).
opts['patch_stride_training'] = int(opts['patch_size'][0]*0.25)
# +
# Split images 50/50 into train and test sets, then cut labelled patches
# from each image using the annotations.
trainfiles, valfiles, testfiles = readdata.create_sets(opts['img_dir'], train_set_proportion=.5,
                                                       test_set_proportion=.5)
train_y, train_X = readdata.create_patches(trainfiles, opts['annotation_dir'], opts['img_dir'], opts['patch_size'][0], opts['patch_stride_training'], grayscale=False, progressbar=True, downsample=opts['image_downsample'], objectclass=opts['object_class'], negative_discard_rate=opts['negative_training_discard_rate'])
test_y, test_X = readdata.create_patches(testfiles, opts['annotation_dir'], opts['img_dir'], opts['patch_size'][0], opts['patch_stride_training'], grayscale=False, progressbar=True, downsample=opts['image_downsample'], objectclass=opts['object_class'], negative_discard_rate=opts['negative_training_discard_rate'])
#val_y, val_X = readdata.create_patches(valfiles, opts['annotation_dir'], opts['img_dir'], opts['patch_size'][0], opts['patch_stride_training'], grayscale=False, progressbar=True, downsample=opts['image_downsample'], objectclass=opts['object_class'], negative_discard_rate=opts['negative_training_discard_rate'])
# Cut down on disproportionately large numbers of negative patches
train_X, train_y = readdata.balance(train_X, train_y, mult_neg=100)
test_X, test_y = readdata.balance(test_X, test_y, mult_neg=100)
#val_X, val_y = cdb.balance(val_X, val_y, mult_neg=100)
# Create rotated and flipped versions of the positive patches
train_X, train_y = readdata.augment_positives(train_X, train_y)
test_X, test_y = readdata.augment_positives(test_X, test_y)
#val_X, val_y = cdb.augment_positives(val_X, val_y)
# Summarise the class balance after balancing + augmentation (Python 2 prints).
print '\n'
print '%d positive training examples, %d negative training examples' % (sum(train_y), len(train_y)-sum(train_y))
print '%d positive testing examples, %d negative testing examples' % (sum(test_y), len(test_y)-sum(test_y))
print '%d patches (%.1f%% positive)' % (len(train_y)+len(test_y), 100.*((sum(train_y)+sum(test_y))/(len(train_y)+len(test_y))))
# -
# View a random selection of positive and negative patches to see if they look right
# +
N_samples_to_display = 10
# Top row: a random sample of positive training patches.
pos_indices = np.where(train_y)[0]
pos_indices = pos_indices[np.random.permutation(len(pos_indices))]
for i in range(N_samples_to_display):
    plt.subplot(2,N_samples_to_display,i+1)
    example_pos = train_X[pos_indices[i],:,:,:]
    example_pos = np.swapaxes(example_pos,0,2)  # put channels last for imshow
    plt.imshow(example_pos)
    plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
# Bottom row: a random sample of negative training patches.
neg_indices = np.where(train_y==0)[0]
neg_indices = neg_indices[np.random.permutation(len(neg_indices))]
for i in range(N_samples_to_display,2*N_samples_to_display):
    plt.subplot(2,N_samples_to_display,i+1)
    example_neg = train_X[neg_indices[i],:,:,:]
    example_neg = np.swapaxes(example_neg,0,2)
    plt.imshow(example_neg)
    plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
plt.gcf().set_size_inches(1.5*N_samples_to_display,3)
#plt.savefig('LSTM_IP_FIN/Random_pos-neg.png', bbox_inches='tight')
# -
# ### CNN training
# +
def CNN(n_epochs):
    """Build a nolearn NeuralNet: conv -> max-pool -> recurrent -> softmax.

    n_epochs: maximum number of SGD (momentum) training epochs.
    Returns the unfitted NeuralNet.
    """
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer), #Convolutional layer. Params defined below
            ('pool1', layers.MaxPool2DLayer), # Like downsampling, for execution speed
            ('RNN',layers.RecurrentLayer),
            ('output', layers.DenseLayer),
        ],
        # Input: 3-channel patch, edge length = patch_size / downsample factor.
        input_shape=(None, 3, opts['patch_size'][0]/opts['image_downsample'],
                     opts['patch_size'][0]/opts['image_downsample']),
        conv1_num_filters=7,
        conv1_filter_size=(3, 3),
        conv1_nonlinearity=lasagne.nonlinearities.rectify,
        pool1_pool_size=(2, 2),
        RNN_num_units=10,
        #conv2_num_filters=12,
        #conv2_filter_size=(2, 2),
        #conv2_nonlinearity=lasagne.nonlinearities.rectify,
        #hidden3_num_units=500,
        output_num_units=2,  # binary: object vs background
        output_nonlinearity=lasagne.nonlinearities.softmax,
        update_learning_rate=0.0001,
        update_momentum=0.9,
        max_epochs=n_epochs,
        verbose=1,
    )
    return net1
cnn = CNN(100).fit(train_X, train_y)
# -
import cPickle as pickle
#sys.setrecursionlimit(10000) # you may need this if the network is large
# Persist the trained network (protocol -1 = highest available).
with open("model_file_IP_new", 'wb') as f:
    pickle.dump(cnn , f, -1)
#with open("model_file_IP", 'wb') as f:
# Reload the pickled model and sanity-check that it still predicts.
with open('model_file_IP_new', 'rb') as pickle_file:
    c=pickle.load(pickle_file)
y_p=c.predict_proba(test_X)
y_p
# ### Make predictions and evaluate on test data
y_pred = cnn.predict_proba(test_X)  # column 1 is the positive-class probability
y_pred
# +
# ROC and precision-recall curves for the CNN on the held-out test patches.
false_positive_rate, true_positive_rate, thresholds = metrics.roc_curve(test_y, y_pred[:,1])
roc_auc = metrics.auc(false_positive_rate, true_positive_rate)
precision, recall, thresholds = metrics.precision_recall_curve(test_y, y_pred[:,1])
average_precision = metrics.average_precision_score(test_y, y_pred[:, 1])
subplot(121)
plt.title('ROC: AUC = %0.2f'% roc_auc)
plt.plot(false_positive_rate, true_positive_rate, 'b')
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')  # chance diagonal
plt.ylim([-.05, 1.05])
plt.xlim([-.05, 1.0])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
subplot(122)
plt.plot(recall, precision)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall: AP={0:0.2f}'.format(average_precision))
plt.legend(loc="lower left")
plt.gcf().set_size_inches(10,4)
#plt.savefig('GRU_IP_FIN/plasmodium-patchevaluation_new.png', bbox_inches='tight')
# -
false_positive_rate, true_positive_rate, thresholds = metrics.roc_curve(test_y, y_pred[:,1])
true_positive_rate.shape, thresholds.shape
# NOTE(review): the xlabel says 'Threshold' but thresholds are passed as the
# y values here — the plot arguments may be swapped; confirm intent.
plt.plot(true_positive_rate, thresholds,label='True positive rate')
plt.plot(false_positive_rate, thresholds, label='False positive rate')
plt.xlabel('Threshold')
plt.legend(loc='upper left')
#plt.savefig('GRU_IP_FIN/TPR-FPR_new.png', bbox_inches='tight')
# Binarise the positive-class probabilities at 0.5 (q becomes 0/1 ints).
q=y_pred[:,1]>0.5
q=q*1
q
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(test_y, q))
print 'The accuracy is:', accuracy_score(test_y, q)
# **Examine mistakes to understand network performance: false positives.**
#
# Find the negative-labelled patches with highest prediction score
# +
# Rank negative-labelled test patches by predicted positive score (descending):
# the top of this ordering are the network's worst false positives.
neg_indices = np.where(test_y==0)[0]
neg_scores = y_pred[neg_indices,1]
neg_indices = neg_indices[neg_scores.argsort()]
neg_indices = neg_indices[::-1]
neg_scores = y_pred[neg_indices,1]
N_samples_to_display = 12
offset = 55
for i in range(N_samples_to_display,2*N_samples_to_display):
    plt.subplot(2,N_samples_to_display,i+1)
    example_neg = test_X[neg_indices[i+offset],:,:,:]
    example_neg = np.swapaxes(example_neg,0,2)  # channels last for imshow
    plt.imshow(example_neg)
    plt.title('%.3f' % neg_scores[i+offset])
    plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
plt.gcf().set_size_inches(1.5*N_samples_to_display,3)
plt.savefig('GRU_IP_FIN/plasmodium-falsedetections.png', bbox_inches='tight')
# +
# Display test patches whose positive probability falls within [0.9, 1.0];
# scores outside the range are masked to -1 so they sort to the bottom.
prob_range = [.9,1.]
tmp_scores = y_pred.copy()[:,1]
tmp_scores[tmp_scores<prob_range[0]] = -1
tmp_scores[tmp_scores>prob_range[1]] = -1
pos_indices = tmp_scores.argsort()
pos_indices = pos_indices[::-1]
N_samples_to_display = 12
offset = 0
for i in range(N_samples_to_display,2*N_samples_to_display):
    plt.subplot(2,N_samples_to_display,i+1)
    example_neg = test_X[pos_indices[i+offset],:,:,:]
    example_neg = np.swapaxes(example_neg,0,2)
    plt.imshow(example_neg)
    plt.title('%.3f' % (tmp_scores[pos_indices[i+offset]]))
    plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
plt.gcf().set_size_inches(1.5*N_samples_to_display,3)
#plt.savefig('LSTM_IP_FIN/tuberculosis-detectedpatches1.png', bbox_inches='tight')
# -
# See highest-scored test patches
# +
# Same as above but for the very highest-scored patches ([0.95, 1.0]).
prob_range = [.95,1.]
tmp_scores = y_pred.copy()[:,1]
tmp_scores[tmp_scores<prob_range[0]] = -1
tmp_scores[tmp_scores>prob_range[1]] = -1
pos_indices = tmp_scores.argsort()
pos_indices = pos_indices[::-1]
N_samples_to_display = 12
offset = 0
for i in range(N_samples_to_display,2*N_samples_to_display):
    plt.subplot(2,N_samples_to_display,i+1)
    example_neg = test_X[pos_indices[i+offset],:,:,:]
    example_neg = np.swapaxes(example_neg,0,2)
    plt.imshow(example_neg)
    plt.title('%.3f' % (tmp_scores[pos_indices[i+offset]]))
    plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
plt.gcf().set_size_inches(1.5*N_samples_to_display,3)
plt.savefig('GRU_IP_FIN/plasmodium-detectedpatches.png', bbox_inches='tight')
# -
# See lowest scored test patches
# +
# Lowest-scored test patches: the network's most confident negatives.
pos_indices = y_pred[:,1].argsort()
N_samples_to_display = 12
for i in range(N_samples_to_display,2*N_samples_to_display):
    plt.subplot(2,N_samples_to_display,i+1)
    example_neg = test_X[pos_indices[i],:,:,:]
    example_neg = np.swapaxes(example_neg,0,2)
    plt.imshow(example_neg)
    plt.title('%.3f' % (y_pred[pos_indices[i],1]))
    plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off')
plt.gcf().set_size_inches(1.5*N_samples_to_display,3)
plt.savefig('GRU_IP_FIN/plasmodium-testpatches-lowprob.png', bbox_inches='tight')
# -
reload(readdata)  # Python 2 builtin: pick up edits to the readdata module
# ### Example of objects detected in an entire image
#
# The white boxes represent annotations in the training data. Red boxes are detections by the convnet.
# +
reload(det)
fname = testfiles[76]
imfile = opts['img_dir'] + fname
opts['detection_threshold'] = 0.99
# Run sliding-window detection over the full image with the trained network.
found = det.detect(imfile, cnn, opts)
im = misc.imread(imfile)
plt.box(False)
plt.xticks([])
plt.yticks([])
# Ground-truth annotation boxes drawn in white.
annofile = opts['annotation_dir'] + fname[:-3] + 'xml'
bboxes = readdata.get_bounding_boxes_for_single_image(annofile)
for bb in bboxes:
    bb = bb.astype(int)
    # NOTE(review): annotations index corners as (bb[0],bb[2])/(bb[1],bb[3])
    # while detections below use (f[0],f[1])/(f[2],f[3]) — confirm the two
    # coordinate conventions really differ as drawn.
    cv2.rectangle(im, (bb[0],bb[2]), (bb[1],bb[3]), (255,255,255), 2)
# Network detections drawn in red.
for f in found:
    f = f.astype(int)
    cv2.rectangle(im, (f[0],f[1]), (f[2],f[3]), (255,0,0), 2)
plt.gcf().set_size_inches(10,10)
plt.title('Detected objects in %s' % (imfile))
plt.imshow(im)
plt.savefig('GRU_IP_FIN/annt1.png', bbox_inches='tight')
#cv2.imwrite('detectionimages/detected-' + os.path.basename(imfile),im)
# -
# ### Evaluation: compare with classification based on morphological feature extraction
# +
# Baseline: hand-crafted morphological shape features extracted from
# grayscale versions of the same train/test patches.
featureset = [3,7,11,12,15,17]
centiles = [0,25,50,75,100]
pb = ProgressBar(train_X.shape[0])
train_X_f = []
for i in range(train_X.shape[0]):
    if i % 100 == 0:
        pb.step(i)
    graypatch = cv2.cvtColor(np.swapaxes(train_X[i,:,:,:],0,2).astype('uint8'), cv2.COLOR_BGR2GRAY)
    train_X_f.append(shapefeatures.extract(graypatch,attributes=featureset,centiles=centiles, momentfeatures=True))
train_X_f = np.vstack(train_X_f)
test_X_f = []
for i in range(test_X.shape[0]):
    if i % 100 == 0:
        pb.step(i)
    graypatch = cv2.cvtColor(np.swapaxes(test_X[i,:,:,:],0,2).astype('uint8'), cv2.COLOR_BGR2GRAY)
    test_X_f.append(shapefeatures.extract(graypatch,attributes=featureset,centiles=centiles, momentfeatures=True))
test_X_f = np.vstack(test_X_f)
# -
# Extremely-randomised-trees baseline trained on the shape features.
clf = ensemble.ExtraTreesClassifier(n_estimators=100, max_depth=5, n_jobs=-1)
clf.fit(train_X_f, train_y)
y_pred_CLF = clf.predict_proba(test_X_f)
# +
# Compare CNN vs ERT baseline with overlaid ROC and precision-recall curves.
false_positive_rate_CNN, true_positive_rate_CNN, thresholds_CNN = metrics.roc_curve(test_y, y_pred[:,1])
roc_auc_CNN = metrics.auc(false_positive_rate_CNN, true_positive_rate_CNN)
precision_CNN, recall_CNN, thresholds_CNN = metrics.precision_recall_curve(test_y, y_pred[:,1])
average_precision_CNN = metrics.average_precision_score(test_y, y_pred[:, 1])
false_positive_rate_CLF, true_positive_rate_CLF, thresholds_CLF = metrics.roc_curve(test_y, y_pred_CLF[:,1])
roc_auc_CLF = metrics.auc(false_positive_rate_CLF, true_positive_rate_CLF)
precision_CLF, recall_CLF, thresholds_CLF = metrics.precision_recall_curve(test_y, y_pred_CLF[:,1])
average_precision_CLF = metrics.average_precision_score(test_y, y_pred_CLF[:, 1])
subplot(211)
plt.title('ROC' )
plt.plot(false_positive_rate_CNN, true_positive_rate_CNN, 'b', label='CNN: AUC=%.2f' % (roc_auc_CNN))
plt.plot(false_positive_rate_CLF, true_positive_rate_CLF, 'k--', label='ERT: AUC=%.2f' % (roc_auc_CLF))
plt.legend(loc='lower right')
plt.ylim([-.05, 1.05])
plt.xlim([-.05, 1.0])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
subplot(212)
plt.plot(recall_CNN, precision_CNN, label='CNN: AP=%.2f' % (average_precision_CNN))
plt.plot(recall_CLF, precision_CLF,'k--', label='ERT: AP=%.2f' % (average_precision_CLF))
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall')
plt.legend(loc="lower left")
plt.gcf().set_size_inches(4,10)
plt.savefig('figs/plasmodium-patchevaluation.png', bbox_inches='tight')
| CNN training & evaluation - intestinal parasites.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TCC
# language: python
# name: tcc
# ---
# +
# Ignore sklearn future warning.
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
from SentiCR.SentiCR import SentiCR
from SentiSW.code.classification.classifier import Classifier
from SentiSW.code.entity.training_set_generation import get_entity
import scikitplot as skplt
import matplotlib.pyplot as plt
import pandas as pd
import pickle
# Config NLTK java file finder (Windows-specific JRE path).
import os
java_path = "C:/Program Files (x86)/Java/jre1.8.0_241/bin/java.exe"
os.environ['JAVAHOME'] = java_path
dir_path = os.path.abspath(os.getcwd())
# Classifier's model path
SentiCRModelPath = dir_path + "/SentiCR/SentiCR/models/SentiCR_model.pkl"
SentiCRPlusModelPath = dir_path + "/SentiCR/SentiCR/models/SentiCR+ (SentiCR + converted SentiSW).pkl"
SentiSWModelPath = dir_path + "/SentiSW/data/model/sentimentClassification/classifier.pkl"
# Classifier's trainning files path
SentiCR_trainning_file_path = dir_path + '/SentiCR/SentiCR/oracle.xlsx'
SentiCR_plus_trainning_file_path = dir_path + '/SentiCR/SentiCR/SentiCR+ (SentiCR + converted SentiSW).xlsx'
SentiSW_trainning_file_path = dir_path + '/SentiSW/data/training_set_3000_original.csv'
SentiSW_trainning_no_chinese_file_path = dir_path + '/SentiSW/data/training_set_3000_no_chinese.csv'
gold_file_path = dir_path + '/Senti4SD_GoldStandard_EmotionPolarity.xlsx'
def SentiCR_classify_labels(sentence):
    """Return the SentiCR+ sentiment label for `sentence`.

    Loads the pickled, already-trained analyzer from SentiCRPlusModelPath when
    it exists; otherwise trains a fresh analyzer (SentiCR trains inside its
    constructor) and caches it to disk for subsequent calls.
    """
    # Paths for the original SentiCR model remain at module level
    # (SentiCRModelPath) should the original model be needed again.
    if os.path.exists(SentiCRPlusModelPath):
        # Context manager closes the handle promptly; the original code
        # leaked it via pickle.load(open(...)).
        with open(SentiCRPlusModelPath, 'rb') as model_file:
            sentiment_analyzer = pickle.load(model_file)
    else:
        # BUG FIX: the module does `from SentiCR.SentiCR import SentiCR`, so
        # the name in scope is the class itself; `SentiCR.SentiCR()` would
        # raise AttributeError. Instantiate the class directly.
        sentiment_analyzer = SentiCR()
        with open(SentiCRPlusModelPath, 'wb') as model_file:
            pickle.dump(sentiment_analyzer, model_file)
    return sentiment_analyzer.get_sentiment_polarity(sentence)
def SentiCR_classify_probas(sentence):
    """Return the SentiCR+ class-probability output for `sentence`.

    Mirrors SentiCR_classify_labels: loads the cached pickled analyzer from
    SentiCRPlusModelPath when present, otherwise trains a fresh analyzer and
    caches it, then returns get_sentiment_polarity_probas(sentence).
    """
    if os.path.exists(SentiCRPlusModelPath):
        # Context manager closes the handle promptly; the original code
        # leaked it via pickle.load(open(...)).
        with open(SentiCRPlusModelPath, 'rb') as model_file:
            sentiment_analyzer = pickle.load(model_file)
    else:
        # BUG FIX: `SentiCR` is the imported class, not a module, so
        # `SentiCR.SentiCR()` would raise AttributeError.
        sentiment_analyzer = SentiCR()
        with open(SentiCRPlusModelPath, 'wb') as model_file:
            pickle.dump(sentiment_analyzer, model_file)
    return sentiment_analyzer.get_sentiment_polarity_probas(sentence)
# -
def convert_to_binary_prediction(preds):
    """Return the first two entries of `preds` as a plain two-element list.

    The probabilities are passed through untouched; an earlier renormalising
    variant was abandoned (see version history).
    """
    non_negative, negative = preds[0], preds[1]
    return [non_negative, negative]
# +
def sentiCR_predict_labels(test, name):
    """Classify every sentence in `test`, pickle the label list to `name`,
    and return it."""
    predictions = []
    for idx, sentence in enumerate(test):
        print(idx)  # progress indicator: classification is slow
        predictions.append(SentiCR_classify_labels(sentence))
    with open(name, 'wb') as out_file:
        pickle.dump(predictions, out_file)
    return predictions
def sentiCR_predict_probas(test, name):
    """Compute class probabilities for every sentence in `test`, pickle the
    list to `name`, and return it."""
    predictions = []
    for idx, sentence in enumerate(test):
        print(idx)  # progress indicator: classification is slow
        predictions.append(SentiCR_classify_probas(sentence))
    with open(name, 'wb') as out_file:
        pickle.dump(predictions, out_file)
    return predictions
# -
def ROC_curve(y_true, y_pred, title, file_name):
    """Plot ROC curves for the class probabilities `y_pred` against `y_true`,
    save the 8x8-inch figure to `file_name` at 100 dpi, and display it."""
    skplt.metrics.plot_roc(y_true, y_pred, title=title)
    fig = plt.gcf()
    fig.set_size_inches(8, 8)
    plt.savefig(file_name, dpi=100)
    plt.show()
# # SentiCR dataset
# Load the SentiCR oracle spreadsheet: one sentence per row with its manual
# annotation. NOTE(review): annotations appear to be 0 / -1
# (non-negative / negative) based on the mappings used later — confirm.
sentiCR_df = pd.read_excel(SentiCR_trainning_file_path, names=['text', 'Annotation'])
sentiCR_df
sentiCR_df["Annotation"].value_counts()
# # SentiSW dataset
#
# <class 'pandas.core.frame.DataFrame'>
#
# RangeIndex: 3000 entries, 0 to 2999
#
# Data columns (total 3 columns):
#
# Column Non-Null Count Dtype
#
# - 0 issue_id 3000 non-null int64
# - 1 Annotation 3000 non-null object
# - 2 text 2993 non-null object
#
#
# First, we load the dataframe from the .csv file and remove rows containing NaN values in the 'text' column. This results in a dataframe containing 2993 rows, down from 3000.
# Load the SentiSW training set (variant with Chinese text already removed)
# and drop the rows whose 'text' is NaN (2993 rows remain out of 3000).
#sentiSW_df = pd.read_csv(SentiSW_trainning_file_path)
sentiSW_df = pd.read_csv(SentiSW_trainning_no_chinese_file_path)
sentiSW_df.dropna(inplace=True)
sentiSW_df
# Strip any remaining non-ASCII characters. BUG FIX: `regex=True` is passed
# explicitly — since pandas 2.0 Series.str.replace defaults to regex=False,
# which would treat the pattern as a literal string and match nothing.
sentiSW_df['text'] = sentiSW_df['text'].str.replace(r'[^\x00-\x7F]+', '', regex=True)
sentiSW_df['text'][28]
# # Gold Standard Dataset
# Load the Senti4SD gold-standard set and normalise the label casing
# (e.g. 'negative' -> 'Negative') so string comparisons later line up.
gold_df = pd.read_excel(gold_file_path)
gold_df['Final Label (majority voting)'] = gold_df['Final Label (majority voting)'].str.capitalize()
gold_df
# # SentiCR predictions on SentiCW's dataset
#
# classifier = SentiCR
#
# train_set = SentiCR dataset
#
# test_set = SentiSW dataset
# # Predict Labels
# +
# Predict sentiment labels for the GoldTest sentences with SentiCR+.
# Set `load = True` to reuse previously pickled predictions instead of
# re-running the (slow) classifier.
load = False
#test = sentiSW_df['text'].tolist()
test = gold_df['Text'].tolist()
# ORIGINAL SENTICR
#name = 'pickled-predictions/sentiCR_label_predictions_on_sentiSW_dataset.pkl'
#name = 'pickled-predictions/sentiCR_label_predictions_on_sentiSW_NO_CHINESE_dataset.pkl'
# SENTICR+
name = 'pickled-predictions/SentiCR+ (SentiCR + converted SentiSW, No Chinese Characters) Labels Predictions on GoldTest Dataset.pkl'
if(load):
    with open(name, 'rb') as f:
        sentiCR_predictions = pickle.load(f)
else:
    sentiCR_predictions = sentiCR_predict_labels(test, name)
sentiCR_predictions
# -
# Turn the numeric labels into human-readable ones
# (0.0 -> 'Non-Negative', -1.0 -> 'Negative').
a = pd.DataFrame(sentiCR_predictions, columns=['SentiCR Classification'])
a['SentiCR Classification'] = a['SentiCR Classification'].map({0.0 : 'Non-Negative', -1.0 : 'Negative'})
a
# +
# Build a side-by-side sheet: manual gold label | SentiCR+ label | sentence.
text = gold_df.filter(['Final Label (majority voting)'], axis=1)
#text['Final Label (majority voting)'] = text['Final Label (majority voting)'].map({'Negative': -1, 'Neutral': 0, 'Positive' : 1})
t = gold_df.filter(['Text'], axis=1)
#binary_sentiSW_df['Annotation'] = pd.Series(sentiSW, index=binary_sentiSW_df.index)
#sentiCR_plus = pd.concat([sentiCR_df, binary_sentiSW_df], ignore_index=True)
#sentiCR_plus
df = pd.concat([text, a, t], axis=1, ignore_index=False)
df
# +
import openpyxl
# NOTE(review): engine='xlsxwriter' is requested although openpyxl is the
# module imported above — presumably only one of the two is actually needed.
df.to_excel ('SentiCR+ (SentiCR + converted SentiSW) GoldTest.xlsx', index = False, header=True, engine='xlsxwriter')
# -
# # Predict Probabilities
# +
# Predict class probabilities for the GoldTest sentences (same load/compute
# switch as the label-prediction cell above).
load = False
#test = sentiSW_df['text'].tolist()
test = gold_df['Text'].tolist()
# ORIGINAL SENTICR
#name = 'pickled-predictions/sentiCR_probas_predictions_on_sentiSW_NO_CHINESE_dataset.pkl'
# SENTICR+
name = 'pickled-predictions/SentiCR+ (SentiCR + converted SentiSW, No Chinese Characters) Probability Predictions on GoldTest Dataset.pkl'
if(load):
    with open(name, 'rb') as f:
        sentiCR_predictions = pickle.load(f)
else:
    sentiCR_predictions = sentiCR_predict_probas(test, name)
sentiCR_predictions
# -
# Each prediction is wrapped in an extra list; [i][0] unwraps it, and
# convert_to_binary_prediction keeps the two class probabilities.
converted_predictions = [convert_to_binary_prediction(sentiCR_predictions[i][0]) for i in range(len(sentiCR_predictions))]
converted_predictions
# Collapse the three gold labels to the binary Negative / Non-Negative task.
#y_true = ['Non-Negative' if x != 'Negative' else 'Negative' for x in sentiSW_df['Annotation']]
y_true = ['Non-Negative' if x != 'Negative' else 'Negative' for x in gold_df['Final Label (majority voting)']]
y_true
# +
# Plot and save the ROC curve for the probability predictions.
#roc_name = 'visualizations/sentiCR_predictions_on_sentiSW_dataset.png'
file_name = 'visualizations/SentiCR+ (SentiCR + converted SentiSW, No Chinese Characters) predictions on GoldTest dataset.png'
title = 'SentiCR+ (SentiCR + converted SentiSW, No Chinese Characters) predictions on GoldTest'
#converted_predictions = [convert_to_binary_prediction(sentiCR_predictions[i][0]) for i in range(len(sentiCR_predictions))]
#y_true = ['Non-Negative' if x != 'Negative' else 'Negative' for x in sentiSW_df['Annotation']]
#y_true = ['Non-Negative' if x != 'Negative' else 'Negative' for x in gold_df['Final Label (majority voting)']]
ROC_curve(y_true, converted_predictions, title, file_name)
# -
# # Metrics Report
# +
# Reload the pickled SentiCR+ label predictions for GoldTest
# (load = True: the classifier is not re-run here).
load = True
#test = sentiSW_df['text'].tolist()
test = gold_df['Text'].tolist()
# ORIGINAL SENTICR
#name = 'pickled-predictions/sentiCR_label_predictions_on_sentiSW_dataset.pkl'
# SENTICR+
name = 'pickled-predictions/SentiCR+ (SentiCR + converted SentiSW, No Chinese Characters) Labels Predictions on GoldTest Dataset.pkl'
if(load):
    with open(name, 'rb') as f:
        sentiCR_predictions = pickle.load(f)
else:
    sentiCR_predictions = sentiCR_predict_labels(test, name)
sentiCR_predictions
# -
# Binary predicted labels: 0 -> Non-Negative, anything else -> Negative.
y_pred = ['Non-Negative' if i[0] == 0 else 'Negative' for i in sentiCR_predictions]
y_pred
#y_true = ['Non-Negative' if x != 'Negative' else 'Negative' for x in sentiSW_df['Annotation']]
y_true = ['Non-Negative' if x != 'Negative' else 'Negative' for x in gold_df['Final Label (majority voting)']]
y_true
# +
# Precision / recall / F1 under the three sklearn averaging schemes.
from sklearn.metrics import precision_recall_fscore_support
precision_recall_fscore_support(y_true, y_pred, average='macro')
# -
precision_recall_fscore_support(y_true, y_pred, average='micro')
precision_recall_fscore_support(y_true, y_pred, average='weighted')
# Build an export sheet: SentiSW manual annotation | numeric SentiCR label | text.
# NOTE(review): `t` here is carried over from an earlier cell (GoldTest 'Text'
# column) while the annotations come from sentiSW_df — confirm the row
# alignment is intentional.
e = pd.DataFrame()
a = pd.DataFrame([int(i[0]) for i in sentiCR_predictions] , columns=['SentiCR classification'])
a
sentiSW_df
#t['SentiSW Manual'] = sentiSW_df.filter(['Annotation'], axis=1)
t['SentiSW Manual'] = sentiSW_df['Annotation'].map({'Negative': -1, 'Neutral': 0, 'Positive' : 1})
u = pd.DataFrame()
u['Text'] = sentiSW_df['text']
#t.drop(columns=['Text'], inplace=True)
t
import openpyxl
e = pd.concat([t, a, u], axis=1)
e
e.to_excel ('sentiCR_sentiSW_NO_CHINESE.xlsx', index = False, header=True, engine='xlsxwriter')
# # Create Contradictions sheet
# +
# Build the "contradictions" sheet: rows where the SentiCR+ prediction
# disagrees with the GoldTest manual label on the binary task.
load = True
#gold_df = gold_df.reset_index()
test = gold_df['Text'].tolist()
name = 'pickled-predictions/SentiCR+ (SentiCR + converted SentiSW, No Chinese Characters) Labels Predictions on GoldTest Dataset.pkl'
if(load):
    with open(name, 'rb') as f:
        sentiCR_predictions = pickle.load(f)
else:
    sentiCR_predictions = sentiCR_predict_labels(test, name)
y_pred = ['Negative' if sentiCR_predictions[i][0] == -1 else 'Non-Negative' for i in range(len(sentiCR_predictions))]
y_pred = pd.Series(y_pred, name='SentiCR classification')
y_pred
# Collect only the disagreeing rows (manual Negative vs predicted
# Non-Negative, or the reverse).
aux = []
for i in range(len(gold_df)):
    if( (gold_df['Final Label (majority voting)'][i] == 'Negative' and y_pred[i] == 'Non-Negative') or (gold_df['Final Label (majority voting)'][i] != 'Negative' and y_pred[i] == 'Negative') ):
        aux.append([gold_df['Final Label (majority voting)'][i], y_pred[i], gold_df['Text'][i]])
#     if(gold_df['Final Label (majority voting)'][i] != y_pred[i]):
#         aux.append([gold_df['Final Label (majority voting)'][i], y_pred[i], gold_df['Text'][i]])
diff = pd.DataFrame(aux, columns=['GoldTest Manual', 'SentiCR classification', 'Text'])
diff
# -
import openpyxl
diff.to_excel ('SentiCR+ (SentiCR + converted SentiSW, No Chinese Characters) Contradictions.xlsx', index = False, header=True, engine='xlsxwriter')
# Sanity print: manual label next to the predicted one for every row.
for i in range(len(gold_df)):
    print("{} {}".format(gold_df['Final Label (majority voting)'][i], y_pred[i]))
#     if( (gold_df['Final Label (majority voting)'][i] != 'Negative' and y_pred[i] == 'Negative') ):
#         print('oi')
# Numeric gold labels (-1 = Negative, 0 = everything else) for direct
# comparison with the numeric predictions below.
gold_df = gold_df.reset_index()
gold = [-1 if i == 'Negative' else 0 for i in gold_df['Final Label (majority voting)']]
gold
# +
# Same contradiction extraction, but comparing the raw numeric labels.
#gold_df = gold_df.reset_index()
gold = [-1 if i == 'Negative' else 0 for i in gold_df['Final Label (majority voting)']]
aux = []
for i in range(len(gold_df)):
    if sentiCR_predictions[i][0] != gold[i]:
        aux.append([gold[i], int(sentiCR_predictions[i][0]), gold_df['Text'][i]])
diff = pd.DataFrame(aux, columns=['GoldTest Manual', 'SentiCR classification', 'Text'])
#diff
import openpyxl
diff.to_excel ('sentiCR_GoldTest_contradictions.xlsx', index = False, header=True, engine='xlsxwriter')
# -
len(sentiSW_df)
sentiCR_predictions
# +
# Contradictions against the SentiSW manual annotations instead of GoldTest.
# NOTE(review): `df` is rebound to a plain list here although it holds a
# DataFrame in other cells — execution order matters in this notebook.
sentiSW_df = sentiSW_df.reset_index()
df = []
for i in sentiSW_df['Annotation']:
    if i == 'Neutral':
        df.append(0)
    elif i == 'Negative':
        df.append(-1)
    else:
        df.append(1)
aux = []
for i in range(len(sentiSW_df)):
    if sentiCR_predictions[i][0] != df[i]:
        aux.append([df[i], int(sentiCR_predictions[i][0]), sentiSW_df['text'][i]])
diff = pd.DataFrame(aux, columns=['SentiSW Manual', 'SentiCR classification', 'Text'])
#diff
import openpyxl
diff.to_excel ('sentiCR_sentiSW_contradictions_loll.xlsx', index = False, header=True, engine='xlsxwriter')
# -
# # Create SentiCR+ (SentiCR + SentiSW converted) train set
# +
# Build the SentiCR+ training set: the original SentiCR oracle plus the
# SentiSW rows converted to the binary scheme (-1 = Negative, 0 = other).
#gold_df = gold_df.reset_index()
sentiSW = [-1 if i == 'Negative' else 0 for i in sentiSW_df['Annotation']]
binary_sentiSW_df = sentiSW_df.filter(['text'], axis=1)
binary_sentiSW_df['Annotation'] = pd.Series(sentiSW, index=binary_sentiSW_df.index)
sentiCR_plus = pd.concat([sentiCR_df, binary_sentiSW_df], ignore_index=True)
sentiCR_plus
# +
import openpyxl
sentiCR_plus.to_excel ('sentiCR_plus.xlsx', index = False, engine='xlsxwriter')
# -
# Round-trip check: the combined sheet can be read back.
test = pd.read_excel(SentiCR_plus_trainning_file_path)
test
# # Create SentiCR+ (SentiCR + SentiCR preds)
# +
# Alternative training set: SentiCR oracle plus SentiSW texts labelled with
# SentiCR's OWN predictions (self-labelling) rather than the manual labels.
preds = [int(sentiCR_predictions[i][0]) for i in range(len(sentiCR_predictions))]
preds = pd.Series(preds, name='Annotation')
preds
text = sentiSW_df.filter(['text'], axis=1)
text
classification = pd.concat([text, preds], ignore_index=False, axis=1)
df = pd.concat([sentiCR_df, classification], ignore_index=True, axis=0)
df
# +
# Export the self-labelled SentiCR+ training sheet.
import openpyxl
# BUG FIX: pandas DataFrame.to_excel has no `ignore_headers` keyword —
# passing it raises TypeError. `header=True` (column headers ARE written,
# matching every other export in this notebook) is the intended argument.
df.to_excel ('SentiCR+ (SentiCR + predicted SentiSW).xlsx', index = False, header=True, engine='xlsxwriter')
# -
| .ipynb_checkpoints/SentiCR_classification-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# check a given year is leap year or not, using nested if-else
year = int(input("Enter a year : "))
if (year % 4) == 0:
    if (year % 100) == 0:
        if (year % 400) == 0:
            print("{0} is a leap year".format(year))
        else:
            print("{0} is not a leap year".format(year))
    else:
        # BUG FIX: a year divisible by 4 but NOT by 100 (e.g. 2020) IS a
        # leap year; the original branch printed "not a leap year" here,
        # which is why this cell was labelled "WRONG CONCEPT".
        print("{0} is a leap year".format(year))
else:
    print("{0} is not a leap year".format(year))
# +
import calendar
def is_leap(year):
    """Return True when `year` is a Gregorian leap year (stdlib rule)."""
    return bool(calendar.isleap(year))
is_leap(2000)
# -
is_leap(1900)
is_leap(2020)
# +
# ------------------------RIGHT CONCEPT--------------------------------------------------
def is_leap(year):
    """Return True for Gregorian leap years: divisible by 4, except century
    years, which must also be divisible by 400."""
    return year % 4 ==0 and (year % 100 != 0 or year % 400 == 0)
year = int(input())
print(is_leap(year))
# +
# Same check, with input() inlined into the call as a keyword argument.
def is_leap(year):
    """Return True for Gregorian leap years (same rule as the cell above)."""
    return year % 4 ==0 and (year % 100 != 0 or year % 400 == 0)
print(is_leap(year = int(input())))
# -
# Print 1..n with no separators: first with an explicit loop, then in one
# call using argument unpacking.
n = int(input())
for i in range(1,n+1): # range's upper bound is exclusive, so n+1 includes n
    print(i,end='') # end='' suppresses the newline so all digits share one line
print(*range(1,int(input())+1), sep='') # * unpacks the range; sep='' joins values with nothing
# (print's default sep is a space, hence sep='' — using end here would not help.)
# +
# matrix_script (HackerRank "Matrix Script" exercise)
# help from youtube
n,m = map(int,input().split()) # read the matrix dimensions, then the rows below
l = list()
for i in range(n):
    l.append(input())
print(l)
l=list(zip(*l)) # transpose: zip(*rows) regroups the characters column by column
print(l)
# s = '' # create an empty string
# for i in l:
#     s=s+''.join(i) # make the list into string.
# print(s)
# -
# Flatten the transposed columns into one string (a column-major read).
s = ''
for i in l:
    s=s+''.join(i)
print(s)
import re
s=re.sub(r'\b[^a-zA-Z0-9]+\b',r' ',s) # collapse each run of non-alphanumerics BETWEEN word characters into a single space
print(s)
| 5Oct_HackerRank.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Plotting
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Topics
# - Packages
# - Plotting & visualization
# + [markdown] slideshow={"slide_type": "slide"}
# ## Packages
# Julia has over 4000 registered packages, making packages a huge part of the Julia ecosystem.
#
# Even so, the package ecosystem still has some growing to do. Notably, we have first class function calls to other languages, providing excellent foreign function interfaces. We can easily call into python or R, for example, with `PyCall` or `Rcall`.
#
# This means that you don't have to wait until the Julia ecosystem is fully mature, and that moving to Julia doesn't mean you have to give up your favorite package/library from another language!
#
# To see all available packages, check out
#
# https://github.com/JuliaRegistries/General or https://juliaobserver.com/
# + [markdown] slideshow={"slide_type": "slide"}
# ## Installing packages
# For now, let's learn how to use a package.
#
# Before anything, we have to activate the package manager by running `using Pkg`.
#
# The first time you use a package on a given Julia installation, you need to explicitly add it
# + slideshow={"slide_type": "fragment"}
using Pkg
Pkg.add("Example")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Using packages
# Every time you use Julia (start a new session at the REPL, or open a notebook for the first time, for example), you load the package with the `using` keyword
# + slideshow={"slide_type": "fragment"}
using Example
# + [markdown] slideshow={"slide_type": "fragment"}
# In the source code of `Example.jl` at https://github.com/JuliaLang/Example.jl/blob/master/src/Example.jl we see the following function declared
#
# ```julia
# hello(who::String) = "Hello, $who"
# ```
# Having loaded `Example`, we should now be able to call `hello`
# + slideshow={"slide_type": "fragment"}
hello("Julia!")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Demonstration with `Colors`
# Let's play with the `Colors` package to see this in action.
#
# NOTE: Notebooks are nicely integrated to show us the colors in realtime!
# + slideshow={"slide_type": "fragment"}
Pkg.add("Colors")
# + slideshow={"slide_type": "fragment"}
using Colors
# + slideshow={"slide_type": "fragment"}
hue = 180 #[0,360] hue of the color, try changing me!
N = 10 #number of colors to create, try changing me too!
palette = sequential_palette(hue, N)
# + slideshow={"slide_type": "skip"}
# NOTE: each of the next cells overwrites `palette`; whichever ran last
# determines the colors used below.
palette = distinguishable_colors(64) #Maximally distinguishable colors; try me out too!
# + slideshow={"slide_type": "skip"}
hueLeft = 12
hueRight = 225
palette = diverging_palette(hueLeft, hueRight, 20) # 20-color palette diverging between the two hues
# + slideshow={"slide_type": "slide"}
rand(palette, 8, 8) #create 8x8 random number matrix of different values in `palette`.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Interoperability
# There are many existing codes and libraries — why not re-use them? One of the strengths of Julia is its interoperability.
#
# C and Fortran, for example are natively supported. But many other languages are just one package away.
#
# Take python for example:
# + slideshow={"slide_type": "fragment"}
Pkg.add("PyCall")
using PyCall
# Import scipy.optimize and use Newton's method to solve cos(x) - x = 0.
@pyimport scipy.optimize as so
so.newton(x -> cos(x) - x, 1)
# -
# Or calling a function from the C standard library:
t = ccall(:clock, Int32, ())
# Or specifying the library (necessary if it's not standard)
# NOTE(review): C printf returns an int and expects a format string as its
# first argument; declaring a Float64 return and passing π as the only
# argument looks wrong — confirm this demo call behaves as intended.
ccall((:printf, "libc.so.6"), Float64, (Float64,), π)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Different libraries for different languages
# - Python with [PyCall](https://github.com/JuliaPy/PyCall.jl)
# - R with [RCall](https://github.com/JuliaInterop/RCall.jl)
# - C++ with [CxxWrap](https://github.com/JuliaInterop/CxxWrap.jl)
# - Matlab with [MATLAB](https://github.com/JuliaInterop/MATLAB.jl)
# - Java with [JavaCall](https://github.com/JuliaInterop/JavaCall.jl)
# - Mathematica with [Mathematica](https://github.com/JuliaInterop/Mathematica.jl)
# - ObjectiveC with [ObjectiveC](https://github.com/JuliaInterop/ObjectiveC.jl)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Plotting
#
# There are few different ways to plot in Julia.
#
# Here we'll use the `Plots` [package](http://docs.juliaplots.org/latest/) that tries to unify many of the underlying visualization engines to have a similar interface.
# + slideshow={"slide_type": "fragment"}
using Pkg
Pkg.add("Plots")
using Plots
# + [markdown] slideshow={"slide_type": "slide"}
# One of the advantages to `Plots.jl` is that it allows you to seamlessly change backends. For example, if you are familiar with the matplotlib library from Python, then you can keep using that via the `PyPlot` [package](https://github.com/JuliaPy/PyPlot.jl).
#
# Engines/backends include:
# - [Plotly](https://plot.ly/julia/): Plotly.jl package (`plotly()` to activate)
# - default for Plots if nothing else is installed
# - interactive plots in browser
# - `PlotlyJS` (activate with `plotlyjs()`) is the local offline version
# - [PyPlot](https://github.com/JuliaPy/PyPlot.jl): Matplotlib interface (`pyplot()` to activate)
# - All the bells and whistles of Matplotlib
# - [GR](https://github.com/jheinen/GR.jl): GR framework interface (`gr()` to activate)
# - [UnicodePlots](https://github.com/Evizero/UnicodePlots.jl): (`unicodeplots()` to activate)
# - Plots inside terminal!
#
# Also:
# - [Gadfly](http://gadflyjl.org/stable/): Wickham-Wilkinson style grammar
# For R-styled plotting.
#
# See the [docs](http://docs.juliaplots.org/v1/backends/) for more info about different backends, and their pros & cons.
# + slideshow={"slide_type": "skip"}
#Pkg.add("PlotlyJS") #let's install our own local plotly
Pkg.add("PyPlot") #let's install our own local plotly
# + slideshow={"slide_type": "skip"}
#plotly() # Plotly.jl (needs Plotly package)
#plotlyjs() # local javascript server for plotly (needs PlotlyJS)
#gr() # GR backend, (needs GR package)
pyplot() # Matplotlib interface (needs PyPlot package and up-to-date system matplotlib)
#unicodeplots() # UnicodePlots backend (needs UnicodePlots package); NOTE: does not work in notebooks
# + [markdown] slideshow={"slide_type": "slide"}
# ## Simulation
# Let's make a nice plot of the state of our simulation.
# + slideshow={"slide_type": "fragment"}
# Since our simulation only has 4 states, we'll make a palette of 4 colors.
palette = distinguishable_colors(4)
# -
# Interesting choice. First we will need to assign colors to each cell in the simulation.
# + slideshow={"slide_type": "slide"}
# The following line loads the epidemic functions from a file
include("../epidemic_no_immunity.jl")
# Let's make some cells
cells = make_cells(32,32)
# +
"""
Map a simulation cell to a plotting color.

Uses the module-level 4-color `palette`: uninfected → palette[1],
infected → palette[2], recovered → palette[3], immune → palette[4].
Any other status falls through and implicitly returns `nothing`.
"""
function to_colors(cell)
    if cell.status == uninfected
        return palette[1]
    end
    if cell.status == infected
        return palette[2]
    end
    if cell.status == recovered
        return palette[3]
    end
    if cell.status == immune
        return palette[4]
    end
end
# Broadcast over the whole grid: one color per cell.
to_colors.(cells)
# +
# We can pass this to plot() to get a nicer image
plot(to_colors.(cells),legend=false, border=:none)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Random Walk
# Let's take our visualization to the next dimension. Here is a simple plot to visualize the random walk in 3D.
# + slideshow={"slide_type": "skip"}
#activate Pyplot backend (if you have not done it previously)
using Plots
pyplot()
# + slideshow={"slide_type": "slide"}
n = 1000
rw() = cumsum(randn(n)) #cumulative sum of n standard-normal steps = 1-D random walk
# x/y/z coordinates
x = rw()
y = rw()
z = rw()
plot(x,y,z) # trace the walk as a 3-D path
# + [markdown] slideshow={"slide_type": "slide"}
# ## Animations
# Finally, let's add some time aspect to our visualizations. `Plots` has a simple `@gif` macro for this.
#
# It builds an animation using one frame per iteration loop, then creates an animated GIF from that.
# ```julia
# p = plot(1)
# @gif for x=0:0.1:5
# push!(p, 1, sin(x))
# end
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# Let's write up a simple Lorenz attractor. See https://en.wikipedia.org/wiki/Lorenz_system
# + slideshow={"slide_type": "fragment"}
n = 2000  # number of integration steps for the animation below
dt = 0.02 # time step of the explicit update
# NOTE(review): the classic Lorenz parameters are σ=10, ρ=28, β=8/3;
# σ=9.9 here is slightly off the textbook value — confirm it is deliberate.
σ, ρ, β = 9.9, 28., 8/3
global x, y, z = 1., 1., 1.
# Lorentz attractor
# One Euler-style step of the Lorenz system. Note that x is updated before
# being used in the y and z updates (sequential, not simultaneous, updates).
function lorentzAttractor(x,y,z)
    x += dt * σ*(y - x)
    y += dt * (x*(ρ - z) - y)
    z += dt * (x*y - β*z )
    return x, y, z
end
# + slideshow={"slide_type": "skip"}
#activate PyPlot backend (if you have not done it previously)
using Plots
# + slideshow={"slide_type": "slide"}
# initialize a 3D plot with 1 empty series
plt = path3d(1, xlim=(-25,25), ylim=(-25,25), zlim=(0,50),
                xlab = "x", ylab = "y", zlab = "z",
                title = "Lorenz Attractor", marker = 1)
# build an animated gif, keeping every 100th frame (the `every 100` clause
# below; an earlier comment incorrectly said every 10th)
@gif for i=1:n
    global x, y, z = lorentzAttractor(x,y,z)
    push!(plt, x, y, z)
end every 100
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## More examples
# For more examples, see [Plots](http://docs.juliaplots.org/latest) documentation.
# - [PyPlot](http://docs.juliaplots.org/latest/examples/pyplot/)
# - [GR](http://docs.juliaplots.org/latest/examples/gr/)
# - [Plotly](http://docs.juliaplots.org/latest/examples/plotlyjs/)
# - etc.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Summary
#
# Julia provides powerful interoperability APIs for calling into various other languages.
#
# These rely on external libraries such as:
# - Python with [PyCall](https://github.com/JuliaPy/PyCall.jl)
# - R with [RCall](https://github.com/JuliaInterop/RCall.jl)
#
# Same functionality also allows to use many mature visualization libraries via Julia.
| notebooks/06_plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="LqiaKasFjH82" colab_type="text"
# # Custom derivative rules for JAX-transformable Python functions
#
# *mattjj@ Mar 19 2020, last updated Mar 30 2020*
#
# There are two ways to define differentiation rules in JAX:
#
# 1. using `jax.custom_jvp` and `jax.custom_vjp` to define custom differentiation rules for Python functions that are already JAX-transformable; and
# 2. defining new `core.Primitive` instances along with all their transformation rules, for example to call into functions from other systems like solvers, simulators, or general numerical computing systems.
#
# This notebook is about #1. To read instead about #2, see the [notebook on adding primitives](https://jax.readthedocs.io/en/latest/notebooks/How_JAX_primitives_work.html).
#
# For an introduction to JAX's automatic differentiation API, see [The Autodiff Cookbook](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html). This notebook assumes some familiarity with [jax.jvp](https://jax.readthedocs.io/en/latest/jax.html#jax.jvp) and [jax.grad](https://jax.readthedocs.io/en/latest/jax.html#jax.grad), and the mathematical meaning of JVPs and VJPs.
# + [markdown] id="9Fg3NFNY-2RY" colab_type="text"
# ## TL;DR
# + [markdown] id="ZgMNRtXyWIW8" colab_type="text"
# ### Custom JVPs with `jax.custom_jvp`
# + id="zXic8tr--1PK" colab_type="code" colab={}
import jax.numpy as jnp
from jax import custom_jvp

@custom_jvp
def f(x, y):
    """f(x, y) = sin(x) * y, differentiated by the hand-written JVP below."""
    return jnp.sin(x) * y

@f.defjvp
def f_jvp(primals, tangents):
    """Custom JVP rule: d(sin(x)*y) = cos(x)*y*dx + sin(x)*dy."""
    (x, y) = primals
    (dx, dy) = tangents
    value = f(x, y)
    directional = jnp.cos(x) * dx * y + jnp.sin(x) * dy
    return value, directional
# + id="RrNf588X_kJF" colab_type="code" outputId="33bc1c36-b720-442f-b385-8ebe025319e3" colab={"base_uri": "https://localhost:8080/", "height": 85}
from jax import jvp, grad
# Primal value: sin(2) * 3.
print(f(2., 3.))
# JVP at (2, 3) in direction (1, 0): the tangent is cos(2) * 3.
y, y_dot = jvp(f, (2., 3.), (1., 0.))
print(y)
print(y_dot)
# grad differentiates w.r.t. the first argument by default: cos(2) * 3.
print(grad(f)(2., 3.))
# + id="1kHd3cKOWQgB" colab_type="code" colab={}
# Equivalent alternative using the defjvps convenience wrapper
@custom_jvp
def f(x, y):
    """f(x, y) = sin(x) * y; per-argument JVP rules attached via defjvps."""
    return jnp.sin(x) * y
# defjvps takes one rule per positional argument; each receives that
# argument's tangent, the primal output, and the primal inputs.
f.defjvps(lambda x_dot, primal_out, x, y: jnp.cos(x) * x_dot * y,
          lambda y_dot, primal_out, x, y: jnp.sin(x) * y_dot)
# + id="Zn81cHeYWVOw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="fb5e7e94-488f-4c30-a851-5e7c0a8173bf"
print(f(2., 3.))
y, y_dot = jvp(f, (2., 3.), (1., 0.))
print(y)
print(y_dot)
print(grad(f)(2., 3.))
# + [markdown] id="N2DOGCREWXFj" colab_type="text"
# ### Custom VJPs with `jax.custom_vjp`
# + id="35ScHqhrBwPh" colab_type="code" colab={}
from jax import custom_vjp

@custom_vjp
def f(x, y):
    """f(x, y) = sin(x) * y, differentiated via the custom VJP pair below."""
    return jnp.sin(x) * y

def f_fwd(x, y):
    """Forward pass: primal output plus residuals saved for f_bwd."""
    residuals = (jnp.cos(x), jnp.sin(x), y)
    return f(x, y), residuals

def f_bwd(residuals, cotangent):
    """Backward pass: cotangents w.r.t. (x, y) from the saved residuals."""
    cos_x, sin_x, y = residuals
    grad_x = cos_x * cotangent * y
    grad_y = sin_x * cotangent
    return (grad_x, grad_y)

f.defvjp(f_fwd, f_bwd)
# + id="HpSozxKUCXgp" colab_type="code" outputId="e515914b-a65b-49ba-a746-c343308451e7" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(f)(2., 3.))
# + [markdown] id="p5ypWA7XlZpu" colab_type="text"
# ## Example problems
#
# To get an idea of what problems `jax.custom_jvp` and `jax.custom_vjp` are meant to solve, let's go over a few examples. A more thorough introduction to the `jax.custom_jvp` and `jax.custom_vjp` APIs is in [the next section](#scrollTo=Dr0aNkBslfQf).
#
#
# + [markdown] id="AR02eyd1GQhC" colab_type="text"
# ### Numerical stability
#
# One application of `jax.custom_jvp` is to improve the numerical stability of differentiation.
# + [markdown] id="GksPXslaGPaW" colab_type="text"
#
# Say we want to write a function called `log1pexp`, which computes $x \mapsto \log ( 1 + e^x )$. We can write that using `jax.numpy`:
# + id="6lWbTvs40ET-" colab_type="code" outputId="739b4df9-7c64-4fb4-c0ce-c04fa7d86db4" colab={"base_uri": "https://localhost:8080/", "height": 34}
import jax.numpy as jnp
def log1pexp(x):
    """Naive log(1 + e^x). jnp.exp(x) overflows for large x, which makes the
    default autodiff derivative numerically unstable (0 * inf), as the
    surrounding text demonstrates; a stable custom-JVP version follows."""
    return jnp.log(1. + jnp.exp(x))
log1pexp(3.)
# + [markdown] id="PL36r_cD0oE8" colab_type="text"
# Since it's written in terms of `jax.numpy`, it's JAX-transformable:
# + id="XgtGKFld02UD" colab_type="code" outputId="a2722ad0-00d6-48d9-f5fa-cc87779bbe53" colab={"base_uri": "https://localhost:8080/", "height": 68}
from jax import jit, grad, vmap
print(jit(log1pexp)(3.))
print(jit(grad(log1pexp))(3.))
print(vmap(jit(grad(log1pexp)))(jnp.arange(3.)))
# + [markdown] id="o56Nr3V61PKS" colab_type="text"
# But there's a numerical stability problem lurking here:
# + id="sVM6iwIO22sB" colab_type="code" outputId="1a1624bc-15fa-43c0-a9d0-ac17c80e8097" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(log1pexp)(100.))
# + [markdown] id="Zu9sR2I73wuO" colab_type="text"
# That doesn't seem right! After all, the derivative of $x \mapsto \log (1 + e^x)$ is $x \mapsto \frac{e^x}{1 + e^x}$, and so for large values of $x$ we'd expect the value to be about 1.
#
# We can get a bit more insight into what's going on by looking at the jaxpr for the gradient computation:
# + id="dO6uZlYR4TVp" colab_type="code" outputId="36835248-669f-4025-8931-783072bed9f9" colab={"base_uri": "https://localhost:8080/", "height": 119}
from jax import make_jaxpr
make_jaxpr(grad(log1pexp))(100.)
# + [markdown] id="52HR5EW26PEt" colab_type="text"
# Stepping through how the jaxpr would be evaluated, we can see that the last line would involve multiplying values that floating point math will round to 0 and $\infty$, respectively, which is never a good idea. That is, we're effectively evaluating `lambda x: (1 / (1 + jnp.exp(x))) * jnp.exp(x)` for large `x`, which effectively turns into `0. * jnp.inf`.
#
# Instead of generating such large and small values, hoping for a cancellation that floats can't always provide, we'd rather just express the derivative function as a more numerically stable program. In particular, we can write a program that more closely evaluates the equal mathematical expression $1 - \frac{1}{1 + e^x}$, with no cancellation in sight.
#
# This problem is interesting because even though our definition of `log1pexp` could already be JAX-differentiated (and transformed with `jit`, `vmap`, ...), we're not happy with the result of applying standard autodiff rules to the primitives comprising `log1pexp` and composing the result. Instead, we'd like to specify how the whole function `log1pexp` should be differentiated, as a unit, and thus arrange those exponentials better.
#
# This is one application of custom derivative rules for Python functions that are already JAX transformable: specifying how a composite function should be differentiated, while still using its original Python definition for other transformations (like `jit`, `vmap`, ...).
#
# Here's a solution using `jax.custom_jvp`:
# + id="XQt6MAuTJewG" colab_type="code" colab={}
from jax import custom_jvp
@custom_jvp
def log1pexp(x):
    """log(1 + e^x) with a hand-written, numerically stable JVP rule."""
    return jnp.log(1. + jnp.exp(x))
@log1pexp.defjvp
def log1pexp_jvp(primals, tangents):
    """JVP using the derivative written as 1 - 1/(1 + e^x), avoiding the
    0 * inf product the default autodiff rule produces for large x."""
    x, = primals
    x_dot, = tangents
    ans = log1pexp(x)
    ans_dot = (1 - 1/(1 + jnp.exp(x))) * x_dot
    return ans, ans_dot
# + id="rhiMHulfKBIF" colab_type="code" outputId="45256c6b-19fd-4bac-baf8-4892298048bd" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(log1pexp)(100.))
# + id="9cLDuAo6KGUu" colab_type="code" outputId="c8194a60-7e48-4fb2-d78d-569b38841b42" colab={"base_uri": "https://localhost:8080/", "height": 68}
print(jit(log1pexp)(3.))
print(jit(grad(log1pexp))(3.))
print(vmap(jit(grad(log1pexp)))(jnp.arange(3.)))
# + [markdown] id="9sVUGbGkUOqO" colab_type="text"
# Here's a `defjvps` convenience wrapper to express the same thing:
# + id="xfQTp8F7USEM" colab_type="code" colab={}
@custom_jvp
def log1pexp(x):
    """log(1 + e^x); the stable derivative is attached below via defjvps."""
    return jnp.log(1. + jnp.exp(x))
# Single rule for the single argument: (tangent, primal output, primal input).
log1pexp.defjvps(lambda t, ans, x: (1 - 1/(1 + jnp.exp(x))) * t)
# + id="dtdh-PLaUsvw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="588a0619-0b5c-42ed-b2cf-955255b0f7a7"
print(grad(log1pexp)(100.))
print(jit(log1pexp)(3.))
print(jit(grad(log1pexp))(3.))
print(vmap(jit(grad(log1pexp)))(jnp.arange(3.)))
# + [markdown] id="V9tHAfrSF1N-" colab_type="text"
# ### Enforcing a differentiation convention
#
# A related application is to enforce a differentiation convention, perhaps at a boundary.
# + [markdown] id="l_6tdb-QGK-H" colab_type="text"
#
# Consider the function $f : \mathbb{R}_+ \mapsto \mathbb{R}_+$ with $f(x) = \frac{x}{1 + \sqrt{x}}$, where we take $\mathbb{R}_+ = [0, \infty)$. We might implement $f$ as a program like this:
# + id="AfF5P7x_GaSe" colab_type="code" colab={}
def f(x):
    # f(x) = x / (1 + sqrt(x)), intended as a function on the nonnegative reals.
    root = jnp.sqrt(x)
    return x / (1. + root)
# + [markdown] id="BVcEkF3ZGgv1" colab_type="text"
# As a mathematical function on $\mathbb{R}$ (the full real line), $f$ is not differentiable at zero (because the limit defining the derivative doesn't exist from the left). Correspondingly, autodiff produces a `nan` value:
# + id="piI0u5MiHhQh" colab_type="code" outputId="35bdee7d-41d7-44ed-b326-43d494cf71fa" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(f)(0.))
# + [markdown] id="IP0H2b7ZHkzD" colab_type="text"
# But mathematically if we think of $f$ as a function on $\mathbb{R}_+$ then it is differentiable at 0 [Rudin's Principles of Mathematical Analysis Definition 5.1, or Tao's Analysis I 3rd ed. Definition 10.1.1 and Example 10.1.6]. Alternatively, we might say as a convention we want to consider the directional derivative from the right. So there is a sensible value for the Python function `grad(f)` to return at `0.0`, namely `1.0`. By default, JAX's machinery for differentiation assumes all functions are defined over $\mathbb{R}$ and thus doesn't produce `1.0` here.
#
# We can use a custom JVP rule! In particular, we can define the JVP rule in terms of the derivative function $x \mapsto \frac{\sqrt{x} + 2}{2(\sqrt{x} + 1)^2}$ on $\mathbb{R}_+$,
# + id="ksHmCkcSKQJr" colab_type="code" colab={}
@custom_jvp
def f(x):
    # f(x) = x / (1 + sqrt(x)); plain autodiff yields nan at x = 0.
    return x / (1 + jnp.sqrt(x))
@f.defjvp
def f_jvp(primals, tangents):
    # Use the derivative (sqrt(x) + 2) / (2 (sqrt(x) + 1)**2), which is
    # well-defined at x = 0 (value 1), enforcing the one-sided derivative
    # convention described in the text above.
    x, = primals
    x_dot, = tangents
    ans = f(x)
    ans_dot = ((jnp.sqrt(x) + 2) / (2 * (jnp.sqrt(x) + 1)**2)) * x_dot
    return ans, ans_dot
# + id="Gsh9ZvMTKi1O" colab_type="code" outputId="fb288478-eb53-4362-aa46-24ea18003bec" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(f)(0.))
# + [markdown] id="Usbp_gxaVVea" colab_type="text"
# Here's the convenience wrapper version:
# + id="qXnrxIfaVYCs" colab_type="code" colab={}
@custom_jvp
def f(x):
    return x / (1 + jnp.sqrt(x))
# defjvps shorthand: the rule is called as (tangent_in, primal_out, *primals).
f.defjvps(lambda t, ans, x: ((jnp.sqrt(x) + 2) / (2 * (jnp.sqrt(x) + 1)**2)) * t)
# + id="uUU5qRmEViK1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="109052a3-2176-4d7f-b63a-09d86898ce8b"
print(grad(f)(0.))
# + [markdown] id="7J2A85wbSAmF" colab_type="text"
# ### Gradient clipping
#
# While in some cases we want to express a mathematical differentiation computation, in other cases we may even want to take a step away from mathematics to adjust the computation autodiff performs. One canonical example is reverse-mode gradient clipping.
#
# For gradient clipping, we can use `jnp.clip` together with a `jax.custom_vjp` reverse-mode-only rule:
# + id="8jfjSanIW_tJ" colab_type="code" colab={}
from functools import partial
from jax import custom_vjp
# lo and hi are marked non-differentiable; they are passed as the leading
# arguments to the fwd/bwd rules below.
@partial(custom_vjp, nondiff_argnums=(0, 1))
def clip_gradient(lo, hi, x):
    return x  # identity function
def clip_gradient_fwd(lo, hi, x):
    return x, None  # no residual values to save
def clip_gradient_bwd(lo, hi, _, g):
    # Clip the incoming cotangent; return one cotangent per differentiable
    # primal argument (just x), hence the 1-tuple.
    return (jnp.clip(g, lo, hi),)
clip_gradient.defvjp(clip_gradient_fwd, clip_gradient_bwd)
# + id="4OLU_vf8Xw2J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="36b37747-6330-4990-a056-04780c218d79"
import matplotlib.pyplot as plt
from jax import vmap
t = jnp.linspace(0, 10, 1000)
# Plot sin and its gradient (pointwise, via vmap of grad) over t.
plt.plot(jnp.sin(t))
plt.plot(vmap(grad(jnp.sin))(t))
# + id="iS8nRuBZYLcD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="a22b17bc-df65-4531-9427-40d9ce834d38"
def clip_sin(x):
    # Identity on the forward pass; on the backward pass the cotangent is
    # clipped to [-0.75, 0.75] before it reaches sin's gradient.
    clipped = clip_gradient(-0.75, 0.75, x)
    return jnp.sin(clipped)
plt.plot(clip_sin(t))
plt.plot(vmap(grad(clip_sin))(t))
# + [markdown] colab_type="text" id="CICQuI86WK4_"
# ### Python debugging
#
# Another application that is motivated by development workflow rather than numerics is to set a `pdb` debugger trace in the backward pass of reverse-mode autodiff.
#
# + [markdown] id="cgxMjNTrGjJn" colab_type="text"
#
# When trying to track down the source of a `nan` runtime error, or just examine carefully the cotangent (gradient) values being propagated, it can be useful to insert a debugger at a point in the backward pass that corresponds to a specific point in the primal computation. You can do that with `jax.custom_vjp`.
#
# We'll defer an example until the next section.
# + [markdown] id="IC7tEcr1-Fc5" colab_type="text"
# ### Implicit function differentiation of iterative implementations
#
# This example gets pretty deep in the mathematical weeds!
# + [markdown] id="szAt97t80hew" colab_type="text"
# Another application for `jax.custom_vjp` is reverse-mode differentiation of functions that are JAX-transformable (by `jit`, `vmap`, ...) but not efficiently JAX-differentiable for some reason, perhaps because they involve `lax.while_loop`. (It's not possible to produce an XLA HLO program that efficiently computes the reverse-mode derivative of an XLA HLO While loop because that would require a program with unbounded memory use, which isn't possible to express in XLA HLO, at least without side-effecting interactions through infeed/outfeed.)
#
# For example, consider this `fixed_point` routine which computes a fixed point by iteratively applying a function in a `while_loop`:
# + id="2uA8X2izXH2b" colab_type="code" colab={}
from jax.lax import while_loop

def fixed_point(f, a, x_guess):
    """Iterate x <- f(a, x) from x_guess until successive iterates agree to 1e-6."""
    def not_converged(carry):
        x_prev, x_cur = carry
        return jnp.abs(x_prev - x_cur) > 1e-6

    def step(carry):
        _, x_cur = carry
        # Keep the previous iterate alongside the new one for the convergence test.
        return x_cur, f(a, x_cur)

    _, x_star = while_loop(not_converged, step, (x_guess, f(a, x_guess)))
    return x_star
# + [markdown] id="p2xFQAte19sF" colab_type="text"
# This is an iterative procedure for numerically solving the equation $x = f(a, x)$ for $x$, by iterating $x_{t+1} = f(a, x_t)$ until $x_{t+1}$ is sufficiently close to $x_t$. The result $x^*$ depends on the parameters $a$, and so we can think of there being a function $a \mapsto x^*(a)$ that is implicitly defined by the equation $x = f(a, x)$.
#
# We can use `fixed_point` to run iterative procedures to convergence, for example running Newton's method to calculate square roots while only executing adds, multiplies, and divides:
# + id="rDDwM8bYYzRT" colab_type="code" colab={}
def newton_sqrt(a):
    """Compute sqrt(a) via Newton's method, phrased as a fixed-point iteration."""
    def newton_step(a, x):
        # Newton update for x**2 = a, using only add, multiply, and divide.
        return 0.5 * (x + a / x)
    return fixed_point(newton_step, a, a)
# + id="42Ydd7_6aLXU" colab_type="code" outputId="a8b367eb-0701-4a22-b4a5-7074abd375a0" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(newton_sqrt(2.))
# + [markdown] id="-yFtYWH13QWm" colab_type="text"
# We can `vmap` or `jit` the function as well:
# + id="t_YSXieT3Yyk" colab_type="code" outputId="796b9153-b5d7-4503-d496-480d9773cad7" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(jit(vmap(newton_sqrt))(jnp.array([1., 2., 3., 4.])))
# + [markdown] id="emwWIt3d3h1T" colab_type="text"
# We can't apply reverse-mode automatic differentiation because of the `while_loop`, but it turns out we wouldn't want to anyway: instead of differentiating through the implementation of `fixed_point` and all its iterations, we can exploit the mathematical structure to do something that is much more memory-efficient (and FLOP-efficient in this case, too!). We can instead use the implicit function theorem [Prop A.25 of Bertsekas's Nonlinear Programming, 2nd ed.], which guarantees (under some conditions) the existence of the mathematical objects we're about to use. In essence, we linearize at the solution and solve those linear equations iteratively to compute the derivatives we want.
#
# Consider again the equation $x = f(a, x)$ and the function $x^*$. We want to evaluate vector-Jacobian products like $v^\mathsf{T} \mapsto v^\mathsf{T} \partial x^*(a_0)$.
#
# At least in an open neighborhood around the point $a_0$ at which we want to differentiate, let's assume that the equation $x^*(a) = f(a, x^*(a))$ holds for all $a$. Since the two sides are equal as functions of $a$, their derivatives must be equal as well, so let's differentiate both sides:
#
# $\qquad \partial x^*(a) = \partial_0 f(a, x^*(a)) + \partial_1 f(a, x^*(a)) \partial x^*(a)$.
#
# Setting $A = \partial_1 f(a_0, x^*(a_0))$ and $B = \partial_0 f(a_0, x^*(a_0))$, we can write the quantity we're after more simply as
#
# $\qquad \partial x^*(a_0) = B + A \partial x^*(a_0)$,
#
# or, by rearranging,
#
# $\qquad \partial x^*(a_0) = (I - A)^{-1} B$.
#
# That means we can evaluate vector-Jacobian products like
#
# $\qquad v^\mathsf{T} \partial x^*(a_0) = v^\mathsf{T} (I - A)^{-1} B = w^\mathsf{T} B$,
#
# where $w^\mathsf{T} = v^\mathsf{T} (I - A)^{-1}$, or equivalently $w^\mathsf{T} = v^\mathsf{T} + w^\mathsf{T} A$, or equivalently $w^\mathsf{T}$ is the fixed point of the map $u^\mathsf{T} \mapsto v^\mathsf{T} + u^\mathsf{T} A$. That last characterization gives us a way to write the VJP for `fixed_point` in terms of a call to `fixed_point`! Moreover, after expanding $A$ and $B$ back out, we can see we need only to evaluate VJPs of $f$ at $(a_0, x^*(a_0))$.
#
# Here's the upshot:
# + id="g4jo-xlvdiym" colab_type="code" colab={}
from jax import vjp
# fixed_point with a custom VJP based on the implicit function theorem:
# instead of differentiating through the while_loop iterations (which
# reverse-mode autodiff cannot do), solve the linearized fixed-point
# equation for the cotangents, as derived in the text above.
@partial(custom_vjp, nondiff_argnums=(0,))
def fixed_point(f, a, x_guess):
    def cond_fun(carry):
        x_prev, x = carry
        return jnp.abs(x_prev - x) > 1e-6
    def body_fun(carry):
        _, x = carry
        return x, f(a, x)
    _, x_star = while_loop(cond_fun, body_fun, (x_guess, f(a, x_guess)))
    return x_star
def fixed_point_fwd(f, a, x_init):
    # Residuals for the backward pass: the parameters a and the solution x_star.
    x_star = fixed_point(f, a, x_init)
    return x_star, (a, x_star)
def fixed_point_rev(f, res, x_star_bar):
    # w^T = v^T + w^T A is itself a fixed point (A = df/dx at the solution),
    # so solve it with another call to fixed_point over rev_iter, then pull
    # the result back through df/da to obtain a_bar.
    a, x_star = res
    _, vjp_a = vjp(lambda a: f(a, x_star), a)
    a_bar, = vjp_a(fixed_point(partial(rev_iter, f),
                               (a, x_star, x_star_bar),
                               x_star_bar))
    # x_guess only affects convergence, not the solution: zero cotangent.
    return a_bar, jnp.zeros_like(x_star)
def rev_iter(f, packed, u):
    # One application of the cotangent map u^T -> v^T + u^T A.
    a, x_star, x_star_bar = packed
    _, vjp_x = vjp(lambda x: f(a, x), x_star)
    return x_star_bar + vjp_x(u)[0]
fixed_point.defvjp(fixed_point_fwd, fixed_point_rev)
# + id="iKzfT6d_mEoB" colab_type="code" outputId="31b93e63-6487-484b-e9a7-4b75c216fc01" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(newton_sqrt(2.))
# + id="Hmcpjr6gmtkO" colab_type="code" outputId="7f7c5025-11bb-48f1-e1df-9b30e05b6a19" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(grad(newton_sqrt)(2.))
print(grad(grad(newton_sqrt))(2.))
# + [markdown] id="DvVmlaPD7W-4" colab_type="text"
# We can check our answers by differentiating `jnp.sqrt`, which uses a totally different implementation:
# + id="jj_JnI9Pm4jg" colab_type="code" outputId="0efc9081-980f-47cd-95a2-476024545692" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(grad(jnp.sqrt)(2.))
print(grad(grad(jnp.sqrt))(2.))
# + [markdown] id="HowvqayEuy-H" colab_type="text"
# A limitation to this approach is that the argument `f` can't close over any values involved in differentiation. That is, you might notice that we kept the parameter `a` explicit in the argument list of `fixed_point`. While other JAX mechanisms can handle closed-over transformation-traced values in the arguments to higher-order functions (as is done for the control flow primitives like `lax.cond`, `lax.scan`, and `lax.while_loop` itself), `jax.custom_vjp` used as above cannot. A `fixed_point` routine that used a bit more of JAX's internals could have a more convenient and robust API.
# + [markdown] id="Dr0aNkBslfQf" colab_type="text"
# ## Basic usage of `jax.custom_jvp` and `jax.custom_vjp` APIs
#
# + [markdown] id="MojTOg4tmQNT" colab_type="text"
#
# ### Use `jax.custom_jvp` to define forward-mode (and, indirectly, reverse-mode) rules
#
# Here's a canonical basic example of using `jax.custom_jvp`:
# + id="nVkhbIFAOGZk" colab_type="code" colab={}
from jax import custom_jvp
import jax.numpy as jnp
# f :: a -> b
@custom_jvp
def f(x):
    return jnp.sin(x)
# f_jvp :: (a, T a) -> (b, T b)
def f_jvp(primals, tangents):
    # Return the primal output paired with the tangent output; the tangent
    # output must be a linear function of the input tangent t.
    x, = primals
    t, = tangents
    return f(x), jnp.cos(x) * t
f.defjvp(f_jvp)
# + id="fxhlECvW7Krj" colab_type="code" outputId="a727d3c5-ab82-4c0c-f77f-ac0500e84f5c" colab={"base_uri": "https://localhost:8080/", "height": 68}
from jax import jvp
print(f(3.))
y, y_dot = jvp(f, (3.,), (1.,))
print(y)
print(y_dot)
# + [markdown] id="JaoQVRzSQ9Qd" colab_type="text"
# In words, we start with a primal function `f` that takes inputs of type `a` and produces outputs of type `b`. We associate with it a JVP rule function `f_jvp` that takes a pair of inputs representing the primal inputs of type `a` and the corresponding tangent inputs of type `T a`, and produces a pair of outputs representing the primal outputs of type `b` and tangent outputs of type `T b`. The tangent outputs should be a linear function of the tangent inputs.
# + [markdown] id="1xGky7yMOavq" colab_type="text"
# You can also use `f.defjvp` as a decorator, as in
#
# ```python
# @custom_jvp
# def f(x):
# ...
#
# @f.defjvp
# def f_jvp(primals, tangents):
# ...
# ```
# + [markdown] id="e9R-ppvdQIOC" colab_type="text"
# Even though we defined only a JVP rule and no VJP rule, we can use both forward- and reverse-mode differentiation on `f`. JAX will automatically transpose the linear computation on tangent values from our custom JVP rule, computing the VJP as efficiently as if we had written the rule by hand:
# + id="hl9Io86pQD6s" colab_type="code" outputId="e8794287-da8f-47e8-a1ce-eaa41831a591" colab={"base_uri": "https://localhost:8080/", "height": 51}
from jax import grad
print(grad(f)(3.))
print(grad(grad(f))(3.))
# + [markdown] id="MRlKe5D90svj" colab_type="text"
# For automatic transposition to work, the JVP rule's output tangents must be linear as a function of the input tangents. Otherwise a transposition error is raised.
# + [markdown] id="GRu-0yg96lXE" colab_type="text"
# Multiple arguments work like this:
# + id="JFLXlXuq6pRf" colab_type="code" colab={}
@custom_jvp
def f(x, y):
    """Compute x**2 * y, with the multi-argument JVP rule registered below."""
    return x ** 2 * y

@f.defjvp
def f_jvp(primals, tangents):
    # One primal and one tangent per positional argument.
    x, y = primals
    dx, dy = tangents
    out = f(x, y)
    # Total derivative: (2xy) dx + (x**2) dy.
    out_dot = (2 * x * y) * dx + (x ** 2) * dy
    return out, out_dot
# + id="QpKwA0oA8DfE" colab_type="code" outputId="44aba2b3-200c-4cb7-f275-4cd9404d4068" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(f)(2., 3.))
# + [markdown] id="YPsPS3rdaGo2" colab_type="text"
# The `defjvps` convenience wrapper lets us define a JVP for each argument separately, and the results are computed separately then summed:
#
#
# + id="CsQIUhUkajua" colab_type="code" colab={}
@custom_jvp
def f(x):
    return jnp.sin(x)
# defjvps shorthand: a tangent rule called as (tangent_in, primal_out, *primals).
f.defjvps(lambda t, ans, x: jnp.cos(x) * t)
# + id="zfSgXrPEap-i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b9401e3a-cbce-4aca-bad6-490524cfce7e"
print(grad(f)(3.))
# + [markdown] id="iYUCLJghbPiP" colab_type="text"
# Here's a `defjvps` example with multiple arguments:
# + id="Vx4Jv9s9bCi1" colab_type="code" colab={}
@custom_jvp
def f(x, y):
    return x ** 2 * y
# One rule per positional argument; their contributions are summed.
f.defjvps(lambda x_dot, primal_out, x, y: 2 * x * y * x_dot,
          lambda y_dot, primal_out, x, y: x ** 2 * y_dot)
# + id="o9ezUYsjbbvC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="67a9f25d-89ad-4abc-908b-8a97062da239"
print(grad(f)(2., 3.))
print(grad(f, 0)(2., 3.)) # same as above
print(grad(f, 1)(2., 3.))
# + [markdown] id="nuIUkaxibVfD" colab_type="text"
# As a shorthand, with `defjvps` you can pass a `None` value to indicate that the JVP for a particular argument is zero:
# + id="z4z3esdZbTzQ" colab_type="code" colab={}
@custom_jvp
def f(x, y):
    return x ** 2 * y
# None declares a zero JVP for the corresponding argument (here y).
f.defjvps(lambda x_dot, primal_out, x, y: 2 * x * y * x_dot,
          None)
# + id="jOtQfp-5btSo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="ded61c3a-14a1-4965-b988-7a7c94eeaf53"
print(grad(f)(2., 3.))
print(grad(f, 0)(2., 3.)) # same as above
print(grad(f, 1)(2., 3.))
# + [markdown] id="kZ0yc-Ihoezk" colab_type="text"
# Calling a `jax.custom_jvp` function with keyword arguments, or writing a `jax.custom_jvp` function definition with default arguments, are both allowed so long as they can be unambiguously mapped to positional arguments based on the function signature retrieved by the standard library `inspect.signature` mechanism.
# + [markdown] id="3FGwfT67PDs9" colab_type="text"
# When you're not performing differentiation, the function `f` is called just as if it weren't decorated by `jax.custom_jvp`:
# + id="b-tB3xCHPRFt" colab_type="code" colab={}
@custom_jvp
def f(x):
    # This print fires whenever f itself runs (plain evaluation, jit, vmap, ...).
    print('called f!') # a harmless side-effect
    return jnp.sin(x)
@f.defjvp
def f_jvp(primals, tangents):
    # This print fires only when the rule runs, i.e. under differentiation.
    print('called f_jvp!') # a harmless side-effect
    x, = primals
    t, = tangents
    return f(x), jnp.cos(x) * t
# + id="xAlRea95PjA5" colab_type="code" outputId="b20e8a49-1ba3-4be0-dcb7-83b2b7cca0d5" colab={"base_uri": "https://localhost:8080/", "height": 51}
from jax import vmap, jit
print(f(3.))
# + id="dyD2ow4NmpI-" colab_type="code" outputId="19081712-d417-4678-ba19-d60f8a86bd78" colab={"base_uri": "https://localhost:8080/", "height": 85}
print(vmap(f)(jnp.arange(3.)))
print(jit(f)(3.))
# + [markdown] id="EzB75KZ5Pz7m" colab_type="text"
# The custom JVP rule is invoked during differentiation, whether forward or reverse:
# + id="hKF0xyAxPyLZ" colab_type="code" outputId="7919ce76-d1a0-478b-a9d4-cf6050250c97" colab={"base_uri": "https://localhost:8080/", "height": 68}
y, y_dot = jvp(f, (3.,), (1.,))
print(y_dot)
# + id="Z1KaEgA58MEG" colab_type="code" outputId="3554eb34-a95d-48b4-e291-f7aa0885c02f" colab={"base_uri": "https://localhost:8080/", "height": 68}
print(grad(f)(3.))
# + [markdown] id="o8JFxk3lQhOs" colab_type="text"
# Notice that `f_jvp` calls `f` to compute the primal outputs. In the context of higher-order differentiation, each application of a differentiation transform will use the custom JVP rule if and only if the rule calls the original `f` to compute the primal outputs. (This represents a kind of fundamental tradeoff, where we can't make use of intermediate values from the evaluation of `f` in our rule _and also_ have the rule apply in all orders of higher-order differentiation.)
# + id="B6PLJooTQgVp" colab_type="code" outputId="f53dba7d-fa93-4419-98b9-8c6568ac4741" colab={"base_uri": "https://localhost:8080/", "height": 85}
grad(grad(f))(3.)
# + [markdown] id="XNxAmFSsaaro" colab_type="text"
# You can use Python control flow with `jax.custom_jvp`:
# + id="kkXlSJL6adU2" colab_type="code" colab={}
@custom_jvp
def f(x):
    # Python control flow on concrete values is allowed in custom_jvp
    # functions and in their rules.
    if x > 0:
        return jnp.sin(x)
    else:
        return jnp.cos(x)
@f.defjvp
def f_jvp(primals, tangents):
    x, = primals
    x_dot, = tangents
    ans = f(x)
    # Deliberately made-up tangent rules, to show each branch is honored.
    if x > 0:
        return ans, 2 * x_dot
    else:
        return ans, 3 * x_dot
# + id="QCHmJ56Na2G3" colab_type="code" outputId="fcc3fd07-6a14-47eb-e86a-a820c9917a16" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(grad(f)(1.))
print(grad(f)(-1.))
# + [markdown] id="9cVdgR7ilt8l" colab_type="text"
# ### Use `jax.custom_vjp` to define custom reverse-mode-only rules
#
# While `jax.custom_jvp` suffices for controlling both forward- and, via JAX's automatic transposition, reverse-mode differentiation behavior, in some cases we may want to directly control a VJP rule, for example in the latter two example problems presented above. We can do that with `jax.custom_vjp`:
# + id="zAZk1n3dUw76" colab_type="code" colab={}
from jax import custom_vjp
import jax.numpy as jnp
# f :: a -> b
@custom_vjp
def f(x):
    return jnp.sin(x)
# f_fwd :: a -> (b, c)
def f_fwd(x):
    # Return the primal output plus residuals (here cos(x)) for the backward pass.
    return f(x), jnp.cos(x)
# f_bwd :: (c, CT b) -> CT a
def f_bwd(cos_x, y_bar):
    # Must return a tuple with one cotangent per primal argument.
    return (cos_x * y_bar,)
f.defvjp(f_fwd, f_bwd)
# + id="E8W-H2S0Ngdr" colab_type="code" outputId="62e3afa0-d10f-4e72-e567-9ee9b8dd7c3d" colab={"base_uri": "https://localhost:8080/", "height": 51}
from jax import grad
print(f(3.))
print(grad(f)(3.))
# + [markdown] id="yLING7qEVGGN" colab_type="text"
# In words, we again start with a primal function `f` that takes inputs of type `a` and produces outputs of type `b`. We associate with it two functions, `f_fwd` and `f_bwd`, which describe how to perform the forward- and backward-passes of reverse-mode autodiff, respectively.
#
# The function `f_fwd` describes the forward pass, not only the primal computation but also what values to save for use on the backward pass. Its input signature is just like that of the primal function `f`, in that it takes a primal input of type `a`. But as output it produces a pair, where the first element is the primal output `b` and the second element is any "residual" data of type `c` to be stored for use by the backward pass. (This second output is analogous to [PyTorch's save_for_backward mechanism](https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html).)
#
# The function `f_bwd` describes the backward pass. It takes two inputs, where the first is the residual data of type `c` produced by `f_fwd` and the second is the output cotangents of type `CT b` corresponding to the output of the primal function. It produces an output of type `CT a` representing the cotangents corresponding to the input of the primal function. In particular, the output of `f_bwd` must be a sequence (e.g. a tuple) of length equal to the number of arguments to the primal function.
# + [markdown] id="d1b5v67Oncfz" colab_type="text"
# So multiple arguments work like this:
# + id="IhMb64gkngAt" colab_type="code" colab={}
from jax import custom_vjp
@custom_vjp
def f(x, y):
    """Compute sin(x) * y, with a hand-written reverse-mode (VJP) rule."""
    return jnp.sin(x) * y
def f_fwd(x, y):
    # Residuals for the backward pass: cos(x), sin(x), and y.
    return f(x, y), (jnp.cos(x), jnp.sin(x), y)
def f_bwd(res, g):
    # Cotangents, one per primal argument:
    #   d/dx [sin(x) * y] = cos(x) * y
    #   d/dy [sin(x) * y] = sin(x)
    # (fixed: the y-cotangent previously carried a spurious minus sign)
    cos_x, sin_x, y = res
    return (cos_x * g * y, sin_x * g)
f.defvjp(f_fwd, f_bwd)
# + id="EnRtIhhLnkry" colab_type="code" outputId="4d28ce02-26e5-4d04-fb4a-767892844c88" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(f)(2., 3.))
# + [markdown] id="GwC26P9kn8qw" colab_type="text"
# Calling a `jax.custom_vjp` function with keyword arguments, or writing a `jax.custom_vjp` function definition with default arguments, are both allowed so long as they can be unambiguously mapped to positional arguments based on the function signature retrieved by the standard library `inspect.signature` mechanism.
# + [markdown] id="XfH-ae8bYt6-" colab_type="text"
# As with `jax.custom_jvp`, the custom VJP rule made up of `f_fwd` and `f_bwd` is not invoked if differentiation is not applied. If the function is evaluated, or transformed with `jit`, `vmap`, or other non-differentiation transformations, then only `f` is called.
# + id="s-_Dbqi-N5Ij" colab_type="code" colab={}
@custom_vjp
def f(x):
    # Fires on plain evaluation and under jit/vmap; the rules below do not.
    print("called f!")
    return jnp.sin(x)
def f_fwd(x):
    # Fires only on the forward pass of reverse-mode differentiation.
    print("called f_fwd!")
    return f(x), jnp.cos(x)
def f_bwd(cos_x, y_bar):
    # Fires only on the backward pass.
    print("called f_bwd!")
    return (cos_x * y_bar,)
f.defvjp(f_fwd, f_bwd)
# + id="r0aZ79OmOAR5" colab_type="code" outputId="e62fd6d9-ec63-4a41-c4cd-5be2c8095818" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(f(3.))
# + id="7ToB9BYlm6uN" colab_type="code" outputId="6835b89c-847a-4b82-ac19-1e19a7b9858c" colab={"base_uri": "https://localhost:8080/", "height": 85}
print(grad(f)(3.))
# + id="s1Pn_qCIODcF" colab_type="code" outputId="991797e7-ec9d-45b8-b88c-1548d9919334" colab={"base_uri": "https://localhost:8080/", "height": 68}
from jax import vjp
y, f_vjp = vjp(f, 3.)
print(y)
# + id="dvgQtDHaOHuo" colab_type="code" outputId="02447bcd-2885-4883-cc09-a2b352ee3864" colab={"base_uri": "https://localhost:8080/", "height": 51}
print(f_vjp(1.))
# + [markdown] id="qFIIpkFcZCNP" colab_type="text"
# **Forward-mode autodiff cannot be used on the `jax.custom_vjp` function** and will raise an error:
# + id="3RGQRbI_OSEX" colab_type="code" outputId="f971d7d3-b323-4467-bbc2-3157f3381b64" colab={"base_uri": "https://localhost:8080/", "height": 68}
from jax import jvp
try:
    # custom_vjp functions support reverse mode only; forward-mode jvp raises.
    jvp(f, (3.,), (1.,))
except TypeError as e:
    print('ERROR! {}'.format(e))
# + [markdown] id="u04I9j2dntAU" colab_type="text"
# If you want to use both forward- and reverse-mode, use `jax.custom_jvp` instead.
# + [markdown] id="YN97y7LEZbWV" colab_type="text"
# We can use `jax.custom_vjp` together with `pdb` to insert a debugger trace in the backward pass:
# + id="-DvRKsHPZk_g" colab_type="code" colab={}
import pdb
@custom_vjp
def debug(x):
    return x  # acts like identity
def debug_fwd(x):
    # Save the primal value as the residual so it can be inspected in pdb.
    return x, x
def debug_bwd(x, g):
    # Drop into pdb during the backward pass; x is the saved primal value
    # and g is the incoming cotangent.
    import pdb; pdb.set_trace()
    return g
debug.defvjp(debug_fwd, debug_bwd)
# + id="49GdkP4pZ2IV" colab_type="code" colab={}
def foo(x):
    squared = x ** 2
    # The pdb breakpoint fires at this point of the backward pass.
    squared = debug(squared)
    return jnp.sin(squared)
# + [markdown] id="sGLnRcPwaKoX" colab_type="text"
# ```python
# jax.grad(foo)(3.)
#
# > <ipython-input-113-b19a2dc1abf7>(12)debug_bwd()
# -> return g
# (Pdb) p x
# DeviceArray(9., dtype=float32)
# (Pdb) p g
# DeviceArray(-0.91113025, dtype=float32)
# (Pdb) q
# ```
# + [markdown] id="DaTfAJLAl1Lb" colab_type="text"
# ## More features and details
#
# + [markdown] id="LQF_UDApl_UV" colab_type="text"
# ### Working with `list` / `tuple` / `dict` containers (and other pytrees)
#
# You should expect standard Python containers like lists, tuples, namedtuples, and dicts to just work, along with nested versions of those. In general, any [pytrees](https://github.com/google/jax/blob/master/docs/notebooks/JAX_pytrees.ipynb) are permissible, so long as their structures are consistent according to the type constraints.
#
# Here's a contrived example with `jax.custom_jvp`:
# + id="6sDLZ3dAn3P2" colab_type="code" colab={}
from collections import namedtuple
Point = namedtuple("Point", ["x", "y"])
@custom_jvp
def f(pt):
    # Pytree (namedtuple) in, pytree (dict containing a tuple) out.
    x, y = pt.x, pt.y
    return {'a': x ** 2,
            'b': (jnp.sin(x), jnp.cos(y))}
@f.defjvp
def f_jvp(primals, tangents):
    # pt_dot mirrors pt's pytree structure; ans_dot mirrors f's output structure.
    pt, = primals
    pt_dot, = tangents
    ans = f(pt)
    ans_dot = {'a': 2 * pt.x * pt_dot.x,
               'b': (jnp.cos(pt.x) * pt_dot.x, -jnp.sin(pt.y) * pt_dot.y)}
    return ans, ans_dot
def fun(pt):
    # Scalar-valued wrapper so grad can be taken with respect to the Point.
    dct = f(pt)
    return dct['a'] + dct['b'][0]
# + id="My8pbOlPppJj" colab_type="code" outputId="8430db76-7c94-412d-d261-60614dff2dbc" colab={"base_uri": "https://localhost:8080/", "height": 34}
pt = Point(1., 2.)
print(f(pt))
# + id="a9qyiCAhqLd3" colab_type="code" outputId="4184570b-a4b1-4297-da32-746b80bfb63f" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(fun)(pt))
# + [markdown] id="BWLN9tu4qWQd" colab_type="text"
# And an analogous contrived example with `jax.custom_vjp`:
# + id="QkdbwGkJqS3J" colab_type="code" colab={}
@custom_vjp
def f(pt):
    x, y = pt.x, pt.y
    return {'a': x ** 2,
            'b': (jnp.sin(x), jnp.cos(y))}
def f_fwd(pt):
    # Save the whole input Point as the residual.
    return f(pt), pt
def f_bwd(pt, g):
    # g mirrors f's output pytree; the returned 1-tuple's entry mirrors
    # the input pytree (a Point).
    a_bar, (b0_bar, b1_bar) = g['a'], g['b']
    x_bar = 2 * pt.x * a_bar + jnp.cos(pt.x) * b0_bar
    y_bar = -jnp.sin(pt.y) * b1_bar
    return (Point(x_bar, y_bar),)
f.defvjp(f_fwd, f_bwd)
def fun(pt):
    # Scalar-valued wrapper so grad can be taken with respect to the Point.
    dct = f(pt)
    return dct['a'] + dct['b'][0]
# + id="3onW7t6nrJ4E" colab_type="code" outputId="75910cfc-dee5-461c-a646-2f9dcba8f674" colab={"base_uri": "https://localhost:8080/", "height": 34}
pt = Point(1., 2.)
print(f(pt))
# + id="ryyeKIXtrNpd" colab_type="code" outputId="e707a38e-f72f-40a6-ece4-499d21c9e5cb" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(fun)(pt))
# + [markdown] id="JKTNivxbmKWO" colab_type="text"
# ### Handling non-differentiable arguments
# + [markdown] id="7g9sXSp_uc36" colab_type="text"
# Some use cases, like the final example problem, call for non-differentiable arguments to be passed to functions with custom differentiation rules, and for those arguments to also be passed to the rules themselves. In the case of `fixed_point`, the function argument `f` was such a non-differentiable argument. A similar situation arises with `jax.experimental.odeint`.
#
# + [markdown] id="9yNIOzyBCvE5" colab_type="text"
# #### `jax.custom_jvp` with `nondiff_argnums`
#
# Use the optional `nondiff_argnums` parameter to `jax.custom_jvp` to indicate arguments like these. Here's an example with `jax.custom_jvp`:
# + id="b3YMxxTBvy0I" colab_type="code" colab={}
from functools import partial
# The function argument f is non-differentiable; nondiff_argnums passes it
# to the rule as a leading argument.
@partial(custom_jvp, nondiff_argnums=(0,))
def app(f, x):
    return f(x)
@app.defjvp
def app_jvp(f, primals, tangents):
    # primals/tangents cover only the differentiable arguments (just x).
    x, = primals
    x_dot, = tangents
    # Made-up rule (derivative declared to be 2) for demonstration.
    return f(x), 2. * x_dot
# + id="5W-yEw9IB34S" colab_type="code" outputId="0a0d493c-a1fd-4f76-8cec-f18522aa5774" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(app(lambda x: x ** 3, 3.))
# + id="zbVIlOmqB7_O" colab_type="code" outputId="280d4b0a-e68b-4996-dd50-d66d129c8eec" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(app, 1)(lambda x: x ** 3, 3.))
# + [markdown] id="-b_B_4WaBI2D" colab_type="text"
# Notice the gotcha here: no matter where in the argument list these parameters appear, they're placed at the *start* of the signature of the corresponding JVP rule. Here's another example:
# + id="9hokWmyHBgKK" colab_type="code" colab={}
@partial(custom_jvp, nondiff_argnums=(0, 2))
def app2(f, x, g):
    """Apply f after g, i.e. return f(g(x)); f and g are non-differentiable."""
    # (fixed: redundant double parentheses f(g((x))), now consistent with the rule)
    return f(g(x))
@app2.defjvp
def app2_jvp(f, g, primals, tangents):
    # Non-diff args (f, g) come first in the rule's signature, in index
    # order, regardless of their position in app2's own signature.
    x, = primals
    x_dot, = tangents
    # Made-up rule (derivative declared to be 3) for demonstration.
    return f(g(x)), 3. * x_dot
# + id="J7GsvJTgCfS0" colab_type="code" outputId="6963fd03-c952-467d-c117-39a53253b2b9" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(app2(lambda x: x ** 3, 3., lambda y: 5 * y))
# + id="kPP8Jt1CCb1X" colab_type="code" outputId="77f7d0a0-951b-44df-fb44-540a0e968072" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(app2, 1)(lambda x: x ** 3, 3., lambda y: 5 * y))
# + [markdown] id="ECbalHIkC4ts" colab_type="text"
# #### `jax.custom_vjp` with `nondiff_argnums`
# + [markdown] id="0u0jn4aWC8k1" colab_type="text"
# A similar option exists for `jax.custom_vjp`, and similarly the convention is that the non-differentiable arguments are passed as the first arguments to the rules, no matter where they appear in the original function's signature. Here's an example:
# + id="yCdu-_9GClWs" colab_type="code" colab={}
@partial(custom_vjp, nondiff_argnums=(0,))
def app(f, x):
    return f(x)
def app_fwd(f, x):
    # f arrives first (non-differentiable); save x as the residual.
    return f(x), x
def app_bwd(f, x, g):
    # Made-up cotangent rule (gradient declared to be 5) for demonstration.
    return (5 * g,)
app.defvjp(app_fwd, app_bwd)
# + id="qSgcWa1eDj4r" colab_type="code" outputId="6c34436a-1bdb-49db-a57f-b50f053c0cd4" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(app(lambda x: x ** 2, 4.))
# + id="tccagflcDmaz" colab_type="code" outputId="d28015b9-47a9-4adc-e643-3fea37ac1d46" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(grad(app, 1)(lambda x: x ** 2, 4.))
# + [markdown] id="BTEnNTk5D0sM" colab_type="text"
# See `clip_gradient` and `fixed_point` above for other usage examples.
| docs/notebooks/Custom_derivative_rules_for_Python_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Author: <NAME>
#
# Date: 4/10/2018
#
# Version: 1.0
#
# Environment: Python 3.6.1 and Jupyter notebook
#
# Libraries used: Main libraries used for assignment:
# * re (for regular expression, included in Anaconda Python 3.6)
# * sys (to display system version, included in Anaconda Python 3.6)
# * os (for changing file directory, included in Anaconda Python 3.6)
# * warnings (to ignore any warnings thrown while execution, included in Anaconda Python 3.6)
# * pandas(for creating dataframes, included in Anaconda Python 3.6)
# * matplotlib(for plotting dataframes, included in Anaconda Python 3.6)
# * numpy(for numeric calculations, included in Anaconda Python 3.6)
# * seaborn(for plotting graphs, included in Anaconda Python 3.6)
# * Few pictures are captured from Tableau
# * datetime(for verying date time, included in Anaconda Python 3.6)
# * math(to perform calculation, included in Anaconda Python 3.6)
# # Contents
#
# ### 1- Libraries
# ### 2- Read the csv file
# ### 3- Exploring Dataframe:
# * 3-1 Printing information of DataFrame
# * 3-2 Describing the DataFrame
# * 3-3 Displaying the top data
# * 3.4 Displaying null values for each feature
#
# ### 4 -Dropping duplicates and ID=null
#
# ### 5 - Fixing Departure Date
# * 5-1 Creating a Regex function to clean the date errors
# * 5-2 function to clean the date errors & return clean data format
#
# ### 6 - Latitude and Longitude
#
# ### 7 - Post Type
#
# ### 8 - Verifying distance
# * 8-1 Analysis - Calculate the distance using the formula
# * 8-2 Solution: Drop the Journey Distance Column and rename the calculated distance as the new Journey Distance
#
# ### 9- Origin Region
# * 9-1 Analysis - Finding Outliers
# * 9-2 Finding Outliers and separting them
# * 9-3 Applying KNN model
#
#
# ### 10- Destination Region
# * 10-1 Analysis - Finding Outliers
# * 10-2 Finding Outliers and separting them
# * 10-3 Applying KNN model
#
# ### 11- Travel Time
#
# * 11-1 finding null values
# * 11-2 Validate travel time= Delivery - Departure
# * 11-3 Approach to calculate valid travel-time
# * 11-4 Calculating speed
# * 11-5 Calculating time
# * 11-6 Identfying wrong travel time
#
# ### 12 - Fixing Drone Type
# * 12-1 Drone type-1
# * 12-2 Drone type-2
# * 12-3 Drone type-3
# * 12-4 Fixing null values in Drone type
#
# ### 13- Delivery time
#
#
# ### 14- Delivery Price
#
# * 14-1 Divding the Departure time into intervals
# * 14-2 EDA on Delivery price - Detection of outliers on Drone type
# * 14-3 Clean outliers in Drone type agaisnt Delivery fare
# * 14-4 Linear Modelling to predict Delivery Price
# * 14-4-a Dividing the Delivery Price data into train and test
# * 14-4-b Finding the correlation against all variables
# * 14-4-c Further analysis on Price-Train data & Model Evaluation
# * 14-4-d Predicting Delivery Price
#
# ### 15 - Saving to csv file
#
# ### 16 - Conclusion
# ### 17- References
#
# ---------------
# ## 1-Libraries
# +
# Importing libraries for assessment 2
import pandas as pd
import numpy as np
import re
import os
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import sys
import warnings
warnings.filterwarnings("ignore")
# +
#Printing system version
print (sys.version_info)
# -
# ## 2- Read the csv file
#reading the file and storing it in a dataframe
file_df=pd.read_csv("29389690.csv")
# +
#Printing total rows and column
file_df.shape
# -
# ## 3- Exploring Dataframe
#
# ### 3-1 Printing information of DataFrame
file_df.info()
# ### 3-2 Describing the DataFrame
# describing continuous type
file_df.describe()
# +
# Describing categorical type
file_df.describe(include=['O'])
# -
# ### 3-3 Displaying the top 5 data
file_df.head()
# ### 3.4 - Displaying null values for each feature
# +
file_df.isnull().sum()
# -
# --------------------------------------------------------------
# # Start of Data cleaning
# ------------------------------------------------------
# Assumption: Based on EDA, Departure Time and package weight is assumed to be valid
#
# # 4 -Dropping duplicates and ID=null
#
# From below analysis, there are total 60 rows where duplicate rows and ID=null are present.
# This information can be seen in dataframe=new_df
# Since ID is a unique identifier, we can't impute it; these rows are noise in the dataframe.
#
# Removing 60 values, the new filtered count is 36997
# Making a copy of main dataframe
# Working copy so the raw file_df stays untouched for the comparison below
test_df=file_df.copy()
# +
# Dropping duplicates (keeping the first occurrence of each row)
test_df=test_df.drop_duplicates(keep='first')
# Dropping rows where 'Id' is null
test_df = test_df.drop(test_df[test_df.Id.isnull()].index)
# Printing the new shape
test_df.shape
# -
# This dataframe contains all the duplicates and ID null values:
# ~file_df.isin(test_df) marks cells no longer present in the cleaned frame
# (alignment is by index/columns), and dropna(how='all') keeps only the rows
# that were removed entirely.
new_df=file_df[~file_df.isin(test_df)]
new_df=new_df.dropna(how='all')
new_df=new_df.sort_values('Drone Type')
new_df.shape
# ### Since test_df Dataframe is cleaned with redudant data, this dataframe is used going forward
# ---
# ## 5 - Cleaning Departure Date
#
# From EDA, Date variable has 3 errors:
# * Date format error
# * February month ending with 30
# * Months wrongly ending with day 31
#
# ### Approach:
# #### 5-1 Creating a Regex function to clean the date errors
# +
#Below function accepts the improper dates and returns cleaned ones
# Compile the repair patterns once at import time instead of on every call.
#  0) "month" slot holds 13-39      -> day and month were swapped
#  1) February 29/3x                -> clamp to the 28th (only unparseable dates
#     reach this function, so real leap-day 29ths never get clamped)
#  2) Apr/Jun/Sep/Nov the 31st      -> clamp to the 30th
_DATE_FIX_PATTERNS = [re.compile(p) for p in (
    r'(\d{4})-(1[3-9]|2[0-9]|3[0-9])-(\d{1,2})$',
    r'(\d{4})-(02)-(29|3[0-9])$',
    r'(\d{4})-(04|06|09|11)-(31)$')]

def cleanup(date):
    """Return a repaired 'YYYY-MM-DD' string for a known-bad date.

    Returns None when the string matches none of the known error patterns.
    """
    for i, p in enumerate(_DATE_FIX_PATTERNS):
        res = p.match(date)
        if res and i == 0:  # swap month and day
            return f'{res[1]}-{res[3]}-{res[2]}'
        if res and i == 1:  # February ends on the 28th
            return f'{res[1]}-{res[2]}-28'
        if res and i == 2:  # April/June/September/November end on the 30th
            return f'{res[1]}-{res[2]}-30'
# -
# #### 5-2 function to clean the date errors & return clean data format
# +
#Checking date
import datetime
filtered_file=[] # to store clean dates
# Below method validates whether date is valid or not, if invalid, exception is caught and passed to funciton(5-1) to clean
def validate(date_text):
    """Append date_text to filtered_file, repairing it via cleanup() when malformed.

    strptime raises ValueError both for format errors and impossible dates
    (e.g. 2019-02-30), so one except branch covers every bad case.
    """
    try:
        datetime.datetime.strptime(date_text, '%Y-%m-%d')
        filtered_file.append(date_text)
    except ValueError:
        # (removed the write-only local `incorrect_date` list - it was rebuilt
        # on every call and never read)
        filtered_file.append(cleanup(date_text))
sample_file=test_df['Departure Date']
for each in sample_file:
    validate(each)
###################################################################################################################
# Creating a new dataframe for the filtered 'Departure Date' and writing it back into test_df.
# BUG FIX: filt_df must carry test_df's own index. After the section-4 drops test_df's
# index has gaps, while filtered_file is ordered positionally; a default RangeIndex
# would make DataFrame.update() align on the wrong rows (and skip missing labels).
filt_df = pd.DataFrame({'Departure Date':filtered_file}, index=test_df.index)
test_df.update(filt_df)
# -
test_df[test_df['Id']=='ID1657544928']
# # 6 - Latitude and Longitude
# 
#
#
# Latitude and Longitude of both Regions(Origin and Destination) are invalid for these data
#
# ### Approach:
#
# Negating the values, post this cordinates are fixed
# +
# Sign-flip repair in one vectorised pass: latitudes are forced negative and
# longitudes positive — presumably the data is Australian (southern hemisphere,
# eastern longitudes); TODO confirm against the region coordinates.
test_df['Origin Latitude'] = np.where(test_df['Origin Latitude']>0, -1*test_df['Origin Latitude'],test_df['Origin Latitude'])
test_df['Origin Longitude'] = np.where(test_df['Origin Longitude']<0, -1*test_df['Origin Longitude'],test_df['Origin Longitude'])
test_df['Destination Latitude'] = np.where(test_df['Destination Latitude']>0, -1*test_df['Destination Latitude'],test_df['Destination Latitude'])
test_df['Destination Longitude'] = np.where(test_df['Destination Longitude']<0, -1*test_df['Destination Longitude'],test_df['Destination Longitude'])
# -
# --------------------------------------
# # 7- Filling post type
# 
# +
#Before filling post type
print("Total null values present in Post Type=",len(test_df[test_df['Post Type'].isnull()]))
# -
# #### Based on EDA, there is pattern between ID and Post Type
#
# #### for most cases, ID starting with 5, post type is 1.0 & ID starting with 1, post type is 0.0
#
# #### Two errors are present.(post type null vales and wrong post type values)
# Based on above inference, both issues are fixed in below step
# +
# below function matches the Post type using regex match and return the correct post-type value
def clean_postype(id_data):
    """Return post type 1.0 for ids beginning with 'ID5', else 0.0."""
    return 1.0 if id_data.startswith("ID5") else 0.0
test_df['Post Type']=test_df['Id'].apply(lambda x: clean_postype(x) if(str(x) != 'nan') else x)
# -
# -------------------------------------------
# # 8 - Verifying distance
#
# #### 8-1 Analysis - Calculate the distance using the formula
# +
from math import sin, cos, sqrt, atan2, radians
#Below function calculated the actual distance using the formula, considering the latitudes & longitudes of origin and destination
def calculate_dist(o_lat,o_lon,d_lat,d_lon):
    """Great-circle (haversine) distance in km, rounded to 3 dp (R = 6378 km)."""
    earth_radius_km = 6378.0
    lat1, lon1, lat2, lon2 = (radians(value) for value in (o_lat, o_lon, d_lat, d_lon))
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    a = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    central_angle = 2 * atan2(sqrt(a), sqrt(1 - a))
    return round(earth_radius_km * central_angle, 3)
test_df['cal_dist']=test_df.apply(lambda x: calculate_dist(x['Origin Latitude'],x['Origin Longitude'],x['Destination Latitude'],x['Destination Longitude']),axis=1)
# +
#Verifying how many journey distances are not same compared with calculated distance
filt=round(test_df['Journey Distance'],3) != test_df['cal_dist']
test_df[filt][['Id','Journey Distance','cal_dist']]
# -
# #### From above we can confirm that 14 Journey Distances are not the same.
# -------------
# ### 8-2 Solution: Drop the Journey Distance Column and rename the calculated distance as the new Journey Distance
# +
#Dropping 'Departure Date' and adding back new filtered departure date to the data frame.
test_df.drop(['Journey Distance'], axis=1, inplace=True)
#Renaming columns name from 'cal_dist' to 'Journey Distance'
test_df.rename(columns={'cal_dist':'Journey Distance'}, inplace=True)
# -
# ------------------
# # 9- Origin Region
#
# ### 9-1 Analysis - Finding Outliers
# 
#
#
# Above analysis confirms the presence of outliers in the origin regions
#
# #### Approach:
#
# 1) Find the outliers.
# 2) Separate them into clean and unclean data (which contains outliers and null values).
# 3) I am running KNN algorithm on the clean data set and predicting correct origin regions for the unclean dataset
#
#
# ##### <font color='red'>NOTE: Above method also holds good to determine destination region<font>
# +
#### Above analysis can be further confirmed by plotting boxplot and finding the outliers
# Below graph confirms the outliers present in the Origin region wrt Origin latitude are many
plt.rcParams['figure.figsize']=15,10
xi=test_df[['Origin Latitude','Origin Longitude','Origin Region']]
sns.boxplot(data=xi,x='Origin Region',y='Origin Latitude')
# -
# ### 9-2 Finding Outliers and separating them
# +
Regions=list(range(1,49))
df_OriginRegions=test_df[['Origin Latitude','Origin Longitude','Origin Region']]
#### Below method identifies the outliers by verfying the IQR range and filters them into cleaned and outlier data
def clean_outlier(data_df, categories=None):
    """Split data_df into (clean, outlier) frames, one IQR pass per category.

    The last column of data_df holds the category code (here the region
    number). `categories` defaults to the module-level `Regions` list, so the
    original zero-argument call sites keep working. Rows falling outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR] on any column count as outliers.
    """
    if categories is None:
        categories = Regions
    df_clean = pd.DataFrame()
    df_outlier = pd.DataFrame()
    for each in categories:
        # FIX: compare against the scalar `each`, not the one-element list
        # [each] - list comparison relies on length broadcasting and raises
        # "Lengths must match" on some pandas versions.
        df_eachRegion=data_df[data_df.iloc[:,-1]==each]
        Q1 = df_eachRegion.quantile(0.25)
        Q3 = df_eachRegion.quantile(0.75)
        IQR = Q3 - Q1
        clean=df_eachRegion[~((df_eachRegion < (Q1 - 1.5 * IQR)) |(df_eachRegion > (Q3 + 1.5 * IQR))).any(axis=1)]
        df_clean=pd.concat([df_clean,clean])
        outlier=df_eachRegion[((df_eachRegion < (Q1 - 1.5 * IQR)) |(df_eachRegion > (Q3 + 1.5 * IQR))).any(axis=1)]
        df_outlier=pd.concat([df_outlier,outlier])
    return(df_clean,df_outlier)
##### Below variables contain the cleaned and outlier data
cleaned_Origin_data,outlier_Origin_data=clean_outlier(df_OriginRegions)
# -
# ### 9-3 Applying KNN model
# +
#### Dividing train and test data of Origin region
# The test data contains 2 categories: nulls and outliers of Origin Region
#--------- Finding all nulls of Origin ---------------
OR_allnull=test_df[test_df['Origin Region'].isnull()]
#--------- selecting all the outliers of Origin Region from main dataframe -------------
# (first a boolean mask over test_df's index, then reassigned to the selected rows)
outlier_OR_df = test_df.index.isin(outlier_Origin_data.index)
outlier_OR_df=test_df[outlier_OR_df]
# Concatenating both to form the Origin Region test data (rows whose region must be predicted)
OR_test=pd.concat([OR_allnull,outlier_OR_df])
# A copy of the cleaned data becomes the training data
OR_train=cleaned_Origin_data.copy()
# +
#### obtain k-value
from sklearn import neighbors
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
params = {'n_neighbors':range(1,6)}
knn = neighbors.KNeighborsRegressor()
model = GridSearchCV(knn, params, cv=10)
model.fit(OR_train[['Origin Latitude','Origin Longitude']],OR_train['Origin Region'])
model.best_params_
# +
#### Fitting the model
from sklearn.neighbors import KNeighborsClassifier
neigh_model = KNeighborsClassifier(n_neighbors=5)
neigh_model.fit(OR_train[['Origin Latitude','Origin Longitude']],OR_train['Origin Region'])
# +
#### Predicition
OR_test['Origin Region'] = neigh_model.predict(OR_test[['Origin Latitude','Origin Longitude']])
# +
### Updating the main dataframe with the predicted values
test_df['Origin Region'] = OR_test['Origin Region'].combine_first(test_df['Origin Region'])
# -
# ##### Above data now contains clean origin regions
# ------------
# # 10 - Destination Region
#
# #### 10-1 Analysis - finding outliers
# +
## Below graph confirms the outliers present in the Origin region wrt Origin latitude are many
plt.rcParams['figure.figsize']=15,10
xi=test_df[['Destination Latitude','Destination Longitude','Destination Region']]
sns.boxplot(data=xi,x='Destination Region',y='Destination Latitude')
# -
# ### 10-2 Finding Outliers and separting them
# +
df_DestRegions=test_df[['Destination Latitude','Destination Longitude','Destination Region']]
cleaned_Dest_data,outlier_Dest_data=clean_outlier(df_DestRegions)
# -
# ### 10-3 Applying KNN model
# +
#### Dividing train and test data of Origin region
# The test data contains 2 categories: Nulls and Outliers of Destination region
#--------- Finding all nulls of Destination region ---------------
DR_allnull=test_df[test_df['Destination Region'].isnull()]
#--------- selecting all the outliers of Origin Region from main dataframe -------------
outlier_DR_df = test_df.index.isin(outlier_Dest_data.index)
outlier_DR_df=test_df[outlier_DR_df]
# Mapping of above both to form final Origin Region's testing data
DR_test=pd.concat([DR_allnull,outlier_DR_df])
# A Copy of cleaned data becomes training data
DR_train=cleaned_Dest_data.copy()
# +
#### Fitting the model
from sklearn.neighbors import KNeighborsClassifier
neigh_model = KNeighborsClassifier(n_neighbors=5)
neigh_model.fit(DR_train[['Destination Latitude','Destination Longitude']],DR_train['Destination Region'])
# +
#### Predicition
DR_test['Destination Region'] = neigh_model.predict(DR_test[['Destination Latitude','Destination Longitude']])
# +
### Updating the main dataframe with the predicted values
test_df['Destination Region'] = DR_test['Destination Region'].combine_first(test_df['Destination Region'])
# -
# --------------------
# # 11- Travel Time
#
#
# Below steps confirms there are 3 type of errors in Travel time:
# * 40 null values
# * 40 incorrect values (delivery time happened earlier to departure time)
# * 34 incorrect values (wrong time taken)
# ### 11-1 finding null values
#Making a new dataframe subset of main one
timedf=test_df[['Id','Drone Type','Departure Time','Delivery Time','Journey Distance','Travel Time']]
timedf.head()
print("There are {} null vales present in 'Travel Time column'".format(len(timedf[timedf['Travel Time'].isnull()])))
# ### 11-2 Validate travel time= Delivery - Departure
#
# Approach:
# * find the difference of delivery & departure
# * divide the data into valid and invalid
# * dividing the valid data based on Drone type-1, 2 & 3
# +
# Creating a new column difference of 'Delivery time' & Departure time. This is required to validate actual 'travel time'
# Defining a method to perform subtraction
def calculate_traveltime(data1,data2):
    """Return (data2 - data1) in minutes for 'HH:MM:SS' stamps, rounded to 2 dp."""
    weights = (60, 1, 1 / 60)  # hours and seconds converted to minutes
    def to_minutes(stamp):
        return sum(w * int(field) for w, field in zip(weights, stamp.split(':')))
    return round(to_minutes(data2) - to_minutes(data1), 2)
timedf['diff_Del&Dep']=timedf.apply(lambda x: calculate_traveltime(x['Departure Time'],x['Delivery Time']),axis=1)
# +
# Divide the 'Travel Time' into two separate columns:
# 1 - valid data (where travel time is equal to diff_Del&Dep)
# 2 - invalid data (where travel time is null and travel time is not equal to diff_Del&Dep)
# 1. Finding Valid data:
# 1.a) Defining a method to find valid time data
def find_valid(data1,data2):
    """Return data1 when it equals data2 exactly, or data2+0.01/+0.02 at 2 dp; else None."""
    if data1 == data2:
        return data1
    for tolerance in (0.01, 0.02):
        if data1 == round(data2 + tolerance, 2):
            return data1
timedf['valid_traveltime']=timedf.apply(lambda x:find_valid(x['Travel Time'],x['diff_Del&Dep']),axis=1 )
######################################################################################################
### After the step above 5387 nulls remain in valid_traveltime - too many to all be bad records
######################################################################################################
#1.b There are few cases where delivery were made before departure time, which cannot be a valid data.
# Example: Departure_time= 5pm & Delivery time = 4pm
# Assumption: a genuine delivery takes less than 15 hours.
timedf['time_diff']= pd.to_timedelta(timedf['Delivery Time'])-pd.to_timedelta(timedf['Departure Time'])
# .dt.seconds wraps negative deltas modulo 24h, so flights that crossed midnight
# come out as a positive same-day duration
timedf['time_diff'] = pd.to_timedelta(timedf['time_diff'].dt.seconds, unit='s')
timedf['time'] = timedf['time_diff'] / np.timedelta64(1, 'h')
# Rows whose wrapped duration is under 15 hours keep their given Travel Time.
# NOTE(review): this assignment overwrites the 1.a result entirely, so the final
# valid/invalid split depends only on the <15h rule - confirm that is intended.
timedf['valid_traveltime']=timedf.apply(lambda x:x['Travel Time'] if x['time']<15 else None,axis=1 )
#################################################################################
########## Total null values after 1-a and 1-b step is 80 ######################
#################################################################################
# 2. Splitting into valid / invalid travel-time frames
valid_traveltime=timedf[timedf['valid_traveltime'].notnull()]
invalid_traveltime=timedf[timedf['valid_traveltime'].isnull()]
# +
#Dropping all unwanted columns
valid_traveltime.drop(['diff_Del&Dep','valid_traveltime','time_diff','time'], axis=1, inplace=True)
invalid_traveltime.drop(['diff_Del&Dep','valid_traveltime','time_diff','time'], axis=1, inplace=True)
#valid_traveltime.drop(['valid_traveltime','time_diff','time'], axis=1, inplace=True)
#invalid_traveltime.drop(['valid_traveltime','time_diff','time'], axis=1, inplace=True)
# +
# Forming 6 dataframe of different Drone type
valid_traveltime_drone1=valid_traveltime[valid_traveltime['Drone Type']==1.0]
invalid_traveltime_drone1=invalid_traveltime[invalid_traveltime['Drone Type']==1.0]
valid_traveltime_drone2=valid_traveltime[valid_traveltime['Drone Type']==2.0]
invalid_traveltime_drone2=invalid_traveltime[invalid_traveltime['Drone Type']==2.0]
valid_traveltime_drone3=valid_traveltime[valid_traveltime['Drone Type']==3.0]
invalid_traveltime_drone3=invalid_traveltime[invalid_traveltime['Drone Type']==3.0]
# -
# ##### At this step, data is cleaned with outliers, which is required for next steps
# ----------------
# ## 11-3 Approach to calculate valid travel-time
# ### Below method explains how travel time is calculated by solving simultaneous equations
#
# W.K.T
# x ->speed of takeoff
# y -> speed of landing
# z -> speed of onroute
# t -> time taken
# d -> distance travelled
#
# ###### 5x+5y= $\beta$ ------> Treat this as beta. Beta here conveys information on= Distance travelled during takeoff and landing
#
#
# Then main equation becomes, beta + on route distance = journey distance
# on route distance = time taken (t-10) * speed (z)
# ##### $\beta$+(t-10)z=d -----------> (main eqn)
#
# ---------------------------------------------
# ##### Selecting random values for Drone type=1 (Refer below picture)
#
# 
#
# let actual time t=393.21 and travelled distance d=386.712 then main eqn becomes:
# ##### * $\beta$+(396.91-10)z=390.409 -----------> (eqn-1)
#
# let t=176.72 and d=390.409 then main eqn becomes:
# ##### * $\beta$+(176.72-10)z=170.223 -----------> (eqn-2)
#
#
# --------------------------------------------
# #### Solving eq-1 and eq-2: we get
# z(speed of drone type 1) = 60 km/hr
# beta = 210.36
#
# ### Using above method, correct 'Travel Time' is calculated
#
#
# --------------------------------------------------------
#
# ### 11-4 Calculating speed
# +
import numpy as np
# Below is the linear equation which calculates speed and beta
def linear_eqn(time1,time2,dist1,dist2):
    """Solve beta/60 + (t-10)*z/60 = d for two (time, distance) samples.

    Returns (beta, z) - the takeoff/landing distance term and the on-route
    speed - each rounded to 2 decimal places.
    """
    coefficients = np.array([[1 / 60, (time1 - 10) / 60],
                             [1 / 60, (time2 - 10) / 60]])
    observed = np.array([dist1, dist2])
    solution = np.linalg.solve(coefficients, observed)
    return tuple(round(value, 2) for value in solution)
beta1,speed1=linear_eqn(396.91,176.72,390.409,170.223)
beta2,speed2=linear_eqn(144.01,122.85,148.845,125.922)
beta3,speed3=linear_eqn(186.23,255.73,215.387,298.795)
print("Speed of Drone type-1={}, beta1={}".format(speed1,beta1))
print("Speed of Drone type-2={}, beta2={}".format(speed2,beta2))
print("Speed of Drone type-3={}, beta3={}".format(speed3,beta3))
# -
# ### 11-5 Calculating time
# +
# Below function is required to calculate the correct travel time
def calculate_time(dist,beta,speed):
    """Invert the travel model: minutes = (dist*60 - beta) / speed + 10."""
    onroute_minutes = ((dist * 60) - beta) / speed
    return round(onroute_minutes + 10, 2)
# +
#Calculating new Travel time
# Approach
# calculate all 3 speeds for each droen type and validate the travel time
valid_drone1=pd.concat([valid_traveltime_drone1,invalid_traveltime_drone1])
valid_drone1['timetaken_drone1']=valid_drone1.apply(lambda x: calculate_time(x['Journey Distance'],beta1,speed1),axis=1)
valid_drone1['timetaken_drone2']=valid_drone1.apply(lambda x: calculate_time(x['Journey Distance'],beta2,speed2),axis=1)
valid_drone1['timetaken_drone3']=valid_drone1.apply(lambda x: calculate_time(x['Journey Distance'],beta3,speed3),axis=1)
valid_drone2=pd.concat([valid_traveltime_drone2,invalid_traveltime_drone2])
valid_drone2['timetaken_drone2']=valid_drone2.apply(lambda x: calculate_time(x['Journey Distance'],beta2,speed2),axis=1)
valid_drone2['timetaken_drone1']=valid_drone2.apply(lambda x: calculate_time(x['Journey Distance'],beta1,speed1),axis=1)
valid_drone2['timetaken_drone3']=valid_drone2.apply(lambda x: calculate_time(x['Journey Distance'],beta3,speed3),axis=1)
valid_drone3=pd.concat([valid_traveltime_drone3,invalid_traveltime_drone3])
valid_drone3['timetaken_drone3']=valid_drone3.apply(lambda x: calculate_time(x['Journey Distance'],beta3,speed3),axis=1)
valid_drone3['timetaken_drone2']=valid_drone3.apply(lambda x: calculate_time(x['Journey Distance'],beta2,speed2),axis=1)
valid_drone3['timetaken_drone1']=valid_drone3.apply(lambda x: calculate_time(x['Journey Distance'],beta1,speed1),axis=1)
# -
# ## 11-6 Identifying wrong Travel time
#
# #### Here, In drone type 1, there are <font color='red'>26</font> outliers, where mismatch is present.
#
# ##### Expected result : given travel time of drone type 1 should be equal to calculated timetaken_drone1
# ##### Actual result : given travel time of drone type 1 is not equal to calculated timetaken_drone1, instead equal to timetaken_drone2 for below highlighted scenario
#
# #### Solution:
# * The time taken by drone 2 is the new travel time
# * Drone type will be 2, since drone-type 2 was approximate to given travel time
#
# # ---------------------------------------------------------------------------------------------------------------
# 
# # ---------------------------------------------------------------------------------------------------------------
print("Outliers present in Drone type 1=",len(valid_drone1[valid_drone1['Travel Time'] < valid_drone1['timetaken_drone1']]))
# ---------------------------------------------------
# # 12 - Fixing invalid Drone-type
#
# ### 12 -1 : Drone type=1
sample_df=valid_drone1[valid_drone1['Travel Time'] < valid_drone1['timetaken_drone1']]
# +
# Below method cleans the invalid drone type
def correct_dronetype(traveltime,timetaken_drone1,timetaken_drone2,timetaken_drone3):
    """Return the drone type (1.0/2.0/3.0) whose computed travel time matches.

    A match allows a +-0.01 rounding tolerance; anything matching neither
    drone 1 nor drone 2 is classified as drone 3.
    """
    def matches(expected):
        return traveltime in (expected,
                              round(expected + 0.01, 2),
                              round(expected - 0.01, 2))
    if matches(timetaken_drone1):
        return 1.0
    if matches(timetaken_drone2):
        return 2.0
    return 3.0
# -
sample_df['new']=sample_df.apply(lambda x: correct_dronetype(x['Travel Time'],x['timetaken_drone1'],x['timetaken_drone2'],x['timetaken_drone3']),axis=1)
sample_df.drop(['Drone Type','timetaken_drone1'], axis=1, inplace=True)
sample_df.rename(columns={'Travel Time':'timetaken_drone1','new':'Drone Type'}, inplace=True)
valid_drone1.update(sample_df)
valid_drone1.drop(['Travel Time','timetaken_drone2','timetaken_drone3'], axis=1, inplace=True)
valid_drone1.rename(columns={'timetaken_drone1':'Travel Time'}, inplace=True)
# ### 12 -2 : Drone type=2
print("Outliers present in Drone type 2=",len(valid_drone2[valid_drone2['Travel Time'] < valid_drone2['timetaken_drone2']]))
# +
sample_df2=valid_drone2[valid_drone2['Travel Time'] < valid_drone2['timetaken_drone2']]
sample_df2['new']=sample_df2.apply(lambda x: correct_dronetype(x['Travel Time'],x['timetaken_drone1'],x['timetaken_drone2'],x['timetaken_drone3']),axis=1)
sample_df2.drop(['Drone Type','timetaken_drone1'], axis=1, inplace=True)
sample_df2.rename(columns={'Travel Time':'timetaken_drone1','new':'Drone Type'}, inplace=True)
# -
# Push the corrected drone-2 rows back into the drone-2 frame.
# BUG FIX: was update(sample_df) - the drone-1 outlier frame from 12-1 - so the
# drone-2 corrections computed in sample_df2 were never applied.
valid_drone2.update(sample_df2)
valid_drone2.drop(['Travel Time','timetaken_drone1','timetaken_drone3'], axis=1, inplace=True)
valid_drone2.rename(columns={'timetaken_drone2':'Travel Time'}, inplace=True)
# ### 12 -3 : Drone type=3
# #### There are no errors in Drone type 3, hence going ahead with concatinating all 3 dfs
#
# ------------------------------------------------
valid_drone3.drop(['Travel Time','timetaken_drone1','timetaken_drone2'], axis=1, inplace=True)
valid_drone3.rename(columns={'timetaken_drone3':'Travel Time'}, inplace=True)
# mapping all 3 dfs
test1_df=pd.concat([valid_drone1,valid_drone2,valid_drone3])
# Updating the main df
test_df.update(test1_df)
# ### 12-4 Fixing null values in Drone type
#
# Below step confirms total 10 nulls are present for Drone Type
#
# Approach:
#
# * Find the 3 travel time, for respective drone type
# * Identify which given travel time is equal to calculated drone time
# * fill the Drone type
#
#
print("Total null values in Drone Type",len(test_df[test_df['Drone Type'].isnull()]))
# +
# Finding the 3 travel time, for respective droen type
# Finding the 3 candidate travel times (one per drone type) for rows with null Drone Type
sample_nulldrone_df=test_df[test_df['Drone Type'].isnull()]
# FIX: apply over the null subset itself - the original applied over the whole
# test_df and relied on index alignment to discard the extra rows (same values,
# far more work).
sample_nulldrone_df['timetaken_drone1']=sample_nulldrone_df.apply(lambda x: calculate_time(x['Journey Distance'],beta1,speed1),axis=1)
sample_nulldrone_df['timetaken_drone2']=sample_nulldrone_df.apply(lambda x: calculate_time(x['Journey Distance'],beta2,speed2),axis=1)
sample_nulldrone_df['timetaken_drone3']=sample_nulldrone_df.apply(lambda x: calculate_time(x['Journey Distance'],beta3,speed3),axis=1)
# Filling the correct drone type, by using the function mentioned in 12-1
sample_nulldrone_df['new']=sample_nulldrone_df.apply(lambda x: correct_dronetype(x['Travel Time'],x['timetaken_drone1'],x['timetaken_drone2'],x['timetaken_drone3']),axis=1)
sample_nulldrone_df.drop(['Drone Type','timetaken_drone1','timetaken_drone2','timetaken_drone3'], axis=1, inplace=True)
sample_nulldrone_df.rename(columns={'new':'Drone Type'}, inplace=True)
# -
test_df.update(sample_nulldrone_df)
# ----------------------------
# # 13- Delivery time
#
#
# ### Formula 'Delivery time' = Departure time + Time taken
# +
# Below steps calculates the correct Delivery time, based on Departure time and Travel time
import time
# Departure time expressed as minutes since midnight
test_df['new_deptime'] = pd.to_timedelta(test_df['Departure Time']) / np.timedelta64(1, 'm')
# Delivery time (minutes) = departure + travel time
test_df['new_deltime'] = test_df['new_deptime']+test_df['Travel Time']
# minutes -> seconds, the unit time.gmtime expects
test_df['new_deltime'] = test_df['new_deltime']*60
# gmtime wraps values past 24h, so deliveries crossing midnight roll over
# to a next-day clock time
test_df['calculated_deliverytime']=test_df['new_deltime'].apply(lambda x: time.strftime("%H:%M:%S",time.gmtime(x)))
# Dropping unwanted columns and saving the final dataframe
test_df.drop(['new_deptime','new_deltime','Delivery Time'], axis=1, inplace=True)
test_df.rename(columns={'calculated_deliverytime':'Delivery Time'}, inplace=True)
# -
# ---------------------------
# # 14- Delivery Price
#
# Approach : Delivery Price can be predicted based on linear modelling. Before that, final data is analyzed for outliers and post cleaning, price is predicted
#
# -----
# ## 14-1 Dividing the Departure time into intervals
#
# This step is required to analyze its affects on linear model prediction
# +
# Below method is used to divide the Departure time into intervals of 1-morning 2-afternoon 3-night.
# Range is morning (6:00:00 - 11:59:59), afternoon(12:00:00 - 20:59:59), and night (21:00 - 5:59:59)
def interval(data):
    """Bucket an 'HH:MM:SS' departure time: 1=morning (06-11), 2=afternoon (12-20), 3=night."""
    hour = int(data.split(':')[0])
    if 6 <= hour < 12:
        return 1
    if 12 <= hour < 21:
        return 2
    return 3
test_df['interval']=test_df.apply(lambda x: interval(x['Departure Time']),axis=1)
# -
# --------
#
# ## 14-2 EDA on Delivery price
# +
# Below graph plots all features against Delivery fare
sns.pairplot(test_df,x_vars=['Drone Type','Post Type','Package Weight','Origin Region','Destination Region','Journey Distance','interval'],y_vars='Delivery Fare',kind='reg')
# -
# ##### Drone type seem to have outliers and needs treatment
# ##### Detection of outliers on Drone type
# +
# Below plots the box plot Drone type against Delivery fare
plt.rcParams['figure.figsize']=15,10
xi=test_df[['Delivery Fare','Drone Type']]
sns.boxplot(data=xi,x='Drone Type',y='Delivery Fare')
# -
# #### Analysis: the plot above confirms there are outliers in Drone Type versus Delivery Fare, so these outliers must be removed before proceeding with the delivery fare prediction
#
# -------------
# ### 14-3 Clean outliers in Drone type agaisnt Delivery fare
# +
### This step calculated the outliers and clean them
drone=list(range(1,4))
df_drone_deliveryfare=test_df[['Delivery Fare','Drone Type']]
#### Below method identifies the outliers by verfying the IQR range and filters them into cleaned and outlier data
# NOTE(review): this intentionally re-defines the clean_outlier from section 9-2,
# using the drone codes as the category list instead of the regions.
def clean_outlier(data_df, categories=None):
    """Split data_df into (clean, outlier) frames, one IQR pass per category.

    The last column of data_df holds the category code (here the drone type).
    `categories` defaults to the module-level `drone` list, so the original
    zero-argument call sites keep working. Rows falling outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR] on any column count as outliers.
    """
    if categories is None:
        categories = drone
    df_clean = pd.DataFrame()
    df_outlier = pd.DataFrame()
    for each in categories:
        # FIX: compare against the scalar `each`, not the one-element list
        # [each] - list comparison relies on length broadcasting and raises
        # "Lengths must match" on some pandas versions.
        drone_df=data_df[data_df.iloc[:,-1]==each]
        Q1 = drone_df.quantile(0.25)
        Q3 = drone_df.quantile(0.75)
        IQR = Q3 - Q1
        clean=drone_df[~((drone_df < (Q1 - 1.5 * IQR)) |(drone_df > (Q3 + 1.5 * IQR))).any(axis=1)]
        df_clean=pd.concat([df_clean,clean])
        outlier=drone_df[((drone_df < (Q1 - 1.5 * IQR)) |(drone_df > (Q3 + 1.5 * IQR))).any(axis=1)]
        df_outlier=pd.concat([df_outlier,outlier])
    return(df_clean,df_outlier)
##### Below variables contain the cleaned and outlier data
cleaned_data,outlier_data=clean_outlier(df_drone_deliveryfare)
# +
# This step is required to map the cleaned data with the main data. Result is clean dataframe is extracted
clean_df = test_df.index.isin(cleaned_data.index)
clean_df=test_df[clean_df]
# +
# Plotting and verfying for outlier removal
plt.rcParams['figure.figsize']=15,10
xi=clean_df[['Delivery Fare','Drone Type']]
sns.boxplot(data=xi,x='Drone Type',y='Delivery Fare')
# -
print(" {} rows were present before outlier removal, and {} rows present after outlier removal".format (len(test_df),len(clean_df)))
# -------
# ### 14-4 Linear Modelling to predict Delivery Price
#
# #### 14-4-a Dividing the Delivery Price data into train and test
# +
#Taking all numerical features
lr_df=clean_df[['Drone Type','Post Type','Package Weight','Journey Distance','interval','Delivery Fare']]
lr_df.head()
# +
# Dividing the Delivery price data into train and test
#### Train contains all the not null values of delivery price
#### Test contains all the null values of delivery price
train_fare_df=lr_df[lr_df['Delivery Fare'].notnull()]
test_fare_df=lr_df[lr_df['Delivery Fare'].isnull()]
# -
# #### 14-4-b Finding correlation
# +
# Plotting the correlation and analysis step
corr = train_fare_df.corr()
corr.style.background_gradient()
# -
# #### Analysis:
# 1) Drone Type & Delivery Fare are highly correlated
# 2) Post Type,interval & Delivery Fare are correlated at medium level
# 3) Package weight,Journey Distance & Delivery Fare ar correlated at low level
# 4) Origin Region, Destination Region very less correlation, <font color='red'>hence can be removed for modelling <font>
#
# #### 14-4-c Further analysis on Price-Train data & Model Evaluation
# +
#Splitting the data on price training data.
## This step is required to evaluate the model
# FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection (the module already
# used for GridSearchCV in section 9-3).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train_fare_df.iloc[:,:-1],train_fare_df.iloc[:,-1:], random_state = 111)
# +
from sklearn.linear_model import LinearRegression
# Linear model is applied on X_train and y_train
lm_full = LinearRegression() # object of Linear Regression
lm_full.fit(X_train,y_train)
# +
# Predicting the price based on X_test
y_pred=lm_full.predict(X_test)
# +
# Model Evaluation
# Finding R-Square
print ('R-squared for this model = ',lm_full.score(X_test,y_test))
# Finding RMSE
print('\nRMSE of the model=',np.sqrt(mean_squared_error(y_pred,y_test)))
# -
plt.scatter(y_pred,y_test)
plt.xlabel('y_pred')
plt.ylabel('y_test')
plt.title('Linear Fit Graph')
# ##### The relation between given and predicted fare is linear in relation & R-squared = 0.99 and RMSE = 3.347. Based on above inference, I am concluding that model is good for predicting
#
# ------------
#
# #### 14-4-d Predicting Delivery Price
#
# The final model is run on predicting the unknown values of Delivery Price
# +
# All the null values in test_fare_df are predicted
# FIX: use .loc instead of chained indexing - chained assignment
# (df[col][mask] = ...) can write to a temporary copy (SettingWithCopyWarning)
# and silently leave the frame unchanged.
test_fare_df.loc[test_fare_df['Delivery Fare'].isnull(), 'Delivery Fare'] = lm_full.predict(test_fare_df.drop(['Delivery Fare'],axis=1))
test_fare_df['Delivery Fare']=round(test_fare_df['Delivery Fare'],2)
# +
# Updating the main dataframe with the predicted fares (RHS aligns by index)
# FIX: use .loc instead of chained indexing - chained assignment
# (df[col][mask] = ...) can write to a temporary copy and silently leave
# test_df unchanged.
test_df.loc[test_df['Delivery Fare'].isnull(), 'Delivery Fare'] = test_fare_df['Delivery Fare']
test_df.drop(['interval'], axis=1, inplace=True) # dropping the interval column
# +
#Verifying for any duplicate values.
# BUG FIX: duplicated is a method - without the call parentheses pandas treats
# the bound method as a callable indexer and the expression does not actually
# test for duplicates.
test_df[test_df.duplicated()]
#This confirms no duplicate values are present.
# -
# ----------
#
# # 15 - Saving to csv file
# +
# Saving the cleaned data to csv (utf-8, without the index column).
test_df.to_csv('29389690_solution.csv', encoding='utf-8', index=False)
# -

# Spot-check a single delivery record by its Id.
test_df[test_df['Id']=='ID1625703837']
# # 16 - Conclusion:
#
# * 36987 rows are the final cleaned data out of total 37057 rows.
# * Origin_Latitude: Total number of errors fixed: 20,
# * Destination_Latitude: Total number of errors fixed: 20,
# * Destination_Longitude: Total number of errors fixed 20,
# * Post_Type: Total number of errors fixed: 20,
# * Distance: Total number of errors: fixed: 14,
# * Both Origin and Destination regions are fixed, based on KNN model
# * Origin_Region: Total number of errors fixed: 30,
# * Destination_Region: Total number of errors fixed: 30,
# * Departure_Date: Total number of errors fixed: 220,
# * Drone_Type: Total number of errors fixed: 36,
# * Travel_Time: Total number of errors fixed: 40,
# * Fare: Total number of errors fixed: 30
# * Duplicated rows and Id=nulls are dropped
# * Null_Id: Total number of errors fixed: 23
# * Departure_Time: Total number of errors fixed: 40,
# * Finally, Delivery Fare is calculated based on Linear model
#
# # 17- References
# * 5 - https://codereview.stackexchange.com/questions/200176/cleaning-up-date-strings-in-python
# * 5-2 - https://stackoverflow.com/questions/16870663/how-do-i-validate-a-date-string-format-in-python
# * 8 - https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude
# * 9-2,10-2 - https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba
# * 9-3 - https://www.analyticsvidhya.com/blog/2018/08/k-nearest-neighbor-introduction-regression-python/
#
#
| Wrangling-Data-Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Uso básico de cuadernos Jupyter.
# # Fundamentos de Python.
# # Tipos básicos de Python: cadenas, enteros y flotantes.
# # Contenedores de Python: variables, listas, tuplas, conjuntos y diccionarios.
# # Fechas en Python.
# # Métodos básicos de cadenas, números, fechas, listas y diccionarios.
| basico/Fundamentos de Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## AIF module demo
# ### Import modules
# + tags=[]
import sys
import matplotlib.pyplot as plt
import numpy as np
sys.path.append('..')
# %load_ext autoreload
# %autoreload 2
# -
# ### Classic Parker AIF
# Create a Parker AIF object. This can be used to return arterial plasma Gd concentration for any time points.
# +
import aifs

# Create the AIF object. hct is presumably the haematocrit used to
# convert blood to plasma concentration -- confirm in the aifs module.
aif = aifs.parker(hct=0.42)

# Evaluate and plot the arterial plasma concentration at specific times.
t = np.linspace(0.,100.,1000)
c_ap = aif.c_ap(t)
plt.plot(t, c_ap)
plt.xlabel('time (s)')
plt.ylabel('concentration (mM)')
plt.title('Classic Parker');
# -
# ### Patient-specific AIF
# Create an individual AIF object based on a series of time-concentration data points.
# The object can then be used to generate concentrations at arbitrary times.
# +
# Define measured concentration-time data points (time in s, conc in mM).
t = np.array([19.810000,59.430000,99.050000,138.670000,178.290000,217.910000,257.530000,297.150000,336.770000,376.390000,416.010000,455.630000,495.250000,534.870000,574.490000,614.110000,653.730000,693.350000,732.970000,772.590000,812.210000,851.830000,891.450000,931.070000,970.690000,1010.310000,1049.930000,1089.550000,1129.170000,1168.790000,1208.410000,1248.030000])
c_p = np.array([-0.004937,0.002523,0.002364,0.005698,0.264946,0.738344,1.289008,1.826013,1.919158,1.720187,1.636699,1.423867,1.368308,1.263610,1.190378,1.132603,1.056400,1.066964,1.025331,1.015179,0.965908,0.928219,0.919029,0.892000,0.909929,0.865766,0.857195,0.831985,0.823747,0.815591,0.776007,0.783767])

# Create an AIF object from the measurements; it interpolates between them.
aif = aifs.patient_specific(t, c_p)

# Get AIF concentration at the original temporal resolution.
c_p_lowres = aif.c_ap(t)

# Get (interpolated) AIF concentration at higher temporal resolution.
t_highres = np.linspace(0., max(t), 200) # required time points
c_p_highres = aif.c_ap(t_highres)

# Overlay measurements, same-resolution evaluation, and interpolation.
plt.plot(t, c_p, 'o', label='original')
plt.plot(t, c_p_lowres, 'x', label='low res')
plt.plot(t_highres, c_p_highres, '-', label='high res')
plt.legend()
plt.xlabel('time (s)')
plt.ylabel('concentration (mM)')
plt.title('Individual AIF');
| src/original/MJT_UoEdinburghUK/demo/demo_aif.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="SzKwuqYESWwm"
# ##### Copyright 2021 The Cirq Developers
# + cellView="form" id="4yPUsdJxSXFq"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="zC1qlUJoSXhm"
# <table class="tfo-notebook-buttons" align="left">
# <td>
#     <a target="_blank" href="https://quantumai.google/cirq/qcvv/xeb_theory"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
# </td>
# </table>
# + id="bd9529db1c0b"
# Install cirq on the fly (e.g. when running on Colab) if it is missing.
try:
    import cirq
except ImportError:
    print("installing cirq...")
    # !pip install --quiet cirq
    print("installed cirq.")
# + [markdown] id="07034e5e3982"
# # Cross Entropy Benchmarking Theory
#
# Cross entropy benchmarking uses the properties of random quantum programs to determine the fidelity of a wide variety of circuits. When applied to circuits with many qubits, XEB can characterize the performance of a large device. When applied to deep, two-qubit circuits it can be used to accurately characterize a two-qubit interaction potentially leading to better calibration.
# + id="1348715511ca"
# Standard imports
import numpy as np
import cirq
from cirq.contrib.svg import SVGCircuit
# + [markdown] id="26129d0ff1c0"
# ## The action of random circuits with noise
# An XEB experiment collects data from the execution of random circuits
# subject to noise. The effect of applying a random circuit with unitary $U$ is
# modeled as $U$ followed by a depolarizing channel. The result is that the
# initial state $|𝜓⟩$ is mapped to a density matrix $ρ_U$ as follows:
#
# $$
# |𝜓⟩ → ρ_U = f |𝜓_U⟩⟨𝜓_U| + (1 - f) I / D
# $$
#
# where $|𝜓_U⟩ = U|𝜓⟩$, $D$ is the dimension of the Hilbert space, $I / D$ is the
# maximally mixed state, and $f$ is the fidelity with which the circuit is
# applied.
#
# For this model to be accurate, we require $U$ to be a random circuit that scrambles errors. In practice, we use a particular circuit ansatz consisting of random single-qubit rotations interleaved with entangling gates.
# + [markdown] id="d940bfde9209"
# ### Possible single-qubit rotations
# These 8*8 possible rotations are chosen randomly when constructing the circuit.
#
# Geometrically, we choose 8 axes in the XY plane to perform a quarter-turn (pi/2 rotation) around. This is followed by a rotation around the Z axis of 8 different magnitudes.
# + id="bb896019c42a"
# Eight evenly spaced exponents 0, 1/4, ..., 7/4 -- the 8 axis choices
# and the 8 Z-rotation magnitudes used to build the single-qubit gates.
exponents = np.arange(8) / 4
exponents
# + id="81e2ce9562a5"
import itertools

# All 8*8 = 64 single-qubit gates: a pi/2 rotation (x_exponent=0.5)
# about one of 8 axes in the XY plane (axis_phase_exponent a), followed
# by one of 8 Z-rotation magnitudes (z_exponent z).
SINGLE_QUBIT_GATES = [
    cirq.PhasedXZGate(x_exponent=0.5, z_exponent=z, axis_phase_exponent=a)
    for a, z in itertools.product(exponents, repeat=2)
]
SINGLE_QUBIT_GATES[:10], '...'
# + [markdown] id="72ff411420ef"
# ### Random circuit
#
# We use `random_rotations_between_two_qubit_circuit` to generate a random two-qubit circuit. Note that we provide the possible single-qubit rotations from above and declare that our two-qubit operation is the $\sqrt{i\mathrm{SWAP}}$ gate.
# + id="50f3e9622ff8"
import cirq_google as cg
from cirq.experiments import random_quantum_circuit_generation as rqcg

# Build a depth-4 random two-qubit circuit: random single-qubit gates
# from SINGLE_QUBIT_GATES interleaved with sqrt(iSWAP) entangling gates.
q0, q1 = cirq.LineQubit.range(2)
circuit = rqcg.random_rotations_between_two_qubit_circuit(
    q0, q1,
    depth=4,
    two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b),
    single_qubit_gates=SINGLE_QUBIT_GATES
)
SVGCircuit(circuit)
# + [markdown] id="b422486e30c9"
# ## Estimating fidelity
#
# Let $O_U$ be an observable that is diagonal in the computational
# basis. Then the expectation value of $O_U$ on $ρ_U$ is given by
#
# $$
# Tr(ρ_U O_U) = f ⟨𝜓_U|O_U|𝜓_U⟩ + (1 - f) Tr(O_U / D).
# $$
#
# This equation shows how $f$ can be estimated, since $Tr(ρ_U O_U)$ can be
# estimated from experimental data, and $⟨𝜓_U|O_U|𝜓_U⟩$ and $Tr(O_U / D)$ can be
# computed.
#
# Let $e_U = ⟨𝜓_U|O_U|𝜓_U⟩$, $u_U = Tr(O_U / D)$, and $m_U$ denote the experimental
# estimate of $Tr(ρ_U O_U)$. We can write the following linear equation (equivalent to the
# expression above):
#
# $$
# m_U = f e_U + (1-f) u_U \\
# m_U - u_U = f (e_U - u_U)
# $$
# + id="1cef06bfac12"
# Make long circuits (which we will truncate to shorter depths later).
MAX_DEPTH = 100
N_CIRCUITS = 10
circuits = [
    rqcg.random_rotations_between_two_qubit_circuit(
        q0, q1,
        depth=MAX_DEPTH,
        two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b),
        single_qubit_gates=SINGLE_QUBIT_GATES)
    for _ in range(N_CIRCUITS)
]
# + id="9bd38c9d20c8"
# We will truncate to these cycle depths (1, 10, 19, ..., up to MAX_DEPTH).
cycle_depths = np.arange(1, MAX_DEPTH + 1, 9)
cycle_depths
# + [markdown] id="b573f20ea0d2"
# ### Execute circuits
# Cross entropy benchmarking requires sampled bitstrings from the device being benchmarked *as well as* the true probabilities from a noiseless simulation. We find these quantities for all `(cycle_depth, circuit)` permutations.
# + id="de9e2414d46f"
# Pure-state simulator gives the ideal (noiseless) probabilities; the
# density-matrix simulator models execution with depolarizing noise.
pure_sim = cirq.Simulator()

# Pauli Error. If there is an error, it is either X, Y, or Z
# with probability E_PAULI / 3
E_PAULI = 5e-3
noisy_sim = cirq.DensityMatrixSimulator(noise=cirq.depolarize(E_PAULI))

# These two qubit circuits have 2^2 = 4 probabilities
DIM = 4

records = []
for cycle_depth in cycle_depths:
    for circuit_i, circuit in enumerate(circuits):

        # Truncate the long circuit to the requested cycle_depth.
        # 2*depth + 1 moments -- presumably one single-qubit plus one
        # two-qubit moment per cycle, plus a final single-qubit moment.
        circuit_depth = cycle_depth * 2 + 1
        assert circuit_depth <= len(circuit)
        trunc_circuit = circuit[:circuit_depth]

        # Pure-state simulation: ideal probabilities |<x|psi>|^2.
        psi = pure_sim.simulate(trunc_circuit)
        psi = psi.final_state_vector
        pure_probs = np.abs(psi)**2

        # Noisy execution: sample bitstrings, estimate probabilities.
        meas_circuit = trunc_circuit + cirq.measure(q0, q1)
        sampled_inds = noisy_sim.sample(meas_circuit, repetitions=10_000).values[:,0]
        sampled_probs = np.bincount(sampled_inds, minlength=DIM) / len(sampled_inds)

        # Save the results for this (circuit, depth) combination.
        records += [{
            'circuit_i': circuit_i,
            'cycle_depth': cycle_depth,
            'circuit_depth': circuit_depth,
            'pure_probs': pure_probs,
            'sampled_probs': sampled_probs,
        }]
        print('.', end='', flush=True)
# + [markdown] id="9902c82e0ff3"
# ## What's the observable
#
# What is $O_U$? Let's define it to be the observable that gives the sum of all probabilities, i.e.
#
# $$
# O_U |x \rangle = p(x) |x \rangle
# $$
#
# for any bitstring $x$. We can use this to derive expressions for our quantities of interest.
#
# $$
# e_U = \langle \psi_U | O_U | \psi_U \rangle \\
# = \sum_x a_x^* \langle x | O_U | x \rangle a_x \\
# = \sum_x p(x) \langle x | O_U | x \rangle \\
# = \sum_x p(x) p(x)
# $$
#
# $e_U$ is simply the sum of squared ideal probabilities. $u_U$ is a normalizing factor that only depends on the operator. Since this operator has the true probabilities in the definition, they show up here anyways.
#
# $$
# u_U = \mathrm{Tr}[O_U / D] \\
# = 1/D \sum_x \langle x | O_U | x \rangle \\
# = 1/D \sum_x p(x)
# $$
#
# For the measured values, we use the definition of an expectation value
# $$
# \langle f(x) \rangle_\rho = \sum_x p(x) f(x)
# $$
# It becomes notationally confusing because remember: our operator on basis states returns the ideal probability of that basis state $p(x)$. The probability of observing a measured basis state is estimated from samples and denoted $p_\mathrm{est}(x)$ here.
#
# $$
# m_U = \mathrm{Tr}[\rho_U O_U] \\
# = \langle O_U \rangle_{\rho_U} = \sum_{x} p_\mathrm{est}(x) p(x)
# $$
# + id="9770fc8cf5ba"
# Attach the XEB quantities to each record:
#   e_u = sum_x p(x)^2          (sum of squared ideal probabilities)
#   u_u = (1/D) sum_x p(x)      (normalizing factor)
#   m_u = sum_x p_est(x) p(x)   (measured expectation of O_U)
for rec in records:
    ideal = rec['pure_probs']
    rec.update(
        e_u=np.sum(ideal ** 2),
        u_u=np.sum(ideal) / DIM,
        m_u=np.sum(ideal * rec['sampled_probs']),
    )
# + [markdown] id="e139a1abca2b"
# Remember:
#
# $$
# m_U - u_U = f (e_U - u_U)
# $$
#
# We estimate f by performing least squares
# minimization of the sum of squared residuals
#
# $$
# \sum_U \left(f (e_U - u_U) - (m_U - u_U)\right)^2
# $$
#
# over different random circuits. The solution to the
# least squares problem is given by
#
# $$
# f = (∑_U (m_U - u_U) * (e_U - u_U)) / (∑_U (e_U - u_U)^2)
# $$
# + id="2698b1ce5218"
import pandas as pd

# Tabulate the per-(circuit, depth) records and precompute the
# least-squares ingredients: y = m_u - u_u, x = e_u - u_u, plus the
# per-row terms of the fidelity estimate f = sum(x*y) / sum(x^2).
df = pd.DataFrame(records).assign(
    y=lambda d: d['m_u'] - d['u_u'],
    x=lambda d: d['e_u'] - d['u_u'],
    numerator=lambda d: d['x'] * d['y'],
    denominator=lambda d: d['x'] ** 2,
)
df.head()
# + [markdown] id="f526271c8364"
# ### Fit
#
# We'll plot the linear relationship and least-squares fit while we transform the raw DataFrame into one containing fidelities.
# + id="705fe27d592f"
# %matplotlib inline
from matplotlib import pyplot as plt

# Color each cycle depth consistently across the scatter and fit lines.
import seaborn as sns
colors = sns.cubehelix_palette(n_colors=len(cycle_depths))
colors = {k: colors[i] for i, k in enumerate(cycle_depths)}

_lines = []

def per_cycle_depth(df):
    """Fit and plot fidelity for one cycle-depth group; return its fidelity."""
    # Least-squares slope f = sum(x*y) / sum(x^2) for this cycle depth.
    fid_lsq = df['numerator'].sum() / df['denominator'].sum()

    cycle_depth = df.name  # groupby-apply sets .name to the group key
    xx = np.linspace(0, df['x'].max())
    l, = plt.plot(xx, fid_lsq*xx, color=colors[cycle_depth])
    plt.scatter(df['x'], df['y'], color=colors[cycle_depth])

    # Collect line handles in module state so the legend below can
    # reference the first and last depths.
    global _lines
    _lines += [l]  # for legend
    return pd.Series({'fidelity': fid_lsq})

fids = df.groupby('cycle_depth').apply(per_cycle_depth).reset_index()
plt.xlabel(r'$e_U - u_U$', fontsize=18)
plt.ylabel(r'$m_U - u_U$', fontsize=18)
_lines = np.asarray(_lines)
plt.legend(_lines[[0,-1]], cycle_depths[[0,-1]], loc='best', title='Cycle depth')
plt.tight_layout()
# + [markdown] id="9703fbf361fd"
# ### Fidelities
# + id="dcb216997aeb"
# Plot fitted fidelity vs. cycle depth against the analytic prediction
# from the depolarizing noise model.
plt.plot(
    fids['cycle_depth'],
    fids['fidelity'],
    marker='o',
    label='Least Squares')

xx = np.linspace(0, fids['cycle_depth'].max())

# In XEB, we extract the depolarizing fidelity, which is
# related to (but not equal to) the Pauli error.
# For the latter, an error involves doing X, Y, or Z with E_PAULI/3
# but for the former, an error involves doing I, X, Y, or Z with e_depol/4
e_depol = E_PAULI / (1 - 1/DIM**2)

# The additional factor of four in the exponent is because each layer
# involves two moments of two qubits (so each layer has four applications
# of a single-qubit single-moment depolarizing channel).
plt.plot(xx, (1-e_depol)**(4*xx), label=r'$(1-\mathrm{e\_depol})^{4d}$')
plt.ylabel('Circuit fidelity', fontsize=18)
plt.xlabel('Cycle Depth $d$', fontsize=18)
plt.legend(loc='best')
plt.yscale('log')
plt.tight_layout()
# + id="e931726da2af"
from cirq.experiments.xeb_fitting import fit_exponential_decays

# Ordinarily, we'd use this function to fit curves for multiple pairs.
# We add our qubit pair as a column so the helper can group by it.
fids['pair'] = [(q0, q1)] * len(fids)
fit_df = fit_exponential_decays(fids)
fit_row = fit_df.iloc[0]

# Compare the fitted per-layer fidelity to the value implied by the
# depolarizing noise model used in the simulation above.
print(f"Noise model fidelity: {(1-e_depol)**4:.3e}")
print(f"XEB layer fidelity: {fit_row['layer_fid']:.3e} +- {fit_row['layer_fid_std']:.2e}")
| docs/qcvv/xeb_theory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd

# Load the ACS ancestry table; Latin-1 because the raw export is not UTF-8.
df = pd.read_csv('ACS_16_5YR_B04006.csv', encoding = 'Latin-1')
df.head()

# The geography label looks like "County, State" -- keep the state part.
# Raw string for the regex, and expand=False so a single capture group
# returns a Series (modern pandas otherwise returns a one-column DataFrame).
df['STATE'] = df['GEO.display-label'].str.extract(r'.+,(.+)', expand=False)
df['STATE'] = df.STATE.str.strip()

# Aggregate county rows up to state level. numeric_only=True restricts
# the sum to numeric columns, matching the old-pandas behavior of
# silently dropping string ("nuisance") columns.
df = df.groupby('STATE').sum(numeric_only=True).reset_index().drop(columns = 'GEO.id2')
df.to_csv('acs_fixed.csv')
df.head()
df.dtypes
| more mapping/ancestry/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Summary ###
# This assignment uses deep learning (neural networks) to predict BitCoin prices using an LSTM model
# ### Which model has a lower loss? ###
# The LSTM Stock Predictor using closing prices had a loss of 0.0245 and was less volatile.
# ### Which model tracks the actual values better over time? ###
# The FNG model tracks the actual values better over time, but its loss was greater (0.1013).
# ### Which window size works best for the model? ###
# I found that a window size of 10 worked best for the model.
| Summary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training on Cloud AI Platform
#
# **Learning Objectives**
# - Use CAIP to run a distributed training job
#
# ## Introduction
# After having testing our training pipeline both locally and in the cloud on a susbset of the data, we can submit another (much larger) training job to the cloud. It is also a good idea to run a hyperparameter tuning job to make sure we have optimized the hyperparameters of our model.
#
# This notebook illustrates how to do distributed training and hyperparameter tuning on Cloud AI Platform.
#
# To start, we'll set up our environment variables as before.
import os

PROJECT = "qwiklabs-gcp-00-34ffb0f0dc65"  # Replace with your PROJECT
BUCKET = "cloud-training-bucket"          # Replace with your BUCKET
REGION = "us-central1"                    # Choose an available region for Cloud AI Platform
TFVERSION = "1.14"                        # TF version for CAIP to use

# Export the settings so the bash cells below can read them.
os.environ.update(
    BUCKET=BUCKET,
    PROJECT=PROJECT,
    REGION=REGION,
    TFVERSION=TFVERSION,
)
# + language="bash"
# gcloud config set project $PROJECT
# gcloud config set compute/region $REGION
# -
# Next, we'll look for the preprocessed data for the babyweight model and copy it over if it's not there.
# + language="bash"
# if ! gsutil ls -r gs://$BUCKET | grep -q gs://$BUCKET/babyweight/preproc; then
# gsutil mb -l ${REGION} gs://${BUCKET}
# # copy canonical set of preprocessed files if you didn't do previous notebook
# gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
# fi
# + language="bash"
# gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
# -
# In the previous labs we developed our TensorFlow model and got it working on a subset of the data. Now we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform.
#
# ## Train on Cloud AI Platform
#
# Training on Cloud AI Platform requires two things:
# - Configuring our code as a Python package
# - Using gcloud to submit the training code to Cloud AI Platform
#
# ### Move code into a Python package
#
# A Python package is simply a collection of one or more `.py` files along with an `__init__.py` file to identify the containing directory as a package. The `__init__.py` sometimes contains initialization code but for our purposes an empty file suffices.
#
# The bash command `touch` creates an empty file in the specified location, the directory `babyweight` should already exist.
# + language="bash"
# touch babyweight/trainer/__init__.py
# -
# We then use the `%%writefile` magic to write the contents of the cell below to a file called `task.py` in the `babyweight/trainer` folder.
# #### **Exercise 1**
#
# The cell below write the file `babyweight/trainer/task.py` which sets up our training job. Here is where we determine which parameters of our model to pass as flags during training using the `parser` module. Look at how `batch_size` is passed to the model in the code below. Use this as an example to parse arguements for the following variables
# - `nnsize` which represents the hidden layer sizes to use for DNN feature columns
# - `nembeds` which represents the embedding size of a cross of n key real-valued parameters
# - `train_examples` which represents the number of examples (in thousands) to run the training job
# - `eval_steps` which represents the positive number of steps for which to evaluate model
# - `pattern` which specifies a pattern that has to be in input files. For example '00001-of' would process only one shard. For this variable, set 'of' to be the default.
#
# Be sure to include a default value for the parsed arguments above and specfy the `type` if necessary.
# +
# %%writefile babyweight/trainer/task.py
# Entry point for the training job: parses command-line flags and pushes
# them into the module-level knobs in model.py, then starts training.
import argparse
import json
import os

import tensorflow as tf

from . import model

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bucket",
        help="GCS path to data. We assume that data is in \
gs://BUCKET/babyweight/preproc/",
        required=True
    )
    parser.add_argument(
        "--output_dir",
        help="GCS location to write checkpoints and export models",
        required=True
    )
    parser.add_argument(
        "--batch_size",
        help="Number of examples to compute gradient over.",
        type=int,
        default=512
    )
    parser.add_argument(
        "--job-dir",
        help="this model ignores this field, but it is required by gcloud",
        default="junk"
    )
    # Exercise: add the remaining flags (with defaults and types):
    # TODO: Your code goes here  (--nnsize: DNN hidden layer sizes)
    # TODO: Your code goes here  (--nembeds: embedding size for the feature cross)
    # TODO: Your code goes here  (--train_examples: training examples, in thousands)
    # TODO: Your code goes here  (--eval_steps: number of evaluation steps)
    # TODO: Your code goes here  (--pattern: input-file pattern, default "of")

    # Parse arguments
    args = parser.parse_args()
    arguments = args.__dict__

    # Pop unnecessary args needed for gcloud
    # NOTE(review): argparse stores "--job-dir" under the key "job_dir"
    # (dashes become underscores), so this pop is a no-op -- confirm and
    # pop "job_dir" instead if the leftover key matters downstream.
    arguments.pop("job-dir", None)

    # Assign the arguments to the model variables
    output_dir = arguments.pop("output_dir")
    model.BUCKET = arguments.pop("bucket")
    model.BATCH_SIZE = arguments.pop("batch_size")

    # --train_examples is given in thousands of examples.
    model.TRAIN_STEPS = (
        arguments.pop("train_examples") * 1000) / model.BATCH_SIZE
    model.EVAL_STEPS = arguments.pop("eval_steps")
    print ("Will train for {} steps using batch_size={}".format(
        model.TRAIN_STEPS, model.BATCH_SIZE))

    model.PATTERN = arguments.pop("pattern")
    model.NEMBEDS = arguments.pop("nembeds")
    model.NNSIZE = arguments.pop("nnsize")
    print ("Will use DNN size of {}".format(model.NNSIZE))

    # Append trial_id to path if we are doing hptuning
    # This code can be removed if you are not using hyperparameter tuning
    output_dir = os.path.join(
        output_dir,
        json.loads(
            os.environ.get("TF_CONFIG", "{}")
        ).get("task", {}).get("trial", "")
    )

    # Run the training job
    model.train_and_evaluate(output_dir)
# -
# In the same way we can write to the file `model.py` the model that we developed in the previous notebooks.
#
# #### **Exercise 2**
#
# Complete the TODOs in the code cell below to create out `model.py`. We'll use the code we wrote for the Wide & Deep model. Look back at your `3_tensorflow_wide_deep` notebook and copy/paste the necessary code from that notebook into its place in the cell below.
# +
# %%writefile babyweight/trainer/model.py
# Exercise scaffold for the babyweight estimator model; the TODOs are
# intentionally left for the student, so parts of this file will not
# parse until they are filled in.
import shutil
import numpy as np
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

# Module-level knobs; task.py overwrites these from command-line flags.
BUCKET = None  # set from task.py
PATTERN = "of" # gets all files

# Determine CSV and label columns
# TODO: Your code goes here

# Set default values for each CSV column
# TODO: Your code goes here

# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]

# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
    def _input_fn():
        def decode_csv(value_column):
            # TODO: Your code goes here

        # Use prefix to create file path
        file_path = "gs://{}/babyweight/preproc/{}*{}*".format(
            BUCKET, prefix, PATTERN)

        # Create list of files that match pattern
        file_list = tf.gfile.Glob(filename=file_path)

        # Create dataset from file list
        # TODO: Your code goes here

        # In training mode, shuffle the dataset and repeat indefinitely
        # TODO: Your code goes here

        # NOTE(review): the next line is deliberately incomplete scaffold
        # (a SyntaxError until the TODO is filled in).
        dataset = # TODO: Your code goes here

        # This will now return batches of features, label
        return dataset
    return _input_fn

# Define feature columns
def get_wide_deep():
    # TODO: Your code goes here
    return wide, deep

# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
    # TODO: Your code goes here
    return tf.estimator.export.ServingInputReceiver(
        features=features, receiver_tensors=feature_placeholders)

# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
    # RMSE between true labels and the estimator's "predictions" output.
    pred_values = predictions["predictions"]
    return {"rmse": tf.metrics.root_mean_squared_error(
        labels=labels, predictions=pred_values)}

# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
    # TODO: Your code goes here
# -
# ## Train locally
#
# After moving the code to a package, make sure it works as a standalone. Note, we incorporated the `--pattern` and `--train_examples` flags so that we don't try to train on the entire dataset while we are developing our pipeline. Once we are sure that everything is working on a subset, we can change the pattern so that we can train on all the data. Even for this subset, this takes about *3 minutes* in which you won't see any output ...
# #### **Exercise 3**
#
# Fill in the missing code in the TODOs below so that we can run a very small training job over a single file (i.e. use the `pattern` equal to "00000-of-") with 1 train step and 1 eval step
# + language="bash"
# echo "bucket=${BUCKET}"
# rm -rf babyweight_trained
# export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
# python -m trainer.task \
# --bucket= # TODO: Your code goes here
# --output_dir= # TODO: Your code goes here
# --job-dir=./tmp \
# --pattern= # TODO: Your code goes here
# --train_examples= # TODO: Your code goes here
# --eval_steps= # TODO: Your code goes here
# -
# ## Making predictions
#
# The JSON below represents an input into your prediction model. Write the input.json file below with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
# %%writefile inputs.json
{"is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
# #### **Exercise 4**
#
# Finish the code in cell below to run a local prediction job on the `inputs.json` file we just created. You will need to provide two additional flags
# - one for `model-dir` specifying the location of the model binaries
# - one for `json-instances` specifying the location of the json file on which you want to predict
# + language="bash"
# MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
# echo $MODEL_LOCATION
# gcloud ai-platform local predict # TODO: Your code goes here
# -
# ## Training on the Cloud with CAIP
#
# Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about <b> an hour </b> for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
# #### **Exercise 5**
#
# Look at the TODOs in the code cell below and fill in the missing information. Some of the required flags are already there for you. You will need to provide the rest.
# + language="bash"
# OUTDIR=gs://${BUCKET}/babyweight/trained_model
# JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
# echo $OUTDIR $REGION $JOBNAME
# gsutil -m rm -rf $OUTDIR
# gcloud ai-platform jobs submit training $JOBNAME \
# --region= # TODO: Your code goes here
# --module-name= # TODO: Your code goes here
# --package-path= # TODO: Your code goes here
# --job-dir= # TODO: Your code goes here
# --staging-bucket=gs://$BUCKET \
# --scale-tier= #TODO: Your code goes here
# --runtime-version= #TODO: Your code goes here
# -- \
# --bucket=${BUCKET} \
# --output_dir=${OUTDIR} \
# --train_examples=200000
# -
# When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was:
# <pre>
# Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186
# </pre>
# The final RMSE was 1.03 pounds.
# <h2> Optional: Hyperparameter tuning </h2>
# <p>
# All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.xml and pass it as --configFile.
# This step will take <b>1 hour</b> -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
#
# #### **Exercise 6**
#
# We need to create a .yaml file to pass with our hyperparameter tuning job. Fill in the TODOs below for each of the parameters we want to include in our hyperparameter search.
# %writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: # TODO: Your code goes here
minValue: # TODO: Your code goes here
maxValue: # TODO: Your code goes here
scaleType: # TODO: Your code goes here
- parameterName: nembeds
type: # TODO: Your code goes here
minValue: # TODO: Your code goes here
maxValue: # TODO: Your code goes here
scaleType: # TODO: Your code goes here
- parameterName: nnsize
type: # TODO: Your code goes here
minValue: # TODO: Your code goes here
maxValue: # TODO: Your code goes here
scaleType: # TODO: Your code goes here
# + language="bash"
# OUTDIR=gs://${BUCKET}/babyweight/hyperparam
# JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
# echo $OUTDIR $REGION $JOBNAME
# gsutil -m rm -rf $OUTDIR
# gcloud ai-platform jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=trainer.task \
# --package-path=$(pwd)/babyweight/trainer \
# --job-dir=$OUTDIR \
# --staging-bucket=gs://$BUCKET \
# --scale-tier=STANDARD_1 \
# --config=hyperparam.yaml \
# --runtime-version=$TFVERSION \
# -- \
# --bucket=${BUCKET} \
# --output_dir=${OUTDIR} \
# --eval_steps=10 \
# --train_examples=20000
# -
# <h2> Repeat training </h2>
# <p>
# This time with tuned parameters (note last line)
# + language="bash"
# OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
# JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
# echo $OUTDIR $REGION $JOBNAME
# gsutil -m rm -rf $OUTDIR
# gcloud ai-platform jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=trainer.task \
# --package-path=$(pwd)/babyweight/trainer \
# --job-dir=$OUTDIR \
# --staging-bucket=gs://$BUCKET \
# --scale-tier=STANDARD_1 \
# --runtime-version=$TFVERSION \
# -- \
# --bucket=${BUCKET} \
# --output_dir=${OUTDIR} \
# --train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
# -
# Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| courses/machine_learning/deepdive/05_review/labs/5_train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Sentiment Analysis
#
# ## Using XGBoost in SageMaker
#
# _Deep Learning Nanodegree Program | Deployment_
#
# 1. Batch Transform
# 2. Hyperparameter Tuning
# 3. Updating a Model
# ### Downloading the data
#
# Download the dataset from the paper. This dataset contains different movie review and its sentiment where 1 = Positive Review, and 0 = Negative Review.
#
# > <NAME>., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.
#
# Use magic commands for direct download from the web.
# %mkdir ../data
# !wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
# !tar -zxf ../data/aclImdb_v1.tar.gz -C ../data
# +
import os
import glob
# Read in the IMDb dataset
# Store the positive and negative reviews
# separately for the training and testing datasets.
def read_imdb_data(data_dir='../data/aclImdb'):
    """Read the IMDb review dataset from disk.

    Parameters
    ----------
    data_dir : str
        Root of the extracted aclImdb dataset. Expected layout is
        ``<data_dir>/<train|test>/<pos|neg>/*.txt`` with one review per file.

    Returns
    -------
    data, labels : dict, dict
        Nested dicts keyed by split ('train'/'test') then sentiment
        ('pos'/'neg'); ``data`` holds review texts, ``labels`` holds 1 for
        positive and 0 for negative reviews, aligned index-for-index.
    """
    data = {}
    labels = {}
    # Walk the two splits, keeping train and test reviews separate.
    for data_type in ['train', 'test']:
        data[data_type] = {}
        labels[data_type] = {}
        # Within each split, reviews are grouped by sentiment directory.
        for sentiment in ['pos', 'neg']:
            data[data_type][sentiment] = []
            labels[data_type][sentiment] = []
            pattern = os.path.join(data_dir, data_type, sentiment, '*.txt')
            for filename in glob.glob(pattern):
                # One review per file: read the whole file as a single text.
                with open(filename) as review:
                    data[data_type][sentiment].append(review.read())
                # Encode sentiment as 1 (positive) or 0 (negative).
                labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)
            assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \
                    "{}/{} data size does not match labels size".format(data_type, sentiment)
    return data, labels
# -
data, labels = read_imdb_data()
print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format(
len(data['train']['pos']), len(data['train']['neg']),
len(data['test']['pos']), len(data['test']['neg'])))
# +
from sklearn.utils import shuffle
def prepare_imdb_data(data, labels):
    """Combine pos/neg IMDb reviews into shuffled train and test sets.

    Takes the nested dicts produced by ``read_imdb_data`` and returns flat
    lists: ``data_train, data_test, labels_train, labels_test``.
    """
    # Concatenate positive then negative reviews (and labels) per split.
    data_train = data['train']['pos'] + data['train']['neg']
    labels_train = labels['train']['pos'] + labels['train']['neg']
    data_test = data['test']['pos'] + data['test']['neg']
    labels_test = labels['test']['pos'] + labels['test']['neg']
    # Shuffle each split so pos/neg examples are interleaved; shuffling the
    # reviews and labels together keeps them aligned.
    data_train, labels_train = shuffle(data_train, labels_train)
    data_test, labels_test = shuffle(data_test, labels_test)
    return data_train, data_test, labels_train, labels_test
# -
train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)
print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X)))
train_X[100]
# ### Processing the data
#
# Format the reviews so that we can process into vectors to put into our model.
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
from nltk.stem.porter import *
stemmer = PorterStemmer()
# NLTK library allows to remove the stopwards and save the stems of the words
# +
import re
from bs4 import BeautifulSoup
def review_to_words(review):
    """Convert one raw review (possibly containing HTML) into a list of stems.

    Strips HTML tags, lower-cases, removes non-alphanumeric characters,
    drops English stopwords, and Porter-stems the remaining words.
    """
    text = BeautifulSoup(review, "html.parser").get_text()  # Remove HTML tags
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())  # Lower-case, strip punctuation
    words = text.split()  # Split string into words
    # Build the stopword set and stemmer once: the original re-read the full
    # stopword list and constructed a fresh PorterStemmer for every word.
    stop_words = set(stopwords.words("english"))
    porter = PorterStemmer()
    return [porter.stem(w) for w in words if w not in stop_words]
# +
import pickle
cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files
os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists
def preprocess_data(data_train, data_test, labels_train, labels_test,
                    cache_dir=cache_dir, cache_file="preprocessed_data.pkl"):
    """Convert each review to a list of word stems, caching the result on disk.

    Parameters
    ----------
    data_train, data_test : list of str
        Raw review texts.
    labels_train, labels_test : list of int
        Sentiment labels aligned with the reviews.
    cache_dir, cache_file : str
        Location of the pickle cache; pass ``cache_file=None`` to disable
        caching entirely.

    Returns
    -------
    words_train, words_test, labels_train, labels_test
        Tokenized/stemmed reviews plus the (possibly cached) labels.
    """
    # If cache_file is not None, try to read from it first.
    cache_data = None
    if cache_file is not None:
        try:
            with open(os.path.join(cache_dir, cache_file), "rb") as f:
                cache_data = pickle.load(f)
            print("Read preprocessed data from cache file:", cache_file)
        # Narrowed from a bare `except:`, which also hid real bugs and
        # swallowed KeyboardInterrupt; a missing/corrupt cache is still fine.
        except (OSError, EOFError, pickle.UnpicklingError):
            pass  # unable to read from cache, but that's okay
    # If cache is missing, then do the heavy lifting.
    if cache_data is None:
        words_train = [review_to_words(review) for review in data_train]
        words_test = [review_to_words(review) for review in data_test]
        # Write to cache file for future runs.
        if cache_file is not None:
            cache_data = dict(words_train=words_train, words_test=words_test,
                              labels_train=labels_train, labels_test=labels_test)
            with open(os.path.join(cache_dir, cache_file), "wb") as f:
                pickle.dump(cache_data, f)
            print("Wrote preprocessed data to cache file:", cache_file)
    else:
        # Unpack data loaded from cache file.
        words_train, words_test, labels_train, labels_test = (cache_data['words_train'],
                cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])
    return words_train, words_test, labels_train, labels_test
# -
# Preprocess data
train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)
# ### Extract Bag-of-Words features
#
# For the model we will be implementing, rather than using the reviews directly, we are going to transform each review into a Bag-of-Words feature representation. Keep in mind that 'in the wild' we will only have access to the training set so our transformer can only use the training set to construct a representation.
# +
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import joblib
# joblib is an enhanced version of pickle that is more efficient for storing NumPy arrays
def extract_BoW_features(words_train, words_test, vocabulary_size=5000,
                         cache_dir=cache_dir, cache_file="bow_features.pkl"):
    """Extract Bag-of-Words features for documents already split into words.

    Fits a ``CountVectorizer`` on the training documents only (so the test
    set cannot leak into the vocabulary) and transforms both sets into dense
    count arrays, caching the result with joblib.

    Returns ``features_train, features_test, vocabulary``.
    """
    # If cache_file is not None, try to read from it first.
    cache_data = None
    if cache_file is not None:
        try:
            with open(os.path.join(cache_dir, cache_file), "rb") as f:
                cache_data = joblib.load(f)
            print("Read features from cache file:", cache_file)
        # Narrowed from a bare `except:` so real bugs (and Ctrl-C) are not
        # silently swallowed; a missing or unreadable cache is still tolerated.
        except (OSError, EOFError, ValueError):
            pass  # unable to read from cache, but that's okay
    # If cache is missing, then do the heavy lifting.
    if cache_data is None:
        # Fit a vectorizer to training documents and use it to transform them.
        # NOTE: documents are already preprocessed and tokenized into words,
        # so identity functions are passed to skip those steps.
        vectorizer = CountVectorizer(max_features=vocabulary_size,
                preprocessor=lambda x: x, tokenizer=lambda x: x)  # already preprocessed
        features_train = vectorizer.fit_transform(words_train).toarray()
        # Apply the same vectorizer to the test documents (unknown words are ignored).
        features_test = vectorizer.transform(words_test).toarray()
        # Write to cache file for future runs (store vocabulary as well).
        if cache_file is not None:
            vocabulary = vectorizer.vocabulary_
            cache_data = dict(features_train=features_train, features_test=features_test,
                              vocabulary=vocabulary)
            with open(os.path.join(cache_dir, cache_file), "wb") as f:
                joblib.dump(cache_data, f)
            print("Wrote features to cache file:", cache_file)
    else:
        # Unpack data loaded from cache file.
        features_train, features_test, vocabulary = (cache_data['features_train'],
                cache_data['features_test'], cache_data['vocabulary'])
    # Return both the extracted features as well as the vocabulary.
    return features_train, features_test, vocabulary
# -
# Extract Bag of Words features for both training and test datasets
train_X, test_X, vocabulary = extract_BoW_features(train_X, test_X)
# ### Classification using XGBoost
#
# Now that we have created the feature representation of our training (and testing) data, it is time to start setting up and using the XGBoost classifier provided by SageMaker.
#
# For SageMaker's Batch Transform functionality, we must save the processed data onto Amazon S3
# +
import pandas as pd
val_X = pd.DataFrame(train_X[:10000])
train_X = pd.DataFrame(train_X[10000:])
val_y = pd.DataFrame(train_y[:10000])
train_y = pd.DataFrame(train_y[10000:])
# -
# The documentation for the XGBoost algorithm in SageMaker requires that the saved datasets should contain no headers or index and that for the training and validation data, the label should occur first for each sample.
#
# For more information about this and other algorithms, the SageMaker developer documentation can be found on __[Amazon's website.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__
# First we make sure that the local directory in which we'd like to store the training and validation csv files exists.
data_dir = '../data/xgboost'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# First, save the test data to test.csv in the data_dir directory. Note that we do not save the associated ground truth
# labels, instead we will use them later to compare with our model output.
pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)
pd.concat([val_y, val_X], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)
pd.concat([train_y, train_X], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
# To save a bit of memory we can set text_X, train_X, val_X, train_y and val_y to None.
test_X = train_X = val_X = train_y = val_y = None
# ### Uploading Training / Validation files to S3
#
# Amazon's S3 service allows us to store files that can be access by both the built-in training models such as the XGBoost model we will be using as well as custom models such as the one we will see a little later.
#
# Two different approaches to dealing with SageMaker
# 1. Low Level Functionality - flexible functionality
# 2. High Level Functionality - quicker and intuitive
#
# Recall the method `upload_data()` which is a member of object representing our current SageMaker session. What this method does is upload the data to the default bucket (which is created if it does not exist) into the path described by the key_prefix variable.
#
# For additional resources, see the __[SageMaker API documentation](http://sagemaker.readthedocs.io/en/latest/)__ and in addition the __[SageMaker Developer Guide.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__
# +
import sagemaker
session = sagemaker.Session() # Store the current SageMaker session
# S3 prefix (which folder will we use)
prefix = 'sentiment-xgboost'
test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)
val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)
train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)
# -
# ### Creating the XGBoost model
#
# Now that the data has been uploaded it is time to create the XGBoost model. To begin with, we need to do some setup. At this point it is worth discussing what a model is in SageMaker. It is easiest to think of a model of comprising three different objects in the SageMaker ecosystem, which interact with one another.
#
# - Model Artifacts : The actual machine learning model with all parameters and necessary data
# - Training Code (Container) : Code to create the artifacts by training the model
# - Inference Code (Container) : Code to utilize the trained model with given artifacts
from sagemaker import get_execution_role
# Our current execution role is required when creating the model as the training
# and inference code will need to access the model artifacts.
role = get_execution_role()
# +
# We need to retrieve the location of the container which is provided by Amazon for using XGBoost.
# As a matter of convenience, the training and inference code both use the same container.
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(session.boto_region_name, 'xgboost')
# -
# Create the XGBoost model
xgb = sagemaker.estimator.Estimator(container, # The location of the container we wish to use
role, # What is our current IAM Role
train_instance_count=1, # How many compute instances
train_instance_type='ml.m4.xlarge', # What kind of compute instances
output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
sagemaker_session=session)
# Define the hyperparameters of the model
xgb.set_hyperparameters(max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
silent=0,
objective='binary:logistic',
early_stopping_rounds=10,
num_round=500)
# ### Fit the XGBoost model
#
# Now that our model has been set up we simply need to attach the training and validation datasets and then ask SageMaker to set up the computation.
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
# ### Testing the model
#
# SageMaker provides the Batch Transform method which is a convenient way to perform inference on a large dataset when real-time results are not required.
#
# To perform a Batch Transformation we need to first create a transformer objects from our trained estimator object.
# Define the transformer
xgb_transformer = xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')
# Transform the dataset inside the test_location, define the structure of testset
# by the content_type and split_type
xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')
# Transform job runs on the background, wait method shows the progression
xgb_transformer.wait()
# Move the result of transform job to the local directory
# !aws s3 cp --recursive $xgb_transformer.output_path $data_dir
# Read in the output from the model, and round the predictions which are in form of probability
predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)
predictions = [round(num) for num in predictions.squeeze().values]
from sklearn.metrics import accuracy_score
# calculate the accuracy of our model on the test dataset
accuracy_score(test_y, predictions)
# ### Using the Hyperparameter Tuner
#
# SageMaker has its own hyperparameter tuner that we can use on different machine learning models. We define the tuner and fit so that it finds the appropriate hyperparams in given range.
# +
# First, make sure to import the relevant objects used to construct the tuner
from sagemaker.tuner import IntegerParameter, ContinuousParameter, HyperparameterTuner
xgb_hyperparameter_tuner = HyperparameterTuner(estimator = xgb, # The estimator object to use as the basis for the training jobs.
objective_metric_name = 'validation:rmse', # The metric used to compare trained models.
objective_type = 'Minimize', # Whether we wish to minimize or maximize the metric.
max_jobs = 6, # The total number of models to train
max_parallel_jobs = 3, # The number of models to train in parallel
hyperparameter_ranges = {
'max_depth': IntegerParameter(3, 12),
'eta' : ContinuousParameter(0.05, 0.5),
'min_child_weight': IntegerParameter(2, 8),
'subsample': ContinuousParameter(0.5, 0.9),
'gamma': ContinuousParameter(0, 10),
})
# -
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')
# Find the hyperparams using the train and validation dataset
xgb_hyperparameter_tuner.fit({'train': s3_input_train, 'validation': s3_input_validation})
# View the progress
xgb_hyperparameter_tuner.wait()
# We can fetch the best model that our tuner has found
# The rest of the testing is the same as above, using the transformer
xgb_attached = sagemaker.estimator.Estimator.attach(xgb_hyperparameter_tuner.best_training_job())
# ### Looking at New Data
#
# Once we obtain new data, we might want to change our model to apply the insights gained from the new dataset.
import new_data
# Fetch the new data
new_X, new_Y = new_data.get_new_data()
# +
# We must process this new data to feed it into the model, reusing the
# vocabulary learned on the original training set.
vectorizer = CountVectorizer(vocabulary=vocabulary,
                            preprocessor=lambda x: x, tokenizer=lambda x: x)
# Bug fix: the original referenced the misspelled name `vectorszer`,
# which raises NameError at runtime.
new_XV = vectorizer.transform(new_X).toarray()
# -
# Save the new dataset and upload it to S3 to feed into the model
pd.DataFrame(new_XV).to_csv(os.path.join(data_dir, 'new_data.csv'), header=False, index=False)
new_data_location = session.upload_data(os.path.join(data_dir, 'new_data.csv'), key_prefix=prefix)
# Test the new dataset with the current model
xgb_transformer.transform(new_data_location, content_type='text/csv', split_type='Line')
xgb_transformer.wait()
# Fetch the results to local dir
# !aws s3 cp --recursive $xgb_transformer.output_path $data_dir
# Process the results
predictions = pd.read_csv(os.path.join(data_dir, 'new_data.csv.out'), header=None)
predictions = [round(num) for num in predictions.squeeze().values]
# Check for accuracy
accuracy_score(new_Y, predictions)
# ### Finding the problem
# Deploy the model first to diagnose the problem
xgb_predictor = xgb.deploy(initial_instance_count = 1, instance_type = 'ml.m4.xlarge')
# +
from sagemaker.predictor import csv_serializer
# We need to tell the endpoint what format the data we are sending is in so that SageMaker can perform the serialization.
xgb_predictor.content_type = 'text/csv'
xgb_predictor.serializer = csv_serializer
# +
# Building the generator for the dataset so that we can view
# Individual sample that is not being tested well
def get_sample(in_X, in_XV, in_Y):
    """Yield (review, true_label) pairs that the deployed model misclassifies.

    Walks the raw reviews ``in_X`` alongside their vectorized form ``in_XV``
    and true labels ``in_Y``, querying the module-level ``xgb_predictor``
    endpoint for each sample and yielding only the mispredicted ones.
    """
    for idx, raw_review in enumerate(in_X):
        prediction = round(float(xgb_predictor.predict(in_XV[idx])))
        if prediction != in_Y[idx]:
            yield raw_review, in_Y[idx]
gn = get_sample(new_X, new_XV, new_Y)
# -
# Analyze the distribution of frequent words for the new dataset
new_vectorizer = CountVectorizer(max_features=5000,
preprocessor=lambda x: x, tokenizer=lambda x: x)
new_vectorizer.fit(new_X)
# +
# Compare the word distributions of two datasets
original_vocabulary = set(vocabulary.keys())
new_vocabulary = set(new_vectorizer.vocabulary_.keys())
print(new_vocabulary - original_vocabulary)
print(original_vocabulary - new_vocabulary)
# -
# ### Building the new model
#
# To reflect the change in word distribution, we want to generate new dictionary and train a new model with it.
# +
new_XV = new_vectorizer.transform(new_X).toarray()
# Earlier we shuffled the training dataset so to make things simple we can just assign
# the first 10 000 reviews to the validation set and use the remaining reviews for training.
new_val_X = pd.DataFrame(new_XV[:10000])
new_train_X = pd.DataFrame(new_XV[10000:])
new_val_y = pd.DataFrame(new_Y[:10000])
new_train_y = pd.DataFrame(new_Y[10000:])
new_X = None
# +
# Overwrite the dataset to save storage space
pd.DataFrame(new_XV).to_csv(os.path.join(data_dir, 'new_data.csv'), header=False, index=False)
pd.concat([new_val_y, new_val_X], axis=1).to_csv(os.path.join(data_dir, 'new_validation.csv'), header=False, index=False)
pd.concat([new_train_y, new_train_X], axis=1).to_csv(os.path.join(data_dir, 'new_train.csv'), header=False, index=False)
# -
new_val_y = new_val_X = new_train_y = new_train_X = new_XV = None
# Upload the new datasets on S3
new_data_location = session.upload_data(os.path.join(data_dir, 'new_data.csv'), key_prefix=prefix)
new_val_location = session.upload_data(os.path.join(data_dir, 'new_validation.csv'), key_prefix=prefix)
new_train_location = session.upload_data(os.path.join(data_dir, 'new_train.csv'), key_prefix=prefix)
# +
# Define the new model to train
new_xgb = sagemaker.estimator.Estimator(container, # The location of the container we wish to use
role, # What is our current IAM Role
train_instance_count=1, # How many compute instances
train_instance_type='ml.m4.xlarge', # What kind of compute instances
output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
sagemaker_session=session)
new_xgb.set_hyperparameters(max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
silent=0,
objective='binary:logistic',
early_stopping_rounds=10,
num_round=500)
# +
# Train the new model with new dataset
s3_new_input_train = sagemaker.s3_input(s3_data=new_train_location, content_type='csv')
s3_new_input_validation = sagemaker.s3_input(s3_data=new_val_location, content_type='csv')
new_xgb.fit({'train': s3_new_input_train, 'validation': s3_new_input_validation})
# -
# Check the accuracy on the new model using Batch Transform
new_xgb_transformer = new_xgb.transformer(instance_count = 1, instance_type = 'ml.m4.xlarge')
new_xgb_transformer.transform(new_data_location, content_type='text/csv', split_type='Line')
new_xgb_transformer.wait()
# Bring the result into local dir
# !aws s3 cp --recursive $new_xgb_transformer.output_path $data_dir
# +
# Check the accuracy of the new model on the new dataset
predictions = pd.read_csv(os.path.join(data_dir, 'new_data.csv.out'), header=None)
predictions = [round(num) for num in predictions.squeeze().values]
accuracy_score(new_Y, predictions)
# +
# Empty the previous cache on the previous dataset
# Load the preprocessed_data.pkl which is cache on new dataset words
cache_data = None
with open(os.path.join(cache_dir, "preprocessed_data.pkl"), "rb") as f:
cache_data = pickle.load(f)
print("Read preprocessed data from cache file:", "preprocessed_data.pkl")
test_X = cache_data['words_test']
test_Y = cache_data['labels_test']
# Here we set cache_data to None so that it doesn't occupy memory
cache_data = None
# -
test_X = new_vectorizer.transform(test_X).toarray()
pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False)
# Upload the newly vectorized test file and batch transform
test_location = session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)
new_xgb_transformer.transform(test_location, content_type='text/csv', split_type='Line')
new_xgb_transformer.wait()
# Fetch the finished result
# !aws s3 cp --recursive $new_xgb_transformer.output_path $data_dir
# Get the accuracy
predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None)
predictions = [round(num) for num in predictions.squeeze().values]
accuracy_score(test_Y, predictions)
# ### Updating the Model
#
# To update the model, we only have to update the existing endpoint so that there is no interruption with the service.
new_xgb_transformer.model_name
# +
from time import gmtime, strftime
new_xgb_endpoint_config_name = "sentiment-update-xgboost-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
#
new_xgb_endpoint_config_info = session.sagemaker_client.create_endpoint_config(
EndpointConfigName = new_xgb_endpoint_config_name,
ProductionVariants = [{
"InstanceType": "ml.m4.xlarge",
"InitialVariantWeight": 1,
"InitialInstanceCount": 1,
"ModelName": new_xgb_transformer.model_name,
"VariantName": "XGB-Model"
}])
# -
# Update the original endpoint with the newly configured endpoint!
session.sagemaker_client.update_endpoint(EndpointName=xgb_predictor.endpoint, EndpointConfigName=new_xgb_endpoint_config_name)
# ### Cleaning up
# Make sure all endpoints are stopped,
# and all files deleted
# First we will remove all of the files contained in the data_dir directory
# !rm $data_dir/*
# And then we delete the directory itself
# !rmdir $data_dir
# Similarly we will remove the files in the cache_dir directory and the directory itself
# !rm $cache_dir/*
# !rmdir $cache_dir
session.wait_for_endpoint(xgb_predictor.endpoint)
xgb_predictor.delete_endpoint()
| Sentiment Inference (XGB)/.ipynb_checkpoints/IMDB Sentiment Analysis - XGBoost-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
dfs = []
dfs.append(["off", pd.read_csv("vibration_1625603864.7574449.csv", index_col=False)])
dfs.append(["1 (with full at end)", pd.read_csv("vibration_1625604065.842192.csv", index_col=False)])
dfs.append(["2", pd.read_csv("vibration_1625604197.0055935.csv", index_col=False)])
dfs.append(["3", pd.read_csv("vibration_1625604235.14805.csv", index_col=False)])
dfs.append(["4", pd.read_csv("vibration_1625604267.021674.csv", index_col=False)])
dfs.append(["5", pd.read_csv("vibration_1625604337.1129477.csv", index_col=False)])
dfs.append(["6", pd.read_csv("vibration_1625604372.2732892.csv", index_col=False)])
dfs.append(["7", pd.read_csv("vibration_1625604409.6699996.csv", index_col=False)])
dfs.append(["Full", pd.read_csv("vibration_1625604452.1975186.csv", index_col=False)])
# +
import matplotlib.pyplot as plt
import datetime as dt
import matplotlib.dates as md
import numpy as np
# %matplotlib inline
plt.figure(figsize=[10, 10])
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
def plot_levels(plt, df0):
    """Plot overall acceleration magnitude over time for one recording.

    ``df0`` is a ``[label, dataframe]`` pair; the dataframe must contain a
    ``time_s`` column (Unix timestamps) plus ``imu_x_g``/``imu_y_g``/
    ``imu_z_g`` acceleration columns. The first 10 samples are dropped
    before plotting (presumably sensor settling — TODO confirm).
    """
    label, frame = df0
    timestamps = [dt.datetime.fromtimestamp(ts) for ts in frame["time_s"]]
    # Drop the first 10 readings from both the frame and the time axis.
    frame = frame[10:]
    timestamps = timestamps[10:]
    acc_x = np.array(frame["imu_x_g"])
    acc_y = np.array(frame["imu_y_g"])
    acc_z = np.array(frame["imu_z_g"])
    # Euclidean norm of the three acceleration axes.
    magnitude = np.sqrt(acc_x * acc_x + acc_y * acc_y + acc_z * acc_z)
    plt.plot(timestamps, magnitude, label=label)
for d in dfs:
plot_levels(plt, d)
plt.legend()
plt.show()
# +
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
import datetime as dt
import time
n=20
duration=1000
now=time.mktime(time.localtime())
timestamps=np.linspace(now,now+duration,n)
dates=[dt.datetime.fromtimestamp(ts) for ts in timestamps]
values=np.sin((timestamps-now)/duration*2*np.pi)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.plot(dates,values)
plt.show()
# -
total
df0
df0
dfs
| data/vibration_test_2021Jul06/Data Exploration Vibration 2021-Jul-06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="STZO0OMlBh8_"
# **Summary:**
#
# This notebook shows how to train a Bert model for the RACE dataset by using the Huggingface *transformers* and *datasets* libraries.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 732, "status": "ok", "timestamp": 1604939342130, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="axMB1LkSDQen" outputId="a7d7dacb-7a8b-4abe-c713-daaee44169dc"
# !nvidia-smi
# + [markdown] id="q8KHdvHpB2Qa"
# **Clone and install**
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"elapsed": 48605, "status": "ok", "timestamp": 1604939390021, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="8klpyPsOv3hS" outputId="4498a86c-6e54-46d0-eff2-3b318786474b"
# !git clone https://github.com/huggingface/transformers
# %cd transformers
# !pip install .
# !pip install -r ./examples/requirements.txt
# %cd ..
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 59527, "status": "ok", "timestamp": 1604939400958, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="iXFLOjg8pz-H" outputId="d9b20b80-3fbd-4e9b-b2e1-7fc4a2181d44"
# !pip install datasets
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 59525, "status": "ok", "timestamp": 1604939400970, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="UhwiUjAMqzJr" outputId="7d758ec4-f48b-4c6d-8257-9f7eec6d9c01"
# cd transformers/examples/multiple-choice/
# + [markdown] id="8oCh2msxA8Gz"
# **Generic imports**
# + id="Acq3GXvDqW7i"
import logging
import os
import numpy as np
# + [markdown] id="-EbnJKS7A_Th"
# **Huggingface imports**
# + id="GQphBbtSqk6n"
from datasets import load_dataset
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from utils_multiple_choice import *
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 65458, "status": "ok", "timestamp": 1604939406929, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="EudACmOEOu0Y" outputId="4f0d96ac-2636-4e02-c019-9dbe8ea85b34"
# cd ../../..
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 92645, "status": "ok", "timestamp": 1604939434132, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="a2W0LJiKc2uR" outputId="c5e7a3d8-602d-40b6-b225-ad99c252f9de"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="V2ClzgocBG6s"
# **Load dataset and model**
# + colab={"base_uri": "https://localhost:8080/", "height": 465, "referenced_widgets": ["0c919f089b9d494a9abbee9a6f15d203", "477ffdcd36c74d70ad8381829cc63d30", "c3efd06a41d34266ae6695e1db152656", "3256a7e82194496db9128d530938983a", "67bffb5835d24872b11a637a8e27e5ec", "79d2353d4246447c9e5901de94898d25", "7c495ae42366493bab5ce9163190a848", "364014a1c8314e99a91d9b71a53ec57a", "9b93d067e776474e9fa7c803e31ea127", "dc679d4df13d4f68b5a619c8e8ca34a7", "79ae4dac6de6410aaba66b67d4fac830", "<KEY>", "51f22a39259b46abbe80585bde2a2c38", "2fb7a45938d04a55b21f1e21acc8d509", "9a1b2ad9c8c44177b60960e1fd595601", "124e607b6d1e49b4ac4a62d0509d6d19", "5977ace89f7046738a04d42ab608a766", "ef163b3a1967418ab2531ad389476623", "c68e3ac139a1469486df3ed471e9cc50", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "35dd76af69e94a3198ce18893bf11f55", "<KEY>", "<KEY>", "ee63b51fe4264c42a2874d28f0a86ff8", "43861b8ef3d54f608e985dd0e0f2db2c", "<KEY>", "bd8e72d97a874dc68101d2d4f0adf0db", "85c0641ef48e4ad780181471b85cd5e0", "02a8b5b2d46b4c2eba66ea789c4e0f9d", "c705d8022d894d3f89227ea0082ead46", "b16ef474ad8549fb9c8c46e2f45b4849", "8a7f96b8de234aee91870be32bb76e63", "91173d4193ce4da7a9a52f2ac8d8a6dd", "232ac936352243c0b87c709e679b19e9", "22f57255efee4f8498ac6b98ee1ba80c", "46e4234f44f2429e85bc48d86bd6ed86", "1732487e9d0548659f14e1ce48eb4f9b", "<KEY>", "<KEY>", "<KEY>", "15ddd20b8d934339b191137651823bec", "<KEY>", "ac6749503c654e0e9add7c67aebe2951", "07924714f21440e9946cc4eef908dcca", "<KEY>", "a329f4ea10144f8d9e40504bc76a21b4", "<KEY>", "40825d74f97640ddae71a1e8318a840d", "37a753a77d194b58ae618544a230220d", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "de784552779d4dd6a0bb3b1ba4aae9d4", "<KEY>", "4a6452014e134d7c8d70148a17def6d4", "10b0ec9d5cab4e298cf98d0876ac5855", "<KEY>", "22a131e5471f459d8f91b27ee803cac2", "202d95f0f0b3492f988697963ce7167a", "<KEY>", "<KEY>", "4f16962a84a144d197136ab18cac2260", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "dfcd495861a44dedae8fca2ef8788c7a", "47f17f62d0d04c8aa9dc7dded8b758b4"]} 
executionInfo={"elapsed": 135567, "status": "ok", "timestamp": 1604939477070, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="o3iY8-AyqqP8" outputId="43196465-19ce-462c-be8a-3127f36aef16"
dataset = load_dataset("race", "middle")
model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased')
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
# + [markdown] id="9y31ANZuBOqZ"
# **Training args**
# + id="ZPiQkKr_sQKv"
label_list = ["0", "1", "2", "3"]
max_seq_length = 468
batch_size = 4
training_args = TrainingArguments(
"test-multichoice",
evaluation_strategy = "epoch",
learning_rate=2e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
num_train_epochs=3,
weight_decay=0.01,
load_best_model_at_end=True,
)
# + [markdown] id="vfJK4AiXBTDI"
# **Create examples from HuggingFace Dataset**
# + id="m5ymuY-tsbR1"
def _race_split_to_examples(split):
    """Convert one RACE split (iterable of example dicts) into InputExamples.

    The original notebook duplicated this loop verbatim for the train and
    validation splits; this helper removes that duplication.
    """
    examples = []
    for ex in split:
        race_id = ex["example_id"]
        article = ex["article"]
        # RACE answers are letters 'A'-'D'; the label list uses "0"-"3".
        truth = str(ord(ex["answer"]) - ord("A"))
        question = ex["question"]
        options = ex["options"]
        examples.append(
            InputExample(
                example_id=race_id,
                question=question,
                contexts=[article, article, article, article],  # this is not efficient but convenient
                endings=[options[0], options[1], options[2], options[3]],
                label=truth,
            )
        )
    return examples
data_train = dataset['train']
train_examples = _race_split_to_examples(data_train)
data_val = dataset['validation']
val_examples = _race_split_to_examples(data_val)
# + [markdown] id="Ttsn5nzzBZoZ"
# **Convert examples to features**
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 570982, "status": "ok", "timestamp": 1604939916071, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="xEk7ODK0sNCK" outputId="ac53b83a-4112-4530-b65a-327e166189df"
train_features = convert_examples_to_features(
train_examples,
label_list,
max_seq_length,
tokenizer,
)
val_features = convert_examples_to_features(
val_examples,
label_list,
max_seq_length,
tokenizer,
)
# + [markdown] id="vQls9VTGBc8n"
# **Create trainer**
# + id="5rnvZjuKr7ME"
def simple_accuracy(preds, labels):
    """Fraction of positions where ``preds`` equals ``labels`` (element-wise)."""
    matches = np.equal(preds, labels)
    return matches.mean()
def compute_metrics(p):
    """Trainer metrics hook: accuracy of argmax class predictions vs gold labels."""
    predicted_classes = np.argmax(p.predictions, axis=1)
    return {"acc": simple_accuracy(predicted_classes, p.label_ids)}
# Initialize our Trainer with the configuration and data built above.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_features,      # encoded RACE train split
    eval_dataset=val_features,         # encoded RACE validation split
    compute_metrics=compute_metrics,   # reports accuracy at each evaluation
)
# + [markdown] id="Znviko-tBe2B"
# **Train and eval**
# + colab={"base_uri": "https://localhost:8080/", "height": 187} executionInfo={"elapsed": 16151057, "status": "ok", "timestamp": 1604955497838, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="qPzivHt6xvep" outputId="7f495d43-3637-4c8e-c937-79a3c292ea5f"
# Fine-tune for the configured 3 epochs, evaluating after each epoch.
trainer.train()
# + colab={"base_uri": "https://localhost:8080/", "height": 55} executionInfo={"elapsed": 16244762, "status": "ok", "timestamp": 1604955592331, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08728759232976284064"}, "user_tz": -60} id="Rqi88baZxzSU" outputId="b9a2cf87-33c8-4713-9b3e-c2660f1862b1"
# Final evaluation on the validation features (loss plus the "acc" metric).
trainer.evaluate()
# + [markdown] id="b8eKrE5XLtyB"
# **Save model**
# + id="woNeNQ1TLtG-"
# Persist the fine-tuned model and tokenizer. Prefer Google Drive so the
# artifacts survive the Colab session; fall back to the local filesystem
# when Drive is not mounted. The bare `except:` previously used here also
# swallowed KeyboardInterrupt/SystemExit; `except Exception` keeps the
# best-effort fallback without masking interpreter-level signals.
try:
    trainer.save_model("drive/My Drive/bbu_race_m/")
    tokenizer.save_pretrained("drive/My Drive/bbu_race_m/")
except Exception:
    trainer.save_model("bbu_race_m/")
    tokenizer.save_pretrained("bbu_race_m/")
| BERT-RACE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cienciadedatos
# language: python
# name: cienciadedatos
# ---
# +
# Libraries
# ==============================================================================
# %load_ext autoreload
# %autoreload 2
# Make the local skforecast checkout importable ahead of any installed copy.
import sys
sys.path.insert(1, '/home/ximo/Documents/GitHub/skforecast')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skforecast.utils import check_y
from skforecast.utils import check_exog
from skforecast.utils import preprocess_y
from skforecast.utils import preprocess_last_window
from skforecast.utils import preprocess_exog
from skforecast.utils import expand_index
from skforecast.utils import check_predict_input
# +
# Unit test check_input_predict
# ==============================================================================
import pytest
from pytest import approx
import numpy as np
import pandas as pd
from skforecast.utils import check_predict_input
def test_check_input_predict_exception_when_fitted_is_False():
    '''Calling predict on a never-fitted forecaster must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=5, fitted=False, included_exog=False, index_type=None,
            index_freq=None, window_size=None, last_window=None, exog=None,
            exog_type=None, exog_col_names=None, max_steps=None,
        )


def test_check_input_predict_exception_when_steps_is_lower_than_1():
    '''A `steps` value lower than 1 must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=-5, fitted=True, included_exog=False, index_type=None,
            index_freq=None, window_size=None, last_window=None, exog=None,
            exog_type=None, exog_col_names=None, max_steps=None,
        )


def test_check_input_predict_exception_when_steps_is_greater_than_max_steps():
    '''Requesting more steps than the forecaster's `max_steps` must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=20, fitted=True, included_exog=False, index_type=None,
            index_freq=None, window_size=None, last_window=None, exog=None,
            exog_type=None, exog_col_names=None, max_steps=10,
        )
def test_check_input_predict_exception_when_exog_is_not_none_and_included_exog_is_true():
    '''Passing exog to a forecaster fitted without exog must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=5, fitted=True, included_exog=False, index_type=None,
            index_freq=None, window_size=None, last_window=None,
            exog=np.arange(10), exog_type=None, exog_col_names=None,
            max_steps=None,
        )


def test_check_input_predict_exception_when_exog_is_none_and_included_exog_is_true():
    '''Omitting exog for a forecaster fitted with exog must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=5, fitted=True, included_exog=True, index_type=None,
            index_freq=None, window_size=None, last_window=None, exog=None,
            exog_type=None, exog_col_names=None, max_steps=None,
        )


def test_check_input_predict_exception_when_len_exog_is_less_than_steps():
    '''exog shorter than the requested number of steps must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True, index_type=None,
            index_freq=None, window_size=None, last_window=None,
            exog=np.arange(5), exog_type=None, exog_col_names=None,
            max_steps=None,
        )


def test_check_input_predict_exception_when_exog_is_not_padas_series_or_dataframe():
    '''exog that is not a pandas Series or DataFrame must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True, index_type=None,
            index_freq=None, window_size=5, last_window=None,
            exog=np.arange(10), exog_type=None, exog_col_names=None,
            max_steps=None,
        )
def test_check_input_predict_exception_when_exog_has_missing_values():
    '''exog containing NaN values must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True, index_type=None,
            index_freq=None, window_size=5, last_window=None,
            exog=pd.Series([1, 2, 3, np.nan]), exog_type=None,
            exog_col_names=None, max_steps=None,
        )


def test_check_input_predict_exception_when_exog_is_not_of_exog_type():
    '''exog whose type differs from the type seen during fit must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True, index_type=None,
            index_freq=None, window_size=None, last_window=None,
            exog=np.arange(10), exog_type=pd.Series, exog_col_names=None,
            max_steps=None,
        )


def test_check_input_predict_exception_when_exog_is_dataframe_without_columns_in_exog_col_names():
    '''exog DataFrame missing columns recorded during fit must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=2, fitted=True, included_exog=True, index_type=None,
            index_freq=None, window_size=None, last_window=None,
            exog=pd.DataFrame(np.arange(10).reshape(5,2), columns=['col1', 'col2']),
            exog_type=pd.DataFrame, exog_col_names=['col1', 'col3'],
            max_steps=None,
        )
def test_check_input_predict_exception_when_exog_index_is_not_of_index_type():
    '''exog index type different from the training index type must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True,
            index_type=pd.DatetimeIndex, index_freq=None, window_size=None,
            last_window=None, exog=pd.Series(np.arange(10)),
            exog_type=pd.Series, exog_col_names=None, max_steps=None,
        )


def test_check_input_predict_exception_when_exog_index_frequency_is_not_index_freq():
    '''exog index frequency different from the training frequency must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True,
            index_type=pd.DatetimeIndex, index_freq='Y', window_size=None,
            last_window=None,
            exog=pd.Series(np.arange(10), index=pd.date_range(start='1/1/2018', periods=10)),
            exog_type=pd.Series, exog_col_names=None, max_steps=None,
        )


def test_check_input_predict_exception_when_length_last_window_is_lower_than_window_size():
    '''last_window shorter than the forecaster window size must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True,
            index_type=pd.RangeIndex, index_freq=None, window_size=10,
            last_window=pd.Series(np.arange(5)),
            exog=pd.Series(np.arange(10)), exog_type=pd.Series,
            exog_col_names=None, max_steps=None,
        )
def test_check_input_predict_exception_when_last_window_is_not_padas_series():
    '''last_window that is not a pandas Series must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True,
            index_type=pd.RangeIndex, index_freq=None, window_size=5,
            last_window=np.arange(5), exog=pd.Series(np.arange(10)),
            exog_type=pd.Series, exog_col_names=None, max_steps=None,
        )


def test_check_input_predict_exception_when_last_window_has_missing_values():
    '''last_window containing NaN values must raise.'''
    with pytest.raises(Exception):
        check_predict_input(
            steps=10, fitted=True, included_exog=True,
            index_type=pd.RangeIndex, index_freq=None, window_size=5,
            last_window=pd.Series([1, 2, 3, 4, 5, np.nan]),
            exog=pd.Series(np.arange(10)), exog_type=pd.Series,
            exog_col_names=None, max_steps=None,
        )
# -
# Run every check_predict_input unit test, preserving the original order.
for _run_test in (
    test_check_input_predict_exception_when_fitted_is_False,
    test_check_input_predict_exception_when_steps_is_lower_than_1,
    test_check_input_predict_exception_when_steps_is_greater_than_max_steps,
    test_check_input_predict_exception_when_exog_is_not_none_and_included_exog_is_false,
    test_check_input_predict_exception_when_exog_is_none_and_included_exog_is_true,
    test_check_input_predict_exception_when_exog_is_not_padas_series_or_dataframe,
    test_check_input_predict_exception_when_exog_has_missing_values,
    test_check_input_predict_exception_when_len_exog_is_less_than_steps,
    test_check_input_predict_exception_when_exog_is_not_of_exog_type,
    test_check_input_predict_exception_when_exog_is_dataframe_without_columns_in_exog_col_names,
    test_check_input_predict_exception_when_exog_index_is_not_of_index_type,
    test_check_input_predict_exception_when_exog_index_frequency_is_not_index_freq,
    test_check_input_predict_exception_when_length_last_window_is_lower_than_window_size,
    test_check_input_predict_exception_when_last_window_is_not_padas_series,
    test_check_input_predict_exception_when_last_window_has_missing_values,
):
    _run_test()
# +
import pytest
import numpy as np
import pandas as pd
from skforecast.utils import exog_to_multi_output
def test_exog_to_multi_output_when_lags_3_steps_2_exog_numpy_1d():
    '''1d exog with steps=2: each output row holds the next 2 exog values.'''
    obtained = exog_to_multi_output(exog=np.arange(10), steps=2)
    reference = np.array(
        [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5],
         [5, 6], [6, 7], [7, 8], [8, 9]]
    )
    assert obtained == approx(reference)


def test_exog_to_multi_output_when_lags_3_steps_2_exog_numpy_array_2d():
    '''2d exog with steps=2: per-column step windows, columns concatenated.'''
    two_cols = np.column_stack([np.arange(100, 110), np.arange(1000, 1010)])
    obtained = exog_to_multi_output(exog=two_cols, steps=2)
    reference = np.array(
        [[100, 101, 1000, 1001],
         [101, 102, 1001, 1002],
         [102, 103, 1002, 1003],
         [103, 104, 1003, 1004],
         [104, 105, 1004, 1005],
         [105, 106, 1005, 1006],
         [106, 107, 1006, 1007],
         [107, 108, 1007, 1008],
         [108, 109, 1008, 1009]]
    )
    assert obtained == approx(reference)


def test_exog_to_multi_output_when_lags_2_steps_3_exog_numpy_array_2d():
    '''2d exog with steps=3: three-step windows per column, concatenated.'''
    two_cols = np.column_stack([np.arange(100, 110), np.arange(1000, 1010)])
    obtained = exog_to_multi_output(exog=two_cols, steps=3)
    reference = np.array(
        [[100, 101, 102, 1000, 1001, 1002],
         [101, 102, 103, 1001, 1002, 1003],
         [102, 103, 104, 1002, 1003, 1004],
         [103, 104, 105, 1003, 1004, 1005],
         [104, 105, 106, 1004, 1005, 1006],
         [105, 106, 107, 1005, 1006, 1007],
         [106, 107, 108, 1006, 1007, 1008],
         [107, 108, 109, 1007, 1008, 1009]]
    )
    assert obtained == approx(reference)
# -
# Run the exog_to_multi_output tests in their original order.
for _run_test in (
    test_exog_to_multi_output_when_lags_3_steps_2_exog_numpy_1d,
    test_exog_to_multi_output_when_lags_3_steps_2_exog_numpy_array_2d,
    test_exog_to_multi_output_when_lags_2_steps_3_exog_numpy_array_2d,
):
    _run_test()
| notebooks/develop/skforecast_utils.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="O4O67-MytR_g" executionInfo={"status": "ok", "timestamp": 1642132251389, "user_tz": 360, "elapsed": 3457, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="ed16790f-0578-4b5d-be24-95934246d686"
# !pip install pandas==1.1.0
# + colab={"base_uri": "https://localhost:8080/"} id="-17a9dnSq7um" executionInfo={"status": "ok", "timestamp": 1642132253198, "user_tz": 360, "elapsed": 1812, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="1083395c-f950-4e91-cd54-c047ba9b95f2"
# Mount Google Drive so the dataset CSV under "My Drive" is reachable.
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="xRNRENZ4rYaW" executionInfo={"status": "ok", "timestamp": 1642132253198, "user_tz": 360, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="6b7ef6fd-f2b6-45a6-9159-cad83fd4030a"
# %cd '/content/drive/My Drive/Datasets/Cursos'
# !ls
# + id="gHDsPe4sruW6" executionInfo={"status": "ok", "timestamp": 1642132253199, "user_tz": 360, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}}
import pandas as pd
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 468} id="Cfx0EGznr326" executionInfo={"status": "ok", "timestamp": 1642132253199, "user_tz": 360, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="b2d539a4-211a-4bb5-fa1c-fc33c2acfef0"
# Load the London bike-sharing dataset and display it.
london = pd.read_csv('london_bikes.csv')
london
# + colab={"base_uri": "https://localhost:8080/"} id="sN7TrzOfsCvb" executionInfo={"status": "ok", "timestamp": 1642132253199, "user_tz": 360, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="6462302a-469c-4e14-d923-1ac45810ba8e"
# Inspect the dtype of every column in the dataframe.
london.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="UN-vzIopsJD8" executionInfo={"status": "ok", "timestamp": 1642132253199, "user_tz": 360, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="9c0b9e2d-0de0-49f3-a666-b12c156ffbb6"
# Convert the 'timestamp' column from string to datetime.
london['timestamp'] = pd.to_datetime(london['timestamp'])
london['timestamp']
# + colab={"base_uri": "https://localhost:8080/"} id="DsrdXQq7sd0Z" executionInfo={"status": "ok", "timestamp": 1642132253200, "user_tz": 360, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="a07d8819-d5e4-413f-fc4a-41eec8e7abd9"
# New column "hour": the hour-of-day component of each timestamp.
london['hour'] = london['timestamp'].dt.hour
london['hour']
# + [markdown] id="HLc9BcS2ulJs"
# # iloc
# Permite hacer una selección basada en posiciones enteras, también admite el uso de slices.
#
# En otras palabras iloc permite seleccionar posiciones de un DataFrame basándose en su índice numérico.
#
# Dicha selección puede guardarse en una nueva variable.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="MCGfbxOptds6" executionInfo={"status": "ok", "timestamp": 1642132329043, "user_tz": 360, "elapsed": 300, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="a44ff59f-1c18-4496-f514-a7d4ddb9189d"
# Drop the raw timestamp column from the dataframe.
df = london.iloc[:, 1:]
# [:, 1:] == every row, keeping columns from the second one to the end
df
# + colab={"base_uri": "https://localhost:8080/"} id="db4vVEp_t8MP" executionInfo={"status": "ok", "timestamp": 1642132450428, "user_tz": 360, "elapsed": 291, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="c0a60598-236f-4c91-eebc-4da34ab01e7f"
# NumPy ufuncs broadcast element-wise over a pandas Series.
np.sin(df['wind_speed']**2) + 10
# + colab={"base_uri": "https://localhost:8080/"} id="rSoqsJpquNqZ" executionInfo={"status": "ok", "timestamp": 1642132484460, "user_tz": 360, "elapsed": 290, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="38fd7622-f8c6-480c-b0ba-0212c579600c"
# Temperature difference between the two sensors (element-wise).
df['t1'] - df['t2']
# + colab={"base_uri": "https://localhost:8080/"} id="b6-RpWYpuTZh" executionInfo={"status": "ok", "timestamp": 1642132934233, "user_tz": 360, "elapsed": 492, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="f463f5ab-3eac-421b-8df7-ffa3714be776"
# Only even-indexed rows of t1: odd indices fail to align and become NaN.
df['t1'].iloc[::2] - df['t2']
# + colab={"base_uri": "https://localhost:8080/"} id="fTZJgT7Nv7Rq" executionInfo={"status": "ok", "timestamp": 1642132997982, "user_tz": 360, "elapsed": 319, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="7c4089e9-4c3c-413d-f6ed-70050bd40965"
# Fill the unaligned (missing) slots with a sentinel value before subtracting.
df['t1'].iloc[::2].sub(df['t2'], fill_value = 1000)
# + colab={"base_uri": "https://localhost:8080/"} id="J-tI3U7ZwCGa" executionInfo={"status": "ok", "timestamp": 1642133037307, "user_tz": 360, "elapsed": 282, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="8c835ceb-e7b6-4425-e71c-7462ae5553ad"
# Column-wise division.
# "inf" appears wherever the divisor is 0.
df['t1'] / df['t2']
# + colab={"base_uri": "https://localhost:8080/"} id="GObcESHhwgBI" executionInfo={"status": "ok", "timestamp": 1642133099557, "user_tz": 360, "elapsed": 292, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="196b8600-d01d-491a-b463-25dda73e5949"
# Dot product: element-wise multiplication summed up (sum of squares of t1).
df['t1'].dot(df['t1'])
# + [markdown] id="NZLgtpiDw_gM"
# # Funciones lambdas
#
# Definir una funcion en una sola linea
# + id="YvCK4rVfw_HO" executionInfo={"status": "ok", "timestamp": 1642133222214, "user_tz": 360, "elapsed": 336, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}}
def fun_x(x):
    """Evaluate the quadratic x**2 + 1 (works element-wise on arrays)."""
    return x ** 2 + 1
# + colab={"base_uri": "https://localhost:8080/"} id="Vt9Yl4VVxIHl" executionInfo={"status": "ok", "timestamp": 1642133234066, "user_tz": 360, "elapsed": 350, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="a16ba98e-6a84-4765-9565-46df190842e0"
fun_x(10)  # scalar input -> 101
# + colab={"base_uri": "https://localhost:8080/"} id="yT-RlM-XxLD-" executionInfo={"status": "ok", "timestamp": 1642133252250, "user_tz": 360, "elapsed": 302, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="284c7ed2-1331-472e-ecee-79637747516d"
np.arange(-5,6)  # integers from -5 through 5 (upper bound exclusive)
# + colab={"base_uri": "https://localhost:8080/"} id="mDBuHGp0xPru" executionInfo={"status": "ok", "timestamp": 1642133282727, "user_tz": 360, "elapsed": 316, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="2f2201dc-a124-4e91-caaf-991e80498ba4"
np.arange(-5,6).shape  # (11,): one-dimensional array with 11 elements
# + colab={"base_uri": "https://localhost:8080/"} id="vyzlXWqkxWsQ" executionInfo={"status": "ok", "timestamp": 1642133309157, "user_tz": 360, "elapsed": 314, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="65d7bec5-ee99-4fce-cdb0-4a0d6f37896d"
fun_x(np.arange(-5,6))  # vectorized: the quadratic applies element-wise
# + colab={"base_uri": "https://localhost:8080/"} id="4YHHkerXxgtx" executionInfo={"status": "ok", "timestamp": 1642133393529, "user_tz": 360, "elapsed": 284, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="79c02a35-b267-4178-b664-1923b6e6cbb0"
# apply == call a function on every element of the Series.
df['hour'].apply(fun_x)
# + id="u7t0Y8xAyDbG" executionInfo={"status": "ok", "timestamp": 1642133519411, "user_tz": 360, "elapsed": 925, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}}
def fun_2(x, a=1, b=0):
    """Evaluate the quadratic x**2 + a*x + b (vectorizes over arrays)."""
    return x ** 2 + a * x + b
# + colab={"base_uri": "https://localhost:8080/"} id="vpgwVmCkyP-J" executionInfo={"status": "ok", "timestamp": 1642133541985, "user_tz": 360, "elapsed": 298, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="8ba8648d-9dfd-41ef-c16e-9dc9c777afdf"
fun_2(10, a=20, b=-100)  # 100 + 200 - 100 == 200
# + colab={"base_uri": "https://localhost:8080/"} id="Rmm812x8ybhr" executionInfo={"status": "ok", "timestamp": 1642133621589, "user_tz": 360, "elapsed": 284, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="a0fd6287-5878-4415-efef-28e2f64404ae"
# args passes the extra positional arguments (a, b) to fun_2 for every element.
df['hour'].apply(fun_2, args = (20, -100))
# + colab={"base_uri": "https://localhost:8080/"} id="YJrgjdMVyq3s" executionInfo={"status": "ok", "timestamp": 1642133705473, "user_tz": 360, "elapsed": 297, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="a309457d-0eae-4ef9-ebce-774e5abc9e0a"
# Using lambda functions with apply:
# convert the temperature scale (Celsius -> Kelvin offset).
df['t1'].apply(lambda x: x+273)
# + colab={"base_uri": "https://localhost:8080/"} id="ttbICc7pzAuJ" executionInfo={"status": "ok", "timestamp": 1642133761610, "user_tz": 360, "elapsed": 300, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="4c8df2f1-2a17-4d03-9da9-8f896b498e13"
# Mean of every column (apply defaults to axis=0).
df.apply(lambda x: x.mean())
# + colab={"base_uri": "https://localhost:8080/"} id="aOuE5Pb5zLjq" executionInfo={"status": "ok", "timestamp": 1642133819092, "user_tz": 360, "elapsed": 1718, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="9d9df80e-c392-4808-a945-623856e8719a"
# Mean of every row (axis=1 runs the lambda across columns).
df.apply(lambda x: x.mean(), axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="L9Zc3eM0zc2e" executionInfo={"status": "ok", "timestamp": 1642133853365, "user_tz": 360, "elapsed": 1394, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="0b406f80-ece4-48ef-9544-df9761de5491"
# Row-wise standard deviation.
df.apply(lambda x: x.std(), axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="EBRJ_cVUziNq" executionInfo={"status": "ok", "timestamp": 1642133913763, "user_tz": 360, "elapsed": 338, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjV4Wfggw818V8zg8TQqMLEIBnslnO3PfpJ2-_-2g=s64", "userId": "07032201873942492446"}} outputId="94d95099-fca9-42f8-f206-6d701dcc0c55"
# Row-wise arithmetic between columns via apply
# (equivalent to, but slower than, df['t1'] - df['t2']).
df.apply(lambda x: x['t1'] - x['t2'], axis=1)
| MathFunctionsLondonBikes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
# =================================================================================================================
# # Lecture Notes: Decision Trees
#
#
# ##### D.Vidotto, Data Mining: JBI030 2019/2020
#
#
# =================================================================================================================
# Decision trees perform binary splits in the feature space, and can therefore be represented as [binary trees](https://en.wikipedia.org/wiki/Binary_tree). They provide a highly interpretable Data Mining tool, whose results can be easily communicated. Their downside is that they easily tend to overfit the training dataset, and in general do not always perform well. In this notebook, we will explore both Classification and Regression Trees algorithms.
#
# Students are assumed to have understood the following concepts during the class:
#
# * components of a decision tree
# * node purity/impurity
# * Gini index, entropy index
# * the decision tree algorithm
# * regression trees
# * tree depth
#
# In this notebook, we will review the following topics:
#
# 1. Decision Trees: Introduction
# * examples
# * definitions
# 1. Decision Tree Algorithm
# * Classification Trees
# * Gini index and Entropy
# * choosing the splits
# * Regression Trees
# 1. Tree depth and other hyperparameters
# 1. Feature Importance
# 1. Pathological Cases
# 1. Other Remarks
# 1. Examples in Python
# * Classification Trees
# * Regression Trees
# 1. Example on the Heart Dataset
#
#
# ## 1. Decision Trees: Introduction
# ### 1.1 Example 1
# Suppose we have the following dataset:
#
# <img src="./img/decision_trees/data_example.png" width="300" height="50"/>
#
# Training a decision tree on this dataset produces the following decision boundaries:
#
# <img src="./img/decision_trees/tree_boundary.png" width="300" height="50"/>
#
# As we can notice, decision trees seek to find *rectangular regions* (hyper-rectangles in higher dimensions) in the space of the features, such that within each such partition there are as many data points as possible from the same class. Notice that finding rectangular regions really means that the boundaries (i.e., the splits) found by Decision Trees are *axes-aligned*.
#
#
# From these decision boundaries, we can extrapolate the following *decision rules*:
#
# ```python
# |--- if X2 <= 13.55:
# | |--- if X1 <= 13.13:
# | | |--- return Class 3
# | |--- else:
# | | |--- return Class 2
# |----|else:
# | |--- return Class 1
# ```
#
# Try to compare the rules found by the tree with the decision boundaries, and convince yourself that they are conveying the same information!
# <br>
# Another, equivalent (and surely most notorious!) way to describe the output of a tree is by using the *tree representation*:
#
# <br>
# <img src="./img/decision_trees/example_tree.png" width="250" height="50"/>
# <br>
#
# Before delving into the definitions of the components of a tree, try to interpret the tree by yourself (and compare it with its rules and decision boundaries).
#
#
# ### 1.2 Decision Tree: Definitions
#
# A decision tree is composed by *nodes*, *branches* and *leaves*:
#
# * **nodes** are used to denote the splits performed by the tree (or to denote a final prediction)
# * each split involves one feature of the dataset, which is *tested* against a value
# * if the test is true, units will go down one of the two **branches** (=edges) of the node; if it is false, units will go down the other branch
# * the **root** node is the top-most node, where the first split occurs
# * the **internal nodes** (a.k.a. decision nodes) are all the other nodes present at lower *levels* of the tree
# * the **leaves** (a.k.a. terminal nodes) are nodes at the last level of the tree; they represent the *prediction* made by the tree, given the values tested at higher levels (for Classification trees, leaves predict a class or the class probabilities; for Regression trees, leaves predict a number)
# * **children** nodes are the nodes generated when splitting a node; the corresponding node that generates the split is called **parent** node
# * the length of the longest path from the root to a leaf is called the **depth** of the tree
# * the number of nodes in the tree is called **size** of the tree
#
# <br>
# <img src="./img/decision_trees/tree_def.png" width="450" height="50"/>
# <br>
#
# Depending on the nature of the feature chosen for the split, we can encounter two types of test inside each node:
# * if the feature is continuous (or discrete), then the test involves an *inequality* (as in the previous example) against a value within the range of that feature
# * if the feature is categorical, then the test performed is an *equality* test (as in the example below) against one of the categories of the feature of interest
#
# Decision trees seek to train the smallest possible tree (i.e., with the lowest possible number of nodes) that classifies correctly as many units as possible (i.e., having most units from the same class within the regions detected by the tree).
#
# Interpreting a tree is very easy: we just need to consider the values of the features present in the nodes, and understand to what leaf node they lead. The final prediction is given by the *combined interaction* of the features present at each level of the tree. For example, in the example above, when $X_2$ takes on a value smaller than 13.55, and $X_1$ a value smaller than 13.13, then we fall into the region of Class 3.
#
# We will see that, with datasets involving a larger number of predictors, trees may not (and indeed they often do not in practice) use the whole set of features in their nodes, but only choose some of them (performing in this way automatic feature selection). <br>
# Performing predictions with a tree is also very easy. Once we know the values of the features for the observation we want to predict, we must test them against the values present in each of the nodes, drop every time the unit down the correct branches, and finally predict the value reported by the corresponding leaf node.
#
# ### 1.3 Example 2
# We now consider the *weather* dataset with 5 features and 1 categorical output measured for $n=6$ different days. The features involve a bunch of meteorological and temporal conditions (i.e., temperature, outlook, humidity level, wind strength, and a dummy indicating whether the corresponding day was in a weekend or not). The class indicates whether the corresponding day we went out to play football, or not.
#
# The dataset is the following one:
#
# <br>
#
# |Example| Outlook | Weekend | Humidity | Wind | Temperature | Played Football? |
# |:----:|:------:|:-----:|:-----:|:-----:|:-----:|:-----:|
# |1| Sunny | Y | Normal | Weak | 24 | Y |
# |2| Sunny | N | Normal | Strong | 18 | Y |
# |3| Sunny | Y | Normal | Strong | 30 | Y |
# |4| Overcast | N | High | Strong| 20 | N |
# |5| Rainy | N | Normal | Weak | 21 | N |
# |6| Sunny | Y| Normal | Strong | 16 | N |
#
# <br>
#
# Suppose we obtain a tree like this:
#
# <br>
# <img src="./img/decision_trees/football_tree_1.png" width="300" height="50"/>
# <br>
#
# Answer the following questions:
# 1. Is there any observation misclassified by the tree?
# 1. Can you come up with a better tree? (Remember: *better* in the context of decision trees means that there are as few misclassifications as possible -possibly 0- with the smallest possible number of nodes)
# 1. Suppose we have a new observation: $\mathbf{x}^*=[Sunny,\ No,\ High,\ Strong,\ 22]$. How would the tree that you detected in the previous point classify it? Do you need the values of all features in order to perform the prediction?
# 1. Write down the tree in form of *rules*
#
# ## 2. Decision Tree Algorithm
# To perform training, decision trees follow a procedure known as [divide-and-conquer](https://en.wikipedia.org/wiki/Divide-and-conquer_algorithm): they recursively perform a partition of the data into subsets, which in turn are partitioned into other subsets; the partitioning continues until the algorithm finds the most possible *homogeneous* regions.
#
# ### 2.1 Classification Trees
# #### 2.1.1 Measures of Impurity
# With classification trees, *homogeneity* of a region is defined in terms of *node purity*. Consider this example: we have $n=10$ units, 5 of them come from Class 0, and other 5 of them come from Class 1. We find two possible ways to perform the partition:
# * Case 1: Region 1 = [0, 0, 0, 1, 1]; Region 2 = [1, 1, 1, 0, 0]
# * Case 2: Region 1 = [0, 0, 0, 0, 1, 0]; Region 2 = [1, 1, 1, 1]
#
# In the first case, two regions are found that contain five observations each. In region 1, the *majority class* is Class 0, having three units vs. 2 units of the minority class (Class 1). The situation is reversed in region 2, where the majority class is Class 1, also with three units. In the second case, region 1 contains six observations, while region 2 contains four observations. Region 1 contains 5 units from class 0 and only 1 from class 1, while region 2 has four units all belonging to class 1. In this second case, we see that region 2 is completely *pure*, and region 1 is almost pure. The regions found by the first partition, on the other hand, are more *impure* (less homogeneous). Intuitively, then, we should prefer the second partition, as it leads to the purest nodes (which, in turn, means that we can grow a smaller tree, and terminate the algorithm faster).
#
# Decision trees use indicators that measure node *impurity* to decide how to perform the split. In particular, the two most common impurity measures used by classification trees are the *Gini index* and the *entropy* measure.
#
# **The Gini Impurity Index**. Suppose we have $C\geq2$ classes, and let's denote a region resulting from partition $m$ by $R_m$. There are in total $M$ regions, so that the whole, original space of the data is $R=R_1\cup R_2\cdots\cup R_m\cdots\cup R_M$. We can define the probability of observing a unit from class $c \in \{1,...,C\}$ in region $R_m$ with:
#
# $$ p_{c,m} = \frac{|R_{c,m}|}{|R_m|} = \frac{\#\ of\ units\ in\ region\ R_m\ from\ class\ c}{total\ number\ of\ units\ in\ region\ R_m}$$
#
# The Gini impurity for the $m$-th partition is then defined as:
#
# $$ G(R_m) = \sum_{c=1}^{C}p_{c,m}(1-p_{c,m}) $$
#
# $G(R_m)$ reaches a minimum of 0 when all units in the region come from the same class (the class distribution is maximally homogeneous), while it reaches a maximum when the classes are evenly distributed in the region (class distribution is maximally heterogeneous). (Check this by yourself, for example with the two-class case!). This makes the Gini impurity index a suitable indicator for deciding on the splitting of the tree's nodes.
#
# Let's suppose we need to perform a partition of a region $R_m$, and we are evaluating the Gini index of the split at a point $X_j= x_j$ for feature $X_j$ (how to select such point will be explained shortly). This split will then yield two candidate sub-regions, a 'left' and a 'right' one:
#
# $$R_L = \{X|X_j\leq x_j\}\ and\ R_R = \{X|X_j > x_j\} \ s.t.\ R_m=R_L\cup R_R$$
#
#
# <br>
# <img src="./img/decision_trees/split_r_l.png" width="200" height="50"/>
# <br>
#
# The overall impurity yielded by this split is:
#
# $$ Q(R_m) = \frac{|R_L|}{|R_m|}G(R_L) + \frac{|R_R|}{|R_m|}G(R_R) $$
#
# where:
# * $\frac{|R_L|}{|R_m|}$ and $\frac{|R_R|}{|R_m|}$ are the fractions of units from region $m$ that fall into the left and right branch, respectively
# * $G(R_L)$ and $G(R_R)$ are the Gini impurities calculated within each of the new sub-regions
# * therefore, $Q(R_m)$ is just a weighted average of the Gini indices calculated within each sub-region
#
# As an example, let's calculate the Gini impurities for the two potential splits considered above:
# * Case 1: Region 1 = [0, 0, 0, 1, 1]; Region 2 = [1, 1, 1, 0, 0]
# * Case 2: Region 1 = [0, 0, 0, 0, 1, 0]; Region 2 = [1, 1, 1, 1]
#
# The impurity of the split in Case 1 is:
#
# $$Q(R_m) = \left[\frac{1}{2}\left(\frac{3}{5}\cdot\frac{2}{5}+\frac{2}{5}\cdot\frac{3}{5}\right)\right] +
# \left[\frac{1}{2}\left(\frac{2}{5}\cdot\frac{3}{5}+\frac{3}{5}\cdot\frac{2}{5}\right)\right] = 0.48
# $$
#
# The impurity of the split in Case 2 is:
# $$Q(R_m) = \left[\frac{3}{5}\left(\frac{5}{6}\cdot\frac{1}{6}+\frac{1}{6}\cdot\frac{5}{6}\right)\right] +
# \left[\frac{2}{5}\left(0\cdot1+0\cdot 1\right)\right] \approx 0.167
# $$
#
# We see that the impurity index is lower in the second case, confirming our intuition that the second split yields regions that are more pure.
#
# **Entropy.** A second way to measure impurity comes from [information theory](https://en.wikipedia.org/wiki/Information_theory): the *entropy*. If we take a categorical distribution with $C$ categories, it is clear that we reach maximum heterogeneity when the distribution is uniform; that is, the probabilities of all categories are the same ($p_1=p_2=...=p_C$). *Entropy* measures how "close" we are to such an undesired distribution. Using the quantities introduced above, the entropy of a region $R_m$ is defined as
#
# $$E(R_m) = -\sum_{c=1}^{C}p_{c,m}log(p_{c,m})$$
#
# Lower values of the entropy (whose minimum is at 0, like the Gini impurity) indicate larger distance from the uniform distribution, and therefore higher homogeneity (the nodes are more pure); and vice versa for higher values of $E(R_m)$. (By convention, when some $p_c=0$, log($p_c$) is set equal to 0).
#
# Similarly to what seen with the Gini impurity, the total entropy induced by a split is defined as a weighted average of the entropies observed in the left and right branch:
#
# $$ Q(R_m) = \frac{|R_L|}{|R_m|}E(R_L) + \frac{|R_R|}{|R_m|}E(R_R) $$
#
# Let's consider again the example of the two partitions above. The entropy of the split in Case 1 is:
#
# $$Q(R_m)=\left[\frac{1}{2}\left(-\frac{3}{5}\cdot log(\frac{3}{5})-\frac{2}{5}\cdot log(\frac{2}{5})\right)\right] +
# \left[\frac{1}{2}\left(-\frac{2}{5}\cdot log(\frac{2}{5})-\frac{3}{5}\cdot log(\frac{3}{5})\right)\right] \approx 0.67
# $$
#
# The entropy of the split in Case 2 is:
# $$Q(R_m)=\left[\frac{3}{5}\left(-\frac{5}{6}\cdot log(\frac{5}{6})-\frac{1}{6}\cdot log(\frac{1}{6})\right)\right] +
# \left[\frac{2}{5}\left(-1\cdot log(1)-0\right)\right] \approx 0.27
# $$
#
# Once again, the first split produces a more impure (=more uniformly distributed) node, leading to a higher entropy.
#
# **Gini vs. Entropy** Which one to choose between Gini impurity and Entropy? Usually the two yield very similar trees, and so it doesn't really matter [in practice](https://github.com/rasbt/python-machine-learning-book/blob/master/faq/decision-tree-binary.md). In the two-class scenario, we can plot both functions as a measure of $p_1$ (probability of class 1):
#
# <br>
# <img src="./img/decision_trees/gini_entropy.png" width="500" height="50"/>
# <br>
#
# As you can see, the two functions are a rescaled version of one another. It is therefore sensible that the two measures lead to analogous results.
#
# #### 2.1.2 Choosing the splitting values
# The test values for the root and internal nodes are determined in a very simple way: by trying them all! That is, at each iteration we consider one feature at a time, and all possible values taken on by the feature, sorted in increasing order (in practice, intermediate values between the observed ones are considered). With continuous features, the partition separates the data points into regions having the feature *smaller* or *larger* than the value under examination; for categorical features, the split separates the data points into regions having the feature *equal to* or *different from* the category under examination.
#
# This is the algorithm written in pseudo-code for classification trees (it is assumed that an impurity measure $Q$ is already chosen beforehand):
#
# [1] for $j=1,...,J$ sort the values of $X_j$ in ascending order <br>
# [2] choose the root node: calculate $Q(R)$ at $x_{ij}$ $\forall\ i,\ j$, and perform the split with $x_{ij}$ that minimizes $Q(R)$ <br>
# [3] **repeat**: <br>
# | **for** each leaf node $m$ in the current level **do**: <br>
# | **for** j $\in$ 1,...,J: <br>
# | **for** all $x_{ij}$'s inside $R_m$ not present in parent nodes: <br>
# | compute $Q(R_m)$ <br>
# | **end for** <br>
# | **end for** <br>
# | perform the split at $x_{ij}$ that minimizes $Q(R_m)$ <br>
# | **end for** <br>
# | **until** all nodes are pure OR no further split is possible <br>
#
#
# The algorithm is quite intuitive. It continues until all the partitions performed of the tree lead to completely pure regions, or when no further split is possible. This latter case can happen, for example, when two or more units from different classes have the same values of the features (noisy observations). Another remark is that it is perfectly possible to have the same feature evaluated in more than one node; the condition is that if a value of the feature has already been used by nodes at higher levels, it cannot be 'reused' for subsequent splits (which makes sense, intuitively).
#
# **Information Gain.** When evaluating a split for a node $R_m$, we choose the one that leads to the lowest overall entropy/Gini impurity of a region $R_m$, $Q(R_m)$. Equivalently, we can evaluate the one that maximizes the *Information Gain*, defined as the loss in entropy. Information Gain for the split of a node $m$, whose entropy measure of its parent $m-1$ is $Q(R_{m-1})$ is defined as:
# $$IG(R_{m-1},R_{m}) = Q(R_{m-1}) - Q(R_{m}).$$
#
#
# #### 2.1.3 Example on the Weather Dataset
# Let's check now how to compute *manually* a Classification tree on the weather data. Once again, the dataset is:
#
# <br>
#
# |Example| Outlook | Weekend | Humidity | Wind | Temperature | Played Football? |
# |:----:|:------:|:-----:|:-----:|:-----:|:-----:|:-----:|
# |1| Sunny | Y | Normal | Weak | 24 | Y |
# |2| Sunny | N | Normal | Strong | 18 | Y |
# |3| Sunny | Y | Normal | Strong | 30 | Y |
# |4| Overcast | N | High | Strong| 20 | N |
# |5| Rainy | N | Normal | Weak | 21 | N |
# |6| Sunny | Y| Normal | Strong | 16 | N |
#
# <br>
#
#
# In order to perform the splits, we will use the Gini impurity index. $Q(R_m; X_j, x_j)$ denotes the impurity of the split in $R_m$ evaluated at values $x_j$ of feature $X_j$ (for example, $Q(R; Outlook,\ Sunny)$ is the overall impurity evaluated at the root node when performing the split with `Outlook=Sunny`; inequalities (*smaller than*) will hold for the continuous feature *Temperature*).
#
# The overall dataset impurity (*before* performing the first split) is: $G(R) = 0.5$ (both classes have equal probabilities). We now have to decide what is the *feature-value* pair for the first split. Let's consider the Gini impurities for all possible splits.
#
# For example, when we consider `Outlook=Sunny` (where the two branches are going to contain 4 and 2 units, the first one having 3 units from class `Yes` and 1 from class `No`; and the second one having both units from the `No` class) the overall Gini impurity becomes:
#
# $$Q(R; Outlook,\ Sunny) = \frac{2}{3}\left[\frac{3}{4}\cdot\frac{1}{4}+\frac{1}{4}\cdot\frac{3}{4}\right] +
# \frac{1}{3} \cdot 0 = 0.25.$$
#
# In the following table you can find the values of the Gini impurities for all possible splits (notice that for continuous features, we use numbers that are in-between the observed values).
#
#
# | $$X_j, x_j$$ | $$Q(R; X_j, x_j)$$ |
# |:----:|:------:|
# | **Outlook = Sunny** | **0.25** |
# | Outlook = Overcast | 0.40 |
# | Outlook = Rainy | 0.40 |
# | Weekend = Y | 0.444|
# | Weekend = N | 0.444 |
# | Humidity = Normal | 0.40 |
# | Humidity = High | 0.40 |
# | Wind = Weak | 0.50 |
# | Wind = Strong | 0.50 |
# | Temperature $\leq 17$ | 0.40 |
# | Temperature $\leq 19$ | 0.50 |
# | Temperature $\leq 20.5$ | 0.444 |
# | **Temperature $\leq 22.5$** | **0.25** |
# | Temperature $\leq 27$ | 0.40 |
#
# <br>
#
#
# There are two values that minimize the Gini impurity: `Outlook=Sunny` and `Temperature` $\leq 22.5$. Indeed, they both are able to create one leaf node that is completely pure. In this case we can arbitrarily choose one of the two options; we will go with `Outlook=Sunny`, as it was evaluated first. Therefore, at the root node we will insert the test `Outlook=Sunny`:
#
# <br>
# <img src="./img/decision_trees/football_tree_split_1.png" width="300" height="50"/>
# <br>
#
# The tree gives us information about the node-specific class counts and Gini impurities.
#
# We have now two nodes; one of the two, being completely pure, is a leaf (when `Outlook` $\neq$ `Sunny`), and the other one, not yet pure (when `Outlook=Sunny`), must be split again. We continue to partition the space of the units present in the right node. In the following table you can find the Gini indices re-calculated for this branch (notice that in this case we don't need to check anymore the predictor `Outlook`, since we have singled out all the observations that belong to the `Sunny` category):
#
# | $$X_j, x_j$$ | $$Q(R; X_j, x_j)$$ |
# |:----:|:------:|
# | Weekend = Y | 0.333 |
# | Weekend = N | 0.333 |
# | Humidity = Normal | 0.375 |
# | Humidity = High | NA |
# | Wind = Weak | 0.333 |
# | Wind = Strong | 0.333 |
# | **Temperature $\leq 17$** | **0** |
# | Temperature $\leq 21$ | 0.25 |
# | Temperature $\leq 27$ | 0.333 |
#
# <br>
#
# By splitting at `Temperature` $\leq 17$, we could actually get a Gini index of $0$. This means that the two branches created by this partition are pure. This will be then the second split we are going to perform. The new tree (including class distributions and Gini impurity indices) looks like the following one:
#
#
# <br>
# <img src="./img/decision_trees/football_tree_split_2.png" width="400" height="50"/>
# <br>
#
# Since all nodes are pure, we terminate the algorithm here: this is the final tree. (As an exercise, try to repeat the algorithm on your own, this time using the entropy instead of the Gini impurity).
# By considering the class distribution within each leaf node, we can also predict the class probabilities. For example, we can say that for a new day with *Outlook=Sunny* and *Temperature* = 20, we will play football with probability 1 (because the node is completely pure).
#
# Other information on the estimation of decision trees can be found in the `scikit-learn` [documentation](https://scikit-learn.org/stable/modules/tree.html#mathematical-formulation).
#
# ### 2.2 Regression Trees
# The algorithm for regression decision trees is the same as the one of classification trees, with a couple of adjustments. First, the predictions of the model are given by the mean of the output within each region. Second, the loss function is not given by the Gini index or the entropy, but by loss functions typical of regression. For example, we can use the region-specific mean squared error :
#
# $$MSE(R_m) = \frac{1}{|R_m|}\sum_{i \in R_m} (y_i - \hat{y}_{R_m})^2$$
#
# where $\hat{y}_{R_m}$ is the average calculated within node $m$; alternatively, we can use the region-specific mean absolute error (notice that with Decision Trees we don't need to differentiate the loss function, and so using MAE as loss is not problematic):
#
# $$MAE(R_m) = \frac{1}{|R_m|}\sum_{i \in R_m} |y_i - \hat{y}_{R_m}|. $$
#
# Thus, we can see that a region *homogeneity* is now defined in terms of *distances* of the observations from their mean. As done for classification trees, the total $MSE$ (or $MAE$) of a split can be determined by taking the weighted average of the branch-specific $MSE$'s (or $MAE$'s).
#
#
# In the following plots, we see an example dataset with a continuous output $Y$ and one feature $X$, and the predictions performed by a regression tree along the range of $X$:
#
#
# <br>
# <img src="./img/decision_trees/regression_tree_preds.png" width="700" height="50"/>
# <br>
#
#
# So we see that what a regression tree really does is to learn a 'step function' (where the value at each step is given by the average of $Y$ within each region). The ensuing tree representation is the following (compare the tree predictions and regions with the ones of the plot):
#
#
# <br>
# <img src="./img/decision_trees/regr_tree.png" width="300" height="50"/>
# <br>
#
#
# ## 3. Tree depth and other hyperparameters
# Decision trees grown until all nodes are pure are *nonparametric models*, as the tree depth is determined by the data, and therefore the number of resulting leaves and nodes is not pre-determined a priori; the approximate depth of a well-balanced decision tree is $\mathcal{O}(log_2(n))$.
#
# Unrestricted decision trees often learn too closely (and therefore overfit) the training dataset. A way to mitigate overfit (and therefore reducing the model's variance) is by making the tree *parametric*, that is, by somehow limiting the degree of freedoms of the tree during training. This can be accomplished by controlling the maximum *depth* the tree can achieve, so that the number of parameter it can learn are fixed a priori (similar to what happens with linear models, for example). By doing so, the algorithm described in the last section will stop when the tree has reached the maximum depth required (if pure leaves have not been found earlier). Examine the following decision boundaries, obtained after fitting Classification Trees with varying *maximum depth*.
#
# <br>
# <img src="./img/decision_trees/tree_boundary_max_depth.png" width="900" height="50"/>
# <br>
#
# You can see that increasing *maximum_depth* leads to more irregular (more dataset-specific) boundaries. The decision tree trained on the dataset above having maximum depth 2 is:
# <br>
# <img src="./img/decision_trees/bin_tree_2_depth.png" width="400" height="50"/>
# <br>
#
# Note that, as we don't have pure nodes anymore, the predicted class probability within each regions can differ from 1 and 0 now. For example, a unit with $X_2=3$ has probability 0.74 to belong to class 1, probability 0.21 to belong to class 2, and probability 0.05 to belong to class 3.
#
# With maximum depth 5 we obtain the following tree:
#
# <br>
# <img src="./img/decision_trees/bin_tree_5_depth.png" width="800" height="50"/>
# <br>
#
# As we increase the maximum depth, trees become harder to read and interpret. However, trees with too low maximum depth can be underfitting, for some datasets. It is therefore important to control for such hyperparameter, by tuning it (for example, with cross-validation). For the dataset above, the estimated training set and test set accuracies for varying max depth are:
#
# <br>
# <img src="./img/decision_trees/test_train_accuracies.png" width="400" height="50"/>
# <br>
#
# The model fits the training data points more and more closely as we increase the depth of the tree, but the test accuracy actually decreases after a max depth of 2 (therefore, this seems to be the ideal value for this data).
#
# There are other ways in which the complexity of a decision tree can be controlled. These include:
#
# * minimum number of units (called *samples* in scikit-learn) that are required in a node before it can be split; a lower value of this hyperparameter implies more complex trees
# * minimum number of units required in a leaf node; a lower number means that we can have smaller leaves, and therefore more complex trees
# * maximum number of leaf nodes; the higher, the more complex the tree
# * minimum decrease in impurity when performing a split; if the decrease in impurity (Gini, entropy, MSE, MAE, etc.) when decreasing a split does not exceed this value, the split won't be performed and therefore the tree will be simpler with larger values of such hyperparameter
# * in general, modifying these hyperparameters won't have the same effect of modifying the maximum depth; for example, by imposing a specific minimum number of units, trees might become more or less imbalanced towards the right or left side (you can experiment with these hyperparameters in the demo's of Section 7 and 8 below)
#
# There are other ways to control for tree complexity. They won't be covered here, but you can learn more about it in the documentation [here](https://scikit-learn.org/stable/modules/tree.html#tips-on-practical-use) and [here](https://scikit-learn.org/stable/modules/tree.html#minimal-cost-complexity-pruning) (note that *pruning* means, in this context, decreasing the tree size to make the model simpler).
#
#
# ## 4. Feature Importances
# Decision Trees are able to perform automatic feature selection during their training step, as features need to be selected for the tests in the internal node. Therefore, features that do not appear in the tests are not considered relevant by decision trees.
#
# The selected variables can be ranked by their importance. Here, importance is calculated by summing the weighted loss in impurity (that is, $Q(R_m)$ weighted by the fraction of instances in the node being split by feature $X_j$), and normalizing the results in such a way that they sum to 1. Features with higher importance values contribute the most to the improvement in node purity and, therefore, to determining the final predictions. Note, furthermore, that splits at top and higher levels usually lead to larger decreases in impurity.
#
# **Note**: importantly, this way of measuring feature importance does not give us any indication about the *direction* of the relationship between the predictors and the output. Moreover, feature importances do not consider interaction effects between the features.
#
# ## 5. Main Issues
# ### 5.1 Non-axes-aligned data
# As all Data Mining and Machine Learning algorithms, also decision trees make assumptions on the data. One of the assumptions of decision trees is that the feature space can be partitioned into rectangular, orthogonal regions. Of course, this can become problematic when the data are not axes-aligned. You can see an example in the following figure.
#
# <br>
# <img src="./img/decision_trees/non_axes_aligned.png" width="500" height="50"/>
# <br>
#
# In this case, the two classes can easily be separated by a linear decision boundary. However, because of their assumptions, decision trees are not able to detect diagonal decision boundaries, and fail to catch the linear relationship.
#
# A (sometimes effective) solution to this is to apply PCA to the data before fitting a tree. In this way, data points are rotated towards the directions of their maximum variation, and the task for decision trees becomes easier. Here, you can see the decision regions detected by the tree after transforming the same data with PCA.
#
# <br>
# <img src="./img/decision_trees/pca_aligned.png" width="300" height="50"/>
# <br>
#
#
# ### 5.2 Extrapolation
# Decision trees are also not good at extrapolating, which means performing predictions outside the range of the observed values. This is problematic, for instance, in time series, where trends are common and future observations need to be forecasted. You can see a regression example in this figure, where the prediction of a Regression tree is compared with the one of a linear regression model:
#
# <br>
# <img src="./img/decision_trees/extrapolating.png" width="600" height="50"/>
# <br>
#
#
# ### 5.3 Instability
# Decision trees are also very high-variance and unstable models (this is why they tend to overfit easily). With minimal perturbations of the dataset, their predictions can vary dramatically. The two datasets in the next figure differ with each other by only two data point of Class 2, removed in the data of the rightmost plot. As you can see, the decision regions are rather different from each other despite the minimal change.
#
# <br>
# <img src="./img/decision_trees/instability.png" width="600" height="50"/>
# <br>
#
# ## 6. Other Remarks
# * The type of decision trees described here are known in the literature as CART (*Classification And Regression Trees*), as they allow for both classification and regression tasks. Other famous decision trees in the literature (that came also chronologically earlier) are *ID3*, devised only for categorical (input and output) features, and *C4.5*, which also allows for continuous input features. You can find more information about such trees in the [documentation](https://scikit-learn.org/stable/modules/tree.html#tree-algorithms-id3-c4-5-c5-0-and-cart) of scikit-learn
#
# * As already remarked, decision trees tend to overfit the data quite easily; however, when used as *base learners* for ensemble methods such as Bagging and Boosting (which we will encounter towards the end of the course), they become extremely effective and produce highly reliable predictions
#
# * Assuming a fully-grown balanced tree, the computational complexity of the decision tree algorithm during training is $\mathcal{O}(np\ log_2(n))$, while the computational cost of a prediction (to traverse the whole tree, from root to node) is $\mathcal{O}(log_2(n))$
#
# * Similar to the Naive Bayes method, Decision Trees do not need a lot of preprocessing in the data, given the nature of their algorithm. In particular, continuous features need not be scaled, since trees are invariant to monotonic transformations of the features; furthermore, categorical features need not be encoded, as trees just care about decreasing impurity at each split. However, decision trees in scikit-learn cannot work with categories directly, and so features in scikit-learn need to be encoded also for decision trees
#
# * Although decision trees can also account for the presence of missing data in theory (for instance, by sending the instance down both branches when the split is on a missing value, and weighting the prediction based on branch size), scikit-learn does not support missing values. Thus, missing information should be treated before fitting a decision tree on scikit-learn.
#
# * Like Naive Bayes, Decision Trees do not need to be used with OvR or OvO approaches for the multiclass case, as they can handle multiple classes simultaneously.
#
# * More information about Decision Trees can be found in the [documentation](https://scikit-learn.org/stable/modules/tree.html#)
#
#
# ## 7. Examples in Python
# ### 7.1 Classification Trees
# Classification trees can be called, trained, and used for prediction in Python with the [DecisionTreeClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html) function, which is in the `sklearn.tree` module.
#
# To show how it works, we will make use of the *reduced Iris dataset*, already introduced in the notebook of the Naive Bayes lecture.
# +
from sklearn.model_selection import train_test_split

# Load the reduced Iris data: the first two columns are the features
# (petal length/width), the third is the class label.
data = pd.read_csv("./data/decision_trees/iris_reduced.csv")
X, y = data.iloc[:, :2], data.iloc[:, 2]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, random_state=1, test_size=0.2
)

# Scatter the training points, one colour/marker per iris species.
plt.figure(figsize=(5, 5))
for cls, marker, label in [(0, "bo", "0:setosa"),
                           (1, "go", "1:versicolor"),
                           (2, "yo", "2:virginica")]:
    subset = X_train.loc[y_train == cls]
    plt.plot(subset.iloc[:, 0], subset.iloc[:, 1], marker, label=label)
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend()
plt.show()
# -
# Let's initialize and train a DecisionTree classifier, with `max_depth`=5, and using the Gini index to decrease the impurities at each split. We also initialize the random seed, as scikit-learn randomly permutes the features before training:
from sklearn.tree import DecisionTreeClassifier
tree_mod = DecisionTreeClassifier(max_depth=5, criterion='gini', random_state=1)
tree_mod.fit(X_train, y_train)
# We start by assessing the binary structure (rules) of the tree (in form of text):
from sklearn.tree import export_text
tree_rules = export_text(tree_mod, feature_names=list(X_train))
print(tree_rules)
# Let's now see the values of the first test unit:
X_test.iloc[0]
# According to the tree, such unit should be predicted in Class 2 (the class of Iris Virginica), as it falls exactly in the last leaf at the bottom of the tree. Let's confirm our intuition:
tree_mod.predict(X_test.iloc[0].to_numpy().reshape(1,-1))
# Which is indeed the case. It is also possible to obtain the values of the right and left splits, and of the nodes, as explained in the [documentation](https://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html).
# We can also obtain a nicer representation of the tree:
from sklearn.tree import plot_tree
plt.figure(figsize=(10,10))
plot_tree(tree_mod, feature_names=list(X_train), class_names=['setosa', 'versicolor', 'virginica'], filled=False,
fontsize=10, rounded=True)
plt.show()
# The plot is telling us, for example, that the total Gini index of the dataset (i.e., before starting to split the data) is 0.667, and that the probability of class virginica in the region of the feature space where `petal_width` $\geq 1.75$ and $4.75 <$ `petal_length` $\leq 4.85$ is 2/3 (check this result by yourself). As you can see, by imposing a maximum depth to the tree, some of the leaves of the final tree are not pure.
#
# Let's check the predicted probabilities for the first five test units:
tree_mod.predict_proba(X_test[:5])
# Such units are probably in the interior of the regions, where the estimated tree is more certain about the classification of the units, as all the flowers in the test set are predicted with absolute confidence (probability of 1). Let's assess the accuracies of the model in the training and test set:
print("Training Accuracy: {0:.2f}; test accuracy:{1:.2f}".format(tree_mod.score(X_train, y_train),
tree_mod.score(X_test, y_test)))
# This tree does an amazing job in both the training and test set. This is probably due to the fact that the classes can be easily split into axes-aligned regions, which makes decision trees perform greatly. What is the feature that decreases the tree impurity the most? We can assess it with:
tree_mod.feature_importances_
# This is telling us that `petal_width` contributes to improve about 57% of the overall node impurity, and `petal_length` about 43%. Overall, both features seem to be quite important to determine the splits and the classifications of the *iris* flowers. Let's conclude by plotting the decision regions of the trained tree:
# +
xx1, xx2 = np.meshgrid(np.linspace(X.iloc[:,0].min()-0.5, X.iloc[:,0].max()+0.5, 100),
np.linspace(X.iloc[:,1].min()-0.2, X.iloc[:,1].max()+0.5, 100))
zz = tree_mod.predict(np.c_[xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)
plt.figure(figsize=(5,5))
plt.plot(X_train.loc[y_train==0].iloc[:,0], X_train.loc[y_train==0].iloc[:,1], "bo", label="0:setosa" )
plt.plot(X_train.loc[y_train==1].iloc[:,0], X_train.loc[y_train==1].iloc[:,1], "go", label="1:versicolor" )
plt.plot(X_train.loc[y_train==2].iloc[:,0], X_train.loc[y_train==2].iloc[:,1], "yo", label="2:virginica" )
plt.contour(xx1, xx2, zz, levels=2, colors=['b', 'g', 'y'])
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend()
plt.show()
# -
# Exercise: Try to play around with the various hyperparameters introduced in section 3 of this notebook, and check how model performance and decision boundaries change.
# ### 7.2 Regression Trees
# Also regression trees are in the `scikitlearn.tree` module. They can be called with the function [DecisionTreeRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html#sklearn.tree.DecisionTreeRegressor). To ease visualization, we are going to fit a regression tree on a small demo dataset with just one predictor. Of course, the commands used here are the same that you would use with multivariate features.
data = pd.read_csv("./data/decision_trees/demo_regression.csv")
data.head()
data.shape
# +
x = data['x']
y = data['y']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
plt.figure(figsize=(6,4))
plt.plot(x_train, y_train, "bo")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Regression: Demo Data")
plt.show()
# -
# The data seem to be somehow sinusoidal, with some noise around the sine curve. Let's check how a regression tree, trained with maximum depth equal to 5 and MAE loss, performs in this case!
# +
from sklearn.tree import DecisionTreeRegressor
# This step is necessary as we are working with a one-dimensional feature:
# scikit-learn estimators expect a 2-D feature matrix of shape (n_samples, n_features).
x_train_np = x_train.to_numpy().reshape(-1,1)
x_test_np = x_test.to_numpy().reshape(-1,1)
# Regression tree limited to depth 5, splitting on mean absolute error.
# NOTE(review): criterion='mae' was renamed to 'absolute_error' in scikit-learn 1.0
# and removed in 1.2 -- update the string if running on a recent version.
tree_reg_mod = DecisionTreeRegressor(max_depth=5, criterion='mae')
tree_reg_mod.fit(x_train_np, y_train)
# -
# The tree structure is:
from sklearn.tree import export_text
tree_rules = export_text(tree_reg_mod, feature_names='x')
print(tree_rules)
# As you can see, the interpretation can become a little overwhelming with large trees. The tree representation is as follows:
from sklearn.tree import plot_tree
plt.figure(figsize=(20,20))
plot_tree(tree_reg_mod, feature_names=list('x'), filled=False,
fontsize=10, rounded=True)
plt.show()
# The prediction of the first five test data are:
tree_reg_mod.predict(x_test_np[:5])
# The $R^2$ score (default for regression estimators in `scikit-learn`) is:
tree_reg_mod.score(x_test_np, y_test)
# This can be interpreted as the percentage of variability in the output of the test data that the predictions given by the tree are able to capture. We are not going to explore feature importances, as in presence of only one feature this will be given 100% importance. We terminate this little demo by confronting the model predictions with the training data:
# +
x_range = np.linspace(x_train_np.min(), x_train_np.max(), 100)
reg_pred = tree_reg_mod.predict(x_range.reshape(-1,1))
plt.figure(figsize=(6,4))
plt.plot(x_train, y_train, "bo")
plt.plot(x_range, reg_pred, "r-", label="Reg. Tree Predictions")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Regression: Demo Data")
plt.legend(loc=3)
plt.show()
# -
# Exercise: similar to what done for the classification tree, manipulate the hyperparameters of the regression tree to see how it modifies predictions and performance.
# ## 8. Example on the Heart Dataset
# We are now going to train a decision tree classifier on the Heart Dataset, as done for previous classifiers. This time, we will use the entropy to evaluate the splits at each node; we will, furthermore, tune the hyperparameters `min_samples_split`, which regulates the minimum amount of instances required in a node in order to proceed with the split. (As an exercise, try to repeat the training by tuning a grid of hyperparameters simultaneously: for example, `max_depth`, `min_leaf_nodes`, `min_samples_leaf`, and as many others as you wish). Note also that we are going to work with the already preprocessed dataset, although, for example, scaling the continuous data is not really necessary with decision trees (as already observed in section 6).
# 1. Load datasets
data_train = pd.read_csv("./data/heart_data/heart_train_processed.csv")
data_test = pd.read_csv("./data/heart_data/heart_test_processed.csv")
X_train = data_train.drop("y", axis=1)
X_test = data_test.drop("y", axis=1)
y_train = data_train["y"]
y_test = data_test["y"]
# +
# Prepare the grid and tune with GridSearchCV
from sklearn.model_selection import GridSearchCV
# Set parameter grid.
# NOTE: min_samples_split must be an integer >= 2 (or a float in (0.0, 1.0]);
# the previous value 1 makes scikit-learn raise a ValueError, so the grid
# starts at 2 instead.
min_samples_split_grid = [2, 5, 8, 10, 12, 15, 20, 25, 30, 40, 50]
tuning_parameters = [{'min_samples_split': min_samples_split_grid}]
# Launch a 10-fold cross-validated search over the single hyperparameter,
# using all available cores (n_jobs=-1) and verbose progress output.
clf = GridSearchCV(DecisionTreeClassifier(criterion='entropy', random_state=1), tuning_parameters, cv=10, n_jobs=-1, verbose=2 )
clf.fit(X_train, y_train)
print("CV Grid Search: Done")
# -
clf.best_estimator_
# The CV accuracy is maximized with a minimum of 10 samples per split. Let's see how the CV score varies with this hyperparameter:
cv_res = pd.DataFrame(clf.cv_results_).sort_values(by="param_min_samples_split")
plt.figure(figsize=(10, 4))
plt.plot(np.linspace(1, len(min_samples_split_grid), len(min_samples_split_grid)),
cv_res["mean_test_score"], "b-", label="CV Scores")
plt.xlabel("Min. Samples Split")
plt.ylabel("CV Accuracy")
plt.xticks(np.linspace(1, len(min_samples_split_grid), len(min_samples_split_grid)), min_samples_split_grid)
plt.show()
# The best CV score is then:
clf.best_score_
# Which is lower than the CV scored observed with linear models and Naive Bayes. Let's now assess the test score:
clf.best_estimator_.score(X_test, y_test)
# That is a huge drop with respect to the models encountered so far! Probably the data in the Heart Dataset are not axes-aligned.
#
# Let's inspect the best decision tree found by cross-validation. It has final depth:
clf.best_estimator_.get_depth()
# Visualizing the whole tree can take too much space (you can try to export it with the function [export_graphviz](https://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html); it requires the graphviz package installed on your local machine). We can, however, ask `plot_tree` to plot only the first two levels of the tree by setting `max_depth=2`:
plt.figure(figsize=(15,10))
plot_tree(clf.best_estimator_, feature_names=list(X_train.columns), filled=False,
fontsize=10, rounded=True, max_depth=2)
plt.show()
# Being at the root node, belonging or not to the `Thal_normal` group seems to be the most discriminant factor detected by the tree. At the second level we find features such as `ChestPain` (`asymptomatic` group) and `Ca`. Let's assess the feature importances of the tree:
plt.figure(figsize=(8,5))
plt.barh(range(X_train.shape[1]), np.sort(clf.best_estimator_.feature_importances_), color='b')
plt.xticks(np.arange(clf.best_estimator_.feature_importances_.min(), clf.best_estimator_.feature_importances_.max(),0.05))
plt.yticks(range(X_train.shape[1]), X_train.columns[np.argsort(clf.best_estimator_.feature_importances_)])
plt.show()
# The most important feature detected by the tree to predict heart disease is the one corresponding to the indicator of the `Thal_normal` category. Other important features include the `ChestPain_asymptomatic`group, `OldPeak`, `RestBP`, and `Age`. How these variable interact with the classes and with other features must be inferred by exploring the tree.
| 8. decision_trees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py3]
# language: python
# name: conda-env-py3-py
# ---
# # RR_LYRAE Dataset Analysis and Efficiency of Various Algorithms on The Given Data
# these are import statements to import libraries for python and for the command to import linear Discriminator there is an alternative code too from sklearn.lda import LDA this works in latest version through my notebook is running an older version so the old version of the code is being used sk
# -------------------------------------------------------------------------------------------------------------------
# A brief about RR_Lyrae Stars
# RR Lyrae variables are periodic variable stars, commonly found in globular clusters and are used as standard candles to measure (extra)galactic distances.
# RR Lyraes are pulsating horizontal branch ageing stars of spectral class A or F, with a mass of around half the Sun's. They are thought to have previously shed mass during the Red-giant branch phase, and consequently, they were once stars with similar or slightly less mass than the Sun, around 0.8 solar masses.
# -------------------------------------------------------------------------------------------------------------------
# Algorithms used
# 1. Decision Tree
# 2. SVM
# 3. Linear Discriminator
# 4. K-Neighbours
# 5. Logistic Regression
# +
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from astroML.datasets import fetch_rrlyrae_combined
from astroML.utils import completeness_contamination
from astroML.utils import split_samples
from astroML.plotting import setup_text_plots
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
# -
# Let's define the text properties.
setup_text_plots(fontsize=8, usetex=True)
# We import and split data into test and train.Whenever we use an ML module we divide the data into two sets so that we can train the module and then get the prediction efficiency. also, we are converting the data into two pandas dataframe this will allow proper data manipulation later in the notebook.This library make the data visualization better
X, y = fetch_rrlyrae_combined()
xd = pd.DataFrame(data=X)
yd = pd.DataFrame(data=y)
(X_train, X_test), (y_train, y_test) = split_samples(X, y, [0.75, 0.25], random_state=1)
# + active=""
# We construct a Decision tree this is a type of classification algorithm
# -
clf = DecisionTreeClassifier(random_state=1, criterion='entropy')
# We fit our data to train the decision tree there are two data sets we have made x_train and y_train
# we have the variable in x and we need to generate output in form of y.
# basically we have x as inputs and we need to predict the value of the y (1,0) ie rr_lyare or not.
clf.fit(X_train, y_train)
# We generate a prediction score
y_out = clf.predict(X_test)
clf.predict_proba(X_test, check_input=True)
# We generate the average accuracy score
clf.score(X_test,y_test)
# we have the accuracy score that defines how well algorithm can predict the data values also
# we see how many mistakes have been made by the algorithm
# +
# Tally the confusion-matrix cells on the test split
# (positive = RR Lyrae, negative = non-variable star).
tot_neg = np.sum(y_test == 0)  # total negatives in y_test (computed but unused below)
tot_pos = np.sum(y_test == 1)  # total positives in y_test (computed but unused below)
fn = np.sum((y_test == 1) & (y_out == 0))  # false negatives
tn = np.sum((y_test == 0) & (y_out == 0))  # true negatives
fp = np.sum((y_test == 0) & (y_out == 1))  # false positives
tp = np.sum((y_test == 1) & (y_out == 1))  # true positives
print( "----------------------------------------------------------------")
print ("partial training set: (%i non-variable, %i RR Lyrae)"
       % (np.sum(y_train == 0), np.sum(y_train == 1)))
print( "positive = RR Lyrae, negative = non-variable")
# NOTE(review): the percentages below are fp/(fp+tp) and fn/(fn+tn), i.e.
# false discovery / false omission rates, not the usual FPR/FNR (which
# would divide by the actual negative/positive totals).
print ("false positives: %i (%.1f%%)" % (fp, fp * 100. / (fp + tp)))
print ("false negatives: %i (%.1f%%)" % (fn, fn * 100. / (fn + tn)))
# -
# We generate an SVM; we have also performed cross-validation, and the accuracy is good
clf =SVC(random_state=1)
clf.fit(X_train, y_train)
clf.score(X_test,y_test)
print(clf.predict(X_test))
# +
#scores = cross_val_score(clf,xd,yd, cv=5)
#print(scores)
# -
# scatter plot of the data this shows that which ones are stars and rr_lyrae stars in which frequency and time domain.
# Matplotlib is used here and full data is utilized
# +
stars = (y == 0)
rrlyrae = (y == 1)
ax = plt.axes()
ax.plot(X[stars, 0], X[stars, 1], '.', ms=5, c='b', label='stars')
ax.plot(X[rrlyrae, 0], X[rrlyrae, 1], '.', ms=5, c='r', label='RR-Lyrae')
ax.legend(loc=10)
ax.set_xlabel('$u-g$')
ax.set_ylabel('$g-r$')
ax.set_xlim(0.7, 1.4)
ax.set_ylim(-0.2, 0.4)
plt.show()
# -
# this shows the plot via seaborn we have various subplots explaining the distribution of various frequency and time domains that define a star in multiple distributions.for the type of RR_lyre and a normal star
# 0 star
# 1,2,3 Types of RR_Lyrae
sns.set(style="ticks", color_codes=True)
sns.pairplot(xd, diag_kind="kde", markers="+",plot_kws=dict(s=50, edgecolor="b", linewidth=1),diag_kws=dict(shade=True))
plt.show()
# this shows the density of type of stars in particular range there are supposed to be dots but due to large number of data points they look like bars but all of them have specific zones
sns.set(style="whitegrid", color_codes=True)
sns.stripplot( data=xd, jitter=True)
plt.show()
# This plot shows the distribution of the data in y, i.e., which category each star belongs to. It is not very informative on its own, but it is an integral part of the analysis.
sns.pairplot(yd, diag_kind="kde", markers="+",plot_kws=dict(s=50, edgecolor="b", linewidth=1),diag_kws=dict(shade=True))
plt.show()
# we can see the data in the tabular format
print(xd)
print(yd)
# here we are defining other algorithms like the previous SVM but we will be trying regression to linear one the kernel used in SVM was linear and the regression is logistics.
# then we have other classification algorithms our goal as mentioned above will be to plot curves to see which one is most suitable fo the data.
# a hint the Logistic regression doesn't support class weights as auto if you try out dict else it will be none but the balanced setting should be used to avoid any bias towards any classes.
# +
def compute_models(*args):
    """Fit each (classifier, kwargs) pair and collect test-set probabilities.

    Parameters
    ----------
    *args : tuples of (classifier_class, kwargs_dict)
        Each classifier class is instantiated with its kwargs and fitted.

    Returns
    -------
    names : list of str
        Class names of the fitted estimators.
    probs : list of 1-D arrays
        P(class 1) for every test sample, one array per model.

    Notes
    -----
    Relies on the module-level X_train, y_train, X_test splits.
    """
    names = []
    probs = []
    for classifier, kwargs in args:
        print(classifier.__name__)
        clf = classifier(**kwargs)
        clf.fit(X_train, y_train)
        y_probs = clf.predict_proba(X_test)[:, 1]
        names.append(classifier.__name__)
        probs.append(y_probs)
    # BUG FIX: the return statement was previously indented inside the for
    # loop, so only the *first* model was ever fitted and evaluated; it must
    # execute only after all (classifier, kwargs) pairs have been processed.
    return names, probs
names, probs = compute_models(
(LDA, {}),
(LogisticRegression,
dict(class_weight='balanced')),
(KNeighborsClassifier,
dict(n_neighbors=10)),
(DecisionTreeClassifier,
dict(random_state=0, max_depth=12,
criterion='entropy')),
)
# -
# finally, this segment will test different algo's efficiency and see the ROC curve to decide which one is most suitable.
# Custom Labels for the algorithms are defined please check underlying code for full form the abbreviations used
# +
fig = plt.figure(figsize=(12, 8.5))
fig.subplots_adjust(left=0.1, right=0.95, bottom=0.15, top=0.9, wspace=0.25)
# ax2 will show roc curves
ax1 = plt.subplot(121)
# ax1 will show completeness/efficiency
ax2 = plt.subplot(122)
labels = dict(LinearDiscriminantAnalysis='LDA',
KNeighborsClassifier='KNN',
DecisionTreeClassifier='DT',
LogisticRegression='LR')
# -
# the slopes of the KNN and DT are quite good and fits the criterion for a good algorithm thus we can say that these two are good ones. the curves for LR and LDA are quite steep.The neural network may have higher performance but the time taken will be a negative factor
# +
thresholds = np.linspace(0, 1, 1001)[:-1]
# iterate through and show results
for name, y_prob in zip(names, probs):
fpr, tpr, thresh = roc_curve(y_test, y_prob)
# add (0, 0) as first point
fpr = np.concatenate([[0], fpr])
tpr = np.concatenate([[0], tpr])
ax1.plot(fpr, tpr, label=labels[name])
comp = np.zeros_like(thresholds)
cont = np.zeros_like(thresholds)
for i, t in enumerate(thresholds):
y_pred = (y_prob >= t)
comp[i], cont[i] = completeness_contamination(y_pred, y_test)
ax2.plot(1 - cont, comp, label=labels[name])
ax1.set_xlim(0, 0.04)
ax1.set_ylim(0, 1.02)
ax1.xaxis.set_major_locator(plt.MaxNLocator(5))
ax1.set_xlabel('false positive rate')
ax1.set_ylabel('true positive rate')
ax1.legend(loc=4)
ax2.set_xlabel('efficiency')
ax2.set_ylabel('completeness')
ax2.set_xlim(0, 1.0)
ax2.set_ylim(0.2, 1.02)
plt.show()
# -
| RR_Lyare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Strings
# [Datastics Lab](https://github.com/datasticslab/ISCB) | Created by [<NAME>](https://github.com/jubayer-hossain)
# ## Previous
# - What is Python?
# - Why Python Rocks in Research?
# - Little bit history of Python
# - Variables, Expressions, Comments
# - Data Types
# - Printing Messages
# - Operators in Python
# - Python Data Type Conversion
# - Python User Inputs
# - Algorithms and Flowcharts
# - `if` statement
# - `else` statement
# - `elif` statement
# - `while` loop
# - `range()` function
# - `for` loop
# - `pass` statement
# - `break` statement
# - `continue` statement
# - Python Function
# - Function Arguments / Parameters
# ## Today
# - Strings
# - String Manipulation
# - String Methods
# ## String Operators
# ### The `+` Operator
# The + operator concatenates strings. It returns a string consisting of the operands joined together.
s1 = "Ub"
s2 = "er"
s1+s2
s3 = "Fizz"
s4 = "Buzz"
s3+s4
# ### The `*` Operator
# The * operator creates multiple copies of a string. If `s` is a string and `n` is an integer, either of the following expressions returns a string consisting of `n` concatenated copies of `s`
# * $s \times n$
# * $n \times s$
s = "Fizz"
n = 3
# n * s
s * n
# s * n
n * s
# __Note__
#
# The multiplier operand n must be an integer. You’d think it would be required to be a positive integer, but amusingly, it can be zero or negative, in which case the result is an empty string.
"foo" * -8
"foo" * .5
# ### The `in` Operator
# Python also provides a membership operator that can be used with strings. The in operator returns True if the first operand is contained within the second, and False otherwise
text = "I love Bangladesh!"
# membership check
"love" in text
# Dhaka in text?
"Dhaka" in text
# Dhaka not in text?
"Dhaka" not in text
# ## Built-in String Function
# ### `ord(c)`
# Returns an integer of the given character.
# ASCII value of a
ord('a')
# ASCII value of A
ord('A')
# ASCII value of #
ord("#")
# ### `chr(i)`
# Converts an integer to character
# Convert integer to character 97 == a
chr(97)
# Convert integer to character 65 == A
chr(65)
# ### `len(s)`
# Returns the length of a string
# length of a string
S = "<NAME>!"
len(S)
# ### `str(param)`
# Returns a string representation of a string
# 10 into '10'
str(10)
# 10.5 into '10.5'
str(10.5)
# ## String Indexing
# In Python, strings are ordered sequences of character data, and thus can be indexed in this way. Individual characters in a string can be accessed by specifying the string name followed by a number in square brackets ([]).
# String indexing in Python is zero-based: the first character in the string has index 0, the next has index 1, and so on. The index of the last character will be the length of the string minus one.
#
# 
# forward indexing
country = "Bangladesh"
# check length
len(country)
country[0]
country[1]
country[2]
country[3]
country[4]
country[5]
country[6]
country[7]
country[8]
country[9]
country[10]
# reverse indexing
country = "Bangladesh"
country[-1]
country[-2]
country[-3]
country[-4]
country[-5]
country[-6]
country[-7]
country[-8]
country[-9]
country[-10]
# ## String Slicing
# 
# slicing
country = "Bangladesh"
country[0:2] # 0 included but 2 excluded
country[3:5]
country[2:len(country)]
country[-1:-3]
# ## String Operation Function
# ### `s.lower()`
# Returns all characters in lowercase.
name = "Rahim"
name.lower()
# ### `s.upper()`
# Returns all characters in uppercase
text = "fizzbuzz"
text.upper()
# ### `s.capitalize()`
# Converts first letter into uppercase
text = "fizzbuzz"
text.capitalize()
# ### `s.title()`
# Convert first letter lowercase to uppercase in a sentence
text2 = "i love bangladesh!"
text2.title()
# ### `s.split(t)`
# Converts a sentence into List
# Split a sentence into list
sent = "Dhaka is the capital of Bangladesh!"
sent.split(" ")
# ### `s.strip()`
# Removes whitespace from text(both side)
text4 = " FooBazar"
text4.strip()
# ### `s.rstrip()`
# Removes whitespace from right end.
text5 = " FooBazar "
text5.rstrip()
# ### `s.lstrip()`
# Removes whitespace from left end
text6 = " FooBazar "
text6.lstrip()
# ## `s.find(t)`
# Returns substring position.if the string not found return `-1`
text7 = "Datastics Lab"
text7.find("s")
text7 = "Datastics Lab"
text7.find("Lab")
text7 = "Datastics Lab"
text7.find("foo")
# ## `s.count(t)`
# Returns the number of character in a sentence or word
country = "Japan"
country.count("a")
# ### `s.replace(u, v)`
# Replace value in a sequence of character(string)
text8 = "Datastics lab"
text8.replace("l", "L")
# ## Word Comparison Functions
# ### `s.startswith(t)`
text9 = "Lab for Making Insights from Data!"
text9.startswith("Lab")
text9 = "Lab for Making Insights from Data!"
text9.startswith("lab")
# ### s.endswith(t)
text10 = "Lab for Making Insights from Data!"
text10.endswith("!")
text10 = "Lab for Making Insights from Data!"
text10.endswith("?")
# ### `s.isupper()`
text11 = "Lab for Making Insights from Data!"
text11.isupper()
# ### s.islower()
text12 = "Lab for Making Insights from Data!"
text12.islower()
# ### s.istitle()
text13 = "Lab for Making Insights from Data!"
text13.istitle()
# ### `s.isalpha()`
text14 = "Lab for Making Insights from Data!"
text14.isalpha()
# ### `s.isdigit()`
text15 = "Lab for Making Insights from Data!"
text15.isdigit()
# ### `s.isalnum()`
text16 = "Lab for Making Insights from Data!"
text16.isalnum()
# ## String Iteration
S = "Bangladesh!"
for i in S:
print(i)
S = "Bangladesh!"
for index, val in enumerate(S):
print(index, val)
# ## Resources
# - https://www.python.org/doc/essays/blurb/
# - https://dev.to/duomly/10-reasons-why-learning-python-is-still-a-great-idea-5abh
# - https://www.stat.washington.edu/~hoytak/blog/whypython.html
# - https://www.programiz.com/python-programming
| book/_build/jupyter_execute/python/(J) Python Data Structures-I.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Library imports
import scipy
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def evaluate(t, y):
    """Right-hand side of the N-body ODE: d/dt [pos, vel] for every body.

    `y` packs 6 values per body: [x, y, z, vx, vy, vz]. The returned vector
    carries the velocities in the position slots and the summed gravitational
    accelerations in the velocity slots.

    Relies on the module-level globals `G` and `masses`. Note that `G` is
    declared *negative* elsewhere in this notebook, which compensates for
    d = r_i - r_j pointing away from body j, so the acceleration is attractive.
    """
    NBodies = int(len(y)/6)
    solved_vector = np.zeros(y.size)
    for i in range(NBodies):
        ioffset = i*6
        # d(position)/dt = velocity
        solved_vector[ioffset:ioffset+3] = y[ioffset+3:ioffset+6]
        for j in range(NBodies):
            joffset = j*6
            if i != j:
                # Separation vector and distance between bodies i and j.
                d = y[ioffset:ioffset+3] - y[joffset:joffset+3]
                r = np.sqrt(np.sum(d**2))
                # Pairwise acceleration contribution on body i from body j
                # (no softening: this diverges as r -> 0).
                a = d*G*masses[j]/(r**3)
                solved_vector[ioffset+3:ioffset+6] += a
    return solved_vector
def rk4(t,dt,y,evaluate):
    """Advance the state `y` by one classical Runge-Kutta (RK4) step of size `dt`.

    `evaluate(t, y)` must return the time derivative of `y`; the weighted
    average of four slope samples yields a fourth-order accurate update.
    """
    half = 0.5 * dt
    slope_start = evaluate(t, y)
    slope_mid_a = evaluate(t + half, y + half * slope_start)
    slope_mid_b = evaluate(t + half, y + half * slope_mid_a)
    slope_end = evaluate(t + dt, y + dt * slope_mid_b)
    # Simpson-like weighting 1:2:2:1 of the sampled slopes.
    weighted = slope_start + 2 * slope_mid_a + 2 * slope_mid_b + slope_end
    return y + (dt / 6.) * weighted
def run(T, dt, y0, masses, evaluate, t0 = 0):
    """Integrate the system from `t0` to `T` with fixed-step RK4.

    Returns an array of shape (nsteps + 1, len(y0)) whose rows are the state
    vector at t0, t0 + dt, ... (row 0 holds the initial condition `y0`).
    Note: `masses` is part of the signature but unused here; the force law
    reads it from module scope inside `evaluate`.
    """
    n_steps = int((T - t0) / dt)
    trajectory = np.empty((n_steps + 1, len(y0)))
    trajectory[0, :] = y0
    clock = t0
    for step in range(n_steps):
        trajectory[step + 1] = rk4(clock, dt, trajectory[step, :], evaluate)
        clock += dt
    return trajectory
def getEnergyAndAMomentum(y, masses):
    """Return (kinetic energy, potential energy) for the packed state `y`.

    NOTE(review): despite the name, no angular momentum is computed or
    returned -- only KE and PE.

    Relies on the module-level global `G` and on a helper `getVelocities`
    that is not defined in this file -- TODO confirm it extracts the
    per-body velocity components from the state vector.
    """
    # Presumably the first three columns of the reshaped result are the
    # velocity components (vx, vy, vz) -- verify against getVelocities.
    v = getVelocities(y).copy().reshape((-1,6))[:,:3]
    m = masses.copy().reshape((-1,1))
    # Kinetic energy: 0.5 * sum over bodies and components of m * v^2.
    KE = 0.5 * np.sum(np.sum( m * v**2 ))
    yAux = y.copy().reshape((-1,6))
    x = yAux[:,0].reshape(-1, 1)
    # NOTE: the parameter `y` is shadowed here by the y-coordinate column.
    y = yAux[:,1].reshape(-1, 1)
    z = yAux[:,2].reshape(-1, 1)
    # Pairwise coordinate differences (antisymmetric N x N matrices).
    dx = x.T - x
    dy = y.T - y
    dz = z.T - z
    # Pairwise distances, inverted in place (diagonal zeros stay zero).
    inv_r = np.sqrt(dx**2 + dy**2 + dz**2)
    inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]
    # Potential energy summed over unique pairs (strict upper triangle, k=1).
    # NOTE(review): with this notebook's convention G < 0, the overall sign
    # is flipped relative to the usual -G*mi*mj/r -- confirm intended.
    PE = G * np.sum(np.sum(np.triu(-(m*m.T)*inv_r,1)))
    return KE, PE
# Variable declaration 1
G = -6.67*(10**-11)
t0 = 0
T = 6.221e6
dt = 3600
y0 = np.array([3.84e8, 0, 0, 0, 1000, 0,
0, 0, 0, 0, 0, 0])
masses = np.array([7.347e22, 5.97e24])
# Variable declaration 2 -> Saturn, Janus and Epithemeus
G = -6.67*(10**-11)
t0 = 0
T = 6.221e6
dt = 360
y0 = np.array([0, 0, 0, 0, 0, 0, #Saturn
0.11e9, 0.93e8, -0.17e8, -0.99e4, 0.12e6, -0.8e-4, #Janus
0.19e8, -0.14e9,0.15e8,0.16e6, 0.17e4, -0.15e4]) #Epithemeus
masses = np.array([5.68e26, 1.89e18, 5.26e17])
# Variable declaration 4 -> figure 8
G = -1
t0 = 0
T = 5
dt = 0.1
y0 = np.array([0.97000436, -0.24308753, 0, 0.93240737/2, 0.86473146/2, 0,
-0.97000436, 0.24308753,0, 0.93240737/2, 0.86473146/2, 0,
0,0,0,-0.93240737, -0.86473146,0])
masses = np.array([1, 1, 1])
# Calculation
history = run(T, dt, y0, masses, evaluate)
# +
# Plot the results
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z');
ax.plot3D(history[:,0], history[:,1], history[:,2])
ax.plot3D(history[:,6], history[:,7], history[:,8])
ax.plot3D(history[:,12], history[:,13], history[:,14])
# -
| Project source codes/Python codes/.ipynb_checkpoints/RungeKutta4-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%capture
import os
import site
os.sys.path.insert(0, '/home/schirrmr/code/reversible/reversible2/')
os.sys.path.insert(0, '/home/schirrmr/braindecode/code/braindecode/')
os.sys.path.insert(0, '/home/schirrmr/code/explaining/reversible//')
# %cd /home/schirrmr/
# %load_ext autoreload
# %autoreload 2
import numpy as np
import logging
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import cm
# %matplotlib inline
# %config InlineBackend.figure_format = 'png'
matplotlib.rcParams['figure.figsize'] = (12.0, 1.0)
matplotlib.rcParams['font.size'] = 14
import seaborn
seaborn.set_style('darkgrid')
from reversible.sliced import sliced_from_samples
from numpy.random import RandomState
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import copy
import math
import itertools
from reversible.plot import create_bw_image
import torch as th
from braindecode.torch_ext.util import np_to_var, var_to_np
from reversible.revnet import ResidualBlock, invert, SubsampleSplitter, ViewAs, ReversibleBlockOld
from spectral_norm import spectral_norm
from conv_spectral_norm import conv_spectral_norm
def display_text(text, fontsize=18):
    """Render `text` as a standalone matplotlib title and show it inline.

    A very flat figure (12 x 0.1 inches) is created so that only the title
    is visible; the figure is shown via the notebook's `display` and then
    closed immediately to release its memory.
    """
    banner = plt.figure(figsize=(12,0.1))
    plt.axis('off')
    plt.title(text, fontsize=fontsize)
    display(banner)
    plt.close(banner)
# +
from braindecode.datasets.bbci import BBCIDataset
from braindecode.mne_ext.signalproc import mne_apply
# we loaded all sensors to always get same cleaning results independent of sensor selection
# There is an inbuilt heuristic that tries to use only EEG channels and that definitely
# works for datasets in our paper
#train_loader = BBCIDataset('/data/schirrmr/schirrmr/HGD-public/reduced/train/13.mat')
#test_loader = BBCIDataset('/data/schirrmr/schirrmr/HGD-public/reduced/test/13.mat')
start_cnt = BBCIDataset('/data/schirrmr/schirrmr/HGD-public/reduced/train/4.mat',).load()
start_cnt = start_cnt.drop_channels(['STI 014'])
def car(a):
    """Common average reference: subtract the mean over axis 0 from `a`.

    The mean is kept as a (1, ...) array via keepdims so broadcasting
    removes it from every row, leaving each column zero-mean.
    """
    reference = a.mean(axis=0, keepdims=True)
    return a - reference
# Apply the common-average reference, then keep only the two motor channels.
start_cnt = mne_apply(
    car, start_cnt)
start_cnt = start_cnt.reorder_channels(['C3', 'C4'])
# +
from collections import OrderedDict
from braindecode.datautil.trial_segment import create_signal_target_from_raw_mne
# Event codes -> class names; ival is the trial window in ms relative to the marker.
marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2],),
                         ('Rest', [3]), ('Feet', [4])])
ival = [500,1500]
from braindecode.mne_ext.signalproc import mne_apply, resample_cnt
from braindecode.datautil.signalproc import exponential_running_standardize, bandpass_cnt
# NOTE(review): `log` is not defined in this cell — presumably configured earlier; confirm.
log.info("Resampling train...")
cnt = resample_cnt(start_cnt, 250.0)
log.info("Standardizing train...")
cnt = mne_apply(lambda a: exponential_running_standardize(a.T ,factor_new=1e-3, init_block_size=1000, eps=1e-4).T,
                cnt)
# NOTE(review): downsampling to 4 Hz then back up to 8 Hz acts as a crude low-pass;
# confirm this two-step resample is intentional.
cnt = resample_cnt(cnt, 4.0)
cnt = resample_cnt(cnt, 8.0)
#cnt = mne_apply(
#    lambda a: bandpass_cnt(a, 0, 2, cnt.info['sfreq'],
#                           filt_order=10,
#                          axis=1), cnt)
train_set = create_signal_target_from_raw_mne(cnt, marker_def, ival)
# +
# Select up to 160 Right-Hand and Rest trials, first channel only, as CUDA tensors.
x_right = train_set.X[train_set.y == 0]
x_rest = train_set.X[train_set.y == 2]
inputs_a = np_to_var(x_right[:160,0:1,:,None], dtype=np.float32).cuda()
inputs_b = np_to_var(x_rest[:160,0:1,:,None], dtype=np.float32).cuda()
inputs = [inputs_a, inputs_b]
# +
# Median amplitude spectrum of the real trials for both classes.
fig = plt.figure(figsize=(8,4))
for i_class in range(2):
    ins = var_to_np(inputs[i_class].squeeze())
    bps = np.abs(np.fft.rfft(ins.squeeze()))
    plt.plot(np.fft.rfftfreq(ins.squeeze().shape[1], d=1/ins.squeeze().shape[1]), np.median(bps, axis=0))
plt.title("Spectrum")
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude')
plt.legend(['Real Right', 'Fake Right', 'Real Rest', 'Fake Rest'])
display(fig)
plt.close(fig)
# +
# Grid of individual trials: left 10 columns Right Hand, right 10 columns Rest.
fig, axes = plt.subplots(16,20, figsize=(14,14), sharex=True, sharey=True)
for i_class in range(2):
    for i_example in range(len(inputs[i_class])):
        i_row = i_example // 10
        i_col = i_example % 10
        i_col += i_class * 10
        axes[i_row][i_col].plot(var_to_np(inputs[i_class][i_example]).squeeze(),
                               color=seaborn.color_palette()[i_class])
fig.suptitle('Input signals')
from matplotlib.lines import Line2D
lines = [Line2D([0], [0], color=seaborn.color_palette()[i_class],) for i_class in range(2)]
labels = ['Right', 'Rest',]
axes[0][-1].legend(lines, labels, bbox_to_anchor=(1,1,0,0))
# -
# Overlay of all trials of the first class only.
from matplotlib.lines import Line2D
plt.figure(figsize=(10,6))
for i_class in range(1):
    plt.plot(var_to_np(inputs[i_class].squeeze()).T, color=seaborn.color_palette()[i_class],lw=0.5);
lines = [Line2D([0], [0], color=seaborn.color_palette()[i_class],) for i_class in range(2)]
plt.legend(lines, ['Right', 'Rest',], bbox_to_anchor=(1,1,0,0))
plt.title('Input signals')
# +
def rev_block(n_c, n_i_c):
    """Reversible block over n_c channels: two identical conv-ReLU-conv subnets,
    each mapping half the channels through n_i_c intermediate channels."""
    def subnet():
        return nn.Sequential(
            nn.Conv2d(n_c // 2, n_i_c,(3,1), stride=1, padding=(1,0),bias=True),
            nn.ReLU(),
            nn.Conv2d(n_i_c, n_c // 2,(3,1), stride=1, padding=(1,0),bias=True))
    # Constructed in the same order as before so random weight init is unchanged.
    return ReversibleBlockOld(subnet(), subnet())
def res_block(n_c, n_i_c):
    """Residual block: conv-ReLU-conv stack with n_i_c intermediate channels,
    preserving the n_c channel count."""
    body = nn.Sequential(
        nn.Conv2d(n_c, n_i_c, (3,1), stride=1, padding=(1,0),bias=True),
        nn.ReLU(),
        nn.Conv2d(n_i_c, n_c, (3,1), stride=1, padding=(1,0),bias=True))
    return ResidualBlock(body)
# +
from discriminator import ProjectionDiscriminator
from reversible.revnet import SubsampleSplitter, ViewAs
from reversible.util import set_random_seeds
from reversible.revnet import init_model_params
from torch.nn import ConstantPad2d
import torch as th
from conv_spectral_norm import conv_spectral_norm
from disttransform import DSFTransform
# Fix seeds so weight initialization is reproducible.
set_random_seeds(2019011641, True)
# Invertible generator: each SubsampleSplitter halves the time axis and doubles
# channels (1 -> 2 -> 4 -> 8), ending in a flat 8-dim feature vector.
feature_model = nn.Sequential(
    SubsampleSplitter(stride=[2,1],chunk_chans_first=False),
    rev_block(2,32),
    rev_block(2,32),
    SubsampleSplitter(stride=[2,1],chunk_chans_first=True),
    rev_block(4,32),
    rev_block(4,32),
    SubsampleSplitter(stride=[2,1],chunk_chans_first=True),
    rev_block(8,32),
    rev_block(8,32),
    ViewAs((-1,8,1,1), (-1,8))
)
feature_model.cuda()
from braindecode.torch_ext.util import set_random_seeds
# Discriminator body: residual conv stacks with two average-poolings, flattened
# to 32 features, then wrapped in a class-conditional projection discriminator.
adv_model = nn.Sequential(
    nn.Conv2d(1,16, (3,1), stride=1, padding=(1,0),bias=True),
    res_block(16,32),
    res_block(16,32),
    nn.AvgPool2d((2,1)),
    res_block(16,32),
    res_block(16,32),
    nn.AvgPool2d((2,1)),
    res_block(16,32),
    res_block(16,32),
    ViewAs((-1,16,2,1), (-1,32)),
)
adv_model = ProjectionDiscriminator(adv_model,32,2,)
adv_model.cuda()
from reversible.training import hard_init_std_mean
# NOTE(review): n_dims is read off the channel axis of the inputs; the .data
# assignments below replace these tensors with feature-space statistics, so the
# initial size here only matters for allocation — confirm intended.
n_dims = inputs[0].shape[1]
n_clusters = 1
means_per_cluster = [th.autograd.Variable(th.ones(n_dims).cuda(), requires_grad=True)
                    for _ in range(n_clusters)]
# keep in mind this is in log domain so 0 is std 1
stds_per_cluster = [th.autograd.Variable(th.zeros(n_dims).cuda(), requires_grad=True)
                   for _ in range(n_clusters)]
# Initialize each cluster's mean/log-std from the empirical feature statistics.
for i_class in range(n_clusters):
    this_outs = feature_model(inputs[i_class])
    means_per_cluster[i_class].data = th.mean(this_outs, dim=0).data
    stds_per_cluster[i_class].data = th.log(th.std(this_outs, dim=0)).data
    # override phase
    means_per_cluster[i_class].data[len(stds_per_cluster[i_class])//2:] = 0
    stds_per_cluster[i_class].data[len(stds_per_cluster[i_class])//2:] = 0
from copy import deepcopy
# Separate Adam optimizers: generator weights, distribution parameters,
# and discriminator weights (WGAN-style betas=(0, 0.9)).
optimizer = th.optim.Adam(
    [
        {'params': list(feature_model.parameters()),
         'lr': 1e-3,
         'weight_decay': 0},], betas=(0,0.9))
optim_dist = th.optim.Adam(
    [
        {'params': means_per_cluster + stds_per_cluster,
         'lr': 1e-2,
         'weight_decay': 0},], betas=(0,0.9))
optim_adv = th.optim.Adam([{
    'params': adv_model.parameters(),
    'lr': 4e-3, 'weight_decay': 0.00}],#lr 0.0004
    betas=(0,0.9))
# +
from reversible.gaussian import get_gauss_samples
from reversible.uniform import get_uniform_samples
from ampphase import (amp_phase_to_x_y, get_amp_phase_samples, amp_phase_sample_to_x_y,
                     outs_to_amp_phase, to_amp_phase,
                     switch_to_other_class)
from reversible.gaussian import get_gauss_samples
from reversible.uniform import get_uniform_samples
from reversible.revnet import invert
import pandas as pd
from gradient_penalty import gradient_penalty
import time
# Per-epoch metrics; losses start as NaN so rows logged before the first
# corresponding update still have a value.
df = pd.DataFrame()
g_loss = np_to_var([np.nan],dtype=np.float32)
g_grad = np.nan
d_loss = np_to_var([np.nan],dtype=np.float32)
d_grad = np.nan
gradient_loss = np_to_var([np.nan],dtype=np.float32)
# -
n_epochs = 1001
rng = RandomState(349384)
# Adversarial training: 9 discriminator updates for every generator update
# (epochs where i_epoch % 10 == 0 update the generator/distribution instead).
for i_epoch in range(n_epochs):
    start_time = time.time()
    optim_adv.zero_grad()
    optimizer.zero_grad()
    optim_dist.zero_grad()
    # Encode real trials, strip the per-example feature mean and replace it with
    # random positive offsets, then invert back to signal space -> "fake" samples.
    outs = feature_model(inputs[0])
    mean_out = th.mean(outs, dim=1)
    demeaned_out = outs - mean_out.unsqueeze(1)
    new_means = np_to_var(np.abs(rng.randn(160) + 2), dtype=np.float32).cuda()
    remeaned_out = demeaned_out * 0 + new_means.unsqueeze(1)
    inverted = invert(feature_model, remeaned_out)
    y = np_to_var([0]).cuda()
    score_fake = adv_model(inverted, y)
    if (i_epoch % 10) != 0:
        # Discriminator step: WGAN loss with gradient penalty (weight 10).
        this_inputs = inputs[0]
        score_real = adv_model(this_inputs, y)
        gradient_loss = gradient_penalty(adv_model, this_inputs, inverted[:(len(this_inputs))], y)
        d_loss = -score_real.mean() + score_fake.mean() + gradient_loss * 10
        d_loss.backward()
        d_grad = np.mean([th.sum(p.grad **2).item() for p in adv_model.parameters()])
    else:
        # Generator step: maximize the critic score of the fakes.
        g_loss = -th.mean(score_fake)
        g_loss.backward()
        g_grad = np.mean([th.sum(p.grad **2).item() for p in feature_model.parameters()])
    if (i_epoch % 10) != 0:
        optim_adv.step()
    else:
        optimizer.step()
        optim_dist.step()
    end_time = time.time()
    # Losses not updated this epoch keep their previous (or initial NaN) value.
    epoch_row = {
        'd_loss': d_loss.item(),
        'g_loss': g_loss.item(),
        'grad_loss': gradient_loss.item(),
        #'o_real': th.mean(score_real).item(),
        #'o_fake': th.mean(score_fake).item(),
        'g_grad': g_grad,
        'd_grad': d_grad,
        'runtime': end_time -start_time,}
    df = df.append(epoch_row, ignore_index=True)
    # Periodic progress report (every ~5% of the epochs).
    if i_epoch % (max(1,n_epochs // 20)) == 0:
        display_text("Epoch {:d}".format(i_epoch))
        display(df.iloc[-5:])
    if i_epoch % (max(1,n_epochs // 20)) == 0:
        fig = plt.figure(figsize=(12,4))
        plt.plot(var_to_np(inverted.squeeze()).T, color='black', lw=0.7);
        display(fig)
        plt.close(fig)
        # Sweep sorted mean offsets added to the average feature vector and show
        # the resulting signals on a cool-warm color gradient.
        a = th.mean(outs.detach(), dim=0)
        b = np_to_var(sorted(np.abs(rng.randn(320) + 2)), dtype=np.float32).cuda()#th.linspace(0,10,100).cuda()
        c = a.unsqueeze(0) + b.unsqueeze(1)
        from matplotlib import rcParams, cycler
        inverted = invert(feature_model, c)
        cmap = plt.cm.coolwarm
        N = len(c)
        with plt.rc_context({'axes.prop_cycle': cycler(color=cmap(np.linspace(0, 1, N)))}):
            fig = plt.figure(figsize=(12,4))
            plt.plot(var_to_np(inverted).squeeze().T);
            display(fig)
            plt.close(fig)
# +
# Final comparison: spectrum of real trials (solid) vs freshly generated
# fakes (dashed) for the first class.
fig = plt.figure(figsize=(8,4))
for i_class in range(1):
    ins = var_to_np(inputs[i_class].squeeze())
    bps = np.abs(np.fft.rfft(ins.squeeze()))
    plt.plot(np.fft.rfftfreq(ins.squeeze().shape[1], d=1/ins.squeeze().shape[1]), np.median(bps, axis=0))
    outs = feature_model(inputs[0])
    mean_out = th.mean(outs, dim=1)
    demeaned_out = outs - mean_out.unsqueeze(1)
    new_means = np_to_var(np.abs(rng.randn(160) + 2), dtype=np.float32).cuda()
    remeaned_out = demeaned_out * 0 + new_means.unsqueeze(1)
    inverted = var_to_np(invert(feature_model, remeaned_out))
    bps = np.abs(np.fft.rfft(inverted.squeeze()))
    plt.plot(np.fft.rfftfreq(inverted.squeeze().shape[1], d=1/ins.squeeze().shape[1]), np.median(bps, axis=0),
            color=seaborn.color_palette()[i_class], ls='--')
plt.title("Spectrum")
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude')
plt.legend(['Real Right', 'Fake Right', 'Real Rest', 'Fake Rest'])
display(fig)
plt.close(fig)
| notebooks/wavelet/BhNo8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests as req
import pandas as pd
from config_local import census_api
from census import Census
# read places: (PlaceCode, StateCode) FIPS pairs for the Amazon candidate cities
xls = pd.ExcelFile('Project1_AmazonSites.xlsx')
places_df=xls.parse('AmazonCities', dtype=str)
places = places_df[['PlaceCode','StateCode']]
places
# NOTE(review): parse_cols was removed in pandas 1.0 (replaced by usecols), and
# codes_df is never used below — confirm whether this line is still needed.
codes_df = xls.parse('CensusFields',parse_cols=1)
# ACS 5-year estimates client, 2016 vintage
c = Census(census_api, year=2016)
# Query ACS 5-year estimates for every (place, state) pair and collect the
# housing/income variables into home_rent (one dict per city).
# Bug fix: home_rent was commented out, so the .append() below raised NameError.
home_rent = []
for p in places.values:
    # Census API geography filter: this place within this state
    place_state = {'for':'place:'+ str(p[0]),'in':'state:'+str(p[1])}
    census_data = c.acs5.get(("NAME", "B25077_001E", "B25064_001E", "B01003_001E",
                              "B19013_001E", "B25105_001E","B25004_004E","B25004_002E"
                              ),
                             place_state)
    print(census_data)
    home_rent.append({'City':census_data[0]['NAME'],
                      'Median home value':census_data[0]['B25077_001E'],
                      'Median gross rent':census_data[0]['B25064_001E'],
                      'Population':census_data[0]['B01003_001E'],
                      'Income':census_data[0]['B19013_001E'],
                      'Median Mortgage Cost':census_data[0]['B25105_001E'],
                      'Homeowner Vacancy Rate':census_data[0]['B25004_004E'],
                      'Rental Vacancy Rate':census_data[0]['B25004_002E']
                      })
    #time.sleep(10)
print(home_rent)
#get dataframe from the per-city census records
housing_df = pd.DataFrame(home_rent)
housing_df
# Estimate affordable housing cost/income: annual cost as a fraction of income
housing_df['home owner afford'] = (12*housing_df['Median Mortgage Cost'])/housing_df['Income']
housing_df['rent afford'] = (12*housing_df['Median gross rent'])/housing_df['Income']
housing_df
housing_df.to_csv("housing.csv")
# +
import seaborn as sns
import matplotlib.pyplot as plt
# Ownership affordability, cities sorted from cheapest to most expensive.
# NOTE(review): axis label says "% if income" — presumably "% of income".
plt.figure(figsize=(8,5))
housing_df = housing_df.sort_values('home owner afford')
sns.barplot(y='City',x='home owner afford',data=housing_df)
plt.xlabel("% if income to cover housing costs")
plt.title("Affordable House Ownership")
plt.show()
# -
# Same chart for rental affordability.
plt.figure(figsize=(8,5))
housing_df = housing_df.sort_values('rent afford')
sns.barplot(y='City',x='rent afford',data=housing_df)
plt.xlabel("% if income to cover renting costs")
plt.title("Affordable Rent")
plt.show()
# +
# combined chart: stack an "own" and a "rent" copy of the data so both
# affordability measures can be plotted side by side
# -
housing_df_temp_own = housing_df.copy()
housing_df_temp_rent = housing_df.copy()
housing_df_temp_own['RentOwn'] = 'own'
housing_df_temp_own['Afford'] = housing_df_temp_own['home owner afford']
housing_df_temp_rent['RentOwn'] = 'rent'
# bug fix: read the rent figure from the *rent* copy — the old code read it from
# the own copy (values happened to match only because both are copies of housing_df)
housing_df_temp_rent['Afford'] = housing_df_temp_rent['rent afford']
frames= [housing_df_temp_own, housing_df_temp_rent]
# ignore_index expects a bool; the string 'True' only worked by being truthy
housing_df_rentOwn = pd.concat(frames, ignore_index=True)
sns.factorplot(kind='bar',x='RentOwn',y='Afford', hue='City',data=housing_df_rentOwn, size=10)
plt.title("Housing Affordability")
plt.show()
# +
#By City: grouped bars, one pair (own/rent) per city
sns.barplot(x="Afford", y="City", hue="RentOwn", data=housing_df_rentOwn)
plt.title("Housing Affordability")
plt.show()
# +
# Vacancy: stacked homeowner + rental vacancy rates per city
vacancy_df = housing_df[['Homeowner Vacancy Rate','Rental Vacancy Rate']]
pa = vacancy_df.plot.bar(stacked=True,figsize=(20,7))
# Set a title for the chart
plt.title("Housing vacancy rate Per City")
# PandasPlot.set_xticklabels() can be used to set the tick labels as well
pa.set_xticklabels(housing_df["City"])
plt.show()
# -
| HousingSchools/Test/affortdablehousing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="bp3gVvgKqk2O" outputId="c3f70a81-ba21-41d2-84e3-2c7c95abbfa2" colab={"base_uri": "https://localhost:8080/"}
# !pip install -Uqq fastbook
import fastbook
# fastbook.setup_book()
from fastbook import *
from fastai.vision.all import *
# Libraries needed for web-scraping:
# !pip install kora -q
# !pip install Pillow
from kora.selenium import wd as webdriver
from pathlib import Path
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="5ZmVDnrGa0aJ" outputId="521e5929-b3f1-4d1e-e5c5-dd9aa0e62891"
# Download the Oxford-IIIT Pets dataset into /content/DataML and point at its images.
dest = '/content/DataML'
path = untar_data (URLs.PETS, dest=dest) / 'images'
# Earlier single-label (cat vs not-cat) loader, kept for reference:
# def is_cat(x):
#     return x[0].isupper()
# dls = ImageDataLoaders.from_name_func(
#     path=path,
#     fnames=get_image_files(path),
#     valid_pct=0.2, seed=42,
#     label_func=is_cat,
#     item_tfms=Resize(224)
# )
# + id="oJ2v7vqFbB5A"
import shutil
import shutil
def catagorize_files(input_path, dest_path):
    """Copy pet images from input_path into Cat/ and Dog/ subfolders of dest_path.

    Dataset convention: cat breed filenames start with an upper-case letter,
    dog breeds with a lower-case one. If dest_path already exists, the
    subfolders are assumed to exist too and files are simply copied into them.
    """
    if not os.path.exists(dest_path):
        os.mkdir(dest_path)
        os.mkdir(dest_path / 'Dog')
        os.mkdir(dest_path / 'Cat')
        # os.mkdir(dest_path / 'Keanu')
    # iterate the files directly instead of indexing by position
    for f in input_path.ls():
        label = 'Cat' if f.name[0].isupper() else 'Dog'
        shutil.copy(f, dest_path / label / f.name)
# + id="XtTAXPNubB0l"
# Split the downloaded images into Cat/ and Dog/ class folders.
dest_path = Path('/content/DataML/my_images')
catagorize_files(path, dest_path)
# + colab={"base_uri": "https://localhost:8080/", "height": 145} id="LiyYjuwP0W9Y" outputId="85eba73f-c887-4825-c1bf-ef5effa59e7a"
# Spot-check one of the copied cat images.
img_path = (dest_path/'Cat').ls()[34]
img = PILImage.create(img_path)
# show_image(img);
img.to_thumb(128)
# + [markdown] id="PM83IEXMn25E"
# We can check if it was done correctly:
# + id="WiV0X0hYn2OH"
# First ten cat files, for a quick sanity check of the split.
sample_list = list((dest_path/'Cat').ls()[:10])
# + id="2w-4sS6dda2G" outputId="d1528ace-2189-47ce-c365-9b41c665573f" colab={"base_uri": "https://localhost:8080/", "height": 198}
img = PILImage.create(sample_list[5])
# show_image(img);
img.to_thumb(256)
# + [markdown] id="G7goe0b3qKSc"
# Now, let's add Keanu:
#
# + [markdown] id="RX-gzfocbSRj"
# Import the web-scraping function
# + id="GbiBS-mHK44R"
from kora.selenium import wd as webdriver
from pathlib import Path
# + id="fzptjNYFqNnK" colab={"base_uri": "https://localhost:8080/"} outputId="293bde8a-7b0e-49b7-ad37-812ee9bd592f"
# !git clone https://github.com/spahlavian/web-scraper.git
# + id="PJKgN2dwc32M"
import sys
sys.path.append("/content/web-scraper")
from webScrapper import search_and_download
# + id="1CDkyn_hqs_T" outputId="abd7c718-cb12-47a3-cbc4-7f06f4f831b9" colab={"base_uri": "https://localhost:8080/"}
# Scrape 50 Google-Images results into a third class folder, 'Keanu'.
search_term = 'keanu reeves headshot'
dest_path = Path('/content/DataML/my_images')
search_and_download(
    search_term=search_term,
    wd=webdriver,
    target_path=dest_path/'Keanu',
    number_of_images=50,
    sleep_time=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 321} id="KuyWW6hXst9E" outputId="7f071fb2-c599-44b4-c052-60e085e11a40"
# Spot-check one of the scraped images.
img_path = get_image_files(dest_path/'Keanu')[3]
print(img_path)
img = PILImage.create(img_path)
show_image(img);
# + id="qz9fChkxO7i-"
def label_func(x):
    """Label an image by the name of its parent folder (Cat / Dog / Keanu)."""
    folder = x.parent
    return folder.name
# Three-class data loaders labelled by parent-folder name; 20% validation split.
dls = ImageDataLoaders.from_path_func(
    path=dest_path,
    fnames=get_image_files(dest_path),
    valid_pct=0.2,
    seed=42,
    label_func=label_func,
    item_tfms=Resize(224)
)
# + colab={"base_uri": "https://localhost:8080/", "height": 270, "referenced_widgets": ["516450692720471985359d3eb3086157", "b0767621df2e405e8168da1f77922a00", "5e8033d505d946c2aad206f84f38698f", "9163f5f5be8d4f2aacfe5aa47ae23b58", "<KEY>", "<KEY>", "2ee778c7cb184ab9891bff78fc5e57af", "46fd8f52974345d5979381f7f4fa1e4e"]} id="z_LJmQwi8KDc" outputId="8d03c70e-42d0-4c52-804f-189f8fd9a091"
# Fine-tune an ImageNet-pretrained ResNet-34 for one epoch.
learn = cnn_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(1)
# + colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["027d1fc8ea4a4d878c21493d9da5a2b3", "8ce09344fd894eae833f410c5e88c9f4", "fc67a3b79ceb4f8da444b270f7dabece"]} id="5SoCRDiV8KBU" outputId="ebfe4928-929d-4355-aed5-1b3e3b0ceedd"
# Widget to upload an image for an interactive prediction.
uploader = widgets.FileUpload()
uploader
# + colab={"base_uri": "https://localhost:8080/", "height": 227} id="vg6l8Jvd8J_H" outputId="1151253d-bfd0-40f0-a159-fda1809115d0"
# Classify the uploaded image and print the class plus per-class probabilities.
img = PILImage.create(uploader.data[0])
prediction,_,probs = learn.predict(img)
print(f'Prediction: {prediction}')
print(probs)
# + colab={"base_uri": "https://localhost:8080/"} id="v6Dv7ivk8J9g" outputId="1eb594a3-2e73-415b-b697-236fc309594a"
# Class order corresponding to the probability vector above.
dls.vocab
| Chapter1/Intro_to_DL_04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from math import floor, ceil
import numpy as np
def roundoff(N, n):
    """Round N to n significant figures.

    Scales N so that n digits sit left of the decimal point, adds 0.5 and
    floors; an exact .5 tie is nudged down when needed so the retained digit
    ends up even (round-half-to-even). The digit count assumes N is positive.
    """
    # Count the digits to the left of the decimal point.
    shifted = N
    num_int_digits = 0
    while shifted >= 1:
        shifted = shifted / 10
        num_int_digits = num_int_digits + 1
    scale_exp = n - num_int_digits
    scaled = N * pow(10, scale_exp)
    rounded = scaled + 0.5
    # Exact tie (scaled ends in .5): keep the nearer even digit.
    if float(rounded) == float(ceil(scaled)):
        tie_target = ceil(scaled)
        if (tie_target - 2) % 2 != 0:
            rounded = rounded - 1
    # Undo the scaling after truncation.
    return floor(rounded) / pow(10, scale_exp)
# + tags=[]
def np_roundoff(numbers , significant_digits):
    """Round every element of *numbers* in place to the given number of
    significant figures (delegates to roundoff) and return the sequence."""
    for idx, value in enumerate(numbers):
        numbers[idx] = roundoff(value, significant_digits)
    return numbers
# -
def GENP(A, b):
    '''
    Gaussian elimination with no pivoting, rounding every intermediate result
    to a fixed number of significant figures.
    % input: A is an n x n nonsingular matrix
    % b is an n x 1 vector
    % output: x is the solution of Ax=b.
    % post-condition: A and b have been modified.
    Relies on the module-level names r (= roundoff) and sd (significant
    digits), which are set in the driver cell below.
    '''
    n = len(A)
    if b.size != n:
        raise ValueError("Invalid argument: incompatible sizes between A & b.", b.size, n)
    # Forward elimination: zero out the column below each pivot.
    for pivot_row in range(n-1):
        for row in range(pivot_row+1, n):
            multiplier = A[row][pivot_row]/A[pivot_row][pivot_row]
            multiplier = r(multiplier, sd) # roundoff
            #the only one in this column since the rest are zero
            A[row][pivot_row] = multiplier
            for col in range(pivot_row + 1, n):
                A[row][col] = A[row][col] - multiplier*A[pivot_row][col]
                A[row][col]=r(A[row][col],sd) # roundoff
            #Equation solution column
            b[row] = b[row] - multiplier*b[pivot_row]
            b[row] = r(b[row],sd) #roundoff
    # Back substitution, rounding each component.
    x = np.zeros(n)
    k = n-1
    # NOTE(review): this initial x[k] is recomputed identically by the first
    # iteration of the while loop below (the dot product over an empty slice
    # is 0), so these two lines are redundant but harmless.
    x[k] = b[k]/A[k,k]
    x[k] = r(x[k],sd)
    while k >= 0:
        x[k] = (b[k] - np.dot(A[k,k+1:],x[k+1:]))/A[k,k]
        x[k] = r(x[k],sd)
        k = k-1
    return x
# +
# +
def GEPP(A, b, doPricing = True):
    '''
    Gaussian elimination with partial pivoting, rounding every intermediate
    result to a fixed number of significant figures.
    input: A is an n x n numpy matrix
           b is an n x 1 numpy array
    output: x is the solution of Ax=b
            with the entries permuted in
            accordance with the pivoting
            done by the algorithm
    post-condition: A and b have been modified.
    Relies on the module-level names r (= roundoff), nr (= np_roundoff) and
    sd (significant digits), which are set in the driver cell below.
    '''
    n = len(A)
    if b.size != n:
        raise ValueError("Invalid argument: incompatible sizes between"+
                         "A & b.", b.size, n)
    # k represents the current pivot row. Since GE traverses the matrix in the
    # upper right triangle, we also use k for indicating the k-th diagonal
    # column index.
    # Elimination
    for k in range(n-1):
        if doPricing:
            # Pivot: pick the row with the largest magnitude in column k.
            maxindex = abs(A[k:,k]).argmax() + k
            if A[maxindex, k] == 0:
                raise ValueError("Matrix is singular.")
            # Swap
            if maxindex != k:
                A[[k,maxindex]] = A[[maxindex, k]]
                b[[k,maxindex]] = b[[maxindex, k]]
        else:
            if A[k, k] == 0:
                raise ValueError("Pivot element is zero. Try setting doPricing to True.")
        #Eliminate rows below the pivot, rounding after each operation.
        for row in range(k+1, n):
            multiplier = A[row,k]/A[k,k]
            multiplier = r(multiplier,sd) # roundoff
            A[row, k:] = A[row, k:] - multiplier*A[k, k:]
            A[row,k:] = nr(A[row,k:],sd) # roundoff (element-wise, in place)
            b[row] = b[row] - multiplier*b[k]
            b[row] = r(b[row],sd) # roundoff
    # Back Substitution
    x = np.zeros(n)
    for k in range(n-1, -1, -1):
        x[k] = (b[k] - np.dot(A[k,k+1:],x[k+1:]))/A[k,k]
        x[k] = r(x[k], sd) # roundoff
    return x
# +
# Compare both elimination variants across several significant-digit settings
# on one random 2x2 system.
sds=[3,4,5,6]
eq_1 = np.random.uniform(low=5.0, high=10.3, size=(2,)).tolist()
eq_2 = np.random.uniform(low=5.0, high=20.3, size=(2,)).tolist()
coeff = np.array([eq_1, eq_2])
rhs = np.random.uniform(low=5.0, high=20.3, size=(2,)).tolist()
rhs = np.array(rhs)
rhs = rhs.T
# Module-level aliases read by GENP/GEPP.
r = roundoff
nr = np_roundoff
# -
print('Gauss elimination without pivoting ...')
print('coeffients - ', coeff)
print()
print('rhs -', rhs)
print()
for sig_digit in sds:
    sd = sig_digit
    # np.copy: GENP/GEPP mutate their arguments.
    res = GENP(np.copy(coeff), np.copy(rhs))
    print(f'significant_digit= {sd}, X1 = {res[0]}, X2= {res[1]}')
print('Gauss elimination with pivoting ...')
print('coeffients - ', coeff)
print()
print('rhs -', rhs)
print()
for sig_digit in sds:
    sd = sig_digit
    res = GEPP(np.copy(coeff), np.copy(rhs))
    print(f'significant_digit= {sd}, X1 = {res[0]}, X2= {res[1]}')
| assignment_1/q1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import os
import numpy as np
import pandas as pd
# Load the Titanic train/test CSVs and merge them (test rows get the sentinel
# Survived = -888 so the combined frame can be split again later).
raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')
train_df = pd.read_csv(train_file_path, index_col='PassengerId')
test_df = pd.read_csv(test_file_path, index_col='PassengerId')
type(train_df)
train_df.info()
test_df.info()
test_df['Survived'] = -888
df = pd.concat((train_df, test_df), axis=0)
df.info()
# Basic indexing/selection demos on the combined frame.
df.head()
df.head(10)
df.tail()
df.Name
df[['Name', 'Age']]
df.loc[5:10]
df.loc[5:10, 'Age' : 'Pclass']
df.loc[5:10, ['Survived', 'Fare', 'Embarked']]
df.iloc[5:10, 3:8]
male_passengers = df.loc[df.Sex == 'male', :]
print "Number of male passengers: {0}".format(len(male_passengers))
female_passengers = df.loc[df.Sex == 'female', :]
print "Number of female passengers: {0}".format(len(female_passengers))
first_class_male_passengers = df.loc[(df.Sex == 'male') & (df.Pclass == 1), :]
print "Number of 1st class male passengers: {0}".format(len(first_class_male_passengers))
# ## Summary statistics
df.describe()
# Centrality measures
print "Mean fare: {0}".format(df.Fare.mean())
print "Median fare: {0}".format(df.Fare.median())
# Dispersion measures
print "Min fare: {0}".format(df.Fare.min())
print "Max fare: {0}".format(df.Fare.max())
print "Fare range: {0}".format(df.Fare.max() - df.Fare.min())
print "25th percentile: {0}".format(df.Fare.quantile(0.25))
print "50th percentile: {0}".format(df.Fare.quantile(0.50))
print "75th percentile: {0}".format(df.Fare.quantile(0.75))
print "Variance fare: {0}".format(df.Fare.var())
print "Standart deviation fare: {0}".format(df.Fare.std())
# %matplotlib inline
df.Fare.plot(kind='box')
# ***
# Categorical counts; value_counts(normalize=True) gives proportions.
df.describe(include='all')
df.Sex.value_counts()
df.Sex.value_counts(normalize=True)
df[df.Survived != -888].Survived.value_counts()
df[df.Survived != -888].Survived.value_counts(normalize=True)
df.Pclass.value_counts()
df[df.Survived == 1].Pclass.value_counts()
df[df.Pclass == 1].Survived.value_counts()
df[df.Pclass == 2].Survived.value_counts()
# NOTE(review): the line below duplicates the previous one — Pclass == 3 was
# probably intended.
df[df.Pclass == 2].Survived.value_counts()
df.Pclass.value_counts().plot(kind='bar')
df.Sex.value_counts().plot(kind='pie')
df.Pclass.value_counts().plot(kind='bar',
                             rot=0,
                             title='Classwise passenger count',
                             color='c');
# ## Distributions
df.Age.plot(kind='hist', title='Ages of People');
df.Age.plot(kind='kde', title='Density plot for age');
df.Fare.plot(kind='hist', title='Fare');
print "skewness for age: {0:.2f}".format(df.Age.skew())
print "skewness for fare: {0:.2f}".format(df.Fare.skew())
df.plot.scatter(x="Age", y="Fare", title="Scatter plot: Age vs Fare", alpha=0.1);
df.plot.scatter(x="Pclass", y="Fare", title="Scatter plot: Passenger Class vs Fare", alpha=0.1);
# ## Grouping
df.groupby('Sex').Age.std()
df.groupby(['Pclass']).Fare.median()
df.groupby(['Pclass'])['Fare', 'Age'].median()
df.groupby(['Pclass']).agg({'Fare': 'mean', 'Age': 'median'})
# Nested-dict aggregation: several named statistics per column.
aggregations = {
    "Fare": {
        "mean_Fare": "mean",
        "median_Fare": "median",
        "max_Fare": max,
        "min_Fare": np.min
    },
    "Age": {
        "mean_Age": "mean",
        "median_Age": "median",
        "max_Age": max,
        "min_Age": np.min,
        "range_Age": lambda x: max(x)-min(x)
    },
}
df.groupby(['Pclass']).agg(aggregations)
df.groupby(['Pclass', 'Embarked']).Fare.median()
# ## Crostab
pd.crosstab(df.Sex, df.Pclass)
pd.crosstab(df.Sex, df.Pclass).plot(kind="bar")
# ## Pivot table
df.pivot_table(index="Sex", columns="Pclass", values="Age", aggfunc="mean")
# # Feature: Embarked
df[df.Embarked.isnull()]
df.Embarked.value_counts()
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].Embarked)
df.groupby(['Pclass', 'Embarked']).Fare.median()
df.groupby(['Pclass', 'Embarked']).Fare.mean()
# Fill the two missing embarkation ports with 'C' (chosen from the fare
# comparison above).
df.Embarked.fillna('C', inplace=True)
df[df.Embarked.isnull()]
df.info()
# ## Missing value: Fare
df[df.Fare.isnull()]
# Impute with the median fare of matching 3rd-class passengers embarked at S.
median_fare = df.loc[(df.Pclass == 3) & (df.Embarked == "S"), "Fare"].median()
print median_fare
df.Fare.fillna(median_fare, inplace=True)
df[df.Fare.isnull()]
# ## Missing Value: Age
df[df.Age.isnull()]
# Replace all missing values with mean value?
df.Age.plot(kind="hist", bins=20);
df.Age.mean()
# Replace all missing values with median value of each gender?
df.groupby('Sex').Age.mean()
df.groupby('Sex').Age.median()
df[df.Age.notnull()].boxplot("Age", "Sex")
# +
# age_sex_median = df.groupby('Sex').Age.transform('median')
# df.Age.fillna(age_sex_median, inplace=True)
# -
# Replace with median age of passenger class?
df[df.Age.notnull()].boxplot("Age", "Pclass")
# +
# pclass_age_median = df.groupby('Pclass').Age.transform('median')
# df.Age.fillna(pclass_age_median, inplace=True)
# -
# Replace with median age of title?
def get_title(name):
    """Extract the lower-cased title ('mr', 'mrs', ...) from a passenger name
    formatted as 'Last, Title. First'."""
    after_comma = name.split(',')[1]
    raw_title = after_comma.split('.')[0]
    return raw_title.strip().lower()
# Preview the raw titles and their distinct values.
df.Name.map(get_title)
df.Name.map(get_title).unique()
def get_title_from_list(name):
    """Map the raw title embedded in a passenger name to one of the normalized
    groups: Mr, Mrs, Miss, Master, Sir, Lady or Officer."""
    titles = {'mr': 'Mr',
              'mrs': 'Mrs',
              'miss': 'Miss',
              'master': 'Master',
              'don': 'Sir',
              'rev': 'Sir',
              'dr': 'Officer',
              'mme': 'Mrs',
              'ms': 'Mrs',
              'major': 'Officer',
              'lady': 'Lady',
              'sir': 'Sir',
              'mlle': 'Miss',
              'col': 'Officer',
              'capt': 'Officer',
              'the countess': 'Lady',
              'jonkheer': 'Sir',
              'dona': 'Sir'
              }
    # The title sits between the comma and the first period.
    raw_title = name.split(',')[1].split('.')[0]
    return titles[raw_title.strip().lower()]
# Fill missing ages with the median age of each title group.
df['Title'] = df.Name.map(get_title_from_list)
df.head()
df[df.Age.notnull()].boxplot('Age', 'Title')
title_age_median = df.groupby('Title').Age.transform('median')
df.Age.fillna(title_age_median, inplace=True)
df.info()
# ## Outliers
df.Age.plot(kind="hist", bins=20)
df[df.Age > 70]
# ## Fare
df.Fare.plot(kind="hist", bins=10)
df.boxplot("Fare")
df[df.Fare == df.Fare.max()]
# Log transform to tame the fare skew (+1 handles zero fares).
log_fare = np.log(df.Fare + 1)
log_fare.plot(kind="hist")
# Quartile-based fare bins.
pd.qcut(df.Fare, 4)
pd.qcut(df.Fare, 4, labels = ["very low", "low", "high", "very high"])
pd.qcut(df.Fare, 4, labels = ["very low", "low", "high", "very high"]).value_counts().plot(kind="bar")
df['Fare_Bin'] = pd.qcut(df.Fare, 4, labels = ["very_low", "low", "high", "very_high"])
# ## Feature Engineering
# ### Age state
df["AgeState"] = np.where(df["Age"] >= 18, "Adult", "Child")
df.AgeState.value_counts()
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].AgeState)
# ### Family size
# Siblings/spouses + parents/children + the passenger themself.
df["FamilySize"] = df.Parch + df.SibSp + 1
df.FamilySize.plot(kind="hist")
df[df.FamilySize == df.FamilySize.max()]
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].FamilySize)
# ### Motherhood
df["IsMother"] = np.where((df.Sex == "female") & (df.Parch > 0) & (df.Age > 18) & (df.Title != "Miss"), 1, 0)
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].IsMother)
# ### Deck
df.Cabin
df.Cabin.unique()
# The lone "T" cabin is treated as an anomaly and blanked out.
df[df.Cabin=="T"]
df.loc[df.Cabin == "T", "Cabin"] = np.NaN
df.Cabin.unique()
# +
def get_deck(cabin):
    """Return the deck letter (first character of the cabin code, upper-cased),
    or "Z" for missing cabins.

    Returns a plain str; the previous np.where call on a scalar produced a
    0-d ndarray, which stringified the same but was an odd type for a label.
    """
    return str(cabin)[0].upper() if pd.notnull(cabin) else "Z"
df["Deck"] = df.Cabin.map(get_deck)
# -
df.Deck.value_counts()
pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].Deck)
# ## Categorical feature engineering
# One-hot encode the categorical features; keep IsMale as a single binary flag.
df['IsMale'] = np.where(df.Sex=="male", 1 ,0)
df = pd.get_dummies(df,
                   columns=["Deck", "Pclass", "Title",
                            "Fare_Bin", "Embarked", "AgeState"])
print df.info()
# ### Drop and reorder columns
# Drop the raw columns now superseded by engineered features and put
# Survived first.
df.drop(["Cabin", "Name", "Ticket", "Parch", "SibSp", "Sex"], axis=1, inplace=True)
columns = [c for c in df.columns if c != "Survived"]
columns = ["Survived"] + columns
df = df[columns]
df.info()
# ### Save processed dataset
# Split the combined frame back into train (real labels) and test (sentinel
# Survived == -888, label column dropped).
processed_data_path = os.path.join(os.path.pardir, "data", "processed")
write_train_path = os.path.join(processed_data_path, "train.csv")
write_test_path = os.path.join(processed_data_path, "test.csv")
# df[df.Survived != -888].to_csv("C:/Users/z003t26s.AD001/Documents/doing_data_science_with_python/titanic/src/data/processed/train.csv")
df[df.Survived != -888].to_csv(write_train_path)
columns = [c for c in df.columns if c != "Survived"]
#df.loc[df.Survived == -888, columns].to_csv("C:/Users/z003t26s.AD001/Documents/doing_data_science_with_python/titanic/src/data/processed/test.csv")
df.loc[df.Survived == -888, columns].to_csv(write_test_path)
# ## Advanced visualization
import matplotlib.pyplot as plt
# %matplotlib inline
plt.hist(df.Age)
plt.hist(df.Age, bins=20)
plt.show()
plt.hist(df.Age, bins=20)
plt.title("Histogram : Age")
plt.xlabel("Age")
plt.ylabel("Frequency")
plt.show()
# +
# Side-by-side Fare and Age histograms on one figure.
f, (ax1, ax2) = plt.subplots(1,2,figsize=(14,3))
ax1.hist(df.Fare, bins=20, color="cyan")
ax1.set_title("Fare histogram")
ax1.set_xlabel("Fare")
ax1.set_ylabel("Frequency")
ax2.hist(df.Age, bins=20, color="tomato")
ax2.set_title("Age histogram")
ax2.set_xlabel("Age")
ax2.set_ylabel("Frequency")
plt.show()
# -
| notebooks/2_exploring_and_processing_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src='https://www.icos-cp.eu/sites/default/files/2017-11/ICOS_CP_logo.png' width="400" align="right"/> <br clear="all" />
# # Characterization of atmospheric measurement stations
# For questions and feedback contact <EMAIL>
#
# To use the tool, <span style="background-color: #FFFF00">run all the Notebook cells</span> (see image below).
#
# <img src="network_characterization/screenshots_for_into_texts/how_to_run.PNG" align="left"> <br clear="all" />
#
# ### Background
# This notebook makes it possible to generate characterisations of atmospheric stations based on their influence areas. The influence area of a station is determined by the transport model STILT and the result is called a footprint. To generate the characterisation visualizations the average footprint for the user-defined date range is combined with an anthropogenic emissions database, a biogenic flux model and ancillary data layers. Any point in the STILT domain with footprints (not necessarily existing stations) will appear in the STILT stations dropdown and can be analyzed. More details are found in <a href="./station_characterization/specifications.pdf" target="_blank">the specifications document</a>.
# #### Output
# If the user chooses to save the output, it will end up in a folder called "<a href= "../output" target=”blank”>output</a>" in the home directory. In the output folder there will be a subfolder called “station_characterization” once the tool has been run with "<ins>Save the output</ins>" selected.
# #### Settings
# A "settings file" (settings.json) which specifies all the settings used to produce the output is also downloaded with the output. It can in turn be uploaded to replicate the run in the future by uploading it to the optional "Load settings from file" which is the last option in the tool.
# + language="javascript"
# IPython.OutputArea.prototype._should_scroll = function(lines) {return false;}
# -
import sys
sys.path.append('./station_characterization')
import gui
| notebooks/icos_jupyter_notebooks/station_characterization.ipynb |