code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import json
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Conv2D
from keras import backend as K
from collections import OrderedDict
def format_decimal(arr, places=6):
    """Round every value in *arr* to *places* decimal places (scale, round, unscale)."""
    scale = 10 ** places
    return [round(value * scale) / scale for value in arr]
DATA = OrderedDict()

# ### pipeline 0

# +
# Seed for reproducible input data and weight generation.
random_seed = 1000
data_in_shape = (8, 8, 2)  # H, W, C (channels_last)

# Three stacked 3x3 'valid' ReLU convolutions; each shrinks H and W by 2.
layers = [
    Conv2D(4, (3,3), strides=(1,1), padding='valid', data_format='channels_last', activation='relu', use_bias=True),
    Conv2D(4, (3,3), strides=(1,1), padding='valid', data_format='channels_last', activation='relu', use_bias=True),
    Conv2D(4, (3,3), strides=(1,1), padding='valid', data_format='channels_last', activation='relu', use_bias=True)
]

# Chain the layers into a functional model: first layer, middle layers, then last.
input_layer = Input(shape=data_in_shape)
x = layers[0](input_layer)
for layer in layers[1:-1]:
    x = layer(x)
output_layer = layers[-1](x)
model = Model(inputs=input_layer, outputs=output_layer)

# Random input in [-1, 1).
np.random.seed(random_seed)
data_in = 2 * np.random.random(data_in_shape) - 1

# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(random_seed + i)
    weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)

# Single-sample forward pass; record input, weights, and expected output
# (rounded to 6 decimals) as a test fixture.
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
DATA['pipeline_00'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# ### export for Keras.js tests
# +
import os

# Write the generated fixtures to the Keras.js test-data directory.
filename = '../../test/data/pipeline/00.json'
# exist_ok avoids the check-then-create race of os.path.exists + os.makedirs.
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
    json.dump(DATA, f)
# -
print(json.dumps(DATA))
| notebooks/pipeline/pipeline_00.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 3473, "status": "ok", "timestamp": 1650381592643, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="7YU1R8XeDB1p" outputId="dd5c5ab9-fab2-4187-92ce-dd5a993fe986"
import pickle
import numpy as np
import time
import random
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2267, "status": "ok", "timestamp": 1650381594902, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="4PM6_C2tDNqd" outputId="933b663c-f672-40fb-c55c-5b9abfa1b11b"
# from google.colab import drive
# drive.mount('/content/drive')
# + executionInfo={"elapsed": 17, "status": "ok", "timestamp": 1650381594904, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="E7zO-f4RDUHn"
# FEATURES_FOLDER = '/content/drive/MyDrive/spring22/thesis/gait-seq/features/'
FEATURES_FOLDER = '../../features/'
# + [markdown] id="5ZUzQoYQDB1u"
# ## Preparing dataset:
# + executionInfo={"elapsed": 15, "status": "ok", "timestamp": 1650381594906, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="Pjq7ejZdDB1w"
def prepare_dataset(features: str = 'upcv1/train/coordinates'):
    """Load a pickled (X, y) gait feature set from FEATURES_FOLDER and print a summary.

    X has shape (sequences, window, channels); y holds one subject id per sequence.
    """
    print(f"Getting {features} features...")
    with open(file=f'{FEATURES_FOLDER}{features}.pickle', mode='rb+') as file:
        X, y = pickle.load(file=file)
    S, W, C = X.shape
    # Sanity check: exactly one label per sequence.
    assert S == y.shape[0]
    print(f"""
{X.shape}, {y.shape}
Selected sequences: {S}
Sequence window size: {W}
Feature vector size: {C}
Unique subjects: {len(np.unique(y))}""")
    return X, y
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23634, "status": "ok", "timestamp": 1650381618528, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="fnttuokZD1b9" outputId="ac2bdd91-0afc-48a1-af97-a01c48072c29"
"""
-- dataset: upcv1, ks20, oumvlp
-- augmentation: rotation and flipping
-- feature combinations: coordinates, velocity, distance, orientation
-- dimentionality reduction: PCA w/ varying dimension
"""
FEATURES = [
('oumvlp/v0/train/coordinates', 'oumvlp/v0/test/coordinates'),
]
train_features, test_features = FEATURES[0]
X_train, y_train = prepare_dataset(features=train_features)
X_test, y_test = prepare_dataset(features=test_features)
# +
def get_pca(X, R=None):
    """Fit a PCA on X of shape (C, F, L), flattened to (C*F, L).

    R - desired reduced dimension size; defaults to the feature length L.
    """
    n_clips, n_frames, feat_len = X.shape
    if R is None:
        R = feat_len
    flat = X.reshape(n_clips * n_frames, feat_len)
    return PCA(n_components=R).fit(flat)
def transform_pca(pca, X):
    """Apply a fitted PCA to X (C, F, L), preserving the leading two axes."""
    n_clips, n_frames, _ = X.shape
    flat = X.reshape(n_clips * n_frames, -1)
    return pca.transform(flat).reshape(n_clips, n_frames, -1)
# +
# pca = get_pca(X_train, R=None)
# X_train = transform_pca(pca, X_train)
# X_test = transform_pca(pca, X_test)
# X_train.shape
# + [markdown] id="6flK1VDeDB1z"
# ## Set Parameters:
#
# + executionInfo={"elapsed": 26, "status": "ok", "timestamp": 1650381618994, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="sk2sarpPDB10"
# Number of distinct subjects to classify.
n_classes = len(np.unique(y_train))
# n_steps - timesteps per series
# n_input - num input parameters per timestep
training_data_count, n_steps, n_input = X_train.shape
test_data_count, _, _ = X_test.shape
# Step counter, incremented by the optimizer on every training step.
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.convert_to_tensor(0.001)
# Hidden layer num of features
n_hidden = 64
batch_size = 128
epochs = 30
# + [markdown] id="ODzv6GojDB11"
# ## Utility functions for training:
# + executionInfo={"elapsed": 24, "status": "ok", "timestamp": 1650381618995, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="dW8zJ4-gDB12"
def LSTM(_X, _weights, _biases):
    """Build the two-layer LSTM classifier graph and return unscaled class logits.

    _X: (batch, n_steps, n_input) placeholder. Relies on the module-level
    n_steps / n_input / n_hidden hyper-parameters.
    """
    # model architecture based on "guillaume-chevalier" and "aymericdamien" under the MIT license.
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    # Flatten to (n_steps * batch, n_input) for the input projection.
    _X = tf.reshape(_X, [-1, n_input])
    # Rectified Linear Unit activation function used
    _X = tf.nn.relu(tf.matmul(_X, _weights['hidden']) + _biases['hidden'])
    # Split data because rnn cell need a list of inputs for the RNN inner loop
    _X = tf.split(_X, n_steps, 0)
    # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
    lstm_cell_1 = tf.nn.rnn_cell.LSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cell_2 = tf.nn.rnn_cell.LSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
    outputs, states = tf.nn.static_rnn(lstm_cells, _X, dtype=tf.float32)
    # A single output is produced, in style of "many to one" classifier, refer to http://karpathy.github.io/2015/05/21/rnn-effectiveness/ for details
    lstm_last_output = outputs[-1]
    # Linear activation
    return tf.matmul(lstm_last_output, _weights['out']) + _biases['out']
def extract_batch_size(_train, _labels, _unsampled, batch_size):
    """Draw `batch_size` samples, without replacement, from _train/_labels.

    Indices are picked at random from _unsampled, which is mutated in place
    (chosen indices are removed) and also returned so the caller can track
    the remaining pool.
    """
    out_shape = list(_train.shape)
    out_shape[0] = batch_size
    batch_s = np.empty(out_shape)
    batch_labels = np.empty((batch_size, 1))
    for slot in range(batch_size):
        # Pick one remaining index at random and consume it.
        idx = random.choice(_unsampled)
        batch_s[slot] = _train[idx]
        batch_labels[slot] = _labels[idx]
        _unsampled.remove(idx)
    return batch_s, batch_labels, _unsampled
def one_hot(y_):
    """One-hot encode integer labels.

    e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
    Width is max(label) + 1; returns float rows of an identity matrix.
    """
    flat = y_.reshape(len(y_))
    depth = int(np.max(flat)) + 1
    return np.eye(depth)[np.array(flat, dtype=np.int32)]
# + [markdown] id="-yXsz_hODB14"
# ## Build the network:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 8955, "status": "ok", "timestamp": 1650381627930, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="0xhQBOfUDB14" outputId="7f4a6b44-b595-400d-af22-e4e8979018d3"
# Graph input/output
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])  # one-hot labels
# Graph weights
weights = {
    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = LSTM(x, weights, biases)
# Loss, optimizer and evaluation
# L2 regularization over every trainable variable in the graph.
l2 = 0.001 * sum(
    tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
)
# L2 loss prevents this overkill neural network to overfit the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2 # Softmax loss
# if decaying_learning_rate:
# learning_rate = tf.train.exponential_decay(init_learning_rate, global_step*batch_size, decay_steps, decay_rate, staircase=True)
#decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) # exponentially decayed learning rate
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,global_step=global_step) # Adam Optimizer
# Fraction of samples whose argmax prediction matches the one-hot label.
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# + [markdown] id="ZAd9C9BfDB16"
# ## Train the network:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 376185, "status": "ok", "timestamp": 1650382004110, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="fiCXokJ5DB16" outputId="42814d33-5909-44ea-d85f-4ee7e2387bd5"
print(f"Training on {train_features}")
test_losses = []
test_accuracies = []
train_losses = []
train_accuracies = []
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
# Perform Training steps with "batch_size" amount of data at each loop.
# Elements of each batch are chosen randomly, without replacement, from X_train,
# restarting when remaining datapoints < batch_size
step = 1
time_start = time.time()
unsampled_indices = list(range(0,len(X_train)))
for i in range(epochs):
if len(unsampled_indices) < batch_size:
unsampled_indices = list(range(0,len(X_train)))
batch_xs, raw_labels, unsampled_indicies = extract_batch_size(X_train, y_train, unsampled_indices, batch_size)
batch_ys = one_hot(raw_labels)
# check that encoded output is same length as num_classes, if not, pad it
if len(batch_ys[0]) < n_classes:
temp_ys = np.zeros((batch_size, n_classes))
temp_ys[:batch_ys.shape[0],:batch_ys.shape[1]] = batch_ys
batch_ys = temp_ys
# Fit training using batch data
_, loss, acc = sess.run(
[optimizer, cost, accuracy],
feed_dict={
x: batch_xs,
y: batch_ys
}
)
train_losses.append(loss)
train_accuracies.append(acc)
# Evaluation on the test set (no learning made here - just evaluation for diagnosis)
test_loss, test_acc = sess.run(
[cost, accuracy],
feed_dict={
x: X_test,
y: one_hot(y_test)
}
)
test_losses.append(test_loss)
test_accuracies.append(test_acc)
# print(f"Epoch #{i} Loss = {loss:.2f} Accuracy = {acc:.2f}")
if i % 100 == 0:
print(f"Epoch {i}/{epochs} Loss={loss:.2f} Accuracy={acc:.2f} Test Loss={test_loss:.2f} Test Accuracy={test_acc:.2f}")
# Performance on test data
one_hot_predictions, accuracy, final_loss = sess.run(
[pred, accuracy, cost],
feed_dict={
x: X_test,
y: one_hot(y_test)
}
)
# test_losses.append(final_loss)
# test_accuracies.append(accuracy)
print(f"Training finished. Loss = {final_loss} Accuracy = {accuracy}")
print(f"Duration: {time.time() - time_start} sec")
# + [markdown] id="I8E-J5qiDB17"
# ## Results:
# + colab={"base_uri": "https://localhost:8080/", "height": 573} executionInfo={"elapsed": 1020, "status": "ok", "timestamp": 1650382218001, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="Szch-FjTTlLd" outputId="dab87d08-a3b2-4865-95ad-8c358fd6d6c6"
# summarize history for accuracy
plt.plot(train_accuracies)
plt.plot(test_accuracies)
plt.title('model accuracy')
plt.ylabel('accuracy')
# NOTE(review): the x axis is training iterations (one point per loop pass),
# labelled 'epoch' for continuity with the training cell.
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')
plt.show()
# summarize history for loss
plt.plot(train_losses)
plt.plot(test_losses)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 960} executionInfo={"elapsed": 1034, "status": "ok", "timestamp": 1650382227011, "user": {"displayName": "<NAME>", "userId": "11682434087231211858"}, "user_tz": -360} id="7UBqfiEqDB18" outputId="e70714d5-0221-4be1-fb6b-2e1857f23b69"
# Class predictions from the final logits.
predictions = one_hot_predictions.argmax(1)
# `accuracy` here is the scalar evaluated in the training cell (it shadowed the tensor).
print("Testing Accuracy: {}%".format(100*accuracy))
print("Precision: {}%".format(100*metrics.precision_score(y_test, predictions, average="weighted")))
print("Recall: {}%".format(100*metrics.recall_score(y_test, predictions, average="weighted")))
print("f1_score: {}%".format(100*metrics.f1_score(y_test, predictions, average="weighted")))
print("Confusion Matrix: Created using test set of {} datapoints, normalised to % of each class in the test dataset".format(len(y_test)))
# NOTE(review): this name shadows metrics.confusion_matrix for the rest of the session.
confusion_matrix = metrics.confusion_matrix(y_test, predictions)
normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100
# Plot Results:
plt.figure(figsize=(12, 12))
plt.imshow(
    normalised_confusion_matrix,
    interpolation='nearest',
    cmap=plt.cm.Blues
)
plt.title("Confusion matrix \n(normalised to % of total test data)")
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# -
| notebooks/lstm-gait-classifier-oumvlp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
# # '96 Sonics: 3 Regex Methods to Split Names
# ## Table of Contents
# 1. Introduction
# 2. Install & Import Packages
# 3. Scrape and Display Logo
# 4. Scrape Roster and Convert to Dataframe
# 5. **Method 1**: .replace()
# 6. **Method 2**: splitname function, .apply()
# 7. **Method 3**: .extract(), dictionarize
# ## 1. Introduction
#
# Today, we'll walk through 3 simple regex methods to split names into first and last names in a dataframe. We'll work with roster data for the '96 Seattle SuperSonics, one of my all-time favorite teams. Led by Gary Payton and coach George Karl, they reached the Finals that year, losing to the Bulls in 6.
#
# We'll use BeautifulSoup to scrape roster data from Basketball Reference after a search for Seattle SuperSonics (https://www.basketball-reference.com/teams/SEA/1996.html) and convert to a dataframe. Once we have our players, we'll split player names into first and last names using 3 regex methods, walking through the regex logic and methodology in each.
#
# This simple exercise could be useful for anyone working with string name fields in, for example, customer, applicant, or patient data where first and last names are combined. A similar logic can be used for any string fields that need to be separated (e.g., countries and their capitals). Let's dive in.
# ## 2. Install & Import Packages
# +
import pandas as pd
import numpy as np
# Web scraping using BeautifulSoup and converting to pandas dataframe
import requests
import urllib.request
import json # library to handle JSON files
from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe
from urllib.request import urlopen
from bs4 import BeautifulSoup
# !pip install lxml # Install lxml parser as it's faster than the built-in html parser
# Displaying images
from IPython.display import Image
from IPython.core.display import HTML
# -
# ## 3. Scrape and Display Logo
# Load image using Image method we imported from iPython display and image url
Image(url= "https://d2p3bygnnzw9w3.cloudfront.net/req/202008171/tlogo/bbr/SEA-1996.png", width=180, height=90)
# ## 4. Scrape Roster & Convert to Dataframe
# Specify url and get html from page
url = "https://www.basketball-reference.com/teams/SEA/1996.html"
html = urlopen(url)
# Create BeautifulSoup object using lxml parser we imported
soup = BeautifulSoup(html, 'lxml')
type(soup)
# Print title of the page, we see that it's the 1995-96 SeattleSupersonics Roster and Stats page
title = soup.title
print(title)
# extracting the raw table inside that webpage
table = soup.find_all('table')
# Scrape just the table for Sonics roster, which is the 1st table and convert it into a dataframe
sonics = pd.read_html(str(table[0]), index_col=None, header=0)[0]
sonics
# Keep only Player' column as that's the only one we'll need here
keep=['Player']
sonics = sonics[keep]
sonics
# Check dataframe info, we have 13 players all of data type object
sonics.info()
# ## 5. Method 1: .replace()
# Create columns for first and last names and populate each with full player names. For first names, we'll replace all characters after a space with empty string. For last names, we'll replace all characters before a space with empty string.
# +
# So I want to create two new columns and apply a regex to the projection of the "Player" column.
# Create 'First' column as copy of 'Player' column,
sonics['First']=sonics['Player']
# Replace all characters after the space with an empty string
# [ ].*: [ ] means space, . means any single character, * means an unlimited number of times
sonics['First']=sonics['First'].replace("[ ].*", "", regex=True)
# Create 'Last' column as copy of 'Player' column
sonics['Last']=sonics['Player']
# Replace all characters before the space with an empty string
# .*[ ]: . means any single character, * means an unlimited number of times, [ ] means space
sonics["Last"]=sonics["Last"].replace(".*[ ]", "", regex=True)
# Taking a look, we see the names split into first and last name columns
sonics
# -
# ## 6. Method 2: splitname function, .apply()
# We'll define a function splitname with an argument for row, which is a Series object of a single row indexed by column values. For each row, we'll extract the first name by creating a 'First' column for which we'll split the player name on the space (" ") and take the first result ([0]) as the new entry in the series. We'll do the same for 'Last', but extract the last result ([-1]) as the new entry in the series. Lastly, we'll use the apply() function on the player column (.apply automatically merges series with dataframe).
# Delete 'First' and 'Last' columns so we have only our original 'Player' column
del(sonics['First'], sonics['Last'])
sonics.head()
# +
# Define splitname function that splits string into two pieces on single row of data
def splitname(row):
    """Add 'First'/'Last' entries to *row* by splitting row['Player'] on spaces."""
    name_parts = row['Player'].split(" ")
    row['First'] = name_parts[0]    # first token
    row['Last'] = name_parts[-1]    # last token (skips any middle names)
    return row
# Apply the splitname function to each row of the players dataframe
# (.apply with axis='columns' merges the returned series back into the dataframe)
sonics = sonics.apply(splitname, axis='columns')
# Taking a look, we see the names split into first and last name columns
sonics
# -
# ## 7. Method 3: .extract(), dictionarize
# The .extract function is part of the .str attribute of a Series. It takes a regex input of groups we want to capture that are then output as columns.
# Delete 'First' and 'Last' columns so we have only our original 'Player' column
del(sonics['First'], sonics['Last'])
sonics.head()
# +
# Define regex pattern. Raw string (r"...") so the backslashes reach the regex
# engine untouched — "\w" / "\-" in a plain string are invalid escape sequences
# (SyntaxWarning, and a SyntaxError in future Python versions).
# (^[\w]*): () for 1st group, ^ signifies start of string, [\w] means any word character, * means unlimited number of times
# (?:.* ): () for 2nd group, ?: means non-capturing; matches everything up to and including the last space
# ([\w\-]*$): () for 3rd group, [\w\-] means any word character or hyphen (for hyphenated last names), * means unlimited number of times, $ signifies end of string
pattern = r"(^[\w]*)(?:.* )([\w\-]*$)"
# Extract pattern from Player names series and output as columns
sonics['Player'].str.extract(pattern)
# +
# We can dictionarize to get columns labeled First and Last (instead of the 0 and 1 column headings above).
# Raw string for the same escape-sequence reason as the previous cell.
# (?P<First>^[\w]*): () for 1st group, ?P<First> means dictionary label 'First', ^ signifies start of string, [\w] means any word character, * means unlimited number of times
# (?:.* ): () for 2nd group, ?: means non-capturing; matches everything up to and including the last space
# (?P<Last>[\w\-]*$): () for 3rd group, ?P<Last> means dictionary label 'Last', [\w\-] means any word character or hyphen (for hyphenated last names), * means unlimited number of times, $ signifies end of string
pattern = r"(?P<First>^[\w]*)(?:.* )(?P<Last>[\w\-]*$)"
# Now call extract
names = sonics['Player'].str.extract(pattern)
names
# -
# Add these first and last names to our sonics dataframe
sonics['First']=names['First']
sonics['Last']=names['Last']
sonics
| Sonics, Clean and Transform Using Regex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import pandas as pd
import json
import math
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfpage import PDFTextExtractionNotAllowed
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import (
LAParams,
LTContainer,
LTTextLine,
LTFigure
)
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
import matplotlib.patches as patches
from pdf2image import convert_from_path, convert_from_bytes
import pdfminer.high_level
def extract_objects(layout, extracted_objects):
    """Depth-first walk of a pdfminer layout tree, appending text/image records.

    Text lines and figures become dicts with 'type' and 'bbox' (text entries
    also carry the line's string); every child is then recursed into so
    nested containers are covered too.
    """
    if not isinstance(layout, LTContainer):
        # Leaf object: nothing to iterate over.
        return
    for item in layout:
        if isinstance(item, LTTextLine):
            record = {
                "type": "text",
                "text": item.get_text(),
                "bbox": {
                    "x1": item.bbox[0],
                    'x2': item.bbox[2],
                    'y1': item.bbox[1],
                    'y2': item.bbox[3]
                }
            }
            extracted_objects.append(record)
        elif isinstance(item, LTFigure):
            record = {
                "type": "image",
                "bbox": {
                    "x1": item.bbox[0],
                    'x2': item.bbox[2],
                    'y1': item.bbox[1],
                    'y2': item.bbox[3]
                }
            }
            extracted_objects.append(record)
        # Recurse into every child, container or not.
        extract_objects(item, extracted_objects)
_p = []  # debug: retains every processed PDFPage for interactive inspection
def extract_pdf(pdf_file_path):
    """Parse a PDF file and return a list of per-page dicts.

    Each entry holds the page's media-box bounds under 'bbox' and the
    text/image objects found by pdfminer under 'contents'.

    Raises PDFTextExtractionNotAllowed if the document forbids extraction.
    """
    extracted_page_data = []
    # BUG FIX: previously opened the module-level `path` instead of the
    # `pdf_file_path` argument, so the parameter was silently ignored.
    with open(pdf_file_path, "rb") as f:
        parser = PDFParser(f)
        document = PDFDocument(parser)
        if not document.is_extractable:
            raise PDFTextExtractionNotAllowed
        laparams = LAParams(all_texts=True)
        rsrcmgr = PDFResourceManager()
        device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        pages = list(PDFPage.create_pages(document))
        for page_no, page in enumerate(pages):
            interpreter.process_page(page)
            layout = device.get_result()
            global _p
            _p.append(page)
            contents = []
            extract_objects(layout, contents)
            page_data = {
                "bbox": {
                    "x1": page.mediabox[0],
                    "x2": page.mediabox[2],
                    "y1": page.mediabox[1],
                    "y2": page.mediabox[3],
                },
                "contents": contents
            }
            extracted_page_data.append(page_data)
    return extracted_page_data
def write_text(page):
    """Print every 'text' content entry on *page*, one blank line after each."""
    text_entries = (c for c in page['contents'] if c['type'] == 'text')
    for entry in text_entries:
        print(entry['text'])
        print()
# +
# page_no = 0
# plot_page(pages[page_no], pdf_images[page_no])
# -
def calculate_distance_of_two_box(bbox1, bbox2):
    """Shortest Euclidean gap between two axis-aligned boxes (0 when they overlap).

    Coordinates may be given in either order; each axis is normalised first.
    """
    left1, right1 = sorted((bbox1['x1'], bbox1['x2']))
    bottom1, top1 = sorted((bbox1['y1'], bbox1['y2']))
    left2, right2 = sorted((bbox2['x1'], bbox2['x2']))
    bottom2, top2 = sorted((bbox2['y1'], bbox2['y2']))

    # Horizontal gap: zero when the x ranges overlap.
    if (left1 <= left2 <= right1) or (left2 <= left1 <= right2):
        x_gap = 0
    else:
        x_gap = min(abs(right1 - left2), abs(left1 - right2))

    # Vertical gap: zero when the y ranges overlap.
    if (bottom1 <= bottom2 <= top1) or (bottom2 <= bottom1 <= top2):
        y_gap = 0
    else:
        y_gap = min(abs(top1 - bottom2), abs(bottom1 - top2))

    return math.sqrt(x_gap**2 + y_gap**2)
def make_distance_matrix(contents):
    """Symmetric pairwise distance matrix over a page's contents.

    Mixed-type pairs (text vs image) are forced far apart (999) so they never
    share a cluster; same-type pairs use the geometric box distance.
    """
    size = len(contents)
    matrix = np.zeros((size, size))
    for row in range(0, size):
        first = contents[row]
        for col in range(row + 1, size):
            second = contents[col]
            if first['type'] != second['type']:
                # make Image and Text as different cluster (long distance)
                # ToDo : Find Caption
                dist = 999
            else:
                dist = calculate_distance_of_two_box(first['bbox'], second['bbox'])
            matrix[row][col] = dist
            matrix[col][row] = dist
    return matrix
# +
def make_cluster(distance_matrix, threshold):
    """Assign each content index a 1-based cluster id by distance threshold.

    Returns an array where entry i is the cluster id of content i.
    """
    size = len(distance_matrix)
    cluster_ids = np.zeros(size)
    next_cluster_id = 1
    for idx in range(size):
        if cluster_ids[idx] == 0:
            # Unassigned content: seed a new cluster and flood-fill from it.
            apply_to_cluster(idx, next_cluster_id, cluster_ids, distance_matrix, threshold)
            next_cluster_id += 1
    return cluster_ids
def apply_to_cluster(target_content_id, target_cluster_id, content_cluster_ids, distance_matrix, threshold):
    """Depth-first flood fill: pull every content within `threshold` into the cluster.

    Returns how many contents were newly added to the cluster.
    """
    if content_cluster_ids[target_content_id] != 0:
        # Already assigned to some cluster.
        return 0
    # Assign this content, then pull in its near neighbours.
    content_cluster_ids[target_content_id] = target_cluster_id
    added = 1
    for neighbor in range(len(distance_matrix)):
        if distance_matrix[target_content_id][neighbor] < threshold:
            added += apply_to_cluster(neighbor, target_cluster_id, content_cluster_ids, distance_matrix, threshold)
    return added
# +
def make_full_text(contents):
    """Concatenate the text of every 'text' content, in order."""
    return "".join(c['text'] for c in contents if c['type'] == 'text')
def make_entire_bbox(contents):
    """Bounding box enclosing every content's bbox (coordinate order agnostic)."""
    min_x, max_x = 99999, -99999
    min_y, max_y = 99999, -99999
    for c in contents:
        box = c['bbox']
        min_x = min(min_x, box['x1'], box['x2'])
        max_x = max(max_x, box['x1'], box['x2'])
        min_y = min(min_y, box['y1'], box['y2'])
        max_y = max(max_y, box['y1'], box['y2'])
    return {
        "x1": min_x,
        "x2": max_x,
        "y1": min_y,
        "y2": max_y
    }
# -
def convert_page_to_cluster(page, threshold=5):
    """Group a page's contents into spatial clusters.

    Tags every content with its 'cluster_id' and returns a dict mapping
    cluster id -> {'contents', 'full_text', 'bbox', 'type'}.

    threshold: maximum box distance for two contents to share a cluster
    (previously hard-coded to 5; the default keeps old behavior).
    """
    contents = page['contents']
    distance_matrix = make_distance_matrix(contents)
    cluster_ids = make_cluster(distance_matrix, threshold=threshold)
    clusters = {}
    # BUG FIX: removed a stray `di` statement here that raised NameError at runtime.
    for i, content in enumerate(contents):
        cluster_id = cluster_ids[i]
        content['cluster_id'] = cluster_id
        if cluster_id not in clusters:
            clusters[cluster_id] = {
                "contents": [ content, ]
            }
        else:
            clusters[cluster_id]['contents'].append(content)
    # Derive per-cluster aggregates from the member contents.
    for cid, cluster in clusters.items():
        cluster_contents = cluster['contents']
        cluster['full_text'] = make_full_text(cluster_contents)
        cluster['bbox'] = make_entire_bbox(cluster_contents)
        cluster['type'] = 'cluster'
    return clusters
# +
import random
def generate_random_color():
    """Return a random hex color string such as '#A3F09B'."""
    hex_digits = '0123456789ABCDEF'
    return "#" + ''.join(random.choice(hex_digits) for _ in range(6))
# Fixed palette of 100 random colors, indexed by cluster number when plotting.
colors = [generate_random_color() for _ in range(100)]
# -
def plot_page(page, page_image, plot_all_contents=False):
    """Overlay content/cluster bounding boxes on the rendered page image.

    page: extracted page dict (bbox plus contents/clusters).
    page_image: PIL image of the same page (from pdf2image).
    plot_all_contents: True draws raw contents, False draws clusters.
    """
    image_w, image_h = page_image.size
    # Page size in PDF user-space units.
    pdf_w = page['bbox']['x2']- page['bbox']['x1']
    pdf_h = page['bbox']['y2']- page['bbox']['y1']
    # Scale factors from PDF coordinates to image pixels.
    w_scale = image_w/pdf_w
    h_scale = image_h/pdf_h
    # Plot Page
    fig = plt.figure()
    ax = plt.axes()
    if plot_all_contents:
        target = "contents"
    else:
        target = "clusters"
    for content_num, content in enumerate(page[target]):
        bbox = content['bbox']
        content_w = (bbox['x2'] - bbox['x1'])*w_scale
        content_h = (bbox['y2'] - bbox['y1'])*h_scale
        if content['type'] == 'image':
            # Images: green solid outline.
            xy = (bbox['x1']*w_scale, bbox['y1']*h_scale)
            r = patches.Rectangle(xy=xy, width=content_w,height=content_h, ec='#00FF00', fill=False, linestyle='solid', linewidth = 0.2)
            ax.add_patch(r)
        elif content['type'] == 'text':
            # Text lines: red solid outline.
            xy = (bbox['x1']*w_scale, bbox['y1']*h_scale)
            r = patches.Rectangle(xy=xy, width=content_w,height=content_h, ec='#FF0000', fill=False, linestyle='solid', linewidth = 0.1)
            ax.add_patch(r)
        elif content['type'] == 'cluster':
            # Clusters: one palette color per cluster index.
            xy = (bbox['x1']*w_scale, bbox['y1']*h_scale)
            c = colors[content_num]
            r = patches.Rectangle(xy=xy, width=content_w,height=content_h, ec=c, fill=False, linestyle='solid', linewidth = 0.5)
            ax.add_patch(r)
    ax.set_xlim(0,image_w)
    ax.set_ylim(0,image_h)
    ax.set_aspect('equal')
    # Flip vertically: PDF origin is bottom-left, image origin is top-left.
    plt.imshow(np.flipud(page_image))
    plt.show()
# NOTE(review): ad-hoc debug cell — `pages` is only created by a later cell and
# `cluster_ids` is never defined in this cell, so running the notebook
# top-to-bottom raises NameError here. Presumably leftover interactive state.
dm = make_distance_matrix(pages[2]['contents'])
make_cluster(dm, 10)
for i, content in enumerate(pages[2]['contents']):
    content['cluster_id'] = cluster_ids[i]
pages[2]['clusters'][1]['full_text']
# +
# Main pipeline: parse the PDF, render page images, then cluster each page's contents.
path = "sample.pdf"
pages = extract_pdf(path)
pdf_images = convert_from_path(path)
for i in range(len(pages)):
    clusters = convert_page_to_cluster(pages[i])
    pages[i]['clusters'] = list(clusters.values())
# -
# Visualize the clusters of a single page.
page_id = 4
plot_page(pages[page_id], pdf_images[page_id], False)
# NOTE(review): dead debug code — the `break` on the first line of the loop body
# makes everything after it unreachable, and `distance_matrix` below is not
# defined anywhere in this notebook (NameError if executed). Kept as-is since
# the loop is effectively disabled.
for count, page in enumerate(pages):
    break
    print("Page {}".format(count+1))
    page_image = pdf_images[count]
    plot_page(page, page_image)
    break
distances = distance_matrix.flatten()
# +
# Hard-coded bbox of one sample image (PDF user-space coordinates) used for
# manual inspection of width/height.
image_x1 = 80.047
image_x2 = 527.239
image_y1 = 531.945
image_y2 = 735.930
w = image_x2 - image_x1
h = image_y2 - image_y1
# -
def plot_virtual_page(page):
    """Draw the page's content boxes in raw PDF user-space (no background image)."""
    pdf_w = page['bbox']['x2']- page['bbox']['x1']
    pdf_h = page['bbox']['y2']- page['bbox']['y1']
    # Plot Page
    fig = plt.figure()
    ax = plt.axes()
    for content in page['contents']:
        bbox = content['bbox']
        content_w = (bbox['x2'] - bbox['x1'])
        content_h = (bbox['y2'] - bbox['y1'])
        if content['type'] == 'image':
            # Images: red solid outline; bbox also echoed for debugging.
            print(bbox)
            r = patches.Rectangle(xy=(bbox['x1'], bbox['y1']), width=content_w,height=content_h, ec='#FF0000', fill=False, linestyle='solid', linewidth = 0.2)
            ax.add_patch(r)
        elif content['type'] == 'text':
            # Text lines: black dashed outline.
            r = patches.Rectangle(xy=(bbox['x1'], bbox['y1']), width=content_w,height=content_h, ec='#000000', fill=False, linestyle='dashed',linewidth = 0.1)
            ax.add_patch(r)
    ax.set_xlim(0,pdf_w)
    ax.set_ylim(0,pdf_h)
    ax.set_aspect('equal')
    # plt.imshow(np.flipud(page_image))
    # plt.gca().invert_yaxis()
    plt.show()
plot_virtual_page(pages[0])
| extract_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import folium
from folium import plugins
import pandas as pd
import branca
from folium.plugins import HeatMap
import numpy as np
import re
# Load the event dataset; 'Event ID' becomes the index.
data = pd.read_csv('jt_dataset.txt' , index_col=['Event ID'])
# NOTE(review): read_csv already returns a DataFrame, so this wrap is redundant.
df = pd.DataFrame(data)
# Parse dates and derive the year used for filtering below.
df['Event Date'] = pd.to_datetime(df['Event Date'])
df['Year'] = df['Event Date'].dt.year
def significance_to_numeric(x):
    """Translate a significance label into an ordinal score (0-3).

    Returns None for any unrecognized label, mirroring the implicit
    fall-through of the original if-chain.
    """
    scores = {'Common': 0, 'Notable': 1, 'Significant': 2, 'Critical': 3}
    return scores.get(x)
# Score significance labels numerically (None for unrecognized labels),
# keep only the 2017 events, and center the map on their mean coordinates.
df['Significance Score'] = df['Significance'].apply(significance_to_numeric)
df2017 = df[df.Year == 2017]
map1 = folium.Map(location=[df2017['Latitude'].mean(), df2017['Longitude'].mean()], zoom_start=2)

# One marker colour per significance score. Anything unmapped — score 3
# ("Critical") or a None score — falls back to dark red, exactly matching
# the old else-branch. The four near-identical CircleMarker branches of the
# original were collapsed into this single parameterized loop body.
significance_colors = {0: 'yellow', 1: 'orange', 2: 'red'}

for i in range(0, len(df2017)):
    significance = df2017['Significance Score'].iloc[i]
    color = significance_colors.get(significance, 'darkred')
    # Strip non-alphanumerics from free-text fields so they render safely
    # in the HTML popup.
    popup_text = ("Country: {Country}<br>" "Significance: {Significance}<br>" "Event Description: {Title}"
                  ).format(Country = re.sub(r"[^a-zA-Z0-9_]+", ' ',df2017['Country'].iloc[i]),
                           Significance = df2017['Significance'].iloc[i],
                           Title = re.sub(r"[^a-zA-Z0-9_]+", ' ', df2017['Title'].iloc[i]))
    folium.CircleMarker([df2017['Latitude'].iloc[i], df2017['Longitude'].iloc[i]],
                        radius = 3,
                        fill = True,
                        fill_color = color,
                        popup = popup_text,
                        color = color,
                        opacity = 0.6,
                        fill_opacity = 0.6).add_to(map1)

map1.save("C:\\Users\\hpuzzang\\Documents\\Terrorism Maps\\2017_popup_map.html")
# -
| Terrorism Data/2017 Terrorism Map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/vainaijr/HowToTrainYourMAMLPytorch/blob/master/maml%2B%2B.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="_bgT2a-_HIbT" colab_type="code" colab={}
# mount google drive
# NOTE(review): only works inside Google Colab; will raise ImportError in a
# plain Python environment.
from google.colab import drive
drive.mount('/gdrive')
# + colab_type="code" id="0y79kypKJ_J3" colab={}
# # copy files from google colab to google drive
# !cp -r omniglot_1_8_0.1_64_5_0/ ../gdrive/My\ Drive/
# !cp -r runs/ ../gdrive/My\ Drive/
# + colab_type="code" id="f5ohNe6LJ_JX" colab={}
# retrieve files from google drive to google colab
# !cp -r ../gdrive/My\ Drive/omniglot_1_8_0.1_64_5_0/ ./omniglot_1_8_0.1_64_5_0/
# !cp -r ../gdrive/My\ Drive/runs/ ./runs/
# + id="avyMhUz2yTg4" colab_type="code" colab={}
# clone repository, for dataset and json files
# !git clone https://github.com/AntreasAntoniou/HowToTrainYourMAMLPytorch.git
# !mv HowToTrainYourMAMLPytorch/* ./
# + id="ZfXN4qJM9zSr" colab_type="code" colab={}
# !rm -r HowToTrainYourMAMLPytorch/
# + id="lX1pF70a-KZG" colab_type="code" outputId="bf311efe-ffaa-4b08-d4a9-b5c7191da8f2" colab={"base_uri": "https://localhost:8080/", "height": 246}
# ngrok, dynamic visualization
# !wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
# !unzip ngrok-stable-linux-amd64.zip
# + id="nw5CGKbw-KYC" colab_type="code" colab={}
# logs are saved in ./runs directory
# Launch tensorboard in the background against the ./runs log directory.
# get_ipython() is only available inside IPython/Colab sessions.
LOG_DIR = './runs'
get_ipython().system_raw(
    'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'
    .format(LOG_DIR)
)
# + id="EoqoTvFz-KVe" colab_type="code" outputId="d7254665-941b-4ba0-cf8a-813f8c5de3c2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Tunnel the tensorboard port through ngrok and print the public URL.
get_ipython().system_raw('./ngrok http 6006 &')
# ! curl -s http://localhost:4040/api/tunnels | python3 -c \
# "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])"
# + id="aTf0w978-RNd" colab_type="code" colab={}
# for using TPU
# # !pip install \
# # http://storage.googleapis.com/pytorch-tpu-releases/tf-1.13/torch-1.0.0a0+1d94a2b-cp36-cp36m-linux_x86_64.whl \
# # http://storage.googleapis.com/pytorch-tpu-releases/tf-1.13/torch_xla-0.1+5622d42-cp36-cp36m-linux_x86_64.whl
# + id="dlYcoGLm-RMF" colab_type="code" outputId="3c771328-b72f-41ca-a166-510602dfecf0" colab={"base_uri": "https://localhost:8080/", "height": 1005}
# for using ngrok, tensorboard with PyTorch, dynamic visualization, we need to upgrade tb-nightly
# %%shell
# NOTE(review): the next line is a %%shell cell command, not Python — it is a
# SyntaxError if this file is executed as a plain .py script. Confirm it is
# only ever run as a notebook cell.
pip install --upgrade tb-nightly
# + id="CXSGKa4l-RKj" colab_type="code" outputId="78cc8153-1aab-4643-9e7c-32410f7f65fc" colab={"base_uri": "https://localhost:8080/", "height": 34}
from __future__ import print_function, unicode_literals, division
from IPython.core.debugger import set_trace
from IPython.display import HTML, Math
from pprint import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torchvision import datasets
from torch.nn.utils.weight_norm import WeightNorm
from torch.optim.lr_scheduler import StepLR
from torch.optim import Adam
from torchvision.transforms import Compose, RandomHorizontalFlip, RandomResizedCrop, ToTensor, Normalize, \
CenterCrop, Resize, ColorJitter, ToPILImage, RandomCrop
import torchvision.models as models
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset, sampler
from torchvision.utils import make_grid, save_image
from torch.utils.tensorboard import SummaryWriter
import torch.utils.checkpoint as cp
# for using TPU
# import torch_xla
# import torch_xla
# import torch_xla_py.utils as xu
# import torch_xla_py.xla_model as xm
import json, glob, time, math, os, datetime, string, random, re, warnings, itertools, \
logging, numbers, csv, pickle, tqdm
from copy import copy
import numpy as np
from PIL import Image, ImageEnhance, ImageFile
from collections import OrderedDict
import concurrent.futures
# Allow PIL to load partially-corrupted images instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True
print("CUDA available: ", torch.cuda.is_available())
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Dataset root consumed later by load_datapaths() via os.environ.
os.environ['DATASET_DIR'] = './datasets'
# for writing into tensorboard
writer = SummaryWriter()
# + id="d8LgqZ6qy1L7" colab_type="code" colab={}
class rotate_image(object):
    """Callable transform: rotate an image array by k quarter-turns.

    When configured for single-channel data, any extra trailing channels are
    collapsed down to one before rotating.
    """

    def __init__(self, k, channels):
        # k: number of 90-degree counter-clockwise rotations.
        # channels: expected channel count of incoming images.
        self.k = k
        self.channels = channels

    def __call__(self, image):
        wants_single = self.channels == 1
        if wants_single and len(image.shape) == 3:
            image = np.expand_dims(image[:, :, 0], axis=2)
        elif wants_single and len(image.shape) == 4:
            image = np.expand_dims(image[:, :, :, 0], axis=3)
        # rot90 returns a view; copy so callers own contiguous data.
        return np.rot90(image, k=self.k).copy()
# + id="GZ6A6o8oJLil" colab_type="code" colab={}
class torch_rotate_image(object):
    """Callable transform: rotate an image by k*90 degrees via torchvision's
    RandomRotation (fixed-degree rotation through PIL)."""

    def __init__(self, k, channels):
        # k: number of 90-degree rotations; channels: expected channel count.
        self.k = k
        self.channels = channels

    def __call__(self, image):
        # Fix: RandomRotation is not in the file-level torchvision imports,
        # so referencing it bare raised NameError at call time. Import it
        # locally to keep this class self-contained.
        from torchvision.transforms import RandomRotation
        rotate = RandomRotation(degrees=self.k * 90)
        if image.shape[-1] == 1:
            # PIL cannot build an image from a trailing singleton channel.
            image = image[:, :, 0]
        image = Image.fromarray(image)
        image = rotate(image)
        image = np.array(image)
        if len(image.shape) == 2:
            # Restore the channel axis dropped above / by grayscale PIL.
            image = np.expand_dims(image, axis=2)
        return image
# + id="Fcg1Bxs3ythc" colab_type="code" colab={}
def augment_image(image, k, channels, augment_bool, args, dataset_name):
    """Run the dataset's transform pipeline over one image or a batch.

    Picks the training pipeline when augment_bool is True, otherwise the
    evaluation pipeline, and applies it per-sample. Batched inputs (rank > 3)
    are transformed element-wise and re-stacked with torch.stack.
    """
    train_ops, eval_ops = get_transforms_for_dataset(dataset_name=dataset_name,
                                                     args=args, k=k)
    ops = train_ops if augment_bool is True else eval_ops

    def _apply(sample):
        # Thread one sample through every transform in order.
        for op in ops:
            sample = op(sample)
        return sample

    if len(image.shape) > 3:
        image = torch.stack([_apply(item) for item in image])
    else:
        image = _apply(image)
    return image
# + id="9NVCa1zGywce" colab_type="code" colab={}
def get_transforms_for_dataset(dataset_name, args, k):
    """Return (train_transforms, evaluate_transforms) for the named dataset.

    :param dataset_name: Dataset identifier, matched by substring against the
        supported families ("cifar10"/"cifar100", "omniglot", "imagenet").
    :param args: Config object providing normalization stats / channel count.
    :param k: Number of 90-degree rotations used by the omniglot pipeline.
    :return: Two lists of transforms (training, evaluation).
    :raises ValueError: If dataset_name matches no supported dataset.
        (Previously an unknown name fell through every branch and raised
        UnboundLocalError on the return statement.)
    """
    if "cifar10" in dataset_name or "cifar100" in dataset_name:
        transform_train = [
            RandomCrop(32, padding=4),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(args.classification_mean, args.classification_std)]
        transform_evaluate = [
            ToTensor(),
            Normalize(args.classification_mean, args.classification_std)]
    elif 'omniglot' in dataset_name:
        transform_train = [rotate_image(k=k, channels=args.image_channels), ToTensor()]
        transform_evaluate = [ToTensor()]
    elif 'imagenet' in dataset_name:
        transform_train = [Compose([
            ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])]
        transform_evaluate = [Compose([
            ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])]
    else:
        raise ValueError("Unsupported dataset: {}".format(dataset_name))
    return transform_train, transform_evaluate
# + id="8E3bStPExiaz" colab_type="code" outputId="79a03442-1fd5-4c6e-bef5-9b478671579a" colab={"base_uri": "https://localhost:8080/", "height": 132}
class FewShotLearningDatasetParallel(Dataset):
    def __init__(self, args):
        """
        A data provider class inheriting from Pytorch's Dataset class. It takes care of creating task sets for
        our few-shot learning model training and evaluation
        :param args: Arguments in the form of a Bunch object. Includes all hyperparameters necessary for the
        data-provider. For transparency and readability reasons to explicitly set as self.object_name all arguments
        required for the data provider, such that the reader knows exactly what is necessary for the data provider/
        """
        self.data_path = args.dataset_path
        self.dataset_name = args.dataset_name
        self.data_loaded_in_memory = False
        self.image_height, self.image_width, self.image_channel = args.image_height, args.image_width, args.image_channels
        self.args = args
        self.indexes_of_folders_indicating_class = args.indexes_of_folders_indicating_class
        self.reverse_channels = args.reverse_channels
        self.labels_as_int = args.labels_as_int
        self.train_val_test_split = args.train_val_test_split
        self.current_set_name = "train"
        self.num_target_samples = args.num_target_samples
        self.reset_stored_filepaths = args.reset_stored_filepaths
        # Derive fresh per-split base seeds from the user-supplied seeds.
        val_rng = np.random.RandomState(seed=args.val_seed)
        val_seed = val_rng.randint(1, 999999)
        train_rng = np.random.RandomState(seed=args.train_seed)
        train_seed = train_rng.randint(1, 999999)
        # NOTE(review): test_rng is seeded with args.val_seed rather than a
        # dedicated test seed, so the test stream mirrors validation's —
        # confirm this is intentional.
        test_rng = np.random.RandomState(seed=args.val_seed)
        test_seed = test_rng.randint(1, 999999)
        args.val_seed = val_seed
        args.train_seed = train_seed
        args.test_seed = test_seed
        # NOTE(review): both seed dicts use args.val_seed for the 'test'
        # entry; the test_seed computed above (args.test_seed) is never used
        # here — verify.
        self.init_seed = {"train": args.train_seed, "val": args.val_seed, 'test': args.val_seed}
        self.seed = {"train": args.train_seed, "val": args.val_seed, 'test': args.val_seed}
        self.num_of_gpus = args.num_of_gpus
        self.batch_size = args.batch_size
        self.train_index = 0
        self.val_index = 0
        self.test_index = 0
        self.augment_images = False
        self.num_samples_per_class = args.num_samples_per_class
        self.num_classes_per_set = args.num_classes_per_set
        self.rng = np.random.RandomState(seed=self.seed['val'])
        self.datasets = self.load_dataset()
        self.indexes = {"train": 0, "val": 0, 'test': 0}
        # Per-split map of class -> number of available samples.
        self.dataset_size_dict = {
            "train": {key: len(self.datasets['train'][key]) for key in list(self.datasets['train'].keys())},
            "val": {key: len(self.datasets['val'][key]) for key in list(self.datasets['val'].keys())},
            'test': {key: len(self.datasets['test'][key]) for key in list(self.datasets['test'].keys())}}
        self.label_set = self.get_label_set()
        # Total sample counts per split (drives __len__).
        self.data_length = {name: np.sum([len(self.datasets[name][key])
                                          for key in self.datasets[name]]) for name in self.datasets.keys()}
        print("data", self.data_length)
        self.observed_seed_set = None
    def load_dataset(self):
        """
        Loads a dataset's dictionary files and splits the data according to the train_val_test_split variable stored
        in the args object.
        :return: Three sets, the training set, validation set and test sets (referred to as the meta-train,
        meta-val and meta-test in the paper)
        """
        rng = np.random.RandomState(seed=self.seed['val'])
        if self.args.sets_are_pre_split == True:
            # Pre-split layout: each label encodes "<set_name>/<class_label>",
            # so we just regroup the path lists by set and class.
            data_image_paths, index_to_label_name_dict_file, label_to_index = self.load_datapaths()
            dataset_splits = dict()
            for key, value in data_image_paths.items():
                key = self.get_label_from_index(index=key)
                bits = key.split("/")
                set_name = bits[0]
                class_label = bits[1]
                if set_name not in dataset_splits:
                    dataset_splits[set_name] = {class_label: value}
                else:
                    dataset_splits[set_name][class_label] = value
        else:
            # Unsplit layout: shuffle the classes deterministically (seeded
            # rng above) and slice them by the configured split fractions.
            data_image_paths, index_to_label_name_dict_file, label_to_index = self.load_datapaths()
            total_label_types = len(data_image_paths)
            num_classes_idx = np.arange(len(data_image_paths.keys()), dtype=np.int32)
            rng.shuffle(num_classes_idx)
            keys = list(data_image_paths.keys())
            values = list(data_image_paths.values())
            new_keys = [keys[idx] for idx in num_classes_idx]
            new_values = [values[idx] for idx in num_classes_idx]
            data_image_paths = dict(zip(new_keys, new_values))
            # data_image_paths = self.shuffle(data_image_paths)
            x_train_id, x_val_id, x_test_id = int(self.train_val_test_split[0] * total_label_types), \
                                              int(np.sum(self.train_val_test_split[:2]) * total_label_types), \
                                              int(total_label_types)
            print(x_train_id, x_val_id, x_test_id)
            x_train_classes = (class_key for class_key in list(data_image_paths.keys())[:x_train_id])
            x_val_classes = (class_key for class_key in list(data_image_paths.keys())[x_train_id:x_val_id])
            x_test_classes = (class_key for class_key in list(data_image_paths.keys())[x_val_id:x_test_id])
            x_train, x_val, x_test = {class_key: data_image_paths[class_key] for class_key in x_train_classes}, \
                                     {class_key: data_image_paths[class_key] for class_key in x_val_classes}, \
                                     {class_key: data_image_paths[class_key] for class_key in x_test_classes},
            dataset_splits = {"train": x_train, "val":x_val , "test": x_test}
        if self.args.load_into_memory is True:
            # Optionally preload every image array into RAM, one class at a
            # time, parallelized across a process pool.
            print("Loading data into RAM")
            x_loaded = {"train": [], "val": [], "test": []}
            for set_key, set_value in dataset_splits.items():
                print("Currently loading into memory the {} set".format(set_key))
                x_loaded[set_key] = {key: np.zeros(len(value), ) for key, value in set_value.items()}
                # for class_key, class_value in set_value.items():
                with tqdm.tqdm(total=len(set_value)) as pbar_memory_load:
                    with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
                        # Process the list of files, but split the work across the process pool to use all CPUs!
                        for (class_label, class_images_loaded) in executor.map(self.load_parallel_batch, (set_value.items())):
                            x_loaded[set_key][class_label] = class_images_loaded
                            pbar_memory_load.update(1)
            dataset_splits = x_loaded
            self.data_loaded_in_memory = True
        return dataset_splits
def load_datapaths(self):
"""
If saved json dictionaries of the data are available, then this method loads the dictionaries such that the
data is ready to be read. If the json dictionaries do not exist, then this method calls get_data_paths()
which will build the json dictionary containing the class to filepath samples, and then store them.
:return: data_image_paths: dict containing class to filepath list pairs.
index_to_label_name_dict_file: dict containing numerical indexes mapped to the human understandable
string-names of the class
label_to_index: dictionary containing human understandable string mapped to numerical indexes
"""
dataset_dir = os.environ['DATASET_DIR']
data_path_file = "{}/{}.json".format(dataset_dir, self.dataset_name)
print(dataset_dir)
print(data_path_file)
self.index_to_label_name_dict_file = "{}/map_to_label_name_{}.json".format(dataset_dir, self.dataset_name)
self.label_name_to_map_dict_file = "{}/label_name_to_map_{}.json".format(dataset_dir, self.dataset_name)
if not os.path.exists(data_path_file):
self.reset_stored_filepaths = True
if self.reset_stored_filepaths == True:
if os.path.exists(data_path_file):
os.remove(data_path_file)
self.reset_stored_filepaths = False
try:
data_image_paths = self.load_from_json(filename=data_path_file)
label_to_index = self.load_from_json(filename=self.label_name_to_map_dict_file)
index_to_label_name_dict_file = self.load_from_json(filename=self.index_to_label_name_dict_file)
print(data_image_paths, index_to_label_name_dict_file, label_to_index)
return data_image_paths, index_to_label_name_dict_file, label_to_index
except:
print("Mapped data paths can't be found, remapping paths..")
data_image_paths, code_to_label_name, label_name_to_code = self.get_data_paths()
self.save_to_json(dict_to_store=data_image_paths, filename=data_path_file)
self.save_to_json(dict_to_store=code_to_label_name, filename=self.index_to_label_name_dict_file)
self.save_to_json(dict_to_store=label_name_to_code, filename=self.label_name_to_map_dict_file)
return self.load_datapaths()
def save_to_json(self, filename, dict_to_store):
with open(os.path.abspath(filename), 'w') as f:
json.dump(dict_to_store, fp=f)
def load_from_json(self, filename):
with open(filename, mode="r") as f:
load_dict = json.load(fp=f)
return load_dict
def load_test_image(self, filepath):
"""
Tests whether a target filepath contains an uncorrupted image. If image is corrupted, attempt to fix.
:param filepath: Filepath of image to be tested
:return: Return filepath of image if image exists and is uncorrupted (or attempt to fix has succeeded),
else return None
"""
image = None
try:
image = Image.open(filepath)
except RuntimeWarning:
os.system("convert {} -strip {}".format(filepath, filepath))
print("converting")
image = Image.open(filepath)
except:
print("Broken image")
if image is not None:
return filepath
else:
return None
    def get_data_paths(self):
        """
        Method that scans the dataset directory and generates class to image-filepath list dictionaries.
        :return: data_image_paths: dict containing class to filepath list pairs.
                 index_to_label_name_dict_file: dict containing numerical indexes mapped to the human understandable
                 string-names of the class
                 label_to_index: dictionary containing human understandable string mapped to numerical indexes
        """
        print("Get images from", self.data_path)
        data_image_path_list_raw = []
        labels = set()
        # Walk the dataset tree collecting jpeg/png/jpg files and the label
        # derived from each file's path components.
        # NOTE(review): the loop variable `dir` shadows the builtin (unused).
        for subdir, dir, files in os.walk(self.data_path):
            for file in files:
                if (".jpeg") in file.lower() or (".png") in file.lower() or (".jpg") in file.lower():
                    filepath = os.path.abspath(os.path.join(subdir, file))
                    label = self.get_label_from_path(filepath)
                    data_image_path_list_raw.append(filepath)
                    labels.add(label)
        # Sorted labels give a stable label<->index numbering across runs.
        labels = sorted(labels)
        idx_to_label_name = {idx: label for idx, label in enumerate(labels)}
        label_name_to_idx = {label: idx for idx, label in enumerate(labels)}
        data_image_path_dict = {idx: [] for idx in list(idx_to_label_name.keys())}
        # Validate every image in parallel; load_test_image returns None for
        # images that are broken beyond repair, which are then dropped.
        with tqdm.tqdm(total=len(data_image_path_list_raw)) as pbar_error:
            with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
                # Process the list of files, but split the work across the process pool to use all CPUs!
                for image_file in executor.map(self.load_test_image, (data_image_path_list_raw)):
                    pbar_error.update(1)
                    if image_file is not None:
                        label = self.get_label_from_path(image_file)
                        data_image_path_dict[label_name_to_idx[label]].append(image_file)
        return data_image_path_dict, idx_to_label_name, label_name_to_idx
def get_label_set(self):
"""
Generates a set containing all class numerical indexes
:return: A set containing all class numerical indexes
"""
index_to_label_name_dict_file = self.load_from_json(filename=self.index_to_label_name_dict_file)
return set(list(index_to_label_name_dict_file.keys()))
def get_index_from_label(self, label):
"""
Given a class's (human understandable) string, returns the numerical index of that class
:param label: A string of a human understandable class contained in the dataset
:return: An int containing the numerical index of the given class-string
"""
label_to_index = self.load_from_json(filename=self.label_name_to_map_dict_file)
return label_to_index[label]
def get_label_from_index(self, index):
"""
Given an index return the human understandable label mapping to it.
:param index: A numerical index (int)
:return: A human understandable label (str)
"""
index_to_label_name = self.load_from_json(filename=self.index_to_label_name_dict_file)
return index_to_label_name[index]
def get_label_from_path(self, filepath):
"""
Given a path of an image generate the human understandable label for that image.
:param filepath: The image's filepath
:return: A human understandable label.
"""
label_bits = filepath.split("/")
label = "/".join([label_bits[idx] for idx in self.indexes_of_folders_indicating_class])
if self.labels_as_int:
label = int(label)
return label
    def load_image(self, image_path, channels):
        """
        Given an image filepath and the number of channels to keep, load an image and keep the specified channels
        :param image_path: The image's filepath
        :param channels: The number of channels to keep
        :return: An image array of shape (h, w, channels), whose values range between 0.0 and 1.0.
        """
        if not self.data_loaded_in_memory:
            image = Image.open(image_path)
            if 'omniglot' in self.dataset_name:
                # Omniglot stays grayscale; high-quality Lanczos resample.
                image = image.resize((self.image_height, self.image_width), resample=Image.LANCZOS)
                image = np.array(image, np.float32)
                if channels == 1:
                    image = np.expand_dims(image, axis=2)
            else:
                # Other datasets are forced to 3-channel RGB.
                image = image.resize((self.image_height, self.image_width)).convert('RGB')
                image = np.array(image, np.float32)
                image = image / 255.0
            # NOTE(review): only the non-omniglot branch divides by 255, so
            # omniglot arrays keep their raw 0-255 range here — confirm this
            # is compensated for downstream.
        else:
            # Preloaded mode: image_path is already the decoded array.
            image = image_path
        return image
def load_batch(self, batch_image_paths):
"""
Load a batch of images, given a list of filepaths
:param batch_image_paths: A list of filepaths
:return: A numpy array of images of shape batch, height, width, channels
"""
image_batch = []
if self.data_loaded_in_memory:
for image_path in batch_image_paths:
image_batch.append(image_path)
image_batch = np.array(image_batch, dtype=np.float32)
#print(image_batch.shape)
else:
image_batch = [self.load_image(image_path=image_path, channels=self.image_channel)
for image_path in batch_image_paths]
image_batch = np.array(image_batch, dtype=np.float32)
image_batch = self.preprocess_data(image_batch)
return image_batch
def load_parallel_batch(self, inputs):
"""
Load a batch of images, given a list of filepaths
:param batch_image_paths: A list of filepaths
:return: A numpy array of images of shape batch, height, width, channels
"""
class_label, batch_image_paths = inputs
image_batch = []
if self.data_loaded_in_memory:
for image_path in batch_image_paths:
image_batch.append(np.copy(image_path))
image_batch = np.array(image_batch, dtype=np.float32)
else:
#with tqdm.tqdm(total=1) as load_pbar:
image_batch = [self.load_image(image_path=image_path, channels=self.image_channel)
for image_path in batch_image_paths]
#load_pbar.update(1)
image_batch = np.array(image_batch, dtype=np.float32)
image_batch = self.preprocess_data(image_batch)
return class_label, image_batch
    def preprocess_data(self, x):
        """
        Preprocesses data such that their shapes match the specified structures
        :param x: A data batch to preprocess
        :return: A preprocessed data batch
        """
        x_shape = x.shape
        # Flatten any leading batch dims down to one (N, H, W, C) batch.
        x = np.reshape(x, (-1, x_shape[-3], x_shape[-2], x_shape[-1]))
        if self.reverse_channels is True:
            # Reverse the channel order (e.g. RGB <-> BGR).
            # NOTE(review): np.ones defaults to float64, so this branch
            # silently upcasts float32 input to float64 — confirm intended.
            reverse_photos = np.ones(shape=x.shape)
            for channel in range(x.shape[-1]):
                reverse_photos[:, :, :, x.shape[-1] - 1 - channel] = x[:, :, :, channel]
            x = reverse_photos
        # Restore the caller's original shape.
        x = x.reshape(x_shape)
        return x
def reconstruct_original(self, x):
"""
Applies the reverse operations that preprocess_data() applies such that the data returns to their original form
:param x: A batch of data to reconstruct
:return: A reconstructed batch of data
"""
x = x * 255.0
return x
def shuffle(self, x, rng):
"""
Shuffles the data batch along it's first axis
:param x: A data batch
:return: A shuffled data batch
"""
indices = np.arange(len(x))
rng.shuffle(indices)
x = x[indices]
return x
def get_set(self, dataset_name, seed, augment_images=False, step):
"""
Generates a task-set to be used for training or evaluation
:param set_name: The name of the set to use, e.g. "train", "val" etc.
:return: A task-set containing an image and label support set, and an image and label target set.
"""
#seed = seed % self.args.total_unique_tasks
rng = np.random.RandomState(seed)
selected_classes = rng.choice(list(self.dataset_size_dict[dataset_name].keys()),
size=self.num_classes_per_set, replace=False)
rng.shuffle(selected_classes)
k_list = rng.randint(0, 4, size=self.num_classes_per_set)
k_dict = {selected_class: k_item for (selected_class, k_item) in zip(selected_classes, k_list)}
episode_labels = [i for i in range(self.num_classes_per_set)]
class_to_episode_label = {selected_class: episode_label for (selected_class, episode_label) in
zip(selected_classes, episode_labels)}
x_images = []
y_labels = []
for class_entry in selected_classes:
choose_samples_list = rng.choice(self.dataset_size_dict[dataset_name][class_entry],
size=self.num_samples_per_class + self.num_target_samples, replace=False)
class_image_samples = []
class_labels = []
for sample in choose_samples_list:
choose_samples = self.datasets[dataset_name][class_entry][sample]
x_class_data = self.load_batch([choose_samples])[0]
k = k_dict[class_entry]
x_class_data = augment_image(image=x_class_data, k=k,
channels=self.image_channel, augment_bool=augment_images,
dataset_name=self.dataset_name, args=self.args)
class_image_samples.append(x_class_data)
class_labels.append(int(class_to_episode_label[class_entry]))
class_image_samples = torch.stack(class_image_samples)
x_images.append(class_image_samples)
y_labels.append(class_labels)
x_images = torch.stack(x_images)
y_labels = np.array(y_labels, dtype=np.float32)
support_set_images = x_images[:, :self.num_samples_per_class]
support_set_labels = y_labels[:, :self.num_samples_per_class]
target_set_images = x_images[:, self.num_samples_per_class:]
target_set_labels = y_labels[:, self.num_samples_per_class:]
writer.add_images('support_set_images', support_set_images, step)
writer.add_images('target_set_images', target_set_image, step)
return support_set_images, target_set_images, support_set_labels, target_set_labels, seed
def __len__(self):
total_samples = self.data_length[self.current_set_name]
return total_samples
def length(self, set_name):
self.switch_set(set_name=set_name)
return len(self)
    def set_augmentation(self, augment_images):
        # Toggle whether sampled episodes apply training-time augmentation.
        self.augment_images = augment_images
    def switch_set(self, set_name, current_iter=None):
        # Select the active split; for training, advance the base seed so each
        # iteration draws a different stream of episodes.
        # NOTE(review): calling with set_name="train" and the default
        # current_iter=None raises TypeError (int + None); all visible callers
        # pass current_iter for the train split.
        self.current_set_name = set_name
        if set_name == "train":
            self.update_seed(dataset_name=set_name, seed=self.init_seed[set_name] + current_iter)
    def update_seed(self, dataset_name, seed=100):
        # Overwrite the episode-sampling base seed for the given split.
        self.seed[dataset_name] = seed
def __getitem__(self, idx):
support_set_images, target_set_image, support_set_labels, target_set_label, seed = \
self.get_set(self.current_set_name, seed=self.seed[self.current_set_name] + idx,
augment_images=self.augment_images, idx)
return support_set_images, target_set_image, support_set_labels, target_set_label, seed
def reset_seed(self):
self.seed = self.init_seed
# + id="PzzTQac1xOJW" colab_type="code" colab={}
class MetaLearningSystemDataLoader(object):
    def __init__(self, args, current_iter=0):
        """
        Initializes a meta learning system dataloader. The data loader uses the Pytorch DataLoader class to parallelize
        batch sampling and preprocessing.
        :param args: An arguments NamedTuple containing all the required arguments.
        :param current_iter: Current iter of experiment. Is used to make sure the data loader continues where it left
        of previously.
        """
        self.num_of_gpus = args.num_of_gpus
        self.batch_size = args.batch_size
        self.samples_per_iter = args.samples_per_iter
        self.num_workers = args.num_dataprovider_workers
        self.total_train_iters_produced = 0
        self.dataset = FewShotLearningDatasetParallel(args=args)
        self.batches_per_iter = args.samples_per_iter
        # Remember the untruncated per-split lengths so get_*_batches can
        # restore them after a capped run.
        self.full_data_length = self.dataset.data_length
        self.continue_from_iter(current_iter=current_iter)
        self.args = args

    def get_dataloader(self):
        """
        Returns a data loader with the correct set (train, val or test), continuing from the current iter.
        :return:
        """
        # One DataLoader batch bundles gpus x batch_size x samples_per_iter
        # episodes; shuffle=False because episode order is seed-driven.
        return DataLoader(self.dataset, batch_size=(self.num_of_gpus * self.batch_size * self.samples_per_iter),
                          shuffle=False, num_workers=self.num_workers, drop_last=True)

    def continue_from_iter(self, current_iter):
        """
        Makes sure the data provider is aware of where we are in terms of training iterations in the experiment.
        :param current_iter:
        """
        self.total_train_iters_produced += (current_iter * (self.num_of_gpus * self.batch_size * self.samples_per_iter))

    def get_train_batches(self, total_batches=-1, augment_images=False):
        """
        Returns a training batches data_loader
        :param total_batches: The number of batches we want the data loader to sample
        :param augment_images: Whether we want the images to be augmented.
        """
        if total_batches == -1:
            self.dataset.data_length = self.full_data_length
        else:
            self.dataset.data_length["train"] = total_batches * self.dataset.batch_size
        # Advance the train seed to the current global iteration before
        # yielding, so restarted runs don't repeat episodes.
        self.dataset.switch_set(set_name="train", current_iter=self.total_train_iters_produced)
        self.dataset.set_augmentation(augment_images=augment_images)
        self.total_train_iters_produced += (self.num_of_gpus * self.batch_size * self.samples_per_iter)
        for sample_id, sample_batched in enumerate(self.get_dataloader()):
            yield sample_batched

    def get_val_batches(self, total_batches=-1, augment_images=False):
        """
        Returns a validation batches data_loader
        :param total_batches: The number of batches we want the data loader to sample
        :param augment_images: Whether we want the images to be augmented.
        """
        if total_batches == -1:
            self.dataset.data_length = self.full_data_length
        else:
            self.dataset.data_length['val'] = total_batches * self.dataset.batch_size
        self.dataset.switch_set(set_name="val")
        self.dataset.set_augmentation(augment_images=augment_images)
        for sample_id, sample_batched in enumerate(self.get_dataloader()):
            yield sample_batched

    def get_test_batches(self, total_batches=-1, augment_images=False):
        """
        Returns a testing batches data_loader
        :param total_batches: The number of batches we want the data loader to sample
        :param augment_images: Whether we want the images to be augmented.
        """
        if total_batches == -1:
            self.dataset.data_length = self.full_data_length
        else:
            self.dataset.data_length['test'] = total_batches * self.dataset.batch_size
        self.dataset.switch_set(set_name='test')
        self.dataset.set_augmentation(augment_images=augment_images)
        for sample_id, sample_batched in enumerate(self.get_dataloader()):
            yield sample_batched
# + id="HCshYy8PvXly" colab_type="code" colab={}
def extract_top_level_dict(current_dict):
    """
    Regroup a flat dict of dotted parameter names into one level of nesting.

    Each key is first stripped of any "layer_dict." segments; the portion
    before the first remaining "." becomes a top-level key, and the remainder
    (if any) becomes a key inside that entry's sub-dictionary.
    :param current_dict: Flat mapping of dotted parameter names to values.
    :return: Mapping of top-level names to values or sub-dictionaries.
    """
    output_dict = dict()
    for full_key in current_dict.keys():
        cleaned = full_key.replace("layer_dict.", "")
        pieces = cleaned.split(".")
        top_level = pieces[0]
        sub_level = ".".join(pieces[1:])
        value = current_dict[full_key]
        if top_level in output_dict:
            # Copy-on-write merge into the existing sub-dictionary.
            merged = {k: v for k, v in output_dict[top_level].items()}
            merged[sub_level] = value
            output_dict[top_level] = merged
        elif sub_level == "":
            output_dict[top_level] = value
        else:
            output_dict[top_level] = {sub_level: value}
    #print(current_dict.keys(), output_dict.keys())
    return output_dict
# + id="brbh1ISiqIwN" colab_type="code" colab={}
class MetaConv2dLayer(nn.Module):
    """
    Conv2d whose weights can be overridden per call via a params dict.

    Behaves like a standard Conv2D layer, except that ``forward`` may receive
    an external parameter dictionary to use instead of the internally stored
    weights — useful for inner-loop optimization in meta-learning.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, use_bias, groups=1, dilation_rate=1):
        """
        :param in_channels: Number of input channels.
        :param out_channels: Number of output channels (filters).
        :param kernel_size: Spatial size of the (square) convolution kernel.
        :param stride: Convolution stride.
        :param padding: Convolution padding.
        :param use_bias: Whether a bias parameter is created and applied.
        :param groups: Number of convolution groups.
        :param dilation_rate: Dilation factor of the kernel.
        """
        super(MetaConv2dLayer, self).__init__()
        self.stride = int(stride)
        self.padding = int(padding)
        self.dilation_rate = int(dilation_rate)
        self.use_bias = use_bias
        self.groups = int(groups)
        self.weight = nn.Parameter(torch.empty(out_channels, in_channels, kernel_size, kernel_size))
        nn.init.xavier_uniform_(self.weight)
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros(out_channels))

    def forward(self, x, params=None):
        """
        Run the convolution, preferring externally supplied parameters.

        :param x: Input image batch.
        :param params: Optional dict with "weight" (and "bias" when
            ``use_bias``); when None the layer's stored parameters are used.
        :return: The output feature map of the convolution.
        """
        bias = None
        if params is None:
            weight = self.weight
            if self.use_bias:
                bias = self.bias
        else:
            params = extract_top_level_dict(current_dict=params)
            weight = params["weight"]
            if self.use_bias:
                bias = params["bias"]
        return F.conv2d(input=x, weight=weight, bias=bias, stride=self.stride,
                        padding=self.padding, dilation=self.dilation_rate, groups=self.groups)
# + id="5_yy9Vluuu1E" colab_type="code" colab={}
class MetaBatchNormLayer(nn.Module):
    """
    Batch-norm layer that (a) can consume externally passed weight/bias at the
    forward pass and (b) can optionally keep a separate set of running
    statistics and beta/gamma parameters per inner-loop step.
    """

    def __init__(self, num_features, device, args, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True, meta_batch_norm=True, no_learnable_params=False,
                 use_per_step_bn_statistics=False):
        """
        A MetaBatchNorm layer. Applies the same functionality of a standard BatchNorm layer with the added
        functionality of being able to receive a parameter dictionary at the forward pass which allows the layer
        to use external weights instead of the internal ones. Useful for inner loop optimization in the meta
        learning setting. Also able to store per-step running stats and per-step beta and gamma.

        :param num_features: Number of feature channels to normalize.
        :param device: Device used when restoring backed-up statistics.
        :param args: Hyperparameter namespace; reads learnable_bn_gamma, learnable_bn_beta,
            number_of_training_steps_per_iter and enable_inner_loop_optimizable_bn_params.
        :param eps: Numerical-stability epsilon passed to F.batch_norm.
        :param momentum: Running-statistics momentum passed to F.batch_norm.
        :param affine: Stored but not otherwise used by this implementation.
        :param track_running_stats: Stored but not otherwise used by this implementation.
        :param meta_batch_norm: Stored but not otherwise used by this implementation.
        :param no_learnable_params: Unused; kept for interface compatibility.
        :param use_per_step_bn_statistics: Whether to keep one row of running stats
            and beta/gamma per inner-loop step.
        """
        super(MetaBatchNormLayer, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.affine = affine
        self.track_running_stats = track_running_stats
        self.meta_batch_norm = meta_batch_norm
        self.num_features = num_features  # NOTE(review): duplicate assignment; harmless
        self.device = device
        self.use_per_step_bn_statistics = use_per_step_bn_statistics
        self.args = args
        self.learnable_gamma = self.args.learnable_bn_gamma
        self.learnable_beta = self.args.learnable_bn_beta
        if use_per_step_bn_statistics:
            # One row of running statistics and affine params per inner-loop step.
            self.running_mean = nn.Parameter(torch.zeros(args.number_of_training_steps_per_iter, num_features),
                                             requires_grad=False)
            self.running_var = nn.Parameter(torch.ones(args.number_of_training_steps_per_iter, num_features),
                                            requires_grad=False)
            self.bias = nn.Parameter(torch.zeros(args.number_of_training_steps_per_iter, num_features),
                                     requires_grad=self.learnable_beta)
            self.weight = nn.Parameter(torch.ones(args.number_of_training_steps_per_iter, num_features),
                                       requires_grad=self.learnable_gamma)
        else:
            self.running_mean = nn.Parameter(torch.zeros(num_features), requires_grad=False)
            # NOTE(review): running_var starts at zeros here but at ones in the
            # per-step branch above — running variance conventionally starts at
            # ones; confirm this asymmetry is intentional.
            self.running_var = nn.Parameter(torch.zeros(num_features), requires_grad=False)
            self.bias = nn.Parameter(torch.zeros(num_features),
                                     requires_grad=self.learnable_beta)
            self.weight = nn.Parameter(torch.ones(num_features),
                                       requires_grad=self.learnable_gamma)
        if self.args.enable_inner_loop_optimizable_bn_params:
            # Replace any per-step affine params with single shared ones so the
            # inner loop can optimize beta/gamma directly.
            self.bias = nn.Parameter(torch.zeros(num_features),
                                     requires_grad=self.learnable_beta)
            self.weight = nn.Parameter(torch.ones(num_features),
                                       requires_grad=self.learnable_gamma)
        # Buffers used to snapshot/restore running statistics around eval passes.
        self.backup_running_mean = torch.zeros(self.running_mean.shape)
        self.backup_running_var = torch.ones(self.running_var.shape)
        self.momentum = momentum

    def forward(self, input, num_step, params=None, training=False, backup_running_statistics=False):
        """
        Forward propagates by applying a batch norm function. If params are none then internal params are used.
        Otherwise passed params will be used to execute the function.

        :param input: Input data batch.
        :param num_step: Current inner-loop step; indexes the per-step stats/params
            when use_per_step_bn_statistics is enabled.
        :param params: A dictionary containing 'weight' and 'bias'.
        :param training: Whether this is the training phase.
            NOTE(review): F.batch_norm below is always called with training=True,
            so this flag does not change the normalization mode — confirm intended.
        :param backup_running_statistics: Whether to snapshot the running statistics
            (used at evaluation time so collected validation stats can be discarded).
        :return: The result of the batch norm operation.
        """
        if params is not None:
            # External (inner-loop) affine parameters take precedence.
            params = extract_top_level_dict(current_dict=params)
            (weight, bias) = params["weight"], params["bias"]
            #print(num_step, params['weight'])
        else:
            #print(num_step, "no params")
            weight, bias = self.weight, self.bias
        if self.use_per_step_bn_statistics:
            # Select the statistics (and, without external params, the affine
            # params) belonging to this inner-loop step.
            running_mean = self.running_mean[num_step]
            running_var = self.running_var[num_step]
            if params is None:
                if not self.args.enable_inner_loop_optimizable_bn_params:
                    bias = self.bias[num_step]
                    weight = self.weight[num_step]
        else:
            running_mean = None
            running_var = None
        if backup_running_statistics and self.use_per_step_bn_statistics:
            # NOTE(review): `copy` is presumably copy.copy imported at file top — confirm.
            self.backup_running_mean.data = copy(self.running_mean.data)
            self.backup_running_var.data = copy(self.running_var.data)
        momentum = self.momentum
        output = F.batch_norm(input, running_mean, running_var, weight, bias,
                              training=True, momentum=momentum, eps=self.eps)
        return output

    def restore_backup_stats(self):
        """
        Resets batch statistics to their backup values which are collected after each forward pass.
        """
        if self.use_per_step_bn_statistics:
            self.running_mean = nn.Parameter(self.backup_running_mean.to(device=self.device), requires_grad=False)
            self.running_var = nn.Parameter(self.backup_running_var.to(device=self.device), requires_grad=False)

    def extra_repr(self):
        # Mirrors torch.nn.BatchNorm's repr fields.
        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
               'track_running_stats={track_running_stats}'.format(**self.__dict__)
# + id="LsbTVhQ9vply" colab_type="code" colab={}
class MetaLayerNormLayer(nn.Module):
    """
    Layer norm that can receive an external bias parameter at call time,
    falling back to its internal (frozen-weight, learnable-bias) parameters.
    """

    def __init__(self, input_feature_shape, eps=1e-5, elementwise_affine=True):
        """
        :param input_feature_shape: Shape to normalize over (no batch dim),
            e.g. (c, h, w); a bare int is treated as a 1-D shape.
        :param eps: Numerical-stability epsilon added to the variance.
        :param elementwise_affine: Whether per-element weight/bias tensors are
            created (the weight is kept frozen; only the bias is learnable).
        """
        super(MetaLayerNormLayer, self).__init__()
        if isinstance(input_feature_shape, numbers.Integral):
            input_feature_shape = (input_feature_shape,)
        self.normalized_shape = torch.Size(input_feature_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            # The multiplicative weight is deliberately frozen (requires_grad=False).
            self.weight = nn.Parameter(torch.Tensor(*input_feature_shape), requires_grad=False)
            self.bias = nn.Parameter(torch.Tensor(*input_feature_shape))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize the weight to ones and the bias to zeros."""
        if self.elementwise_affine:
            self.weight.data.fill_(1)
            self.bias.data.zero_()

    def forward(self, input, num_step, params=None, training=False, backup_running_statistics=False):
        """
        Apply layer normalization, optionally with an external bias.

        :param input: Input batch whose trailing dims match ``normalized_shape``.
        :param num_step: Inner-loop step index (unused; kept so all norm layers
            share the same call signature).
        :param params: Optional dict; only its "bias" entry is used — the
            frozen internal weight is always applied.
        :param training: Unused; kept for interface compatibility.
        :param backup_running_statistics: Unused; layer norm has no running stats.
        :return: The normalized tensor.
        """
        if params is None:
            bias = self.bias
            #print('no inner loop params', self)
        else:
            bias = extract_top_level_dict(current_dict=params)["bias"]
        return F.layer_norm(input, self.normalized_shape, self.weight, bias, self.eps)

    def restore_backup_stats(self):
        """No-op: layer norm keeps no running statistics to restore."""
        pass

    def extra_repr(self):
        return '{normalized_shape}, eps={eps}, ' \
               'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
# + id="-Up-rbPipql9" colab_type="code" colab={}
class MetaConvNormLayerReLU(nn.Module):
    """A Conv -> (optional) Norm -> LeakyReLU block with meta-parameter support."""

    def __init__(self, input_shape, num_filters, kernel_size, stride, padding, use_bias, args, normalization=True,
                 meta_layer=True, no_bn_learnable_params=False, device=None):
        """
        :param input_shape: The image input shape in the form (b, c, h, w).
        :param num_filters: Number of filters for the convolutional layer.
        :param kernel_size: Kernel size of the convolutional layer.
        :param stride: Stride of the convolutional layer.
        :param padding: Padding of the convolutional layer.
        :param use_bias: Whether the convolutional layer uses a bias.
        :param args: Named tuple of system hyperparameters (reads norm_layer,
            per_step_bn_statistics, etc.).
        :param normalization: Whether a normalization layer is inserted.
        :param meta_layer: Whether meta-layer capabilities (meta-batch-norm etc.)
            are required.
        :param no_bn_learnable_params: Forwarded to the norm layer.
        :param device: Device the layer runs on.
        """
        super(MetaConvNormLayerReLU, self).__init__()
        self.normalization = normalization
        self.use_per_step_bn_statistics = args.per_step_bn_statistics
        self.input_shape = input_shape
        self.args = args
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.use_bias = use_bias
        self.meta_layer = meta_layer
        self.no_bn_learnable_params = no_bn_learnable_params
        self.device = device
        self.layer_dict = nn.ModuleDict()
        self.build_block()

    def build_block(self):
        """Instantiate sub-layers by tracing a dummy zero batch through them."""
        features = torch.zeros(self.input_shape)
        self.conv = MetaConv2dLayer(in_channels=features.shape[1], out_channels=self.num_filters,
                                    kernel_size=self.kernel_size,
                                    stride=self.stride, padding=self.padding, use_bias=self.use_bias)
        features = self.conv(features)
        if self.normalization:
            if self.args.norm_layer == "batch_norm":
                self.norm_layer = MetaBatchNormLayer(features.shape[1], track_running_stats=True,
                                                     meta_batch_norm=self.meta_layer,
                                                     no_learnable_params=self.no_bn_learnable_params,
                                                     device=self.device,
                                                     use_per_step_bn_statistics=self.use_per_step_bn_statistics,
                                                     args=self.args)
            elif self.args.norm_layer == "layer_norm":
                self.norm_layer = MetaLayerNormLayer(input_feature_shape=features.shape[1:])
            features = self.norm_layer(features, num_step=0)
        features = F.leaky_relu(features)
        print(features.shape)

    def forward(self, x, num_step, params=None, training=False, backup_running_statistics=False):
        """
        Run conv -> norm (optional) -> leaky ReLU, preferring external params.

        :param x: Input data batch.
        :param num_step: Current inner-loop step, forwarded to the norm layer.
        :param params: Optional nested param dict with 'conv' and optionally
            'norm_layer' entries.
        :param training: Whether this is the training phase.
        :param backup_running_statistics: Whether the norm layer should back up
            its running statistics before this pass.
        :return: The activated output of the block.
        """
        conv_params = None
        batch_norm_params = None
        activation_function_pre_params = None  # extracted below but unused downstream
        if params is not None:
            params = extract_top_level_dict(current_dict=params)
            if self.normalization and 'norm_layer' in params:
                batch_norm_params = params['norm_layer']
            if 'activation_function_pre' in params:
                activation_function_pre_params = params['activation_function_pre']
            conv_params = params['conv']
        out = self.conv(x, params=conv_params)
        if self.normalization:
            out = self.norm_layer.forward(out, num_step=num_step,
                                          params=batch_norm_params, training=training,
                                          backup_running_statistics=backup_running_statistics)
        return F.leaky_relu(out)

    def restore_backup_stats(self):
        """Restore the norm layer's backed-up statistics, if normalization is on."""
        if self.normalization:
            self.norm_layer.restore_backup_stats()
# + id="qujCuA12uLby" colab_type="code" colab={}
class MetaNormLayerConvReLU(nn.Module):
    """A (optional) Norm -> Conv -> LeakyReLU block with meta-parameter support."""

    def __init__(self, input_shape, num_filters, kernel_size, stride, padding, use_bias, args, normalization=True,
                 meta_layer=True, no_bn_learnable_params=False, device=None):
        """
        :param input_shape: The image input shape in the form (b, c, h, w).
        :param num_filters: Number of filters for the convolutional layer.
        :param kernel_size: Kernel size of the convolutional layer.
        :param stride: Stride of the convolutional layer.
        :param padding: Padding of the convolutional layer.
        :param use_bias: Whether the convolutional layer uses a bias.
        :param args: Named tuple of system hyperparameters (reads norm_layer,
            per_step_bn_statistics, etc.).
        :param normalization: Whether a normalization layer is inserted.
        :param meta_layer: Whether meta-layer capabilities (meta-batch-norm etc.)
            are required.
        :param no_bn_learnable_params: Forwarded to the norm layer.
        :param device: Device the layer runs on.
        """
        super(MetaNormLayerConvReLU, self).__init__()
        self.normalization = normalization
        self.use_per_step_bn_statistics = args.per_step_bn_statistics
        self.input_shape = input_shape
        self.args = args
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.use_bias = use_bias
        self.meta_layer = meta_layer
        self.no_bn_learnable_params = no_bn_learnable_params
        self.device = device
        self.layer_dict = nn.ModuleDict()
        self.build_block()

    def build_block(self):
        """Instantiate sub-layers by tracing a dummy zero batch through them."""
        features = torch.zeros(self.input_shape)
        if self.normalization:
            if self.args.norm_layer == "batch_norm":
                self.norm_layer = MetaBatchNormLayer(self.input_shape[1], track_running_stats=True,
                                                     meta_batch_norm=self.meta_layer,
                                                     no_learnable_params=self.no_bn_learnable_params,
                                                     device=self.device,
                                                     use_per_step_bn_statistics=self.use_per_step_bn_statistics,
                                                     args=self.args)
            elif self.args.norm_layer == "layer_norm":
                self.norm_layer = MetaLayerNormLayer(input_feature_shape=features.shape[1:])
            features = self.norm_layer.forward(features, num_step=0)
        self.conv = MetaConv2dLayer(in_channels=features.shape[1], out_channels=self.num_filters,
                                    kernel_size=self.kernel_size,
                                    stride=self.stride, padding=self.padding, use_bias=self.use_bias)
        self.layer_dict['activation_function_pre'] = nn.LeakyReLU()
        features = self.layer_dict['activation_function_pre'].forward(self.conv.forward(features))
        print(features.shape)

    def forward(self, x, num_step, params=None, training=False, backup_running_statistics=False):
        """
        Run norm (optional) -> conv -> leaky ReLU, preferring external params.

        :param x: Input data batch.
        :param num_step: Current inner-loop step, forwarded to the norm layer.
        :param params: Optional nested param dict with 'conv' and optionally
            'norm_layer' entries.
        :param training: Whether this is the training phase.
        :param backup_running_statistics: Whether the norm layer should back up
            its running statistics before this pass.
        :return: The activated output of the block.
        """
        batch_norm_params = None
        if params is None:
            conv_params = None
            #print('no inner loop params', self)
        else:
            params = extract_top_level_dict(current_dict=params)
            if self.normalization and 'norm_layer' in params:
                batch_norm_params = params['norm_layer']
            conv_params = params['conv']
        out = x
        if self.normalization:
            out = self.norm_layer.forward(out, num_step=num_step,
                                          params=batch_norm_params, training=training,
                                          backup_running_statistics=backup_running_statistics)
        out = self.conv.forward(out, params=conv_params)
        return self.layer_dict['activation_function_pre'].forward(out)

    def restore_backup_stats(self):
        """Restore the norm layer's backed-up statistics, if normalization is on."""
        if self.normalization:
            self.norm_layer.restore_backup_stats()
# + id="LLwmEEAgv5EO" colab_type="code" colab={}
class MetaLinearLayer(nn.Module):
    """
    Linear layer whose weights can be overridden per call via a params dict —
    useful for inner-loop optimization in meta-learning.
    """

    def __init__(self, input_shape, num_filters, use_bias):
        """
        :param input_shape: Input shape in the form (b, f); only f is used.
        :param num_filters: Number of output features.
        :param use_bias: Whether a bias parameter is created and applied.
        """
        super(MetaLinearLayer, self).__init__()
        b, c = input_shape
        self.use_bias = use_bias
        self.weights = nn.Parameter(torch.ones(num_filters, c))
        nn.init.xavier_uniform_(self.weights)
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros(num_filters))

    def forward(self, x, params=None):
        """
        Apply Wx + b using either internal or externally passed parameters.

        :param x: Input data batch, in the form (b, f).
        :param params: Optional dict with "weights" (and "bias" when
            ``use_bias``); when None the layer's stored parameters are used.
        :return: The result of the linear transform.
        """
        bias = None
        if params is None:
            #print('no inner loop params', self)
            weight = self.weights
            if self.use_bias:
                bias = self.bias
        else:
            params = extract_top_level_dict(current_dict=params)
            weight = params["weights"]
            if self.use_bias:
                bias = params["bias"]
        return F.linear(input=x, weight=weight, bias=bias)
# + id="FoWzgA2qtpT9" colab_type="code" colab={}
class VGGReLUNormNetwork(nn.Module):
    def __init__(self, im_shape, num_output_classes, args, device, meta_classifier=True):
        """
        Builds a multilayer convolutional network. It also provides functionality for passing external parameters
        to be used at inference time. Enables inner loop optimization readily.

        :param im_shape: The input image batch shape, (b, c, h, w).
        :param num_output_classes: The number of output classes of the network.
        :param args: A named tuple containing the system's hyperparameters; reads
            cnn_num_filters, num_stages, max_pooling and conv_padding here.
        :param device: The device to run this on.
        :param meta_classifier: Whether the system's meta-learning (inner-loop)
            functionalities should be enabled (forwarded to each conv block).
        """
        super(VGGReLUNormNetwork, self).__init__()
        b, c, self.h, self.w = im_shape
        self.device = device
        self.total_layers = 0
        self.args = args
        self.upscale_shapes = []
        self.cnn_filters = args.cnn_num_filters
        self.input_shape = list(im_shape)
        self.num_stages = args.num_stages
        self.num_output_classes = num_output_classes
        # With max pooling the convs keep stride 1 and pooling downsamples;
        # otherwise stride-2 convolutions do the downsampling.
        if args.max_pooling:
            print("Using max pooling")
            self.conv_stride = 1
        else:
            print("Using strided convolutions")
            self.conv_stride = 2
        self.meta_classifier = meta_classifier
        self.build_network()
        print("meta network params")
        for name, param in self.named_parameters():
            print(name, param.shape)

    def build_network(self):
        """
        Builds the network before inference is required by creating some dummy inputs with the same input as the
        self.im_shape tuple. Then passes that through the network and dynamically computes input shapes and
        sets output shapes for each layer.
        """
        x = torch.zeros(self.input_shape)
        out = x
        self.layer_dict = nn.ModuleDict()
        self.upscale_shapes.append(x.shape)
        for i in range(self.num_stages):
            self.layer_dict['conv{}'.format(i)] = MetaConvNormLayerReLU(input_shape=out.shape,
                                                                        num_filters=self.cnn_filters,
                                                                        kernel_size=3, stride=self.conv_stride,
                                                                        padding=self.args.conv_padding,
                                                                        use_bias=True, args=self.args,
                                                                        normalization=True,
                                                                        meta_layer=self.meta_classifier,
                                                                        no_bn_learnable_params=False,
                                                                        device=self.device)
            out = self.layer_dict['conv{}'.format(i)](out, training=True, num_step=0)
            if self.args.max_pooling:
                out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)
        # Without max pooling, global average pooling collapses the spatial dims.
        if not self.args.max_pooling:
            out = F.avg_pool2d(out, out.shape[2])
        self.encoder_features_shape = list(out.shape)
        out = out.view(out.shape[0], -1)
        self.layer_dict['linear'] = MetaLinearLayer(input_shape=(out.shape[0], np.prod(out.shape[1:])),
                                                    num_filters=self.num_output_classes, use_bias=True)
        out = self.layer_dict['linear'](out)
        print("VGGNetwork build", out.shape)

    def forward(self, x, num_step, params=None, training=False, backup_running_statistics=False):
        """
        Forward propagates through the network. If any params are passed then they are used instead of stored params.

        :param x: Input image batch.
        :param num_step: The current inner loop step number.
        :param params: If params are None then internal parameters are used. If params are a dictionary with keys the
            same as the layer names then they will be used instead.
        :param training: Whether this is training (True) or eval time.
        :param backup_running_statistics: Whether to backup the running statistics in their backup store. Which is
            then used to reset the stats back to a previous state (usually after an eval loop, when we want to throw
            away stored statistics).
        :return: Logits of shape (b, num_output_classes).
        """
        param_dict = dict()
        if params is not None:
            param_dict = extract_top_level_dict(current_dict=params)
        # print('top network', param_dict.keys())
        # Ensure every layer has an entry (None -> use internal params).
        for name, param in self.layer_dict.named_parameters():
            path_bits = name.split(".")
            layer_name = path_bits[0]
            if layer_name not in param_dict:
                param_dict[layer_name] = None
        out = x
        for i in range(self.num_stages):
            out = self.layer_dict['conv{}'.format(i)](out, params=param_dict['conv{}'.format(i)], training=training,
                                                      backup_running_statistics=backup_running_statistics,
                                                      num_step=num_step)
            if self.args.max_pooling:
                out = F.max_pool2d(input=out, kernel_size=(2, 2), stride=2, padding=0)
        if not self.args.max_pooling:
            out = F.avg_pool2d(out, out.shape[2])
        out = out.view(out.size(0), -1)
        out = self.layer_dict['linear'](out, param_dict['linear'])
        return out

    def zero_grad(self, params=None):
        # Zero the gradients of either the module's own parameters or of an
        # externally supplied named-parameter dict.
        # NOTE(review): grads are only zeroed when torch.sum(grad) > 0 and each
        # grad is printed first — looks like leftover debug behavior; confirm.
        if params is None:
            for param in self.parameters():
                if param.requires_grad == True:
                    if param.grad is not None:
                        if torch.sum(param.grad) > 0:
                            print(param.grad)
                            param.grad.zero_()
        else:
            for name, param in params.items():
                if param.requires_grad == True:
                    if param.grad is not None:
                        if torch.sum(param.grad) > 0:
                            print(param.grad)
                            param.grad.zero_()
                            # Also detach the grad entirely for the external dict.
                            params[name].grad = None

    def restore_backup_stats(self):
        """
        Reset stored batch statistics from the stored backup.
        """
        for i in range(self.num_stages):
            self.layer_dict['conv{}'.format(i)].restore_backup_stats()
# + id="-P0Tewg7iNRF" colab_type="code" colab={}
class GradientDescentLearningRule(nn.Module):
    """Simple (stochastic) gradient descent learning rule.
    For a scalar error function `E(p[0], p_[1] ... )` of some set of
    potentially multidimensional parameters this attempts to find a local
    minimum of the loss function by applying updates to each parameter of the
    form
    p[i] := p[i] - learning_rate * dE/dp[i]
    With `learning_rate` a positive scaling parameter.
    The error function used in successive applications of these updates may be
    a stochastic estimator of the true error function (e.g. when the error with
    respect to only a subset of data-points is calculated) in which case this
    will correspond to a stochastic gradient descent learning rule.
    """

    def __init__(self, device, learning_rate=1e-3):
        """Creates a new learning rule object.
        Args:
            device: Device the learning-rate tensor should live on.
            learning_rate: A positive scalar to scale gradient updates to the
                parameters by. This needs to be carefully set - if too large
                the learning dynamic will be unstable and may diverge, while
                if set too small learning will proceed very slowly.
        """
        super(GradientDescentLearningRule, self).__init__()
        assert learning_rate > 0., 'learning_rate should be positive.'
        # Fix: Tensor.to(device) is NOT in-place — the result must be assigned,
        # otherwise the learning rate silently stays on the original device.
        self.learning_rate = (torch.ones(1) * learning_rate).to(device)

    def update_params(self, names_weights_dict, names_grads_wrt_params_dict, num_step, tau=0.9):
        """Applies a single gradient descent update to all parameters.
        Args:
            names_weights_dict: Mapping of name -> current parameter tensor.
            names_grads_wrt_params_dict: Mapping of name -> gradient of the
                loss with respect to that parameter.
            num_step: Inner-loop step index (unused by this rule; kept for
                interface compatibility with per-step rules).
            tau: Unused; kept for interface compatibility.
        Returns:
            A new dict mapping each name to its updated parameter tensor.
        """
        return {
            key: names_weights_dict[key] - self.learning_rate * names_grads_wrt_params_dict[key]
            for key in names_weights_dict.keys()
        }
# + id="t4ogOl5vqpqM" colab_type="code" colab={}
class LSLRGradientDescentLearningRule(nn.Module):
    """Per-layer per-step learnable-learning-rate (LSLR) gradient descent rule.
    For a scalar error function `E(p[0], p_[1] ... )` of some set of
    potentially multidimensional parameters this attempts to find a local
    minimum of the loss function by applying updates to each parameter of the
    form
    p[i] := p[i] - learning_rate * dE/dp[i]
    where each parameter has its own (optionally learnable) learning rate per
    inner-loop step.
    The error function used in successive applications of these updates may be
    a stochastic estimator of the true error function (e.g. when the error with
    respect to only a subset of data-points is calculated) in which case this
    will correspond to a stochastic gradient descent learning rule.
    """

    def __init__(self, device, total_num_inner_loop_steps, use_learnable_learning_rates, init_learning_rate=1e-3):
        """Creates a new learning rule object.
        Args:
            device: Device the learning-rate tensors should live on.
            total_num_inner_loop_steps: Number of inner-loop steps; one rate per
                step (plus one) is kept for every parameter.
            use_learnable_learning_rates: Whether the per-step rates are trained.
            init_learning_rate: A positive scalar to scale gradient updates to
                the parameters by. This needs to be carefully set - if too large
                the learning dynamic will be unstable and may diverge, while
                if set too small learning will proceed very slowly.
        """
        super(LSLRGradientDescentLearningRule, self).__init__()
        print(init_learning_rate)
        assert init_learning_rate > 0., 'learning_rate should be positive.'
        # Fix: Tensor.to(device) is NOT in-place — the result must be assigned,
        # otherwise the rate tensor silently stays on the original device.
        self.init_learning_rate = (torch.ones(1) * init_learning_rate).to(device)
        self.total_num_inner_loop_steps = total_num_inner_loop_steps
        self.use_learnable_learning_rates = use_learnable_learning_rates

    def initialise(self, names_weights_dict):
        """Create one per-step learning-rate parameter for every named weight.
        Dots in parameter names are replaced with dashes because ParameterDict
        keys may not contain '.'.
        """
        self.names_learning_rates_dict = nn.ParameterDict()
        for key in names_weights_dict.keys():
            self.names_learning_rates_dict[key.replace(".", "-")] = nn.Parameter(
                data=torch.ones(self.total_num_inner_loop_steps + 1) * self.init_learning_rate,
                requires_grad=self.use_learnable_learning_rates)

    def reset(self):
        # Intentionally a no-op: learned rates persist across calls.
        # for key, param in self.names_learning_rates_dict.items():
        #     param.fill_(self.init_learning_rate)
        pass

    def update_params(self, names_weights_dict, names_grads_wrt_params_dict, num_step, tau=0.1):
        """Applies a single LSLR gradient descent update to all parameters.
        Args:
            names_weights_dict: Mapping of name -> current parameter tensor.
            names_grads_wrt_params_dict: Mapping of name -> gradient of the
                loss with respect to that parameter.
            num_step: Inner-loop step index selecting the per-step rate.
            tau: Unused; kept for interface compatibility.
        Returns:
            A new dict mapping each name to its updated parameter tensor.
        """
        return {
            key: names_weights_dict[key]
                 - self.names_learning_rates_dict[key.replace(".", "-")][num_step]
                 * names_grads_wrt_params_dict[key]
            for key in names_grads_wrt_params_dict.keys()
        }
# + id="nJtmTvZ8nqYN" colab_type="code" colab={}
def set_torch_seed(seed):
    """
    Seed PyTorch for the current experiment run.

    A numpy RandomState built from ``seed`` draws the actual torch seed and is
    returned so the caller can do further experiment-level sampling.

    :param seed: The seed (int).
    :return: A ``np.random.RandomState`` seeded with ``seed``.
    """
    rng = np.random.RandomState(seed=seed)
    torch.manual_seed(seed=rng.randint(0, 999999))
    return rng
# + id="iS8_p22sp3hW" colab_type="code" colab={}
class MAMLFewShotClassifier(nn.Module):
def __init__(self, im_shape, device, args):
    """
    Initializes a MAML few shot learning system.

    :param im_shape: The images input size, in (batch, c, h, w) shape.
    :param device: The device to use the model on.
    :param args: A namedtuple of arguments specifying various hyperparameters;
        reads batch_size, use_cuda, seed, num_classes_per_set,
        task_learning_rate, number_of_training_steps_per_iter,
        learnable_per_layer_per_step_inner_loop_learning_rate,
        meta_learning_rate, total_epochs and min_learning_rate here.
    """
    super(MAMLFewShotClassifier, self).__init__()
    self.args = args
    self.device = device
    self.batch_size = args.batch_size
    self.use_cuda = args.use_cuda
    self.im_shape = im_shape
    self.current_epoch = 0
    # Seed torch reproducibly; keep the returned RandomState for sampling.
    self.rng = set_torch_seed(seed=args.seed)
    # The base network whose parameters the inner loop adapts.
    self.classifier = VGGReLUNormNetwork(im_shape=self.im_shape, num_output_classes=self.args.num_classes_per_set,
                                         args=args, device=device, meta_classifier=True).to(device=self.device)
    self.task_learning_rate = args.task_learning_rate
    # Per-layer per-step (optionally learnable) inner-loop learning rates.
    self.inner_loop_optimizer = LSLRGradientDescentLearningRule(device=device,
                                                                init_learning_rate=self.task_learning_rate,
                                                                total_num_inner_loop_steps=self.args.number_of_training_steps_per_iter,
                                                                use_learnable_learning_rates=self.args.learnable_per_layer_per_step_inner_loop_learning_rate)
    self.inner_loop_optimizer.initialise(
        names_weights_dict=self.get_inner_loop_parameter_dict(params=self.classifier.named_parameters()))
    print("Inner Loop parameters")
    for key, value in self.inner_loop_optimizer.named_parameters():
        print(key, value.shape)
    # NOTE(review): use_cuda, device and args are re-assigned here a second
    # time; harmless duplication.
    self.use_cuda = args.use_cuda
    self.device = device
    self.args = args
    self.to(device)
    print("Outer Loop parameters")
    for name, param in self.named_parameters():
        if param.requires_grad:
            print(name, param.shape, param.device, param.requires_grad)
    # Outer-loop (meta) optimizer and cosine-annealed LR schedule.
    self.optimizer = optim.Adam(self.trainable_parameters(), lr=args.meta_learning_rate, amsgrad=False)
    self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer=self.optimizer, T_max=self.args.total_epochs,
                                                          eta_min=self.args.min_learning_rate)
def get_per_step_loss_importance_vector(self):
"""
Generates a tensor of dimensionality (num_inner_loop_steps) indicating the importance of each step's target
loss towards the optimization loss.
:return: A tensor to be used to compute the weighted average of the loss, useful for
the MSL (Multi Step Loss) mechanism.
"""
loss_weights = np.ones(shape=(self.args.number_of_training_steps_per_iter)) * (
1.0 / self.args.number_of_training_steps_per_iter)
decay_rate = 1.0 / self.args.number_of_training_steps_per_iter / self.args.multi_step_loss_num_epochs
min_value_for_non_final_losses = 0.03 / self.args.number_of_training_steps_per_iter
for i in range(len(loss_weights) - 1):
curr_value = np.maximum(loss_weights[i] - (self.current_epoch * decay_rate), min_value_for_non_final_losses)
loss_weights[i] = curr_value
curr_value = np.minimum(
loss_weights[-1] + (self.current_epoch * (self.args.number_of_training_steps_per_iter - 1) * decay_rate),
1.0 - ((self.args.number_of_training_steps_per_iter - 1) * min_value_for_non_final_losses))
loss_weights[-1] = curr_value
loss_weights = torch.Tensor(loss_weights).to(device=self.device)
return loss_weights
def get_inner_loop_parameter_dict(self, params):
    """
    Collect the parameters that take part in inner loop updates.

    :param params: An iterable of (name, parameter) pairs, e.g. the output
        of ``named_parameters()``.
    :return: A dict mapping parameter name to the parameter moved onto
        ``self.device``. Frozen parameters are always excluded; norm-layer
        parameters are excluded unless
        ``enable_inner_loop_optimizable_bn_params`` is set.
    """
    include_bn = self.args.enable_inner_loop_optimizable_bn_params
    return {
        name: param.to(device=self.device)
        for name, param in params
        if param.requires_grad and (include_bn or "norm_layer" not in name)
    }
def apply_inner_loop_update(self, loss, names_weights_copy, use_second_order, current_step_idx):
    """
    Perform one inner loop (support set) update on the fast weights.

    :param loss: Current step's loss w.r.t. the support set.
    :param names_weights_copy: Dict mapping parameter names to the weights
        to be updated.
    :param use_second_order: Whether to build a graph through the gradients
        (second order MAML).
    :param current_step_idx: Index of the current inner loop step.
    :return: Dict with the updated (name, parameter) pairs.
    """
    self.classifier.zero_grad(names_weights_copy)
    grad_list = torch.autograd.grad(loss,
                                    names_weights_copy.values(),
                                    create_graph=use_second_order)
    grads_by_name = {name: grad for name, grad in zip(names_weights_copy.keys(), grad_list)}
    # Delegate the actual parameter update to the (possibly learnable) inner
    # loop optimizer, which may use per-layer per-step learning rates.
    return self.inner_loop_optimizer.update_params(
        names_weights_dict=names_weights_copy,
        names_grads_wrt_params_dict=grads_by_name,
        num_step=current_step_idx)
def get_across_task_loss_metrics(self, total_losses, total_accuracies):
    """
    Aggregate per-task losses and accuracies into mean metrics.

    :param total_losses: List of per-task loss tensors.
    :param total_accuracies: List/array of per-sample accuracy values.
    :return: Dict with mean 'loss' (torch tensor) and mean 'accuracy' (float).
    """
    return {
        'loss': torch.mean(torch.stack(total_losses)),
        'accuracy': np.mean(total_accuracies),
    }
def forward(self, data_batch, epoch, use_second_order, use_multi_step_loss_optimization, num_steps, training_phase):
    """
    Runs a forward outer loop pass on the batch of tasks using the MAML/++ framework.
    :param data_batch: A data batch containing the support and target sets.
    :param epoch: Current epoch's index
    :param use_second_order: A boolean saying whether to use second order derivatives.
    :param use_multi_step_loss_optimization: Whether to use the multi step loss mechanism (True), which
    accumulates a weighted target loss at every inner step and improves stability, or to optimize using
    only the last step's target loss (False).
    :param num_steps: Number of inner loop steps.
    :param training_phase: Whether this is a training phase (True) or an evaluation phase (False)
    :return: A dictionary with the collected losses of the current outer forward propagation.
    """
    x_support_set, x_target_set, y_support_set, y_target_set = data_batch

    # b: batch of tasks, ncs: classes per set, spc: samples per class
    [b, ncs, spc] = y_support_set.shape

    self.num_classes_per_set = ncs

    total_losses = []
    total_accuracies = []
    per_task_target_preds = [[] for i in range(len(x_target_set))]
    self.classifier.zero_grad()
    for task_id, (x_support_set_task, y_support_set_task, x_target_set_task, y_target_set_task) in \
            enumerate(zip(x_support_set,
                          y_support_set,
                          x_target_set,
                          y_target_set)):
        task_losses = []
        task_accuracies = []  # NOTE(review): never written below — appears unused
        per_step_loss_importance_vectors = self.get_per_step_loss_importance_vector()
        # Fresh fast-weights copy for this task's inner loop adaptation.
        names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters())

        n, s, c, h, w = x_target_set_task.shape

        # Flatten the (set, sample) dimensions into a single batch dimension.
        x_support_set_task = x_support_set_task.view(-1, c, h, w)
        y_support_set_task = y_support_set_task.view(-1)
        x_target_set_task = x_target_set_task.view(-1, c, h, w)
        y_target_set_task = y_target_set_task.view(-1)

        for num_step in range(num_steps):

            # Adapt the fast weights on the support set; batch-norm running
            # statistics are backed up only on the first step.
            support_loss, support_preds = self.net_forward(x=x_support_set_task,
                                                           y=y_support_set_task,
                                                           weights=names_weights_copy,
                                                           backup_running_statistics=
                                                           True if (num_step == 0) else False,
                                                           training=True, num_step=num_step)

            names_weights_copy = self.apply_inner_loop_update(loss=support_loss,
                                                              names_weights_copy=names_weights_copy,
                                                              use_second_order=use_second_order,
                                                              current_step_idx=num_step)

            if use_multi_step_loss_optimization and training_phase and epoch < self.args.multi_step_loss_num_epochs:
                # MSL: accumulate an importance-weighted target loss at every step.
                target_loss, target_preds = self.net_forward(x=x_target_set_task,
                                                             y=y_target_set_task, weights=names_weights_copy,
                                                             backup_running_statistics=False, training=True,
                                                             num_step=num_step)

                task_losses.append(per_step_loss_importance_vectors[num_step] * target_loss)
            else:
                # Vanilla MAML: only the final step's target loss is optimized.
                if num_step == (self.args.number_of_training_steps_per_iter - 1):
                    target_loss, target_preds = self.net_forward(x=x_target_set_task,
                                                                 y=y_target_set_task, weights=names_weights_copy,
                                                                 backup_running_statistics=False, training=True,
                                                                 num_step=num_step)
                    task_losses.append(target_loss)

        # Predictions/accuracy come from the last inner step's target forward.
        per_task_target_preds[task_id] = target_preds.detach().cpu().numpy()
        _, predicted = torch.max(target_preds.data, 1)

        accuracy = predicted.float().eq(y_target_set_task.data.float()).cpu().float()
        task_losses = torch.sum(torch.stack(task_losses))
        total_losses.append(task_losses)
        total_accuracies.extend(accuracy)

        if not training_phase:
            # Undo the batch-norm statistics updates made during adaptation.
            self.classifier.restore_backup_stats()

    losses = self.get_across_task_loss_metrics(total_losses=total_losses,
                                               total_accuracies=total_accuracies)

    # Uses the last task's importance vector; it is identical for every task
    # within the same epoch.
    for idx, item in enumerate(per_step_loss_importance_vectors):
        losses['loss_importance_vector_{}'.format(idx)] = item.detach().cpu().numpy()

    return losses, per_task_target_preds
def net_forward(self, x, y, weights, backup_running_statistics, training, num_step):
    """
    Run the base model on a batch using an explicit set of weights.

    :param x: A data batch of shape (b, c, h, w).
    :param y: The target labels for ``x``.
    :param weights: Dict of parameters to use for this forward pass.
    :param backup_running_statistics: Whether to back up the batch norm
        running statistics so they can be restored after the run.
    :param training: Whether this pass is part of a training phase.
    :param num_step: Index of the current inner loop step.
    :return: (cross entropy loss w.r.t. ``y``, raw predictions).
    """
    predictions = self.classifier.forward(x=x, params=weights,
                                          training=training,
                                          backup_running_statistics=backup_running_statistics,
                                          num_step=num_step)
    return F.cross_entropy(input=predictions, target=y), predictions
def trainable_parameters(self):
    """
    Return an iterator over the model parameters that require gradients.
    """
    return (param for param in self.parameters() if param.requires_grad)
def train_forward_prop(self, data_batch, epoch):
    """
    Run an outer loop forward pass in training mode.

    :param data_batch: Support set and target set input/output pairs.
    :param epoch: Index of the current epoch.
    :return: (losses dict, per-task target predictions) for this step.
    """
    # Second order gradients are only enabled once training has passed the
    # configured first-to-second-order switch epoch.
    second_order = (self.args.second_order
                    and epoch > self.args.first_order_to_second_order_epoch)
    return self.forward(data_batch=data_batch, epoch=epoch,
                        use_second_order=second_order,
                        use_multi_step_loss_optimization=self.args.use_multi_step_loss_optimization,
                        num_steps=self.args.number_of_training_steps_per_iter,
                        training_phase=True)
def evaluation_forward_prop(self, data_batch, epoch):
    """
    Run an outer loop forward pass in evaluation mode.

    Always first order, always using the multi step loss path, with the
    evaluation-specific number of inner loop steps.

    :param data_batch: Support set and target set input/output pairs.
    :param epoch: Index of the current epoch.
    :return: (losses dict, per-task target predictions) for this step.
    """
    return self.forward(data_batch=data_batch, epoch=epoch,
                        use_second_order=False,
                        use_multi_step_loss_optimization=True,
                        num_steps=self.args.number_of_evaluation_steps_per_iter,
                        training_phase=False)
def meta_update(self, loss):
    """
    Apply one outer loop (meta) update to the meta-parameters.

    :param loss: The outer loop crossentropy loss to backpropagate through
        the meta-parameters before stepping the meta optimizer.
    """
    self.optimizer.zero_grad()
    loss.backward()
    if 'imagenet' in self.args.dataset_name:
        for name, param in self.classifier.named_parameters():
            # Gradient clipping for stability on imagenet-scale datasets.
            # Guard against param.grad being None: parameters that did not
            # participate in this step's graph receive no gradient and would
            # otherwise raise an AttributeError here.
            if param.requires_grad and param.grad is not None:
                param.grad.data.clamp_(-10, 10)  # not sure if this is necessary, more experiments are needed
    self.optimizer.step()
def run_train_iter(self, data_batch, epoch):
    """
    Runs an outer loop update step on the meta-model's parameters.
    :param data_batch: input data batch containing the support set and target set input, output pairs
    :param epoch: the index of the current epoch
    :return: The losses of the ran iteration.
    """
    epoch = int(epoch)
    # NOTE(review): scheduler.step is called before optimizer.step (which
    # happens inside meta_update below); PyTorch >= 1.1 recommends the
    # opposite order — confirm this ordering is intentional.
    self.scheduler.step(epoch=epoch)
    if self.current_epoch != epoch:
        self.current_epoch = epoch

    # Ensure the module is in training mode (affects e.g. batch norm).
    if not self.training:
        self.train()
    x_support_set, x_target_set, y_support_set, y_target_set = data_batch

    # Convert the incoming (array-like) batch to tensors with the right
    # dtypes on the target device.
    x_support_set = torch.Tensor(x_support_set).float().to(device=self.device)
    x_target_set = torch.Tensor(x_target_set).float().to(device=self.device)
    y_support_set = torch.Tensor(y_support_set).long().to(device=self.device)
    y_target_set = torch.Tensor(y_target_set).long().to(device=self.device)

    data_batch = (x_support_set, x_target_set, y_support_set, y_target_set)

    losses, per_task_target_preds = self.train_forward_prop(data_batch=data_batch, epoch=epoch)

    # Outer loop (meta) update on the aggregated loss.
    self.meta_update(loss=losses['loss'])
    losses['learning_rate'] = self.scheduler.get_lr()[0]
    # Clear gradients so the next iteration starts from a clean slate.
    self.optimizer.zero_grad()
    self.zero_grad()

    return losses, per_task_target_preds
def run_validation_iter(self, data_batch):
    """
    Run one evaluation step of the meta-model on a batch of tasks.

    :param data_batch: Support set and target set input/output pairs
        (array-like; converted to tensors here).
    :return: (losses dict, per-task target predictions) for this iteration.
    """
    # Make sure the module is in eval mode (affects e.g. batch norm).
    if self.training:
        self.eval()

    x_support, x_target, y_support, y_target = data_batch

    # Convert the incoming batch to tensors with the right dtypes on device.
    tensor_batch = (torch.Tensor(x_support).float().to(device=self.device),
                    torch.Tensor(x_target).float().to(device=self.device),
                    torch.Tensor(y_support).long().to(device=self.device),
                    torch.Tensor(y_target).long().to(device=self.device))

    losses, per_task_target_preds = self.evaluation_forward_prop(data_batch=tensor_batch,
                                                                 epoch=self.current_epoch)

    # losses['loss'].backward() # uncomment if you get the weird memory error
    # self.zero_grad()
    # self.optimizer.zero_grad()

    return losses, per_task_target_preds
def save_model(self, model_save_dir, state):
    """
    Persist the experiment state, including the network weights, to disk.

    :param model_save_dir: Path of the checkpoint file to write.
    :param state: Experiment state dictionary; its 'network' entry is
        overwritten in place with the current ``state_dict()`` before saving.
    """
    # Refresh the stored weights before serializing the full state dict.
    state['network'] = self.state_dict()
    torch.save(state, f=model_save_dir)
def load_model(self, model_save_dir, model_name, model_idx):
    """
    Load a checkpoint and restore the network parameters from it.

    :param model_save_dir: Directory holding the checkpoint files.
    :param model_name: Base name of the checkpoint to load.
    :param model_idx: Epoch index or 'latest' identifying the checkpoint.
    :return: The full experiment state dict stored in the checkpoint.
    """
    checkpoint_path = os.path.join(model_save_dir, "{}_{}".format(model_name, model_idx))
    state = torch.load(checkpoint_path)
    self.load_state_dict(state_dict=state['network'])
    return state
# + id="UmUi50wLrYr4" colab_type="code" colab={}
def save_to_json(filename, dict_to_store):
    """Serialize ``dict_to_store`` as JSON at the absolute path of ``filename``."""
    with open(os.path.abspath(filename), 'w') as out_file:
        json.dump(dict_to_store, fp=out_file)
def load_from_json(filename):
    """Deserialize and return the JSON content of ``filename``."""
    with open(filename, mode="r") as in_file:
        return json.load(fp=in_file)
def save_statistics(experiment_name, line_to_add, filename="summary_statistics.csv", create=False):
    """
    Write one row to an experiment's statistics CSV.

    :param experiment_name: Directory the statistics file lives in.
    :param line_to_add: Iterable of values to write as a single CSV row.
    :param filename: Name of the statistics file.
    :param create: When True, truncate/create the file; otherwise append.
    :return: The path of the statistics file written to.
    """
    summary_filename = "{}/{}".format(experiment_name, filename)
    # 'w' truncates for a fresh file, 'a' appends; the row-writing logic was
    # previously duplicated across both branches.
    with open(summary_filename, 'w' if create else 'a') as f:
        csv.writer(f).writerow(line_to_add)
    return summary_filename
def load_statistics(experiment_name, filename="summary_statistics.csv"):
    """
    Read an experiment's statistics CSV into a dict of column lists.

    :param experiment_name: Directory the statistics file lives in.
    :param filename: Name of the statistics file.
    :return: Dict mapping each header label to the list of its column
        values (all values kept as strings).
    """
    summary_filename = "{}/{}".format(experiment_name, filename)
    # Naive comma split, matching how the rows were written; no quoting is
    # expected in these files.
    with open(summary_filename, 'r') as f:
        header, *rows = [line.replace("\n", "").split(",") for line in f.readlines()]
    data_dict = {label: [] for label in header}
    for row in rows:
        for label, cell in zip(header, row):
            data_dict[label].append(cell)
    return data_dict
def build_experiment_folder(experiment_name):
    """
    Ensure the experiment directory tree exists and return its key subpaths.

    :param experiment_name: Name/path of the experiment root directory.
    :return: Generator yielding the absolute paths of (saved_models, logs,
        visual_outputs), in that order.
    """
    experiment_path = os.path.abspath(experiment_name)
    saved_models_filepath = "{}/{}".format(experiment_path, "saved_models")
    logs_filepath = "{}/{}".format(experiment_path, "logs")
    samples_filepath = "{}/{}".format(experiment_path, "visual_outputs")
    # exist_ok avoids the check-then-create race of os.path.exists followed
    # by os.makedirs (the previous pattern), and is idempotent across runs.
    for path in (experiment_path, logs_filepath, samples_filepath, saved_models_filepath):
        os.makedirs(path, exist_ok=True)
    outputs = (saved_models_filepath, logs_filepath, samples_filepath)
    # A generator is returned (as before) — callers unpack it into 3 names.
    return (os.path.abspath(item) for item in outputs)
def get_best_validation_model_statistics(experiment_name, filename="summary_statistics.csv"):
    """
    Return the best (lowest) validation loss and the epoch that produced it.

    :param experiment_name: Directory holding the statistics CSV.
    :param filename: The statistics file name.
    :return: (best mean d_val loss, epoch index at which it occurred)
    """
    stats = load_statistics(filename=filename, experiment_name=experiment_name)
    d_val_loss = np.array(stats['total_d_val_loss_mean'], dtype=np.float32)
    return np.min(d_val_loss), np.argmin(d_val_loss)
def create_json_experiment_log(experiment_log_dir, args, log_name="experiment_log.json"):
    """
    Initialize a fresh JSON experiment log from the run's hyperparameters.

    :param experiment_log_dir: Directory to write the log into.
    :param args: Namespace/namedtuple-like object of experiment hyperparameters.
    :param log_name: File name of the JSON log.
    """
    summary_filename = "{}/{}".format(experiment_log_dir, log_name)
    timestamp = datetime.datetime.now().timestamp()
    # Start from a shallow copy of the hyperparameters, then add the
    # bookkeeping fields the rest of the logging machinery expects.
    experiment_summary_dict = dict(vars(args))
    experiment_summary_dict["epoch_stats"] = dict()
    experiment_summary_dict["experiment_status"] = [(timestamp, "initialization")]
    experiment_summary_dict["experiment_initialization_time"] = timestamp
    with open(os.path.abspath(summary_filename), 'w') as f:
        json.dump(experiment_summary_dict, fp=f)
def update_json_experiment_log_dict(key, value, experiment_log_dir, log_name="experiment_log.json"):
    """
    Append ``value`` to the list stored under ``key`` in the JSON experiment log.

    :param key: Log entry whose list is extended (must already exist).
    :param value: Item to append.
    :param experiment_log_dir: Directory holding the JSON log.
    :param log_name: File name of the JSON log.
    """
    log_path = "{}/{}".format(experiment_log_dir, log_name)
    with open(log_path) as log_file:
        log_contents = json.load(fp=log_file)
    log_contents[key].append(value)
    with open(log_path, 'w') as log_file:
        json.dump(log_contents, fp=log_file)
def change_json_log_experiment_status(experiment_status, experiment_log_dir, log_name="experiment_log.json"):
    """
    Record a timestamped status change in the JSON experiment log.

    :param experiment_status: Status label to record (e.g. "training").
    :param experiment_log_dir: Directory holding the JSON log.
    :param log_name: File name of the JSON log.
    """
    stamped_status = (datetime.datetime.now().timestamp(), experiment_status)
    update_json_experiment_log_dict(key="experiment_status", value=stamped_status,
                                    experiment_log_dir=experiment_log_dir, log_name=log_name)
def update_json_experiment_log_epoch_stats(epoch_stats, experiment_log_dir, log_name="experiment_log.json"):
    """
    Merge one epoch's statistics into the 'epoch_stats' section of the log.

    :param epoch_stats: Mapping of stat name to a numeric value for this epoch.
    :param experiment_log_dir: Directory holding the JSON log.
    :param log_name: File name of the JSON log.
    :return: The path of the JSON log that was updated.
    """
    log_path = "{}/{}".format(experiment_log_dir, log_name)
    with open(log_path) as log_file:
        log_contents = json.load(fp=log_file)
    stats_section = log_contents["epoch_stats"]
    # Each stat accumulates one float per epoch under its own key.
    for stat_name, stat_value in epoch_stats.items():
        stats_section.setdefault(stat_name, []).append(float(stat_value))
    log_contents['epoch_stats'] = stats_section
    with open(log_path, 'w') as log_file:
        json.dump(log_contents, fp=log_file)
    return log_path
# + id="xCz8qhr0w2g3" colab_type="code" colab={}
class ExperimentBuilder(object):
    # Orchestrates the full train/val/test loop around a meta learning system:
    # checkpointing, CSV/JSON statistics logging and progress reporting.
    # NOTE(review): several methods reference a module-level `writer`
    # (presumably a tensorboard SummaryWriter) that is defined elsewhere in
    # the file — confirm it exists before these methods run.
    def __init__(self, args, data, model, device):
        """
        Initializes an experiment builder using a named tuple (args), a data provider (data), a meta learning system
        (model) and a device (e.g. gpu/cpu/n)
        :param args: A namedtuple containing all experiment hyperparameters
        :param data: A data provider of instance MetaLearningSystemDataLoader
        :param model: A meta learning system instance
        :param device: Device/s to use for the experiment
        """
        self.args, self.device = args, device

        self.model = model
        self.saved_models_filepath, self.logs_filepath, self.samples_filepath = build_experiment_folder(
            experiment_name=self.args.experiment_name)

        self.total_losses = dict()
        self.state = dict()
        self.state['best_val_acc'] = 0.
        self.state['best_val_iter'] = 0
        self.state['current_iter'] = 0
        self.state['current_iter'] = 0  # NOTE(review): duplicate assignment, harmless

        self.start_epoch = 0
        self.max_models_to_save = self.args.max_models_to_save
        self.create_summary_csv = False

        if self.args.continue_from_epoch == 'from_scratch':
            self.create_summary_csv = True

        elif self.args.continue_from_epoch == 'latest':
            checkpoint = os.path.join(self.saved_models_filepath, "train_model_latest")
            print("attempting to find existing checkpoint", )
            if os.path.exists(checkpoint):
                self.state = \
                    self.model.load_model(model_save_dir=self.saved_models_filepath, model_name="train_model",
                                          model_idx='latest')
                self.start_epoch = int(self.state['current_iter'] / self.args.total_iter_per_epoch)

            else:
                # No checkpoint found: fall back to a fresh run.
                self.args.continue_from_epoch = 'from_scratch'
                self.create_summary_csv = True
        elif int(self.args.continue_from_epoch) >= 0:
            self.state = \
                self.model.load_model(model_save_dir=self.saved_models_filepath, model_name="train_model",
                                      model_idx=self.args.continue_from_epoch)
            self.start_epoch = int(self.state['current_iter'] / self.args.total_iter_per_epoch)

        self.data = data(args=args, current_iter=self.state['current_iter'])

        print("train_seed {}, val_seed: {}, at start time".format(self.data.dataset.seed["train"],
                                                                  self.data.dataset.seed["val"]))
        self.total_epochs_before_pause = self.args.total_epochs_before_pause
        self.state['best_epoch'] = int(self.state['best_val_iter'] / self.args.total_iter_per_epoch)
        self.epoch = int(self.state['current_iter'] / self.args.total_iter_per_epoch)
        # Only the omniglot pipeline uses image augmentation during training.
        self.augment_flag = True if 'omniglot' in self.args.dataset_name.lower() else False
        self.start_time = time.time()
        self.epochs_done_in_this_run = 0
        print(self.state['current_iter'], int(self.args.total_iter_per_epoch * self.args.total_epochs))

    def build_summary_dict(self, total_losses, phase, summary_losses=None):
        """
        Builds/Updates a summary dict directly from the metric dict of the current iteration.
        :param total_losses: Current dict with total losses (not aggregations) from experiment
        :param phase: Current training phase
        :param summary_losses: Current summarised (aggregated/summarised) losses stats means, stdv etc.
        :return: A new summary dict with the updated summary statistics information.
        """
        if summary_losses is None:
            summary_losses = dict()

        # Record mean and std of every tracked metric, prefixed by phase.
        for key in total_losses:
            summary_losses["{}_{}_mean".format(phase, key)] = np.mean(total_losses[key])
            summary_losses["{}_{}_std".format(phase, key)] = np.std(total_losses[key])

        return summary_losses

    def build_loss_summary_string(self, summary_losses):
        """
        Builds a progress bar summary string given current summary losses dictionary
        :param summary_losses: Current summary statistics
        :return: A summary string ready to be shown to humans.
        """
        output_update = ""
        # Only loss/accuracy metrics are shown in the progress bar.
        for key, value in zip(list(summary_losses.keys()), list(summary_losses.values())):
            if "loss" in key or "accuracy" in key:
                value = float(value)
                output_update += "{}: {:.4f}, ".format(key, value)

        return output_update

    def merge_two_dicts(self, first_dict, second_dict):
        """Given two dicts, merge them into a new dict as a shallow copy."""
        z = first_dict.copy()
        z.update(second_dict)
        return z

    def train_iteration(self, train_sample, sample_idx, epoch_idx, total_losses, current_iter, pbar_train):
        """
        Runs a training iteration, updates the progress bar and returns the total and current epoch train losses.
        :param train_sample: A sample from the data provider
        :param sample_idx: The index of the incoming sample, in relation to the current training run.
        :param epoch_idx: The epoch index.
        :param total_losses: The current total losses dictionary to be updated.
        :param current_iter: The current training iteration in relation to the whole experiment.
        :param pbar_train: The progress bar of the training.
        :return: Updates total_losses, train_losses, current_iter
        """
        x_support_set, x_target_set, y_support_set, y_target_set, seed = train_sample
        data_batch = (x_support_set, x_target_set, y_support_set, y_target_set)

        if sample_idx == 0:
            print("shape of data", x_support_set.shape, x_target_set.shape, y_support_set.shape,
                  y_target_set.shape)

        losses, _ = self.model.run_train_iter(data_batch=data_batch, epoch=epoch_idx)

        # Accumulate each scalar metric into its running list.
        for key, value in zip(list(losses.keys()), list(losses.values())):
            if key not in total_losses:
                total_losses[key] = [float(value)]
            else:
                total_losses[key].append(float(value))

        train_losses = self.build_summary_dict(total_losses=total_losses, phase="train")
        train_output_update = self.build_loss_summary_string(losses)

        pbar_train.update(1)
        pbar_train.set_description("training phase {} -> {}".format(self.epoch, train_output_update))

        current_iter += 1

        return train_losses, total_losses, current_iter

    def evaluation_iteration(self, val_sample, total_losses, pbar_val, phase):
        """
        Runs a validation iteration, updates the progress bar and returns the total and current epoch val losses.
        :param val_sample: A sample from the data provider
        :param total_losses: The current total losses dictionary to be updated.
        :param pbar_val: The progress bar of the val stage.
        :param phase: Label used to prefix the summary keys (e.g. 'val').
        :return: The updated val_losses, total_losses
        """
        x_support_set, x_target_set, y_support_set, y_target_set, seed = val_sample
        data_batch = (
            x_support_set, x_target_set, y_support_set, y_target_set)

        losses, _ = self.model.run_validation_iter(data_batch=data_batch)
        for key, value in zip(list(losses.keys()), list(losses.values())):
            if key not in total_losses:
                total_losses[key] = [float(value)]
            else:
                total_losses[key].append(float(value))

        val_losses = self.build_summary_dict(total_losses=total_losses, phase=phase)
        val_output_update = self.build_loss_summary_string(losses)

        pbar_val.update(1)
        pbar_val.set_description(
            "val_phase {} -> {}".format(self.epoch, val_output_update))

        return val_losses, total_losses

    def test_evaluation_iteration(self, val_sample, model_idx, sample_idx, per_model_per_batch_preds, pbar_test):
        """
        Runs a test iteration for one saved model, collecting its per-task
        predictions, and updates the progress bar.
        :param val_sample: A sample from the data provider
        :param model_idx: Index of the (top-n) model being evaluated.
        :param sample_idx: Index of the incoming test sample.
        :param per_model_per_batch_preds: Nested list collecting predictions per model per batch.
        :param pbar_test: The progress bar of the test stage.
        :return: The updated per_model_per_batch_preds
        """
        x_support_set, x_target_set, y_support_set, y_target_set, seed = val_sample
        data_batch = (
            x_support_set, x_target_set, y_support_set, y_target_set)

        losses, per_task_preds = self.model.run_validation_iter(data_batch=data_batch)

        per_model_per_batch_preds[model_idx].extend(list(per_task_preds))

        test_output_update = self.build_loss_summary_string(losses)

        pbar_test.update(1)
        pbar_test.set_description(
            "test_phase {} -> {}".format(self.epoch, test_output_update))

        return per_model_per_batch_preds

    def save_models(self, model, epoch, state):
        """
        Saves two separate instances of the current model. One to be kept for history and reloading later and another
        one marked as "latest" to be used by the system for the next epoch training. Useful when the training/val
        process is interrupted or stopped. Leads to fault tolerant training and validation systems that can continue
        from where they left off before.
        :param model: Current meta learning model of any instance within the few_shot_learning_system.py
        :param epoch: Current epoch
        :param state: Current model and experiment state dict.
        """
        model.save_model(model_save_dir=os.path.join(self.saved_models_filepath, "train_model_{}".format(int(epoch))),
                         state=state)

        model.save_model(model_save_dir=os.path.join(self.saved_models_filepath, "train_model_latest"),
                         state=state)

        print("saved models to", self.saved_models_filepath)

    def pack_and_save_metrics(self, start_time, create_summary_csv, train_losses, val_losses, state, step):
        """
        Given current epochs start_time, train losses, val losses and whether to create a new stats csv file, pack stats
        and save into a statistics csv file. Return a new start time for the new epoch.
        :param start_time: The start time of the current epoch
        :param create_summary_csv: A boolean variable indicating whether to create a new statistics file or
        append results to existing one
        :param train_losses: A dictionary with the current train losses
        :param val_losses: A dictionary with the current val loss
        :param state: The experiment state dict; per-epoch statistics are accumulated into it.
        :param step: Global step index used for the scalar written to `writer`.
        :return: The current time, to be used for the next epoch, and the updated state.
        """
        epoch_summary_losses = self.merge_two_dicts(first_dict=train_losses, second_dict=val_losses)

        if 'per_epoch_statistics' not in state:
            state['per_epoch_statistics'] = dict()

        # Accumulate this epoch's metrics into the per-epoch history.
        for key, value in epoch_summary_losses.items():

            if key not in state['per_epoch_statistics']:
                state['per_epoch_statistics'][key] = [value]
            else:
                state['per_epoch_statistics'][key].append(value)

        epoch_summary_string = self.build_loss_summary_string(epoch_summary_losses)
        epoch_summary_losses["epoch"] = self.epoch
        epoch_summary_losses['epoch_run_time'] = time.time() - start_time

        if create_summary_csv:
            # First epoch of a fresh run: write the CSV header row.
            self.summary_statistics_filepath = save_statistics(self.logs_filepath, list(epoch_summary_losses.keys()),
                                                               create=True)
            self.create_summary_csv = False

        start_time = time.time()
        # NOTE(review): relies on a module-level `writer`; the scalar logged
        # is the epoch index itself — confirm that is the intended value.
        writer.add_scalar('epoch_summary_losses', epoch_summary_losses["epoch"], step)
        print("epoch {} -> {}".format(epoch_summary_losses["epoch"], epoch_summary_string))

        self.summary_statistics_filepath = save_statistics(self.logs_filepath,
                                                           list(epoch_summary_losses.values()))
        return start_time, state

    def evaluated_test_set_using_the_best_models(self, top_n_models):
        # Evaluate the test set as an ensemble of the top-n checkpoints
        # ranked by mean validation accuracy; predictions are averaged
        # across models before taking the argmax.
        per_epoch_statistics = self.state['per_epoch_statistics']
        val_acc = np.copy(per_epoch_statistics['val_accuracy_mean'])
        val_idx = np.array([i for i in range(len(val_acc))])
        sorted_idx = np.argsort(val_acc, axis=0).astype(dtype=np.int32)[::-1][:top_n_models]

        sorted_val_acc = val_acc[sorted_idx]
        val_idx = val_idx[sorted_idx]
        print(sorted_idx)
        print(sorted_val_acc)

        top_n_idx = val_idx[:top_n_models]
        per_model_per_batch_preds = [[] for i in range(top_n_models)]
        per_model_per_batch_targets = [[] for i in range(top_n_models)]
        test_losses = [dict() for i in range(top_n_models)]  # NOTE(review): overwritten below, initial value unused
        for idx, model_idx in enumerate(top_n_idx):
            # Checkpoints are saved 1-indexed by epoch, hence model_idx + 1.
            self.state = \
                self.model.load_model(model_save_dir=self.saved_models_filepath, model_name="train_model",
                                      model_idx=model_idx + 1)
            with tqdm.tqdm(total=int(self.args.num_evaluation_tasks / self.args.batch_size)) as pbar_test:
                for sample_idx, test_sample in enumerate(
                        self.data.get_test_batches(total_batches=int(self.args.num_evaluation_tasks / self.args.batch_size),
                                                   augment_images=False)):
                    #print(test_sample[4])
                    per_model_per_batch_targets[idx].extend(np.array(test_sample[3]))
                    per_model_per_batch_preds = self.test_evaluation_iteration(val_sample=test_sample,
                                                                               sample_idx=sample_idx,
                                                                               model_idx=idx,
                                                                               per_model_per_batch_preds=per_model_per_batch_preds,
                                                                               pbar_test=pbar_test)
        # for i in range(top_n_models):
        #     print("test assertion", 0)
        #     print(per_model_per_batch_targets[0], per_model_per_batch_targets[i])
        #     assert np.equal(np.array(per_model_per_batch_targets[0]), np.array(per_model_per_batch_targets[i]))

        # Ensemble: average predictions over the top-n models, then argmax
        # over the class dimension.
        per_batch_preds = np.mean(per_model_per_batch_preds, axis=0)
        #print(per_batch_preds.shape)
        per_batch_max = np.argmax(per_batch_preds, axis=2)
        per_batch_targets = np.array(per_model_per_batch_targets[0]).reshape(per_batch_max.shape)
        #print(per_batch_max)
        accuracy = np.mean(np.equal(per_batch_targets, per_batch_max))
        accuracy_std = np.std(np.equal(per_batch_targets, per_batch_max))

        test_losses = {"test_accuracy_mean": accuracy, "test_accuracy_std": accuracy_std}

        _ = save_statistics(self.logs_filepath,
                            list(test_losses.keys()),
                            create=True, filename="test_summary.csv")

        summary_statistics_filepath = save_statistics(self.logs_filepath,
                                                      list(test_losses.values()),
                                                      create=False, filename="test_summary.csv")
        print(test_losses)
        print("saved test performance at", summary_statistics_filepath)

    def run_experiment(self):
        """
        Runs a full training experiment with evaluations of the model on the val set at every epoch. Furthermore,
        will return the test set evaluation results on the best performing validation model.
        """
        with tqdm.tqdm(initial=self.state['current_iter'],
                       total=int(self.args.total_iter_per_epoch * self.args.total_epochs)) as pbar_train:

            while (self.state['current_iter'] < (self.args.total_epochs * self.args.total_iter_per_epoch)) and (self.args.evaluate_on_test_set_only == False):

                for train_sample_idx, train_sample in enumerate(
                        self.data.get_train_batches(total_batches=int(self.args.total_iter_per_epoch *
                                                                      self.args.total_epochs) - self.state[
                                                                          'current_iter'],
                                                    augment_images=self.augment_flag)):
                    # print(self.state['current_iter'], (self.args.total_epochs * self.args.total_iter_per_epoch))
                    train_losses, total_losses, self.state['current_iter'] = self.train_iteration(
                        train_sample=train_sample,
                        total_losses=self.total_losses,
                        epoch_idx=(self.state['current_iter'] /
                                   self.args.total_iter_per_epoch),
                        pbar_train=pbar_train,
                        current_iter=self.state['current_iter'],
                        sample_idx=self.state['current_iter'])

                    # Epoch boundary: run validation, checkpoint, and log.
                    if self.state['current_iter'] % self.args.total_iter_per_epoch == 0:

                        total_losses = dict()
                        val_losses = dict()
                        with tqdm.tqdm(total=int(self.args.num_evaluation_tasks / self.args.batch_size)) as pbar_val:
                            for _, val_sample in enumerate(
                                    self.data.get_val_batches(total_batches=int(self.args.num_evaluation_tasks / self.args.batch_size),
                                                              augment_images=False)):
                                val_losses, total_losses = self.evaluation_iteration(val_sample=val_sample,
                                                                                     total_losses=total_losses,
                                                                                     pbar_val=pbar_val, phase='val')
                            if val_losses["val_accuracy_mean"] > self.state['best_val_acc']:
                                print("Best validation accuracy", val_losses["val_accuracy_mean"])
                                # NOTE(review): relies on a module-level `writer` defined elsewhere.
                                writer.add_scalar("best validation accuracy", val_losses["val_accuracy_mean"],
                                                  self.epoch)
                                self.state['best_val_acc'] = val_losses["val_accuracy_mean"]
                                self.state['best_val_iter'] = self.state['current_iter']
                                self.state['best_epoch'] = int(
                                    self.state['best_val_iter'] / self.args.total_iter_per_epoch)

                        self.epoch += 1
                        self.state = self.merge_two_dicts(first_dict=self.merge_two_dicts(first_dict=self.state,
                                                                                          second_dict=train_losses),
                                                          second_dict=val_losses)

                        self.save_models(model=self.model, epoch=self.epoch, state=self.state)

                        self.start_time, self.state = self.pack_and_save_metrics(start_time=self.start_time,
                                                                                 create_summary_csv=self.create_summary_csv,
                                                                                 train_losses=train_losses,
                                                                                 val_losses=val_losses,
                                                                                 state=self.state,
                                                                                 step=self.epoch)

                        self.total_losses = dict()

                        self.epochs_done_in_this_run += 1

                        save_to_json(filename=os.path.join(self.logs_filepath, "summary_statistics.json"),
                                     dict_to_store=self.state['per_epoch_statistics'])

                        if self.epochs_done_in_this_run >= self.total_epochs_before_pause:
                            # Cooperative pause: exit so an outer scheduler can
                            # resume later from the 'latest' checkpoint.
                            print("train_seed {}, val_seed: {}, at pause time".format(self.data.dataset.seed["train"],
                                                                                      self.data.dataset.seed["val"]))
                            sys.exit()

        self.evaluated_test_set_using_the_best_models(top_n_models=5)
# + id="ClIPkzMEXvKS" colab_type="code" colab={}
from torch import cuda
def get_args():
    """Parse the MAML++ command-line options and pick the compute device.

    Defaults may be overridden by a JSON experiment-config file
    (``--name_of_args_json_file``); string booleans ("True"/"False") are
    normalized to real booleans and ``dataset_path`` is resolved against
    the ``DATASET_DIR`` environment variable.

    Returns:
        tuple: ``(args, device)`` where ``args`` is a ``Bunch`` exposing
        every option as an attribute and ``device`` is the torch/cuda
        device to run on.
    """
    import argparse
    import os
    import torch
    import json
    parser = argparse.ArgumentParser(description='Welcome to the MAML++ training and inference system')
    parser.add_argument('--batch_size', nargs="?", type=int, default=32, help='Batch_size for experiment')
    parser.add_argument('--image_height', nargs="?", type=int, default=28)
    parser.add_argument('--image_width', nargs="?", type=int, default=28)
    parser.add_argument('--image_channels', nargs="?", type=int, default=1)
    parser.add_argument('--reset_stored_filepaths', type=str, default="False")
    parser.add_argument('--reverse_channels', type=str, default="False")
    parser.add_argument('--num_of_gpus', type=int, default=1)
    parser.add_argument('--indexes_of_folders_indicating_class', nargs='+', default=[-2, -3])
    parser.add_argument('--train_val_test_split', nargs='+', default=[0.73982737361, 0.26, 0.13008631319])
    parser.add_argument('--samples_per_iter', nargs="?", type=int, default=1)
    parser.add_argument('--labels_as_int', type=str, default="False")
    parser.add_argument('--seed', type=int, default=104)
    parser.add_argument('--gpu_to_use', type=int)
    parser.add_argument('--num_dataprovider_workers', nargs="?", type=int, default=4)
    parser.add_argument('--max_models_to_save', nargs="?", type=int, default=5)
    parser.add_argument('--dataset_name', type=str, default="omniglot_dataset")
    parser.add_argument('--dataset_path', type=str, default="datasets/omniglot_dataset")
    parser.add_argument('--reset_stored_paths', type=str, default="False")
    parser.add_argument('--experiment_name', nargs="?", type=str)
    parser.add_argument('--architecture_name', nargs="?", type=str)
    parser.add_argument('--continue_from_epoch', nargs="?", type=str, default='latest', help='Continue from checkpoint of epoch')
    parser.add_argument('--dropout_rate_value', type=float, default=0.3, help='Dropout_rate_value')
    parser.add_argument('--num_target_samples', type=int, default=15, help='Dropout_rate_value')
    parser.add_argument('--second_order', type=str, default="False", help='Dropout_rate_value')
    parser.add_argument('--total_epochs', type=int, default=200, help='Number of epochs per experiment')
    parser.add_argument('--total_iter_per_epoch', type=int, default=500, help='Number of iters per epoch')
    parser.add_argument('--min_learning_rate', type=float, default=0.00001, help='Min learning rate')
    parser.add_argument('--meta_learning_rate', type=float, default=0.001, help='Learning rate of overall MAML system')
    parser.add_argument('--meta_opt_bn', type=str, default="False")
    parser.add_argument('--task_learning_rate', type=float, default=0.1, help='Learning rate per task gradient step')
    parser.add_argument('--norm_layer', type=str, default="batch_norm")
    parser.add_argument('--max_pooling', type=str, default="False")
    parser.add_argument('--per_step_bn_statistics', type=str, default="False")
    parser.add_argument('--num_classes_per_set', type=int, default=20, help='Number of classes to sample per set')
    parser.add_argument('--cnn_num_blocks', type=int, default=4, help='Number of classes to sample per set')
    parser.add_argument('--number_of_training_steps_per_iter', type=int, default=1, help='Number of classes to sample per set')
    parser.add_argument('--number_of_evaluation_steps_per_iter', type=int, default=1, help='Number of classes to sample per set')
    parser.add_argument('--cnn_num_filters', type=int, default=64, help='Number of classes to sample per set')
    parser.add_argument('--cnn_blocks_per_stage', type=int, default=1,
                        help='Number of classes to sample per set')
    parser.add_argument('--num_samples_per_class', type=int, default=1, help='Number of samples per set to sample')
    parser.add_argument('--name_of_args_json_file', type=str, default="./experiment_config/omniglot_maml++-omniglot_1_8_0.1_64_5_0.json")
    # parser.add_argument('--num_stages', default=4)
    # parser.add_argument('--conv_padding', default=True)
    # Notebook use: parse an empty argv so defaults (plus the JSON file) win.
    args = parser.parse_args('')
    args_dict = vars(args)
    # "None" (the string) is the sentinel for "no config file". Compare by
    # value: ``is not`` tests identity and only works by accident of string
    # interning (CPython emits a SyntaxWarning for it).
    if args.name_of_args_json_file != "None":
        args_dict = extract_args_from_json(args.name_of_args_json_file, args_dict)
    for key in list(args_dict.keys()):
        # Normalize "True"/"False" strings (from argparse defaults or JSON)
        # into real booleans.
        if str(args_dict[key]).lower() == "true":
            args_dict[key] = True
        elif str(args_dict[key]).lower() == "false":
            args_dict[key] = False
        if key == "dataset_path":
            # Datasets live under $DATASET_DIR; raises KeyError if unset.
            args_dict[key] = os.path.join(os.environ['DATASET_DIR'], args_dict[key])
            # Print the resolved path once (the original joined DATASET_DIR a
            # second time here, printing a doubled prefix).
            print(key, args_dict[key])
        print(key, args_dict[key], type(args_dict[key]))
    args = Bunch(args_dict)
    args.use_cuda = torch.cuda.is_available()
    # gpu_to_use == -1 forces CPU execution even when CUDA is available.
    if args.gpu_to_use == -1:
        args.use_cuda = False
    if args.use_cuda:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_to_use)
        device = cuda.current_device()
    else:
        device = torch.device('cpu')
    return args, device
class Bunch(object):
    """Thin attribute-access wrapper around a plain dictionary.

    Every key of the mapping passed at construction time becomes an
    attribute of the instance, e.g. ``Bunch({'lr': 0.1}).lr == 0.1``.
    """

    def __init__(self, adict):
        # Copy the mapping entries straight into the instance namespace.
        for key, value in adict.items():
            self.__dict__[key] = value
def extract_args_from_json(json_file_path, args_dict):
    """Overlay values from a JSON experiment config onto ``args_dict``.

    Keys mentioning checkpoint continuation ("continue_from") or GPU
    selection ("gpu_to_use") are deliberately left untouched so they stay
    under command-line control.

    Args:
        json_file_path: Path of the JSON config file to read.
        args_dict: Dict of parsed argparse values, mutated in place.

    Returns:
        The (mutated) ``args_dict``.
    """
    import json
    with open(json_file_path) as config_file:
        config = json.load(fp=config_file)
    # These option families must remain command-line authoritative.
    protected = ("continue_from", "gpu_to_use")
    for key in config.keys():
        if any(marker in key for marker in protected):
            continue
        args_dict[key] = config[key]
    return args_dict
# + id="GmWqo4GQ66oG" colab_type="code" outputId="bf944b05-3d91-4b35-bd3e-506d94ab78e5" colab={"base_uri": "https://localhost:8080/", "height": 2675}
# Combines the arguments, model, data and experiment builders to run an experiment
args, device = get_args()
print(args.image_channels)
# im_shape's leading 2 looks like a dummy batch dimension used only for
# shape inference -- NOTE(review): confirm against MAMLFewShotClassifier.
model = MAMLFewShotClassifier(args=args, device=device,
                              im_shape=(2, args.image_channels, args.image_height, args.image_width))
# maybe_unzip_dataset(args=args)
# NOTE(review): the data-loader CLASS (not an instance) is assigned here;
# presumably ExperimentBuilder instantiates it itself -- confirm.
data = MetaLearningSystemDataLoader
maml_system = ExperimentBuilder(model=model, data=data, args=args, device=device)
maml_system.run_experiment()
# + id="gMlXOtEcMl8B" colab_type="code" colab={}
| maml++.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.12 64-bit (''minerl'': conda)'
# name: python3612jvsc74a57bd0861f5c9d309eea441f39908c944bb9e7b23b75b8ced426f8c48b5d0e22d31433
# ---
import os
import sys
# Make the project root (the notebook's parent directory) importable so the
# `models` and `customLoader` modules below resolve.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
# +
import torch
import numpy as np
import matplotlib.pylab as plt
from os.path import join
from pathlib import Path
from models.VQVAE import VectorQuantizerEMA, Encoder, Decoder
import pytorch_lightning as pl
from torchvision.utils import make_grid
from customLoader import CustomMinecraftData
from torchvision.transforms import transforms
# -
class VQVAE(pl.LightningModule):
    """Vector-Quantised VAE LightningModule for Minecraft frame data.

    NOTE(review): this notebook cell references several names that are not
    imported in this file (``VectorQuantizer`` when ``decay == 0``, ``F``
    for ``F.mse_loss``, ``DataLoader`` and ``wandb``) -- confirm they are
    supplied by the execution environment before running.
    """

    def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens,
                 num_embeddings, embedding_dim, commitment_cost, decay=0,
                 batch_size=256, lr=0.001, split=0.95, img_size=64):
        super(VQVAE, self).__init__()
        self.batch_size = batch_size  # mini-batch size for both loaders
        self.lr = lr                  # Adam learning rate
        self.split = split            # train/validation split fraction
        self._encoder = Encoder(3, num_hiddens,
                                num_residual_layers,
                                num_residual_hiddens)
        # self._pre_vq_conv = nn.Conv2d(in_channels=num_hiddens,
        #                               out_channels=embedding_dim,
        #                               kernel_size=1,
        #                               stride=1)
        # EMA-updated codebook when decay > 0, otherwise the plain quantizer.
        if decay > 0.0:
            self._vq_vae = VectorQuantizerEMA(num_embeddings, embedding_dim,
                                              commitment_cost, decay)
        else:
            # NOTE(review): VectorQuantizer is not imported above -- this
            # branch would raise NameError if decay == 0; confirm intent.
            self._vq_vae = VectorQuantizer(num_embeddings, embedding_dim,
                                           commitment_cost)
        self._decoder = Decoder(num_hiddens,
                                num_hiddens,
                                num_residual_layers,
                                num_residual_hiddens)
        # Example input so Lightning can trace/log the computational graph.
        self.example_input_array = torch.rand(batch_size, 3, img_size, img_size)
        # Shifts pixel values from [0, 1] to [-0.5, 0.5].
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5,0.5,0.5), (1.0,1.0,1.0))
        ])

    def forward(self, x):
        """Encode, quantize and decode; return (vq_loss, reconstruction, perplexity)."""
        z = self._encoder(x)
        # z = self._pre_vq_conv(z)
        loss, quantized, perplexity, _ = self._vq_vae(z)
        x_recon = self._decoder(quantized)
        return loss, x_recon, perplexity

    def training_step(self, batch, batch_idx):
        """One training step: reconstruction MSE plus the codebook (VQ) loss."""
        vq_loss, data_recon, perplexity = self(batch)
        recon_error = F.mse_loss(data_recon, batch)
        loss = recon_error + vq_loss
        self.log('loss/train', loss, on_step=False, on_epoch=True)
        self.log('perplexity/train', perplexity, on_step=False, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """Validation step; logs a reconstruction grid for the first batch."""
        vq_loss, data_recon, perplexity = self(batch)
        recon_error = F.mse_loss(data_recon, batch)
        loss = recon_error + vq_loss
        self.log('loss/val', loss, on_step=False, on_epoch=True)
        self.log('perplexity/val', perplexity, on_step=False, on_epoch=True)
        if batch_idx == 0:
            # Log the first 64 reconstructions as one image grid to W&B.
            grid = make_grid(data_recon[:64].cpu().data)
            grid = grid.permute(1,2,0)
            self.logger.experiment.log({"Images": [wandb.Image(grid.numpy())]})
        return loss

    def configure_optimizers(self):
        """Adam over all parameters with a small weight decay."""
        return torch.optim.Adam(params=self.parameters(), lr=self.lr, weight_decay=1e-5)

    def train_dataloader(self):
        """Training split of the CustomTrajectories1 dataset."""
        train_dataset = CustomMinecraftData('CustomTrajectories1', 'train', self.split, transform=self.transform)
        train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=2)
        return train_dataloader

    def val_dataloader(self):
        """Validation split of the CustomTrajectories1 dataset (no shuffling)."""
        val_dataset = CustomMinecraftData('CustomTrajectories1', 'val', self.split, transform=self.transform)
        val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=2)
        return val_dataloader

    def get_centroids(self, idx):
        """Decode the codebook embedding(s) selected by ``idx`` into an image.

        NOTE(review): the hard-coded view (1, 2, 2, 64) assumes a 2x2 latent
        grid with embedding_dim == 64 -- confirm it matches the checkpoint.
        """
        z_idx = torch.tensor(idx).cuda()
        embeddings = torch.index_select(self._vq_vae._embedding.weight.detach(), dim=0, index=z_idx)
        embeddings = embeddings.view((1,2,2,64))
        embeddings = embeddings.permute(0, 3, 1, 2).contiguous()
        return self._decoder(embeddings)

    def save_encoding_indices(self, x):
        """Return the codebook indices assigned to the encoding of ``x``.

        NOTE(review): ``self._pre_vq_conv`` is commented out in __init__, so
        this method would raise AttributeError as written -- confirm intent.
        """
        z = self._encoder(x)
        z = self._pre_vq_conv(z)
        _, _, _, encoding_indices = self._vq_vae(z)
        return encoding_indices
# Hyper-parameters used to build the (untrained) model skeleton; actual
# weights are loaded from a checkpoint in the next cell.
conf = {
    'split': 0.95,
    'lr': 0.001,
    'batch_size': 256,
    'num_hiddens': 64,
    'num_residual_hiddens': 32,
    'num_residual_layers': 2,
    # NOTE(review): get_centroids reshapes embeddings as if embedding_dim
    # were 64, not 256 -- confirm which value the checkpoints use.
    'embedding_dim': 256,
    'num_embeddings': 10,
    'commitment_cost': 0.25,
    'decay': 0.99
}
vqvae = VQVAE(**conf).cuda()
vqvae.eval()
# Inspect the randomly initialised codebook (before checkpoint loading).
vqvae._vq_vae._embedding.weight
# +
# Candidate checkpoints; only the LAST assignment is actually used -- the
# earlier paths are kept around for quick manual switching between runs.
path = '../results/vqvae_0.2/mineRL/y77fc26u/checkpoints/epoch=808-step=61483.ckpt'
path = '../results/vqvae_0.1/mineRL/2wgoga4p/checkpoints/epoch=833-step=62549.ckpt'
path = '../results/vqvae_0.3/mineRL/1c4o6jgy/checkpoints/epoch=499-step=37999.ckpt'
path = '../results/vqvae_2.0/mineRL/kbsmulhw/checkpoints/epoch=49-step=4499.ckpt'
path = '../results/vqvae_3.1/mineRL/8ykl37mx/checkpoints/epoch=59-step=5399.ckpt'
path = '../results/vqvae_3.0/mineRL/2vm6qyu3/checkpoints/epoch=59-step=5399.ckpt'
path = '../results/vqvae_sweep_0/mineRL/8bbcrmgb/checkpoints/epoch=34-step=3149.ckpt'
path = '../results/vqvae_CW0_realistic_pixels.0/mineRL/nqxebkf7/checkpoints/epoch=39-step=4999.ckpt'
path = '../results/vqvae_exp_1/mineRL/mlq3n3be/checkpoints/epoch=39-step=11679.ckpt'
path = '../results/vqvae_CW0-4_realistic_pixels.0/mineRL/1bvif2wt/checkpoints/epoch=13-step=6839.ckpt'
checkpoint = torch.load(path)
# -
vqvae.load_state_dict(checkpoint['state_dict'])
# Codebook after loading -- should differ from the random init shown above.
vqvae._vq_vae._embedding.weight
# Decode and display each of the 10 codebook centroids one by one.
for i in range(10):
    out = vqvae.get_centroids(i)
    # (C, H, W) tensor -> (H, W, C) numpy image.
    img = out.squeeze().permute(1,2,0).detach().cpu().numpy()
    # Undo the -0.5 normalisation shift, then clip to the valid range.
    img = img + 0.5
    img[img>1] = 1
    plt.imshow(img)
    plt.show()
    #plt.imsave(f"../goal_states/sweep_vqvae/centroid_{i}.png", img)
# Render all 10 centroids side by side and save the figure to disk.
fig, axes = plt.subplots(ncols=10, figsize=(18,10))
for i, ax in enumerate(axes):
    out = vqvae.get_centroids(i)
    # Same tensor -> displayable-image conversion as in the loop above.
    img = out.squeeze().permute(1,2,0).detach().cpu().numpy()
    img = img + 0.5
    img[img>1] = 1
    ax.imshow(img)
    ax.axis('off')
#plt.show()
plt.savefig(f'/home/juanjo/Pictures/Minecraft/centroides/vqvae_CW0-4_realistic_pixels.0.png', transparent=True)
| src/jupyter/VQVAE_Centroids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Examen Extraordinario
# # Fundamentos de la Informática
#
# # Tiempo: 2h15
# ## *Rellena la siguiente información*:
#
# ### Nombre completo:
# ### Grupo:
# **NOTA. Al terminar, el estudiante debe subir el notebook en formato PDF y en formato IPYNB al Moodle en este orden (a la sección con nombre "EXAMEN EXTRAORDINARIO").**
# **Para convertir el notebook a formato PDF pulsar Ctrl+P y seleccionar "Salvar a PDF" en el desplegable de selección de impresoras**
# ## Ej1. Diccionarios (2 ptos)
#
# Utiliza el diccionario definido en la siguiente celda y la teoría de bucles de Python para generar la siguiente salida:
#
# In spain with capital madrid there is a population of 46.78 millons
# In france with capital paris there is a population of 66.03 millons
# In germany with capital berlin there is a population of 80.62 millons
# In norway with capital oslo there is a population of 5.08 millons
# In italy with capital rome there is a population of 60.37 millons
# In grece with capital athens there is a population of 10.72 millons
# In austria with capital vienna there is a population of 8.86 millons
# In slovakia with capital bratislava there is a population of 5.45 millons
# The average population of those countries is: 35489375 people
# Country -> {capital, population} lookup; populations are in millions.
europe = { 'spain': { 'capital':'madrid', 'population':46.777 },
           'france': { 'capital':'paris', 'population':66.033 },
           'germany': { 'capital':'berlin', 'population':80.622 },
           'norway': { 'capital':'oslo', 'population':5.084 },
           'italy': { 'capital':'rome', 'population':60.365 },
           'grece': { 'capital':'athens', 'population':10.721 },
           'austria': { 'capital':'vienna', 'population':8.859 },
           'slovakia': { 'capital':'bratislava', 'population':5.454 }
         }
# ### Solución:
# Print one summary line per country, then the average population in people
# (populations are stored in millions, hence the *1000000 factor).
populations = []
for country, info in europe.items():
    print("In %s with capital %s there is a population of %.2f millons" % (country, info["capital"], info["population"]))
    populations.append(info["population"])
average_people = sum(populations) * 1000000 / len(europe)
print("The average population of those countries is: %.0f people" % average_people)
# ## Ej2. Leer Ficheros (2 ptos)
#
# El fichero "path.csv" contiene la información del movimiento de un vehículo. Este vehículo solo puede moverse en tramos rectos, aunque entre tramo y tramo puede cambiar de dirección. El fichero contiene tres columnas. Una primera llamada **"step"** que indica el orden de cada tramo, una segunda llamada **"direction"** que indica la dirección en la que se mueve un vehículo en el tramo correspondiente y una tercera llamada **"length"** que indica la distancia recorrida en ese tramo. Por ejemplo:
#
# step|direction|length
# -|-|-
# 1|N|1.3
# 2|SE|2
# 3|E|0.5
# 4|S|1
#
# El fichero anterior implicaría un movimiento en 4 tramos que se inicia con una trayectoria de dirección norte y de longitud 1.3 seguida de una nueva trayectoria de dirección sudeste de longitud 2, a continuación una nueva trayectoria de dirección este y longitud 0.5 y por último, un último tramo de dirección sur y longitud 1.
#
# Generar un código Python que sea capaz de leer el fichero "path.csv" y generar una lista de tuplas que tenga este aspecto y que funcione para cualquier longitud del fichero:
#
# path=[("N",1.3),("SE",2),("E",0.5),("S",1)]
# +
# Read path.csv (columns: step;direction;length) into a list of
# (direction, length) tuples; works for any number of rows.
path = []
with open("path.csv", "r") as csv_file:
    # Consume (and keep) the header row so the loop only sees data rows.
    header = csv_file.readline().split(";")
    for line in csv_file:
        _step, direction, raw_length = line.split(";")
        # Drop the trailing newline before converting the length to float.
        path.append((direction, float(raw_length.replace('\n', ''))))
path
# -
# ## Ej3. Plot Path (2 Ptos)
#
# Un vehículo se mueve a partir de una secuencia de direcciones como esta:
#
# path=[('E', 1.0), ('SE', 2), ('NE', 0.5), ('N', 2.0), ('SE', 1.0), ('E', 2.0), ('E', 0.5),
# ('NW', 3.0), ('NW', 1.0), ('SW', 1.0), ('E', 1.0), ('NW', 1.0), ('NW', 2.0), ('E', 3.0),
# ('E', 1.5), ('S', 1.0), ('SE', 3.0), ('W', 1.0), ('S', 1.0), ('E', 1.0), ('SW', 1.0), ('SW', 1.0)]
#
# Y desde un punto de origen como este:
#
# origen=(0,0)
#
# Realizar un código Python que dibuje la trayectoria de ese vehículo. Para ello, habrá que ir generando dos vectores, uno para las X's y otro para las Y's para cada nuevo paso de la trayectoria. Inicialmente los vectores X e Y sólo contienen el valor del punto de origen.
#
# $X = [0]$
# $Y = [0]$
#
# La coordenada siguiente tendrá que actualizarse en función de la dirección de la variable **"path"**. Por ejemplo, si el origen de coordenadas es (0,0) y el primer paso indica la dirección "E" y un desplazamiento de longitud "1.0", habrá que actualizar los vectores X e Y de manera que nos movamos esa distancia hacia el Este:
#
# $X = [0,1]$
# $Y = [0,0]$
#
# Si el siguiente paso indica "SE" y un desplazamiento de longitud 2, habrá que actualizar los vectores X e Y de manera que nos movamos esa distancia hacia el Sudeste:
#
# $X = [0,1,2*\sqrt{2}/2]$
# $Y = [0,0,-2*\sqrt{2}/2]$
#
# Tened en cuenta que si la dirección del siguiente tramo es "NE","SE","SW" o "NW" habrá que utilizar el seno y el coseno de 45º ($\sqrt{2}/2$) con el signo correspondiente para conseguir las coordenadas del vehículo. Esto quiere decir que si el vehículo se mueve en dirección NE ambas coordenadas se verán afectadas con un signo positivo mientras que si el vehículo se mueve en dirección SE, la coordenada X se verá afectada con signo positivo mientras que la coordenada Y lo será con signo negativo.
# Y así sucesivamente con el propósito de obtener una visualización similar a esta:
#
# 
#
# El código debe implementarse en forma de una función que tome como argumentos la variable **"path"** que es una lista de tuplas, y la variable **"origen"** que es una tupla con dos coordenadas:
#
# def plot_path(path,origen):
# ...
# ...
# ### Solución:
# +
import numpy as np
import matplotlib.pyplot as plt
# Example trajectory (direction, length) and its starting coordinates,
# used to exercise plot_path below.
path=[('E', 1.0), ('SE', 2), ('NE', 0.5), ('N', 2.0), ('SE', 1.0), ('E', 2.0), ('E', 0.5),
      ('NW', 3.0), ('NW', 1.0), ('SW', 1.0), ('E', 1.0), ('NW', 1.0), ('NW', 2.0), ('E', 3.0),
      ('E', 1.5), ('S', 1.0), ('SE', 3.0), ('W', 1.0), ('S', 1.0), ('E', 1.0), ('SW', 1.0), ('SW', 1.0)]
origen=(0,0)
def plot_path(path, origen):
    """Plot the trajectory described by ``path`` starting at ``origen``.

    Args:
        path: list of (direction, length) tuples; direction is one of the
            8 compass points ("N", "NE", "E", "SE", "S", "SW", "W", "NW").
        origen: (x, y) tuple with the starting coordinates.
    """
    diag = np.sqrt(2) / 2  # cos(45 deg) == sin(45 deg)
    # Unit displacement vector for every compass direction.
    unit = {
        "N": (0.0, 1.0),
        "NE": (diag, diag),
        "E": (1.0, 0.0),
        "SE": (diag, -diag),
        "S": (0.0, -1.0),
        "SW": (-diag, -diag),
        "W": (-1.0, 0.0),
        "NW": (-diag, diag),
    }
    X = [origen[0]]
    Y = [origen[1]]
    for direction, length in path:
        if direction not in unit:
            # Unknown directions are silently skipped (mirrors the original
            # if/elif chain, which appended nothing for unmatched values).
            continue
        dx, dy = unit[direction]
        X.append(X[-1] + length * dx)
        Y.append(Y[-1] + length * dy)
    plt.figure(figsize=(8,5))
    plt.plot(X, Y)
    plt.plot(X, Y, ".b")
    plt.text(X[0], Y[0]+0.1, r'Start >>')
    plt.text(X[-1]+0.2, Y[-1], r'>> End')
    plt.title("RANDOM PATH")
    plt.show()
# -
# Draw the example trajectory defined above.
plot_path (path, origen)
# ## Ej4. Random Walks (2 ptos.)
#
# Haz un programa en python que en función del valor aleatorio que tome una variable numérica entre 1 y 8 genere una secuencia de puntos cardinales. Por ejemplo, cuando esa variable aleatoria valga 1, la dirección elegida será Norte. Cuando sea 2 la dirección elegida será Noreste, cuando sea 3 la dirección será Este y así sucesivamente.
#
# 
#
# Cada uno de esos puntos cardinales se utilizará para construir una tupla en la que el segundo miembro será el resultado de otra variable numérica aleatoria decimal esta vez entre 0 y 4. El código debe generar tantas tuplas como le indiquemos en la variable **"number_of_steps"**. Por ejemplo para:
#
# number_of_steps=22
#
# La salida podría ser:
#
# path=[('E', 1.0), ('SE', 2), ('NE', 0.5), ('N', 2.0), ('SE', 1.0), ('E', 2.0), ('E', 0.5),
# ('NW', 3.0), ('NW', 1.0), ('SW', 1.0), ('E', 1.0), ('NW', 1.0), ('NW', 2.0), ('E', 3.0),
# ('E', 1.5), ('S', 1.0), ('SE', 3.0), ('W', 1.0), ('S', 1.0), ('E', 1.0), ('SW', 1.0), ('SW', 1.0)]
#
# El código debe implementarse en forma de una función que tome como único argumento la variable **"number_of_steps"** y que devuelva la lista de tuplas aleatorias:
#
# def random_path(number_of_steps):
# ...
# ...
# return path
#
# Considerar usar las funciones de Numpy:
#
# * np.random.randint
# * np.random.random
# * np.round
#
# Cuya ayuda se puede consultar desde Jupyter.
# ### Solución:
# +
import numpy as np
def random_path(number_of_steps):
    """Generate ``number_of_steps`` random (direction, length) tuples.

    Directions are drawn uniformly from the 8 compass points and lengths
    uniformly from [0, 4), rounded to one decimal place.

    Args:
        number_of_steps: how many tuples to generate.

    Returns:
        list of (direction, length) tuples.
    """
    # Index 1..8 maps to N, NE, E, SE, S, SW, W, NW (clockwise from north);
    # slot 0 is a placeholder so the dice value indexes directly.
    compass = (None, "N", "NE", "E", "SE", "S", "SW", "W", "NW")
    path = []
    for _ in range(number_of_steps):
        dice = np.random.randint(1, 9)
        length = np.round(np.random.random() * 4, 1)
        path.append((compass[dice], length))
    return path
# -
# Generate and display a 22-step random walk.
number_of_steps=22
path=random_path(number_of_steps)
print(path)
# ## Ej5. Salvar ficheros (2 ptos.)
#
# Crear un código en python que a partir de una variable como esta
#
# path=[('SW', 0.1), ('SW', 2.4), ('NE', 0.4), ('W', 0.0), ('N', 2.1), ('NW', 1.2), ('S', 2.2), ('E', 1.5), ('SW', 3.3), ('W', 0.2), ('E', 3.0), ('W', 0.9), ('E', 0.8), ('N', 2.1), ('E', 2.2), ('NE', 3.7), ('E', 2.3), ('NE', 3.5), ('SE', 3.3), ('SE', 3.7), ('NE', 0.7), ('E', 0.2)]
#
# Guarde esa información en un fichero csv con este formato:
#
# step|direction|length
# -|-|-
# 1|SW|0.1
# 2|SW|2.4
# 3|NE|0.4
# 4|W|0.0
# ...|...|...
#
#
# El fichero debe llamarse **"randompath.csv"** y el separador debe ser el símbolo **";"**
#
# ### Solución 1:
# +
# Write the path as a ;-separated CSV with a 1-based step counter.
path = [('SW', 0.1), ('SW', 2.4), ('NE', 0.4), ('W', 0.0), ('N', 2.1), ('NW', 1.2), ('S', 2.2), ('E', 1.5), ('SW', 3.3), ('W', 0.2), ('E', 3.0), ('W', 0.9), ('E', 0.8), ('N', 2.1), ('E', 2.2), ('NE', 3.7), ('E', 2.3), ('NE', 3.5), ('SE', 3.3), ('SE', 3.7), ('NE', 0.7), ('E', 0.2)]
with open("randompath1.csv", "w") as out_file:
    out_file.write("step;direction;length\n")
    for item, (direction, length) in enumerate(path, start=1):
        out_file.write("{};{};{}\n".format(item, direction, length))
# -
# ### Solución 2:
import csv
# Same export as above, this time via the csv module's writer.
with open('randompath2.csv', mode='w', newline='') as out_file:
    csv_writer = csv.writer(out_file, delimiter=';')
    csv_writer.writerow(['step', 'direction', 'length'])
    for step, (direction, length) in enumerate(path, start=1):
        csv_writer.writerow([step, direction, length])
| Repositorio/20200624 - Examen Extraordinario/examen_extraordinario_practica_ESP_long.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Jf9nV2jnDYg3"
# loading data from kaggle to colab
# + id="4T-yFkm0l8RZ"
# !pip install -q kaggle
# + id="hPE-4uXrmI-z"
from google.colab import files
files.upload()
# + id="K_hADSv7mTGy"
# !mkdir ~/.kaggle
# + id="BqXUFHrompje"
# ! cp kaggle.json ~/.kaggle/
# + id="q5hsib2pmY4A"
# ! kaggle competitions download -c ngwl-predict-customer-churn --force
# + [markdown] id="jR0uRqMRDcgA"
# imports
# + id="n8WWfbvbz6uw"
import warnings
warnings.filterwarnings('ignore')
# + id="gg3mPWNFmbhM"
import pandas as pd
import numpy as np
import itertools
import gc
from datetime import datetime, timedelta, date
# + id="GQ4mPelO7671" outputId="7bb56109-0ac4-4d8f-d81b-088e48c7c4f2" colab={"base_uri": "https://localhost:8080/", "height": 35}
from google.colab import drive
drive.mount('/content/gdrive')
# + id="G9L5ynkks_bJ" outputId="ae890e21-6147-4c85-8ba9-38873949feb0" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Address table: links ship_address_id to the customer's phone_id.
addresses = pd.read_csv('addresses.csv.zip')
addresses.shape
# + id="RaGIswsHvIZX" outputId="01dbacd0-7769-4c49-85a5-32117d36e663" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Four shipment dumps, one per ~2-month period of 2020.
ship1 = pd.read_csv('shipments2020-01-01.csv.zip')
ship2 = pd.read_csv('shipments2020-03-01.csv.zip')
ship3 = pd.read_csv('shipments2020-04-30.csv.zip')
ship4 = pd.read_csv('shipments2020-06-29.csv.zip')
ship1.shape, ship2.shape, ship3.shape, ship4.shape
# + id="KxpcXLEJoiap" outputId="57083b49-ccb7-4840-ee25-c42c9a966b46" colab={"base_uri": "https://localhost:8080/", "height": 35}
#concatenate all shipments
all_shipments = pd.concat([ship1, ship2, ship3, ship4])
# Free the per-period frames immediately -- they are large.
del ship1, ship2, ship3, ship4
gc.collect()
# + id="RVObutkwqHML" outputId="9bb87883-c0f7-4f3d-9d2c-cec066d6a873" colab={"base_uri": "https://localhost:8080/", "height": 35}
all_shipments.shape
# + id="pARjORh2r32U" outputId="56ad4375-492c-4406-b54f-e7959c0334f1" colab={"base_uri": "https://localhost:8080/", "height": 35}
#get phone_id from addresses
all_shipments = all_shipments.merge(addresses, left_on='ship_address_id', right_on='id', how='left').drop(['id'], axis=1).drop_duplicates()
all_shipments.shape
# + id="Uv08vYhAtfdG"
#get calendar month from order completion timestamp
all_shipments['month'] = pd.to_datetime(all_shipments.order_completed_at).dt.month
# + [markdown] id="eDtf_5zs-8Ks"
# ### features: nr of cancelled/completed orders
# + id="nIMLUmZQtvTK"
features = []
#aggregate features for each month (April-Sep)
for month in range(4, 10):
    #take 3 months history
    temp = all_shipments[(all_shipments.month<month)&(all_shipments.month>=month-3)]
    #leave only cancelled/complete states
    temp = temp[temp['s.order_state'].isin(['complete', 'canceled'])]
    #get nr of cancelled/completed orders for each customer at each month
    f = temp.pivot_table(index=['phone_id'], columns=['month', 's.order_state'], aggfunc='size', fill_value=0)
    #rename columns
    # NOTE(review): assumes the pivot always yields exactly 3 months x 2
    # states = 6 columns in this order -- confirm for windows with sparse months.
    f.columns = ['canc_1', 'comp_1', 'canc_2', 'comp_2', 'canc_3', 'comp_3']
    #change indices
    # Rows are keyed "<phone_id>_2020-0<month>" so month-specific feature
    # rows can later be joined across the different feature files.
    f.index = f.index.astype(str)+'_2020-0'+str(month)
    features.append(f)
features_all = pd.concat(features)
# + id="kk2FsN2O8B68"
#save features
features_all.to_pickle('/content/gdrive/My Drive/cancelled_completed_features.pkl')
# + [markdown] id="WNAQKeel_Agk"
# ### features: statistics from shipments
# + id="AX0ZSvKB_Ddv"
features = []
for month in range(4, 10):
    # 3-month sliding history window ending just before `month`.
    temp = all_shipments[(all_shipments.month<month)&(all_shipments.month>=month-3)]
    # Per customer & month: distinct retailers, cost/weight stats, rating stats.
    f = temp.groupby(['phone_id', 'month']).agg({'retailer':['nunique'], 'total_cost':[np.mean, 'max', 'min'],
                                                 'total_weight':[np.mean, 'max', 'min'], 'rate': [np.count_nonzero, 'sum', 'max']}).unstack()
    stat1, stat2 = ['mean', 'max', 'min'], ['count_not_zero', 'sum', 'max']
    cols = ['ret_nr']+['cost_'+stat for stat in stat1]+['weight_'+stat for stat in stat1]+['rate_'+stat for stat in stat2]
    months = [1, 2, 3]
    # The cartesian product matches the (feature, month-offset) column layout.
    f.columns = [pair[0]+'_'+str(pair[1]) for pair in itertools.product(cols, months)]
    f.index = f.index.astype(str)+'_2020-0'+str(month)
    features.append(f)
# Missing (customer, month) combinations become -1 sentinels.
features_all = pd.concat(features).fillna(-1)
# + id="MujMN_xfDv8A"
#save data
features_all.to_pickle('/content/gdrive/My Drive/retailer_other_stats_features.pkl')
# + [markdown] id="achNdazbTWPz"
# ### features: statistics on delivery time
# + id="oPn051zUTdaj"
#get shipment duration in hours
all_shipments['duration'] = (pd.to_datetime(all_shipments.shipped_at, format='%Y-%m-%d %H:%M:%S') - pd.to_datetime(all_shipments.shipment_starts_at, format='%Y-%m-%d %H:%M:%S')).astype('timedelta64[h]')
# + id="1jwbOc4Shzo7" outputId="8bc45067-2cf2-4c73-c6f8-00400718af31" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Outlier bounds: 1st and 99th percentile of the delivery duration.
lb = all_shipments.duration.quantile(0.01)
ub = all_shipments.duration.quantile(0.99)
lb, ub
# + id="t1RUF11Nh7JO"
#change outliers to nan
all_shipments['duration'] = np.where((all_shipments['duration']<lb)|(all_shipments['duration']>ub), np.nan, all_shipments['duration'])
# + id="jcH8UV0ZVuOS"
# Same 3-month sliding-window scheme as above, now for duration statistics.
features = []
for month in range(4, 10):
    temp = all_shipments[(all_shipments.month<month)&(all_shipments.month>=month-3)]
    f = temp.groupby(['phone_id', 'month']).agg({'duration':[np.mean, 'max', 'min']}).unstack()
    stats = ['mean', 'max', 'min']
    cols = ['duration_'+stat for stat in stats]
    months = [1, 2, 3]
    f.columns = [pair[0]+'_'+str(pair[1]) for pair in itertools.product(cols, months)]
    f.index = f.index.astype(str)+'_2020-0'+str(month)
    features.append(f)
features_all = pd.concat(features).fillna(-1)
# + id="vD7Ytbipj-Vb"
#save data
features_all.to_pickle('/content/gdrive/My Drive/duration_features.pkl')
# + [markdown] id="AU2P2NCRlt4F"
# ### features: nr of messages received
# + id="LeaYimn-kXhW" outputId="b09615b8-913d-4d4a-b1c5-d67bc3c7e58a" colab={"base_uri": "https://localhost:8080/", "height": 35}
messages = pd.read_csv('messages.csv.zip')
messages.shape
# + id="7gvAy6VMnmO7"
#change timestamp to date
# 'sent' arrives as a unix epoch in seconds.
messages.sent = pd.to_datetime(messages.sent,unit='s')
#get month
messages['month'] = messages.sent.dt.month
# + id="AlkfKI5Ao3Ty"
#get nr of messages per month
agg_messages = messages.groupby(['user_id', 'month']).sent.count().reset_index()
# + id="FTBEp0LfpOWX"
#get phone_id from shipments
agg_messages = agg_messages.merge(all_shipments[['user_id', 'phone_id']], left_on='user_id', right_on='user_id').drop_duplicates()
# + id="aguymPkqqT80"
# Per (phone_id, target month): total messages in each of the 3 prior months.
features = []
for month in range(4, 10):
    temp = agg_messages[(agg_messages.month<month)&(agg_messages.month>=month-3)]
    f = temp.groupby(['phone_id', 'month']).sent.sum().unstack()
    months = [1, 2, 3]
    # NOTE(review): assumes exactly 3 month-columns survive the unstack --
    # confirm for windows containing empty months.
    f.columns = ['messages_sent'+'_'+str(m) for m in months]
    f.index = f.index.astype(str)+'_2020-0'+str(month)
    features.append(f)
features_all = pd.concat(features).fillna(0)
# + id="Q6HPvg37ryEO"
#save data
features_all.to_pickle('/content/gdrive/My Drive/messages_sent.pkl')
# + [markdown] id="bIc5dnNdsaML"
# #### features: nr of messages received per type
# + id="KkK2DlmSr9ST" outputId="24b2f9d1-1520-4457-ecae-5b264852e4ac" colab={"base_uri": "https://localhost:8080/", "height": 35}
actions = pd.read_csv('actions.csv')
actions.shape
# + id="xw4s5le7skKP" outputId="583cd8e6-f4d5-4fc1-e9a1-011d855bb388" colab={"base_uri": "https://localhost:8080/", "height": 35}
#get action type
messages = messages.merge(actions[['id', 'type']], left_on='action_id', right_on='id', how='left').drop(['id'], axis=1)
messages.shape
# + id="wWVgqkX5tBib"
# Message counts per user, month and message type.
agg_messages_type = messages.groupby(['user_id', 'month', 'type']).sent.count().reset_index()
# + id="wNkjOBlcur3Z"
agg_messages_type = agg_messages_type.merge(all_shipments[['user_id', 'phone_id']], left_on='user_id', right_on='user_id').drop_duplicates()
# + id="TbiQwJDsu1gP" outputId="b123cce2-30c7-45c2-b7ab-146d6662f1d0" colab={"base_uri": "https://localhost:8080/", "height": 88}
agg_messages_type.type.value_counts()
# + id="gwYBnxuOvAme" outputId="586dab84-2ab9-41ee-f1b9-d0981344f66a" colab={"base_uri": "https://localhost:8080/", "height": 52}
#sms were started only in August
agg_messages_type[agg_messages_type.type=='sms'].month.value_counts()
# + id="RcLX7EwDvcFq"
#changed message types to push/other
# Collapse everything except 'push' (incl. the late-starting sms) into one bucket.
agg_messages_type.loc[agg_messages_type.type!='push', 'type'] = 'other'
# + id="oFjIEvCsvxih" outputId="3f539581-eef8-4a9b-b6f6-125dbf0a8058" colab={"base_uri": "https://localhost:8080/", "height": 70}
agg_messages_type.type.value_counts()
# + id="auotQxa-wPEU" outputId="bce92589-2d3f-493a-db0e-e25a4e829f97" colab={"base_uri": "https://localhost:8080/", "height": 124}
# Per (phone_id, target month): message counts split by month offset and type.
features = []
for month in range(4, 10):
    temp = agg_messages_type[(agg_messages_type.month<month)&(agg_messages_type.month>=month-3)]
    f = temp.groupby(['phone_id', 'month', 'type']).sent.sum().unstack().unstack()
    months = [1, 2, 3]
    types = ['other', 'push']
    f.columns = [pair[0]+'_'+str(pair[1]) for pair in itertools.product(types, months)]
    f.index = f.index.astype(str)+'_2020-0'+str(month)
    print(month, f.shape)
    features.append(f)
features_all = pd.concat(features).fillna(0)
# + id="Z0DQ03fNficq" outputId="0f7bcb9e-ffe6-48d7-918d-ef8eb7d18787" colab={"base_uri": "https://localhost:8080/", "height": 35}
features_all.shape
# + id="1BYnFKKyU7Nr"
#save data
features_all.to_csv('/content/gdrive/My Drive/messages_sent_by_type.csv')
# + id="RkL08_KUwbJ0"
#save data
features_all.to_pickle('/content/gdrive/My Drive/messages_sent_by_type.pkl')
# + [markdown] id="_tYgX-xn7tnC"
# ### user profile features
# + id="ryM5nlvbwg1A" outputId="94d3f9ad-9d2d-4b16-baba-1d6d6cc75e65" colab={"base_uri": "https://localhost:8080/", "height": 35}
users = pd.read_csv('user_profiles.csv.zip')
users.shape
# + id="t9OcDDRV78Hl" outputId="43c6d930-ba26-458a-bc73-557121369304" colab={"base_uri": "https://localhost:8080/", "height": 35}
#add phone_id
users = users.merge(all_shipments[['user_id', 'phone_id']], left_on='user_id', right_on='user_id').drop_duplicates()
users.shape
# + id="iSn0gUxc8Jy4" outputId="57755ce3-845f-499d-b595-661c18623c6f" colab={"base_uri": "https://localhost:8080/", "height": 141}
#extract city from shipments
# A customer's "city" is the mode (most frequent value) of their shipment cities.
city = all_shipments.groupby('phone_id')['s.city_name'].apply(lambda x:x.value_counts().index[0])
# + id="X8f7A3MbeY6u"
users = users.merge(city, left_on='phone_id', right_index=True)
users.drop(['user_id'], axis=1, inplace=True)
# + id="Sitgzg3B-y2U"
#change birthdate to age
def get_age(bdate):
    """Return age in whole years as of today, or None for a missing date.

    ``bdate`` may be NaT (``pd.to_datetime(..., errors='coerce')`` produces
    NaT for unparseable values, and this function is applied right after such
    a conversion); previously NaT propagated NaN through the arithmetic, now
    it is handled explicitly.
    """
    if pd.isna(bdate):
        return None
    today = date.today()
    # Subtract one year if the birthday has not yet occurred this year.
    return today.year - bdate.year - ((today.month, today.day) < (bdate.month, bdate.day))
# Parse birthdates (unparseable values become NaT) and derive age in years.
users['bdate'] = pd.to_datetime(users['bdate'], errors='coerce')
users['age'] = users.bdate.apply(get_age)
# + id="ytDx38qOB9_z"
users.drop(['bdate'], axis=1, inplace=True)
# + id="0D4Fo5OUCFCW"
users.rename(columns={'s.city_name':'city'}, inplace=True)
# + id="N735cKmNCMUE"
#save data
# Persist user features in both pickle and CSV form for downstream notebooks.
users.to_pickle('/content/gdrive/My Drive/user_features.pkl')
# + id="zy-D2_pDCQgT"
users.to_csv('/content/gdrive/My Drive/user_features.csv')
| ngwl_features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploring the summary data of flights from Brazil
# ## 1. Introduction
# One of the industries that was very impacted by the COVID-19 pandemic was the air transport industry. The data shared by ANAC - the Brazilian Civil Aviation Authority - presents how it was affected. Let's explore this data in this Jupyter notebook and see if we can answer some questions and get some insights.
# The data used in this notebook may be found at:
#
# - https://www.gov.br/anac/pt-br/assuntos/dados-e-estatisticas/dados-estatisticos/arquivos/resumo_anual_2019.csv
# - https://www.gov.br/anac/pt-br/assuntos/dados-e-estatisticas/dados-estatisticos/arquivos/resumo_anual_2020.csv
# - https://www.gov.br/anac/pt-br/assuntos/dados-e-estatisticas/dados-estatisticos/arquivos/resumo_anual_2021.csv
# ## 2. Importing the libraries and data clean-up
# First of all, let's import the libraries we are going to use:
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import unidecode
# I am using the Seaborn library instead of matplotlib. I am also using the unidecode library to convert the column names to a more friendly format.
# Now the files are loaded and merged into a single dataframe.
# +
# Concatenate the three yearly ANAC summary files into one dataframe.
# Files use ';' separators and Latin-1 encoding (Portuguese text).
folder = r'C:\Users\thiag\data\ANAC-transport'
dffiles = ['resumo_anual_2019.csv',
           'resumo_anual_2020.csv',
           'resumo_anual_2021.csv']
df = pd.concat([pd.read_csv(os.path.join(folder, x),
                            sep=';', encoding=('ISO-8859-1'))
                for x in dffiles])
# -
# Let's look at the data.
print(df.head())
# The following can be observed about the column names:
# - They are written in Portuguese and contain accentuation;
# - They are all in upper case letters;
# - They contain spaces and parenthesis.
#
# To facilitate readability we will modify the column names by:
# - Replacing the spaces with underlines "_";
# - Removing the parenthesis;
# - Making all letters lowercase; and
# - Removing the accents.
# This convention is called snake_case and, even though not standard, it is frequently used. For more information, refer to: https://en.wikipedia.org/wiki/Snake_case
# +
print("Column names before changes:\n")
print(df.columns)
# Normalize headers to snake_case ASCII: strip accents, lowercase,
# replace spaces with underscores, drop parentheses.
df.columns = [unidecode.unidecode(z.lower())
              .replace(' ','_')
              .replace('(','')
              .replace(')','')
              for z in df.columns]
# Cache the merged, renamed dataset for reuse.
df.to_csv('3years.csv', sep=';', index=False)
print("Column names after changes:\n")
print(df.columns)
# -
# This looks better.
# Let's add some new columns to this dataframe, to support our analysis:
# - Since we are looking for a chronological observation, it is interesting to concatenate the calendar months and years into a single variable called 'data' (Portuguese for date. I am keeping Portuguese names for consistency). Let's also add a column named 'quarter' to group the months of the year 3-by-3.
# - We can also infer the routes from the origin and destination airport variables (respectively called aeroporto_de_origem_sigla and aeroporto_de_destino_sigla). A variable named 'rota' (Portuguese for route) will be created to store the 'origin->destination' string. Another variable with the names of the airports (instead of the codes) will be created (and called 'rota_nome') for readability (not everyone knows all airport codes).
# - Dividing RPK for ASK we get the load factor, which is a very important metric for airlines economics. This variable will also be created.
#
# +
# Derived columns. The original built each one with a Python-level
# df.iterrows() list comprehension — O(rows) interpreter work per column.
# Series.map applies the very same per-element conversions (str(...) and
# "{:02}".format(...)), so the resulting strings are identical (including
# 'nan' for missing airport codes) but without the iterrows overhead.
# 'data' is "YYYY-MM" with a zero-padded month.
df['data'] = df['ano'].map(str) + '-' + df['mes'].map("{:02}".format)
# 'rota' is "ORIGIN->DEST" using airport codes.
df['rota'] = (df['aeroporto_de_origem_sigla'].map(str) + '->' +
              df['aeroporto_de_destino_sigla'].map(str))
# 'rota_nome' is the same route using full airport names, for readability.
df['rota_nome'] = (df['aeroporto_de_origem_nome'].map(str) + '->' +
                   df['aeroporto_de_destino_nome'].map(str))
# Load factor: revenue passenger-km over available seat-km.
df['load_factor'] = df['rpk']/df['ask']
def quarter(x):
    """Return the 'YYYY-Qn' quarter label for a row.

    Args:
        x: mapping (e.g. a DataFrame row) with 'ano' (year) and
           'mes' (month, 1-12).

    Raises:
        ValueError: if the month is outside 1-12. (The original if/elif
        chain left the result variable unbound in that case, raising an
        opaque UnboundLocalError.)
    """
    year = x['ano']
    mes = x['mes']
    if not 1 <= mes <= 12:
        raise ValueError("month out of range: {!r}".format(mes))
    # Months 1-3 -> Q1, 4-6 -> Q2, 7-9 -> Q3, 10-12 -> Q4.
    return str(year) + '-Q' + str((mes - 1) // 3 + 1)
# Derive the 'YYYY-Qn' quarter label for every row.
df['quarter'] = df.apply(quarter, axis=1)
# -
# ## 3. Airline metrics for efficiency and capacity
# Since there is no data dictionary, it is now a good time to talk about some interesting variables:
# - RPK meaning "Revenue Passenger Kilometers" is an air transport industry metric that aggregates the number of paying passengers and the quantity of kilometers traveled by them. It is calculated by multiplying the number of paying passengers by the distance traveled in kilometers.
# - ASK meaning "Available Seat Kilometers" is similar to the RPK but instead of using the paying passengers, the passenger capacity (number of seats available in the aircraft) is multiplied by the traveled distance.
# - RTK (for "Revenue tonne kilometres") measures the revenue cargo load in tonnes multiplied by the distance flown in kilometers.
# - ATK (for "Available tonne kilometres") measures the aircraft capacity of cargo load in tonnes multiplied by the distance flown in kilometers.
#
# The dataframe presents not only the value of these parameters but also the variables that compose their formula. Therefore, let's make a consistency check, verifying it is possible to reproduce their values through the variables.
# The formulas of the variables are:
# $ RPK = \frac{\sum{PayingPassengers} \ \times \ distance}{\sum{flights}} $
#
# $ ASK = \frac{\sum{Seats} \ \times \ distance}{\sum{flights}} $
#
# $ RTK = \frac{(AvgWeight \ \times \ \sum{PayingPassengers \ + \ BaggageWeight \ + \ CargoWeight \ + \ MailWeight) } \ \times \ distance}{1000 \ \times \ \sum{flights}} $
#
# $ ATK = \frac{\sum{Payload} \ \times \ distance}{1000 \ \times \ \sum{flights}} $
# +
# Consistency check: recompute RPK/ASK/RTK/ATK from their component columns
# and count how often the reported value matches within a 1000-unit tolerance.
# Rows with zero takeoffs cannot be recomputed (division by zero), so they
# count as a match when the reported value itself is near zero.
# NOTE(review): the *_calc column assignments below divide by 'decolagens'
# without guarding zero, producing inf/NaN for rows with no takeoffs.
dummy = []
for index, x in df.iterrows():
    if x['decolagens'] == 0:
        dummy.append(abs(x['rpk']) < 1000)
    else:
        dummy.append(abs(x['rpk'] - x['passageiros_pagos']*x['distancia_voada_km']/x['decolagens']) < 1000)
print('The number of rpk values that correspond to rpk calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))
df['rpk_calc']= df['passageiros_pagos']*df['distancia_voada_km']/df['decolagens']
del dummy
dummy = []
for index, x in df.iterrows():
    if x['decolagens'] == 0:
        dummy.append(abs(x['ask']) < 1000)
    else:
        dummy.append(abs(x['ask'] - x['assentos']*x['distancia_voada_km']/x['decolagens']) < 1000)
print('The number of ask values that correspond to ask calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))
df['ask_calc']=df['assentos']*df['distancia_voada_km']/df['decolagens']
del dummy
dummy = []
for index, x in df.iterrows():
    if x['decolagens'] == 0:
        dummy.append(abs(x['rtk']) < 1000)
    else:
        # RTK recomputation assumes an average passenger weight of 75 kg.
        dummy.append(abs(x['rtk'] - (75*x['passageiros_pagos']+x['carga_paga_kg']+x['correio_kg']+x['bagagem_kg'] )*
                     x['distancia_voada_km']/(1000*x['decolagens'])) < 1000)
print('The number of rtk values that correspond to rtk calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))
df['rtk_calc']=(75*df['passageiros_pagos']+df['carga_paga_kg']+df['correio_kg']+df['bagagem_kg']
               )*df['distancia_voada_km']/(1000*df['decolagens'])
del dummy
dummy = []
for index, x in df.iterrows():
    if x['decolagens'] == 0:
        dummy.append(abs(x['atk']) < 1000)
    else:
        dummy.append(abs(x['atk'] - x['payload']*x['distancia_voada_km']/(1000*x['decolagens'])) < 1000)
print('The number of atk values that correspond to atk calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))
df['atk_calc']=df['payload']*df['distancia_voada_km']/(1000*df['decolagens'])
del dummy
# -
# We can see that the consistency is variable, and is specifically lower for RTK values.
#
# One clear disadvantage of the calculated RTK is that the same average weight (75 kg) was used for all passengers of all airlines. This assumption implies that Brazilian and foreign companies use (or have to use) the same value for passenger weight to do their flight planning, which may not be true.
#
# Let's observe if being a Brazilian airline or foreign airline has an effect in the relationship between reported RTK and calculated RTK:
#
#
# Reported vs. recomputed RTK, colored by airline nationality.
sns.scatterplot(x=df['rtk'],y=df['rtk_calc'],hue=df['empresa_nacionalidade'])
# We can see clearly that the line y=x has many Brazilian airlines into it, but not foreign. Also, there is a second line below the y=x line, suggesting a different tendency for some foreign airlines.
#
# By optimizing the error between RKT and calculated RTK for Brazilian airlines and foreign airlines separately, we arrive at the following values:
# - Brazilian airlines have 75kg as the best average value for passenger weight;
# - Foreign airlines have 90kg as the best average value for passenger weight.
#
# These numbers come from an optimization exercise found in the article below:
#
# ## TODO: insert link to the RTK passenger-weight optimization article
# With this knowledge, let's calculate again the RTK:
# +
# Recompute RTK with a nationality-specific average passenger weight:
# 75 kg for Brazilian airlines, 90 kg for foreign ones.
# NOTE(review): if 'empresa_nacionalidade' ever holds a third value, avgw is
# left over from the previous iteration (or NameError on the first such row).
dummy = []
rtk_calc = []
for index, x in df.iterrows():
    if x['empresa_nacionalidade'] == 'BRASILEIRA':
        avgw = 75
    elif x['empresa_nacionalidade'] == 'ESTRANGEIRA':
        avgw = 90
    if x['decolagens'] == 0:
        # No takeoffs: RTK is undefined; count as a match if reported ~0.
        rtk = float('NaN')
        dummy.append(abs(x['rtk']) < 1000)
    else:
        rtk = (avgw*x['passageiros_pagos']+x['carga_paga_kg']+x['correio_kg']+x['bagagem_kg']
               )*x['distancia_voada_km']/(1000*x['decolagens'])
        dummy.append(abs(x['rtk'] - rtk) < 1000)
    rtk_calc.append(rtk)
print('The number of rtk values that correspond to rtk calculation is: {:.2f}%'.format(100*sum(dummy)/len(dummy)))
df['rtk_calc'] = rtk_calc
del dummy, rtk_calc, rtk
# -
# We see now that the match of RTK values passed from 56.28% to 58.93%. Let's also reprint the previous graphic with the corrected calculated RTK.
# Same scatter as before, now with the nationality-aware RTK recomputation.
sns.scatterplot(x=df['rtk'],y=df['rtk_calc'],hue=df['empresa_nacionalidade'])
# We can see that the second tendency line is gone, since we have taken its behaviour into consideration in our model.
# ## 4. Evolution of number of flights
# After the consistency check, let's take a look on how the number of flights has evolved through time in our database flights.
# +
# Total takeoffs per calendar month, colored by year.
df1 = pd.DataFrame(df.groupby(by=['data','ano']).agg('sum')['decolagens'])
df1.reset_index(inplace=True)
ax = sns.catplot(x='data', y='decolagens', data=df1, kind='bar', hue='ano', height=6, aspect=10/6,
                 sharey=True)
ax.set_xticklabels(rotation=90, ha="right")
ax.fig.suptitle('# Flights per month')
# -
# We can see the number of flights diminishes drastically after March 2020. Interestingly enough, this should not be credited only to the pandemic effect, since January is the month with most flights in a year.
# Let's verify what are the busiest airports (according to number of takeoffs) in Brazil, according to ANAC's data:
# +
# Busiest origin airports by total takeoffs; top 10 printed, top 20 plotted.
df2 = pd.DataFrame(df.groupby(by=['aeroporto_de_origem_nome']).agg('sum')['decolagens'])
df2 = df2.sort_values(by=['decolagens'], ascending=False)
print(df2[:10])
df2.reset_index(inplace=True)
ax = sns.catplot(x='aeroporto_de_origem_nome', y='decolagens',
                 data=df2[:20], kind='bar', color='b', sharey=True)
ax.set_xticklabels(rotation=90, ha="right")
ax.fig.suptitle('# Flights per airport')
# -
# Finally, let's verify which airports have been most affected by the travel restrictions.
#
# "Most affected" here is defined by the difference between the quantity of flights in the first quarter of 2020, when COVID-19 was discovered and started imposing restrictions, and the first quarter of 2019. With that we account for the seasonality of the air traffic. We make this calculation for each airport, absolute (in difference of flight quantities) and in percentage.
#
# Absolute numbers are important to observe since an airport that had 1 flight in 2019 and had 0 in 2020 has lost 100% of its flights, but has lost only one flight. We'd like to avoid accounting cases like this as the most impactful, since they shouldn't be.
# +
# Quarter-by-airport takeoff counts pivoted wide, then pandemic impact:
#   delta2020       - absolute drop from 2019-Q2 to 2020-Q2
#   delta_perc2020  - that drop as a percentage of 2019-Q2
#   delta_perc2021  - percentage drop from 2019-Q1 to 2021-Q1
# NOTE(review): delta2020 compares Q2 figures while delta_perc2021 compares
# Q1 — confirm this mix of quarters is intended.
df3 = pd.DataFrame(df.groupby(by=['quarter',
                                  'aeroporto_de_origem_nome']).agg('sum')[
                                      'decolagens'])
df3.reset_index(inplace=True)
df3 = df3.pivot(index='aeroporto_de_origem_nome',columns=['quarter'],values='decolagens').fillna(0)
df3['delta2020'] = df3['2019-Q2']-df3['2020-Q2']
df3['delta_perc2020'] = 100*df3['delta2020']/df3['2019-Q2']
df3['delta_perc2021'] = 100*(df3['2019-Q1']-df3['2021-Q1'])/df3['2019-Q1']
df3 = df3.sort_values(by='delta2020',ascending=False)
print(df3[: 10])
# -
# We can see that the list of most impacted airports (refer to variable 'delta_perc2020') is identical to the list of most busy airports (except for the order).
#
# We can also see, by comparing the first quarter of 2021 with the first quarter of 2019 that the gap is closing for all airports, and airports like Campinas and Recife have currently only 14% less flights than they had two years ago - which is very good news for the aviation in Brazil.
#
# As of now, the COVID-19 vaccination is progressing and ICAO and the airlines are making decisions to improve the safety of air travel while containing the spread of diseases. Therefore, measures are being taken to allow these air travel numbers to return to 2019 levels as fast as possible. I look forward to seeing the airports busy again!
| anac_eda-small.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from trainer import Trainer
from utils import init_logger, load_tokenizer, set_seed, MODEL_CLASSES, MODEL_PATH_MAP
from data_loader import load_and_cache_examples
import argparse
# +
# Command-line-style configuration for NSMC sentiment training. parse_args([])
# is called with an empty argv, so the defaults below are what take effect.
parser = argparse.ArgumentParser()
parser.add_argument("--task", default="nsmc", type=str, help="The name of the task to train")
parser.add_argument("--model_dir", default="./model", type=str, help="Path to save, load model")
parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir")
parser.add_argument("--train_file", default="ratings_train.txt", type=str, help="Train file")
parser.add_argument("--test_file", default="ratings_test.txt", type=str, help="Test file")
parser.add_argument("--model_type", default="kobert", type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
parser.add_argument("--train_batch_size", default=32, type=int, help="Batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int, help="Batch size for evaluation.")
parser.add_argument("--max_seq_len", default=50, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=5.0, type=float, help="Total number of training epochs to perform.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                    help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=2000, help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=2000, help="Save checkpoint every X updates steps.")
# NOTE(review): type=bool is a known argparse footgun — bool("False") is True,
# so these flags cannot be turned off from a real command line. Harmless here
# because parse_args([]) always uses the defaults.
parser.add_argument("--do_train", type=bool, default=True, help="Whether to run training.")
parser.add_argument("--do_eval", type=bool, default=True, help="Whether to run eval on the test set.")
parser.add_argument("--no_cuda", type=bool, default=False, help="Avoid using CUDA when available")
args = parser.parse_args([])
# Resolve the pretrained model path for the chosen model type.
args.model_name_or_path = MODEL_PATH_MAP[args.model_type]
# -
# Echo the resolved pretrained model path.
MODEL_PATH_MAP[args.model_type]
# ## Load tokenizer, build dataset
# Build the tokenizer and train/test datasets (no dev split is used).
tokenizer = load_tokenizer(args)
train_dataset = load_and_cache_examples(args, tokenizer, mode="train")
dev_dataset = None
test_dataset = load_and_cache_examples(args, tokenizer, mode="test")
# ## Train
trainer = Trainer(args, train_dataset, dev_dataset, test_dataset)
trainer.train()
# ## Inference
# input : sample_pred_in.txt, output : sample_pred_out.txt
# +
from predict import predict
# Separate configuration for batch inference over a text file.
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", default="sample_pred_in.txt", type=str, help="Input file for prediction")
parser.add_argument("--output_file", default="sample_pred_out.txt", type=str, help="Output file for prediction")
parser.add_argument("--model_dir", default="./model", type=str, help="Path to save, load model")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size for prediction")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
pred_config = parser.parse_args([])
predict(pred_config)
# -
| Train_and_Eval.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import csv
import os
import matplotlib.pyplot as plt
import seaborn as sns
import keras
import h5py
from keras.models import Sequential
from keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from keras.callbacks import ReduceLROnPlateau
# +
def get_canny_edge(image):
    """Return (canny_edges, skin_masked_gray) for a BGR input image.

    A skin-colored region is segmented in HSV space, the resulting mask is
    smoothed and applied to the grayscale image, and Canny edge detection is
    run on the masked result.
    """
    grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Convert from RGB to HSV
    HSVImaage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Finding pixels with itensity of skin
    lowerBoundary = np.array([0, 40, 30], dtype="uint8")
    upperBoundary = np.array([43, 255, 254], dtype="uint8")
    skinMask = cv2.inRange(HSVImaage, lowerBoundary, upperBoundary)
    # blurring of gray scale using medianBlur
    # NOTE(review): blending the mask with itself at 0.5 + 0.5 is effectively
    # a no-op — confirm whether a second mask was meant to be combined here.
    skinMask = cv2.addWeighted(skinMask, 0.5, skinMask, 0.5, 0.0)
    skinMask = cv2.medianBlur(skinMask, 5)
    skin = cv2.bitwise_and(grayImage, grayImage, mask=skinMask)
    # cv2.imshow("masked2",skin)
    # . canny edge detection
    canny = cv2.Canny(skin, 60, 60)
    # plt.imshow(img2, cmap = 'gray')
    return canny, skin
# -
# # Part 1 : Data preprocessing
#Loading dataset images and labels from csv files
def load_dataset(filename, n, h, w):
    """Load images listed in a CSV into a flattened edge-feature matrix.

    Each CSV row is (relative_image_path, integer_label). Every image is
    converted to a Canny edge map via get_canny_edge, normalized to [0, 1],
    and stored as one flattened row of x_data.

    Args:
        filename: path to the CSV index file.
        n: number of rows to pre-allocate (must match the CSV row count).
        h, w: image height and width in pixels.

    Returns:
        (x_data, y_data): float array of shape (n, h*w) and list of int labels.
    """
    with open(filename, 'r') as csvfile:
        # Materialize all (path, label) rows up front.
        data = list(csv.reader(csvfile))
    x_data = np.zeros((n, h * w), dtype=float)
    y_data = []
    path = "/home/jayant/PycharmProjects/Indian sign language character recognition/"
    for i, row in enumerate(data):
        current_image_path = path + row[0]
        y_data.append(int(row[1]))
        current_image = cv2.imread(current_image_path)
        canny_image = get_canny_edge(current_image)[0]
        # Normalize pixel values to [0, 1] and flatten.
        # BUG FIX: the flatten size was hard-coded to 128*128, silently
        # ignoring the h/w parameters; use h*w so other sizes work too.
        x_data[i] = np.asarray(canny_image).reshape(1, h * w) / 255
    return x_data, y_data
# Load train/test splits (28520 / 7130 images of 128x128).
x_train, y_train = load_dataset("/home/jayant/PycharmProjects/Indian sign language character recognition/Dataset/train.csv",28520,128,128)
x_test, y_test = load_dataset("/home/jayant/PycharmProjects/Indian sign language character recognition/Dataset/test.csv",7130,128,128)
# Keep the raw integer test labels for the later report/confusion matrix.
y = y_test
from sklearn.preprocessing import LabelBinarizer
label_binarizer = LabelBinarizer()
y_train = label_binarizer.fit_transform(y_train)
# NOTE(review): fit_transform on the test labels refits the binarizer; use
# transform() to guarantee the same class ordering as the training labels.
y_test = label_binarizer.fit_transform(y_test)
#Reshaping the data from 1-D to 3-D as required through input by CNN's
x_train = x_train.reshape(-1,128,128,1)
x_test = x_test.reshape(-1,128,128,1)
# Preview the first ten edge images.
f, ax = plt.subplots(2,5)
f.set_size_inches(10, 10)
k = 0
for i in range(2):
    for j in range(5):
        ax[i,j].imshow(x_train[k].reshape(128, 128) , cmap = "gray")
        k += 1
plt.tight_layout()
# +
# With data augmentation to prevent overfitting
# Only mild geometric augmentation is enabled (rotation/zoom/shift); flips
# are disabled because mirrored hand signs change meaning.
datagen = ImageDataGenerator(
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range = 0.1, # Randomly zoom image
    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=False,  # randomly flip images
    vertical_flip=False)  # randomly flip images
datagen.fit(x_train)
# -
# # Part 2 : Model training
# Halve the learning rate whenever validation accuracy plateaus for 2 epochs.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience = 2, verbose=1,factor=0.5, min_lr=0.00001)
# Three Conv->BatchNorm->MaxPool stages (75/50/25 filters) followed by a
# 512-unit dense layer and a 31-way softmax classifier.
model = Sequential()
model.add(Conv2D(75 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu' , input_shape = (128,128,1)))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))
model.add(Conv2D(50 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))
model.add(Conv2D(25 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))
model.add(Flatten())
model.add(Dense(units = 512 , activation = 'relu'))
model.add(Dropout(0.3))
model.add(Dense(units = 31 , activation = 'softmax'))
model.compile(optimizer = 'adam' , loss = 'categorical_crossentropy' , metrics = ['accuracy'])
model.summary()
# Train for 20 epochs on augmented batches of 8, validating on the test set.
history = model.fit(datagen.flow(x_train,y_train, batch_size = 8) ,epochs = 20 , validation_data = (x_test, y_test) , callbacks = [learning_rate_reduction])
print("Accuracy of the model is - " , model.evaluate(x_test,y_test)[1]*100 , "%")
# reference link : https://machinelearningmastery.com/save-load-keras-deep-learning-models/
# save model and architecture to single file
model.save("model_final.h5")
#save model into json format and weights in different file
# serialize model to JSON
model_json = model.to_json()
with open("model_json_format.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model_json_final.h5")
print("Saved model to disk")
# +
# Training curves: accuracy and loss per epoch for train vs. validation.
epochs = [i for i in range(20)]
fig , ax = plt.subplots(1,2)
train_acc = history.history['accuracy']
train_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
fig.set_size_inches(16,9)
ax[0].plot(epochs , train_acc , 'go-' , label = 'Training Accuracy')
ax[0].plot(epochs , val_acc , 'ro-' , label = 'Testing Accuracy')
ax[0].set_title('Training & Validation Accuracy')
ax[0].legend()
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[1].plot(epochs , train_loss , 'g-o' , label = 'Training Loss')
ax[1].plot(epochs , val_loss , 'r-o' , label = 'Testing Loss')
ax[1].set_title('Testing Accuracy & Loss')
ax[1].legend()
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Loss")
plt.show()
# -
# Reload raw test labels for sklearn's report functions.
x,y = load_dataset("/home/jayant/PycharmProjects/Indian sign language character recognition/Dataset/test.csv",7130,128,128)
print(type(y[0]))
# NOTE(review): predict_classes was removed in TF 2.6; newer code should use
# np.argmax(model.predict(x_test), axis=-1).
predictions = model.predict_classes(x_test)
print(predictions)
# Shift from 0-based network outputs back to the dataset's 1-based labels.
predictions +=1
# +
# NOTE(review): 32 class names are generated here but the model's softmax has
# 31 units — confirm the intended label set.
classes = ["Class " + str(i) for i in range(32)]# if i != 9]
#classes = [0,1,2,3,4,5,6,7,8,9,10,11,12]
print(classes)
print(classification_report(y, predictions, target_names = classes))
# -
cm = confusion_matrix(y,predictions)
cm = pd.DataFrame(cm , index = [i for i in range(32)] , columns = [i for i in range(32)])
plt.figure(figsize = (15,15))
sns.heatmap(cm,cmap= "Blues", linecolor = 'black' , linewidth = 1 , annot = True, fmt='')
# Show a few correctly-classified test images.
correct = np.nonzero(predictions == y)[0]
i = 0
for c in correct[:6]:
    plt.subplot(3,2,i+1)
    plt.imshow(x_test[c].reshape(128,128), cmap="gray", interpolation='none')
    plt.title("Predicted Class {},Actual Class {}".format(predictions[c], y[c]))
    plt.tight_layout()
    i += 1
import tensorflow as tf
# Single-image sanity check using the model saved to disk.
path ="/home/jayant/PycharmProjects/Indian-sign-language-recognition-master/data/Z/1198.jpg"
mod = tf.keras.models.load_model('model_final.h5')
img = cv2.imread(path)
img = get_canny_edge(img)[0]
#img = cv2.resize(img,(128,128))
img = img.reshape((1,128,128,1))
predictions = mod.predict(img)
print(predictions)
# NOTE(review): the final layer is already softmax, so this applies softmax
# twice; argmax is unaffected, but `score` is not a probability distribution
# over the original logits.
score = tf.nn.softmax(predictions[0])
classes = ['1', '2','3', '4', '5', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'I', 'K', 'L', 'M',
           'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U','W',
           'X', 'Y', 'Z']
#classes = ['1', '2', '3','4','5', '7', '8','9','C','L','O','U']
print(
    "This image most likely belongs to {} "
    .format(classes[np.argmax(score)])
)
| ISL-main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Bitcoin prediction with Hierarchical Temporal Memory ML by Numenta
# Based on and modified from an example in their core repo:
#
# https://github.com/htm-community/htm.core/blob/master/py/htm/examples/hotgym.py
#
# License will be the same - GNU Affero:
#
# https://github.com/htm-community/htm.core/blob/master/LICENSE.txt
# +
import csv
import datetime
import os
import numpy as np
import random
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
from jupyterthemes import jtplot
# Apply the jupyterthemes plot style and enlarge figures for the notebook.
jtplot.style()
# #%config InlineBackend.figure_format = 'svg'
plt.rcParams['figure.dpi'] = 150
plt.rcParams["figure.figsize"] = (11,8)
from htm.bindings.sdr import SDR, Metrics
from htm.encoders.rdse import RDSE, RDSE_Parameters
from htm.encoders.date import DateEncoder
from htm.bindings.algorithms import SpatialPooler
from htm.bindings.algorithms import TemporalMemory
from htm.algorithms.anomaly_likelihood import AnomalyLikelihood #FIXME use TM.anomaly instead, but it gives worse results than the py.AnomalyLikelihood now
from htm.bindings.algorithms import Predictor
# The bitcoin CSV is expected next to this notebook.
_EXAMPLE_DIR = os.path.abspath('')
_INPUT_FILE_PATH = os.path.join(_EXAMPLE_DIR, "bitcoin_all.csv")
# copied from https://github.com/htm-community/htm.core/blob/master/py/htm/examples/hotgym.py and modified
# i guess license will be the same
# HTM hyperparameters: encoder sizes/sparsity, spatial-pooler geometry,
# temporal-memory synapse settings, and anomaly-likelihood windows.
parameters = {
    # there are 2 (3) encoders: "value" (RDSE) & "time" (DateTime weekend, timeOfDay)
    'enc': {
        "value" :
            {'resolution': 0.88, 'size': 800, 'sparsity': 0.02},
        "time":
            {'timeOfDay': (30, 1), 'weekend': 21}
    },
    'predictor': {'sdrc_alpha': 0.1},
    'sp': {'boostStrength': 3.0,
           'columnCount': 238,
           'localAreaDensity': 0.06395604395604396,
           'potentialPct': 0.95,
           'synPermActiveInc': 0.04,
           'synPermConnected': 0.13999999999999999,
           'synPermInactiveDec': 0.06},
    'tm': {'activationThreshold': 17,
           'cellsPerColumn': 20,
           'initialPerm': 0.11,
           'maxSegmentsPerCell': 128,
           'maxSynapsesPerSegment': 64,
           'minThreshold': 10,
           'newSynapseCount': 128,
           'permanenceDec': 0.1,
           'permanenceInc': 0.1},
    'anomaly': {
        'likelihood':
            {#'learningPeriod': int(math.floor(self.probationaryPeriod / 2.0)),
             #'probationaryPeriod': self.probationaryPeriod-default_parameters["anomaly"]["likelihood"]["learningPeriod"],
             'probationaryPct': 0.1,
             'reestimationPeriod': 100} #These settings are copied from NAB
    }
}
# +
import pprint
print("Parameters:")
pprint.pprint(parameters, indent=4)
print("")
# Read the input file.
records = []
with open(_INPUT_FILE_PATH, "r") as fin:
    reader = csv.reader(fin)
    headers = next(reader)
    for record in reader:
        records.append(record)
# Parse timestamps/prices for a quick visual check of the raw series.
x_g = []
y_g = []
for r in records:
    x_g.append(datetime.datetime.strptime(r[0], "%Y-%m-%d %H:%M:%S"))
    y_g.append(float(r[1]))
plt.xlabel("Time")
plt.ylabel("Val")
plt.plot(x_g, y_g)
# +
print("running....")
# Make the Encoders. These will convert input data into binary representations.
dateEncoder = DateEncoder(timeOfDay= parameters["enc"]["time"]["timeOfDay"],
weekend = parameters["enc"]["time"]["weekend"])
scalarEncoderParams = RDSE_Parameters()
scalarEncoderParams.size = parameters["enc"]["value"]["size"]
scalarEncoderParams.sparsity = parameters["enc"]["value"]["sparsity"]
scalarEncoderParams.resolution = parameters["enc"]["value"]["resolution"]
scalarEncoder = RDSE( scalarEncoderParams )
encodingWidth = (dateEncoder.size + scalarEncoder.size)
enc_info = Metrics( [encodingWidth], 999999999 )
# Make the HTM. SpatialPooler & TemporalMemory & associated tools.
spParams = parameters["sp"]
sp = SpatialPooler(
inputDimensions = (encodingWidth,),
columnDimensions = (spParams["columnCount"],),
potentialPct = spParams["potentialPct"],
potentialRadius = encodingWidth,
globalInhibition = True,
localAreaDensity = spParams["localAreaDensity"],
synPermInactiveDec = spParams["synPermInactiveDec"],
synPermActiveInc = spParams["synPermActiveInc"],
synPermConnected = spParams["synPermConnected"],
boostStrength = spParams["boostStrength"],
wrapAround = True
)
# Metrics tracks SDR statistics (sparsity, activation frequency, overlap) over the SP output.
sp_info = Metrics( sp.getColumnDimensions(), 999999999 )
# Temporal Memory learns sequences of SP mini-column activations.
tmParams = parameters["tm"]
tm = TemporalMemory(
    columnDimensions = (spParams["columnCount"],),
    cellsPerColumn = tmParams["cellsPerColumn"],
    activationThreshold = tmParams["activationThreshold"],
    initialPermanence = tmParams["initialPerm"],
    connectedPermanence = spParams["synPermConnected"],
    minThreshold = tmParams["minThreshold"],
    maxNewSynapseCount = tmParams["newSynapseCount"],
    permanenceIncrement = tmParams["permanenceInc"],
    permanenceDecrement = tmParams["permanenceDec"],
    predictedSegmentDecrement = 0.0,
    maxSegmentsPerCell = tmParams["maxSegmentsPerCell"],
    maxSynapsesPerSegment = tmParams["maxSynapsesPerSegment"]
)
tm_info = Metrics( [tm.numberOfCells()], 999999999 )
# setup likelihood, these settings are used in NAB
anParams = parameters["anomaly"]["likelihood"]
probationaryPeriod = int(math.floor(float(anParams["probationaryPct"])*len(records)))
learningPeriod = int(math.floor(probationaryPeriod / 2.0))
anomaly_history = AnomalyLikelihood(learningPeriod= learningPeriod,
                                    estimationSamples= probationaryPeriod - learningPeriod,
                                    reestimationPeriod= anParams["reestimationPeriod"])
# Predict 1 step and 90 steps ahead from the TM's active cells.
predictor = Predictor( steps=[1, 90], alpha=parameters["predictor"]['sdrc_alpha'] )
# Resolution is how accurately the prediction should resolve the dollar amount:
# resolution 1000 means the prediction is only accurate to within 1000.
# That is fine for bitcoin, but breaks the graph for something like dogecoin,
# which is worth under 1 cent.  The predictor's resolution needs to match the
# data's scale or it takes a very long time to process, so a bucket is picked
# from the magnitude of the data (max of y_g).
# NOTE(review): the ranges below are not contiguous -- a max(y_g) in (10, 100)
# or (10000 exactly matched twice aside) falls through and keeps the default
# resolution of 10; confirm this is intended.
resolutions_choices = {
    10: [10000, 999999999999], # max between 10000 - ∞
    1: [1000, 10000],
    0.1: [100, 1000],
    0.01: [0, 10]
}
predictor_resolution = 10
for res in resolutions_choices:
    price_range = resolutions_choices[res]
    if max(y_g) >= price_range[0] and max(y_g) <= price_range[1]:
        predictor_resolution = res
print("predictor_resolution")
print(predictor_resolution)
# Iterate through every datum in the dataset, record the inputs & outputs.
# NOTE(review): "consumption" here is the price value -- the name is a
# leftover from the hotgym power-consumption example this is based on.
inputs = []
anomaly = []
anomalyProb = []
predictions = {1: [], 90: []}
for count, record in enumerate(records):
    # Convert date string into Python date object.
    dateString = datetime.datetime.strptime(record[0], "%Y-%m-%d %H:%M:%S")
    # Convert data value string into float.
    consumption = float(record[1])
    inputs.append( consumption )
    # Call the encoders to create bit representations for each value. These are SDR objects.
    dateBits = dateEncoder.encode(dateString)
    consumptionBits = scalarEncoder.encode(consumption)
    # Concatenate all these encodings into one large encoding for Spatial Pooling.
    encoding = SDR( encodingWidth ).concatenate([consumptionBits, dateBits])
    enc_info.addData( encoding )
    # Create an SDR to represent active columns, This will be populated by the
    # compute method below. It must have the same dimensions as the Spatial Pooler.
    activeColumns = SDR( sp.getColumnDimensions() )
    # Execute Spatial Pooling algorithm over input space.
    sp.compute(encoding, True, activeColumns)
    sp_info.addData( activeColumns )
    # Execute Temporal Memory algorithm over active mini-columns.
    tm.compute(activeColumns, learn=True)
    tm_info.addData( tm.getActiveCells().flatten() )
    # Predict what will happen, and then train the predictor based on what just happened.
    pdf = predictor.infer( tm.getActiveCells() )
    for n in (1, 90):
        if pdf[n]:
            # argmax over the probability density gives the most likely bucket;
            # multiplying by the resolution recovers an actual price.
            predictions[n].append( np.argmax( pdf[n] ) * predictor_resolution )
        else:
            # No prediction available yet (predictor still warming up).
            predictions[n].append(float('nan'))
    anomalyLikelihood = anomaly_history.anomalyProbability( consumption, tm.anomaly )
    anomaly.append( tm.anomaly )
    anomalyProb.append( anomalyLikelihood )
    predictor.learn(count, tm.getActiveCells(), int(consumption / predictor_resolution))
# Print information & statistics about the state of the HTM.
print("Encoded Input", enc_info)
print("")
print("Spatial Pooler Mini-Columns", sp_info)
print(str(sp))
print("")
print("Temporal Memory Cells", tm_info)
print(str(tm))
print("")
# Shift the predictions so that they are aligned with the input they predict.
for n_steps, pred_list in predictions.items():
    for x in range(n_steps):
        pred_list.insert(0, float('nan'))
        pred_list.pop()
# Calculate the predictive accuracy, Root-Mean-Squared
accuracy = {1: 0, 90: 0}
accuracy_samples = {1: 0, 90: 0}
for idx, inp in enumerate(inputs):
    for n in predictions: # For each [N]umber of time steps ahead which was predicted.
        val = predictions[n][ idx ]
        if not math.isnan(val):
            accuracy[n] += (inp - val) ** 2
            accuracy_samples[n] += 1
for n in sorted(predictions):
    accuracy[n] = (accuracy[n] / accuracy_samples[n]) ** .5
    print("Predictive Error (RMS)", n, "steps ahead:", accuracy[n])
# Show info about the anomaly (mean & std)
print("Anomaly Mean", np.mean(anomaly))
print("Anomaly Std ", np.std(anomaly))
# +
# Plot the Predictions and Anomalies.
# NOTE(review): the data plotted here are prices; the old "Power Consumption"
# labels were leftovers from the hotgym example and have been corrected.
print("Graph of training progress through the set. Gets more accurate as it gets further through the set:")
plt.subplot(2,1,1)
plt.title("Predictions")
plt.xlabel("Time")
plt.ylabel("Price")
plt.plot(np.arange(len(inputs)), y_g,
         np.arange(len(inputs)), predictions[1],
         np.arange(len(inputs)), predictions[90])
# Fixed: the third curve is the 90-step prediction (shifted 90 steps);
# the previous label incorrectly called it a 5-step prediction.
plt.legend(labels=('Input', '1 Step Prediction, Shifted 1 step', '90 Step Prediction, Shifted 90 steps'))
plt.subplot(2,1,2)
plt.title("Anomaly Score")
plt.xlabel("Time")
plt.ylabel("Normalized Price / Anomaly")
# Normalize inputs so they share a [0, 1]-ish axis with the anomaly score.
inputs = np.array(inputs) / max(inputs)
plt.plot(np.arange(len(inputs)), inputs,
         np.arange(len(inputs)), anomaly)
plt.legend(labels=('Input', 'Anomaly Score'))
plt.subplots_adjust(hspace=0.4)
plt.show()
# Fixed: this is the (negated) 90-step-ahead RMS error; it was mislabeled
# as accuracy[5].
print("-accuracy[90]:", -accuracy[90])
# +
#print(records)
# Roll the model forward 90 days past the end of the real data: each predicted
# value is appended to `records` and fed back in as the next day's input.
goal_len = len(records) + 90
while len(records) < goal_len:
    record = records[-1]
    # Convert date string into Python date object.
    dateString = datetime.datetime.strptime(record[0], "%Y-%m-%d %H:%M:%S")
    # Timestamp for the new (predicted) record: one day after the last one.
    dateStringPlusOne = (datetime.datetime.strptime(record[0], "%Y-%m-%d %H:%M:%S")+datetime.timedelta(days=1))
    dateStringPlusOne = dateStringPlusOne.strftime("%Y-%m-%d %H:%M:%S")
    # Convert data value string into float.
    consumption = float(record[1])
    #inputs.append( consumption )
    # Call the encoders to create bit representations for each value. These are SDR objects.
    dateBits = dateEncoder.encode(dateString)
    consumptionBits = scalarEncoder.encode(consumption)
    # Concatenate all these encodings into one large encoding for Spatial Pooling.
    encoding = SDR( encodingWidth ).concatenate([consumptionBits, dateBits])
    enc_info.addData( encoding )
    # Create an SDR to represent active columns, This will be populated by the
    # compute method below. It must have the same dimensions as the Spatial Pooler.
    activeColumns = SDR( sp.getColumnDimensions() )
    # Execute Spatial Pooling algorithm over input space.
    sp.compute(encoding, True, activeColumns)
    sp_info.addData( activeColumns )
    # Execute Temporal Memory algorithm over active mini-columns.
    # learn=False: inference only -- the model is frozen during forecasting.
    tm.compute(activeColumns, learn=False)
    tm_info.addData( tm.getActiveCells().flatten() )
    # Predict what will happen, and then add to records for next prediction
    pdf = predictor.infer( tm.getActiveCells() )
    #for n in (1):#, 5):
    if pdf[1]:
        #predictions[n].append( np.argmax( pdf[n] ) * predictor_resolution )
        records.append([dateStringPlusOne, np.argmax( pdf[1] ) * predictor_resolution])
    else:
        # No prediction available: carry the last value forward.
        records.append([dateStringPlusOne, records[-1][1]])
        #predictions[n].append(float('nan'))
# Collect the full (real + forecast) series for plotting.
y_g2 = []
for r in records:
    #x_g.append(datetime.datetime.strptime(r[0], "%Y-%m-%d %H:%M:%S"))
    y_g2.append(float(r[1]))
#print(y_)
plt.subplot(2,1,1)
plt.title("Prediction 90 days out")
plt.xlabel("Time")
plt.ylabel("Val")
plt.plot(np.arange(len(y_g2)), y_g2, np.arange(len(y_g)), y_g)
plt.legend(labels=('Predicted data', 'Real data'))
# Zoom in: keep the last 30 real points and the matching 14-day forecast window.
y_g = y_g[-30:]
y_g2 = y_g2[-120:]
y_g2 = y_g2[0:len(y_g) + 14]
plt.subplot(2,1,2)
plt.title("Prediction 14 days out")
plt.xlabel("Time")
plt.ylabel("Val")
plt.plot(np.arange(len(y_g2)), y_g2, np.arange(len(y_g)), y_g)
plt.legend(labels=('Predicted data', 'Real data'))
plt.subplots_adjust(hspace=0.4)
plt.show()
import sys
# Dump the full (real + forecast) series to the CSV cache read by the client.
csv = "i,ds,y\n"
for i in range(len(records)):
    csv += "_," + ",".join(map(str, records[i]))
    csv += "\n"
f = open("./predictions-cache/bitcoin.csv", "w")#+sys.argv[1], "w")
f.write(csv)
f.close()
# -
# Better than the LSTM neural net's prediction, which just copied exactly what happened the previous 90 days and pasted it at the end of the current graph, as a cheat to get to high accuracy.
#
# This looks a little more believable as forecasting real data, but a downside of this method I see is that it isn't able to grasp the trend. The forecasted data fluctuates around an average with no overall upward or downward trend. This effect isn't as noticeable with smaller timeframes so I will just hide it by not predicting beyond 14 days or so.
#
# Wasn't going for super high accuracy or anything to actually use in a real trading scenario anyway, more of just a fun project/neat feature. Accurately predicting markets is a full time job in and of itself and even then it's still hard/impossible to get it perfect.
#
# Leaving as per day granularity for now as this at least looks passable. Tried to switch it to hourly as in cryptodash-client <90d data is displayed by the hour, but model fell apart. Maybe todo to tweak and get that working eventually but this is fine for now.
| cryptodash-prediction/price prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Workspace \#2: ``constants.py``
import sys
sys.path.insert(0, '../')
import constants as ct
# List everything defined in constants.py
dir(ct)
# Velocity dispersion printed in km/s.  Assumes _sigma_v_ is in units of c and
# _light_speed_ is in m/s (so /1000. converts to km/s) -- confirm in constants.py.
print(str(ct._sigma_v_*ct._light_speed_/1000.)+" km/s")
print("No. of sigmas for bandwidth: %f\nFraction of Gaussian within bandwidth: %f\nBandwidth in units of central frequency: %f" % (ct._tophat_width_, ct._f_Delta_, ct._deltaE_over_E_))
# E.g.: some SKA parameters:
print('SKA low frequencies [GHz]: %s %s' %(ct._nu_min_ska_low_, ct._nu_max_ska_low_))
print('SKA mid frequencies [GHz]: %s %s' %(ct._nu_min_ska_mid_, ct._nu_max_ska_mid_))
# E.g.: some peak lightcurve quantities $(t_{\rm peak}, L_{\rm peak})$ distribution parameters
#
#
# Taken from Table 4 in Bietenholz et al. ([arXiv:2011.11737](https://arxiv.org/abs/2011.11737)), for log-normal distributions.
print('All SNe (D < 100 Mpc):')
print('\t\t\tmu [mean]', '\t sigma [standard deviation]\n...................................................................')
print('log_10(t_pk/days)\t %s \t\t\t %s' %(ct._mu_log10_tpk_, ct._sig_log10_tpk_))
print('log_10(L_pk/cgs)\t %s \t\t\t %s' %(ct._mu_log10_Lpk_, ct._sig_log10_Lpk_))
| workspace_notebooks/ws-2_constants.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 0.0. IMPORTS
# +
import datetime
import math

import inflection
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from IPython.display import Image
# -
# ## 0.1. Helper Functions
# + [markdown] heading_collapsed=true
# ## 0.2. Loading data
#
# + hidden=true
# Raw Rossmann data: daily sales per store, plus per-store metadata.
df_sales_raw = pd.read_csv ('C:/Users/Daniela/Google Drive/data_science_producao/data/train.csv', low_memory = False)
df_store_raw = pd.read_csv ('C:/Users/Daniela/Google Drive/data_science_producao/data/store.csv', low_memory = False)
# merge: a left join keeps every sales row, attaching its store's attributes
df_raw = pd.merge(df_sales_raw, df_store_raw, how = 'left', on = 'Store')
# + hidden=true
df_raw.sample()
# + [markdown] heading_collapsed=true
# # 1.0. DESCRICAO DOS DADOS
# + hidden=true
df1 = df_raw.copy()
# + [markdown] hidden=true
# ## 1.1 Rename Columns
# + hidden=true
cols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo', 'StateHoliday',
            'SchoolHoliday', 'StoreType', 'Assortment','CompetitionDistance', 'CompetitionOpenSinceMonth',
            'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek', 'Promo2SinceYear', 'PromoInterval']
snakecase = lambda x: inflection.underscore(x)
cols_new = list(map(snakecase, cols_old))
# rename all columns in place to snake_case
df1.columns = cols_new
# + [markdown] hidden=true
# ## 1.2. Data Dimension
# + hidden=true
print ('Número de Rows: {}'.format(df1.shape[0]))
print ('Número de Columns: {}'.format(df1.shape[1]))
# + [markdown] hidden=true
#
# ## 1.3. Data Type
# + hidden=true
df1['date'] = pd.to_datetime(df1['date'])
df1.dtypes
# + [markdown] hidden=true
# ## 1.4. Check NA
# + hidden=true
df1.isna().sum()
# + [markdown] hidden=true
# ## 1.5. Fillout NA
# + hidden=true
# competition_distance: NA presumably means no nearby competitor; impute a very
# large distance (200 km) so the feature still carries that information.
df1['competition_distance']= df1['competition_distance'].apply(lambda x: 200000.0 if math.isnan(x) else x)
# competition_open_since_month: fall back to the sale date's own month
df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if math.isnan(x['competition_open_since_month']) else x['competition_open_since_month'], axis = 1)
# competition_open_since_year: fall back to the sale date's own year
df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['competition_open_since_year']) else x['competition_open_since_year'], axis = 1)
# promo2_since_week: fall back to the sale date's own week
df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if math.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis = 1)
# promo2_since_year: fall back to the sale date's own year
df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis = 1)
# promo_interval: 0 marks "no recurring promo"; is_promo flags rows whose month
# abbreviation appears in the comma-separated promo_interval list.
month_map = {1: 'Jan', 2: 'Fev', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
df1['promo_interval'].fillna(0, inplace = True)
df1['month_map'] = df1['date'].dt.month.map(month_map)
df1['is_promo'] = df1[['promo_interval', 'month_map']].apply(lambda x: 0 if x['promo_interval'] == 0 else 1 if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1)
# + hidden=true
df1.isna().sum()
# + [markdown] hidden=true
# ## 1.6. Change type
# + hidden=true
df1.dtypes
# + hidden=true
# Cast the imputed columns (now NA-free) back to integers.
df1['competition_open_since_month'] = df1['competition_open_since_month'].astype('int64')
df1['competition_open_since_year'] = df1['competition_open_since_year'].astype('int64')
df1['promo2_since_week'] = df1['promo2_since_week'].astype('int64')
df1['promo2_since_year'] = df1['promo2_since_year'].astype('int64')
# + [markdown] hidden=true
# ## 1.7 Descriptive statistical
# + hidden=true
# NOTE(review): "attirbutes" is a typo, kept because later cells reuse the name.
num_attirbutes= df1.select_dtypes(include = ['int64', 'float64'])
cat_attirbutes= df1.select_dtypes(exclude = ['int64', 'float64', 'datetime64[ns]'])
# + [markdown] hidden=true
# ### 1.7.1. Numerical Attribute
# + hidden=true
# Central Tendency - mean, median
ct1 = pd.DataFrame(num_attirbutes.apply(np.mean)).T
ct2 = pd.DataFrame(num_attirbutes.apply(np.median)).T
# Dispersion - std, min, max, range, skew, kurtosis
d1 = pd.DataFrame(num_attirbutes.apply(np.std)).T
d2 = pd.DataFrame(num_attirbutes.apply(min)).T
d3 = pd.DataFrame(num_attirbutes.apply(max)).T
d4 = pd.DataFrame(num_attirbutes.apply(lambda x: x.max() - x.min())).T
d5 = pd.DataFrame(num_attirbutes.apply(lambda x: x.skew())).T
d6 = pd.DataFrame(num_attirbutes.apply(lambda x: x.kurtosis())).T
# Concatenate all statistics into one summary table (one row per attribute)
m = pd.concat([d2, d3, d4, ct1, ct2, d1, d5,d6]).T.reset_index()
m.columns = ['Attribute', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'Kurtosis']
m
# + [markdown] hidden=true
# ### 1.7.2. categorical attributes
# + hidden=true
# Cardinality of each categorical attribute.
cat_attirbutes.apply(lambda x: x.unique().shape[0])
# + hidden=true
# Box plots of sales on state holidays, split by store type / holiday / assortment.
aux1= df1[(df1['state_holiday'] != '0') & (df1['sales'] > 0)]
fig = plt.figure(figsize=(15,5))
plt.subplot(1,3,1)
sns.boxplot(x= 'store_type', y = 'sales', data= aux1)
plt.subplot(1,3,2)
sns.boxplot(x= 'state_holiday', y = 'sales', data= aux1)
plt.subplot(1,3,3)
sns.boxplot(x= 'assortment', y = 'sales', data= aux1)
fig.tight_layout()
# -
# # 2.0. FEATURE ENGINEERING
df2 = df1.copy()
# ## 2.1. Mapa Mental de Hipóteses
# Display the hypothesis mind-map image used to guide feature selection.
Image('img/MindMapHypothesis.png')
# ## 2.2. Criação de Hipóteses
# ### 2.2.1. Hipóteses Lojas
# **1.** Lojas com maior número de funcionários deveriam vender mais.
#
# **2.** Lojas com maior capacidade de estoque deveriam vender mais.
#
# **3.** Lojas com maior porte deveriam vender mais.
#
# **4.** Lojas com maior sortimentos deveriam vender mais.
#
# **5.** Lojas com competidores mais próximos deveriam vender menos.
#
# **6.** Lojas com competidores à mais tempo deveriam vender mais.
# ### 2.2.2. Hipóteses Produto
# **1.** Lojas que investem mais em marketing deveriam vender mais.
#
# **2.** Lojas com maior exposição de produtos deveriam vender mais.
#
# **3.** Lojas com produtos com preço menor deveriam vender mais.
#
# **4.** Lojas com promoções mais agressivas (com descontos maiores), deveriam vender mais.
#
# **5.** Lojas com promoções mais ativas por mais tempo deveriam vender mais.
#
# **6.** Lojas com mais dias de promoção deveriam vender mais.
#
# **7.** Lojas com mais promoções consecutivas deveriam vender mais.
# ### 2.2.3 Hipóteses Tempo
# **1.** Lojas abertas durante o feriado de Natal deveriam vender mais.
#
# **2.** Lojas deveriam vender mais ao longo dos anos.
#
# **3.** Lojas deveriam vender mais no segundo semestre de ano.
#
# **4.** Lojas deveriam vender mais depois do dia 10 de cada mês.
#
# **5.** Lojas deveriam vender menos aos finais de semana.
#
# **6.** Lojas deveriam vender menos durante os feriados escolares.
# ## 2.2.4 Lista Final de hipóteses priorizadas
# **1.** Lojas com maior sortimentos deveriam vender mais.
#
# **2.** Lojas com competidores mais próximos deveriam vender menos.
#
# **3.** Lojas com competidores à mais tempo deveriam vender mais.
# **4.** Lojas com promoções mais ativas por mais tempo deveriam vender mais.
#
# **5.** Lojas com mais dias de promoção deveriam vender mais.
#
# **6.** Lojas com mais promoções consecutivas deveriam vender mais.
# **7.** Lojas abertas durante o feriado de Natal deveriam vender mais.
#
# **8.** Lojas deveriam vender mais ao longo dos anos.
#
# **9.** Lojas deveriam vender mais no segundo semestre de ano.
#
# **10.** Lojas deveriam vender mais depois do dia 10 de cada mês.
#
# **11.** Lojas deveriam vender menos aos finais de semana.
#
# **12.** Lojas deveriam vender menos durante os feriados escolares.
#
# ## 2.3. Feature Engineering
# +
# Derived calendar features from the sale date.
# year
df2['year'] = df2['date'].dt.year
# month
df2['month'] = df2['date'].dt.month
# day
df2['day'] = df2['date'].dt.day
# week of year
df2['week_of_year'] = df2['date'].dt.weekofyear #isocalendar().week --> vai ser assim nas próximas versões (deprecated alias in newer pandas)
#year week
df2['year_week'] = df2['date'].dt.strftime('%Y-%W')
# competition since: reconstruct the competitor's opening date, then the
# competition age in approximate (30-day) months.
# NOTE(review): these cells need `import datetime` at the top of the notebook.
df2['competition_since'] = df2.apply(lambda x: datetime.datetime( year= x['competition_open_since_year'], month = x['competition_open_since_month'], day = 1), axis = 1)
df2['competition_time_month'] = ((df2['date'] - df2['competition_since'])/30).apply(lambda x: x.days).astype('int64')
# promo since: rebuild the promo start date from year + week number, then the
# promo age in weeks.
df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)
df2['promo_since'] = df2['promo_since'].apply(lambda x: datetime.datetime.strptime(x + '-1', '%Y-%W-%w') - datetime.timedelta(days=7))
df2['promo_time_week'] = ((df2['date']-df2['promo_since'])/7).apply(lambda x: x.days).astype('int64')
# assortment: decode the one-letter codes into readable labels
df2['assortment'] = df2['assortment'].apply(lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended')
# state holiday: decode the one-letter codes into readable labels
df2['state_holiday'] = df2['state_holiday'].apply(lambda x: 'public holiday' if x == 'a' else 'easter holiday' if x == 'b' else 'christmas' if x == 'c' else 'regular day' )
# -
# # 3.0 FILTRAGEM DE VARIÁVEL
df3 = df2.copy()
df3.head()
# ## 3.1. Filtragem das linhas
# Keep only open stores with positive sales (closed days carry no signal).
df3= df3[(df3['open'] != 0) & (df3['sales']> 0)]
# ## 3.2 FIltragem de colunas
# Drop columns unavailable at prediction time or only used as helpers.
# NOTE(review): 'clos_drop' is presumably a typo for 'cols_drop'.
clos_drop = ['customers', 'open', 'promo_interval', 'month_map']
df3 = df3.drop(clos_drop, axis =1 )
| m03_v01_sobre_sales_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="l2HdqNTcHh5J" outputId="79cb41fa-fda8-4576-fe85-3ff75522157a"
from google.colab import drive
drive.mount('/content/drive', force_remount= True)
# + colab={"base_uri": "https://localhost:8080/"} id="xzh1zggPgKP_" outputId="14c1ed0c-589f-4e42-9f5d-80fc8d5bfd4a"
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
# You might not have tqdm, which gives you nice progress bars
# !pip install tqdm
from tqdm.notebook import tqdm
import os
import copy
import cv2
# Detect if we have a GPU available; fall back to CPU otherwise.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    print("Using the GPU!")
else:
    print("WARNING: Could not find GPU! Using CPU only")
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="GuobbBZkgeYE" outputId="ca0d097a-539a-4855-c9b7-910708fa801a"
# # #!/usr/bin/env python3
# # Download the 56 zip files in Images_png in batches
# import urllib.request
# # URLs for the zip files
# links = [
# 'https://nihcc.box.com/shared/static/vfk49d74nhbxq3nqjg0900w5nvkorp5c.gz',
# 'https://nihcc.box.com/shared/static/i28rlmbvmfjbl8p2n3ril0pptcmcu9d1.gz',
# 'https://nihcc.box.com/shared/static/f1t00wrtdk94satdfb9olcolqx20z2jp.gz',
# 'https://nihcc.box.com/shared/static/0aowwzs5lhjrceb3qp67ahp0rd1l1etg.gz',
# 'https://nihcc.box.com/shared/static/v5e3goj22zr6h8tzualxfsqlqaygfbsn.gz',
# 'https://nihcc.box.com/shared/static/asi7ikud9jwnkrnkj99jnpfkjdes7l6l.gz',
# 'https://nihcc.box.com/shared/static/jn1b4mw4n6lnh74ovmcjb8y48h8xj07n.gz', #Pneumonia
# 'https://nihcc.box.com/shared/static/tvpxmn7qyrgl0w8wfh9kqfjskv6nmm1j.gz', #Pneumonothorax
# 'https://nihcc.box.com/shared/static/upyy3ml7qdumlgk2rfcvlb9k6gvqq2pj.gz',
# 'https://nihcc.box.com/shared/static/l6nilvfa9cg3s28tqv1qc1olm3gnz54p.gz',
# 'https://nihcc.box.com/shared/static/hhq8fkdgvcari67vfhs7ppg2w6ni4jze.gz',
# 'https://nihcc.box.com/shared/static/ioqwiy20ihqwyr8pf4c24eazhh281pbu.gz'
# ]
# for idx, link in enumerate(links):
# fn = f' images_{idx + 1:03d}.tar.gz'
# print('downloading'+fn+'...')
# urllib.request.urlretrieve(link, '/content/drive/MyDrive/Pneumonia/' + fn) # download the zip file
# print("Download complete. Please check the checksums")
# + colab={"base_uri": "https://localhost:8080/"} id="cQ8VXGcIcKCV" outputId="f1f28958-d383-43cf-ddd0-c7a73bb7e6ed"
# %cd '/content/drive/MyDrive/Pneumonia/'
# + id="qz46bHg2l0ug"
import os
# #!ls
# #!mkdir pneumonia_images
# #!unzip images_001.tar.gz -d pneumonia_images
# Extract all 12 archives.  Note the leading space inside " images_XXX.tar.gz":
# it matches the filenames written by the (commented-out) download cell above,
# which builds fn = f' images_{idx + 1:03d}.tar.gz' with the same leading space.
for i in range(12):
    os.system(f'tar -xvf " images_{i+1:03d}.tar.gz"') #-C 'pneumonia_images'
# + id="8v8sSH2siUid"
!
# + id="ErGt2qmDmTbm"
import torch
from torch.utils.data import Dataset
from PIL import Image
import os
class ChestXrayDataSet(Dataset):
    """Multi-label chest X-ray dataset backed by a plain-text index file.

    Each line of the index file is: <image filename> <label 0> <label 1> ...
    """

    def __init__(self, data_dir, image_list_file, transform=None):
        """
        Args:
            data_dir: path to image directory.
            image_list_file: path to the file listing image names with
                their corresponding labels.
            transform: optional transform to be applied on a sample.
        """
        names = []
        targets = []
        with open(image_list_file, "r") as listing:
            for row in listing:
                fields = row.split()
                names.append(os.path.join(data_dir, fields[0]))
                targets.append([int(flag) for flag in fields[1:]])
        self.image_names = names
        self.labels = targets
        self.transform = transform

    def __getitem__(self, index):
        """Return (image, labels) for the given index.

        The image is loaded as RGB; labels come back as a FloatTensor,
        ready for BCE-style multi-label losses.
        """
        path = self.image_names[index]
        sample = Image.open(path).convert('RGB')
        target = self.labels[index]
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, torch.FloatTensor(target)

    def __len__(self):
        """Number of images listed in the index file."""
        return len(self.image_names)
# + id="KJHDQhUWgGPU"
def initialize_model(model_name, num_classes, resume_from = None):
    """Construct a torchvision classifier with its final layer resized.

    Args:
        model_name: one of "resnet", "alexnet", "vgg", "squeezenet", "densenet".
        num_classes: number of output classes for the replacement final layer.
        resume_from: optional path to a saved state_dict to load.

    Returns:
        (model_ft, input_size): the nn.Module and the expected square input
        resolution (224 for every supported architecture).

    Raises:
        Exception: if `model_name` is not one of the supported names.
    """
    # The model (nn.Module) to return
    model_ft = None
    # The input image is expected to be (input_size, input_size)
    input_size = 0
    # You may NOT use pretrained models!!
    use_pretrained = False
    # By default, all parameters will be trained (useful when you're starting
    # from scratch). Within this function you can set .requires_grad = False
    # for various parameters, if you don't want to learn them.
    if model_name == "resnet":
        # Resnet18.
        # Fixed: removed a stray no-op `models.resnet50` expression that
        # evaluated (and discarded) the constructor reference for nothing.
        model_ft = models.resnet18(pretrained=use_pretrained)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "alexnet":
        # Alexnet
        model_ft = models.alexnet(pretrained=use_pretrained)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == "vgg":
        # VGG11_bn
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == "squeezenet":
        # Squeezenet: the classifier is a conv layer, not a Linear.
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224
    elif model_name == "densenet":
        # Densenet
        model_ft = models.densenet121(pretrained=use_pretrained)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    else:
        raise Exception("Invalid model name!")
    if resume_from is not None:
        print("Loading weights from %s" % resume_from)
        model_ft.load_state_dict(torch.load(resume_from))
    return model_ft, input_size
# + id="o_0m0jYzgLmc"
def get_dataloaders(input_size, batch_size, shuffle = True):
    """Build the train/val/test DataLoaders over ChestXrayDataSet.

    Images are resized/center-cropped to `input_size`, converted to [C,H,W]
    tensors, and normalized with the widely used ImageNet mean/std constants.
    The training split additionally gets light augmentation (flips, small
    rotations and translations).  The index files are expected at
    "<split>_list.txt" and the images under "images".
    """
    normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    eval_pipeline = transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        normalize,
    ])
    data_transforms = {
        'train': transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(20),
            transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)),
            transforms.ToTensor(),
            normalize,
        ]),
        'val': eval_pipeline,
        'test': eval_pipeline,
    }
    # One dataset per split, each driven by its own index file.
    image_datasets = {split: ChestXrayDataSet("images", split + "_list.txt", tfm)
                      for split, tfm in data_transforms.items()}
    print(sum(len(ds) for ds in image_datasets.values()))
    # Never shuffle the validation/test sets; the caller controls train shuffling.
    dataloaders_dict = {split: torch.utils.data.DataLoader(
                            image_datasets[split],
                            batch_size=batch_size,
                            shuffle=(shuffle if split == 'train' else False),
                            num_workers=4)
                        for split in data_transforms.keys()}
    return dataloaders_dict
# + id="K7XwMGk6e4OG"
def train_model(model, dataloaders, criterion, optimizer, save_dir = None, save_all_epochs=False, num_epochs=25):
    '''
    Train `model`, tracking the best validation epoch.

    model: The NN to train
    dataloaders: A dictionary containing at least the keys
        'train','val' that maps to Pytorch data loaders for the dataset
    criterion: The Loss function
    optimizer: The algorithm to update weights
        (Variations on gradient descent)
    num_epochs: How many epochs to train for
    save_dir: Where to save the best model weights that are found,
        as they are found. Will save to save_dir/weights_best.pt
        Using None will not write anything to disk
    save_all_epochs: Whether to save weights for ALL epochs, not just the best
        validation error epoch. Will save to save_dir/weights_e{#}.pt

    Returns:
        (model, val_acc_history): the model re-loaded with the best-validation
        weights, and the list of per-epoch validation accuracies.
    '''
    since = time.time()
    val_acc_history = []
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data. TQDM gives nice progress bars.
            for inputs, labels in tqdm(dataloaders[phase]):
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    # torch.max outputs the maximum value, and its index.
                    # Since the input is batched, we take the max along axis 1.
                    # NOTE(review): with multi-label targets (BCEWithLogitsLoss,
                    # labels shaped [batch, n_classes]) this argmax-based
                    # "accuracy" is not a meaningful metric -- confirm before
                    # relying on it for model selection.
                    _, preds = torch.max(outputs, 1)
                    # backprop + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (weighted by batch size; averaged per epoch below)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)
        # Fixed: only touch the disk when a save directory was supplied; the
        # docstring promises save_dir=None writes nothing, but the old code
        # crashed in os.path.join(None, ...).
        if save_all_epochs and save_dir is not None:
            torch.save(model.state_dict(), os.path.join(save_dir, f'weights_{epoch}.pt'))
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # save (if requested) and load best model weights
    if save_dir is not None:
        torch.save(best_model_wts, os.path.join(save_dir, 'weights_best.pt'))
    model.load_state_dict(best_model_wts)
    return model, val_acc_history
# + id="Ej9IvQMqgQ9Q"
def make_optimizer(model):
    """Create an Adam optimizer over all of `model`'s parameters.

    Also prints the names of the parameters that will receive gradients.
    """
    # Get all the parameters
    params_to_update = model.parameters()
    print("Params to learn:")
    for name, param in model.named_parameters():
        if param.requires_grad == True:
            print("\t",name)
    # Use Adam (the previous comment said SGD, but optim.Adam is what runs).
    optimizer = optim.Adam(params_to_update, lr=0.0003)
    return optimizer
def get_loss():
    """Return the training criterion: binary cross-entropy on raw logits,
    suitable for this multi-label classification setup."""
    return nn.BCEWithLogitsLoss()
# + id="KVX-58oaha5f"
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet]
# You can add your own, or modify these however you wish!
model_name = "resnet"
# Number of classes in the dataset (14 labels here; the list files carry
# 14 binary flags per image).
num_classes = 14
# Batch size for training (change depending on how much memory you have)
# You should use a power of 2.
batch_size = 128
# Shuffle the input data?
shuffle_datasets = True
# Number of epochs to train for
num_epochs = 1
### IO
# Path to a model file to use to start weights at
resume_from = None
# Directory to save weights to
save_dir = f"weights_{model_name}"
os.makedirs(save_dir, exist_ok=True)
# Save weights for all epochs, not just the best one
save_all_epochs = False
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["0307b4450370457aab3b8e69ef8ae3c8", "3d39f652286b44eb82b3bbcef0851145", "5e1dc12f59cb4084b3caa9a635a46212", "d4c9c3caf09749feb619f3a69c0eeb8a", "1ca3067cf8e64e65afa2468a19171828", "a76aaed880624ce997d9b0416a0693ba", "9c632bf575474e63b9ebfaaed1c1c771", "0871d745bb7646f190ef2590d14e74dd"]} id="xdXLPFT1e1A2" outputId="f54f1eac-e0e5-4bc8-810d-cfdb3c3d7313"
# Initialize the model for this run
model, input_size = initialize_model(model_name = model_name, num_classes = num_classes, resume_from = resume_from)
dataloaders = get_dataloaders(input_size, batch_size, shuffle_datasets)
criterion = get_loss()
# Move the model to the gpu if needed
model = model.to(device)
# Build the optimizer AFTER moving the model to its device.
optimizer = make_optimizer(model)
# Train the model!
trained_model, validation_history = train_model(model=model, dataloaders=dataloaders, criterion=criterion, optimizer=optimizer,
                                                save_dir=save_dir, save_all_epochs=save_all_epochs, num_epochs=num_epochs)
# + colab={"base_uri": "https://localhost:8080/"} id="V6yJq6fFhj3j" outputId="4a71e57c-e49f-4398-b135-362a3e29e734"
# !ls -1 images | wc -l
# + id="THlXbtegije9"
| Copy_of_Pneumonia_CV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: kaggle
# language: python
# name: kaggle
# ---
import os
import pickle
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from sklearn.preprocessing import MultiLabelBinarizer
from Scripts.CreateTrainingBatches import CreateTrainingBatches
# +
# Load the preprocessed dataset and training parameters.
# NOTE(review): pickle.loads is only safe on trusted files -- these are
# project-local artifacts, so acceptable here.
with open(os.path.join('Data','data_X_y.p'), 'rb') as handle:
    data_X_y = pickle.load(handle)
with open(os.path.join('Data','training_params.p'), 'rb') as handle:
    training_params = pickle.load(handle)
# -
from Scripts.Word2VecUtilities import Word2VecUtilities
w2v_model= Word2VecUtilities.load_word2vector_model(os.path.join('Data','w2v_model.h5'))
# Unpack the pre-made train/validation/test splits.
X_train, X_valid, X_test = [data_X_y['X_train'],data_X_y['X_valid'], data_X_y['X_test']]
y_train, y_valid, y_test = [data_X_y['y_train'],data_X_y['y_valid'], data_X_y['y_test']]
def return_actual_text(x, rev_vocab_dict):
    """Decode a sequence of word ids back into a space-separated string.

    Ids whose token is the padding sentinel 'my_dummy' are skipped.
    """
    tokens = []
    for word_id in x:
        token = rev_vocab_dict[word_id]
        if token != 'my_dummy':
            tokens.append(token)
    return " ".join(tokens)
# Sanity check: decode the first training document back to text.
rev_vocab_dict = training_params['rev_vocab_dict']
return_actual_text(X_train[0], rev_vocab_dict), y_train[0]
vocab_size = training_params['vocab_size']
# Turn each id sequence into a multi-hot bag-of-words indicator vector.
mlb = MultiLabelBinarizer()
X_train = mlb.fit_transform(X_train)
X_valid = mlb.transform(X_valid)
# Mini-batch generator over the (binarized) train/validation splits.
create_training_batches_object = CreateTrainingBatches(X_train, y_train, X_valid, y_valid)
def print_metrics(np_prob, np_y):
    """Print per-class accuracies and return their balanced mean.

    Args:
        np_prob: array of predicted probabilities in [0, 1].
        np_y: array of binary ground-truth labels (0 or 1).

    Returns:
        Balanced accuracy (mean of negative- and positive-class accuracy).
    """
    # A negative sample is correct when scored below 0.5, a positive one when
    # scored above 0.5; a score of exactly 0.5 counts as wrong for both.
    neg_accuracy = np.mean((np_prob < 0.5)[np_y == 0])
    pos_accuracy = np.mean((np_prob > 0.5)[np_y == 1])
    accuracy = np.mean((pos_accuracy, neg_accuracy))
    print('Negative accuracy', neg_accuracy)
    print('Positive accuracy', pos_accuracy)
    print('Accuracy', accuracy)
    return accuracy
# Given an input $x$, the model should be able to map it to $\hat{y}$
# \begin{align}
# \hat{y} & = \sigma(w^Tx+b) \\
# \end{align}
# The parameters of the model are the weights $w$ and the bias $b$
# +
# Logistic regression graph: prob = sigmoid(doc_vectors @ weights + bias).
tf.reset_default_graph()
learning_rate = 0.01
# Bag-of-words input: one row per document, one column per vocabulary term.
doc_vectors = tf.placeholder(dtype=tf.float32,shape=[None, vocab_size], name='doc_vectors')
weights = tf.Variable(tf.random_uniform([doc_vectors.get_shape().as_list()[1], 1], minval=-1.0, maxval=1.0), name='weights')
# BUG FIX: the bias was a constant tf.zeros tensor, so the optimizer could
# never update it.  Wrapping it in tf.Variable makes it trainable.
bias = tf.Variable(tf.zeros(1), name='bias')
logits = tf.matmul(doc_vectors, weights) + bias
prob = tf.nn.sigmoid(logits, name='prob')
# -
# The loss is an estimate the difference between the true label $y$ and the predicted label $\hat{y}$
# \begin{align}
# \ L(\hat{y},y) & = - [y log(\hat{y}) + (1 - y) log(1 - \hat{y})] \\
# \end{align}
# Binary cross-entropy loss computed from the (pre-sigmoid) logits.
y = tf.placeholder(tf.float32, [None, 1], name='y')
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits, name='x_entropy')
loss = tf.reduce_mean(losses, name='loss')
#
# Update the parameters to minimize the loss.
# Here $\alpha$ is the learning rate and $\frac{\partial L}{\partial w}$ is the partial derivative of Loss $L$ with respect to weight $w$
# \begin{align}
# \ w = w - \alpha \frac{\partial L}{\partial w}\\
# \ b = b - \alpha \frac{\partial L}{\partial b}\\
# \end{align}
with tf.name_scope('train'):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_op = optimizer.minimize(loss, name='train_op')
# TensorBoard graph dump for inspection.
file_writer = tf.summary.FileWriter('tf_logs/logistic_regression', tf.get_default_graph())
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess = tf.InteractiveSession()
init.run()
# Best balanced validation accuracy seen so far; 0.5 = chance level.
# Only checkpoints that improve on it are saved.
highest_validation_accuracy = 0.5
for i in range(500):
    x_train_samples, y_train_samples = create_training_batches_object.create_training_data()
    _, np_prob, np_y, np_loss = sess.run([training_op, prob, y, loss],
                                         feed_dict={doc_vectors: x_train_samples, y: y_train_samples})
    # Evaluate on the validation split every 100 steps.
    if i%100==0:
        print('Epoch', i, 'Loss',np_loss)
        x_valid_samples, y_valid_samples = create_training_batches_object.create_validation_data()
        np_prob, np_y, np_loss = sess.run([prob, y, loss],
                                          feed_dict={doc_vectors: x_valid_samples, y: y_valid_samples})
        validation_accuracy = print_metrics(np_prob, np_y)
        if validation_accuracy > highest_validation_accuracy:
            print('Saved model with highest accuracy')
            saver.save(sess, os.path.join('Models', 'tf_models','model.ckpt'))
            highest_validation_accuracy = validation_accuracy
        print('-----------------------------')
# +
# One-hidden-layer MLP over the same bag-of-words input (100 ReLU units).
tf.reset_default_graph()
learning_rate = 0.01
doc_vectors = tf.placeholder(dtype=tf.float32,shape=[None, vocab_size], name='doc_vectors')
layer_one_output = fully_connected(doc_vectors, 100, activation_fn=tf.nn.relu)
logits = fully_connected(layer_one_output,1, activation_fn=None)
prob = tf.nn.sigmoid(logits, name='prob')
# -
# Same binary cross-entropy loss and Adam training op as the logistic model.
y = tf.placeholder(tf.float32, [None, 1], name='y')
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits, name='x_entropy')
loss = tf.reduce_mean(losses, name='loss')
with tf.name_scope('train'):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_op = optimizer.minimize(loss, name='train_op')
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess = tf.InteractiveSession()
init.run()
# Save only checkpoints that beat the best balanced validation accuracy.
highest_validation_accuracy = 0.5
for i in range(500):
    x_train_samples, y_train_samples = create_training_batches_object.create_training_data()
    _, np_prob, np_y, np_loss = sess.run([training_op, prob, y, loss],
                                         feed_dict={doc_vectors: x_train_samples, y: y_train_samples})
    # Validate every 100 steps.
    if i%100==0:
        print('Epoch', i, 'Loss',np_loss)
        x_valid_samples, y_valid_samples = create_training_batches_object.create_validation_data()
        np_prob, np_y, np_loss = sess.run([prob, y, loss],
                                          feed_dict={doc_vectors: x_valid_samples, y: y_valid_samples})
        validation_accuracy = print_metrics(np_prob, np_y)
        if validation_accuracy > highest_validation_accuracy:
            print('Saved model with highest accuracy')
            saver.save(sess, os.path.join('Models', 'tf_models','model.ckpt'))
            highest_validation_accuracy = validation_accuracy
        print('-----------------------------')
# Re-load the raw id sequences: the RNN consumes token sequences, not the
# multi-hot vectors produced for the previous models.
X_train, X_valid, X_test = [data_X_y['X_train'],data_X_y['X_valid'], data_X_y['X_test']]
y_train, y_valid, y_test = [data_X_y['y_train'],data_X_y['y_valid'], data_X_y['y_test']]
create_training_batches_object = CreateTrainingBatches(X_train, y_train, X_valid, y_valid)
# +
embedding_matrix = training_params['embedding_matrix']
tf.reset_default_graph()
# Sequences of 300 word ids per document.
X = tf.placeholder(dtype=tf.int32,shape=[None, 300], name='X')
y = tf.placeholder(tf.float32, [None, 1], name='y')
# NOTE(review): tf_keep_prob is fed in the training loop below but no dropout
# op is present in this graph, so the value is currently unused.
tf_keep_prob = tf.placeholder(tf.float32, name='tf_keep_prob')
# Frozen (trainable=False) pre-trained word embeddings.
tf_embedding_matrix = tf.Variable(initial_value=embedding_matrix, trainable=False,
                                  dtype=tf.float32, name='tf_embedding_matrix')
X_embeddings = tf.nn.embedding_lookup(tf_embedding_matrix, X, name='X_embeddings')
# -
n_neurons = 50
learning_rate = 0.01
# +
# GRU over the embedded sequence; the final state is the document vector.
with tf.variable_scope('RNN', initializer=tf.contrib.layers.xavier_initializer()):
    rnn_cell = tf.contrib.rnn.GRUCell(num_units=n_neurons)
    outputs, states = tf.nn.dynamic_rnn(rnn_cell, X_embeddings, dtype=tf.float32)
doc_vectors = tf.concat(states, 1, name='conc_outputs')
logits = fully_connected(doc_vectors, 1, activation_fn=None)
prob = tf.nn.sigmoid(logits, name='prob')
x_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits, name='x_entropy')
loss = tf.reduce_mean(x_entropy, name='loss')
with tf.name_scope('train'):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_op = optimizer.minimize(loss, name='train_op')
# -
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess = tf.InteractiveSession()
init.run()
# Checkpoint only on improved balanced validation accuracy (0.5 = chance).
highest_validation_accuracy = 0.5
for i in range(500):
    x_train_samples, y_train_samples = create_training_batches_object.create_training_data()
    _, np_prob, np_y, np_loss = sess.run([training_op, prob, y, loss],
                                         feed_dict={X: x_train_samples,
                                                    y: y_train_samples,
                                                    tf_keep_prob: 0.8})
    # Validate every 100 steps.
    if i%100==0:
        print('Epoch', i, 'Loss',np_loss)
        x_valid_samples, y_valid_samples = create_training_batches_object.create_validation_data()
        np_prob, np_y, np_loss = sess.run([prob, y, loss],
                                          feed_dict={X: x_valid_samples,
                                                     y: y_valid_samples,
                                                     tf_keep_prob: 1})
        validation_accuracy = print_metrics(np_prob, np_y)
        if validation_accuracy > highest_validation_accuracy:
            print('Saved model with highest accuracy')
            saver.save(sess, os.path.join('Models', 'tf_models','model.ckpt'))
            highest_validation_accuracy = validation_accuracy
        print('-----------------------------')
| AllModels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting Fun With Matplotlib (and some Seaborn assistance)
# > Numerous times I've found myself in a situation where data is not yet in a datawarehouse or available in a visualization tool (such as Looker), but charts have been expected for scheduled reports
#
# - toc: false
# - branch: master
# - badges: true
# - categories: [viz, jupyter]
# - hide: true
# - search_exclude: false
# Numerous times I've found myself in a situation where data is not yet in a datawarehouse or available in a visualization tool (such as Looker), but nonetheless charts were expected for scheduled reports. This is a gallery of some of the charts I've made. It's important to note that [<NAME>'s](https://twitter.com/chris1610) post on [Effectively Using Matplotlib](https://pbpython.com/effective-matplotlib.html) was instrumental to finally grokking how to interact with that charting library.
# +
# %matplotlib inline
import numpy as np
from numpy.random import randn
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Seaborn theme: light grid background, font sizes suited to presentations.
sns.set_style('whitegrid')
sns.set_context("talk")
# Fix the global RNG so the simulated series are reproducible.
np.random.seed(12345)
# -
# ## Create Some Fake Data
# Most of the charts I deal with in Marketing Analytics / Business Operations have been based on timeseries data. In prepping for this post, I found simulating timeseries, especially the patterns I see frequently in marketing analytics, not as straightforward. Luckily a few different people on Twitter were helpful, especially this suggestion from [<NAME>](https://twitter.com/shaharkadmiel):
# > twitter: https://twitter.com/shaharkadmiel/status/1257598824056569856
# +
# set the time range
time = pd.date_range(
    start = '2018-01-01',
    end = '2019-12-31',
    freq = 'D'
)
# leverage sine waves to create daily data with weekly patterns and a trend that shows growth over time
amp = 1000
freq = 52 / 365
data = amp * np.sin(freq * 2 * np.pi * np.arange(time.size))
# Uniform noise in [-1, 1], scaled to 20% of the signal's peak-to-peak range.
# NOTE(review): ndarray.ptp() was removed in NumPy 2.0 — np.ptp(data) is the
# forward-compatible spelling.
noise = 2 * np.random.rand(time.size) - 1
data += 0.2 * data.ptp() * noise
# Polynomial growth trend plus a constant baseline, rounded to integer counts.
trend = np.power(np.arange(time.size),1.25)
data += trend
data += 50000
data = np.around(data,decimals=0).astype(int)
# -
def generate_series(time, amp, freq, power, vertical_shift):
    """Simulate a noisy daily series with a seasonal cycle and growth trend.

    Args:
        time: index-like object (e.g. a DatetimeIndex); only ``time.size``
            is used.
        amp: amplitude of the seasonal sine component.
        freq: sine frequency in cycles per step (e.g. 52/365 for weekly).
        power: exponent of the polynomial growth trend.
        vertical_shift: constant baseline added to every value.

    Returns:
        Integer ndarray of length ``time.size``.  Not deterministic unless
        the caller seeds NumPy's global RNG first.
    """
    data = amp * np.sin(freq * 2 * np.pi * np.arange(time.size))
    # Uniform noise in [-1, 1] scaled to 20% of the signal's peak-to-peak
    # range.  FIX: ndarray.ptp() was removed in NumPy 2.0 — use np.ptp().
    noise = 2 * np.random.rand(time.size) - 1
    data += 0.2 * np.ptp(data) * noise
    trend = np.power(np.arange(time.size), power)
    data += trend
    data += vertical_shift
    data = np.around(data, decimals=0).astype(int)
    return data
date_range = pd.date_range(start = '2018-01-01', end = '2019-12-31',freq = 'D')
# +
# Simulated daily website traffic: strong growth (exponent 1.45).
web_traffic = generate_series(date_range,10000,(52 / 365),1.45,50000)
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(date_range,web_traffic)
# +
# Simulated daily account creations: near-linear growth (exponent 0.95).
acct_create = generate_series(date_range,1000,(52 / 365),0.95,1000)
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(date_range,acct_create)
# -
# NOTE(review): these frames are indexed by `time` (from the earlier cell),
# which covers the same dates as `date_range` — presumably intentional.
traffic = pd.DataFrame(web_traffic,index=time,columns=['website_traffic'])
traffic.index.name = 'date'
accts = pd.DataFrame(acct_create,index=time,columns=['accounts_created'])
accts.index.name = 'date'
# Combine both series side by side and move the date index into a column.
df = pd.concat([traffic,accts],axis=1)
df = df.reset_index()
df.head()
# Monthly totals, one subplot per metric.
df.groupby(pd.Grouper(key='date',freq='MS')).sum().plot(subplots=True, layout=(1,2), figsize=(14, 6), sharey=False, ylim = 0);
# Monthly signup rate = accounts created / website traffic.
df.groupby(pd.Grouper(key='date',freq='MS')).sum().assign(signup_rate = lambda x: x['accounts_created'] / x['website_traffic']).loc[:,'signup_rate'].plot()
| _notebooks/2020-05-05-plotting-with-matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hierarchical Drift-Diffusion Model
#
# Same approach as in:
#
# <NAME>., <NAME>., & <NAME>. (2010). Visual fixations and the computation and comparison of value in simple choice. Nature neuroscience, 13(10), 1292-1298.
#
# <NAME>., <NAME>., <NAME>., & <NAME>. (2014). Eye tracking and pupillometry are indicators of dissociable latent decision processes. Journal of Experimental Psychology: General, 143(4), 1476.
#
| HDDM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab_type="code"
# !pip install autokeras
# !pip install git+https://github.com/keras-team/keras-tuner.git@1.0.2rc2
# + [markdown] colab_type="text"
# ## A Simple Example
# The first step is to prepare your data. Here we use the [IMDB
# dataset](https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification) as
# an example.
#
# + colab_type="code"
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from sklearn.datasets import load_files
# Download and extract the raw IMDB reviews archive (cached by Keras).
dataset = tf.keras.utils.get_file(
    fname="aclImdb.tar.gz",
    origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
    extract=True,
)
# set path to dataset
IMDB_DATADIR = os.path.join(os.path.dirname(dataset), 'aclImdb')
classes = ['pos', 'neg']
# load_files reads each review file and labels it by its parent directory.
train_data = load_files(os.path.join(IMDB_DATADIR, 'train'), shuffle=True, categories=classes)
test_data = load_files(os.path.join(IMDB_DATADIR, 'test'), shuffle=False, categories=classes)
# Raw text (bytes) arrays and integer class labels.
x_train = np.array(train_data.data)
y_train = np.array(train_data.target)
x_test = np.array(test_data.data)
y_test = np.array(test_data.target)
print(x_train.shape)  # (25000,)
print(y_train.shape)  # (25000, 1)
print(x_train[0][:50])  # this film was just brilliant casting
# + [markdown] colab_type="text"
# The second step is to run the [TextClassifier](/text_classifier).
# As a quick demo, we set epochs to 2.
# You can also leave the epochs unspecified for an adaptive number of epochs.
#
# + colab_type="code"
import autokeras as ak
# Initialize the text classifier.
clf = ak.TextClassifier(
    overwrite=True,
    max_trials=1) # It only tries 1 model as a quick demo.
# Feed the text classifier with training data.
clf.fit(x_train, y_train, epochs=2)
# Predict with the best model.
predicted_y = clf.predict(x_test)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))
# + [markdown] colab_type="text"
# ## Validation Data
# By default, AutoKeras use the last 20% of training data as validation data.
# As shown in the example below, you can use `validation_split` to specify the percentage.
#
# + colab_type="code"
clf.fit(x_train,
        y_train,
        # Split the training data and use the last 15% as validation data.
        validation_split=0.15)
# + [markdown] colab_type="text"
# You can also use your own validation set
# instead of splitting it from the training data with `validation_data`.
#
# + colab_type="code"
# Hold out everything past the first 5000 samples as an explicit validation set.
split = 5000
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
clf.fit(x_train,
        y_train,
        epochs=2,
        # Use your own validation set.
        validation_data=(x_val, y_val))
# + [markdown] colab_type="text"
# ## Customized Search Space
# For advanced users, you may customize your search space by using
# [AutoModel](/auto_model/#automodel-class) instead of
# [TextClassifier](/text_classifier). You can configure the
# [TextBlock](/block/#textblock-class) for some high-level configurations, e.g., `vectorizer`
# for the type of text vectorization method to use. You can use 'sequence', which uses
# [TextToInteSequence](/block/#texttointsequence-class) to convert the words to
# integers and use [Embedding](/block/#embedding-class) for embedding the
# integer sequences, or you can use 'ngram', which uses
# [TextToNgramVector](/block/#texttongramvector-class) to vectorize the
# sentences. You can also do not specify these arguments, which would leave the
# different choices to be tuned automatically. See the following example for detail.
#
# + colab_type="code"
import autokeras as ak
# Customized search space: force the n-gram text vectorization path.
input_node = ak.TextInput()
output_node = ak.TextBlock(block_type='ngram')(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
    inputs=input_node,
    outputs=output_node,
    overwrite=True,
    max_trials=1)
clf.fit(x_train, y_train, epochs=2)
# + [markdown] colab_type="text"
# The usage of [AutoModel](/auto_model/#automodel-class) is similar to the
# [functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.
# Basically, you are building a graph, whose edges are blocks and the nodes are intermediate outputs of blocks.
# To add an edge from `input_node` to `output_node` with
# `output_node = ak.[some_block]([block_args])(input_node)`.
#
# You can even also use more fine grained blocks to customize the search space even
# further. See the following example.
#
# + colab_type="code"
import autokeras as ak
# Finer-grained search space: explicit int-sequence -> embedding -> conv stack.
input_node = ak.TextInput()
output_node = ak.TextToIntSequence()(input_node)
output_node = ak.Embedding()(output_node)
# Use separable Conv layers in Keras.
output_node = ak.ConvBlock(separable=True)(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
    inputs=input_node,
    outputs=output_node,
    overwrite=True,
    max_trials=1)
clf.fit(x_train, y_train, epochs=2)
# + [markdown] colab_type="text"
# ## Data Format
# The AutoKeras TextClassifier is quite flexible for the data format.
#
# For the text, the input data should be one-dimensional
# For the classification labels, AutoKeras accepts both plain labels, i.e. strings or
# integers, and one-hot encoded labels, i.e. vectors of 0s and 1s.
#
# We also support using [tf.data.Dataset](
# https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable) format for
# the training data.
#
# + colab_type="code"
# Wrap the arrays as batched tf.data pipelines (features and labels as
# single-element tuples, 32 samples per batch).
train_set = tf.data.Dataset.from_tensor_slices(((x_train, ), (y_train, ))).batch(32)
test_set = tf.data.Dataset.from_tensor_slices(((x_test, ), (y_test, ))).batch(32)
clf = ak.TextClassifier(
    overwrite=True,
    max_trials=2)
# Feed the tensorflow Dataset to the classifier.
clf.fit(train_set, epochs=2)
# Predict with the best model.
predicted_y = clf.predict(test_set)
# Evaluate the best model with testing data.
print(clf.evaluate(test_set))
# + [markdown] colab_type="text"
# ## Reference
# [TextClassifier](/text_classifier),
# [AutoModel](/auto_model/#automodel-class),
# [TextBlock](/block/#textblock-class),
# [TextToInteSequence](/block/#texttointsequence-class),
# [Embedding](/block/#embedding-class),
# [TextToNgramVector](/block/#texttongramvector-class),
# [ConvBlock](/block/#convblock-class),
# [TextInput](/node/#textinput-class),
# [ClassificationHead](/block/#classificationhead-class).
#
| docs/ipynb/text_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 7.0
# language: ''
# name: sagemath
# ---
# # Orthogonale Diagonalisierung
#
# - Gegeben: Matrix $A \in M( n \times n)$
# - Gesucht: Diagonalisierende Matrix $Q$ sodass $Q^T A Q = D$ wobei $D$ die Eigenwerte von A in der Hauptdiagonale enthält
# input:
# Symmetric matrix to be orthogonally diagonalized.
A = matrix([
    [1, -1, 2],
    [-1, 1, 2],
    [2, 2, -2]
])
#
# Collect eigenvalues (EW) and eigenvectors (V) from Sage, displaying each.
V = []
EW = []
i = 0
for x in A.eigenvectors_left():
    for y in x[1]:
        EW.append(x[0])
        show(LatexExpr(r"\lambda_{} = ".format(i)), x[0])
        V.append(vector([val for val in y]))
        show(LatexExpr(r"v_{} = ".format(i)), y)
        i += 1
# optional input: EW, V if chosen in different order
#EW = []
# NOTE(review): V is overridden here with a hand-picked ordering/choice of
# eigenvectors; it must stay consistent with the order of EW above.
V = [
    vector([-1, -1, 2]),
    vector([2,0,1]),
    vector([-1,1,0]),
]
# Orthonormalisieren mit Gram-Schmidt Verfahren
#
# $$ w_1 = \frac{1}{\| v_1 \|} v_1 $$
# $$ u_i = v_i - \sum_{k=1}^{i-1} \langle v_i, w_k \rangle w_k $$
# $$ w_i = \frac{1}{\| u_i \|} u_i $$
# Gram-Schmidt orthonormalisation of the eigenvector list V.
# W collects the orthonormal vectors w_i; U the intermediate (orthogonal,
# not yet normalised) vectors u_i.
W = []
U = []
W.append((1/(V[0].norm())) * V[0])
show(LatexExpr("v_0 ="), V[0])
show(LatexExpr("w_0 ="), W[0])
# BUG FIX: a bare `print` is a no-op expression in Python 3; it must be
# called to emit the intended blank line.
print()
for i, v in enumerate(V[1:]):
    show(LatexExpr("v_{} =".format(i+1)), v)
    # Subtract the projections of v onto the already-orthonormalised basis.
    s = sum([(v.dot_product(w) * w) for w in W])
    u = v - s
    show(LatexExpr("u_{} =".format(i+1)), u)
    U.append(u)
    # Normalise to unit length.
    w = (1/u.norm()) * u
    show(LatexExpr("w_{} =".format(i+1)), w)
    W.append(w)
print()
# Assemble Q with the orthonormal vectors as columns; Q^T A Q must then be
# the diagonal matrix of eigenvalues (in the same order as EW).
Q = matrix(W).T
show(Q)
D = Q.T * A * Q
show(D)
assert D.ncols() == D.nrows(), "not a n x n matrix!"
for i in range(D.ncols()):
    # BUG FIX: the message used placeholder {1} with a single positional
    # argument, which raises IndexError instead of the intended message.
    assert D[i, i] == EW[i], "D[{0},{0}] != EW[{0}]".format(i)
| nrla_orthogonale_diagonalisierung.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Demo data: three people with first/last names and e-mail addresses.
people = {
    'first': ['Corey', 'Jane', 'John'],
    'last': ['Schafer', 'Doe', 'Doe'],
    'email': ['<EMAIL>', '<EMAIL>', '<EMAIL>']
}
import pandas as pd
df = pd.DataFrame(people)
df
# Element-wise string concatenation of the two name columns.
df['first'] + ' ' + df['last']
df['full_name'] = df['first'] + ' ' + df['last']
df
# Remove the now-redundant source columns in place.
df.drop(columns=['first', 'last'], inplace=True)
df
# Split full_name back into two columns ('expand' returns a DataFrame).
df['full_name'].str.split(' ', expand=True)
df[['first', 'last']] = df['full_name'].str.split(' ', expand=True)
df
# Add a single row.  FIX: DataFrame.append was deprecated in pandas 1.4 and
# removed in 2.0 — build a one-row frame and use pd.concat instead
# (columns not supplied become NaN, as append did).
pd.concat([df, pd.DataFrame([{'first': 'Tony'}])], ignore_index=True)
people = {
    'first': ['Tony', 'Steve'],
    'last': ['Stark', 'Rogers'],
    'email': ['<EMAIL>', '<EMAIL>']
}
df2 = pd.DataFrame(people)
df2
# Append df2's rows (pd.concat replaces the removed DataFrame.append).
pd.concat([df, df2], ignore_index=True, sort=False)
df = pd.concat([df, df2], ignore_index=True, sort=False)
df
# Drop a row by index label (returns a copy; df itself is unchanged).
df.drop(index=4)
# Drop all rows whose last name is 'Doe' (again non-destructive).
filt = df['last'] == 'Doe'
df.drop(index=df[filt].index)
| Python/Pandas/06-Add-Remove-Rows-Columns/Snippets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import codecs, json
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
# Input/output locations derived from the target year.
year = '2018'
data_dir = '../data/' + year + '/'
file_name = 'chicago-crimes-' + year
# +
# %%time
# set input data file path
parquet_data_dir = data_dir + 'crimes-' + year + '.snappy.parq'
print('Loading crime data from: {}'.format(parquet_data_dir))
# load crimes parquet data into dask df
crimes = dd.read_parquet(parquet_data_dir, index='Date')
# load all data into memory
crimes = crimes.persist()
print('Crime data loaded into memory.')
# log records count and data frame stats
print('Crime data stats:')
print('---------------------------------------')
print('{:,} total records in {} partitions'.format(len(crimes), crimes.npartitions))
print('DataFrame size: {:,}'.format(crimes.size.compute()))
# -
crimes
# get crime geo data for mapping, drop na
# (rows missing any of these columns cannot be placed on a map)
crime_geo = crimes[['PrimaryType',
                    'Block',
                    'Description',
                    'LocationDescription',
                    'CommunityArea',
                    'Arrest',
                    'Domestic',
                    'Latitude',
                    'Longitude',
                    'Ward']].dropna()
print('All Crimes:', len(crime_geo))
# converts crimes data to json
def to_json_file(file_path, data):
    """Serialize *data* as compact UTF-8 JSON to *file_path*.

    Uses minimal separators with indent=0 (newline between items, no extra
    spaces) to keep the output file small.

    FIX: the original passed an open handle straight to json.dump and never
    closed it, leaking the file descriptor and risking unflushed output —
    use a ``with`` block so the file is always closed.
    """
    with codecs.open(file_path, 'w', encoding='utf-8') as out_file:
        json.dump(data, out_file,
                  separators=(',', ':'), sort_keys=False, indent=0)
# %%time
# output crimes data in raw json to see how large it gets
geo_data_columns = ['Latitude', 'Longitude', 'Block', 'LocationDescription',
                    'PrimaryType', 'Description', 'Arrest', 'Domestic', 'Ward']
to_json_file(data_dir + file_name + '.json',
             crime_geo[geo_data_columns].compute().values.tolist())
# %%time
# dish it out in snappy parquet for comparison
crime_geo.to_parquet(data_dir + file_name + '.parquet', compression='SNAPPY')
# create pandas dataframe for conversion to arrow
# (.compute() materializes the dask frame in memory)
crime_geo_df = crime_geo[geo_data_columns].compute()
crime_geo_df.info()
# convert pandas data frame to arrow table
crime_geo_table = pa.Table.from_pandas(crime_geo_df)
crime_geo_table
# %%time
# write arrow table to a single parquet file, just to test it
pq.write_table(crime_geo_table, data_dir + file_name + '.parq')
# %%time
# read parquet file created with arrow with dask for compatibility check
ddf = dd.read_parquet(data_dir + file_name + '.parq', index='Date')
print('{:,} total records in {} partitions'.format(len(ddf), ddf.npartitions))
print('DataFrame size: {:,}'.format(ddf.size.compute()))
ddf
# %%time
# read parquet file with arrow
table = pq.read_table(data_dir + file_name + '.parq')
table
# %%time
# convert it to pandas data frame
df = table.to_pandas()
df.info()
# %%time
# write arrow stream to disk (binary Arrow IPC file format)
writer = pa.RecordBatchFileWriter(data_dir + file_name + '.arrow', table.schema)
writer.write_table(table)
writer.close()
# %%time
# read back binary arrow file from disk
reader = pa.RecordBatchFileReader(data_dir + file_name + '.arrow')
read_table = reader.read_all()
read_table
| notebooks/chicago-crimes-geo-data-export.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Seal detection API demo
#
# In areas with limited tree cover and large wildlife, aerial wildlife population surveys are often more efficient than surveys using camera traps or "boots on the ground". The downside of aerial surveys is that even in areas dense with wildlife, the *vast* majority of images are empty, which makes population counting laborious. In this notebook, we demonstrate the ability of a machine learning model – trained in PyTorch and hosted as an API via the [AI for Earth API Platform](https://github.com/Microsoft/AIforEarth-API-Development/blob/master/Quickstart.md) – to detect large wildlife in an arctic environment.
#
# The API underlying this demo is *not* a production or public API, rather it demonstrates ongoing work and the potential for automating incredibly tedious manual annotation.
#
# Images courtesy of NOAA. The entire training data set is available on [lila.science](http://lila.science/datasets/arcticseals), a collaboration between AI for Earth and several external partners to make training data available for conservation problems.
#
# Contact <EMAIL> with questions.
# ### Imports and constants
# +
import requests
from io import BytesIO
import random
# Fixed seed so the randomly chosen demo image is reproducible.
random.seed(0)
import glob
import PIL.Image
import numpy as np
import os
from IPython.core.display import Image, display
api_address = 'http://sealsapi.southcentralus.cloudapp.azure.com:8088/'
test_image_folder = '/data/seals_blob1_test/'
# API key is kept out of the notebook in a local text file.
with open('./seals_api_key.txt', 'rt') as fi:
    api_key = fi.read().strip()
# Quick liveness check against the API root.
print(requests.get(api_address).text)
# %autosave 0
# -
# ### Retrieve and display IR image
# Pick a random 16-bit thermal image and scale it down to 8-bit for upload.
image_path = random.choice(glob.glob(os.path.join(test_image_folder, '*_THERM-16BIT-N.PNG')))
upload_image = PIL.Image.fromarray((np.array(PIL.Image.open(image_path))/256).astype(np.uint8))
upload_image
# ### Call API and display results
# POST the PNG bytes to the detection endpoint; the response body is an
# annotated image.
upload_bytes = BytesIO()
upload_image.save(upload_bytes, 'PNG')
upload_bytes.seek(0)
r = requests.post(api_address + 'v1/detection_api/detect?api_key='+api_key,
                  data=upload_bytes,
                  headers={'Content-Type':'image/png'})
assert r.status_code == 200, r.text
result_image = PIL.Image.open(BytesIO(r.content))
result_image
# ### Retrieve and display color image (for information only)
# The color image shares the thermal image's basename with a different suffix.
image_basename = os.path.split(image_path[:image_path.index('_THERM-16BIT-N.PNG')])[1]
color_image_path = os.path.join(test_image_folder, image_basename + '_COLOR-8-BIT.JPG')
color_image = PIL.Image.open(color_image_path)
# Shrink in place for inline display.  FIX: Image.ANTIALIAS was removed in
# Pillow 10; LANCZOS is the identical resampling filter.
color_image.thumbnail((700,700), PIL.Image.LANCZOS)
color_image
# ### Full-size color image
# It takes a while after the cell computation has completed until the color image is loaded.
#
# Click with the middle mouse button on the image to scroll around in any direction.
# Displaying the color image takes a while
display(Image(color_image_path, unconfined=True))
| src/SealDetectionAPI/seal-api-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.9 64-bit (''azureml_py36'': conda)'
# name: python3
# ---
# # Fine-tuning the rinna GPT-2 model
# Fine-tunes the [rinna gpt-2](https://huggingface.co/rinna/japanese-gpt2-medium) model using HuggingFace's transformers library.
# ## Setup
# Import the required libraries.
# +
from azureml.core import Experiment, Workspace, Environment
from azureml.core.compute import ComputeTarget
from azureml.core import ScriptRunConfig
from azureml.core.runconfig import PyTorchConfiguration
import os
# Local folder holding the training script that will be uploaded to Azure ML.
os.makedirs('src', exist_ok=True)
# -
# Connect to the Azure ML Workspace (reads the local config.json).
ws = Workspace.from_config()
# Name of the Experiment that groups the runs.
model_experiment = Experiment(ws, name="rinna-gpt2-exp")
# PyTorch distributed-job configuration.
distr_config = PyTorchConfiguration()
# Environment built from the local Dockerfile.
hf_ort_env = Environment.from_dockerfile(name='rinna-docker-env', dockerfile='Dockerfile')
#hf_ort_env.build(ws)
# Command-line arguments passed to the training script.
script_params = ['--num_train_epochs', 1,'--output_dir', './outputs', '--model_name_or_path', 'rinna/japanese-gpt2-medium']
# ## Model training
# Use `ScriptRunConfig` so training runs on an Azure Machine Learning Compute Cluster.
model_run_config = ScriptRunConfig(
    source_directory='./src',
    script='./train.py',
    arguments=script_params,
    compute_target=ComputeTarget(workspace=ws, name="gpuinstance"),
    environment=hf_ort_env,
    distributed_job_config=distr_config)
# Submit the run (starts model training remotely).
run = model_experiment.submit(model_run_config)
run
# +
#run.wait_for_completion(show_output=False)
| examples/rinna-gpt2-train/python-sdk/run_trainer_remote.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 ('base')
# language: python
# name: python3
# ---
# # Fenômeno de Runge
#
# ### Prof. <NAME> <<EMAIL>>
#
# Maio 2022
#
import numpy as np
import sys
import matplotlib.pyplot as plt
def lagrange(x, y, x_samples):
    """Evaluate the Lagrange interpolating polynomial through (x, y).

    Args:
        x: interpolation nodes (must be pairwise distinct).
        y: function values at the nodes.
        x_samples: points at which to evaluate the interpolant.

    Returns:
        Array of interpolated values, same shape as ``x_samples``.
    """
    y_out = np.zeros_like(x_samples)
    for node, value in zip(x, y):
        # Lagrange basis for this node: product over the other nodes
        # of (t - other) / (node - other).
        factors = [(x_samples - other) / (node - other)
                   for other in x if node != other]
        y_out += value * np.prod(factors, axis=0)
    return y_out
# +
#Runge function
f = lambda x: 1/(1 + 25*x**2)
n= 20
# Equispaced interpolation nodes — this is what triggers Runge's phenomenon.
x = np.linspace(-1, 1, n)
# Points at which to evaluate (interpolate) —
# use many more points to see how the interpolant behaves between the nodes.
x_full = np.linspace(-1, 1, 10000)
y_full = lagrange(x, f(x), x_full)
# Black curve: true function; colored curve: interpolant (oscillates near ±1).
plt.plot(x, f(x), "k")
plt.plot(x_full, y_full)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
print("n=", n, " Max Error=", np.max(np.abs(y_full-f(x_full))))
# +
# https://notebook.community/tclaudioe/Scientific-Computing/SC1/07_Polynomial_Interpolation_1D
def Chebyshev(xmin, xmax, n=5):
    """Plot the n Chebyshev nodes with their unit-circle construction.

    The nodes are the x-projections of equispaced angles on the upper unit
    circle.  NOTE(review): xmin/xmax are accepted but not used by the plot —
    the figure is always drawn on [-1, 1]; kept for interface compatibility.
    """
    k = np.arange(1, n + 1)
    angles = (2 * k - 1) * np.pi / (2 * n)
    px = np.cos(angles)
    py = np.sin(angles)
    plt.figure(figsize=(10, 5))
    plt.ylim(-0.1, 1.1)
    plt.xlim(-1.1, 1.1)
    # Upper half of the unit circle plus the two coordinate axes.
    plt.plot(np.cos(np.linspace(0, np.pi)), np.sin(np.linspace(0, np.pi)), 'k-')
    plt.plot([-2, 2], [0, 0], 'k-')
    plt.plot([0, 0], [-1, 2], 'k-')
    for xi, yi in zip(px, py):
        # Vertical drop from the circle point and the radius to it.
        plt.plot([xi, xi], [0, yi], 'r-')
        plt.plot([0, xi], [0, yi], 'r-')
    plt.plot(px, [0] * len(px), 'bo', label='Chebyshev points')
    plt.plot(px, py, 'ro')
    plt.xlabel('$x$')
    plt.title('n = ' + str(n))
    plt.grid(True)
    plt.legend(loc='best')
    plt.show()
def Chebyshev_points(xmin, xmax, n):
    """Return the n Chebyshev nodes mapped onto the interval [xmin, xmax]."""
    k = np.arange(1, n + 1)
    # Nodes on [-1, 1]: cosines of the odd multiples of pi/(2n).
    nodes = np.cos((2 * k - 1) * np.pi / (2 * n))
    # Affine map from [-1, 1] to [xmin, xmax].
    return (xmin + xmax) / 2 + (xmax - xmin) * nodes / 2
# +
# Interactive widget: visualize the Chebyshev node construction for varying n.
from ipywidgets import interact, fixed, IntSlider
interact(Chebyshev,xmin=fixed(-1),xmax=fixed(1),n=(2,50))
# +
#Runge function
f = lambda x: 1/(1 + 25*x**2)
n= 20
# Chebyshev nodes instead of equispaced ones — this suppresses the Runge
# oscillations seen in the previous cell.
x = Chebyshev_points(-1,1,n)
# Points at which to evaluate (interpolate) —
# use many more points to see how the interpolant behaves between the nodes.
x_full = np.linspace(-1, 1, 10000)
y_full = lagrange(x, f(x), x_full)
plt.plot(x, f(x), "k")
plt.plot(x_full, y_full)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
print("n=", n, " Max Error=", np.max(np.abs(y_full-f(x_full))))
# -
| runge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# name: python3
# ---
# # Data Analysis
# This module will explore the stage of data analysis using the lens of a bias-aware methodology. We will make use of Jupyter notebooks to aid our exploratory data analysis, in order to understand how social, cognitive, and statistical biases interact and affect downstream stages in the research and innovation lifecycle. However, you do not need to have any experience with Python or Jupyter Notebooks to follow along.
#
# Exploratory data analysis is an important stage for hypothesis generation or uncovering possible limitations of the dataset that can arise from missing data, in turn identifying the need for any subsequent augmentation of the dataset to deal with possible class imbalances. However, there are also risks that stem from cognitive biases (e.g. confirmation bias) that can create cascading effects that effect downstream tasks (e.g. model reporting).
#
# We will look at the following stages of data analysis:
#
# 1. Importing Data
# 2. Describing the Data
# 3. Analysing the Data
# 4. Querying the Data
# 5. Visualising the Data
from matplotlib import rcParams, cycler
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
# +
# Fixing random state for reproducibility
np.random.seed(19680801)
N = 10
# N noisy, vertically offset logarithmic curves of 100 samples each
data = [np.logspace(0, 1, 100) + np.random.randn(100) + ii for ii in range(N)]
data = np.array(data).T
cmap = plt.cm.coolwarm
# Cycle the line colors through the coolwarm colormap, one color per curve
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))
from matplotlib.lines import Line2D
# Proxy artists for the legend (the plotted lines themselves are unlabeled)
custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
                Line2D([0], [0], color=cmap(.5), lw=4),
                Line2D([0], [0], color=cmap(1.), lw=4)]
fig, ax = plt.subplots(figsize=(10, 5))
lines = ax.plot(data)
ax.legend(custom_lines, ['Cold', 'Medium', 'Hot']);
| guidebooks/_build/html/_sources/rri/chapter3/data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Problem 1: Visualize a static map (8 points)
#
# Create a static map using the skills you learned in [lesson 5: static maps](https://automating-gis-processes.github.io/site/notebooks/L5/static_maps.html). The map should contain multiple layers of data (at least two different data sets), and you should pay attention to the classification and visualization (colors, edges etc.) when creating the map. Write your code into a notebook file (`.ipynb`) or a python script file (`.py`) and store the output map(s) in `.png` format under the `docs` folder.
#
# **Topic of the map:**
# - You can either use the data sets we have already used during this course (eg. the Travel Time Matrix, or population grid), or you can select some other data set of your interest (for example, statistics in postal code areas).
# - Feel free to adapt examples provided in this course! You can do further calculations based on the datasets or use the analysis outputs that we have done earlier in the course (for example, the dominance areas or travel times for shopping centers).
#
# **Criteria:**
# - The map should have multiple layers on it
# - The map should portray some kind of classification and/or an analysis output (not just the raw data).
# - Consider [good cartographic practices](https://www.gislounge.com/ten-things-to-consider-when-making-a-map/) (map extent, zoom level, color choices, legend, credit data sources etc.)
# - The map should demonstrate skills learned during [lesson 5: static maps](https://automating-gis-processes.github.io/site/notebooks/L5/static_maps.html) and throughout this course.
#
# **Output:**
# - Remember to commit the code and input data (or at least a link to input data)
# - Save your map(s) as png image in the **`docs`** folder
# +
#load packages
import geopandas as gpd
from pyproj import CRS
import matplotlib.pyplot as plt
# %matplotlib inline
# Filepaths (travel-time grid plus road, metro and address layers)
grid_fp = "data/dataE5/TravelTimes_to_5975375_RailwayStation.shp"
roads_fp = "data/dataE5/roads.shp"
metro_fp = "data/dataE5/metro.shp"
addresses_fp = "data/dataE5/addresses.shp"
# Read files into GeoDataFrames
grid = gpd.read_file(grid_fp)
roads = gpd.read_file(roads_fp)
metro = gpd.read_file(metro_fp)
addresses = gpd.read_file(addresses_fp)
# -
#print CRS:s of every layer to see which ones differ
print(roads.crs)
print(metro.crs)
print(grid.crs)
print(addresses.crs)
# Reproject to ETRS89 based on the grid crs:
roads = roads.to_crs(crs=grid.crs)
metro = metro.to_crs(crs=grid.crs)
addresses = addresses.to_crs(crs=grid.crs)
#are they in the same coordinate system? (should evaluate to True)
roads.crs == metro.crs == grid.crs == addresses.crs
# Quick sanity check of each layer's attribute table
roads.head()
metro.head()
addresses.head()
grid.head()
# +
# replace the no-data values (-1, either numeric or as a string) with NaN
import numpy as np
len(grid)
# NOTE(review): "geometry" is included in the replace, where -1 should never
# occur — presumably only "walk_d" needs cleaning; confirm.
cols = ["walk_d","geometry"]
grid[cols] = grid[cols].replace({'-1':np.nan, -1:np.nan})
#drop rows containing no-data values
grid.dropna(inplace=True)
# +
# For better control of the figure and axes, use the plt.subplots function before plotting the layers
# https://matplotlib.org/3.1.0/gallery/subplots_axes_and_figures/subplots_demo.html#a-figure-with-just-one-subplot
# Control figure size in here
fig, ax = plt.subplots(figsize=(12,8))
ax.set_title('Walking distance in Helsinki Metropolitan Area (m)')
# Visualize the walking distances in 8 classes using the "equal_interval" classification scheme
grid.plot(ax=ax, column="walk_d", linewidth=0.03, cmap="OrRd", scheme="equal_interval", k=8, alpha=0.8, legend=True)
# Add the address points on top of the grid
addresses.plot(ax=ax, marker='*', color="red", markersize=100, legend=True)
# Add roads on top of the grid
# (use ax parameter to define the map on top of which the second items are plotted)
roads.plot(ax=ax, color="black", linewidth=1.5, legend=True)
# Add metro on top of the previous map
metro.plot(ax=ax, color="yellow", linewidth=1.5, alpha=0.3, legend=True)
# Remove the empty white-space around the axes
plt.tight_layout()
# Save the figure as png file with resolution of 500 dpi
outfp = "docs/map1.png"
plt.savefig(outfp, dpi=500)
| Exercise-5-Problem-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
path = "../src/data/"
# One CSV of Wi-Fi RSSI measurements per room (columns include ap_id, rssi, room_id)
a507 = pd.read_csv(path + 'A507.csv')
a501 = pd.read_csv(path + 'A501.csv')
a502 = pd.read_csv(path + 'A502.csv')
hall = pd.read_csv(path + 'Hall.csv')
def boxplot(csv):
    """Draw one RSSI box per access point found in the given room's data."""
    access_points = list(csv.ap_id.unique())
    # One list of RSSI samples per access point, matching the label order
    samples = [csv.loc[csv.ap_id == ap].rssi for ap in access_points]
    plt.figure(figsize=(14,8))
    plt.boxplot(samples);
    positions = range(1, len(access_points) + 1)
    plt.xticks(positions, access_points, rotation=60);
    plt.title(str(csv.room_id[0]))
    plt.show();
def histograms(csv1, csv2):
    """Plot side-by-side RSSI histograms for two rooms' measurement tables."""
    fig, axes = plt.subplots(1, 2, figsize=(12, 5))
    # Left panel for the first room, right panel for the second
    for axis, room_data in zip(axes, (csv1, csv2)):
        axis.hist(room_data.rssi)
        axis.set_title(str(room_data.room_id[0]))
    axes[0].grid()
    axes[1].grid()
    plt.tight_layout()
    plt.show();
def grouped_histogram(csv1, csv2, csv3, csv4):
    """Overlay two RSSI histograms per panel: (csv1, csv2) left, (csv3, csv4) right."""
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
    pairs = ((ax1, csv1, csv2), (ax2, csv3, csv4))
    for axis, first, second in pairs:
        # Blue for the first room, red for the second; alpha shows the overlap
        axis.hist(first.rssi, color='blue', alpha=0.7, label=str(first.room_id[0]))
        axis.hist(second.rssi, color='red', alpha=0.7, label=str(second.room_id[0]))
        axis.set_title(f'{str(first.room_id[0])} and {str(second.room_id[0])}')
        axis.grid()
        axis.legend(loc='best')
    plt.tight_layout()
    plt.show();
# Quick overview: number of distinct access points seen in each room's data
for csv in [a507, a501, a502, hall]:
    print(f'{str(csv.room_id[0])} - There is {len(csv.ap_id.unique())} Access points.')
# Compare RSSI distributions between pairs of rooms
histograms(a507,hall)
histograms(a501, a502)
grouped_histogram(a507, hall, a501, hall)
grouped_histogram(a502, hall, a501, a502)
grouped_histogram(a501, a507, a502, a507)
# Access points heard very strongly (RSSI above -40) in room A507
a507.loc[a507.rssi > -40].ap_id.unique()
# Per-access-point RSSI spread in each room
boxplot(a507)
boxplot(hall)
boxplot(a501)
boxplot(a502)
| notebook/data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # NetCDF handling
#
# NetCDF formatted files are much faster to read and write for large datasets. In order to make the most of this, the `ScmRun` objects have the ability to read and write netCDF files.
# +
# NBVAL_IGNORE_OUTPUT
import traceback
from glob import glob
import numpy as np
import seaborn as sns
import xarray as xr
import pandas as pd
from scmdata.run import ScmRun, run_append
from scmdata.netcdf import nc_to_run
# -
# Widen pandas' console output so ScmRun reprs are readable
pd.set_option("display.width", 120)
pd.set_option("display.max_columns", 15)
pd.set_option("display.max_colwidth", 80)
pd.set_option("display.min_rows", 20)
# ## Helper bits and pieces
OUT_FNAME = "/tmp/out_runs.nc"
def new_timeseries(
    n=100,
    count=1,
    model="example",
    scenario="ssp119",
    variable="Surface Temperature",
    unit="K",
    region="World",
    cls=ScmRun,
    **kwargs,
):
    """Build a run of ``count`` random annual timeseries over ``n`` steps.

    Random draws in [0, 1) are scaled by the step index so the spread grows
    over time; extra keyword arguments become additional metadata columns.
    """
    values = np.random.rand(n, count) * np.arange(n)[:, np.newaxis]
    years = 2000 + np.arange(n)
    metadata = {
        "model": model,
        "scenario": scenario,
        "variable": variable,
        "region": region,
        "unit": unit,
        **kwargs,
    }
    return cls(values, columns=metadata, index=years)
# Let's create an `ScmRun` which contains a few variables and a number of runs. Such a dataframe would be used to store the results from an ensemble of simple climate model runs.
# NBVAL_IGNORE_OUTPUT
# Ten ensemble members, each carrying three variables; run_id distinguishes them
runs = run_append(
    [
        new_timeseries(
            count=3,
            variable=[
                "Surface Temperature",
                "Atmospheric Concentrations|CO2",
                "Radiative Forcing",
            ],
            unit=["K", "ppm", "W/m^2"],
            run_id=run_id,
        )
        for run_id in range(10)
    ]
)
runs.metadata["source"] = "fake data"
runs
# ## Reading/Writing to NetCDF4
# ### Basics
#
# Writing the runs to disk is easy. The one trick is that each variable and dimension combination must have unique metadata. If they do not, you will receive an error message like the below.
# Writing with dimensions=["region"] fails: several timeseries share identical
# (variable, region) metadata, so they cannot be laid out on that grid
try:
    runs.to_nc(OUT_FNAME, dimensions=["region"])
except ValueError:
    traceback.print_exc(limit=0, chain=False)
# In our dataset, there is more than one "run_id" per variable hence we need to use a different dimension, `run_id`, because this will result in each variable's remaining metadata being unique.
runs.to_nc(OUT_FNAME, dimensions=["run_id"])
# The output netCDF file can be read using the `from_nc` method, `nc_to_run` function or directly using `xarray`.
# NBVAL_IGNORE_OUTPUT
runs_netcdf = ScmRun.from_nc(OUT_FNAME)
runs_netcdf
# NBVAL_IGNORE_OUTPUT
nc_to_run(ScmRun, OUT_FNAME)
# NBVAL_IGNORE_OUTPUT
xr.load_dataset(OUT_FNAME)
# The additional `metadata` in `runs` is also serialized and deserialized in the netCDF files. The `metadata` of the loaded `ScmRun` will also contain some additional fields about the file creation.
# NBVAL_IGNORE_OUTPUT
assert "source" in runs_netcdf.metadata
runs_netcdf.metadata
# ### Splitting your data
#
# Sometimes if you have complicated ensemble runs it might be more efficient to split the data into smaller subsets.
#
# In the below example we iterate over scenarios to produce a netCDF file per scenario.
# +
large_run = []
# 10 runs for each scenario
for sce in ["ssp119", "ssp370", "ssp585"]:
    large_run.extend(
        [
            new_timeseries(
                count=3,
                scenario=sce,
                variable=[
                    "Surface Temperature",
                    "Atmospheric Concentrations|CO2",
                    "Radiative Forcing",
                ],
                unit=["K", "ppm", "W/m^2"],
                paraset_id=paraset_id,
            )
            for paraset_id in range(10)
        ]
    )
large_run = run_append(large_run)
# also set a run_id (often we'd have paraset_id and run_id,
# one which keeps track of the parameter set we've run and
# the other which keeps track of the run in a large ensemble)
large_run["run_id"] = large_run.meta.index.values
large_run
# -
# Data for each scenario can then be loaded independently instead of having to load all the data and then filtering
# Write one netCDF file per scenario, named after the scenario
for sce_run in large_run.groupby("scenario"):
    sce = sce_run.get_unique_meta("scenario", True)
    sce_run.to_nc(
        "/tmp/out-{}-sparse.nc".format(sce),
        dimensions=["run_id", "paraset_id"],
    )
# NBVAL_IGNORE_OUTPUT
ScmRun.from_nc("/tmp/out-ssp585-sparse.nc").filter("Surface Temperature").line_plot()
# For such a data set, since both `run_id` and `paraset_id` vary, both could be added as dimensions in the file.
#
# The one problem with this approach is that you get very sparse arrays because the data is written on a 100 x 30 x 90 (time points x paraset_id x run_id) grid but there's only 90 timeseries so you end up with 180 timeseries worth of nans (although this is a relatively small problem because the netCDF files use compression to minimise the impact of the extra nan values).
# NBVAL_IGNORE_OUTPUT
xr.load_dataset("/tmp/out-ssp585-sparse.nc")
# NBVAL_IGNORE_OUTPUT
# Load all scenarios
run_append([ScmRun.from_nc(fname) for fname in glob("/tmp/out-ssp*-sparse.nc")])
# An alternative to the sparse arrays is to specify the variables in the `extras` attribute. If possible, this adds the metadata to the netCDF file as an extra co-ordinate, which uses one of the dimensions as its co-ordinate. If using one of the dimensions as a co-ordinate would not specify the metadata uniquely, we add the extra as an additional co-ordinate, which itself has co-ordinates of `_id`. This `_id` co-ordinate provides a unique mapping between the extra metadata and the timeseries.
# Same per-scenario split, but carry paraset_id as an extra co-ordinate
# instead of a full dimension to avoid sparse arrays
for sce_run in large_run.groupby("scenario"):
    sce = sce_run.get_unique_meta("scenario", True)
    sce_run.to_nc(
        "/tmp/out-{}-extras.nc".format(sce),
        dimensions=["run_id"],
        extras=["paraset_id"],
    )
# `paraset_id` is uniquely defined by `run_id` so we don't end up with an extra `_id` co-ordinate.
# NBVAL_IGNORE_OUTPUT
xr.load_dataset("/tmp/out-ssp585-extras.nc")
# NBVAL_IGNORE_OUTPUT
ScmRun.from_nc("/tmp/out-ssp585-extras.nc").filter("Surface Temperature").line_plot()
# If we use dimensions and extra such that our extra co-ordinates are not uniquely defined by the regions, an `_id` dimension is automatically added to ensure we don't lose any information.
large_run.to_nc(
    "/tmp/out-extras-sparse.nc",
    dimensions=["scenario"],
    extras=["paraset_id", "run_id"],
)
# NBVAL_IGNORE_OUTPUT
xr.load_dataset("/tmp/out-extras-sparse.nc")
# ### Multi-dimensional data
#
# **scmdata** can also handle having more than one dimension. This can be especially helpful if you have output from a number of models (IAMs), scenarios, regions and runs.
# +
# Full cross product of models x scenarios x regions, ten parameter sets each
multi_dimensional_run = []
for model in ["AIM", "GCAM", "MESSAGE", "REMIND"]:
    for sce in ["ssp119", "ssp370", "ssp585"]:
        for region in ["World", "R5LAM", "R5MAF", "R5ASIA", "R5OECD", "R5REF"]:
            multi_dimensional_run.extend(
                [
                    new_timeseries(
                        count=3,
                        model=model,
                        scenario=sce,
                        region=region,
                        variable=[
                            "Surface Temperature",
                            "Atmospheric Concentrations|CO2",
                            "Radiative Forcing",
                        ],
                        unit=["K", "ppm", "W/m^2"],
                        paraset_id=paraset_id,
                    )
                    for paraset_id in range(10)
                ]
            )
multi_dimensional_run = run_append(multi_dimensional_run)
multi_dimensional_run
# -
multi_dim_outfile = "/tmp/out-multi-dimensional.nc"
# All four varying metadata columns become netCDF dimensions
multi_dimensional_run.to_nc(
    multi_dim_outfile,
    dimensions=("region", "model", "scenario", "paraset_id"),
)
# NBVAL_IGNORE_OUTPUT
xr.load_dataset(multi_dim_outfile)
# +
# NBVAL_IGNORE_OUTPUT
multi_dim_loaded_co2_conc = ScmRun.from_nc(multi_dim_outfile).filter(
    "Atmospheric Concentrations|CO2"
)
# Long (tidy) format for seaborn plotting
seaborn_df = multi_dim_loaded_co2_conc.long_data()
seaborn_df.head()
# -
# NBVAL_IGNORE_OUTPUT
# One panel per region; one line per (scenario, model, paraset_id)
sns.relplot(
    data=seaborn_df,
    x="time",
    y="value",
    units="paraset_id",
    estimator=None,
    hue="scenario",
    style="model",
    col="region",
    col_wrap=3,
    kind="line",
)
| notebooks/netcdf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
import datetime as dt
# # Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect, func
# Connect to the local SQLite climate database
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# +
# Find column names and types for both tables via the SQLAlchemy inspector.
inspector = inspect(engine)
measurement_col = inspector.get_columns('measurement')
# FIX: the printed label misspelled the table name as 'mesurement'
print("table: 'measurement' info: ")
for c in measurement_col:
    print(c['name'], c["type"])
print("------------------------------------------")
station_col = inspector.get_columns('station')
print("table: 'station' info: ")
for c in station_col:
    print(c['name'], c["type"])
# -
# # Exploratory Climate Analysis
# +
# Design a query to retrieve the last 12 months of precipitation data and plot
# the results ('2016-08-23' is one year before the latest date in the dataset).
data_prcp = session.query(Measurement.date,Measurement.prcp).\
    filter(func.strftime("%Y-%m-%d", Measurement.date) >= '2016-08-23').\
    order_by(Measurement.date).all()
# Load data to a dataframe
prcp_df = pd.DataFrame(data_prcp, columns=['Date', 'Precipitation'])
# Sort dataframe by date
prcp_df = prcp_df.sort_values(by='Date', ascending=True)
#precipitation_df = precipitation_df.dropna()
# For setting up the index
# precipitation_df.set_index('Date', inplace=True)
# Resize the plot
plt.figure(figsize=(10,5));
# Plot the precipitation
plt.bar(prcp_df["Date"], prcp_df["Precipitation"],
        align='center', width=2);
# Add labels and Title
plt.xlabel('Date',fontweight="bold", fontsize=12);
plt.ylabel('Precipitation (in)',fontweight="bold", fontsize=12);
plt.title('Precipitation by Date' ,fontweight="bold", fontsize=12);
# Add Grids (FIX: the original passed b=bool — the builtin type object — which
# only worked because any class is truthy; pass an actual boolean)
plt.grid(b=True, which='major', axis='y', color='grey', alpha=0.55);
# Add ticks (the empty tick list hides the unreadable per-date x labels)
plt.xticks("", rotation=90, fontweight="bold", alpha=0.55);
plt.yticks(rotation=0, fontweight="bold", alpha=0.55, fontsize=12)
# Save Plot
plt.savefig('Images/Plot01_Precipitation.png',bbox_inches='tight')
#Show the Plot
plt.show()
# -
# Use Pandas to calculate the summary statistics for the precipitation data
# precipitation_df.set_index('Date', inplace=True)
prcp_df.describe()
# Design a query to show how many stations are available in this dataset?
session.query(func.count(Station.id)).all()
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
session.query(Station.station, func.count(Station.id)).\
    filter(Station.station == Measurement.station).\
    group_by(Measurement.station).\
    order_by(func.count(Station.station).desc()).all()
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature most active station?
session.query(Measurement.station, func.min(Measurement.tobs),
              func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
    filter(Measurement.station == "USC00519281").\
    group_by(Measurement.station).all()
# +
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
temp = session.query(Measurement.date, Measurement.tobs).\
    filter(Measurement.station == "USC00519281").\
    filter(func.strftime("%Y-%m-%d", Measurement.date) >= '2016-08-23').all()
# Temperature dataframe cleaning process
temp_df = pd.DataFrame(temp, columns=['Date', 'Temperature'])
temp_df = temp_df.sort_values(by='Date', ascending=True)
temp_df.set_index('Date', inplace=True)
# Resize Plot
plt.figure(figsize=[8,5]);
# Define Histogram parameters
n, bins, patches = plt.hist(x=temp_df["Temperature"], bins=12,
                            color='royalblue',alpha=0.9);
# Labeling, tickers and grids
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Temperature (F)',fontweight="bold", alpha=0.8, fontsize=12)
plt.ylabel('Frequency',fontweight="bold", alpha=0.8, fontsize=12);
plt.xticks(fontsize=15, fontweight="bold", alpha=0.55);
plt.yticks(fontsize=15, fontweight="bold", alpha=0.55);
plt.title('Temperature Frequency Histogram \n for Station:"USC00519281"',
          fontweight="bold", alpha=0.9, fontsize=12);
# Save Plot
plt.savefig('Images/Plot02_Temp_by_frequency.png')
#Show the Plot
plt.show()
# +
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
    """TMIN, TAVG, and TMAX for a list of dates.
    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d
    Returns:
        TMIN, TAVE, and TMAX
    """
    # Aggregate the observed temperatures over the inclusive date range
    selection = [
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    ]
    query = session.query(*selection)
    query = query.filter(Measurement.date >= start_date)
    query = query.filter(Measurement.date <= end_date)
    return query.all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# +
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
# Vacation Dates
initial_vac_date = "2016-12-01"
final_vac_date = "2016-12-16"
# Previous Year Dates
initial_prev_year_date = "2015-12-01"
final_prev_year_date = "2015-12-16"
# Max, min and average calculation: run the query once and unpack the single
# (tmin, tavg, tmax) row, instead of issuing the identical query three times.
min_vac_temp, avg_vac_temp, max_vac_temp = calc_temps(
    initial_prev_year_date, final_prev_year_date)[0]
print(min_vac_temp, avg_vac_temp, max_vac_temp)
# +
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
error_prev_year = max_vac_temp-min_vac_temp
# Resize Plot
plt.figure(figsize=[2,4]);
# Define Bar and Error parameters
plt.bar(1, avg_vac_temp, yerr=error_prev_year, align='center', alpha=0.5,
        ecolor='black', capsize=7)
# Labeling the plot
plt.ylabel('Temperature (F)',fontweight="bold", alpha=0.8, fontsize=12)
plt.xticks(fontsize=15, fontweight="bold", alpha=0.55)
plt.yticks(fontsize=15, fontweight="bold", alpha=0.55)
plt.title('Trip\n Average Temperature',
          fontweight="bold", alpha=0.9, fontsize=12)
plt.grid(b=None, which='major', axis='x')
# Hide the meaningless x ticks for the single bar
plt.tick_params(
    axis='x',
    which='both',
    bottom=False,
    top=False,
    labelbottom=False)
# Save Plot
plt.savefig('Images/Plot03_Trip_Avg_Temp.png', bbox_inches='tight')
#Show the Plot
plt.show()
# +
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
# FIX(review): the original upper bound was final_vac_date (2016-12-16), which
# made the window span a whole year rather than the previous year's matching
# dates; it also ordered by a raw prcp value without ever computing the total.
# Select the per-station rainfall sum explicitly and sort by it.
session.query(Station.station, Station.name, Station.latitude, Station.longitude,
              Station.elevation, func.sum(Measurement.prcp)).\
    filter(Station.station == Measurement.station).\
    filter(Measurement.date >= initial_prev_year_date).\
    filter(Measurement.date <= final_prev_year_date).\
    group_by(Measurement.station).\
    order_by(func.sum(Measurement.prcp).desc()).all()
# -
# ## Optional Challenge Assignment
# +
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Daily Normals.
    Args:
        date (str): A date string in the format '%m-%d'
    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    # Aggregate every year's observations that fall on the given month-day
    aggregates = (
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    )
    same_day = func.strftime("%m-%d", Measurement.date) == date
    return session.query(*aggregates).filter(same_day).all()
daily_normals("01-01")
# -
| climate_starter_hw.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import datetime
from datetime import date
from pymongo import MongoClient
# Local MongoDB with one Hacker News collection per year; items carry at least
# a 'by' (author) and a 'time' (epoch seconds, as used below) field.
client = MongoClient()
db = client.HackerNews
hn_2018 = db.hn_2018
hn_2017 = db.hn_2017
hn_2016 = db.hn_2016
months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
years = ['2016', '2017', '2018']
# collections[i] holds the data for years[i]
collections = [hn_2016, hn_2017, hn_2018]
'''
timestamp 1451606400 means:
In your local time zone: Friday 1st January 2016 08:00:00 AM
UTC: Friday 1st January 2016 12:00:00 AM
'''
# Print, for every year, each month's first day and its Unix timestamp
# (seconds since 1970-01-01). The original contained an unreachable
# `if i == 12` branch — range(len(months)) only yields 0..11 — which, had it
# ever run, would have built an invalid month-0 date; it is removed.
for year in years:
    print("=== Year {} ===".format(year))
    for i in range(len(months)):
        print(months[i], date(int(year), i + 1, 1),
              int((date(int(year), i + 1, 1) - date(1970, 1, 1)).total_seconds()))
# Monthly active users (MAU): for every year and month, count the distinct
# authors ('by') of items whose 'time' falls inside that month.
year_count = 0
maus = []
for year in years:
    print("=== Year {} ===".format(year))
    mau = []
    for i in range(len(months)):
        cursor = collections[year_count].find()
        # [start_time, end_time) brackets the month in epoch seconds;
        # December rolls over into January of the following year.
        start_time = int((date(int(year), i + 1, 1) - date(1970, 1, 1)).total_seconds())
        if i == 11:
            end_time = int((date(int(year) + 1, 1, 1) - date(1970, 1, 1)).total_seconds())
        else:
            end_time = int((date(int(year), i + 2, 1) - date(1970, 1, 1)).total_seconds())
        temp = []
        for item in cursor:
            if int(item['time']) > start_time and int(item['time']) < end_time:
                temp.append(item)
        usr_profiles = set([item['by'] for item in temp])
        mau.append(len(usr_profiles))
        # BUG FIX: the original December branch printed months[12], which is an
        # IndexError on the 12-element list; months[i] is correct for every month.
        print("{} {} MAU: {}".format(months[i], date(int(year), i + 1, 1), len(usr_profiles)))
    year_count += 1
    maus.append(mau)
maus
# # Plotting
from bokeh.io import show
from bokeh.io import output_file
from bokeh.plotting import figure
# One bar chart of monthly active users per year, written to mau_<year>.html
for i in range(len(maus)):
    year = years[i]
    count = maus[i]
    output_file('mau_{}.html'.format(year))
    p = figure(x_range=months, plot_height=400, plot_width=600,
               title= "Monthly Active Users for Year {}".format(year))
    p.xaxis.major_label_text_font_size = "10pt"
    p.vbar(x=months, top=count, width=0.3)
    p.xgrid.grid_line_color = None
    p.y_range.start = 0
    show(p)
# +
# NOTE(review): `count` here is simply the leftover loop variable from the cell
# above, i.e. the MAU list of the *last* year only — confirm whether this
# combined chart was meant to show all years instead.
output_file('mau.html')
p = figure(x_range=months, plot_height=1000, plot_width=1600,
           title= "Monthly Active Users")
p.xaxis.major_label_text_font_size = "14pt"
p.vbar(x=months, top=count, width=0.5)
p.xgrid.grid_line_color = None
p.y_range.start = 0
show(p)
# -
# # Others
# +
# cursor = hn_2018.find({'time':{'$gt' : "1517443200"}, 'time':{'$lt': "1519862400"}})
# +
# cursor = hn_2018.find()
# +
# temp = []
# for i in cursor:
# if int(i['time']) > 1517443200 and int(i['time']) < 1519862400:
# temp.append(i)
# +
# usr_profiles = set([i['by'] for i in temp])
# +
# len(usr_profiles), len(temp)
# -
# Monthly active users for 2016. The original cell (marked "fix this") stopped
# at November because computing "first day of the next month" naively fails in
# December, and it reused `i` for both the month loop and the cursor loop.
count = []
for month in range(1, 13):
    cursor = collections[0].find()
    print("month: {}".format(month))
    start_month = int((date(2016, month, 1) - date(1970, 1, 1)).total_seconds())
    # First day of the following month; December wraps into January 2017
    if month == 12:
        next_month_start = date(2017, 1, 1)
    else:
        next_month_start = date(2016, month + 1, 1)
    end_month = int((next_month_start - date(1970, 1, 1)).total_seconds())
    temp = []
    for item in cursor:
        if int(item['time']) > start_month and int(item['time']) < end_month:
            temp.append(item)
    usr_profiles = set(item['by'] for item in temp)
    print("active users per month: {}".format(len(usr_profiles)))
    count.append(len(usr_profiles))
| notebooks/MAU.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import Python libraries
from typing import *
import os
import ibm_watson
import ibm_watson.natural_language_understanding_v1 as nlu
import ibm_cloud_sdk_core
import pandas as pd
import sys
# And of course we need the text_extensions_for_pandas library itself.
_PROJECT_ROOT = "../.."
try:
    import text_extensions_for_pandas as tp
except ModuleNotFoundError as e:
    # If we're running from within the project source tree and the parent Python
    # environment doesn't have the text_extensions_for_pandas package, use the
    # version in the local source tree.
    if not os.getcwd().endswith("market"):
        raise e
    if _PROJECT_ROOT not in sys.path:
        sys.path.insert(0, _PROJECT_ROOT)
    import text_extensions_for_pandas as tp
# Fail fast with a helpful message if the Watson NLU credentials are missing
if "IBM_API_KEY" not in os.environ:
    raise ValueError("IBM_API_KEY environment variable not set. Please create "
                     "a free instance of IBM Watson Natural Language Understanding "
                     "(see https://www.ibm.com/cloud/watson-natural-language-understanding) "
                     "and set the IBM_API_KEY environment variable to your instance's "
                     "API key value.")
api_key = os.environ.get("IBM_API_KEY")
service_url = os.environ.get("IBM_SERVICE_URL")
# Authenticate against the Watson NLU service instance
natural_language_understanding = ibm_watson.NaturalLanguageUnderstandingV1(
    version="2021-01-01",
    authenticator=ibm_cloud_sdk_core.authenticators.IAMAuthenticator(api_key)
)
natural_language_understanding.set_service_url(service_url)
# Github notebook gists will be this wide: ------------------>
# Screenshots of this notebook should be this wide: ----------------------------->
# -
# # Market Intelligence with Pandas and IBM Watson
#
# In this article, we'll show how to perform an example market intelligence task using [Watson Natural Language Understanding](https://www.ibm.com/cloud/watson-natural-language-understanding?cm_mmc=open_source_technology) and our open source library [Text Extensions for Pandas](https://ibm.biz/text-extensions-for-pandas).
#
# *Market intelligence* is an important application of natural language processing. In this context, "market intelligence" means "finding useful facts about customers and competitors in news articles". This article focuses on a market intelligence task: **extracting the names of executives from corporate press releases**.
#
# Information about a company's leadership has many uses. You could use that information to identify points of contact for sales or partnership discussions. Or you could estimate how much attention a company is giving to different strategic areas. Some organizations even use this information for recruiting purposes.
#
# Press releases are a good place to find the names of executives, because these articles often feature quotes from company leaders. Here's an example quote from an [IBM press release](https://newsroom.ibm.com/2020-12-02-IBM-Named-a-Leader-in-the-2020-IDC-MarketScape-For-Worldwide-Advanced-Machine-Learning-Software-Platform) from December 2020:
#
# 
#
# This quote contains information about the name of an executive:
# 
#
# This snippet is an example of the general pattern that we will look for:
# * The article contains a quotation.
# * The person to whom the quotation is attributed is mentioned by name.
#
# The key challenge that we need to address is the many different forms that this pattern can take. Here are some examples of variations that we would like to capture:
#
# 
#
# We'll deal with this variability by using general-purpose semantic models. These models extract high-level facts from formal text. The text could express a given fact in many different ways, but all of those different forms produce the same output.
#
# Semantic models can save a lot of work. There's no need to label separate training data or write separate rules or for all of the variations of our target pattern. A small amount of code can capture all these variations at once.
#
# Let's get started!
# # Use IBM Watson to identify people quoted by name.
#
# IBM Watson Natural Language Understanding includes a model called `semantic_roles` that performs [Semantic Role Labeling](https://en.wikipedia.org/wiki/Semantic_role_labeling). You can think of Semantic Role Labeling as finding *subject-verb-object* triples:
# * The actions that occurred in the text (the verb),
# * Who performed each action (the subject), and
# * On whom the action was performed (the object).
#
# If we take our example executive quote and feed it through the semantic_roles model, we get the following raw output:
# + tags=[]
# Run semantic role labeling on the example quote and keep the raw JSON result
response = natural_language_understanding.analyze(
    text='''"By combining the power of AI with the flexibility and agility of \
hybrid cloud, our clients are driving innovation and digitizing their operations \
at a fast pace," said <NAME>, general manager, Data and AI, IBM.''',
    return_analyzed_text=True,
    features=nlu.Features(
        semantic_roles=nlu.SemanticRolesOptions()
    )).get_result()
response
# -
# That format is a bit hard to read. Let's use our open-source library, [Text Extensions for Pandas](https://ibm.biz/text-extensions-for-pandas), to convert it to a Pandas DataFrame:
# +
import text_extensions_for_pandas as tp

# Convert the raw NLU response into a dict of DataFrames, keyed by model name.
dfs = tp.io.watson.nlu.parse_response(response)
# One row per subject-verb-object triple found by the semantic_roles model.
dfs["semantic_roles"]
# -
# Now we can see that the `semantic_roles` model has identified four subject-verb-object triples. Each row of this DataFrame contains one triple. In the first row, the verb is "to be", and in the last row, the verb is "to say".
#
# The last row is where things get interesting for us, because the verb "to say" indicates that *someone made a statement*. And that's exactly the high-level pattern we're looking for. Let's filter the DataFrame down to that row and look at it more closely.
# Keep only triples whose normalized verb is "say", i.e. someone made a statement.
dfs["semantic_roles"][dfs["semantic_roles"]["action.normalized"] == "say"]
# The subject in this subject-verb-object triple is "<NAME>, general manager, Data and AI, IBM", and the object is the quote from Mr. Hernandez.
#
# This model's output has captured the general action of "\[person\] says \[quotation\]". Different variations of that general pattern will produce the same output. If we move the attribution to the middle of the quote, we get the same result:
# Same pattern with the attribution moved to the middle of the quote --
# the semantic_roles model still extracts the same "say" triple.
response = natural_language_understanding.analyze(
    text='''"By combining the power of AI with the flexibility and agility of \
hybrid cloud,” said <NAME>, general manager, Data and AI, IBM, “our \
clients are driving innovation and digitizing their operations at a fast pace."''',
    return_analyzed_text=True,
    features=nlu.Features(semantic_roles=nlu.SemanticRolesOptions())).get_result()
dfs = tp.io.watson.nlu.parse_response(response)
dfs["semantic_roles"][dfs["semantic_roles"]["action.normalized"] == "say"]
# If we change the past-tense verb "said" to the present-tense "says", we get the same result again:
# Same pattern again with present-tense "says" -- the normalized action
# is still "say", so the same filter works.
response = natural_language_understanding.analyze(
    text='''"By combining the power of AI with the flexibility and agility of \
hybrid cloud, our clients are driving innovation and digitizing their operations \
at a fast pace," says <NAME>, general manager, Data and AI, IBM.''',
    return_analyzed_text=True,
    features=nlu.Features(semantic_roles=nlu.SemanticRolesOptions())).get_result()
dfs = tp.io.watson.nlu.parse_response(response)
dfs["semantic_roles"][dfs["semantic_roles"]["action.normalized"] == "say"]
# All the different variations that we talked about earlier will produce the same result. This model lets us capture them all with very little code. All we need to do is to run the model and filter the outputs down to the verb we're looking for.
#
# So far we've been looking at one paragraph. Let's rerun the same process on the entire press release.
#
# As before, we can run the document through Watson Natural Language Understanding's Python interface and tell Watson to run its semantic_roles model. Then we use Text Extensions for Pandas to convert the model results to a DataFrame:
# +
# Analyze a full press release; NLU fetches and parses the page itself.
DOC_URL = "https://newsroom.ibm.com/2020-12-02-IBM-Named-a-Leader-in-the-2020-IDC-MarketScape-For-Worldwide-Advanced-Machine-Learning-Software-Platform"

# Make the request
response = natural_language_understanding.analyze(
    url=DOC_URL,  # NLU will fetch the URL for us.
    return_analyzed_text=True,
    features=nlu.Features(
        semantic_roles=nlu.SemanticRolesOptions()
    )).get_result()

# Convert the output of the `semantic_roles` model to a DataFrame
semantic_roles_df = tp.io.watson.nlu.parse_response(response)["semantic_roles"]
semantic_roles_df.head()
# -
# If we filter down to the subject-verb-object triples for the verb "to say", we can see that this document has quite a few examples of the "person says statement" pattern:
# Keep only the "person says statement" triples for this document.
quotes_df = semantic_roles_df[semantic_roles_df["action.normalized"] == "say"]
quotes_df
# The DataFrame `quotes_df` contains all the instances of the "person says statement" pattern that the model has found. We want to filter this set down to cases where the subject (the person making the statement) is mentioned by name. We also want to extract that name.
#
# ## Identifying person names
# In this press release, all three instances of the "person says statement" pattern happen to have a name in the subject. But there will not always be a name. Consider this example sentence from [another IBM press release](https://newsroom.ibm.com/2021-04-08-IBM-Consumer-Study-Points-to-Potential-Recovery-of-Retail-and-Travel-Industries-as-Consumers-Receive-the-COVID-19-Vaccine):
#
# > 27 percent of Gen Z surveyed said they will increase outside \
# interaction, compared to 19 percent of Gen X surveyed and only 16 percent of \
# those surveyed over 55.
#
# Here, the subject for the verb "said" is "27 percent of Gen Z surveyed". That subject does not include a person name.
# +
# Do not include this cell in the blog.
# Show that the `semantic_roles` model produces the output we described above:
# the subject "27 percent of Gen Z surveyed" contains no person name.
response = natural_language_understanding.analyze(
    text='''27 percent of Gen Z surveyed said they will increase outside \
interaction, compared to 19 percent of Gen X surveyed and only 16 percent of \
those surveyed over 55.''',
    return_analyzed_text=True,
    features=nlu.Features(semantic_roles=nlu.SemanticRolesOptions())).get_result()
# Convert the output of the `semantic_roles` model to a DataFrame
tp.io.watson.nlu.parse_response(response)["semantic_roles"]
# -
# How can we find the matches where the subject contains a person's name? Fortunately for us, Watson Natural Language Understanding has a model for exactly that task. The `entities` model in this Watson service finds named entity mentions. A named entity mention is a place where the document mentions an *entity* like a person or company by the entity's *name*.
#
# This model will find person names with high accuracy. The code below tells the Watson service to run the entities model and retrieve mentions. Then we convert the result to a DataFrame using Text Extensions for Pandas:
# Run the `entities` model (with mentions) and the `syntax` model over the
# same document, then convert the entity mentions to a DataFrame.
response = natural_language_understanding.analyze(
    url=DOC_URL,
    return_analyzed_text=True,
    features=nlu.Features(
        # Ask Watson to find mentions of named entities
        entities=nlu.EntitiesOptions(mentions=True),
        # Also divide the document into words. We'll use these in just a moment.
        syntax=nlu.SyntaxOptions(tokens=nlu.SyntaxOptionsTokens()),
    )).get_result()
entity_mentions_df = tp.io.watson.nlu.parse_response(response)["entity_mentions"]
entity_mentions_df.head()
# The `entities` model's output contains mentions of many types of entity. For this application, we need
# mentions of person names. Let's filter our DataFrame down to just those types of mentions:
# Keep only mentions whose entity type is "Person".
person_mentions_df = entity_mentions_df[entity_mentions_df["type"] == "Person"]
person_mentions_df.tail(4)
# ## Tying it all together
#
# Now we have two pieces of information that we need to combine:
# * Instances of the "person said statement" pattern from the `semantic_roles` model
# * Mentions of person names from the `entities` model
#
# We need to align the "subject" part of the semantic role labeler's output with the person mentions. We can use the span manipulation facilities of Text Extensions for Pandas to do this.
#
# *Spans* are a common concept in natural language processing. A span represents a region of the document, usually as begin and end offsets and a reference to the document's text. Text Extensions for Pandas adds a special `SpanDtype` data type to Pandas DataFrames. With this data type, you can define a DataFrame with one or more columns of span data. For example, the column called "span" in the DataFrame above is of the `SpanDtype` data type. The first span in this column, `[1288, 1304): '<NAME>'`, shows that the name "<NAME>" occurs between locations 1288 and 1304 in the document.
#
# The output of the `semantic_roles` model doesn't contain location information. But that's ok, because it's easy to create your own spans. We just need to use some string matching to recover the missing locations:
# +
# Recover character offsets for each subject string by exact string match
# against the full document text, then wrap them in a SpanArray column.
doc_text = entity_mentions_df["span"].array.document_text

# Keep only the subject strings, with a fresh integer index.
subjects_df = quotes_df[["subject.text"]].copy().reset_index(drop=True)

# Locate each subject in the document via str.index(), then derive the
# end offset from the string length.
begins = pd.Series(
    [doc_text.index(subj) for subj in subjects_df["subject.text"]], dtype=int)
ends = begins + subjects_df["subject.text"].str.len()

# Attach the <begin, end, text> triples as a span column.
subjects_df["span"] = tp.SpanArray(doc_text, begins, ends)
subjects_df
# -
# Now we have a column of span data for the `semantic_roles` model's output, and we can align these spans with the spans of person mentions. Text Extensions for Pandas includes built-in span operations. One of these operations, `contain_join()`, takes two columns of span data and identifies all pairs of spans where the first span contains the second span. We can use this operation to find all the places where the span from the `semantic_roles` model contains a span from the output of the `entities` model:
# Join subject spans with person-name spans: keep each (subject, person)
# pair where the subject span fully contains the person mention.
execs_df = tp.spanner.contain_join(subjects_df["span"],
                                   person_mentions_df["span"],
                                   "subject", "person")
execs_df[["person"]]
# To recap: With a few lines of Python code, we've identified places in the article where the article quoted a person by name. For each of those quotations, we've identified the person name and its location in the document (the `person` column in the DataFrame above).
#
# Here's all the code we've just created, condensed down to a single Python function:
# +
# In the blog post, this will be a Github gist.
# See https://gist.github.com/frreiss/038ac63ef20eed323a5637f9ddb2de8d
import pandas as pd
import text_extensions_for_pandas as tp
import ibm_watson
import ibm_watson.natural_language_understanding_v1 as nlu
import ibm_cloud_sdk_core
def find_persons_quoted_by_name(doc_url, api_key, service_url) -> pd.DataFrame:
    """Find people who are quoted by name in the document at `doc_url`.

    Runs Watson NLU's `semantic_roles` and `entities` models over the
    document, keeps subject-verb-object triples whose normalized verb is
    "say", and aligns the subject spans with mentions of person names.

    Params
    ======
        doc_url (str): URL of the document to analyze (fetched by NLU)
        api_key (str): IBM Cloud API key for the NLU service
        service_url (str): endpoint URL of the NLU service instance

    Returns: DataFrame with one row per quoted person and columns
        "person" (span of the name) and "url" (the document URL).
    """
    # Ask Watson Natural Language Understanding to run its "semantic_roles"
    # and "entities" models.
    natural_language_understanding = ibm_watson.NaturalLanguageUnderstandingV1(
        version="2021-01-01",
        authenticator=ibm_cloud_sdk_core.authenticators.IAMAuthenticator(api_key)
    )
    natural_language_understanding.set_service_url(service_url)
    nlu_results = natural_language_understanding.analyze(
        url=doc_url,
        return_analyzed_text=True,
        features=nlu.Features(
            entities=nlu.EntitiesOptions(mentions=True),
            semantic_roles=nlu.SemanticRolesOptions())).get_result()

    # Convert the output of Watson Natural Language Understanding to DataFrames.
    dataframes = tp.io.watson.nlu.parse_response(nlu_results)
    entity_mentions_df = dataframes["entity_mentions"]
    semantic_roles_df = dataframes["semantic_roles"]

    # Extract mentions of person names.
    person_mentions_df = entity_mentions_df[entity_mentions_df["type"] == "Person"]

    # Extract instances of subjects that made statements.
    # (The original version computed this line twice; once is enough.)
    quotes_df = semantic_roles_df[semantic_roles_df["action.normalized"] == "say"]
    subjects_df = quotes_df[["subject.text"]].copy().reset_index(drop=True)

    # The semantic_roles output carries no offsets, so recover each subject's
    # location in the document with exact string matching.
    doc_text = entity_mentions_df["span"].array.document_text
    subjects_df["begin"] = pd.Series(
        [doc_text.index(s) for s in subjects_df["subject.text"]], dtype=int)

    # Compute end offsets and wrap the <begin, end, text> triples in a SpanArray column
    subjects_df["end"] = subjects_df["begin"] + subjects_df["subject.text"].str.len()
    subjects_df["span"] = tp.SpanArray(doc_text, subjects_df["begin"], subjects_df["end"])

    # Align subjects with person names: keep pairs where the subject span
    # contains the person-name span.
    execs_df = tp.spanner.contain_join(subjects_df["span"],
                                       person_mentions_df["span"],
                                       "subject", "person")

    # Add on the document URL.
    execs_df["url"] = doc_url
    return execs_df[["person", "url"]]
# +
# Don't include this cell in the blog post.
# Verify that the code above works. `api_key` and `service_url` are defined
# earlier in the notebook.
find_persons_quoted_by_name(DOC_URL, api_key, service_url)
# -
# This function, `find_persons_quoted_by_name()`, turns a press release into a list of executive names. Here's the output that we get if we pass a year's worth of articles from the ["Announcements" section of ibm.com](https://newsroom.ibm.com/announcements) through it:
# +
# Don't include this cell in the blog post.
# Load press release URLs from a file: one URL per line; blank lines and
# lines starting with "#" are skipped.
with open("ibm_press_releases.txt", "r") as f:
    stripped_lines = [line.strip() for line in f]
ibm_press_release_urls = [line for line in stripped_lines
                          if len(line) > 0 and not line.startswith("#")]
# -
# Run the extraction over every press release and stack the per-document
# results into one DataFrame of (person, url) rows.
executive_names = pd.concat([
    find_persons_quoted_by_name(url, api_key, service_url)
    for url in ibm_press_release_urls
])
executive_names
# Now we've turned 191 press releases into a DataFrame with 301 executive names.
# That's a lot of power packed into one screen's worth of code! To find out more about the advanced semantic models that let us do so much with so little code, check out Watson Natural Language Understanding [here](https://www.ibm.com/cloud/watson-natural-language-understanding?cm_mmc=open_source_technology)!
# +
# Alternate version of adding spans to subjects: Use dictionary matching.
# This method is currently problematic because we don't have payloads
# for dictionary entries. We have to use exact string matching to map the
# original strings back to the dictionary matches.
# Create a dictionary from the strings in quotes_df["subject.text"].
tokenizer = tp.io.spacy.simple_tokenizer()
dictionary = tp.spanner.extract.create_dict(quotes_df["subject.text"], tokenizer)

# Match the dictionary against the document text.
doc_text = entity_mentions_df["span"].array.document_text
tokens = tp.io.spacy.make_tokens(doc_text, tokenizer)
matches_df = tp.spanner.extract_dict(tokens, dictionary, output_col_name="span")
# Use the matched spans' covered text as an exact-string join key.
matches_df["subject.text"] = matches_df["span"].array.covered_text  # Join key

# Merge the dictionary matches back with the original strings.
subjects_df = quotes_df[["subject.text"]].merge(matches_df)
subjects_df
| tutorials/market/Market_Intelligence_Part1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
from sklearn.manifold import TSNE
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
# ## Data Preparation
# Load the raw cryptocurrency data.
path = Path("crypto_data.csv")
df = pd.read_csv(path)
df
# Keep only currencies that are currently trading.
df = df[df['IsTrading']==True]
# Drop the IsTrading column from the dataframe (now constant).
df = df.drop(columns=['IsTrading'])
# Remove all rows that have at least one null value.
df = df.dropna(how='any',axis=0)
# Keep only rows with TotalCoinsMined > 0.
df = df[df['TotalCoinsMined']>0]
# Delete the CoinName column (a label, not a feature).
df = df.drop(columns=['CoinName'])
# Drop the leftover CSV index column.
df = df.drop(columns=['Unnamed: 0'])
df
# +
# Integer-encode the categorical 'Algorithm' and 'ProofType' columns:
# each distinct value gets its position in order of first appearance.
algorithms = {name: code
              for code, name in enumerate(df['Algorithm'].unique().tolist())}
proofType = {name: code
             for code, name in enumerate(df['ProofType'].unique().tolist())}

# Replace the string values with their integer codes.
df = df.replace({'Algorithm': algorithms})
df = df.replace({'ProofType': proofType})
df.dtypes
# -
# Standardize data with StandardScaler
# Standardize the two numeric columns to zero mean / unit variance.
scaler = StandardScaler()
scaled_data = scaler.fit_transform(df[['TotalCoinsMined', 'TotalCoinSupply']])
# NOTE(review): assumes df.columns[2:] is exactly
# ['TotalCoinsMined', 'TotalCoinSupply'] -- confirm the CSV column order.
df1 = pd.DataFrame(scaled_data, columns=df.columns[2:])
# Re-attach the integer-encoded categorical columns, unscaled.
df1['Algorithm']=df['Algorithm'].values
df1['ProofType']=df['ProofType'].values
df1
# ## Dimensionality Reduction
# PCA: keep enough components to explain 99% of the variance.
pca = PCA(n_components=.99)
df_pca = pca.fit_transform(df1)
# NOTE(review): the column list hard-codes 2 names, but n_components=.99
# keeps however many components are needed -- this line fails if that
# number is not exactly 2. Confirm against the fitted pca.n_components_.
df_pca = pd.DataFrame(
    data=df_pca, columns=["principal component 1", "principal component 2"]
)
df_pca.head()
# Fraction of total variance captured by the retained components.
pca.explained_variance_ratio_.sum()
# ## Cluster Analysis with k-Means
# +
# Create an elbow plot: run k-means for k = 1..10 and record the inertia
# at each k; the "elbow" in the curve suggests a good cluster count.
k = list(range(1, 11))
inertia = [KMeans(n_clusters=n_clusters, random_state=1234).fit(df1).inertia_
           for n_clusters in k]

# Put the curve in a DataFrame and plot it.
df_elbow = pd.DataFrame({"k": k, "inertia": inertia})
plt.plot(df_elbow['k'], df_elbow['inertia'])
plt.xticks(range(1,11))
plt.xlabel('Number of clusters')
plt.ylabel('Inertia')
plt.show()
# -
# NOTE(review): df_pca is already a DataFrame with these exact column
# names at this point, so this re-wrap is effectively a no-op.
df_pca = pd.DataFrame(
    data=df_pca,
    columns=["principal component 1", "principal component 2"],
)
df_pca.head()
# +
# Initialize the K-Means model with k=2 (chosen from the elbow plot above)
model = KMeans(n_clusters=2, random_state=1234)

# Fit the model on the PCA-reduced features
model.fit(df_pca)

# Predict clusters
predictions = model.predict(df_pca)

# Add the predicted class column
df_pca["class"] = model.labels_
df_pca.head()
# +
df1
# Refit the same 2-cluster model, this time on the full standardized feature set.
model.fit(df1)
# Predict clusters
predictions = model.predict(df1)
# NOTE(review): df2 = df1 aliases (does not copy) df1, so adding "class"
# below also mutates df1.
df2 = df1
# Add the predicted class column
df2["class"] = model.labels_
# Separate the features from the predicted labels.
df3 = df2.drop(['class'], axis=1)
labels = df2['class']
df3
# -
# TSNE: project the features to 2-D for visualization.
tsne = TSNE(learning_rate=35)
# Reduce dimensions
tsne_features = tsne.fit_transform(df3)
tsne_features.shape
# +
# Prepare to plot the dataset
# The first column of transformed features
df3['x'] = tsne_features[:,0]
# The second column of transformed features
df3['y'] = tsne_features[:,1]
# -
# Visualize the t-SNE projection (uncolored)
plt.scatter(df3['x'], df3['y'])
plt.show()
# Cluster sizes from the k-means fit above
labels.value_counts()
# Visualize the projection again, colored by k-means cluster label
plt.scatter(df3['x'], df3['y'], c=labels)
plt.show()
| classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.13 64-bit (''drlnd-cont-control'': conda)'
# name: python3
# ---
# # Continuous Control
#
# ---
#
# Congratulations for completing the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program! In this notebook, you will learn how to control an agent in a more challenging environment, where the goal is to train a creature with four arms to walk forward. **Note that this exercise is optional!**
#
# ### 1. Start the Environment
#
# We begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).
from unityagents import UnityEnvironment
import numpy as np
# Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.
#
# - **Mac**: `"path/to/Crawler.app"`
# - **Windows** (x86): `"path/to/Crawler_Windows_x86/Crawler.exe"`
# - **Windows** (x86_64): `"path/to/Crawler_Windows_x86_64/Crawler.exe"`
# - **Linux** (x86): `"path/to/Crawler_Linux/Crawler.x86"`
# - **Linux** (x86_64): `"path/to/Crawler_Linux/Crawler.x86_64"`
# - **Linux** (x86, headless): `"path/to/Crawler_Linux_NoVis/Crawler.x86"`
# - **Linux** (x86_64, headless): `"path/to/Crawler_Linux_NoVis/Crawler.x86_64"`
#
# For instance, if you are using a Mac, then you downloaded `Crawler.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:
# ```
# env = UnityEnvironment(file_name="Crawler.app")
# ```
# Launch the Unity Crawler environment (path set for the Linux x86_64 build).
env = UnityEnvironment(file_name='./Crawler_Linux/Crawler.x86_64')
# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
# get the default brain (the one we will control from Python)
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# ### 2. Examine the State and Action Spaces
#
# Run the code cell below to print some information about the environment.
# +
# reset the environment in training mode and inspect its spaces
env_info = env.reset(train_mode=True)[brain_name]

# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)

# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)

# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
# -
# ### 3. Take Random Actions in the Environment
#
# In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.
#
# Once this cell is executed, you will watch the agent's performance, if it selects an action at random with each time step. A window should pop up that allows you to observe the agent, as it moves through the environment.
#
# Of course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment!
# Roll out one episode taking uniform random actions, accumulating reward.
env_info = env.reset(train_mode=False)[brain_name]     # reset the environment
states = env_info.vector_observations                  # get the current state (for each agent)
scores = np.zeros(num_agents)                          # initialize the score (for each agent)
while True:
    actions = np.random.randn(num_agents, action_size) # select an action (for each agent)
    actions = np.clip(actions, -1, 1)                  # all actions between -1 and 1
    env_info = env.step(actions)[brain_name]           # send all actions to the environment
    next_states = env_info.vector_observations         # get next state (for each agent)
    rewards = env_info.rewards                         # get reward (for each agent)
    dones = env_info.local_done                        # see if episode finished
    scores += env_info.rewards                         # update the score (for each agent)
    states = next_states                               # roll over states to next time step
    if np.any(dones):                                  # exit loop if episode finished
        break
print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
# When finished, you can close the environment.
# +
# env.close()
# -
# ### 4. It's Your Turn!
#
# Now it's your turn to train your own agent to solve the environment! When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:
# ```python
# env_info = env.reset(train_mode=True)[brain_name]
# ```
# +
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
    """Return symmetric uniform init bounds (-1/sqrt(fan_in), 1/sqrt(fan_in)).

    `fan_in` is the number of *input* units of the linear layer, per the
    DDPG paper's initialization scheme. For ``nn.Linear`` the weight tensor
    has shape (out_features, in_features), so the input dimension is
    ``size()[1]``. (The original code used ``size()[0]``, which is the
    output dimension, not the fan-in.)
    """
    fan_in = layer.weight.data.size()[1]
    lim = 1. / np.sqrt(fan_in)
    return (-lim, lim)
class Actor(nn.Module):
    """Actor (policy) network: maps states to actions in [-1, 1].

    Hidden layers are registered as attributes fc1..fcN and the output
    layer as fc{N+1}, matching the original attribute layout.
    """

    def __init__(self, state_size, action_size, seed, layer_units=[1024, 512, 256, 128]):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            layer_units (list[int]): widths of the hidden layers
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.layer_units = layer_units
        # Chain of layer widths: state -> hidden layers -> action.
        widths = [state_size] + list(layer_units) + [action_size]
        for idx, (n_in, n_out) in enumerate(zip(widths[:-1], widths[1:]), start=1):
            setattr(self, f'fc{idx}', nn.Linear(n_in, n_out))
        self.reset_parameters()

    def _layers(self):
        """Yield (index, layer) pairs for fc1..fc{N+1} in order."""
        for idx in range(1, len(self.layer_units) + 2):
            yield idx, getattr(self, f'fc{idx}')

    def reset_parameters(self):
        """Re-initialize weights: hidden layers via hidden_init, output layer small."""
        n_hidden = len(self.layer_units)
        for idx, layer in self._layers():
            if idx <= n_hidden:
                layer.weight.data.uniform_(*hidden_init(layer))
            else:
                layer.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        x = state
        n_hidden = len(self.layer_units)
        for idx, layer in self._layers():
            if idx <= n_hidden:
                x = torch.relu(layer(x))
            else:
                x = torch.tanh(layer(x))
        return x
class Critic(nn.Module):
    """Critic (value) network: maps (state, action) pairs to scalar Q-values.

    The action vector is injected after the first hidden layer. Layers are
    registered as attributes fc1..fc{N+1}, matching the original layout.
    """

    def __init__(self, state_size, action_size, seed, layer_units=[1024, 512, 256, 128]):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            layer_units (list[int]): widths of the hidden layers
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.layer_units = layer_units
        n_in = state_size
        for idx, n_out in enumerate(layer_units, start=1):
            if idx == 2:
                # The action is concatenated to the first layer's output,
                # so the second layer's input grows by action_size.
                n_in += action_size
            setattr(self, f'fc{idx}', nn.Linear(n_in, n_out))
            n_in = n_out
        # Final layer produces a single Q-value.
        setattr(self, f'fc{len(layer_units) + 1}', nn.Linear(layer_units[-1], 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize weights: hidden layers via hidden_init, output layer small."""
        for idx in range(1, len(self.layer_units) + 1):
            layer = getattr(self, f'fc{idx}')
            layer.weight.data.uniform_(*hidden_init(layer))
        out_layer = getattr(self, f'fc{len(self.layer_units) + 1}')
        out_layer.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        x = torch.relu(self.fc1(state))
        x = torch.cat((x, action), dim=1)
        for idx in range(2, len(self.layer_units) + 1):
            x = torch.relu(getattr(self, f'fc{idx}')(x))
        return getattr(self, f'fc{len(self.layer_units) + 1}')(x)
# +
# https://github.com/rlcode/per.git
class SumTree:
    """Binary sum-tree for proportional prioritized sampling.

    Leaves hold priorities; every internal node holds the sum of its
    children, so the root (`total()`) is the sum of all priorities and a
    value s in [0, total] maps to a leaf in O(log n).
    """

    write = 0  # next leaf slot to (over)write; wraps around at capacity

    def __init__(self, capacity):
        self.capacity = capacity
        # Flat array: capacity-1 internal nodes followed by capacity leaves.
        self.tree = np.zeros(2 * capacity - 1)
        self.data = np.zeros(capacity, dtype=object)
        self.n_entries = 0

    def _propagate(self, idx, change):
        """Add `change` to every ancestor of node `idx` up to the root."""
        parent = (idx - 1) // 2
        while True:
            self.tree[parent] += change
            if parent == 0:
                break
            parent = (parent - 1) // 2

    def _retrieve(self, idx, s):
        """Walk down from node `idx` to the leaf covering cumulative value `s`."""
        while True:
            left = 2 * idx + 1
            if left >= len(self.tree):
                return idx  # reached a leaf
            if s <= self.tree[left]:
                idx = left
            else:
                s -= self.tree[left]
                idx = left + 1

    def total(self):
        """Sum of all stored priorities (the root node)."""
        return self.tree[0]

    def add(self, p, data):
        """Store `data` with priority `p`, overwriting the oldest slot when full."""
        idx = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(idx, p)
        self.write = (self.write + 1) % self.capacity
        if self.n_entries < self.capacity:
            self.n_entries += 1

    def update(self, idx, p):
        """Set the priority of tree node `idx` and repair ancestor sums."""
        change = p - self.tree[idx]
        self.tree[idx] = p
        self._propagate(idx, change)

    def get(self, s):
        """Return (tree index, priority, data) for cumulative value `s`."""
        idx = self._retrieve(0, s)
        return (idx, self.tree[idx], self.data[idx - self.capacity + 1])
# -
class ReplayBuffer:
    """Fixed-size prioritized replay buffer backed by a SumTree.

    Experiences are stored with a priority derived from their TD error;
    `sample()` draws proportionally to priority and returns importance-
    sampling weights to correct for the non-uniform sampling.
    """

    def __init__(self, buffer_size, batch_size, seed, device, e: float = 0.01, a: float = 0.6, beta: float = 0.4, beta_increment_per_sampling = 1e-3):
        """Initialize a ReplayBuffer object.

        Params
        ======
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed for segment sampling
            device: torch device the sampled tensors are moved to
            e (float): small constant added to |error| so no priority is zero
            a (float): priority exponent (0 = uniform, 1 = fully proportional)
            beta (float): initial importance-sampling exponent
            beta_increment_per_sampling (float): per-sample() increase of beta toward 1
        """
        self.e = e
        self.a = a
        self.beta = beta
        self.beta_increment_per_sampling = beta_increment_per_sampling
        self.tree = SumTree(buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)
        self.device = device

    def _get_priority(self, error):
        # Priority grows with |error|; `e` keeps it strictly positive.
        return (np.abs(error) + self.e) ** self.a

    def add(self, error, sample):
        """Add a new experience to memory, prioritized by its TD error."""
        state, action, reward, next_state, done = sample
        e = self.experience(state, action, reward, next_state, done)
        p = self._get_priority(error)
        self.tree.add(p, e)

    def sample(self):
        """Sample a batch proportionally to priority.

        Returns ((states, actions, rewards, next_states, dones), tree
        indices, importance-sampling weights).
        """
        experiences = []
        idxs = []
        # Stratified sampling: split the total priority mass into equal
        # segments and draw one experience from each segment.
        segment = self.tree.total() / self.batch_size
        priorities = []

        # Anneal beta toward 1 (full bias correction) over time.
        self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])

        i = 0
        while i < self.batch_size:
            a = segment * i
            b = segment * (i + 1)
            s = random.uniform(a, b)
            idx, p, data = self.tree.get(s)
            if not isinstance(data, tuple):
                # Hit an unfilled slot (data still the 0 placeholder); retry.
                # NOTE(review): this can spin if a segment has no filled
                # entries -- confirm the buffer is warm before sampling.
                continue
            priorities.append(p)
            experiences.append(data)
            idxs.append(idx)
            i += 1

        # Stack the batch into torch tensors on the configured device.
        states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(self.device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(self.device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(self.device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(self.device)
        dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(self.device)

        # Importance-sampling weights, normalized so the largest weight is 1.
        # (The list / scalar division works because total() is a numpy scalar,
        # which broadcasts the list to an ndarray.)
        sampling_probabilities = priorities / self.tree.total()
        is_weight = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)
        is_weight /= is_weight.max()

        return (states, actions, rewards, next_states, dones), idxs, is_weight

    def update(self, idx, error):
        """Update the priority of tree node `idx` from a new TD error."""
        p = self._get_priority(error)
        self.tree.update(idx, p)

    def __len__(self):
        """Return the current size of internal memory."""
        return self.tree.n_entries
class OUNoise:
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise."""

    def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.1, scale=1.0, scale_decay=1.0):
        """Initialize parameters and noise process.

        Params
        ======
            size (int): dimension of the noise vector
            seed (int): random seed (now actually controls the sampled noise)
            mu (float): long-running mean the process reverts to
            theta (float): mean-reversion rate
            sigma (float): volatility of the random perturbation
            scale (float): multiplier applied to each sample
            scale_decay (float): per-sample decay factor for `scale`
        """
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        # Bug fix: the original seeded the `random` module but drew noise
        # from the *global* numpy RNG, so `seed` never affected the samples.
        # Use a dedicated seeded numpy generator instead (reproducible and
        # isolated from other numpy consumers).
        self.seed = random.seed(seed)  # kept for backward compatibility
        self.rng = np.random.RandomState(seed)
        self.initial_scale = scale
        self.scale = scale
        self.scale_decay = scale_decay
        self.reset()

    def reset(self):
        """Reset the internal state (= noise) to mean (mu) and restore the scale."""
        self.state = copy.copy(self.mu)
        self.scale = self.initial_scale

    def sample(self):
        """Update internal state and return it as a (scaled) noise sample."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * self.rng.randn(len(x))
        self.state = x + dx
        scale = self.scale
        self.scale *= self.scale_decay  # scale decays after every sample
        return self.state * scale
# +
import numpy as np
import random
import copy
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
class Agent():
    """DDPG agent: interacts with and learns from the environment.

    Maintains local/target actor and critic networks, an Ornstein-Uhlenbeck
    exploration-noise process, and a prioritized replay buffer. Relies on
    `Actor`, `Critic`, `OUNoise` and `ReplayBuffer` defined elsewhere in
    this notebook.
    """
    def __init__(
        self,
        state_size: int,
        action_size: int,
        random_seed: int,
        buffer_size: int=int(1e5),
        batch_size: int=128,
        gamma: float=0.99,
        tau: float=1e-3,
        lr_actor: float=1e-4,
        lr_critic: float=1e-3,
        weight_decay: float=0.0,
        noise_scale: float=1.0,
        noise_decay: float=1.0,
        device: str="cpu"
    ):
        """Initialize an Agent object.
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            random_seed (int): random seed
            buffer_size (int): maximum number of experiences kept in replay
            batch_size (int): minibatch size drawn on each learning step
            gamma (float): discount factor for future rewards
            tau (float): interpolation factor for target-network soft updates
            lr_actor (float): actor optimizer learning rate
            lr_critic (float): critic optimizer learning rate
            weight_decay (float): L2 penalty for the critic optimizer
            noise_scale (float): initial OU-noise scale
            noise_decay (float): per-sample multiplicative decay of the noise scale
            device (str): torch device the networks live on
        """
        self.state_size = state_size
        self.action_size = action_size
        # NOTE: random.seed() returns None; the call seeds the global RNG as a
        # side effect, so self.seed is always None.
        self.seed = random.seed(random_seed)
        self.batch_size = batch_size
        self.gamma = gamma
        self.tau = tau
        self.device = device
        # Actor Network (w/ Target Network)
        self.actor_local = Actor(state_size, action_size, random_seed).to(device)
        self.actor_target = Actor(state_size, action_size, random_seed).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=lr_actor)
        # Critic Network (w/ Target Network)
        self.critic_local = Critic(state_size, action_size, random_seed).to(device)
        self.critic_target = Critic(state_size, action_size, random_seed).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=lr_critic, weight_decay=weight_decay)
        # Start the target networks as exact copies of the local ones.
        self.clone_weights(self.actor_target, self.actor_local)  # ADDED
        self.clone_weights(self.critic_target, self.critic_local)  # ADDED
        # Noise process
        self.noise = OUNoise(action_size, random_seed, scale=noise_scale, scale_decay=noise_decay)
        # Replay memory
        self.memory = ReplayBuffer(buffer_size, batch_size, random_seed, device)

    def clone_weights(self, w1, w0):  # ADDED
        """Copy every parameter of model w0 into model w1 (hard update)."""
        for p1, p0 in zip(w1.parameters(), w0.parameters()):
            p1.data.copy_(p0.data)

    def step(self, state, action, reward, next_state, done):
        """Save experience in replay memory, and use random sample from buffer to learn."""
        # Save experience / reward
        # NOTE(review): s, r and d are computed but never used below; only ns is.
        s = torch.from_numpy(state).float().to(self.device)
        ns = torch.from_numpy(next_state).float().to(self.device)
        r = torch.from_numpy(reward).float().to(self.device)
        d = torch.from_numpy(done).float().to(self.device)
        with torch.no_grad():
            # Bootstrapped return estimated with the *local* networks...
            oan = self.actor_local(ns)
            oqtn = self.critic_local(ns, oan)
            oqt = reward + (self.gamma * oqtn.cpu().numpy() * (1 - done))
            # ...and with the *target* networks.
            an = self.actor_target(ns)
            qtn = self.critic_target(ns, an)
            qt = reward + (self.gamma * qtn.cpu().numpy() * (1 - done))
            # Replay priority for each experience: gap between the two estimates.
            errors = np.abs(oqt - qt).squeeze()
        for i, error in enumerate(errors):
            self.memory.add(error, (state[i], action[i], reward[i], next_state[i], done[i]))
        # Learn, if enough samples are available in memory
        if len(self.memory) > self.batch_size:
            experiences, idxs, is_weights = self.memory.sample()
            self.learn(experiences, idxs, is_weights)

    def act(self, state, add_noise=True):
        """Returns actions for given state as per current policy."""
        state = torch.from_numpy(state).float().to(self.device)
        self.actor_local.eval()  # inference mode for the forward pass
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        self.actor_local.train()
        if add_noise:
            action += self.noise.sample()
        # Actions are clipped to the [-1, 1] range the environment expects.
        return np.clip(action, -1, 1)

    def reset(self):
        """Reset the exploration-noise process (call at episode start)."""
        self.noise.reset()

    def learn(self, experiences, idxs, is_weights):
        """Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(state) -> action
            critic_target(state, action) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            idxs: replay-buffer indices of the sampled experiences
            is_weights: importance-sampling weights (not used in this body)
        """
        states, actions, rewards, next_states, dones = experiences
        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        actions_next = self.actor_target(next_states)
        Q_targets_next = self.critic_target(next_states, actions_next)
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (self.gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = self.critic_local(states, actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # update experience priorities with the fresh TD errors
        errors = torch.abs(Q_expected - Q_targets).cpu().detach().numpy()
        # print(f"errors: {errors.shape}, idxs: {len(idxs)}")
        for i in range(self.batch_size):
            idx = idxs[i]
            self.memory.update(idx, errors[i])
        # Minimize the loss
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        # torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) # ADDED
        self.critic_optimizer.step()
        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss: ascend the critic's Q by descending its negation
        actions_pred = self.actor_local(states)
        actor_loss = -self.critic_local(states, actions_pred).mean()
        # Minimize the loss
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # ----------------------- update target networks ----------------------- #
        self.soft_update(self.critic_local, self.critic_target, self.tau)
        self.soft_update(self.actor_local, self.actor_target, self.tau)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model: PyTorch model (weights will be copied from)
            target_model: PyTorch model (weights will be copied to)
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)

    def save(self, path: str) -> None:
        """Write all four network state dicts under `path` (created if missing)."""
        # NOTE(review): relies on `os`, imported by a later notebook cell — fine
        # at call time, but fragile if cells run out of order.
        if not os.path.exists(path):
            os.makedirs(path)
        torch.save(self.actor_local.state_dict(), f'{path}/crawler-checkpoint_actor.pth')
        torch.save(self.critic_local.state_dict(), f'{path}/crawler-checkpoint_critic.pth')
        torch.save(self.actor_target.state_dict(), f'{path}/crawler-checkpoint_actor_target.pth')
        torch.save(self.critic_target.state_dict(), f'{path}/crawler-checkpoint_critic_target.pth')

    def load(self, path: str) -> None:
        """Restore all four network state dicts previously written by save()."""
        self.actor_local.load_state_dict(torch.load(f'{path}/crawler-checkpoint_actor.pth'))
        self.actor_target.load_state_dict(torch.load(f'{path}/crawler-checkpoint_actor_target.pth'))
        self.critic_local.load_state_dict(torch.load(f'{path}/crawler-checkpoint_critic.pth'))
        self.critic_target.load_state_dict(torch.load(f'{path}/crawler-checkpoint_critic_target.pth'))
# +
from tqdm import tqdm
def ddpg(
    agent,
    env,
    n_episodes=1000,
    max_t=300,
    save_dir=None,
):
    """Train `agent` in the Unity `env` with DDPG.

    Params
    ======
        agent: DDPG agent exposing act/step/reset/save
        env: Unity environment (also relies on the global `brain_name`)
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum steps per episode
        save_dir (str | None): checkpoint directory; when None, no checkpoint
            is written on solve

    Returns
    =======
        (scores, avg_scores, solved): per-episode mean scores, their running
        100-episode averages, and whether the solve threshold was reached.
    """
    scores_window = deque(maxlen=100)  # last 100 episode scores
    scores = []
    avg_scores = []
    solved = False
    with tqdm(total=n_episodes) as progress:
        for i_episode in range(1, n_episodes+1):
            env_info = env.reset(train_mode=True)[brain_name]
            states = env_info.vector_observations
            num_agents = len(env_info.agents)
            score = np.zeros(num_agents)
            agent.reset()
            for t in range(max_t):
                actions = agent.act(states)
                env_info = env.step(actions)[brain_name]
                next_states = env_info.vector_observations
                # Reshape rewards/dones into column vectors for the replay buffer.
                rewards = np.expand_dims(np.asanyarray(env_info.rewards), axis=1)
                dones = np.expand_dims(np.asanyarray(env_info.local_done), axis=1)
                agent.step(states, actions, rewards, next_states, dones)
                states = next_states
                score += np.squeeze(rewards)
                if np.any(dones):
                    break
            score = np.mean(score)  # mean over parallel agents
            scores_window.append(score)
            scores.append(score)
            avg_score = np.mean(scores_window)
            avg_scores.append(avg_score)
            progress.set_postfix({"Avg. Score": f"{avg_score:.2f}"})
            progress.update()
            if i_episode >= 100 and np.mean(scores_window) >= 3000.0:
                print(f"Environment solved at {i_episode} episodes with Avg. score: {avg_score:.2f}")
                # Fix: guard against the default save_dir=None, which previously
                # crashed agent.save() exactly when the environment was solved.
                if save_dir is not None:
                    agent.save(save_dir)
                solved = True
                break
    return scores, avg_scores, solved
# +
# %%time
import pickle
import os
import re
# Prefer GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
n_episodes = 100000
# device = "cpu"
# NOTE(review): `state_size`, `action_size` and `env` must be defined by
# earlier notebook cells.
agent = Agent(
    state_size,
    action_size,
    random_seed=123,
    buffer_size=int(1e8),
    batch_size=1024,
    lr_actor=1e-4,
    lr_critic=1e-4,
    gamma=0.99,
    noise_scale=3.0,
    noise_decay=9e-4,
    device=device)
save_dir = "crawler-model"
progress_path = 'crawler-progress'
continue_from_cp = False
cp_sequence = 0
# Resume from the latest checkpoint if one exists.
if os.path.exists(save_dir) and os.path.isdir(save_dir) and len(os.listdir(save_dir)) > 0:
    agent.load(save_dir)
    continue_from_cp = True
    # Next sequence number after the highest-numbered scores-*.pkl file.
    # NOTE(review): assumes progress_path exists and holds at least one
    # matching pickle whenever a checkpoint exists — max([]) would raise.
    cp_sequence = max([int(re.match(r'.+-(\d+)\.pkl$', d).group(1)) for d in os.listdir(progress_path) if re.match(r'.+-(\d+)\.pkl$', d) is not None])
    cp_sequence += 1
# Run experiment
scores, avg_scores, solved = ddpg(agent, env, n_episodes=n_episodes, max_t=2000)
if not solved:
    agent.save(save_dir)
os.makedirs(progress_path, exist_ok=True)
# Persist this run's scores under the current sequence number.
with open(f'{progress_path}/scores-{cp_sequence}.pkl', mode='wb') as f:
    obj = {
        'scores': scores,
        'avg_scores': avg_scores
    }
    pickle.dump(obj, f)
# When resuming, stitch together all runs' scores for plotting below.
if continue_from_cp:
    scores = []
    avg_scores = []
    for i in range(cp_sequence + 1):
        with open(f'{progress_path}/scores-{i}.pkl', mode='rb') as f:
            obj = pickle.load(f)
            scores.extend(obj['scores'])
            avg_scores.extend(obj['avg_scores'])
# +
import matplotlib.pyplot as plt
def plot_scores(scores, avg_scores):
    """Plot per-episode scores together with their running average.

    Params
    ======
        scores (list[float]): mean score of each episode
        avg_scores (list[float]): running 100-episode averages, same length
    """
    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot(111)  # axes the pyplot calls below draw into
    x = np.arange(len(scores))
    y = scores
    plt.plot(x, y, label="scores")
    plt.plot(x, avg_scores, label="avg. scores")
    plt.ylabel("Score", fontsize=14)
    plt.xlabel("Episode #", fontsize=14)
    plt.title("Agent progress over episodes", fontsize=16)
    # Fix: labels were assigned above but never shown — a legend call is
    # required for them to render.
    plt.legend(fontsize=14)
    plt.show()
# -
plot_scores(scores, avg_scores)
def play(agent, env, num_agents, n_episodes=5, max_t=1000):
    """play.
    Uses the provided agent to play the game.
    There is no training in this code, only playing.

    Returns
    =======
        list[float]: the mean score of each episode played.
    """
    scores = []
    with tqdm(total=n_episodes) as progress:
        for i_episode in range(1, n_episodes + 1):
            # NOTE(review): relies on the global `brain_name` set earlier.
            env_info = env.reset(train_mode=False)[brain_name]
            state = env_info.vector_observations
            score = np.zeros(num_agents)
            for _ in range(max_t):
                # Let's not add noise here as we want to exploit the learned behaviour
                action = agent.act(state, add_noise=False)
                env_info = env.step(action)[brain_name]
                next_state, reward, done = env_info.vector_observations, env_info.rewards, env_info.local_done
                state = next_state
                score += reward
                if np.any(done):
                    break
            score = np.mean(score)
            scores.append(score)
            progress.set_postfix({"Avg. Score": f"{np.mean(scores):.2f}"})
            progress.update()
    # Fix: the collected scores were previously discarded (implicit None
    # return); return them so the caller can inspect the evaluation run.
    return scores
play(agent, env, num_agents)
#
# source notebook: continuous-control/Crawler.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="MpxvQ14HFYM1"
# Mount Google Drive into the Colab filesystem.
from google.colab import drive
drive.mount('/content/drive/')
# + id="OIJwvszjF4AZ"
# Authenticate this Colab session and build a PyDrive client.
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
# NOTE: this rebinds `drive`, shadowing the `google.colab.drive` module
# imported above.
drive = GoogleDrive(gauth)
# + id="QsmkWna44Fdc"
project_path = '/content/drive/My Drive/Technical Debt/Codes/Traditional Model/'  # all data files for this notebook live in this Drive folder
# + id="tg0tjgVC4JKu"
def clean_text(text):
    """Replace newline and carriage-return characters with single spaces."""
    for control_char in ("\n", "\r"):
        text = text.replace(control_char, " ")
    return text
# + id="Vq2PZVPxP8Y5"
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import re
# + id="7HLkBop16E6J"
'''
#df_total=pd.read_excel(project_path+'R_issues_comments_list_sentence_level_labeled.xlsx')
df_total=pd.read_excel(project_path+'R_issues_comments_list_sentence_level_labeled_New.xlsx')
df_total = df_total[['Sentence Text','TD (Sentence Level)']]
df_total = df_total.rename({'Sentence Text': 'text', 'TD (Sentence Level)': 'labels'}, axis='columns')
#print(df_total)
df_total['text']=df_total['text'].apply(str)
#print(df_total)
df_total['text']=df_total['text'].map(lambda x: clean_text(x))
#df_total["labels"] = df_total["labels"].apply(lambda x: x.replace("REQUIREMENTS", "REQUIREMENT"))
#df_total['text'] =df_total['text'].str.lower()
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')
features = tfidf.fit_transform(df_total.text).toarray()
labels = df_total.labels'''
# + id="RrHt2rkuPXlY"
import pickle
# Pre-split train/test DataFrames, each with `text` and `labels` columns.
# Fix: open the pickle files via context managers so the handles are closed
# (previously the file objects were leaked).
with open(project_path + "Train_Test_Data/train_df.pkl", "rb") as f:
    train_data = pickle.load(f)
with open(project_path + "Train_Test_Data/test_df.pkl", "rb") as f:
    test_data = pickle.load(f)
# + id="X2PY7i_8SCKA"
from sklearn.feature_extraction.text import CountVectorizer
#TfidfVectorizer
# NOTE: despite its name, `tfidf` is a plain bag-of-words CountVectorizer.
tfidf = CountVectorizer(stop_words='english')
# Fix: fit once and reuse the fitted vocabulary; the original discarded the
# fit_transform result and transformed the training text a second time.
X_train = tfidf.fit_transform(train_data.text).toarray()
y_train = train_data.labels
X_test = tfidf.transform(test_data.text).toarray()
y_test = test_data.labels
# + id="CWl7a8j06Rf7"
## models
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
# Only LinearSVC is evaluated in this run; the other imports support
# experiments kept for reference.
models = [
    LinearSVC()
]
# Fit each model on the vectorized training data and report test-set metrics.
for model in models:
    model_name = model.__class__.__name__
    #X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.33)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print("Performance of "+model_name)
    print(classification_report(y_test,y_pred))
# source notebook: codes/stage-1/Stage1_SVM.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from scipy import stats
import os
import glob
import numpy as np
# Output folders for the CSVs and figures produced by this notebook.
outpath = r'./dataFolders/Output/Step6_v5/'
outpath_fig = r'./dataFolders/Output/Step6_v5/Figure/'
def in_ranges(x, bins):
    """Return one boolean per (low, high) pair in *bins*: True where x lies
    inside that inclusive range."""
    flags = []
    for low, high in bins:
        flags.append(low <= x <= high)
    return flags
# NOTE(review): `path` is not defined anywhere in this notebook — presumably
# the input-data folder; confirm it is set in an earlier cell before running.
notoutliers = pd.read_csv(path + 'AllLight_EveryMoth_notOutliers.csv')
# Rescale DiscoveryTime by 1/100 — TODO confirm the source unit.
notoutliers['DiscoveryTime'] = notoutliers.DiscoveryTime.div(100, axis='index')
notoutliers = notoutliers.drop(columns=['Unnamed: 0', 'Unnamed: 0.1', 'Unnamed: 0.1.1'])
notoutliers.head()
# Light level is the first '_'-separated token of the `name` column.
test = notoutliers.name.str.split('_', expand=True)
notoutliers['lightLevel'] = test[0]
# find the visit numbers that have high exploration times
notoutliers.loc[(notoutliers['lightLevel'] == 'L50') & (notoutliers['DiscoveryTime'] > 15), 'trialNum']
Bins = [(0, 4), (10, 20), (12, 22), (14, 24), (17, 24), (10, 15), (15, 20), (20, 25)]
label = ['(0,4)', '(10,20)', '(12,22)', '(14,24)', '(17,24)', '(10, 15)', '(15,20)', '(20,25)']
# One boolean column per bin: whether each visit's trial number falls in it.
binned = notoutliers['trialNum'].apply(lambda x: pd.Series(in_ranges(x, Bins), label))
notoutliers = notoutliers.join(binned)
for l, sub_df in notoutliers.groupby('lightLevel'):
    for bin_label in label:
        sub_sub_df = sub_df[sub_df[bin_label] == True]
        n = len(sub_sub_df)
        print('%s and %s bin has %i visits' % (l, bin_label, n))
# Fix: the original assigned the (None) return value of to_csv back onto
# `notoutliers`, clobbering the DataFrame; just write the file instead.
notoutliers.to_csv(outpath + 'AllLight_EveryMoth_notOutliers_withDifferentBins.csv')
# ## generate and store the pde and data for plotting later
from scipy.stats import gaussian_kde
# Evaluation grid for the densities: 100 points over [0, 40].
bin_center = np.linspace(0,40,100)
delta = np.diff(bin_center)[0]  # uniform grid spacing, used for normalization
notoutliers = pd.read_csv(outpath + 'AllLight_EveryMoth_notOutliers_withDifferentBins.csv')
# ### store pde
# +
# Kernel-density estimates of DiscoveryTime per light level and trial bin.
SampleSize = pd.DataFrame(columns = ['L0.1', 'L50'], index = label)
pde_df = pd.DataFrame(columns = label, index = bin_center)
for l, subdf in notoutliers.groupby('lightLevel'):
    for bin_label in label:
        df = subdf[subdf[bin_label] == True]
        data = df.DiscoveryTime
        SampleSize.loc[bin_label, l] = len(data)
        kde = gaussian_kde(data)
        temp = kde.pdf(bin_center)
        # Renormalize so the discretized density sums to 1 over the grid.
        temp /= delta * np.sum(temp)
        pde_df.loc[:, bin_label] = temp
    # One density CSV per light level; the sample-size table accumulates both
    # levels and is written once after the loop.
    pde_df.to_csv(outpath + l + '_DifferentBins_pde.csv')
SampleSize.to_csv(outpath + '_DifferentBins_samplesize.csv')
# -
# -
# ## plot the different bins
low_pde = pd.read_csv(outpath + 'L0.1' + '_DifferentBins_pde.csv')
high_pde = pd.read_csv(outpath + 'L50' + '_DifferentBins_pde.csv')
low_pde.head()
SampleSize
# +
import matplotlib.pyplot as plt
import matplotlib
# Embed editable (TrueType) fonts in PDF/PS output instead of Type 3 fonts.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.rcParams['font.size'] = '8'
# -
def plotPDE(ax, labels, early, late, color_early, color_late, early_data, late_data, p_value, early_N, late_N):
    """Draw the early and late PDE curves on *ax*, add rug marks for the raw
    samples of each group, and annotate with the p-value and sample sizes."""
    curves = (
        (early, early_data, color_early, -0.015),
        (late, late_data, color_late, -0.025),
    )
    for pde, samples, curve_color, rug_x in curves:
        ax.plot(pde, labels, color=curve_color)
        # Rug marks: one horizontal tick per raw sample, offset left of the curves.
        ax.plot([rug_x] * len(samples), samples,
                '_',
                ms=5,
                mec=curve_color,
                mew=0.3
                )
    ax.text(0.5, 0.5, 'p = ' + '%0.3f' % p_value, transform=ax.transAxes)
    ax.text(0.5, 0.7, 'n = ' + str(early_N), transform=ax.transAxes, color=color_early)
    ax.text(0.5, 0.6, 'n = ' + str(late_N), transform=ax.transAxes, color=color_late)
def selectlist(lst, *indices):
    """Lazily yield the elements of *lst* found at the given positions."""
    return map(lst.__getitem__, indices)
# +
# Figure 1: early (trials 0-4) vs late PDEs for four overlapping late bins;
# high light level on the top row, low light level on the bottom row.
w = 7
h = 7/3
f1 = plt.figure(figsize = (w,h))
bins_set = ['(10,20)', '(12,22)', '(14,24)', '(17,24)']
# Baseline ("early") samples, shared across all columns.
low_early_data = notoutliers.loc[(notoutliers['(0,4)'] == True) & (notoutliers['lightLevel'] == 'L0.1'),
                                 'DiscoveryTime'].values
high_early_data = notoutliers.loc[(notoutliers['(0,4)'] == True) & (notoutliers['lightLevel'] == 'L50'),
                                  'DiscoveryTime'].values
low_early_N = len(low_early_data)
high_early_N = len(high_early_data)
x_shift = 0
for binlabel in bins_set:
    high_late_data = notoutliers.loc[(notoutliers[binlabel] == True) & (notoutliers['lightLevel'] == 'L50'),
                                     'DiscoveryTime'].values
    low_late_data = notoutliers.loc[(notoutliers[binlabel] == True) & (notoutliers['lightLevel'] == 'L0.1'),
                                    'DiscoveryTime'].values
    high_late_N = len(high_late_data)
    low_late_N = len(low_late_data)
    # Nonparametric early-vs-late comparison within each light level.
    _, p_high = stats.mannwhitneyu(high_early_data, high_late_data)
    _, p_low = stats.mannwhitneyu(low_early_data, low_late_data)
    # plot high light level
    ax_high = f1.add_axes((0 + x_shift, 0.5, 1/4.5, 0.45))
    plotPDE(ax_high, high_pde['Unnamed: 0'], high_pde['(0,4)'], high_pde[binlabel],
            'green', 'palevioletred',
            high_early_data, high_late_data,
            p_high, high_early_N, high_late_N)
    ax_high.set_title(binlabel)
    # plot low light level
    ax_low = f1.add_axes((0 + x_shift, 0, 1/4.5, 0.45))
    plotPDE(ax_low, low_pde['Unnamed: 0'], low_pde['(0,4)'], low_pde[binlabel],
            'green', 'palevioletred',
            low_early_data, low_late_data,
            p_low, low_early_N, low_late_N)
    x_shift += 1/4
# Shared axis limits; strip redundant tick labels from the inner panels.
for axes in f1.axes:
    axes.set_xlim([-0.04, 0.22])
    axes.set_ylim([-0.7, 30])
for axes in list(selectlist(f1.axes, 0, 2, 4, 6)):
    axes.set_xticks([])
for axes in list(selectlist(f1.axes, 2, 3, 4, 5, 6, 7)):
    axes.set_yticks([])
f1.savefig(outpath_fig + 'Learning_differentBins.pdf',
           bbox_inches = 'tight')
# +
# Figure 2: same comparison for three non-overlapping late bins.
w = 7*2
h = 7/3*2
f2 = plt.figure(figsize = (w,h))
bins_set = ['(10, 15)', '(15,20)', '(20,25)']
low_early_data = notoutliers.loc[(notoutliers['(0,4)'] == True) & (notoutliers['lightLevel'] == 'L0.1'),
                                 'DiscoveryTime'].values
high_early_data = notoutliers.loc[(notoutliers['(0,4)'] == True) & (notoutliers['lightLevel'] == 'L50'),
                                  'DiscoveryTime'].values
low_early_N = len(low_early_data)
high_early_N = len(high_early_data)
x_shift = 0
for binlabel in bins_set:
    high_late_data = notoutliers.loc[(notoutliers[binlabel] == True) & (notoutliers['lightLevel'] == 'L50'),
                                     'DiscoveryTime'].values
    low_late_data = notoutliers.loc[(notoutliers[binlabel] == True) & (notoutliers['lightLevel'] == 'L0.1'),
                                    'DiscoveryTime'].values
    high_late_N = len(high_late_data)
    low_late_N = len(low_late_data)
    _, p_high = stats.mannwhitneyu(high_early_data, high_late_data)
    _, p_low = stats.mannwhitneyu(low_early_data, low_late_data)
    # plot high light level
    ax_high = f2.add_axes((0 + x_shift, 0.5, 1/4.5, 0.5))
    plotPDE(ax_high, high_pde['Unnamed: 0'], high_pde['(0,4)'], high_pde[binlabel],
            'green', 'palevioletred',
            high_early_data, high_late_data,
            p_high, high_early_N, high_late_N)
    ax_high.set_title(binlabel)
    # plot low light level
    ax_low = f2.add_axes((0 + x_shift, 0, 1/4.5, 0.5))
    plotPDE(ax_low, low_pde['Unnamed: 0'], low_pde['(0,4)'], low_pde[binlabel],
            'green', 'palevioletred',
            low_early_data, low_late_data,
            p_low, low_early_N, low_late_N)
    x_shift += 1/4
# NOTE(review): the limit loop and savefig below operate on f1, not f2 — f2's
# panels are never limited or saved, which looks like a copy-paste slip from
# the previous cell; confirm the intended figure before relying on the PDF.
for axes in f1.axes:
    axes.set_xlim([-0.04, 0.22])
    # ax.set_ylim([y0,y1])
    # x0,x1 = ax.get_xlim()
    # print(ax.get_xlim())
f1.savefig(outpath_fig + 'L0.1_firstLastPDE.pdf')
f1.clf()
# -
# source notebook: Step6g_Probability_Distributions_DifferentBins.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import torch
# Small numpy-vs-torch demo: build an array, mirror it as a tensor, and
# convert in both directions.
array = [[1,2,3],[4,5,6]]
first_array = np.array(array) # 2x3 array
print("Array Type: {}".format(type(first_array))) # type
print("Array Shape: {}".format(np.shape(first_array))) # shape
print(first_array)
# ### convert numpy array to tensor
# pytorch array
tensor = torch.Tensor(array)
# Fix: `tensor.type` is a bound method; it must be called to return the dtype
# string (previously this printed the method's repr, not e.g. 'torch.FloatTensor').
print("Array Type: {}".format(tensor.type())) # type
print("Array Shape: {}".format(tensor.shape)) # shape
print(tensor)
# ### Ones matrix
# +
print("Numpy {}\n".format(np.ones((3,3))))
# pytorch ones
print(torch.ones((3,3)))
# -
# ### random numbers
# +
print("Numpy {}\n".format(np.random.rand(3,3)))
# pytorch random
print(torch.rand(3,3))
# -
# ### to & fro conversions
# +
array = np.random.rand(3,2)
print("{} {}\n".format(type(array),array))
# +
# from numpy to tensor
from_numpy_to_tensor = torch.from_numpy(array)
print("{}\n".format(from_numpy_to_tensor))
# -
# from tensor to numpy
tensor = from_numpy_to_tensor
from_tensor_to_numpy = tensor.numpy()
print("{} {}\n".format(type(from_tensor_to_numpy),from_tensor_to_numpy))
import torch
from torch.autograd import Variable
import torch.nn as nn
import warnings
import torchvision
# +
##http://howieko.com/projects/classifying_flowers_pytorch/
##https://www.kaggle.com/aradhyamathur/flower-classification-pytorch
##https://katba-caroline.com/what-flower-is-this-developing-an-image-classifier-with-deep-learning-using-pytorch/
# +
#https://medium.com/datadriveninvestor/creating-a-pytorch-image-classifier-da9db139ba80
# -
# source notebook: section2/Lecture18_torch_array.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Importing useful things
from qiskit_nature.drivers import PySCFDriver
from qiskit_nature.transformers import FreezeCoreTransformer
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
from qiskit_nature.mappers.second_quantization import ParityMapper, BravyiKitaevMapper, JordanWignerMapper
from qiskit_nature.converters.second_quantization.qubit_converter import QubitConverter
from qiskit_nature.circuit.library import HartreeFock
from qiskit.circuit.library import TwoLocal
from qiskit_nature.circuit.library import UCCSD, PUCCD, SUCCD
from qiskit import Aer
from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SPSA, SLSQP
from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import NumPyMinimumEigensolverFactory
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
from qiskit.algorithms import VQE
from IPython.display import display, clear_output
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
import numpy as np
# # 1. Initializing the atomic structure of LiH using PySCF driver
#
# First we initialize the LiH molecule (this code was already provided in the exercise) in the most stable configuration of inter-atomic distance i.e. at 1.5474Å between the Li and H atoms.
# LiH at its equilibrium bond length: 1.5474 Å between Li and H along z.
molecule = 'Li 0.0 0.0 0.0; H 0.0 0.0 1.5474'
driver = PySCFDriver(atom=molecule)
qmolecule = driver.run()
# Then we initialize the backend as the `statevector_simulator` for running the VQE problem
backend = Aer.get_backend('statevector_simulator')
# We derive the useful information like number of electrons (`n_el`), number of molecular orbitals (`n_mo`), number of spin orbitals (`n_so`), number of qubits (`n_q`, which is actually equal to `n_so`), and the nuclear repulsion energy of the molecule (`e_nn`).
n_el = qmolecule.num_alpha + qmolecule.num_beta    # total electron count
n_mo = qmolecule.num_molecular_orbitals            # molecular orbitals
n_so = 2 * qmolecule.num_molecular_orbitals        # spin orbitals
n_q = 2 * qmolecule.num_molecular_orbitals         # qubits before any reduction
e_nn = qmolecule.nuclear_repulsion_energy          # nuclear repulsion energy (Ha)
# # 2. Defining the problem
#
# The electronic structure of Hydrogen ($H$) is $[1s^1]$, and for Lithium ($Li$) it is $[1s^2 2s^1]$.
#
# From the above electronic structures, we can see that only the 1st and 2nd molecular orbitals participate in the chemical reaction between $H$ and $Li$ to for $LiH$. The LiH molecule has the following electronic structure:-
#
# $$
# LiH = 1s^2 2s^2
# \tag{2.1}
# $$
#
# The above configuration of the LiH molecule is a stable configuration as it has all the molecular orbitals fully filled, essentially giving it noble-gas-like reactivity properties. The electronegativity of the overall molecule decreases and hence the energy of the reaction also falls down when compared to the inital total energy of the reactants.
#
# Therefore, from the above analysis, we can see that we only need to consider the molecular orbitals number 1 and 2 for this experiment. The other empty orbitals can be removed as they don't play a role the reaction. For the removal process I have used the `FreezeCoreTransformer` from `qiskit_nature.transformers` to remove the orbitals \[3, 4\] as shown below. This gives us a good approximation of the energy value and also doesn't overutilize the resources (in this case, qubits) of the quantum system on which the experiment is being run.
# Freeze the Li 1s core and drop unused orbitals 3 and 4 to shrink the problem.
qmolecule_transformers = [FreezeCoreTransformer(freeze_core=True, remove_orbitals=[3, 4])]
# After applying the `FreezeCoreTransformer`, we have successfully reduced our problem into only calculating the energy for the useful parts of our chemical reaction.
#
# Now I use this instance of the `FreezeCoreTransformer` to derive an electronic structure problem using the `ElectronicStructureProblem` class in the `qiskit_nature.problems.second_quantization.electronic` package.
#
# This gives us access to the second quantized operators of the problem Hamiltonian in terms of the creation ($a^ \dagger$) and annihilation ($a$) fermionic operators. This converts the problem into description of a fermionic system having $N$ single partical states (representing a spin orbital each) which can either be empty or occupied. With this process we can convert the k-local hamiltonian into a 2-local representation. The general representation of the 2-local Hamiltonian looks like:-
#
# $$
# H = \sum _{i,j} h_{ij} a^\dagger _i a_j \ + \ \frac{1}{2} \sum _{i,j,k,l} h_{ijkl} a^\dagger_i a^\dagger_j a_k a_l
# \tag{2.2}
# $$
#
# Once we map the spin orbital electronic structure to qubits, we can easily perform a tensor product of the quantum states to derive the full quantum wavefunction of the molecule. Using this wavefunction then, we can perform quantum operations to find the grund-state energy eigenvalue of the LiH molecule. The above hamiltonian is stored in the variable `main_op`.
# +
# Define the problem
# Build the second-quantized electronic-structure problem with the freeze-core
# reduction applied.
problem = ElectronicStructureProblem(driver, q_molecule_transformers=qmolecule_transformers)
second_q_ops = problem.second_q_ops() # Generate the second-quantized operators
main_op = second_q_ops[0] # Hamiltonian
print('Number of molecular orbitals: ', problem.molecule_data_transformed.num_molecular_orbitals)
print('Number of spin orbitals: ', 2 * problem.molecule_data_transformed.num_molecular_orbitals)
# -
# # 3. Mapping Fermions to Qubits
#
# Once we get the problem hamiltonian, we now have to map the fermionic operators onto the qubits so that e can encode the chemical information of the $LiH$ molecule onto our quantum computer.
#
# Since, we have 3 molecular orbitals (and 6 spin orbitals) associated to our $LiH$ simulation problem, we will have to use atleast 6-qubits to successfully map the fermionic operators to qubits using Jordan-Wigner or Bravyi-Kitaev mapping. However, if we use Parity mapping, it can reduce the number of qubits to just 5-qubits through qubit tapering of fermionic spin-parity symmetries. For this, I use the `ParityMapping` function from the `qiskit_nature.mappers.second_quantization` package.
#
# We can further optimize our resource usage by finding $\mathbb{Z}_2$ -symmetries in the system as we only work with the s-orbital here and hence we get one $\mathbb{Z}_2$ -symmetry. This reduces one more qubit from our bag of qubit resource and finally we end up with having just 4-qubits for our $LiH$ molecule simulation which is the most optimal number of qubits that we can get.
# Qubit Converter
# Parity mapping + two-qubit reduction + Z2-symmetry tapering minimizes the
# qubit count for this problem.
mapper = ParityMapper()
converter = QubitConverter(mapper=mapper, two_qubit_reduction=True, z2symmetry_reduction='auto')
print(converter.z2symmetries)
# Then, we finally generate our qubit operators which are derived from the fermionic operators after this whole conversion procedure. The variable `qubit_op` stores the qubit operators as an array of spare pauli operations applied to initialise the qubits into the proper state corresponding to the quantum wavefunction of the $LiH$ molecule.
# The fermionic operators are mapped to qubit operators
num_particles = (problem.molecule_data_transformed.num_alpha,
                 problem.molecule_data_transformed.num_beta)
qubit_op = converter.convert(main_op, num_particles=num_particles, sector_locator=problem.symmetry_sector_locator)
qubit_op
# Now using the electronic structure problem hamiltonian and the qubit operator, we can diagonalize the actual hamiltonian to get the exactly calculated ground-state energy eigenvalue for the $LiH$ molecule ans store it in the variable `exact_energy` for future reference.
# +
def exact_diagonalizer(problem, converter):
    """Classically solve for the exact ground state via NumPy diagonalization."""
    eigensolver = GroundStateEigensolver(converter, NumPyMinimumEigensolverFactory())
    return eigensolver.solve(problem)
# Reference value to validate the VQE result against.
result_exact = exact_diagonalizer(problem, converter)
exact_energy = np.real(result_exact.eigenenergies[0])
print("Exact electronic energy", exact_energy)
# -
# # 4. Generating the initial state
#
# For generating the initial state, we resort to the Hartree-Fock perturbation theory in order to approximate the qubit operator `qubit_op` as a string of Pauli gates that can be applied to initialize the state of the system.
#
# We can achieve this by using the `HartreeFock` class found in the `qiskit_nature.circuit.library` package like so,
# HartreeFock Initial state
# Hartree-Fock determinant expressed in the reduced qubit representation.
num_particles = (problem.molecule_data_transformed.num_alpha,
                 problem.molecule_data_transformed.num_beta)
num_spin_orbitals = 2 * problem.molecule_data_transformed.num_molecular_orbitals
init_state = HartreeFock(num_spin_orbitals, num_particles, converter)
init_state.draw('mpl')
# # 5. Developing the Problem Ansatz
#
# **Ansatz** is referred an educated guess or an assumption for solving a particular problem. It proivdes us with an initial estimate of the solution framework of any mathematical problem. We use an ansatz in this as we can never be exactly sure about the transformations that one needs to apply on the initial Hartree-Fock state for a molecular simulation problem to get the ground-state energy eigenvalue. Hence, we make an initial assumption and then keep changing the parameters/variables in the ansatz to optimize our final result.
#
# After the initial state is generated and stored in the variable `init_state`, we work on developing the Ansatz for the problem.
#
# One point we can straightaway notice is that our Hamiltonian in $eq. 2.2$ is a 2-local hamiltonian. Hence, the ansatz that we need will likely be such that it will have some single-qubit parameterized rotations and every adjacent qubit in the circuit will be entangled so as to simulate the effect of near-range interactions between the fermions of the $LiH$ molecule. Hence, we can fulfill these constrainsts of the ansatz by using the `TwoLocal` variational form from the `qiskit.circuit.library` package. Therefore, the code for the same will look like so,
# +
# Ansatz Generation Parameters
rotation_blocks = ['ry']           # single-qubit parameterized rotations
entanglement_blocks = 'cx'         # two-qubit entangler
entanglement = 'linear'            # entangle adjacent qubits only
repetitions = 1                    # one rotation+entanglement layer
skip_final_rotation_layer = False
# Instantiating the TwoLocal variational form as the problem ansatz
ansatz_temp = TwoLocal(qubit_op.num_qubits, rotation_blocks, entanglement_blocks, reps=repetitions,
                       entanglement=entanglement, skip_final_rotation_layer=skip_final_rotation_layer)
# Prepending the initial state to the ansatz
ansatz_temp.compose(init_state, front=True, inplace=True)
# Transpiling the ansatz to have only 'u' and 'cx' gates
from qiskit import transpile
ansatz_t = transpile(ansatz_temp, basis_gates=['u', 'cx'], optimization_level=3)
ansatz_t.draw('mpl')
# -
# -
# Finally, we run our VQE algorithm on the above circuit to iteratively optimize the circuit parameters and find the ground-state energy eigenvaluue for the $LiH$ molecule problem like so,
# +
# Print the progress of optimization process
def callback(eval_count, parameters, mean, std):
# Overwrites the same line when printing
display("Evaluation: {}, Energy: {}, Std: {}".format(eval_count, mean, std))
clear_output(wait=True)
# Set initial parameters of the ansatz
try:
initial_point = [0.01] * len(ansatz_t.ordered_parameters)
except:
initial_point = [0.01] * ansatz_t.num_parameters
# Define the optimizer to use
optimizer = SLSQP(maxiter=3000, tol=0.0001)
# Initialize the VQE problem
algorithm = VQE(ansatz_t,
optimizer=optimizer,
quantum_instance=backend,
callback=callback,
initial_point=initial_point)
# Compute minimum energy eigenvalue of the ansatz
result = algorithm.compute_minimum_eigenvalue(qubit_op)
print('Optimal VQE Energy: ', result.optimal_value)
print(result)
print('Completed!')
# -
# Once we get the optimal value from our VQE result, we check if the solution is within the error margin of $4 mHa$ and also count the number of CNOT gates which is the cost for this problem.
# +
# Unroller transpile your circuit into CNOTs and U gates
pass_ = Unroller(['u', 'cx'])
pm = PassManager(pass_)
ansatz_tp = pm.run(ansatz_t)
cnots = ansatz_tp.count_ops()['cx']
score = cnots  # challenge score = number of CNOT gates (lower is better)
accuracy_threshold = 4.0 # in mHa
energy = result.optimal_value
# NOTE(review): the 'pass' check below uses the *signed* error, so an energy
# far below exact_energy would still pass; abs(...) may be intended —
# confirm against the challenge's grading rule.
result_dict = {
    'optimizer': optimizer.__class__.__name__,
    'mapping': converter.mapper.__class__.__name__,
    'ansatz': ansatz_t.__class__.__name__,
    'energy (Ha)': energy,
    'error (mHa)': (energy - exact_energy)*1000,
    'pass': (energy - exact_energy)*1000 <= accuracy_threshold,
    '# of parameters': len(result.optimal_point),
    'final parameters': result.optimal_point,
    '# of evaluations': result.optimizer_evals,
    'optimizer time': result.optimizer_time,
    '# of qubits': int(qubit_op.num_qubits),
    '# of CNOTs': cnots,
    'score': score
}
# Summary of the run
print('Ansatz used: ', result_dict['ansatz'])
print('Number of parameters: ', result_dict['# of parameters'])
print('VQE Energy: ', result_dict['energy (Ha)'])
print('Error: ', result_dict['error (mHa)'])
print('Pass/Fail: ', result_dict['pass'])
print('CNOT score: ', result_dict['score'])
# -
# Check your answer using following code
from qc_grader import grade_ex5
freeze_core = True # set to True if the core electrons were frozen earlier in the notebook
grade_ex5(ansatz_temp,qubit_op,result,freeze_core)
| solutions by participants/ex5/ex5-AbeerVaishnav-3cnot-2.339053mHa-8params.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
h = """
<link type="text/css" rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jsgrid/1.5.3/jsgrid.min.css" />
<link type="text/css" rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jsgrid/1.5.3/jsgrid-theme.min.css" />
<div id="jsGrid"></div>
<script>
var clients = [
{ "Name": "<NAME>", "Age": 25, "Country": 1, "Address": "Ap #897-1459 Quam Avenue", "Married": false },
{ "Name": "<NAME>", "Age": 45, "Country": 2, "Address": "Ap #370-4647 Dis Av.", "Married": true },
{ "Name": "<NAME>", "Age": 29, "Country": 3, "Address": "Ap #365-8835 Integer St.", "Married": false },
{ "Name": "<NAME>", "Age": 56, "Country": 1, "Address": "911-5143 Luctus Ave", "Married": true },
{ "Name": "<NAME>", "Age": 32, "Country": 3, "Address": "Ap #614-689 Vehicula Street", "Married": false }
];
db = []
var i;
for (i = 0; i < 10; i++) {
db = db.concat(clients)
}
var countries = [
{ Name: "", Id: 0 },
{ Name: "United States", Id: 1 },
{ Name: "Canada", Id: 2 },
{ Name: "United Kingdom", Id: 3 }
];
$("#jsGrid").jsGrid({
width: "100%",
height: "400px",
heading: true,
filtering: true,
inserting: false,
editing: false,
selecting: false,
sorting: true,
paging: true,
pageLoading: false,
autoload: true,
pageSize: 15,
pageButtonCount: 3,
controller: {
loadData: function(filter) {
//to do: filtering
return db;
}
},
fields: [
{ name: "Name", type: "text", width: 150, validate: "required" },
{ name: "Age", type: "number", width: 50 },
{ name: "Address", type: "text", width: 200 },
{ name: "Country", type: "select", items: countries, valueField: "Id", textField: "Name" },
{ name: "Married", type: "checkbox", title: "Is Married", sorting: false }
]
});
</script>
"""
from IPython.core.display import display, HTML
display(HTML(h))
# -
| examples/tutorial/Untitled3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# http://nbviewer.ipython.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-1-Introduction-to-Python-Programming.ipynb
#
# 函式 function
#
# 何謂函式(function)?
# help() 與 print() 都是 Python 的內建函式
#
# http://faculty.msmary.edu/heinold/Introduction_to_Programming_Using_Python_Heinold.pdf
#
# http://www.eecs.wsu.edu/~schneidj/PyBook/swan.pdf
# + run_control={"read_only": true}
help(print)
# + run_control={"read_only": true}
help(help)
# + run_control={"read_only": true}
# 列印 Python 系統的版次與關鍵字
import sys
import keyword
print("Python version: ", sys.version_info)
print("Python keywords: ", keyword.kwlist)
# + run_control={"read_only": true}
# 字串, 整數, 浮點數
help(str)
help(int)
help(float)
# + run_control={"read_only": false}
import random
dir(random)
# + run_control={"read_only": false}
help(dir)
# + run_control={"read_only": false}
for i in 2, 4, 6, 8:
print(i)
# + run_control={"read_only": false}
# 讓圖直接 show 在 output
# %matplotlib inline
import matplotlib.pyplot as plt
vals = [3,2,5,3,1]
plt.plot(vals)
# 在 server 中, 以視窗顯示 plt
#plt.show()
# + run_control={"read_only": false}
temp = eval(input('Enter a temperature in Celsius: '))
print('In Fahrenheit, that is', 9/5*temp+32)
# + run_control={"read_only": false}
# %matplotlib inline
from pylab import plot,ylim,xlabel,ylabel,show
from numpy import linspace,sin,cos
x = linspace(0,10,100)
y1 = sin(x)
y2 = cos(x)
plot(x,y1,"k-")
plot(x,y2,"k--")
ylim(-1.1,1.1)
xlabel("x axis")
ylabel("y = sin x or y = cos x")
# + run_control={"read_only": false}
for c in [0b1001000, 0b1100101, 0b1101100, 0b1101100,
0b1101111, 0b0100000, 0b1010111, 0b1101111, 0b1110010,
0b1101100, 0b1100100, 0b0100001, 0b0001010]:
print(chr(c), end="")
# + run_control={"read_only": false}
| content/notebook/intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# !pip install fhir.resources
import base64
import csv
from datetime import date
from functools import reduce
from github import Github
import json
import pprint
import random
import uuid
from fhir.resources.bundle import Bundle
from fhir.resources.devicedefinition import DeviceDefinition
from IPython.core.debugger import set_trace
# +
g = Github('access_token')
rdt_scan_repo = g.get_repo('cjpark87/rdt-scan')
img_path_prefix = 'app/src/main/res/drawable-nodpi/'
# generate reproducible UUIDs
rd = random.Random()
seed = "Wznza9%R$xs8eigsteuKg5pewMnAuFK8fWidZ2D9r8Fstb!Fpa%ovZs3o3yQQ%Jp4Lv2dS#eAbaH^*A@kxCTCSes*XzZfKXx6KRY9rgV!t^8#woP@GrpMHVnXJb3gQG*"
empty_flag = "ZZZ"
line_code_to_text = {
"C": "Control",
"T": "Test",
"G": "IgG",
"M": "IgM",
"G+M": "IgG / IgM"
}
# +
# generate reproducible UUIDs
# rd = random.Random()
# uuid.uuid4()
# +
def is_valid(row):
    """Check that a CSV row carries the fields needed for a DeviceDefinition.

    Truthy iff 'Manufacturer', 'RDT_type' and 'Lines' are all non-empty and
    at least one sample-quantity column (drops or microliters) is present.
    """
    ok = True
    for field in ('Manufacturer', 'RDT_type', 'Lines'):
        # Short-circuiting `and` mirrors the original reduce() chain exactly.
        ok = ok and row[field]
    return ok and (row['Sampleqty drops'] or row['Sampleqty uL'])
def add_image(config):
    """Replace config['REF_IMG'] (a path) with the image's base64-encoded bytes.

    Fetches '<img_path_prefix><path>.jpg' from the rdt-scan GitHub repository,
    retrying with the upper-case '.JPG' extension if the first lookup fails.
    Returns the (mutated) config dict; a config with no reference-image path
    is returned unchanged.
    """
    path = config['REF_IMG']
    if not path:
        # BUG FIX: previously returned None here, which crashed the caller
        # when it iterated over the returned config's items().
        return config
    try:
        content = rdt_scan_repo.get_contents('{}{}.jpg'.format(img_path_prefix, path)).decoded_content
    except Exception:
        # Some images use an upper-case extension; a bare `except:` would
        # also have swallowed KeyboardInterrupt/SystemExit.
        content = rdt_scan_repo.get_contents('{}{}.JPG'.format(img_path_prefix, path)).decoded_content
    config['REF_IMG'] = base64.b64encode(content).decode("utf-8")
    return config
def build_device_definitions(upto=0, seed=seed):
    """Build FHIR DeviceDefinition resources from 'fhir-deviceDefinitions-list.csv'.

    Args:
        upto: if non-zero, stop once the row index exceeds this value.
        seed: seed for the module-level RNG `rd`, so generated UUIDs are
            reproducible across calls.

    Returns:
        list of fhir.resources DeviceDefinition objects, one per valid CSV row.
    """
    # seed reproducible UUIDs per call
    rd.seed(seed)
    device_definitions = []
    with open('fhir-deviceDefinitions-list.csv') as f:
        csv_reader = csv.DictReader(f)
        for i, row in enumerate(csv_reader):
            if upto and i > upto:
                break
            if not is_valid(row):
                print("skipping row {}".format(i))
                continue
            dd_json = {}
            # generate a UUID 4 string with the seed RNG
            dd_json["identifier"] = [
                {
                    "value": str(uuid.UUID(int=rd.getrandbits(128)))
                }
            ]
            dd_json["manufacturerString"] = row['Manufacturer']
            dd_json["deviceName"] = [{
                "name": row['RDT_type'],
                "type": "user-friendly-name"
            }]
            dd_json["udiDeviceIdentifier"] = []
            dd_json["capability"] = []
            dd_json["property"] = []
            if row['GTIN']:
                dd_json["udiDeviceIdentifier"].append({
                    "deviceIdentifier": row['GTIN'],
                    "issuer": "urn:gtin",
                    "jurisdiction": "urn:global"
                })
            if row['Product Code']:
                dd_json["udiDeviceIdentifier"].append({
                    "deviceIdentifier": row['Product Code'],
                    "issuer": "urn:manufacturer",
                    "jurisdiction": "urn:global"
                })
            # define capabilities
            if row['Steps']:
                dd_json["capability"].append({
                    "type": {
                        "text": "instructions"
                    },
                    "description": [
                        {
                            "text": step.strip()
                        } for step in row['Steps'].split('/')
                    ]
                })
            if row['Lines']:
                try:
                    dd_json["capability"].append({
                        "type": {
                            "text": "lines"
                        },
                        "description": [
                            {
                                "coding": [{"code": line}],
                                "text": line_code_to_text[line]
                            } for line in row['Lines'].split('/')
                        ]
                    })
                except KeyError as err:
                    # BUG FIX: on Python 3 the comprehension variable `line`
                    # does not leak into this scope, so the original
                    # `format(line, i)` raised NameError here; report the
                    # missing key carried by the exception instead.
                    print("Line code to text key '{}' not found, skipping row {}".format(err.args[0], i))
                    continue
            if row['Sampletypes']:
                dd_json["capability"].append({
                    "type": {
                        "text": "sample types"
                    },
                    "description": [
                        {
                            "text": sample_type.strip()
                        } for sample_type in row['Sampletypes'].split('/')
                    ]
                })
            # define timing property (min/max minutes until the result is readable)
            timing_value_quantities = []
            if row['minimumtimeforresult']:
                timing_value_quantities.append({
                    "value": float(row['minimumtimeforresult']),
                    "comparator": ">=",
                    "unit": "minutes"
                })
            if row['maximumtimeforresult']:
                timing_value_quantities.append({
                    "value": float(row['maximumtimeforresult']),
                    "comparator": "<=",
                    "unit": "minutes"
                })
            if len(timing_value_quantities):
                dd_json["property"].append({
                    "type": {
                        "text": "Time For Result"
                    },
                    "valueQuantity": timing_value_quantities
                })
            # define temperature property (operating range, degrees Celsius)
            temperature_value_quantities = []
            if row['low_temp']:
                temperature_value_quantities.append({
                    "value": float(row['low_temp']),
                    "comparator": ">=",
                    "unit": "degrees celsius"
                })
            if row['high_temp']:
                temperature_value_quantities.append({
                    "value": float(row['high_temp']),
                    "comparator": "<=",
                    "unit": "degrees celsius"
                })
            if len(temperature_value_quantities):
                dd_json["property"].append({
                    "type": {
                        "text": "Temperature"
                    },
                    "valueQuantity": temperature_value_quantities
                })
            # define sample quantity property (drops and/or microliters)
            sample_quantity_value_quantities = []
            if row['Sampleqty drops']:
                sample_quantity_value_quantities.append({
                    "value": float(row['Sampleqty drops']),
                    "unit": "drop"
                })
            if row['Sampleqty uL']:
                sample_quantity_value_quantities.append({
                    "value": float(row['Sampleqty uL']),
                    "unit": "microliter"
                })
            if len(sample_quantity_value_quantities):
                dd_json["property"].append({
                    "type": {
                        "text": "Sample Quantity"
                    },
                    "valueQuantity": sample_quantity_value_quantities
                })
            # define buffer quantity property
            buffer_quantity_value_quantities = []
            if row['Bufferqty drops']:
                buffer_quantity_value_quantities.append({
                    "value": float(row['Bufferqty drops']),
                    "comparator": ">=",
                    "unit": "drop"
                })
            if row['Bufferqty drops max']:
                buffer_quantity_value_quantities.append({
                    "value": float(row['Bufferqty drops max']),
                    "comparator": "<=",
                    "unit": "drop"
                })
            if row['Bufferqty uL']:
                buffer_quantity_value_quantities.append({
                    "value": float(row['Bufferqty uL']),
                    "unit": "microliter"
                })
            if len(buffer_quantity_value_quantities):
                dd_json["property"].append({
                    "type": {
                        "text": "Buffer Quantity"
                    },
                    "valueQuantity": buffer_quantity_value_quantities
                })
            # define rdt scan configuration property (JSON blob from the CSV,
            # with the reference image inlined as base64 by add_image)
            if row['RDTScan Configuration']:
                rdtscan_json = json.loads(row['RDTScan Configuration'])
                rdtscan_json = add_image(rdtscan_json)
                value_code = [{
                    "coding": [
                        {
                            "code": key
                        }
                    ],
                    "text": str(value)
                } for key, value in rdtscan_json.items()]
                dd_json["property"].append({
                    "type": {
                        "text": "RDTScan Configuration"
                    },
                    "valueCode": value_code
                })
            # convert JSON to FHIR class
            dd = DeviceDefinition(dd_json)
            device_definitions.append(dd)
    return device_definitions
device_definitions = build_device_definitions()
device_definitions_json = [{"resource": x.as_json()} for x in device_definitions]
bundle = Bundle({
"identifier": {
"value": "rdt-og-device-definitions"
},
"type": "collection",
"entry": device_definitions_json
})
with open('device_definitions-rdt-og-{}.json'.format(date.today().isoformat()), 'w') as f:
f.write(json.dumps(bundle.as_json(), indent=4))
pprint.pprint(device_definitions_json)
# -
| scripts/device-definitions-notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Teste de hipóteses T
import numpy as np
from scipy import stats
from scipy.stats import ttest_rel
# Original sample and a paired sample uniformly inflated by 2%.
dados_originais = np.array([149,160,147,189,175,168,156,160,152])
dados_originais.mean(), np.std(dados_originais)
dados_novos = dados_originais * 1.02
dados_novos.mean(), np.std(dados_novos)
# Paired (dependent) t-test: H0 says both samples share the same mean.
_, p = ttest_rel(dados_originais, dados_novos)
p
alpha = 0.01
if p < alpha:
    print('Hipóteses nula rejeitada.')
else:
    # BUG FIX: a non-significant p-value does not "accept" the alternative
    # hypothesis — it merely fails to reject the null hypothesis.
    print('Hipótese nula não rejeitada.')
| 8_intervalo_confianca/teste_hipotese_t.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Analysis
# 1. From the tested treatments, Capomulin and Ramicane show the largest reduction in tumor volume. Given how similarly both treatments performed, further testing is necessary to determine which regimen will work the best.
#
# 2. The correlation coefficient for mouse weight and average tumor volume is approximately .83 meaning we have a very strong linear relationship between these two variables.
#
# 3. With an r-squared value of .6962, we know that approximately 70% variation from the mean is explained by our model. While this model provides a fairly strong capacity to predict tumor volume for a given weight, adding other variables like age, breed, and sex would likely increase its effectiveness.
#
# ### Import Depedencies and Read CSV Data
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata_path = "Resources/Mouse_metadata.csv"
study_results_path = "Resources/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
combined_data_df = pd.merge(mouse_metadata, study_results, on = 'Mouse ID')
# Display the data table for preview
combined_data_df.head()
# -
# Checking the number of mice.
mouse_count1 = combined_data_df['Mouse ID'].nunique()
mouse_count1
#check observation count
combined_data_df['Mouse ID'].count()
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicated_vals = combined_data_df[combined_data_df.duplicated(subset = ['Mouse ID', 'Timepoint'], keep = False)]
duplicated_vals
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
clean_df = combined_data_df.drop_duplicates(subset = ['Mouse ID', 'Timepoint'], keep = False)
clean_df.head()
# Checking the number of mice in the clean DataFrame.
clean_mouse_count = clean_df['Mouse ID'].nunique()
clean_mouse_count
#Check observation count on clean data
clean_df['Mouse ID'].count()
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen: mean, median, variance, standard deviation, and SEM of the tumor volume.
#Group Dataframe by Drug Regimen
regimen_groups = clean_df.groupby(['Drug Regimen'])
#Find mean for each regimen group
regimen_mean = regimen_groups['Tumor Volume (mm3)'].mean()
#Find median for each regimen group
regimen_median = regimen_groups['Tumor Volume (mm3)'].median()
#Find variance for each regimen group
regimen_variance = regimen_groups['Tumor Volume (mm3)'].var()
#Find standard deviation for each regimen group
regimen_std = regimen_groups['Tumor Volume (mm3)'].std()
#Find sem for each regimen group
regimen_sem = regimen_groups['Tumor Volume (mm3)'].sem()
# +
# Assemble the resulting series into a single summary dataframe.
summary_table = pd.DataFrame({"Mean": regimen_mean,
"Median":regimen_median,
"Variance":regimen_variance,
"Standard Deviation": regimen_std,
"SEM": regimen_sem})
summary_table
# +
# Using the aggregation method, produce the same summary statistics in a single line
aggregate_df = clean_df.groupby('Drug Regimen').aggregate({"Tumor Volume (mm3)": ['mean', 'median', 'var',
'std', 'sem']})
aggregate_df
# -
# ## Bar and Pie Charts
# +
# Get value counts for each regimen
regimen_count = clean_df['Drug Regimen'].value_counts()
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas.
regimen_count = clean_df['Drug Regimen'].value_counts().plot.bar(width=0.5)
# Set labels for axes
regimen_count.set_xlabel("Drug Regimen")
regimen_count.set_ylabel("Number of Observations")
regimen_count.set_title("Treatment Regimen Observation Count")
# -
regimen_count
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot.
# Determine number of data points
py_regimen_count = clean_df['Drug Regimen'].value_counts()
# Set X axis
x_axis = np.arange(len(py_regimen_count))
#Create bar plot
plt.bar(x_axis, py_regimen_count, width = 0.5)
# Set names for drug regimen groups
tick_locations = [value for value in x_axis]
plt.xticks(tick_locations, py_regimen_count.index.values)
#Change orientation of x labels
plt.xticks(rotation=90)
# Add labels and title
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Observations")
plt.title('Treatment Regimen Observation Count')
# Display results
plt.show()
# -
# Determine number of data points
py_regimen_count = clean_df['Drug Regimen'].value_counts()
py_regimen_count
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
# Find distribition of mice by sex
sex_count = clean_df['Sex'].value_counts()
# Generate Pie chart for sex distribution
sex_distribution_chart = sex_count.plot.pie(startangle=90, title='Distribution by Sex', autopct="%1.1f%%")
# Hide Y label to improve presentation
sex_distribution_chart.set_ylabel('')
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# Identify distribution of data by sex
py_sex_distribution = clean_df['Sex'].value_counts()
# Tell matplotlib to create a pie chart filled with corresponding percentages and displayed vertically
plt.pie(py_sex_distribution, labels=py_sex_distribution.index.values, startangle=90, autopct="%1.1f%%")
plt.title('Distribution by Sex')
# Display resulting plot
plt.show()
# -
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens: Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
maxtimept_df = pd.DataFrame(clean_df.groupby('Mouse ID')['Timepoint'].max()).reset_index().rename(columns={'Timepoint': 'Timepoint (Max)'})
clean_max_df = pd.merge(clean_df, maxtimept_df, on='Mouse ID')
clean_max_df.head()
# +
regimens = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin']
regimen_values = []
for regimen in regimens:
# create dataframe with all regimens we are interested in
selected_regimens_df = clean_max_df.loc[clean_max_df['Drug Regimen'] == regimen]
# find last time point using max and store in another dataframe
results_df= selected_regimens_df.loc[selected_regimens_df['Timepoint'] == selected_regimens_df['Timepoint (Max)']]
# Get Tumor volume from clean_max_df dataframe
values = results_df['Tumor Volume (mm3)']
regimen_values.append(values)
# Calculate Quartiles and IQR
quartiles = values.quantile([0.25, 0.5, 0.75])
upperquartile = quartiles[0.75]
lowerquartile = quartiles[0.25]
iqr = upperquartile - lowerquartile
#print results
print(f" IQR for {regimen} is {iqr}")
#Find upper and lower bounds
upper_bound = upperquartile + (1.5 * iqr)
lower_bound = lowerquartile - (1.5 * iqr)
print(f"Upper Bound for {regimen}: {upper_bound}")
print(f"Lower Bound for {regimen}: {lower_bound}")
# Find Outliers
outliers_count = (values.loc[(clean_max_df['Tumor Volume (mm3)'] >= upper_bound) |
(clean_max_df['Tumor Volume (mm3)'] <= lower_bound)]).count()
print(f" The {regimen} regimen has {outliers_count} outlier(s)")
# +
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# Create Box Plot
plt.boxplot(regimen_values)
# Add Title and Labels
plt.title('Tumor Volume by Drug')
plt.ylabel(' Tumor Volume (mm3)')
plt.xticks([1, 2, 3, 4], ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin'])
# -
# ## Line and Scatter Plots
# +
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
# Isolate Capomulin regimen oberservations
Capomulin_df = clean_df.loc[clean_df['Drug Regimen'] == 'Capomulin']
Capomulin_mouse= Capomulin_df.loc[Capomulin_df['Mouse ID'] == "b128",:]
Capomulin_mouse.head()
#create chart
plt.plot(Capomulin_mouse['Timepoint'], Capomulin_mouse['Tumor Volume (mm3)'], marker = 'o')
# Add labels and title to plot
plt.xlabel("Time (days)")
plt.ylabel("Tumor Volume (mm3)")
plt.title("Capomulin Treatment for Mouse b128")
plt.show()
# +
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
# Isolate Capomulin regimen oberservations
capomulin_df = clean_df.loc[clean_df['Drug Regimen'] == 'Capomulin']
#create df with average tumor volumes
Avg_Tumor_Vol = pd.DataFrame(capomulin_df.groupby('Mouse ID')['Tumor Volume (mm3)'].mean())
# Merge with capomulin_df
Average_Tumor_Volume_df =pd.merge(capomulin_df, Avg_Tumor_Vol, on = 'Mouse ID', how = "left").rename(columns = {'Tumor Volume (mm3)_y' : 'Avg. Tumor Volume'})
Average_Tumor_Volume_df.head()
# Define Variables for scatter plot
x_axis = Average_Tumor_Volume_df['Weight (g)']
y_axis = Average_Tumor_Volume_df['Avg. Tumor Volume']
#Create scatter plot
plt.scatter(x_axis, y_axis)
# Add labels and title to plot
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title('Average Tumor Volume by Weight')
# Display plot
plt.show()
# -
# ## Correlation and Regression
# Calculate the correlation coefficient and linear regression model
correlation = st.pearsonr(x_axis, y_axis)
print(f"""The correlation between weight and average tumor volume in the Capomulin regimen is {round((correlation[0]), 4)}.""")
# +
# For mouse weight and average tumor volume for the Capomulin regimen
(slope, intercept, rvalue, pvalue, stderr) = st.linregress(x_axis, y_axis)
regression_values = x_axis * slope + intercept
linear_equation = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
# Plot linear regression on to the scatter plot
plt.scatter(x_axis,y_axis)
plt.plot(x_axis,regression_values,"r-")
#apply labels and title
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title('Average Tumor Volume by Weight')
# Add linear equation to the scatterplot
plt.annotate(linear_equation,(20,37), fontsize=15, color="black")
# Display plot
plt.show()
# +
# Calculate r squared to see how well our model predicts average tumor volume for a given weight
rsquared = round((rvalue**2),4)
rsquared
# -
| pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="gmbFXmhc_GvB" colab_type="text"
# ## If fetching data from Google Drive
# My model
# + id="H63lYTIIKwqR" colab_type="code" outputId="0d8c7996-b5a5-4b5a-c912-3fcd47d92da5" colab={"base_uri": "https://localhost:8080/", "height": 121}
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="ZSHFW_Iq_hXQ" colab_type="text"
# # Dataset
# + id="AHlJ2PS3K-xT" colab_type="code" outputId="b80e154b-a47f-47f2-8b95-00a1f0a4e76a" colab={"base_uri": "https://localhost:8080/", "height": 52}
# %%time
# !unzip -qq '/content/gdrive/My Drive/modest_museum_dataset/modest_museum_dataset.zip' -d .
# + [markdown] id="49_YCuX75n0E" colab_type="text"
# # Imports
# + id="xBTBHup5LAR_" colab_type="code" colab={}
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.nn as nn
import PIL
import numpy as np
import random
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau
# + [markdown] id="d4D9x6Ao5tSB" colab_type="text"
# # Import Packages
# + id="lLrNx-W4LIIR" colab_type="code" colab={}
from deepnet.data.dataset.modest import ModestMuseum
from deepnet.utils.cuda import initialze_cuda
from deepnet.model.models.model import MaskNet3, DepthMaskNet
from deepnet.model.train import Train
from deepnet.model.losses.loss_combination import BCE_RMSE_LOSS, SSIM_RMSE_LOSS, BCE_SSIM_LOSS, RMSE_SSIM_LOSS, SSIM_DICE_LOSS, RMSE_DICE_LOSS
from deepnet.model.losses.ssim import SSIM, MS_SSIM
from deepnet.model.losses.dice_loss import DiceLoss
from deepnet.utils.plot import Plot
from deepnet.utils.checkpoint import Checkpoint
from deepnet.utils.summary import summary
from deepnet.utils.tensorboard import Tensorboard
# + [markdown] id="gk1Dy817_kNm" colab_type="text"
# # Initialize Cuda
# + id="Ae896dDavqJM" colab_type="code" outputId="7b64473b-8fac-42fa-b2b8-fd53eacda6ce" colab={"base_uri": "https://localhost:8080/", "height": 67}
# %%time
cuda, device = initialze_cuda(1)
# + [markdown] id="MDnFPbsU_oYH" colab_type="text"
# ## Device Description
# + id="ck8YF9pH_vKs" colab_type="code" outputId="b0dc96ae-6bae-4ab7-cb5b-ca30f95415b8" colab={"base_uri": "https://localhost:8080/", "height": 302}
# !nvidia-smi
# + id="qMGuEZCm_tYq" colab_type="code" outputId="39aa9115-075c-41a4-d64c-7d9c8e4218ad" colab={"base_uri": "https://localhost:8080/", "height": 50}
# %%time
modest = ModestMuseum(
cuda= cuda,
batch_size = 128,
num_workers = 16,
path = 'modest_museum_dataset',
resize=(64,64),
train_test_split=0.7,
seed=1)
# + [markdown] id="UUPueP6z_-TN" colab_type="text"
# # Dataloader
# + id="NeemnIC1LIPm" colab_type="code" colab={}
trainloader = modest.dataloader(train=True)
testloader = modest.dataloader(train=False)
# + [markdown] id="5NKXiVSvABjF" colab_type="text"
# # Dataset Visualization
# + id="oDYE0L9oUstv" colab_type="code" outputId="916d648f-e25c-429d-9832-7daf40a46cbd" colab={"base_uri": "https://localhost:8080/", "height": 699}
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
columns = 4
rows = 5
images = []
for i in range(rows):
idx = random.randint(0, len(trainloader))
batch = trainloader.dataset.dataset[idx]
images.append(batch['bg'])
images.append(batch['bg_fg'])
images.append(batch['bg_fg_mask'])
images.append(batch['bg_fg_depth'])
fig=plt.figure(figsize=(12,12))
for i in range(1, columns*rows +1):
img = Image.open(images[i-1])
fig.add_subplot(rows, columns, i)
plt.axis('off')
if images[i-1].split('/')[1] == 'bg':
plt.title('bg')
plt.imshow(img)
elif images[i-1].split('/')[1] == 'bg_fg':
plt.title('bg_fg')
plt.imshow(img)
elif images[i-1].split('/')[1] == 'bg_fg_mask':
plt.title('bg_fg_mask')
plt.imshow(img, cmap='gray')
elif images[i-1].split('/')[1] == 'bg_fg_depth_map':
plt.title('bg_fg_depth_map')
plt.imshow(img)
plt.show()
# + [markdown] id="2Qd6cJDoALsR" colab_type="text"
# # Model Architecture
# + id="D2fy-2piLhyv" colab_type="code" outputId="01de3a0f-493c-42c5-cccf-781d7b2cc41a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %%time
model = DepthMaskNet()
model = model.to(device)
summary(model, {k:v for idx,(k,v) in enumerate(modest.input_size.items()) if idx<2 })
# + [markdown] id="PUHlLTxU7y8V" colab_type="text"
# # Tensorboard
# + id="U8qnznkQA8Yn" colab_type="code" outputId="ff6415d4-cd31-415b-a149-bbdf9a529600" colab={"base_uri": "https://localhost:8080/", "height": 838}
# %load_ext tensorboard
# %tensorboard --logdir runs
# + [markdown] id="j88-_ZIgAnyZ" colab_type="text"
# # Training
# ## Modify Learner Class
# + id="gSjdRF5F-8tY" colab_type="code" colab={}
class ModestTrain(Train):
    """Training adapter for the Modest Museum dataset.

    Maps raw dataloader batches onto (feature, target) dicts on the compute
    device, and post-processes the two model heads (mask, depth) into
    prediction dicts.
    """

    def __init__(self):
        return

    def _fetch_data(self, batch, device):
        """Move one batch's inputs and targets onto `device`."""
        feature = {key: batch[key].to(device) for key in ('bg', 'bg_fg')}
        target = {key: batch[key].to(device) for key in ('bg_fg_mask', 'bg_fg_depth')}
        return feature, target

    def _fetch_result(self, prediction):
        """Mask head is kept raw; depth head is squashed through a sigmoid."""
        mask_out, depth_out = prediction[0], prediction[1]
        return {
            'bg_fg_mask': mask_out,
            'bg_fg_depth': torch.sigmoid(depth_out)
        }

    def _fetch_sigmoid_data(self, prediction):
        """Apply a sigmoid to both heads."""
        mask_out, depth_out = prediction[0], prediction[1]
        return {
            'bg_fg_mask': torch.sigmoid(mask_out),
            'bg_fg_depth': torch.sigmoid(depth_out)
        }
modest_train = ModestTrain()
# + [markdown] id="ApYRwLDjArXC" colab_type="text"
# ## Optimizer and Scheduler
# + id="7aEBM680LINc" colab_type="code" colab={}
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-8, momentum=0.9)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=2, verbose=True,
threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0,
eps=1e-08)
# + [markdown] id="nV1Eiu3XAQcx" colab_type="text"
# ## Sample Images for Tensorboard
# + id="AYixTrxUASsN" colab_type="code" outputId="c4237aae-a449-45b4-c82e-d82563648ed3" colab={"base_uri": "https://localhost:8080/", "height": 50}
# %%time
# Pick 32 random test samples to build a fixed visualisation batch.
img_idxes = random.sample(range(len(modest._test_data)), 32)
# Prepare Images
# Seed each tensor with the first sampled pair, then stack the rest below.
images = {
    'bg': modest._test_data[img_idxes[0]]['bg'].unsqueeze(0),
    'bg_fg': modest._test_data[img_idxes[0]]['bg_fg'].unsqueeze(0)
}
for idx in img_idxes[1:]:
    # Concatenate along the batch dimension (dim 0).
    images['bg'] = torch.cat((
        images['bg'], modest._test_data[idx]['bg'].unsqueeze(0)
    ), 0)
    images['bg_fg'] = torch.cat((
        images['bg_fg'], modest._test_data[idx]['bg_fg'].unsqueeze(0)
    ), 0)
# Move the visualisation batch to the training device (GPU/CPU).
images['bg'] = images['bg'].to(device)
images['bg_fg'] = images['bg_fg'].to(device)
# + [markdown] id="31BQZLt93Oc5" colab_type="text"
# # BCE-RMSE Loss
# + id="gpM3y4FDQR4x" colab_type="code" colab={}
# Loss, best-model checkpointing, and Tensorboard logging for this run.
criterion = BCE_RMSE_LOSS()
checkpoint = Checkpoint('bce_rmse2/checkpoint.pth', monitor='Loss', verbose=1)
tensorboard = Tensorboard('runs/bce_rmse2',images)
# + id="NzDPzfnSMFaJ" colab_type="code" outputId="f29eb8e7-2a9a-4bfd-9528-c3bd889598c0" colab={"base_uri": "https://localhost:8080/", "height": 423}
# %%time
# First training stage on the original-resolution loaders.
# NOTE(review): the first positional argument (0) is presumably the
# starting epoch index — confirm against `learner`'s signature.
epochs = 3
model.learner(
    0,
    model,
    tensorboard,
    modest_train,
    trainloader,
    testloader,
    device,
    optimizer,
    criterion,
    epochs,
    metrics=['rmse','iou'],
    callbacks=[scheduler, checkpoint]
)
# + [markdown] id="AFF3zuPCnXLC" colab_type="text"
# ## Training for 128x128
# + id="yBmOBmsRt6Ox" colab_type="code" colab={}
# Reload the last saved weights before switching resolution.
checkpoint = Checkpoint('bce_rmse2/checkpoint.pth', monitor='Loss', verbose=1, last_reload=True)
# + id="aCwIbNOBnV5X" colab_type="code" outputId="62a27ee8-cf45-48f3-f034-b77ffa13008b" colab={"base_uri": "https://localhost:8080/", "height": 50}
# %%time
# Rebuild the dataset at 128x128 for the second training stage.
modest = ModestMuseum(
    cuda= cuda,
    batch_size = 128,
    num_workers = 16,
    path = 'modest_museum_dataset',
    resize=(128,128),
    train_test_split=0.7,
    seed=1)
nexttrainloader = modest.dataloader(train=True)
nexttestloader = modest.dataloader(train=False)
# + id="ppIZHmQ9u71G" colab_type="code" outputId="53af92cf-f05c-49a2-e274-77e21ce93552" colab={"base_uri": "https://localhost:8080/", "height": 423}
# %%time
#epoch 6-9
# NOTE(review): label above may be stale — the call starts at 3 and runs
# 3 more epochs (3-5); confirm intended numbering.
epochs = 3
model.learner(
    3,
    model,
    tensorboard,
    modest_train,
    nexttrainloader,
    nexttestloader,
    device,
    optimizer,
    criterion,
    epochs,
    metrics=['rmse','iou'],
    callbacks=[scheduler, checkpoint]
)
# + id="oU00prJQV12R" colab_type="code" colab={}
| experiments/Enhanced_BCE_RMSE_DepthPrediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
## For Directed Graph
from collections import defaultdict
class graph:
    """Directed graph stored as an adjacency list, with cycle detection."""

    def __init__(self):
        # Maps node -> list of successor nodes.
        self.graph = defaultdict(list)

    def addEdge(self, u, v):
        """Add the directed edge u -> v, ensuring v also gets an entry."""
        self.graph[u].append(v)
        if not self.graph[v]:
            self.graph[v] = []

    def cycle(self, v, visited, _stack=None):
        """DFS from v; return "Cycle Found" if a cycle is reachable, else "No cycle".

        `visited` marks nodes explored by this or earlier calls (1 = seen),
        matching the caller's `defaultdict(int)` convention.  `_stack`
        tracks the current recursion path: in a directed graph only a back
        edge to a node on this path proves a cycle — a plain revisit may be
        a harmless cross edge in a DAG.  The original code returned after
        exploring only the first successor and treated any revisit as a
        cycle; both defects are fixed here.  The extra parameter defaults
        to None, so existing callers are unaffected.
        """
        if _stack is None:
            _stack = set()
        visited[v] = 1
        _stack.add(v)
        for i in self.graph[v]:
            if i in _stack:
                # Back edge onto the current DFS path.
                return "Cycle Found"
            if not visited[i] and self.cycle(i, visited, _stack) == "Cycle Found":
                return "Cycle Found"
        # v is fully explored with no cycle below it.
        _stack.discard(v)
        return "No cycle"
if __name__=="__main__":
    g = graph()
    # Directed edges forming the 3-cycle 0 -> 1 -> 2 -> 0.
    g.addEdge(0, 1)
    g.addEdge(2, 0)
    g.addEdge(1, 2)
    # Any node not yet seen maps to 0 (unvisited).
    visited = defaultdict(int)
    # Start a DFS from every still-unvisited node so every component is
    # checked; note only the result of the last DFS is printed.
    for i in g.graph:
        if not visited[i]:
            res = g.cycle(i, visited)
    print(res)
    print(g.graph)
    print(visited)
# +
## For Undirected Graph
from collections import defaultdict
class graph:
    """Undirected graph stored as an adjacency list, with cycle detection."""

    def __init__(self):
        # Maps node -> list of neighbour nodes (each edge stored twice).
        self.graph = defaultdict(list)

    def addedge(self, u, v):
        """Add the undirected edge u -- v."""
        self.graph[u].append(v)
        self.graph[v].append(u)

    def cycle(self, v, visited, parent = -1):
        """DFS from v; return "Cycle Found" if v's component has a cycle.

        In an undirected graph, reaching an already-visited node by any
        edge other than the one back to `parent` proves a cycle.  The
        original code recursed into only the first eligible neighbour and
        returned that result unconditionally, missing cycles reachable
        through later neighbours; this version explores them all.
        """
        visited[v] = 1
        for i in self.graph[v]:
            # Skip the edge we arrived on (parent == -1 marks the root).
            if i == parent and parent != -1:
                continue
            if visited[i]:
                return "Cycle Found"
            if self.cycle(i, visited, v) == "Cycle Found":
                return "Cycle Found"
        return "No cycle"
if __name__=="__main__":
    g = graph()
    # Undirected path 0 - 1 - 2 - 3 - 4 (acyclic).
    g.addedge(0, 1)
    g.addedge(1, 2)
    g.addedge(2, 3)
    g.addedge(3, 4)
    # Any node not yet seen maps to 0 (unvisited).
    visited = defaultdict(int)
    # Check every component; only the last component's result is printed.
    for i in g.graph:
        if not visited[i]:
            res = g.cycle(i, visited)
    print(res)
    print(g.graph)
# -
| Graph/Cycle in Graph .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # I- Demo. Data Access from AVISO+ repository
# This notebook aims at documenting how to access & manipulate the input datasets for one "ocean data challenge".
# Simulated Sea Surface Height (SSH) datasets are available on the AVISO+ opendap server.
# The **2020a-SSH-mapping-NATL60-grid** dataset refers to the reference simulation, a.k.a NATL60-CMJ165 nature run carried out by the MEOM Team. The **2020a-SSH-mapping-NATL60-along-track** corresponds to the observations datasets (for various altimeter missions) based on nadir (TOPEX/Poseidon, Jason1, Envisat, Geosat-2) and large swath (SWOT) orbits constructed with the [SWOTsimulator](https://github.com/SWOTsimulator/swotsimulator) package.
# The example below read the dataset using an xarray interface for accessing OpenDAP datasets with pydap. Alternatively, you may use the ```wget``` command to download the files. Note that users must first create an AVISO+ account to access the data. You can follow [this guide](https://github.com/ocean-data-challenges/2020a_SSH_mapping_NATL60/wiki/AVISO---account-creation) for creating your account...
import xarray as xr
import requests as rq
import hvplot.xarray
import sys
# ### Open your AVISO+ session: fill the ```<AVISO_LOGIN>``` and ```<AVISO_PWD>``` items below
# Open an authenticated session on the AVISO+ THREDDS/OpenDAP server.
my_aviso_session = rq.Session()
my_aviso_session.auth = ("<AVISO_LOGIN>", "<AVISO_PWD>")
# Base OpenDAP URL of the along-track (pseudo-observation) datasets.
url_alongtrack = 'https://tds.aviso.altimetry.fr/thredds/dodsC/2020a-SSH-mapping-NATL60-along-track'
# Make the repository root importable so `src.mod_plot` resolves.
sys.path.append('..')
from src.mod_plot import *
# ### Load Jason1 pseudo obs.
url_ds_j1 = f'{url_alongtrack}/2020a_SSH_mapping_NATL60_jason1.nc'
store_ds_j1 = xr.backends.PydapDataStore.open(url_ds_j1, session=my_aviso_session)
ds_j1 = xr.open_dataset(store_ds_j1)
ds_j1
# +
# Alternatively, you may use wget:
# #!wget --user '<AVISO_LOGIN>' --password '<<PASSWORD>>' 'https://tds.aviso.altimetry.fr/thredds/fileServer/2020a-SSH-mapping-NATL60-along-track/2020a_SSH_mapping_NATL60_jason1.nc'
# -
# ### Load Envisat pseudo obs.
url_ds_en = f'{url_alongtrack}/2020a_SSH_mapping_NATL60_envisat.nc'
store_ds_en = xr.backends.PydapDataStore.open(url_ds_en, session=my_aviso_session)
ds_en = xr.open_dataset(store_ds_en)
ds_en
# ### Load Topex/Poseidon Interleaved pseudo obs.
url_ds_tpn = f'{url_alongtrack}/2020a_SSH_mapping_NATL60_topex-poseidon_interleaved.nc'
store_ds_tpn = xr.backends.PydapDataStore.open(url_ds_tpn, session=my_aviso_session)
ds_tpn = xr.open_dataset(store_ds_tpn)
ds_tpn
# ### Load Geosat2 pseudo obs.
url_ds_g2 = f'{url_alongtrack}/2020a_SSH_mapping_NATL60_geosat2.nc'
store_ds_g2 = xr.backends.PydapDataStore.open(url_ds_g2, session=my_aviso_session)
ds_g2 = xr.open_dataset(store_ds_g2)
ds_g2
# ### Load SWOT nadir pseudo obs.
url_ds_swot_nadir = f'{url_alongtrack}/2020a_SSH_mapping_NATL60_nadir_swot.nc'
store_ds_swot_nadir = xr.backends.PydapDataStore.open(url_ds_swot_nadir, session=my_aviso_session)
ds_swot_nadir = xr.open_dataset(store_ds_swot_nadir)
ds_swot_nadir
# ### Load SWOT karin pseudo obs.
url_ds_swot_karin = f'{url_alongtrack}/2020a_SSH_mapping_NATL60_karin_swot.nc'
store_ds_swot_karin = xr.backends.PydapDataStore.open(url_ds_swot_karin, session=my_aviso_session)
ds_swot_karin = xr.open_dataset(store_ds_swot_karin)
ds_swot_karin
# ### Load Reference gridded SSH
url_dc_ref = 'https://tds.aviso.altimetry.fr/thredds/dodsC/2020a-SSH-mapping-NATL60-grid'
store_dc_ref = xr.backends.PydapDataStore.open(url_dc_ref, session=my_aviso_session)
# Chunked open so the large reference grid is read lazily.
dc_ref = xr.open_dataset(store_dc_ref, chunks={'time': '100MB'})
dc_ref
# ### Example of figures
# NOTE(review): `numpy` is never imported explicitly in this notebook; it
# presumably leaks in via `from src.mod_plot import *` — confirm, or add
# an explicit `import numpy` at the top.
list_of_dataset = [ds_j1, ds_g2, ds_en, ds_tpn]
central_date = numpy.datetime64('2012-10-05')
delta_t = numpy.timedelta64(5, 'D')
plot_demo_obs(list_of_dataset, dc_ref, central_date, delta_t)
list_of_dataset = [ds_swot_karin]
central_date = numpy.datetime64('2012-10-05')
delta_t = numpy.timedelta64(5, 'D')
plot_demo_obs(list_of_dataset, dc_ref, central_date, delta_t)
# ### Example of "interactive" figures
list_of_dataset = [ds_j1, ds_g2, ds_en, ds_tpn]
central_date = numpy.datetime64('2012-10-05')
delta_t = numpy.timedelta64(5, 'D')
hvplot_demo_obs_nadir(list_of_dataset, dc_ref, central_date, delta_t)
# Note: the karin helper takes a single dataset, not a list.
list_of_dataset = ds_swot_karin
central_date = numpy.datetime64('2012-10-05')
delta_t = numpy.timedelta64(5, 'D')
hvplot_demo_obs_karin(list_of_dataset, dc_ref, central_date, delta_t)
| notebooks/example_data_access_aviso.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quick intro to Jupyter
#
# A short lesson on running Python code in a *beautiful* and *elegant* environment.
#
# Uses:
# * Rapid testing and development
# * Gorgeous presentation of results
# * In-line graphics
# +
import random
# Pick one item from the list at random on every run.
outcomes = ['eat lunch', 'work through lunch', 'stay late', 'snack']
print(random.choice(outcomes))
# -
# ## Loops
# +
n = 8
for i in range(n):
    # Pad each line with one more leading space than the last.
    pad = ' ' * i
    print(pad, 'Wow! It moves to the right')
# -
# ## Conditionals
#
# Alternate code paths depending on a condition
for i in range(10):
    if i % 2 == 0: # test for an even number
        print(i, i**2)
# ## Modules
#
# The Python Standard Library is sometimes called "the batteries".
#
# The term *batteries* comes from gifts for children with the label "batteries included".
# +
import math
import collections
from pprint import pprint
# A chained math expression, then the 3 most common letters in the word.
print(math.cos(3.0 * math.pi * math.sqrt(3.5)))
pprint(collections.Counter('abracadabra').most_common(3), width=15)
# -
# ## In-line graphs
#
# Octave-like numeric array processing (Matlab-style) is provided by *numpy* which is a third-party Python package. This means numpy isn't in the core distribution from python.org but is already in the anaconda distribution.
#
# The same is true of *matplotlib*.
#
# +
# %matplotlib inline
from pylab import plot
# A minimal line plot of a short data series.
data = [78, 88, 87, 91, 71]
plot(data)
# +
import matplotlib.pyplot as plt
# Pie chart, where the slices will be ordered and plotted counter-clockwise:
labels = ['Python', 'Perl', 'Ruby', 'AWK', 'TCL']
sizes = [140, 35, 10, 2, 1]
# Only "explode" (offset) the 1st slice, i.e. 'Python'.  The original cell
# assigned this tuple twice and its comment still referred to 'Hogs', a
# leftover from the matplotlib gallery example.
explode = (0.1, 0.0, 0, 0, 0)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=120)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
# -
import numpy as np
# Plot sin(x) over [0, 10] sampled at 1000 points.
x = np.linspace(0, 10, 1000)
y = np.sin(x)
plot(x, y)
# For more examples of how to make graphs, see [Matplotlib Gallery](https://matplotlib.org/gallery/index.html#)
# ## Kepler's Third Law
#
# Recreate Kepler's [original analysis](https://thonyc.wordpress.com/2018/05/15/400-years-of-the-third-law-an-overlooked-and-neglected-revolution-in-astronomy/) but using modern data including planets Kepler didn't know about.
# +
# Kepler's third law: period**2 / distance**3 is (nearly) the same
# constant for every body orbiting the sun.
planets = { # Tuple[dist from sun in millions of miles, orbital period in years]
    'mercury': (35.98, 88/365),
    'venus': (67.24, 225/365),
    'earth': (92.96, 1),
    'mars': (141.6, 687/365),
    'jupiter': (483.8, 11.86),
    'saturn': (890.7, 29.46),
    'uranus': (1784, 84),
    'pluto': (3670, 248),
}
for name, (distance_mmiles, period_years) in planets.items():
    # Print the near-constant ratio next to each planet's name.
    kepler_ratio = period_years ** 2 / distance_mmiles ** 3
    print(kepler_ratio, name)
# -
# For a more powerful example of Python, Matplotlib, and NumPy data analysis see: [LIGO Jupyter Notebook](https://losc.ligo.org/s/events/GW150914/GW150914_tutorial.html)
# ## REST API Demonstration using Github
#
# The [Github REST API](https://developer.github.com/v3/#current-version) is for computer to computer communication using JSON file format.
#
# We could use the Python Standard Library but the third-party [*requests*](http://docs.python-requests.org/en/master/user/quickstart/) module is usually preferred and much easier to work with.
# +
import requests
def show_github_member(username):
    "Display a Github user's contact info"
    # Fetch the user's public profile and print name + employer.
    profile = requests.get(f'https://api.github.com/users/{username}').json()
    print(f"{profile['name']} works at {profile['company']}")
# -
# ## Call the Function
# Fetch and print two sample GitHub profiles.
show_github_member('raymondh')
show_github_member('hugs')
# ## How to explore the REST API headers and JSON data
# +
from pprint import pprint
# Inspect the raw response: status code, headers, then the JSON payload.
r = requests.get(f'https://api.github.com/users/raymondh')
print(r.status_code)
pprint(dict(r.headers))
info = r.json()
pprint(info)
# -
# ## What does the *F* do?
#
# Why were people at Pycon this year wearing badges that said ``F"yes!"``?
# +
name = 'Hima'
pronoun = 'She'
title = 'Manager of all things Python'
experience = 6
# The old, old formatting way using tuples
print('%s is a %s. %s worked here for %d years.' %
      (name, title, pronoun, experience))
# The old way, using dictionaries
person = {'name': name, 'pronoun': pronoun, 'title': title, 'experience':experience}
# NOTE(review): "worked for here" in the template below is a typo in the
# program's output text (left as-is here).
print('%(name)s is a %(title)s. %(pronoun)s worked for here %(experience)d years.' % person)
# The new way, using positional arguments
print('{0} is a {1}. {2} worked here for {3} years.'.format(name, title, pronoun, experience))
# The new way, using keyword arguments
print('{firstname} is a {title}. {pronoun} worked here for {experience} years.'.format(
    firstname=name, title=title, pronoun=pronoun, experience=experience))
# The new, new Python 3.6 way with f-strings
print(f'{name} is a {title}. {pronoun} worked here for {experience} years.')
# So, how do you feel about all this
print(F"yes!")
# -
# ## Interacting with the file system
# +
import os
# Bare expressions like this display their value in a notebook cell,
# but are no-ops when run as a plain script.
os.listdir('.')
# -
with open('Day1.ipynb') as f:
    print(len(f.read()))
# +
import shutil
# Copy a file, list the directory, delete the copy, list again.
shutil.copyfile('Day1.ipynb', 'Day1.tmp')
os.listdir('.')
os.remove('Day1.tmp') # unlink()
os.listdir('.')
# -
# Step down into a subdirectory and back up, printing the cwd each time.
print(os.getcwd())
os.chdir('notes')
print(os.getcwd())
os.chdir('..')
print(os.getcwd())
# +
import sys
# write() returns the number of characters written.
n = sys.stdout.write('Hello World')
n = sys.stderr.write('Goodbye Cruel World')
with open('message.txt', 'w') as f:
    f.write('It was the best of times.\nIt was the worst of times.\n')
# -
# ## Reducers (a list goes in and a single value comes out)
s = [10, 20, 30, 20, 10, 20]
print(sum(s))      # Totals all the values
print(min(s))      # Smallest of the values
print(max(s))      # Largest of the values
print(len(s))      # Number of values
print(s.count(20)) # Number of instances of a particular value
print(s[0])        # The first value
print(s[-1])       # The last value
# ## List comprehension
# +
# Syntax: [<expr> for <var> in <iterable>]
[x**2 for x in range(10)]
# Syntax: [<expr> for <var> in <iterable> if <cond>]
[x**2 for x in range(10) if x%2==0]
# -
# ## Parsing of data
#
# When data arrives from a socket, url, or file, it usually arrives in the form of a string. That string needs to be "parsed" to turn it into useful data.
history = '''\
Our team history
* On 3/14/2013, we scored 14 points and lost the game.
* On 6/2/2013, we scored 15 points and lost the game.
* On 9/15/2013, we scored only 1 point and won the game.
Go figure.
'''
# +
import re
# Pattern for date: NUMBER SLASH NUMBER SLASH NUMBER
dates = re.findall(r'\d+/\d+/\d+', history)
# "won" or "lost", in the order the games appear.
record = re.findall(r'won|lost', history)
# Capture just the digits preceding the word "point".
scores = [int(score) for score in re.findall(r'([0-9]+) point', history)]
print(f'Last year, we played {len(dates)} games.')
print(f'The season ran from {dates[0]} to {dates[-1]}.')
print(f'Our record was {record.count("won")}-{record.count("lost")}.')
print(f'We scored {sum(scores)} goals.')
print(f'Our worst was {min(scores)} goal and best was {max(scores)} goals.')
# -
| Beginner/notes/Day1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import cv2
# Black 512x512, 3-channel (BGR) canvas.
img = np.zeros((512,512,3),np.uint8)
# Filled (thickness = -1) blue circle of radius 5 centred at (20, 20);
# cv2.circle draws in place and also returns the image.
img = cv2.circle(img,(20,20),5,(255,0,0),-1)
cv2.namedWindow('image')
while(1):
    # Redraw every 20 ms until the ESC key (code 27) is pressed.
    cv2.imshow('image',img)
    if(cv2.waitKey(20) & 0xFF == 27):
        break
cv2.destroyAllWindows()
| 3.3.circles-and-zeroes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ```
# The device on your wrist beeps several times, and once again you feel like you're falling.
#
# "Situation critical," the device announces. "Destination indeterminate. Chronal interference detected. Please specify new target coordinates."
#
# The device then produces a list of coordinates (your puzzle input). Are they places it thinks are safe or dangerous? It recommends you check manual page 729. The Elves did not give you a manual.
#
# If they're dangerous, maybe you can minimize the danger by finding the coordinate that gives the largest distance from the other points.
#
# Using only the Manhattan distance, determine the area around each coordinate by counting the number of integer X,Y locations that are closest to that coordinate (and aren't tied in distance to any other coordinate).
#
# Your goal is to find the size of the largest area that isn't infinite. For example, consider the following list of coordinates:
#
# 1, 1
# 1, 6
# 8, 3
# 3, 4
# 5, 5
# 8, 9
# If we name these coordinates A through F, we can draw them on a grid, putting 0,0 at the top left:
#
# ..........
# .A........
# ..........
# ........C.
# ...D......
# .....E....
# .B........
# ..........
# ..........
# ........F.
# This view is partial - the actual grid extends infinitely in all directions. Using the Manhattan distance, each location's closest coordinate can be determined, shown here in lowercase:
#
# aaaaa.cccc
# aAaaa.cccc
# aaaddecccc
# aadddeccCc
# ..dDdeeccc
# bb.deEeecc
# bBb.eeee..
# bbb.eeefff
# bbb.eeffff
# bbb.ffffFf
# Locations shown as . are equally far from two or more coordinates, and so they don't count as being closest to any.
#
# In this example, the areas of coordinates A, B, C, and F are infinite - while not shown here, their areas extend forever outside the visible grid. However, the areas of coordinates D and E are finite: D is closest to 9 locations, and E is closest to 17 (both including the coordinate's location itself). Therefore, in this example, the size of the largest area is 17.
#
# What is the size of the largest area that isn't infinite?
# ```
# +
from collections import Counter, defaultdict
# Parse one input line of the form "123, 123" into an (x, y) integer pair.
def parse(line):
    first, second = (int(field) for field in line.split(','))
    return first, second
# Compute the Manhattan distance between two (x, y) points.
# NOTE: tuple parameters in the signature are Coconut syntax — this
# notebook is written in Coconut, not plain Python 3.
def distance((x1, y1), (x2, y2)):
    return abs(x1 - x2) + abs(y1 - y2)
# Nearest location to p, or None when two or more locations tie for the
# minimum distance.
def nearest(p, locations):
    by_distance = defaultdict(list)
    for location in locations:
        by_distance[distance(location, p)].append(location)
    closest = by_distance[min(by_distance)]
    # A tie means p does not belong to any single location's area.
    return closest[0] if len(closest) == 1 else None
# Returns True if the (x, y) location is surrounded — i.e. there is at
# least one other location in each diagonal quadrant, so its nearest-area
# cannot extend to infinity.
# NOTE: `(i, j) -> ...` lambdas and the tuple parameter are Coconut syntax.
def has_finite_area((x, y), locations):
    # One predicate per quadrant relative to (x, y).
    # NOTE(review): the compass labels assume a particular axis
    # orientation (y growing downward) — verify against the grid used.
    directions = [
        (i, j) -> (i < x) and (j < y), # northeast
        (i, j) -> (i < x) and (j > y), # southeast
        (i, j) -> (i > x) and (j < y), # northwest
        (i, j) -> (i > x) and (j > y) # southwest
    ]
    for d in directions:
        # If there is no other location at one direction
        # it will have an infinite area
        if sum(1 for i, j in locations if d(i, j)) == 0:
            return False
    return True
# Yield every integer (x, y) coordinate inside the bounding box of the
# given locations (all borders inclusive).
def area_to_explore(locations):
    x_coords = [point[0] for point in locations]
    y_coords = [point[1] for point in locations]
    # Bounding-box edges.
    left, right = min(x_coords), max(x_coords)
    top, bottom = min(y_coords), max(y_coords)
    return ((x, y) for x in range(left, right + 1) for y in range(top, bottom + 1))
# Returns the maximum area of a surrounded location.
# (Coconut syntax: `|>` pipes a value into a function and `f$(?, x)`
# partially applies f, leaving `?` as the free argument.)
def maximum_finite_area(locations):
    # Tally, for every grid cell in the bounding box, which location it
    # is uniquely nearest to (ties map to None and are ignored later).
    areas = Counter(locations |> area_to_explore |> fmap$(nearest$(?, locations)))
    # Keep only the locations whose nearest-area is finite.
    finite_locations = locations |> filter$(has_finite_area$(?, locations)) |> list
    # Return the biggest area belonging to a finite area location
    return max(areas[l] for l in finite_locations)
# -
# Test the example (expected answer: 17)
example = ["1, 1", "1, 6", "8, 3", "3, 4", "5, 5", "8, 9"]
example |> fmap$(parse) |> maximum_finite_area
# Compute the answer for the real puzzle input
open("input/day06.txt") |> fmap$(parse) |> maximum_finite_area
# ```
# --- Part Two ---
# On the other hand, if the coordinates are safe, maybe the best you can do is try to find a region near as many coordinates as possible.
#
# For example, suppose you want the sum of the Manhattan distance to all of the coordinates to be less than 32. For each location, add up the distances to all of the given coordinates; if the total of those distances is less than 32, that location is within the desired region. Using the same coordinates as above, the resulting region looks like this:
#
# ..........
# .A........
# ..........
# ...###..C.
# ..#D###...
# ..###E#...
# .B.###....
# ..........
# ..........
# ........F.
# In particular, consider the highlighted location 4,3 located at the top middle of the region. Its calculation is as follows, where abs() is the absolute value function:
#
# Distance to coordinate A: abs(4-1) + abs(3-1) = 5
# Distance to coordinate B: abs(4-1) + abs(3-6) = 6
# Distance to coordinate C: abs(4-8) + abs(3-3) = 4
# Distance to coordinate D: abs(4-3) + abs(3-4) = 2
# Distance to coordinate E: abs(4-5) + abs(3-5) = 3
# Distance to coordinate F: abs(4-8) + abs(3-9) = 10
# Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30
# Because the total distance to all coordinates (30) is less than 32, the location is within the region.
#
# This region, which also includes coordinates D and E, has a total size of 16.
#
# Your actual region will need to be much larger than this example, though, instead including all locations with a total distance of less than 10000.
#
# What is the size of the region containing all locations which have a total distance to all given coordinates of less than 10000?
#
# ```
# +
# Check that a is in a safe region: the summed Manhattan distance from a
# to every location must be below safe_distance.
def is_in_safe_region(a, locations, safe_distance):
    total_distance = locations |> fmap$(distance$(?, a)) |> sum
    return total_distance < safe_distance
# Returns an iterator over all bounding-box coordinates in the safe region.
# NOTE(review): only cells inside the locations' bounding box are tested;
# this assumes the safe region does not spill past it — confirm.
def locations_in_safe_region(locations, safe_distance):
    return area_to_explore(locations) |> filter$(is_in_safe_region$(?, locations, safe_distance))
# -
# Example: expected region size is 16 for a threshold of 32.
example |> fmap$(parse) |> locations_in_safe_region$(?, 32) |> list |> len
# Real input with the puzzle threshold of 10000.
open("input/day06.txt") |> fmap$(parse) |> locations_in_safe_region$(?, 10000) |> list |> len
| 2018/jordi/Day 6 - Chronal Coordinates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (ox)
# language: python
# name: python3
# ---
# # Download any OSM Geospatial Entities with OSMnx
#
# Author: [<NAME>](https://geoffboeing.com/)
#
# More info:
#
# - [Overview of OSMnx](http://geoffboeing.com/2016/11/osmnx-python-street-networks/)
# - [Documentation and install instructions](https://osmnx.readthedocs.io)
# - [Examples, demos, tutorials](https://github.com/gboeing/osmnx-examples)
# - [Journal article and citation info](http://geoffboeing.com/publications/osmnx-complex-street-networks/)
# - [GitHub repo](https://github.com/gboeing/osmnx)
#
# This notebook provides a quick tour of using OSMnx to download any geospatial entites/objects from OpenStreetMap as a geopandas GeoDataFrame.
# +
import osmnx as ox
# %matplotlib inline
# Cache HTTP responses locally and keep the console quiet.
ox.config(use_cache=True, log_console=False)
# Display the installed OSMnx version.
ox.__version__
# -
# Use the `geometries` module to download entities, such as grocery stores, transit stops, points of interest, or building footprints, and turn them into a GeoDataFrame: [see docs](https://osmnx.readthedocs.io/en/stable/osmnx.html#module-osmnx.geometries).
#
# To query, pass a `tags` dict where keys are OSM tags. The dict's values can be either:
# 1. `True` to retrieve all OSM objects with this tag, regardless of its value
# 2. a single value as a string to retrieve all OSM objects with this tag:value combination
# 3. a list of string values to retrieve all OSM objects where this tag matches any of these values
#
# Pass multiple dict key:value pairs to retrieve the union (not intersection) of these pairs.
# get all building footprints in some neighborhood
# `True` means retrieve any object with this tag, regardless of value
place = "Bunker Hill, Los Angeles, California"
tags = {"building": True}
gdf = ox.geometries_from_place(place, tags)
gdf.shape
# Render the footprints as a small figure.
fig, ax = ox.plot_footprints(gdf, figsize=(3, 3))
# get all the parks in some neighborhood
# constrain acceptable `leisure` tag values to `park`
tags = {"leisure": "park"}
gdf = ox.geometries_from_place(place, tags)
gdf.shape
# get everything tagged amenity,
# and everything tagged landuse = retail or commercial,
# and everything tagged highway = bus_stop
# (multiple key:value pairs return the union, not the intersection)
tags = {"amenity": True, "landuse": ["retail", "commercial"], "highway": "bus_stop"}
gdf = ox.geometries_from_place("Piedmont, California, USA", tags)
gdf.shape
# view just the banks (dropping columns that are entirely/partly NaN)
gdf[gdf["amenity"] == "bank"].dropna(axis=1, how="any")
# view just the bus stops
gdf[gdf["highway"] == "bus_stop"].dropna(axis=1, how="any").head()
| osmnx-examples/16-download-osm-geospatial-entities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Plagiarism Detection Model
#
# ## Steps
# * Upload data to S3.
# * Define a binary classification model and a training script.
# * Train your model and deploy it.
# * Evaluate your deployed classifier and answer some questions about your approach.
import pandas as pd
import boto3
import sagemaker
# ## Load Data to S3
# +
# session and role
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
# create an S3 bucket
bucket = sagemaker_session.default_bucket()
# -
# ## Upload training data to S3
# +
# the name of directory created to save the features data
data_dir = 'plagiarism_data'
# set prefix
prefix = 'plagiarism_project'
# upload all data to S3
input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)
# -
# ### Test cell
# +
# confirm that data is in S3 bucket
empty_check = []
for obj in boto3.resource('s3').Bucket(bucket).objects.all():
    empty_check.append(obj.key)
    print(obj.key)
assert len(empty_check) !=0, 'S3 bucket is empty.'
print('Test passed!')
# -
# # SKLearn Model Creation
# !pygmentize source_sklearn/train.py
# # Create the Estimator
# +
from sagemaker.sklearn.estimator import SKLearn
# Train artifacts will be written under this S3 prefix.
output_path = 's3://{}/{}'.format(bucket, prefix)
estimator = SKLearn(entry_point = 'train.py',
                    source_dir = 'source_sklearn',
                    role = role,
                    train_instance_count = 1,
                    train_instance_type = 'ml.m4.xlarge',
                    sagemaker_session = sagemaker_session,
                    output_path = output_path,
                    )
# -
# ## EXERCISE: Train the estimator
# %%time
# Launch the training job on the uploaded channel.
estimator.fit({'train': input_data})
# ## EXERCISE: Deploy the trained model
# +
# %%time
# NOTE(review): PyTorchModel appears unused — deployment below comes from
# the SKLearn estimator; consider removing this import.
from sagemaker.pytorch import PyTorchModel
# deploy model to create the predictor
predictor = estimator.deploy(instance_type='ml.m4.xlarge',
                             initial_instance_count=1)
# -
# # Evaluating The Model
# +
import os
# read in test data, assuming it is stored locally
test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None)
# labels are in the first column
test_y = test_data.iloc[:,0]
test_x = test_data.iloc[:,1:]
# -
# ## Determining the accuracy of the model
# +
# First: generate predicted, class labels
test_y_preds = predictor.predict(test_x)
# test that the model generates the correct number of labels
assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.'
print('Test passed!')
# +
# Second: calculate the test accuracy
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(test_y, test_y_preds)
print('accuracy:',accuracy)
print(classification_report(test_y.values, test_y_preds,))
## print out the array of predicted and true labels, if you want
print('\nPredicted class labels: ')
print(test_y_preds)
print('\nTrue class labels: ')
print(test_y.values)
# -
# ## Clean up Resources
# +
# <name_of_deployed_predictor>.delete_endpoint()
# -
# ### Deleting S3 bucket
# +
# bucket_to_delete = boto3.resource('s3').Bucket(bucket)
# bucket_to_delete.objects.all().delete()
# -
# ## NLP+Classification for Plagiarism Detection Successful!
# I hope you enjoyed this project as much as I did!!
#
# Thanks for viewing :)
#
# You can always message me on Github for more information on the process I used!
| 3_Training_a_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
#
# # MATPLOTLIB
#
# ----
# ## Matplotlib Basics
# ## Introduction
# Matplotlib is the "grandfather" library of data visualization with Python. It was created by <NAME>. He created it to try to replicate MatLab's (another programming language) plotting capabilities in Python. So if you happen to be familiar with matlab, matplotlib will feel natural to you.
#
# It is an excellent 2D and 3D graphics library for generating scientific figures.
#
# Some of the major Pros of Matplotlib are:
#
# * Generally easy to get started for simple plots
# * Support for custom labels and texts
# * Great control of every element in a figure
# * High-quality output in many formats
# * Very customizable in general
#
# Matplotlib allows you to create reproducible figures programmatically. Let's learn how to use it! Before continuing this lecture, I encourage you just to explore the official Matplotlib web page: http://matplotlib.org/
#
# ## Installation
#
# If you are using our environment, its already installed for you. If you are not using our environment (not recommended), you'll need to install matplotlib first with either:
#
# conda install matplotlib
# or
#
# pip install matplotlib
#
# ## Importing
# Import the `matplotlib.pyplot` module under the name `plt` (the tidy way):
# +
# COMMON MISTAKE!
# DON'T FORGET THE .PYPLOT part
import matplotlib.pyplot as plt
# -
# **NOTE: If you are using an older version of jupyter, you need to run a "magic" command to see the plots inline with the notebook. Users of jupyter notebook 1.0 and above, don't need to run the cell below:**
# %matplotlib inline
# **NOTE: For users running .py scripts in an IDE like PyCharm or Sublime Text Editor. You will not see the plots in a notebook, instead if you are using another editor, you'll use: *plt.show()* at the end of all your plotting commands to have the figure pop up in another window.**
# # Basic Example
#
# Let's walk through a very simple example using two numpy arrays:
# ### Basic Array Plot
#
# Let's walk through a very simple example using two numpy arrays. You can also use lists, but most likely you'll be passing numpy arrays or pandas columns (which essentially also behave like arrays).
#
# **The data we want to plot:**
import numpy as np
# The data to plot: x = 0..9 and y = 2x.
x = np.arange(0,10)
y = 2*x
# Bare expressions display the arrays in a notebook cell.
x
y
# # Using Matplotlib with plt.plot() function calls
#
# ## Basic Matplotlib Commands
#
# We can create a very simple line plot using the following ( I encourage you to pause and use Shift+Tab along the way to check out the document strings for the functions we are using).
# A minimal line plot with axis labels and a title.
plt.plot(x, y)
plt.xlabel('X Axis Title Here')
plt.ylabel('Y Axis Title Here')
plt.title('String Title Here')
plt.show() # Required for non-jupyter users , but also removes Out[] info
# ### Editing more figure parameters
# Same plot, now with explicit x/y axis limits.
plt.plot(x, y)
plt.xlabel('X Axis Title Here')
plt.ylabel('Y Axis Title Here')
plt.title('String Title Here')
plt.xlim(0,6) # Lower Limit, Upper Limit
plt.ylim(0,12) # Lower Limit, Upper Limit
plt.show() # Required for non-jupyter users , but also removes Out[] info
# ## Exporting a plot
help(plt.savefig)
# Write the current figure to disk as a PNG file.
plt.plot(x,y)
plt.savefig('example.png')
# ---------------------
#
# --------------------
| Data Science Resources/Jose portila - ML/04-Matplotlib/.ipynb_checkpoints/00-Matplotlib-Basics-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analysis of sequence and p
# +
import pprint
import subprocess
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
# %matplotlib inline
plt.rcParams['figure.figsize'] = (12.9, 12)
np.set_printoptions(suppress=True, precision=2)
sns.set(font_scale=3.5)
from network import Protocol, NetworkManager, BCPNNPerfect
from plotting_functions import plot_weight_matrix, plot_state_variables_vs_time, plot_winning_pattern
from plotting_functions import plot_network_activity, plot_network_activity_angle
from analysis_functions import calculate_recall_time_quantities, calculate_angle_from_history, get_weights
from connectivity_functions import artificial_connectivity_matrix
# -
# ## An example for debugging
# +
# Network configuration flags for BCPNNPerfect
always_learning = False
strict_maximum = True
perfect = False
z_transfer = False
k_perfect = True
diagonal_zero = False
# Gains and time constants (seconds, judging by the 0.001 dt below)
g_w_ampa = 2.0
g_w = 0.0
g_a = 10.0
tau_a = 0.250
G = 1.0
sigma = 0.0
tau_m = 0.020
tau_z_pre_ampa = 0.005
tau_z_post_ampa = 0.005
tau_p = 10.0
# Patterns parameters
hypercolumns = 1
minicolumns = 10
n_patterns = 10
# Manager properties
dt = 0.001
values_to_save = ['o']
# Protocol
training_time = 0.050
inter_sequence_interval = 1.0
inter_pulse_interval = 0.0
epochs = 3
# Recall
T_recall = 3.0
n = 1
T_cue = 0.050
nn = BCPNNPerfect(hypercolumns, minicolumns, g_w_ampa=g_w_ampa, g_w=g_w, g_a=g_a, tau_a=tau_a, tau_m=tau_m,
                  sigma=sigma, G=G, tau_z_pre_ampa=tau_z_pre_ampa, tau_z_post_ampa=tau_z_post_ampa, tau_p=tau_p,
                  z_transfer=z_transfer, diagonal_zero=diagonal_zero, strict_maximum=strict_maximum,
                  perfect=perfect, k_perfect=k_perfect, always_learning=always_learning)
# Build the manager
manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)
# Build the protocol for training
protocol = Protocol()
patterns_indexes = [i for i in range(n_patterns)]
protocol.simple_protocol(patterns_indexes, training_time=training_time, inter_pulse_interval=inter_pulse_interval,
                         inter_sequence_interval=inter_sequence_interval, epochs=epochs)
manager.run_network_protocol(protocol=protocol, verbose=False)
# Recall the trained sequence and collect timing / success statistics
sequences = [patterns_indexes]
aux = calculate_recall_time_quantities(manager, T_recall, T_cue, n, sequences)
total_sequence_time, mean, std, success, timings = aux
# Visualize the learned AMPA weight matrix and the recalled activity
plot_weight_matrix(manager.nn, ampa=True)
plot_network_activity_angle(manager)
# -
# Let's extract, for both the second column and the one before the last, the weight to the next pattern, the mean of the rest of the weights in the column, and their standard deviation
# +
second_index = 1
last_index = n_patterns - 2
# Weight from pattern second_index to the pattern that follows it ("next")
w_next_second = nn.w_ampa[second_index + 1, second_index]
# Mean/std of the remaining ("rest") weights in that column
w_rest_second_mean = nn.w_ampa[(second_index + 2):, second_index].mean()
w_rest_second_std = nn.w_ampa[(second_index + 2):, second_index].std()
# Same quantities for the column one before the last pattern
w_next_last = nn.w_ampa[last_index + 1, last_index]
w_rest_last_mean = nn.w_ampa[:last_index, last_index].mean()
w_rest_last_std = nn.w_ampa[:last_index, last_index].std()
# -
nn.w_ampa[:, last_index]
nn.w_ampa[:last_index, last_index]
# Plot each full weight column with horizontal lines at the extracted statistics
plt.plot(nn.w_ampa[:, second_index], 'o-')
plt.axhline(w_next_second)
plt.axhline(w_rest_second_mean)
plt.plot(nn.w_ampa[:, last_index], 'o-')
plt.axhline(w_next_last)
plt.axhline(w_rest_last_mean)
# ## Systematic
# +
# Same configuration as the debugging example above, but tau_p is swept over a
# range and the next/rest weight statistics are recorded for every value.
always_learning = False
strict_maximum = True
perfect = False
z_transfer = False
k_perfect = True
diagonal_zero = False
# NOTE(review): computed from the n_patterns of the previous cell; both cells
# use n_patterns = 10, so the indices agree.
second_index = 1
last_index = n_patterns - 2
g_w_ampa = 2.0
g_w = 0.0
g_a = 10.0
tau_a = 0.250
G = 1.0
sigma = 0.0
tau_m = 0.020
tau_z_pre_ampa = 0.005
tau_z_post_ampa = 0.005
tau_p = 10.0
# Patterns parameters
hypercolumns = 1
minicolumns = 10
n_patterns = 10
# Manager properties
dt = 0.001
values_to_save = ['o']
# Protocol
training_time = 0.050
inter_sequence_interval = 1.0
inter_pulse_interval = 0.0
epochs = 3
# Recall
T_recall = 3.0
n = 1
T_cue = 0.050
# Sweep values and result arrays (one entry per tau_p)
num = 50
tau_p_values = np.linspace(5, 100, num=num)
successes = np.zeros_like(tau_p_values)
persistent_times = np.zeros_like(tau_p_values)
w_next_second_vector = np.zeros_like(tau_p_values)
w_rest_second_mean_vector = np.zeros_like(tau_p_values)
w_rest_second_std_vector = np.zeros_like(tau_p_values)
w_next_last_vector = np.zeros_like(tau_p_values)
w_rest_last_mean_vector = np.zeros_like(tau_p_values)
w_rest_last_std_vector = np.zeros_like(tau_p_values)
for index, tau_p in enumerate(tau_p_values):
    nn = BCPNNPerfect(hypercolumns, minicolumns, g_w_ampa=g_w_ampa, g_w=g_w, g_a=g_a, tau_a=tau_a, tau_m=tau_m,
                      sigma=sigma, G=G, tau_z_pre_ampa=tau_z_pre_ampa, tau_z_post_ampa=tau_z_post_ampa, tau_p=tau_p,
                      z_transfer=z_transfer, diagonal_zero=diagonal_zero, strict_maximum=strict_maximum,
                      perfect=perfect, k_perfect=k_perfect, always_learning=always_learning)
    # Build the manager
    manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)
    # Build the protocol for training
    protocol = Protocol()
    patterns_indexes = [i for i in range(n_patterns)]
    protocol.simple_protocol(patterns_indexes, training_time=training_time, inter_pulse_interval=inter_pulse_interval,
                             inter_sequence_interval=inter_sequence_interval, epochs=epochs)
    manager.run_network_protocol(protocol=protocol, verbose=False)
    # Recall statistics for this tau_p
    sequences = [patterns_indexes]
    aux = calculate_recall_time_quantities(manager, T_recall, T_cue, n, sequences)
    total_sequence_time, mean, std, success, timings = aux
    persistent_times[index] = mean
    successes[index] = success
    # Record the next/rest weight statistics for both probe columns
    w_next_second_vector[index] = nn.w_ampa[second_index + 1, second_index]
    w_rest_second_mean_vector[index] = nn.w_ampa[(second_index + 2):, second_index].mean()
    w_rest_second_std_vector[index] = nn.w_ampa[(second_index + 2):, second_index].std()
    w_next_last_vector[index] = nn.w_ampa[last_index + 1, last_index]
    w_rest_last_mean_vector[index] = nn.w_ampa[:last_index, last_index].mean()
    w_rest_last_std_vector[index] = nn.w_ampa[:last_index, last_index].std()
# +
# Differences (second column minus last column) of the "next" and "rest" weights
next_difference = w_next_second_vector - w_next_last_vector
rest_difference = w_rest_second_mean_vector - w_rest_last_mean_vector
markersize = 15
linewidth = 10
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(tau_p_values, next_difference, 'o-', markersize=markersize, lw=linewidth, label=r'next')
ax.plot(tau_p_values, rest_difference, 'o-', markersize=markersize, lw=linewidth, label='rest')
ax.axhline(0, ls='--', color='gray')
ax.axvline(0, ls='--', color='gray')
ax.legend();
# +
# Mean +/- std bands of the "rest" weights for both probe columns vs tau_p.
color_palette = sns.color_palette()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(tau_p_values, w_rest_second_mean_vector, 'o-', markersize=markersize,
        lw=linewidth, color=color_palette[0], label=r'rest second')
ax.plot(tau_p_values, w_rest_last_mean_vector, 'o-', markersize=markersize,
        lw=linewidth, color=color_palette[1], label='rest last')
# Bug fix: the fill_between calls previously referenced `current_palette`,
# which is never defined in this notebook (NameError at runtime); the palette
# defined above is `color_palette`.
ax.fill_between(tau_p_values, w_rest_second_mean_vector - w_rest_second_std_vector,
                w_rest_second_mean_vector + w_rest_second_std_vector,
                color=color_palette[0], alpha=0.25)
ax.fill_between(tau_p_values, w_rest_last_mean_vector - w_rest_last_std_vector,
                w_rest_last_mean_vector + w_rest_last_std_vector,
                color=color_palette[1], alpha=0.25)
ax.axhline(0, ls='--', color='gray')
ax.axvline(0, ls='--', color='gray')
ax.legend();
# -
| jupyter/2018-04-02(Analysis of sequence and tau_p).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Subtleties of estimating entropy
#
# ## <NAME>
#
# For some algorithms, we need to calculate the entropy and its derivative. If
# there is no analytic formula for the entropy, we can resort to sampling. Given
# the definition of entropy:
#
# $$
# \begin{equation*}
# H(p) = E_{x\sim p_\theta}(-\log p_\theta(x))
# \end{equation*}
# $$
#
# We can see that $-\log p_{\theta}(x)$ is an unbiased estimator of $H$ if $x$ is
# sampled from $p_{\theta}$. It is tempting to use
# $-\frac{\partial\log p_\theta(x)}{\partial\theta}$ as an estimator of
# $\frac{\partial H}{\partial\theta}$. However, it is wrong, as shown in the
# following:
#
# $$
# \begin{equation*}
# E_{x\sim p_\theta}\left(\frac{\partial\log p_\theta(x)}{\partial\theta}\right)
# = \int \frac{\partial\log p_\theta(x)}{\partial\theta} p_\theta(x) dx
# = \int \frac{\partial p_\theta(x)}{\partial\theta} dx
# = \frac{\partial}{\partial\theta} \int p_\theta(x) dx
# = \frac{\partial 1}{\partial\theta} = 0
# \end{equation*}
# $$
#
# We need to actually go through the process of calculating the derivative to get
# the unbiased estimator of $\frac{\partial H}{\partial\theta}$:
#
# $$
# \begin{array}{ll}
# \frac{\partial H}{\partial\theta}
# &=&-\frac{\partial}{\partial\theta}\int \log p_\theta(x) p_\theta(x) dx \\
# &=& - \int \left(\frac{\partial\log p_\theta(x)}{\partial\theta}p_\theta(x)
# + \log p_\theta(x) \frac{\partial p_\theta(x)}{\partial\theta}\right) dx \\
# &=& - \int \left(\frac{\partial\log p_\theta(x)}{\partial\theta}p_\theta(x)
# + \log p_\theta(x) \frac{\partial\log p_\theta(x)}{\partial\theta} p_\theta(x)\right) dx \\
# &=& - \int (1+\log p_\theta(x))\frac{\partial\log p_\theta(x)}{\partial\theta} p_\theta(x) dx \\
# &=& -E_{x\sim p_\theta}\left(\log p_\theta(x)\frac{\partial\log p_\theta(x)}{\partial\theta}\right)
# -E_{x\sim p_\theta}\left(\frac{\partial\log p_\theta(x)}{\partial\theta}\right) \\
# &=& -\frac{1}{2}E_{x\sim p_\theta}\left(\frac{\partial}{\partial\theta}(\log p_\theta(x))^2\right) \\
# \end{array}
# $$
#
# This means that $-\frac{1}{2}\frac{\partial}{\partial\theta}(\log p_\theta(x))^2$
# is an unbiased estimator of $\frac{\partial H}{\partial\theta}$. Actually,
# $-\frac{1}{2}\frac{\partial}{\partial\theta}(c+\log p_\theta(x))^2$ is an
# unbiased estimator for any constant $c$.
#
# For some distributions, the sample of $p_\theta$ is generated by transforming
# $\epsilon \sim q$ by $f_\theta(\epsilon)$, where $q$ is a fixed distribution and
# $f_\theta$ is a smooth bijective mapping. $p_\theta(x)$ is implicitly defined by
# $q$ and $f_\theta$ as:
#
# $$
# \begin{equation*}
# p_\theta(x) = q(f_\theta^{-1}(x)) / \left|\det \left.
# \frac{\partial f_\theta(\epsilon)}{\partial\epsilon}\right|
# _{\epsilon=f_\theta^{-1}(x)}\right|
# \end{equation*}
# $$
#
# Interestingly, when calculating $-\frac{\partial\log p_\theta(x)}{\partial\theta}$,
# if we treat $x$ as $x=f_\theta(\epsilon)$, we get an unbiased estimator of
# $\frac{\partial H}{\partial\theta}$:
#
# $$
# \begin{array}{ll}
# && E_{x\sim p_\theta}\left(-\frac{\partial\log p_\theta(x)}{\partial\theta}\right)
# = E_{\epsilon \sim q}\left(-\frac{\partial\log p_\theta(f_\theta(\epsilon))}{\partial\theta}\right) \\
# &=& -\frac{\partial}{\partial\theta}E_{\epsilon \sim q}\left(\log p_\theta(f_\theta(\epsilon))\right)
# = -\frac{\partial}{\partial\theta}E_{x \sim p_\theta}\left(\log p_\theta(x)\right)
# = \frac{\partial}{\partial\theta}H(p)
# \end{array}
# $$
#
# So we can use $-\frac{\partial\log p_\theta(x)}{\partial\theta}$ as an unbiased
# estimator of $\frac{\partial H(p)}{\partial\theta}$ if $x=f_\theta(\epsilon)$
# and we allow gradient to propagate through $x$ to $\theta$.
| docs/subtleties_of_estimating_entropy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from google.cloud import bigquery
# Client for the BigQuery public datasets (uses default application credentials)
client = bigquery.Client()
# Which countries do not use ppm as a unit to measure pollutants?
query = """SELECT DISTINCT country
        FROM `bigquery-public-data.openaq.global_air_quality`
        WHERE unit != 'ppm'
        """
query_job = client.query(query)
rows = list(query_job.result(timeout=30))
print(len(rows), "results")
print([x.country for x in rows])
# if DISTINCT is not used
# (manual de-duplication: count occurrences per country, then keep the keys)
unique_res = {}
for x in rows:
    if x.country in unique_res:
        unique_res[x.country] += 1
    else:
        unique_res[x.country] = 1
print(unique_res.keys())
# +
# Which pollutants have a value of exactly 0, in what locations?
query = """SELECT pollutant, location
        FROM `bigquery-public-data.openaq.global_air_quality`
        WHERE value = 0.00"""
query_job = client.query(query)
rows = list(query_job.result(timeout=30))
# print(len(rows), "results")
# print([x.pollutant for x in rows])
import pandas as pd
# Build a DataFrame from the row objects (column names taken from the first row)
df = pd.DataFrame(data=[list(x.values()) for x in rows], columns=list(rows[0].keys()))
# e.g. places where there is a recorded CO level of 0.
print(df['location'][df['pollutant'] == 'co'])
# -
# get the schema to explain what each column means
table_ref = client.dataset('openaq', project='bigquery-public-data').table('global_air_quality')
for name, desc in [(s.name, s.description) for s in client.get_table(table_ref).schema]:
    print(name, ":", desc)
| day1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Quasi Cycle Induced Cross Frequency
# The basic function for the simulation.
# +
import numpy as np
from scipy.signal import hilbert
from scipy.special import expit
from scipy.fftpack import fft, ifft
from numpy.linalg import inv
from copy import deepcopy
import matplotlib.pyplot as plt
# %matplotlib inline
def Cros(X, dt, min_low_band, max_low_band, min_high_band, max_high_band):
    """Phase-amplitude coupling profile of signal X.

    Bins the instantaneous amplitude of the high band by the instantaneous
    phase of the low band (10 bins over [-pi, pi)) and returns the
    normalized length-10 profile.
    """
    low = filt(X, dt, min_low_band, max_low_band)
    phase = np.angle(hilbert(low - np.mean(low)))
    high = filt(X, dt, min_high_band, max_high_band)
    envelope = np.abs(hilbert(high - np.mean(high)))
    n_bins = 10
    half_width = np.pi / n_bins
    centers = np.arange(0, 2 * np.pi, 2 * np.pi / n_bins)
    profile = np.zeros(n_bins)
    for k in range(n_bins):
        members = np.where(np.abs(phase - (centers[k] - np.pi)) < half_width)[0]
        profile[k] = np.mean(envelope[members])
    return profile / sum(profile)
def filt(X, dt, a, b):
    """Band-pass filter X by zeroing FFT bins outside [int(a*dt), int(b*dt)).

    The kept half-spectrum is symmetrized with its reversed conjugate so the
    inverse transform is (numerically) real; the DC bin is always removed.
    X is the raw signal, dt the time bin.
    """
    spectrum = fft(X)
    mask = np.zeros(len(X))
    mask[int(a * dt):int(dt * b)] = 1.
    spectrum = mask * spectrum
    spectrum[0] = 0
    mirrored = np.conj(spectrum)[::-1]
    mirrored = np.append(np.array([0]), mirrored[:-1])
    symmetric = (spectrum + mirrored) / 2.
    return np.real(ifft(symmetric))
def all_cros(X,
             dt,
             T,
             min_low_band,
             max_low_band,
             min_high_band,
             max_high_band):
    """Average the Cros coupling profile over consecutive 1-second windows.

    T is the total duration (ms); each window holds int(1/dt)*1000 samples.
    Cros is called with dt=1, matching the original behaviour of this cell.
    """
    n_windows = int(T / 1000)
    window = int(1 / dt) * 1000
    total = np.zeros(10)
    for w in range(n_windows):
        segment = X[w * window:(w + 1) * window]
        total = total + Cros(segment, 1, min_low_band, max_low_band, min_high_band, max_high_band)
    return total / sum(total)
def Entropy(f):
    """Modulation index: log(10) minus the Shannon entropy of the 10-bin distribution f."""
    negative_entropy = sum(f * np.log(f))
    return np.log(10) + negative_entropy
def network(Ts, dt, noise, alpha, xx, XX, xX):
    """Euler-integrate two coupled stochastic 2-population (E/I) rate networks.

    Parameters
    ----------
    Ts : duration in seconds; an extra 100 ms transient is simulated.
    dt : integration step; noise is scaled by sqrt(dt).
    noise : amplitude of the additive Gaussian noise.
    alpha : decay rate of the rate dynamics.
    xx : 2x2 weights within the fast pair, [[ee, ei], [ie, ii]].
    XX : 2x2 weights within the slow pair, [[EE, EI], [IE, II]].
    xX : 2x2 weights from the slow pair onto the fast pair, [[eE, eI], [iE, iI]].

    Returns
    -------
    x, y : (2, N) trajectories of the fast and slow pairs.
    he, hi : external inputs of the fast pair (the slow pair's HE, HI are local).
    """
    # Target fixed-point rates: (e, i) for the fast pair, (E, I) for the slow one
    e = .2
    i = .3
    E = .2
    I = .3
    # Unpack connection strengths (excitatory columns enter with +, inhibitory with -)
    ee = xx[0,0]
    ii = xx[1,1]
    ei = xx[0,1]
    ie = xx[1,0]
    EE = XX[0,0]
    II = XX[1,1]
    EI = XX[0,1]
    IE = XX[1,0]
    eE = xX[0,0]
    iI = xX[1,1]
    eI = xX[0,1]
    iE = xX[1,0]
    # External inputs chosen so the target rates are a fixed point of the
    # noiseless dynamics dr/dt = -alpha*r + expit(W r + h)
    he = -np.log(1./(alpha*e) -1) - (ee*e - ei*i + eE*E - eI*I)
    hi = -np.log(1./(alpha*i) -1) - (ie*e - ii*i + iE*E - iI*I)
    HE = -np.log(1./(alpha*E) -1) - (EE*E - EI*I)
    HI = -np.log(1./(alpha*I) -1) - (IE*E - II*I)
    T = 1000*Ts+100.
    ddt = np.sqrt(dt)  # Euler-Maruyama noise scaling
    N = int(T/dt)
    x = np.zeros([2,N])
    y = np.zeros([2,N])
    # Start slightly off the fixed point so the deterministic dynamics engage
    x[0,0] = e + .01
    x[1,0] = i - .01
    y[0,0] = E - .01
    y[1,0] = I - .01
    for n in range(N-1):
        # Fast pair receives input from the slow pair through xX; the slow pair
        # (y) is autonomous apart from the noise.
        x[0,n+1] = x[0,n] + dt*(-alpha*x[0,n] + expit(ee*x[0,n] - ei*x[1,n] + eE*y[0,n] - eI*y[1,n] + he)) + ddt*noise*np.random.randn()
        x[1,n+1] = x[1,n] + dt*(-alpha*x[1,n] + expit(ie*x[0,n] - ii*x[1,n] + iE*y[0,n] - iI*y[1,n] + hi)) + ddt*noise*np.random.randn()
        y[0,n+1] = y[0,n] + dt*(-alpha*y[0,n] + expit(EE*y[0,n] - EI*y[1,n] + HE)) + ddt*noise*np.random.randn()
        y[1,n+1] = y[1,n] + dt*(-alpha*y[1,n] + expit(IE*y[0,n] - II*y[1,n] + HI)) + ddt*noise*np.random.randn()
    return x, y, he, hi
def time_dep_network(Ts, dt, A, B, noise):
    """Simulate a 2-D linear system whose coupling oscillates in time.

    dx = (A + cos(.01*t) * B) x dt + noise * sqrt(dt) * dW, starting from the
    origin. Ts is the duration in seconds; returns the (2, N) trajectory.
    """
    steps = int(1000 * Ts / dt)
    sqdt = np.sqrt(dt)
    traj = np.zeros([2, steps])
    for n in range(steps - 1):
        mod = np.cos(.01 * n * dt)
        w00 = A[0, 0] + mod * B[0, 0]
        w01 = A[0, 1] + mod * B[0, 1]
        w10 = A[1, 0] + mod * B[1, 0]
        w11 = A[1, 1] + mod * B[1, 1]
        traj[0, n + 1] = traj[0, n] + dt * (w00 * traj[0, n] + w01 * traj[1, n]) + sqdt * noise * np.random.randn()
        traj[1, n + 1] = traj[1, n] + dt * (w10 * traj[0, n] + w11 * traj[1, n]) + sqdt * noise * np.random.randn()
    return traj
def cros_network(Ts, dt, noise, alpha, xx, XX, xX, min_low_band, max_low_band, min_high_band, max_high_band):
    """Simulate the coupled fast/slow network and return its coupling profile."""
    x, y, he, hi = network(Ts, dt, noise, alpha, xx, XX, xX)
    combined = sum(x + y)
    return all_cros(combined, dt, Ts * 1000, min_low_band, max_low_band, min_high_band, max_high_band)
def check_CF():
    """Sanity check of the cross-frequency measure on a synthetic signal.

    The signal is a 4 rad/sample-scale slow cosine plus an 18 rad/sample-scale
    fast cosine modulated by the squared (1 - cos) envelope of the slow one,
    so phase-amplitude coupling is present by construction.
    """
    X = np.cos(4*np.arange(0,50,.01))+((1-1.0*np.cos(4*np.arange(0,50,.01)))**2)*np.cos(18*np.arange(0,50,.01))
    a = filt(X, 1, 3, 30)  # NOTE(review): result unused -- overwritten below
    a = Cros(X,1,20,50,100,200)
    #plt.plot(X[0:1000])
    plt.plot(a)
    #plt.plot(np.abs(fft(X)[1:200]))
def eig_independent():
    """Print Jacobians, eigenvalues and a stability trace for the two
    uncoupled E/I subsystems, linearized at the fixed point (.2, .3).

    Prints to stdout and returns None.
    """
    alpha = .05
    # NOTE(review): the original cell first unpacked xx/XX/xX matrices and then
    # immediately overwrote every value with the scalar literals below; the dead
    # assignments have been removed.
    # Fast (lower-case) subsystem weights
    ee = 24.
    ii = 11.
    ei = 24.
    ie = 28.
    # Slow (upper-case) subsystem weights
    EE = 22.
    II = 9.5
    EI = 17.
    IE = 14
    # Cross couplings (not used in the Jacobians below)
    eE = 10.
    iI = 0
    eI = 0
    iE = 0
    # Fixed-point rates
    e = .2
    i = .3
    E = .2
    I = .3
    # Jacobians of each uncoupled subsystem at its fixed point; the factors
    # r*(1 - alpha*r) presumably come from the derivative of the sigmoidal
    # transfer at the fixed point -- TODO confirm against `network`.
    Df = np.array([[-alpha+alpha*ee*e*(1-alpha*e), -alpha*ei*e*(1-alpha*e)],[alpha*ie*i*(1-alpha*i),-alpha-(alpha*ii*i*(1-alpha*i))]])
    DF = np.array([[-alpha+alpha*EE*E*(1-alpha*E), -alpha*EI*E*(1-alpha*E)],[alpha*IE*I*(1-alpha*I),-alpha-alpha*II*I*(1-alpha*I)]])
    # print(...) is valid in both Python 2 and Python 3; the original bare
    # `print x` statements made this cell a SyntaxError under Python 3.
    print(Df)
    print(DF)
    print(np.linalg.eig(Df))
    print(np.linalg.eig(DF))
    print(np.trace(np.linalg.inv(Df+np.transpose(Df))))
def pws(A, m = 100., dw = 1.):
    """Analytic power-spectrum trace of the linear system dx = A x dt + dW.

    Evaluates trace(G G^H) with G = (A + 2*pi*f*1j*I)^-1 at frequencies
    f = 0, dw, ..., up to m, and returns the resulting array.
    """
    out = np.zeros(int(m / dw))
    for idx, freq in enumerate(np.arange(0, m, dw)):
        shifted = A + 2 * np.pi * freq * 1j * np.eye(2)
        g = inv(shifted)
        out[idx] = np.real(np.trace(np.dot(g, np.transpose(np.conj(g)))))
    return out
def time_dep_pws(A, B, m = 100., dw = 1.):
    """Power-spectrum trace for a cosine-modulated linear system.

    Like pws, but the resolvent is averaged over 100 equally spaced phases of
    the modulation cos(phi), with coupling A + cos(phi) * B.
    """
    out = np.zeros(int(m / dw))
    samples = 100
    for idx, freq in enumerate(np.arange(0, m, dw)):
        acc = np.zeros(A.shape)
        for j in range(samples):
            phase = np.cos((2 * j * np.pi) / samples)
            shifted = A + 2 * np.pi * freq * 1j * np.eye(2)
            shifted[0, 0] += phase * B[0, 0]
            shifted[0, 1] += phase * B[0, 1]
            shifted[1, 0] += phase * B[1, 0]
            shifted[1, 1] += phase * B[1, 1]
            g = inv(shifted)
            acc = acc + np.dot(g, np.transpose(np.conj(g)))
        acc = acc / samples
        out[idx] = np.real(np.trace(acc))
    return out
# -
# Time-independent case: B = 0, so the coupling matrix is constant
A = np.array([[-.02, -.2],[.2, -.04]])
B = 0*np.random.randn(2,2)
x = time_dep_network(10, .01, A, B, .001)
X = sum(x)
ff = abs(fft(X))
# Average the empirical spectrum over `it` independent noise realizations
it = 20
for i in range(it -1):
    x = time_dep_network(10, .01, A, B, .001)
    X = sum(x)
    ff += abs(fft(X))
ff = ff/it
fff = ff[0:1000]
fff = fff/sum(fff)
f, (ax1, ax2) = plt.subplots(2,1)
f.set_size_inches(7,10)
ax1.plot(np.arange(0,1000,.01),X[0:100000])
ax2.plot(np.arange(0,100,.1),fff)
# Overlay the analytic prediction from pws (dashed), rescaled by eye
I = pws(1000*A, dw = .1)
I = I/sum(I)
ax2.plot(np.arange(0,100,.1),.57*I[0:1000],'k--')
f.savefig("/Volumes/Arch/Dropbox/simple.eps", format = "eps")
# +
# Time-dependent case: nonzero B makes the coupling oscillate in time
A = np.array([[-.02, -.2],[.2, -.04]])
B = np.array([[-.01, -.1],[.1, .01]])
x = time_dep_network(10, .01, A, B, .001)
X = sum(x)
ff = abs(fft(X))
# Average the empirical spectrum over `it` independent noise realizations
it = 20
for i in range(it -1):
    x = time_dep_network(10, .01, A, B, .001)
    X = sum(x)
    ff += abs(fft(X))
ff = ff/it
fff = ff[0:1000]
fff = fff/sum(fff)
# -
f, (ax1, ax2) = plt.subplots(2,1)
f.set_size_inches(7,10)
ax1.plot(np.arange(0,1000,.01),X[300000:400000])
ax2.plot(np.arange(0,100,.1),fff)
# Analytic spectrum averaged over one modulation period (dashed), rescaled by eye
I = time_dep_pws(1000*A, 1000*B, dw = .1)
I = I/sum(I)
ax2.plot(np.arange(0,100,.1),.8*I[0:1000],'k--')
f.savefig("/Volumes/Arch/Dropbox/complex.eps", format = "eps")
# # Network of two oscilators
#
# The Network is a stochastic rate model;
# $$\frac{dr}{dt} = -r + f(Wr+h) + N_t$$
# Where $r$ is the rate of activity of each population, W is the matrix of the connection and f is a non-linear function, here sigmoid, and $N_t$ is a gaussian noise.
# The network consists of 2 pairs of Excitatory/Inhibitory neurons. When they are not connected
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from scipy.special import expit
# %matplotlib inline
# Connection matrices: fast pair (xx), slow pair (XX), fast <- slow (xX)
xx = np.array([[24., 24.],[28., 11.]])
XX = np.array([[22., 17.],[14.5, 9.5]])
xX = np.array([[2., 0.],[0., 0.]])
# Fixed-point rates and simulation settings
e = .2
i = .3
E = .2
I = .3
alpha = .05
dt = .01
Ts = 1
T = 1000*Ts
noise = .0005
x, y, he, hi = network(Ts = Ts, dt = dt, noise = noise, alpha = alpha, xx = xx, XX = XX, xX = xX)
# Phase portrait of the fast pair: normalized vector field on a 20x20 grid
Y, X = np.mgrid[0:1:20j, 0:1:20j]
U = -alpha*X + expit(xx[0,0]*X - xx[0,1]*Y + he)
V = -alpha*Y + expit(xx[1,0]*X - xx[1,1]*Y + hi)
speed = np.sqrt(U**2 + V**2)
UN = U/speed
VN = V/speed
plt.quiver(X, Y, UN, VN, # data
           U, # colour the arrows based on this array
           cmap=cm.seismic, # colour map
           headlength=7) # length of the arrows
plt.colorbar() # adds the colour bar
# Nullclines of the excitatory (red) and inhibitory (blue) populations
mesh = np.arange(0.001,1,.001)
val1 = (np.log(1./(alpha*mesh) -1) + xx[0,0]*mesh + he)/xx[0,1]
val2 = (-np.log(1./(alpha*mesh) -1) + xx[1,1]*mesh - hi)/xx[1,0]
plt.plot(mesh, val1,'r')
plt.plot(val2, mesh,'b')
# Simulated trajectory on top of the portrait
plt.plot(x[0,:], x[1,:],'k')
plt.axis([0, 1, 0, 1]);
# -
# # Fast oscillator
# Excitatory signal, E/I signal, the amplitude and power spectrum in 1 second.
# +
rec = x[0,-int((1000)/dt):]   # last second of the fast excitatory trace
a = np.mean(rec)
amp = np.abs(hilbert(rec-a))  # instantaneous amplitude (envelope)
f, ((ax1, ax2), (ax3,ax4)) = plt.subplots(2,2)
f.set_size_inches(18,12)
ax1.plot(rec)
ax3.plot(rec)
ax3.plot(amp+a,'r')
ax2.plot(x[0,-int((1000)/dt):], x[1,-int((1000)/dt):],'k')
ax4.plot(np.abs(fft(rec)[1:80]))
# -
# # Slow oscillator
# Excitatory signal, E/I signal, the amplitude and power spectrum in 1 second.
rec = y[0,-int((1000)/dt):]   # last second of the slow excitatory trace
a = np.mean(rec)
ang = .01*np.angle(hilbert(rec-a))  # instantaneous phase, scaled for plotting
f, ((ax1, ax2), (ax3,ax4)) = plt.subplots(2,2)
f.set_size_inches(18,12)
ax1.plot(rec)
ax3.plot(rec)
ax3.plot(ang+a,'r')
ax2.plot(y[0,-int((1000)/dt):], y[1,-int((1000)/dt):],'k')
ax4.plot(np.abs(fft(rec)[1:80]))
# # The activity of whole network
# The sum of the two previous signals, decomposed into a low-pass band (with
# phase) and a high-pass band (with amplitude), over 1 second.
# +
t0 = 1000
rec = x[0,-int((t0)/dt):]+y[0,-int((t0)/dt):]+x[1,-int((t0)/dt):]+y[1,-int((t0)/dt):]
L = filt(rec,1,4,18)    # low-frequency band (bins 4-18)
H = filt(rec,1,30,60)   # high-frequency band (bins 30-60)
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
f.set_size_inches(8,8)
mH = np.mean(H)
mL = np.mean(L)
mr = np.mean(rec)
ax1.plot(rec-mr)
amp = np.abs(hilbert(H-mH))   # envelope of the high band
ang = np.angle(hilbert(L-mL)) # phase of the low band
ax2.plot(L,'r')
ax2.plot(.01*ang+mL,'g')
ax3.plot(H,'r')
ax3.plot(amp+mH,'g')
# -
# # Power spectrum of whole network
plt.plot(np.abs(fft(rec)[1:120]))
# # Cross frequency for $\alpha$ and $\theta$ bands
rec = sum(x) + sum(y)
a = all_cros(rec,dt,T,4,18,30,60)
plt.plot(a);
print Entropy(a)
# # Cross frequency vs. the weight between two networks
# Sweep the e->E coupling strength and measure how the phase-amplitude
# coupling (quantified by the entropy-based modulation index) depends on it.
m = 10
cr = np.zeros(m)
for i in range(m):
    xx = np.array([[24., 24.],[28., 11.]])
    XX = np.array([[22., 17.],[14.5, 9.5]])
    # Only the e->E weight varies (0, 2, ..., 18); other cross couplings stay 0
    xX = np.array([[i*2, 0.],[0., 0.]])
    alpha = .05
    dt = .01
    Ts = 5
    T = 1000*Ts
    noise = .0005
    x, y, he, hi = network(Ts = Ts, dt = dt, noise = noise, alpha = alpha, xx = xx, XX = XX, xX = xX)
    rec = sum(x) + sum(y)
    a = all_cros(rec,dt,T,4,18,30,60)
    cr[i] = Entropy(a)
plt.plot(cr)
plt.xlabel("the weight of e-E connection")  # fixed label typo: "weith" -> "weight"
plt.ylabel("Entropy value")
| QCCF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pyspark.sql import SparkSession
# Reuse an active Spark session if one exists, otherwise create a new one
spark = SparkSession.builder.getOrCreate()
# -
# # Basic Statistics
#
# > __Note:__ marked as _experimental_
#
# ## Correlation
# - MLlib Main Guide: https://spark.apache.org/docs/2.4.3/ml-statistics.html#correlation
# - API Docs: https://spark.apache.org/docs/2.4.3/api/python/pyspark.ml.html#pyspark.ml.stat.Correlation
#
# Calculating the correlation between two series of data is a common operation in Statistics. Spark MLlib provides the flexibility to calculate pairwise correlations among many series.
#
# ### `ml.stat.Correlation`
#
# `Correlation` computes the correlation matrix for the input `DataFrame` of `Vector`s using the specified method. The output will be a `DataFrame` that contains the correlation matrix of the column of vectors.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/34/Correlation_coefficient.png/800px-Correlation_coefficient.png" alt="Correlation coefficient" width="35%"/>
# <small>Examples of scatter diagrams with different values of correlation coefficient (ρ)<br/>Source: wikipedia</small>
#
# To use correlation, one uses the `Correlation` class from the `pyspark.ml.stat` module.
# On this class, call the `.corr` method.
#
# Parameters for `Correlation.corr`
# - `dataset` – A DataFrame.
# - `column` – The name of the column of vectors for which the correlation coefficient needs to be computed. This must be a column of the dataset, and it must contain `Vector` objects.
# - `method` – String specifying the method to use for computing correlation. Supported: pearson (default), spearman.
#
# The supported correlation methods are currently:
# - [Pearson's correlation coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) (default)
# `method="pearson"`
# Code example:
# ```python
# Correlation.corr(dataframe, column="<column_of_vectors>", method="pearson")
# ```
#
#
# - [Spearman's rank correlation coefficient](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient)
# Code example
# ```python
# df = DataFrame.cache()
# Correlation.corr(df, column="<column_of_vectors>", method="spearman")
# ```
#
# >__Note:__ For Spearman, a rank correlation, Spark needs to create an `RDD[Double]` for each column and sort it in order to retrieve the ranks and then join the columns back into an `RDD[Vector]`, which is a fairly costly operation. Hence, cache the input `DataFrame` before calling `.corr` with `method=‘spearman’` to avoid recomputing the common lineage.
#
#
#
#
# #### Example:
#
# +
from pyspark.ml.linalg import Vectors
from pyspark.ml.stat import Correlation
# Four 4-dimensional vectors; sparse and dense representations can be mixed
data = [
    (Vectors.sparse(4, [(0, 1.0), (3, -2.0)]),),
    (Vectors.dense([4.0, 5.0, 0.0, 3.0]),),
    (Vectors.dense([6.0, 7.0, 0.0, 8.0]),),
    (Vectors.sparse(4, [(0, 9.0), (3, 1.0)]),),
]
df = spark.createDataFrame(data, ["features"])
# Pearson (the default method)
print("Pearson correlation matrix:")
r1 = Correlation.corr(df, "features")
r1.show()
print(f"{r1.first()[0]}\n")
# Spearman -- cache first, since the rank computation re-reads the input
df = df.cache()
print("Spearman correlation matrix:")
r2 = Correlation.corr(df, "features", "spearman")
r2.show()
print(f"{r2.first()[0]}\n")
# -
# ## Hypothesis Testing
#
# - MLlib Main Guide: https://spark.apache.org/docs/2.4.3/ml-statistics.html#hypothesis-testing
#
# Hypothesis testing is a powerful tool in statistics to determine whether a result is statistically significant, whether this result occurred by chance or not. Spark's MLlib Main Guide only makes mentions of the `ChiSquareTest`, but it also supports `KolmogorovSmirnovTest`. The `KolmogorovSmirnovTest` feature was newly added in Spark 2.4.0 and has not made it to the official guide yet.
#
# ### `ml.stat.ChiSquareTest`
# - Wikipedia: [Chi-squared test](https://en.wikipedia.org/wiki/Chi-squared_test)
# - API Docs: https://spark.apache.org/docs/2.4.3/api/python/pyspark.ml.html#pyspark.ml.stat.ChiSquareTest
#
# <p>
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/8e/Chi-square_distributionCDF-English.png/600px-Chi-square_distributionCDF-English.png" alt="Chi-square distribution" width="30%" /><br/>
# <small>Chi-squared distribution, showing χ2 on the x-axis and p-value (right tail probability) on the y-axis.<br/>Source: wikipedia</small>
# </p>
#
# Conducts Pearson’s independence test for every feature against the label. For each feature, the (feature, label) pairs are converted into a contingency matrix for which the Chi-squared statistic is computed. All label and feature values must be categorical.
#
# The null hypothesis is that the occurrence of the outcomes is statistically independent.
#
# To use Chi-squared test, one uses the `ChiSquareTest` class from the `pyspark.ml.stat` module.
# On this class, call the `.test` method.
#
# Parameters for `ChiSquareTest.test`
#
# - `dataset` – DataFrame of categorical labels and categorical features. Real-valued features will be treated as categorical for each distinct value.
# - `featuresCol` – Name of features column in dataset (must be of type `Vector`).
# - `labelCol` – Name of label column in dataset (any numerical type).
#
# Code example:
# ```python
# ChiSquareTest.test(dataframe, featuresCol="<features_column>", labelCol="<label_column>")
# ```
#
# ### `ml.stat.KolmogorovSmirnovTest`
# - Wikipedia: [Kolmogorov-Smirnov Test](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test)
# - API Docs: https://spark.apache.org/docs/2.4.3/api/python/pyspark.ml.html#pyspark.ml.stat.KolmogorovSmirnovTest
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/c/cf/KS_Example.png" alt="Kolmogorov–Smirnov statistic" width="30%"/>
# <small>Illustration of the Kolmogorov–Smirnov statistic. Red line is CDF, blue line is an ECDF, and the black arrow is the K–S statistic<br/>Source: wikipedia</small>
#
# Conduct the two-sided Kolmogorov Smirnov (KS) test for data sampled from a continuous distribution.
#
# By comparing the largest difference between the empirical cumulative distribution of the sample data and the theoretical distribution we can provide a test for the null hypothesis that the sample data comes from that theoretical distribution.
#
# To use Kolmogorov–Smirnov, one uses the `KolmogorovSmirnovTest` class from the `pyspark.ml.stat` module.
# On this class, call the `.test` method.
#
# Parameters for `KolmogorovSmirnovTest.test`
# (positional only)
# - `dataset` – a DataFrame containing the sample of data to test.
# - `sampleCol` – Name of sample column in dataset, of any numerical type.
# - `distName` – a string name for a theoretical distribution, (currently only support “norm”).
# - `*params` – Double values specifying the parameters to be used for the theoretical distribution. For “norm” distribution, the parameters includes mean and variance.
# - `mean`
# - `variance`
#
# Code example:
# ```python
# KolmogorovSmirnovTest.test(dataframe, "<sample_column>", "norm", 0.1, 1.0)
# ```
#
# ### Examples
# +
from pyspark.ml.linalg import Vectors
from pyspark.ml.stat import ChiSquareTest, KolmogorovSmirnovTest
# ChiSquareTest: six labelled rows; feature values are treated as categorical
data = [
    (0.0, Vectors.dense(0.5, 10.0)),
    (0.0, Vectors.dense(1.5, 20.0)),
    (1.0, Vectors.dense(1.5, 30.0)),
    (0.0, Vectors.dense(3.5, 30.0)),
    (0.0, Vectors.dense(3.5, 40.0)),
    (1.0, Vectors.dense(3.5, 40.0)),
]
df = spark.createDataFrame(data, ["label", "features"])
r = ChiSquareTest.test(df, "features", "label")
print("\nChiSquareTest:")
print("in:")
df.show()
print("out:")
r.show(1, False)
print(f" pValues: {r.first().pValues}")
print(f" degreesOfFreedom: {r.first().degreesOfFreedom}")
print(f" statistics: {r.first().statistics}")
# KolmogorovSmirnovTest: compare the sample column against N(0, 1)
data = [[0.1], [0.15], [0.2], [0.3], [0.25]]
df = spark.createDataFrame(data, ["sample"])
r = KolmogorovSmirnovTest.test(df, 'sample', 'norm', 0.0, 1.0)
print("\nKolmogorovSmirnovTest:")
print("in:")
df.show()
print("out:")
r.show()
# Summary of the test including the p-value, test statistic
print("DataFrame based result:")
print(f" pValue: {round(r.first().pValue, 5)}")
print(f" statistic: {round(r.first().statistic, 5)}\n")
# Note that the Scala functionality of calling Statistics.kolmogorovSmirnovTest with
# a lambda to calculate the CDF is not made available in the Python API
# The RDD-based API for KolmogorovSmirnovTest outputs quite a bit nicer, which shows
# the lack of feature parity with the RDD-based API for this feature
from pyspark.mllib.stat import Statistics
# needs an rdd to run, creating an rdd with similar data
data = [0.1, 0.15, 0.2, 0.3, 0.25]
rdd = spark.sparkContext.parallelize(data)
# run a KS test for the sample versus a standard normal distribution
testResult = Statistics.kolmogorovSmirnovTest(rdd, "norm", 0, 1)
# Note that the Scala functionality of calling Statistics.kolmogorovSmirnovTest with
# a lambda to calculate the CDF is not made available in the Python API
# summary of the test including the p-value, test statistic, and null hypothesis
# if our p-value indicates significance, we can reject the null hypothesis
print("RDD based result:")
print(testResult)
# -
# # Summarizer
#
# Spark provides vector column summary statistics for Dataframes through Summarizer. Available metrics are the column-wise max, min, mean, variance, and number of nonzeros, as well as the total count.
#
# This feature was newly added in Spark 2.4.0 - hence it is quite 'fresh'. I found some typos in the documentation related specifically to this feature. Currently, the performance of this interface is about 2x-3x slower compared to using the equivalent RDD interface.
#
#
# ### `ml.stat.Summarizer`
# API guide: https://spark.apache.org/docs/2.4.3/api/python/pyspark.ml.html#pyspark.ml.stat.Summarizer
# Tools for vectorized statistics on MLlib Vectors. The methods in this package provide various statistics for `Vectors` contained inside `DataFrame`s. This class lets users pick the statistics they would like to extract for a given column.
#
# There are two ways to use this class, singular or multiple. Instructions below.
#
# #### 1. Computing singular metrics:
# - `mean(col, weightCol=None)`
# coefficient-wise mean.
# - `variance(col, weightCol=None)`
# coefficient-wise variance.
# - `count(col, weightCol=None)`
# count of all vectors seen
# - `numNonZeros(col, weightCol=None)`
# number of non-zeros for each coefficient
# - `max(col, weightCol=None)`
# maximum for each coefficient
# - `min(col, weightCol=None)`
# minimum for each coefficient
# - `normL1(col, weightCol=None)`
# L1 norm of each coefficient (sum of the absolute values)
# - `normL2(col, weightCol=None)`
# Euclidean norm for each coefficient
#
# #### 2. Multiple metrics
# To compute multiple metrics, first run `Summarizer.metrics` with the specific metrics that you want to compute.
#
# - `Summarizer.metrics(*metrics)`
# Given a list of metrics, provides a builder that computes metrics from a column.
# Available metrics (same as singular metrics above): `mean`, `variance`, `count`, `numNonZeros`, `max`, `min`, `normL1`, `normL2`
#
# This returns an instance of the `SummaryBuilder` class. On this return instance of `SummaryBuilder` use the `.summary()` method.
# ```python
# SummaryBuilder.summary("featuresCol", weightCol=None)
# ```
#
# ### Examples
# +
from pyspark.ml.stat import Summarizer
from pyspark.ml.linalg import Vectors
# Two rows: a weight column and a 3-dimensional feature vector
data = [
    (1.0, Vectors.dense(1.0, 1.0, 1.0)),
    (0.0, Vectors.dense(1.0, 2.0, 3.0)),
]
df = spark.createDataFrame(data, ["weight", "features"])
# compute statistics for single metric "mean" with weight
df.select(Summarizer.mean(df.features, df.weight)).show(truncate=False)
# compute statistics for single metric "mean" without weight
df.select(Summarizer.mean(df.features)).show(truncate=False)
# create summarizer for multiple metrics "mean", "count", "numNonZeros"
summarizer = Summarizer.metrics("mean", "count", "numNonZeros")
# compute statistics for multiple metrics with weight
df.select(summarizer.summary(df.features, df.weight)).show(truncate=False)
# compute statistics for multiple metrics without weight
df.select(summarizer.summary(df.features)).show(truncate=False)
# -
| Section 5 - Classification and Regression/5.5/pyspark.ml.stat.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernel_info:
// name: .net-csharp
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
#r "nuget:Microsoft.ML,1.4.0"
#r "nuget:Microsoft.ML.AutoML,0.16.0"
#r "nuget:Microsoft.Data.Analysis,0.1.0"
using Microsoft.Data.Analysis;
using XPlot.Plotly;
using Microsoft.AspNetCore.Html;
// Register a custom HTML formatter so DataFrame values render as a table
// in .NET Interactive output instead of the default ToString() text.
Formatter<DataFrame>.Register((df, writer) =>
{
    // Header row: an "index" column followed by one header per DataFrame column.
    var headers = new List<IHtmlContent>();
    headers.Add(th(i("index")));
    headers.AddRange(df.Columns.Select(c => (IHtmlContent) th(c.Name)));
    // Render at most the first 20 rows to keep notebook output compact.
    var rows = new List<List<IHtmlContent>>();
    var take = 20;
    for (var i = 0; i < Math.Min(take, df.RowCount); i++)
    {
        // One cell for the row index, then one per column value.
        var cells = new List<IHtmlContent>();
        cells.Add(td(i));
        foreach (var obj in df[i])
        {
            cells.Add(td(obj));
        }
        rows.Add(cells);
    }
    // Assemble the <table> and emit it as the "text/html" representation.
    var t = table(
        thead(
            headers),
        tbody(
            rows.Select(
                r => tr(r))));
    writer.Write(t);
}, "text/html");
using System.IO;
using System.Net.Http;
// Download the California housing dataset once and cache it locally.
string housingPath = "housing.csv";
if (!File.Exists(housingPath))
{
    var contents = new HttpClient()
        .GetStringAsync("https://raw.githubusercontent.com/ageron/handson-ml2/master/datasets/housing/housing.csv").Result;
    // Write through housingPath rather than repeating the file-name literal,
    // so changing the path in one place stays consistent.
    File.WriteAllText(housingPath, contents);
}
var housingData = DataFrame.LoadCsv(housingPath);
housingData
housingData.Description()
Chart.Plot(
new Graph.Histogram()
{
x = housingData["median_house_value"],
nbinsx = 20
}
)
// +
var chart = Chart.Plot(
new Graph.Scattergl()
{
x = housingData["longitude"],
y = housingData["latitude"],
mode = "markers",
marker = new Graph.Marker()
{
color = housingData["median_house_value"],
colorscale = "Jet"
}
}
);
chart.Width = 600;
chart.Height = 600;
display(chart);
// +
/// <summary>
/// Randomly permutes <paramref name="array"/> in place using the
/// Fisher-Yates algorithm and returns the same array instance.
/// </summary>
static T[] Shuffle<T>(T[] array)
{
    var rng = new Random();
    for (int current = 0; current < array.Length; current++)
    {
        // Pick a random position in the not-yet-shuffled tail [current, Length).
        int swapWith = current + rng.Next(array.Length - current);
        (array[swapWith], array[current]) = (array[current], array[swapWith]);
    }
    return array;
}
int[] randomIndices = Shuffle(Enumerable.Range(0, (int)housingData.RowCount).ToArray());
int testSize = (int)(housingData.RowCount * .1);
int[] trainRows = randomIndices[testSize..];
int[] testRows = randomIndices[..testSize];
DataFrame housing_train = housingData[trainRows];
DataFrame housing_test = housingData[testRows];
display(housing_train.RowCount);
display(housing_test.RowCount);
// -
using Microsoft.ML;
using Microsoft.ML.Data;
using Microsoft.ML.AutoML;
// +
%%time
var mlContext = new MLContext();
var experiment = mlContext.Auto().CreateRegressionExperiment(maxExperimentTimeInSeconds: 15);
var result = experiment.Execute(housing_train, labelColumnName:"median_house_value");
// +
var scatters = result.RunDetails.Where(d => d.ValidationMetrics != null).GroupBy(
r => r.TrainerName,
(name, details) => new Graph.Scattergl()
{
name = name,
x = details.Select(r => r.RuntimeInSeconds),
y = details.Select(r => r.ValidationMetrics.MeanAbsoluteError),
mode = "markers",
marker = new Graph.Marker() { size = 12 }
});
var chart = Chart.Plot(scatters);
chart.WithXTitle("Training Time");
chart.WithYTitle("Error");
display(chart);
Console.WriteLine($"Best Trainer:{result.BestRun.TrainerName}");
// +
var testResults = result.BestRun.Model.Transform(housing_test);
var trueValues = testResults.GetColumn<float>("median_house_value");
var predictedValues = testResults.GetColumn<float>("Score");
var predictedVsTrue = new Graph.Scattergl()
{
x = trueValues,
y = predictedValues,
mode = "markers",
};
var maximumValue = Math.Max(trueValues.Max(), predictedValues.Max());
var perfectLine = new Graph.Scattergl()
{
x = new[] {0, maximumValue},
y = new[] {0, maximumValue},
mode = "lines",
};
var chart = Chart.Plot(new[] {predictedVsTrue, perfectLine });
chart.WithXTitle("True Values");
chart.WithYTitle("Predicted Values");
chart.WithLegend(false);
chart.Width = 600;
chart.Height = 600;
display(chart);
| NotebookExamples/csharp/Samples/HousingML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import Libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
# ## Read CSV
df = pd.read_csv("allyear.csv")
df.drop(['county'],axis=1,inplace=True)
df.reset_index()
df
# ## Variable Cutoff
# +
# get all explanatory variable names (every column except the response,
# POP_SNAP, which is the last column)
explanatory_vars = list(df.columns)[:-1]

# unique unordered pairs of explanatory variables
# (replaces the manual offset-counter loop, which also fetched each
# DataFrame column per pair without using it)
from itertools import combinations
combos = list(combinations(explanatory_vars, 2))

opvars = []

# variable cutoff: for each highly collinear pair (|r| > 0.85) keep the
# variable that correlates more strongly with the response POP_SNAP
snapcol = df['POP_SNAP']
for first, second in combos:
    c1 = df[first]
    c2 = df[second]
    corr = abs(c1.corr(c2))
    if corr > 0.85:
        corr_snap1 = abs(c1.corr(snapcol))
        corr_snap2 = abs(c2.corr(snapcol))
        # Same selection rule as before: prefer `first` when it tracks the
        # response better; otherwise fall back to `second` if not yet kept.
        if corr_snap1 > corr_snap2 and first not in opvars:
            opvars.append(first)
        elif second not in opvars:
            opvars.append(second)
print(opvars)
# -
# set new dataframe with optimized variables and response variable POP_SNAP
opvars.append('POP_SNAP')
df = df[opvars]
df
# ## Multiple Linear Regression
x = df.drop(['POP_SNAP'],axis=1).values
y = df['POP_SNAP'].values
# +
# splitting dataset into training and test set
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.33, random_state=0)
# training model on training set
ml = LinearRegression()
ml.fit(x_train,y_train)
# predict test set results
y_pred=ml.predict(x_test)
plt.figure(figsize=(8,5))
plt.scatter(y_test,y_pred,color="darkblue")
plt.xlabel('Actual', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.ylabel('Predicted', fontsize=16)
plt.title('Method 2A', fontsize=16)
# +
# define our intput
X2 = sm.add_constant(x)
# create a OLS model
model = sm.OLS(y, X2)
# fit the data
est = model.fit()
print(est.summary())
# -
| method2/metric2A.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.1
# language: julia
# name: julia-0.6
# ---
# +
using algT
using GeometryTypes
algT.Node
a = algT.Node(0, algT.Pt2D(0., 1.), 1)
@show a
a.parentID = 2
@show a
# -
# +
include("testRRT.jl")
plotly()
main()
# -
include("RRT.jl")
# +
rand(Uniform(0,20))
a = false
b = false
if !a && !b
print("hi")
end
# -
module fun
using algT
algT.Node
end
# +
module MyModule
export MyType, foo
type MyType
x
end
bar(x) = 2x
foo(a::MyType) = bar(a.x) + 1
show(io, a::MyType) = print(io, "MyType $(a.x)")
end
module runTest
using MyModule
f= MyModule.MyType(1)
@show f
end
# -
workspace()
using GeometryTypes
Pt2D{T} = Point{2, Float64}
Line2D{T} = LineSegment{Pt2D}
Point{2, Float64}
Point(2, Float64)
# +
using GeometryTypes
a = LineSegment(Point(2.0, 3.0), Point(4.0,4.))
b = LineSegment(Point(1.0, 3.0), Point(3.0,4.))
@show intersects(a,b)[1]
a = LineSegment([2,3], [4,5])
b = LineSegment([1,3], [5,6])
intersects(a,b)[1]
# -
| wk11/testRRT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/22a478af3edc00f69693c3f5f4604b2f1fd024b0/sleeping-alone-data/sleeping-alone-data.csv', encoding='ISO-8859-1')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 50)
# **Replacing the old column names to new names**
# +
new_col_name = ['StartDate','EndDate','CurrentRelationshipStatus', 'RelationshipLength','Frequency in separate beds',
'YouSleepAt','OtherPlace','PartnerSleepAt','PartnerOtherPlace','Reason_One of us snores',
'Reason_One of us makes frequent bathroom trips in the night',
'Reason_One of us is sick','Reason_We are no longer physically intimate',
'Reason_We have different temperature preferences for the room',
'Reason_We have had an argument or fight','Reason_Not enough space',
'Reason_Do not want to share the covers','Reason_One of us needs to sleep with a child',
'Reason_Night working/very different sleeping times','Reason_Other',
'First time in separate beds', 'Sleeping in separate beds helps us to stay together',
'We sleep better when we sleep in separate beds', 'Our sex life has improved as a result of sleeping in separate beds',
'Occupation','Other Occupation','Gender','Age','Household income','Education','Location']
old_col_name = data.columns
replace_name = dict(zip(old_col_name, new_col_name))
# -
df = data.copy().rename(columns = replace_name)
df.drop(0, axis=0, inplace=True)
# **Replace other occupation to the actual occupation**
# +
# Replace the 'Other (please specify)' placeholder with the free-text answer.
# Bug fix: the original loop used range(1, len(...)), which skipped the last
# row (the index starts at 1 after row 0 was dropped), and wrote via chained
# indexing (df['Occupation'][i] = ...), which raises SettingWithCopyWarning
# and may not modify df. A vectorized .loc assignment covers every row and
# mutates df directly.
other_mask = df['Occupation'] == 'Other (please specify)'
df.loc[other_mask, 'Occupation'] = df.loc[other_mask, 'Other Occupation']
df.drop('Other Occupation', axis = 1, inplace=True)
# -
# **Replace different groups into number**
# +
df['Household income'].value_counts()
df['Household income'] = df['Household income'].fillna('Did not disclose')
df['Household income_code'] = df['Household income'].map({'$0 - $24,999':1, '$25,000 - $49,999':2, '$50,000 - $99,999':3,
'$100,000 - $149,999':4, '$150,000+':5, 'Did not disclose':0})
# +
df['Age'].value_counts()
df['Age'] = df['Age'].fillna('Did not disclose')
df['Age_code'] = df['Age'].map({'18-29':1, '30-44':2, '45-60':3, '> 60':4, 'Did not disclose':0})
# +
df['Education'].value_counts()
df['Education'] = df['Education'].fillna('Did not disclose')
df['Education_code'] = df['Education'].map({'Graduate degree':5, 'Bachelor degree':4, 'Some college or Associate degree':3,
'High school degree':2, 'Less than high school degree':1, 'Did not disclose':0})
# +
df['Gender'].value_counts()
# female:1 male:0
df['Gender'] = df['Gender'].fillna('Did not disclose')
df['Gender_code'] = df['Gender'].map({'Male':0, 'Female':1, 'Did not disclose':2})
# +
df['RelationshipLength'].value_counts()
# Less than 1 year:0
df['RelationshipLength_code'] = df['RelationshipLength'].map({'More than 20 years':5, '16-20 years':4, '11-15 years':3,
'6-10 years':2, '1-5 years':1, 'Less than 1 year':0})
# +
df['Frequency in separate beds'].value_counts()
# Never:0
df['Frequency in separate beds_code'] = df['Frequency in separate beds'].map(
{'Every night':5, 'A few times per week':4, 'A few times per month':3,
'Once a month or less':2, 'Once a year or less':1, 'Never':0})
# -
df['Location'] = df['Location'].fillna('Did not disclose')
df['Occupation'] = df['Occupation'].fillna('Did not disclose')
df
# ## Demographic data
demographic = df[['CurrentRelationshipStatus', 'RelationshipLength','Frequency in separate beds','Occupation','Gender',
'Age','Household income','Education','Location','Gender_code','Age_code','Household income_code',
'Education_code', 'RelationshipLength_code', 'Frequency in separate beds_code']]
demographic.to_csv('Demographic_data.csv')
# ## Separate the data into two subsets: one is sleeping together, the other is separate.
separate_df = df[df['Frequency in separate beds'] != 'Never']
together_df = df[df['Frequency in separate beds'] == 'Never']
# ### Cleaning together data
# 507 sleeping separately
#
# 586 sleep together
together_df = together_df[['StartDate','EndDate','CurrentRelationshipStatus', 'RelationshipLength','Frequency in separate beds',
'Occupation','Gender','Age','Household income','Education','Location','Gender_code','Age_code','Household income_code',
'Education_code']]
together_df.isna().sum()
together_df.to_csv("sleep_together.csv")
# ### Cleaning separate data
separate_df.isna().sum()
# +
separate_reasons =['Reason_One of us snores',
'Reason_One of us makes frequent bathroom trips in the night',
'Reason_One of us is sick','Reason_We are no longer physically intimate',
'Reason_We have different temperature preferences for the room',
'Reason_We have had an argument or fight','Reason_Not enough space',
'Reason_Do not want to share the covers','Reason_One of us needs to sleep with a child',
'Reason_Night working/very different sleeping times','Reason_Other']
separate_df[separate_reasons] = separate_df[separate_reasons].fillna(0)
for reason in separate_reasons:
separate_df[reason][separate_df[reason] != 0] = 1
# +
# NOTE(review): this scale looks inverted/inconsistent -- 'Strongly disagree'
# maps to 5 while 'Strongly agree' maps to 2, yet the *_code columns created
# later map 'Strongly agree' to 5. Also, because this loop overwrites the
# answers with numbers, the later string-keyed .map() calls on these same
# columns will produce NaN for every row -- confirm which mapping is intended.
QA = ['Sleeping in separate beds helps us to stay together','We sleep better when we sleep in separate beds',
     'Our sex life has improved as a result of sleeping in separate beds']
for question in QA:
    # Treat a missing answer as 0 before converting the Likert strings.
    separate_df[question] = separate_df[question].fillna(0)
    separate_df[question] = separate_df[question].map({'Strongly disagree':5, 'Somewhat agree':4,'Neither agree nor disagree':3,
                                                      'Strongly agree':2,'Somewhat disagree':1})
# NOTE(review): .fillna(0) without assignment (or inplace=True) is a no-op here.
separate_df['Sleeping in separate beds helps us to stay together'].fillna(0)
# +
separate_reasons =['Reason_One of us snores',
'Reason_One of us makes frequent bathroom trips in the night',
'Reason_One of us is sick','Reason_We are no longer physically intimate',
'Reason_We have different temperature preferences for the room',
'Reason_We have had an argument or fight','Reason_Not enough space',
'Reason_Do not want to share the covers','Reason_One of us needs to sleep with a child',
'Reason_Night working/very different sleeping times','Reason_Other']
df[separate_reasons] = df[separate_reasons].fillna(0)
for reason in separate_reasons:
df[reason][df[reason] != 0] = 1
# -
separate_df['Sleeping in separate beds helps us to stay together_code'] = separate_df['Sleeping in separate beds helps us to stay together'].map({
'Strongly agree':5, 'Somewhat agree':4, 'Neither agree nor disagree':3, 'Somewhat disagree':2, 'Strongly disagree':1
})
separate_df['We sleep better when we sleep in separate beds_code'] = separate_df['We sleep better when we sleep in separate beds'].map({
'Strongly agree':5, 'Somewhat agree':4, 'Neither agree nor disagree':3, 'Somewhat disagree':2, 'Strongly disagree':1
})
separate_df['Our sex life has improved as a result of sleeping in separate beds_code'] = separate_df['Our sex life has improved as a result of sleeping in separate beds'].map({
'Strongly agree':5, 'Somewhat agree':4, 'Neither agree nor disagree':3, 'Somewhat disagree':2, 'Strongly disagree':1
})
separate_df.drop(['OtherPlace', 'PartnerOtherPlace'], axis=1, inplace=True)
separate_df['YouSleepAt'].value_counts()
separate_df.dropna(inplace=True)
separate_df.to_csv('sleep_separately.csv')
| data_cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fitting a Polynomial
#
# In this tutorial, we will show how to use the generic curve fitting class `kontrol.curvefit.CurveFit` to fit a polynomial.
#
# `kontrol.curvefit.CurveFit` is a low-level class for curve fitting.
# It uses optimization to minimize a cost function, e.g. mean squared error, to fit a curve.
# It requires at least 5 specifications,
#
# * `xdata`: the independent variable data,
# * `ydata`: the dependent variable data,
# * `model`: The model,
# * `cost`: the cost function, and
# * `optimizer`: the optimization algorithm.
#
# In addition, keyword arguments can be specified to the model and optimizer as `model_kwargs` and `optimizer_kwargs`.
#
# The functions `model`, `cost`, and `optimizer` takes a specific format. See documentation or tutorial below on how to construct them, or simply use the predefined ones in `kontrol`.
# Here, we will create the data to be fitted, which is a simple polynomial.
#
# \begin{equation}
# y = \sum_{i=0} a_i x^i
# \end{equation}
# +
# Prepare the data
import numpy as np
import matplotlib.pyplot as plt
xdata = np.linspace(-1, 1, 1024)
np.random.seed(123)
random_args = np.random.random(5)*2 - 1 # Generate some random args to be fitted.
def polynomial(x, args, **kwargs):
    """Evaluate a polynomial with coefficients ``args`` at ``x``.

    Parameters
    ----------
    x : array
        Points on the independent axis.
    args : array
        Polynomial coefficients, lowest order first.

    Returns
    -------
    array
        args[0]*x**0 + args[1]*x**1 ... args[len(args)-1]*x**(len(args)-1).
    """
    total = 0.0
    for order, coefficient in enumerate(args):
        total = total + coefficient * x**order
    return total
ydata = polynomial(xdata, random_args)
print(random_args)
# -
# We see that the coefficients are
#
# \begin{equation}
# a_i = \begin{bmatrix}0.39293837 & -0.42772133 & -0.54629709 & 0.10262954 & 0.43893794\end{bmatrix}
# \end{equation}
#
# Now let's see if we can recover it.
# +
import kontrol.curvefit
import scipy.optimize
# Assemble the fit: data, model, cost function, then optimizer.
a = kontrol.curvefit.CurveFit()
a.xdata = xdata
a.ydata = ydata
a.model = polynomial
error_func = kontrol.curvefit.error_func.mse ## Mean square error
a.cost = kontrol.curvefit.Cost(error_func=error_func)
# If we know the boundary of the coefficients,
# scipy.optimize.differential_evolution would be a suitable optimizer.
a.optimizer = scipy.optimize.differential_evolution
a.optimizer_kwargs = {"bounds": [(-1, 1)]*5, "workers": -1, "updating": "deferred"} ## workers=-1 uses all available CPU cores.
a.fit()
de_args = a.optimized_args
de_fit = a.yfit
print(de_args)
# -
# If we know the initial guess instead,
# scipy.optimize.minimize can be used.
# In this case, we choose the Powell algorithm.
# We also intentionally fit with 6th-order polynomial instead of 5th-order one.
a.optimizer = scipy.optimize.minimize
a.optimizer_kwargs = {"x0": [0]*6, "method": "Powell"} ## Start from [0, 0, 0, 0, 0, 0] -- six coefficients, deliberately one more than the true polynomial.
a.fit()
pw_args = a.optimized_args
pw_fit = a.yfit
print(pw_args)
# In both cases we see the parameters are recovered well. Now let's look at some plots.
## Plot
plt.figure(figsize=(10, 5))
plt.plot(xdata, ydata, "-", label="Data", lw=5)
plt.plot(xdata, de_fit, "--", label="Fit with differetial evolution", lw=3)
plt.plot(xdata, pw_fit, "-.", label="Fit with Powell")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(which="both")
| docs/source/tutorials/curve_fitting.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# # Entanglement-Assisted Communication Value for Axisymmetric Channels
#
# In this notebook, we explore the entanglement-assisted communication value for axisymmetric channels.
# These channels are constructed from the axisymmetric states described in https://arxiv.org/pdf/1505.01833.pdf.
# They are important to investigate because all PPT states are also separable, meaning that `pptCVPrimal` computes the exact communication value.
# The entanglement-assisted communication value is bounded as
#
# $$
# CV_{ea}(\mathcal{N}) \leq d_b CV(\mathcal{N}),
# $$
#
# where $\mathcal{N}$ is the axisymmetric channel and $d_b$ is the Hilbert space dimension of the output of $\mathcal{N}$. Here, we plot the difference
#
# $$
# CV_{ea}(\mathcal{N}) - d_b CV(\mathcal{N})
# $$
#
# over the entire axisymmetric parameter space to identify the parameters at which the bound is achieved and where large separations occur. From these plots, note an interesting feature, the maximal separation from the upper bound is $-d(d-1)$:
#
# $$
# \max_{x,y} CV_{ea}(\mathcal{N}_{x,y}) - d CV(\mathcal{N}_{x,y}) = -d(d-1),
# $$
#
# where $\mathcal{N}_{x,y}$ is the axisymmetric channel with parameters $x$ and $y$.
using CVChannel
using Plots
"""
This function scans over the parameter space for axisymmetric channels
and computes the communication value for each axisymmetric channel.
Two parameters, x and y, are scanned with their respective number of
samples across the domain of the axisymmetric states.
The returned data is arranged to be plotted as a contour plot.
"""
function scan_axisymmetric_cv(d, x_samples, y_samples)
    # Valid (x, y) ranges for axisymmetric states of dimension d.
    x_bound = CVChannel._axisymmetric_x_bounds(d)
    y_bound = CVChannel._axisymmetric_y_bounds(d)
    x_step = (x_bound[2]-x_bound[1])/x_samples
    y_step = (y_bound[2]-y_bound[1])/y_samples
    x_range = x_bound[1]:x_step:x_bound[2]
    y_range = y_bound[1]:y_step:y_bound[2]
    # Initialise every grid point to -d(d-1), the maximal separation, so
    # points outside the admissible x-constraint region keep that value.
    cv_diff = ones(Float64, length(y_range), length(x_range))*(-d)*(d-1)
    for y_id in 1:length(y_range)
        y = y_range[y_id]
        # For this y, only x values within these constraints give valid states.
        x_constraints = CVChannel._axisymmetric_x_constraints(d,y)
        xid_lower = findfirst(x_id -> x_range[x_id] ≥ x_constraints[1], 1:length(x_range))
        xid_upper = findlast(x_id -> x_range[x_id] ≤ x_constraints[2], 1:length(x_range))
        for x_id in xid_lower:xid_upper
            x = x_range[x_id]
            # Choi matrix of the channel is d times the axisymmetric state.
            ρ_axi = axisymmetricState(d,x,y)
            J_N = d * ρ_axi
            # Entanglement-assisted CV minus d times the (PPT-exact) CV.
            (ea_cv_N, ea_σAB_N) = eaCVDual(J_N, d, d)
            (ppt_cv_N, ppt_σAB_N) = pptCVDual(J_N, d, d)
            cv_diff[y_id,x_id] = ea_cv_N - d * ppt_cv_N
        end
    end
    return (x_range, y_range, cv_diff)
end;
# ## d = 2
d = 2
x_range, y_range, cvs = scan_axisymmetric_cv(d,40,40)
contour(x_range,y_range,cvs,
fill=true,
xlabel="x",
ylabel="y",
title="Difference between `eaCV(N) - d*CV(N)` \nfor Axisymmetric Channels (d = $d)",
)
# ## d= 3
d = 3
x_range, y_range, cvs = scan_axisymmetric_cv(d,40,40)
contour(x_range,y_range,cvs,
fill=true,
xlabel="x",
ylabel="y",
title="Difference between `eaCV(N) - d*CV(N)` \nfor Axisymmetric Channels (d = $d)",
)
# ## d = 4
d = 4
x_range, y_range, cvs = scan_axisymmetric_cv(d,40,40)
contour(x_range,y_range,cvs,
fill=true,
xlabel="x",
ylabel="y",
title="Difference between `eaCV(N) - d*CV(N)` \nfor Axisymmetric Channels (d = $d)",
)
# ## d = 5
d = 5
x_range, y_range, cvs = scan_axisymmetric_cv(d,20,20)
contour(x_range,y_range,cvs,
fill=true,
xlabel="x",
ylabel="y",
title="Difference `eaCV(N) - d*CV(N)` \nfor Axisymmetric Channels (d = $d)",
)
# ## d = 6
d = 6
x_range, y_range, cvs = scan_axisymmetric_cv(d,20,20)
contour(x_range,y_range,cvs,
fill=true,
xlabel="x",
ylabel="y",
title="Difference between `eaCV(N) - d*CV(N)` \nfor Axisymmetric Channels (d = $d)",
)
# ## d = 7
d = 7
@time x_range, y_range, cvs = scan_axisymmetric_cv(d,20,20)
contour(x_range,y_range,cvs,
fill=true,
xlabel="x",
ylabel="y",
title="Difference between `eaCV(N) - d*CV(N)` \nfor Axisymmetric Channels (d = $d)",
)
| notebook/eaCV_of_axisymmetric_channels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Gray Image Transformation
# +
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
# -
image = mpimg.imread('images/car1.jpg')
print('Image dimensions:', image.shape)
# +
# Change from color to grayscale.
# Bug fix: the converted image was stored as `imag` while the undefined
# name `gray_image` was plotted, raising NameError at runtime.
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
plt.imshow(gray_image, cmap='gray')
# -
| gray_image_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from collections import deque
from env.tsp_env import TspEnv
from utils import tsp_plots
import numpy as np
import pandas as pd
import random
import time
import torch
import torch.nn as nn
import torch.optim as optim
# ## Set parameters
NUMBER_OF_CITIES = 10
NUMBER_OF_NETS = 10
# Discount rate of future rewards
GAMMA = 1.0
# Learing rate for neural network
LEARNING_RATE = 0.001
# Maximum number of game steps (state, action, reward, next state) to keep
MEMORY_SIZE = 100000
# Number of transitions sampled from memory per neural net training batch
BATCH_SIZE = 5
# Number of game steps to play before starting training
REPLAY_START_SIZE = 1000
# Exploration rate (epsilon) is probability of choosing a random action
EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.01
# Reduction in epsilon with each game step
EXPLORATION_DECAY = 0
# Number of steps between target network update
SYNC_TARGET_STEPS = 1000
# Set stopping conditions
MAXIMUM_RUNS = 50000
MAXIMUM_TIME_MINS = 480
NO_IMPROVEMENT_RUNS = 5000
NO_IMPROVEMENT_TIME = 120
# Set whether to plot all new best routes as they are found
PLOT_NEW_BEST_ROUTES = True
class DuellingDQN(nn.Module):
    """
    Duelling Deep Q Network solver.

    Splits the Q estimate into a state-value stream and an
    action-advantage stream (Q = V + A - mean(A)). Provides an `act`
    method that picks the next unvisited city either greedily by Q or
    at random (epsilon-greedy exploration).
    """

    def __init__(self, observation_space, action_space, learning_rate):
        """Constructor method. Set up neural nets.

        Parameters
        ----------
        observation_space : int
            Length of the state vector fed to the network.
        action_space : int
            Number of possible actions (cities).
        learning_rate : float
            Adam learning rate.
        """
        # Initialise nn.Module before assigning any attributes so that
        # sub-module registration is guaranteed to work.
        super(DuellingDQN, self).__init__()
        # Set up action space (choice of possible actions)
        self.action_space = action_space
        # Shared feature extractor.
        self.feature = nn.Sequential(
            nn.Linear(observation_space, observation_space * 4),
            nn.ReLU(),
            nn.Linear(observation_space * 4, observation_space * 4),
            nn.ReLU()
        )
        # Advantage stream: one output per action.
        self.advantage = nn.Sequential(
            nn.Linear(observation_space * 4, observation_space * 4),
            nn.ReLU(),
            nn.Linear(observation_space * 4, action_space)
        )
        # Value stream: single state-value output.
        self.value = nn.Sequential(
            nn.Linear(observation_space * 4, observation_space * 4),
            nn.ReLU(),
            nn.Linear(observation_space * 4, 1)
        )
        # Set optimizer
        self.optimizer = optim.Adam(
            params=self.parameters(), lr=learning_rate)

    def act(self, state, exploration_rate):
        """Act either randomly or by predicting the action with max Q.

        Only unvisited cities are candidates; if every city has been
        visited, the action is to return to city 0.
        """
        # Build a table of (city, q, visited flag, random tie-breaker).
        action_df = pd.DataFrame()
        action_df['city'] = np.arange(NUMBER_OF_CITIES)
        q_values = self.forward(torch.FloatTensor(state)).detach().numpy()[0]
        action_df['q'] = q_values
        # The first NUMBER_OF_CITIES entries of the state flag visited cities.
        action_df['visited'] = state[0][0:len(q_values)]
        action_df['random'] = np.random.random(NUMBER_OF_CITIES)
        # Filter to unvisited cities
        mask = action_df['visited'] == 0
        action_df = action_df[mask]
        # If no unvisited cities, action is to return to city 0
        if mask.sum() == 0:
            action = 0
        else:
            # Sort action table by Q or random order (for exploration)
            sort_col = ('random' if np.random.rand() < exploration_rate
                        else 'q')
            action_df.sort_values(sort_col, ascending=False, inplace=True)
            action = int(action_df['city'].iloc[0])
        return action

    def forward(self, x):
        """Return Q(s, a) by combining the value and advantage streams."""
        x = self.feature(x)
        advantage = self.advantage(x)
        value = self.value(x)
        # Subtract the mean advantage for identifiability (duelling DQN).
        action_q = value + advantage - advantage.mean()
        return action_q
class Memory():
    """
    Bounded replay buffer for DQN training.

    Wraps a fixed-length deque of (state, action, reward, next_state,
    done) tuples; once full, the oldest entries are discarded first.
    """

    def __init__(self, memory_size):
        """Create an empty buffer capped at `memory_size` entries."""
        self.memory = deque(maxlen=memory_size)

    def remember(self, state, action, reward, next_state, done):
        """Append one (state, action, reward, next_state, done) tuple."""
        experience = (state, action, reward, next_state, done)
        self.memory.append(experience)
def optimize_duelling_dqn(policy_net, target_net, memory, batch_size, gamma):
    """
    Update model by sampling from memory.

    Uses policy network to predict best action (best Q).
    Uses target network to provide target of Q for the selected next
    action (double DQN).

    Parameters
    ----------
    policy_net : DuellingDQN
        Network being trained; supplies its own optimizer.
    target_net : DuellingDQN
        Periodically synchronised copy providing stable Q targets.
    memory : deque
        Replay memory of (state, action, reward, next_state, done) tuples.
    batch_size : int
        Number of transitions sampled per call.
    gamma : float
        Discount rate applied to the best next-state Q value.
    """
    # Do not try to train model if memory is less than required batch size
    if len(memory) < batch_size:
        return
    # Sample a random batch from memory
    batch = random.sample(memory, batch_size)
    # NOTE(review): transitions are processed (and back-propagated) one at a
    # time rather than as a batched tensor -- simpler, but slower.
    for state, action, reward, state_next, terminal in batch:
        # Q values predicted for the current state (keeps gradients for loss).
        state_action_values = policy_net(torch.FloatTensor(state))
        if not terminal:
            # For non-terminal actions get Q from policy net
            expected_state_action_values = policy_net(torch.FloatTensor(state))
            # Detach next state values from gradients to prevent updates
            expected_state_action_values = expected_state_action_values.detach()
            # Get next state action with best Q from the policy net (double DQN)
            policy_next_state_values = policy_net(torch.FloatTensor(state_next))
            policy_next_state_values = policy_next_state_values.detach()
            best_action = np.argmax(policy_next_state_values[0].numpy())
            # Get target net next state
            next_state_action_values = target_net(torch.FloatTensor(state_next))
            # Use detach again to prevent target net gradients being updated
            next_state_action_values = next_state_action_values.detach()
            best_next_q = next_state_action_values[0][best_action].numpy()
            # Bellman update for the action actually taken.
            updated_q = reward + (gamma * best_next_q)
            expected_state_action_values[0][action] = updated_q
        else:
            # For terminal actions Q = reward (-1)
            expected_state_action_values = policy_net(torch.FloatTensor(state))
            # Detach values from gradients to prevent gradient update
            expected_state_action_values = expected_state_action_values.detach()
            # Set Q for all actions to reward (-1)
            expected_state_action_values[0] = reward
        # Update neural net
        # Reset net gradients
        policy_net.optimizer.zero_grad()
        # calculate loss
        loss_v = nn.MSELoss()(state_action_values, expected_state_action_values)
        # Backpropagate loss
        loss_v.backward()
        # Update network gradients
        policy_net.optimizer.step()
    return
def main():
    """Main program loop: train a bootstrapped ensemble of duelling DQNs on
    the travelling-salesperson environment.

    Relies on module-level names defined earlier in the notebook (outside
    this view): TspEnv, DuellingDQN, Memory, tsp_plots, and the upper-case
    hyperparameter constants (NUMBER_OF_CITIES, NUMBER_OF_NETS, MEMORY_SIZE,
    EXPLORATION_*, BATCH_SIZE, GAMMA, SYNC_TARGET_STEPS, REPLAY_START_SIZE,
    MAXIMUM_RUNS, MAXIMUM_TIME_MINS, NO_IMPROVEMENT_*). TODO confirm their
    definitions match this usage.
    """
    print()
    # Set up environment
    time_start = time.time()
    env = TspEnv(number_of_cities = NUMBER_OF_CITIES,
                 grid_dimensions = (100,100))
    exploration_rate = EXPLORATION_MAX
    # Get number of observations returned for state
    # (doubled — presumably the env state is duplicated/augmented; verify
    # against TspEnv.observation_space)
    observation_space = env.observation_space.shape[0] * 2
    # Get number of actions possible
    n_actions = len(env.action_space)
    # Set up policy and target neural nets (one pair per ensemble member)
    policy_nets = [DuellingDQN(observation_space, n_actions, LEARNING_RATE) for i in range(NUMBER_OF_NETS)]
    target_nets = [DuellingDQN(observation_space, n_actions, LEARNING_RATE) for i in range(NUMBER_OF_NETS)]
    # Copy weights from policy_net to target
    for i in range(NUMBER_OF_NETS):
        target_nets[i].load_state_dict(policy_nets[i].state_dict())
        # Set target net to eval rather than training mode
        # We do not train target net - it is copied from policy net at intervals
        target_nets[i].eval()
    # Set up a single replay memory shared by all ensemble members
    memory = Memory(MEMORY_SIZE)
    # Set up list for results
    results_run = []
    results_exploration = []
    total_rewards = []
    best_reward = -999999
    best_route = None
    # Set run and time of last best route
    run_last_best = 0
    time_last_best = time.time()
    # Set up run counter and learning loop
    step = 0
    run = 0
    continue_learning = True
    # Continue repeating games (episodes) until target complete
    while continue_learning:
        # Increment run (episode) counter
        run += 1
        total_reward = 0
        # Start run and get first state observations
        state, reward, terminal, info = env.reset()
        total_reward += reward
        # Reshape state into 2D array with state observations as first 'row'
        state = np.reshape(state, [1, observation_space])
        # Reset route
        route = []
        # Episode loop
        while True:
            # Increment step counter
            step += 1
            # Each net proposes an action; one is picked uniformly at random
            # (bootstrap-style exploration across the ensemble)
            actions = [policy_nets[i].act(state, exploration_rate) for i in range(NUMBER_OF_NETS)]
            random_index = random.randint(0, NUMBER_OF_NETS - 1)
            action = actions[random_index]
            route.append(action)
            # Act
            state_next, reward, terminal, info = env.step(action)
            total_reward += reward
            # Get observations for new state (s')
            state_next = np.reshape(state_next, [1, observation_space])
            # Record state, action, reward, new state & terminal
            memory.remember(state, action, reward, state_next, terminal)
            # Update state
            state = state_next
            # Update neural net once enough experience has been collected
            if len(memory.memory) >= REPLAY_START_SIZE:
                # Update policy net
                for i in range(NUMBER_OF_NETS):
                    optimize_duelling_dqn(policy_nets[i], target_nets[i], memory.memory, BATCH_SIZE, GAMMA)
                # Update the target network at intervals
                if step % SYNC_TARGET_STEPS == 0:
                    for i in range(NUMBER_OF_NETS):
                        target_nets[i].load_state_dict(policy_nets[i].state_dict())
            # Actions to take if end of game episode
            if terminal:
                # Clear print row content (overwrite the progress line in place)
                clear_row = '\r' + ' '*100 + '\r'
                print (clear_row, end ='')
                print (f'Run: {run: 5.0f}, ', end='')
                exp = exploration_rate
                print (f'exploration: {exp: 4.3f}, ', end='')
                print (f'total reward: {total_reward: 6.0f}', end='')
                # Add to results lists
                results_run.append(run)
                results_exploration.append(exploration_rate)
                total_rewards.append(total_reward)
                # Check for best route so far
                if total_reward > best_reward:
                    best_reward = total_reward
                    best_route = route
                    run_last_best = run
                    time_last_best = time.time()
                    time_elapsed = (time.time() - time_start) / 60
                    print(f'\nNew best run. Run : {run: 5.0f}, ' \
                          f'Time {time_elapsed: 4.0f} ' \
                          f'Reward {total_reward: 6.0f}')
                    # Plot new best route
                    if PLOT_NEW_BEST_ROUTES and step > REPLAY_START_SIZE:
                        if best_reward > 0:
                            tsp_plots.plot_route(env, best_route)
                print()
                # Adjust (decay) exploration rate only once learning has begun
                if step > REPLAY_START_SIZE:
                    exploration_rate *= EXPLORATION_DECAY
                    exploration_rate = max(EXPLORATION_MIN, exploration_rate)
                # Check stopping conditions
                stop = False
                if step > REPLAY_START_SIZE:
                    if run == MAXIMUM_RUNS:
                        stop = True
                    elif time.time() - time_start > MAXIMUM_TIME_MINS * 60:
                        stop = True
                    elif time.time() - time_last_best > NO_IMPROVEMENT_TIME*60:
                        stop = True
                    elif run - run_last_best == NO_IMPROVEMENT_RUNS:
                        stop = True
                if stop:
                    # End training
                    continue_learning = False
                # End episode
                break
    ############################# Plot results #################################
    # Plot result progress
    tsp_plots.plot_result_progress(total_rewards)
    # Plot best route
    tsp_plots.plot_route(env, best_route)
    ###################### Show route and distances ############################
    print ('Route')
    print (best_route)
    print ()
    print ('Best route distance')
    print (f'{env.state.calculate_distance(best_route):.0f}')
# Run training when the notebook cell is executed
main()
| tsp_rl/.ipynb_checkpoints/tsp_bootstrap_duelling_unvisited_10-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
# First cell: plain matplotlib line chart of two series
# (labels/titles are German: "Neukunden" = new customers,
# "Interessenten" = prospects)
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.dpi = 100
x = [1, 2, 3, 4, 5, 6, 7, 8]
y = [2, 4, 5, 3, 4, 5, 7, 2]
y2 = [4, 10, 7, 10, 4, 6, 10, 4]
ax.plot(x, y, label="Neukunden")
ax.plot(x, y2, label="Interessenten")
ax.legend()
ax.set_ylim([0, 11])
ax.set_xlim([1, 8])
ax.set_title("Performance im Laufe der Zeit")
ax.set_xlabel("Datum")
ax.set_ylabel("Anzahl")
plt.show()
# +
# Second cell: identical chart after applying a seaborn style — the
# sns.set_style call changes matplotlib defaults globally.
import seaborn as sns
sns.set_style("whitegrid") # white, whitegrid, dark, darkgrid
fig, ax = plt.subplots()
fig.dpi = 100
x = [1, 2, 3, 4, 5, 6, 7, 8]
y = [2, 4, 5, 3, 4, 5, 7, 2]
y2 = [4, 10, 7, 10, 4, 6, 10, 4]
ax.plot(x, y, label="Neukunden")
ax.plot(x, y2, label="Interessenten")
ax.legend()
ax.set_ylim([0, 11])
ax.set_xlim([1, 8])
ax.set_title("Performance im Laufe der Zeit")
ax.set_xlabel("Datum")
ax.set_ylabel("Anzahl")
plt.show()
# -
| UDEMY_Datavis_Python/03 - matplotlib basics/Seaborn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import Movie Review Data
# Set the seed for reproducibility
import numpy as np
np.random.seed(42)
# Import the dataset as pandas dataframe
import pandas as pd
# Data can be downloaded from Kaggle at the following URL
#
# - https://www.kaggle.com/c/word2vec-nlp-tutorial/data
# quoting=3 (QUOTE_NONE) because the reviews contain embedded double quotes
df = pd.read_csv('labeledTrainData.tsv',header=0, delimiter="\t", quoting=3)
df.head()
df.shape
# Split Data into Training and Test Data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    df['review'],
    df['sentiment'],
    test_size=0.2,
    random_state=42
)
type(X_train)
# # Build the Tokenizer
# NOTE(review): `tensorflow.python.*` is a private import path; the public
# equivalent is `tensorflow.keras.preprocessing.text` — confirm TF version.
from tensorflow.python.keras.preprocessing.text import Tokenizer
top_words = 5000
t = Tokenizer(num_words=top_words) # num_words -> vocabulary size
# Fit on the training split only (avoids leaking test vocabulary)
t.fit_on_texts(X_train.tolist())
# Get the Training and Test Data as TF-IDF document-term matrices
X_train = t.texts_to_matrix(X_train.tolist(),mode='tfidf')
X_train.shape
X_test = t.texts_to_matrix(X_test.tolist(),mode='tfidf')
X_test.shape
# # Build the Graph
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dropout, Dense
# Fully connected classifier over the 5000-dim TF-IDF vector; sigmoid output
# for binary sentiment.
model = Sequential()
model.add(Dense(200,activation='relu',input_shape=(5000,)))
model.add(Dense(100,activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(60,activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(30,activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(1,activation='sigmoid'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
# # Execute the graph
model.fit(X_train,y_train,epochs=3,batch_size=128,validation_data=(X_test, y_test))
| 2. Sentiment_Analysis_TFIDF_FC_Layer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Watershed segmentation of touching objects, following the classic OpenCV
# tutorial recipe: threshold -> clean -> sure background/foreground ->
# watershed markers.
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('partition2.png')
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# Otsu picks the threshold automatically; INV because objects are dark
ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)
# sure background area
sure_bg = cv.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
# Keep only pixels far (>70% of max distance) from any edge
ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region (neither surely background nor surely foreground)
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg,sure_fg)
cv.imshow('sure_bg',sure_bg)
cv.imshow('sure_fg',sure_fg)
cv.imshow('unknown',unknown)
# Marker labelling
ret, markers = cv.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv.watershed(img,markers)
# Watershed marks boundary pixels with -1; paint them blue (BGR [255,0,0])
img[markers == -1] = [255,0,0]
cv.imshow('img',img)
cv.waitKey(0)
cv.destroyAllWindows()
# Alternative matplotlib display path, kept for notebook use:
# titles = ['Original Image','thresh']
# plt.figure(dpi=800)
# img = img[:, :, ::-1]
# images = [img, thresh]
# for i in range(2):
#     plt.subplot(1,2,i+1),plt.imshow(images[i],'gray')
#     plt.title(titles[i])
#     plt.xticks([]),plt.yticks([])
# plt.show()
| opencv_tutorial/opencv_py/3ImageProcessWaterSehed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
# %matplotlib inline
import seaborn as sns
# +
# read in csv file of scraped LinkedIn job postings
df = pd.read_csv('LinkedIn_Job_Postings.csv')
df.head()
# -
df.shape
df.describe()
df.dtypes
df.isnull().sum()
df.info()
# ## Which companies have the most job postings?
# Count openings by company (top 20)
jobs_by_company = df.groupby(['Company'])['Company'].count().sort_values(ascending=False).head(20)
jobs_by_company
# +
# Plot postings by company; re-sorted ascending so the largest bar is on top
plt.figure(figsize=(12,8))
jobs_by_company.sort_values().plot(kind='barh')
plt.xlabel("Number of Postings", size=15)
plt.ylabel("Company", size=15)
plt.title("Companies with the Most Job Openings", size=18)
# -
# ## What are the most frequently occurring job titles that are posted?
# Count number of jobs by title (top 20)
jobs_by_title = df.groupby(['Title'])['Title'].count().sort_values(ascending=False).head(20)
jobs_by_title
# +
# Plot number of jobs by title
plt.figure(figsize=(12,8))
jobs_by_title.sort_values().plot(kind='barh')
plt.xlabel("Number of Jobs Posted", size=15)
plt.ylabel("Job Titles", size=15)
plt.title("Open Roles by Job Title", size = 18)
# -
| Notebooks/Linkedin Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A Guide to Metrics (Estimates) in Exploratory Data Analysis
from IPython import display
display.Image("https://www.ealizadeh.com/wp-content/uploads/2020/12/bp08_featured_image.png")
# **Website: https://ealizadeh.com**
#
# **Medium: https://medium.com/@ealizadeh**
#
# Copyright © 2020 <NAME>
# ---
# Exploratory data analysis (EDA) is an important step in any data science project. We always try to get a glance of our data by computing descriptive statistics of our dataset. If you are like me, the first function you call might be Pandas dataframe.describe() to obtain descriptive statistics. While such analysis is important, we often underestimate the importance of choosing the correct sample statistics/metrics/estimates.
#
# In this post, we will go over several metrics that you can use in your data science projects. In particular, we are going to cover several estimates of location and variability and their robustness (sensitivity to outliers).
#
# The following common metrics/estimates are covered in this article:
# - Estimates of location (first moment of the distribution)
# - mean, trimmed/truncated mean, weighted mean
# - median, weighted median
# - Estimates of variability (second moment of the distribution)
# - range
# - variance and standard deviation
# - mean absolute deviation, median absolute deviation
# - percentiles (quantiles)
#
# For each metric, we will cover:
# - The definition and mathematical formulation along with some insights.
# - Whether the metric is robust (sensitiveness to extreme cases)
# - Python implementation and an example
#
# Note: The focus of this article is on the metrics and estimates used in the univariate analysis of numeric data.
# ## Estimates of Location
#
# Estimates of location are measures of the central tendency of the data (where most of the data is located). In statistics, this is usually referred to as the first moment of a distribution.
# ### Python Implementation
#
# Let's first import all necessary Python libraries and generate our dataset.
# +
import pandas as pd
import numpy as np
from scipy import stats
import robustats  # third-party: pip install robustats (C-backed robust estimators)
# Toy dataset: the value 20 is a deliberate outlier used to illustrate
# (non-)robustness of the estimators below.
df = pd.DataFrame({
    "data": [2, 1, 2, 3, 2, 2, 3, 20],
    "weights": [1, 0.5, 1, 1, 1, 1, 1, 0.5]  # Not necessarily add up to 1!!
})
data, weights = df["data"], df["weights"]
# -
# You can use NumPy's average() function to calculate the mean and weighted mean (equations 1.1 & 1.2). For computing truncated mean, you can use trim_mean() from the SciPy stats module. A common choice for truncating the top and bottom of the data is 10%[1].
#
# You can use NumPy's median() function to calculate the median. For computing the weighted median, you can use weighted_median() from the robustats Python library (you can install it using pip install robustats). Robustats is a high-performance Python library to compute robust statistical estimators implemented in C.
#
# For computing the mode, you can either use the mode() function either from the robustats library that is particularly useful on large datasets or from scipy.stats module.
# +
mean = np.average(data)  # You can use Pandas dataframe.mean()
weighted_mean = np.average(data, weights=weights)
# NOTE: scipy trims int(proportiontocut * n) values from EACH end; with n=8
# and 0.1 that is 0 values, so the truncated mean equals the mean here.
truncated_mean = stats.trim_mean(data, proportiontocut=0.1)
median = np.median(data)  # You can use Pandas dataframe.median()
weighted_median = robustats.weighted_median(x=data, weights=weights)
mode = stats.mode(data)  # You can also use robustats.mode() on larger datasets
print("Mean: ", mean.round(3))
print("Weighted Mean: ", weighted_mean.round(3))
print("Truncated Mean: ", truncated_mean.round(3))
print("Median: ", median)
print("Weighted Median: ", weighted_median)
print("Mode: ", mode)
# -
# Now, let's see if we just remove 20 from our data, how that will impact our mean.
mean = np.average(data[:-1])  # Remove the last data point (20)
print("Mean: ", mean.round(3))
# You can see how the last data point (20) impacted the mean (4.375 vs 2.143). There can be many situations that we may end up with some outliers that should be cleaned from our datasets like faulty measurements that are in orders of magnitude away from other data points.
# ## Estimates of Variability
# The second dimension (or moment) addresses how the data is spread out (variability or dispersion of the data). For this, we have to measure the difference (aka residual) between an estimate of location and an observed value[1].
# ### Python Implementation
#
# You can use NumPy's var() and std() function to calculate the variance and standard deviation, respectively. On the other hand, to calculate the mean absolute deviation, you can use Pandas mad() function. For computing the trimmed standard deviation, you can use SciPy's tstd() from the stats module. You can use Pandas boxplot() to quickly visualize a boxplot of the data.
# #### Example: Variability Estimates of the Sample Data
# +
# Variability estimates for the toy dataset defined above.
variance = np.var(data)
standard_deviation = np.std(data)  # df["data"].std() uses ddof=1 instead
# Mean absolute deviation around the mean, computed explicitly:
# Series.mad() was deprecated in pandas 1.5 and removed in pandas 2.0.
mean_absolute_deviation = (data - data.mean()).abs().mean()
trimmed_standard_deviation = stats.tstd(data)
median_absolute_deviation = stats.median_abs_deviation(data, scale="normal") # stats.median_absolute_deviation() is deprecated
# Percentile
Q1 = np.quantile(data, q=0.25) # Can also use data.quantile(0.25)
Q3 = np.quantile(data, q=0.75) # Can also use data.quantile(0.75)
IQR = Q3 - Q1
print("Variance: ", variance.round(3))
print("Standard Deviation: ", standard_deviation.round(3))
print("Mean Absolute Deviation: ", mean_absolute_deviation.round(3))
print("Trimmed Standard Deviation: ", trimmed_standard_deviation.round(3))
print("Median Absolute Deviation: ", median_absolute_deviation.round(3))
print("Interquantile Range (IQR): ", IQR)
# -
# ## Conclusion
# In this post, I talked about various estimates of location and variability. In particular, I covered more than 10 different sample statistics and whether they are robust metrics or not. A table of all the metrics along with their corresponding Python and R functions is summarized in Table 3. We also saw how the presence of an outlier may impact non-robust metrics like the mean. In this case, we may want to use a robust estimate. However, in some problems, we are interested in studying extreme cases and outliers such as anomaly detection.
#
# Thanks for reading!
# ## References
#
# [1] <NAME> & <NAME> (2017), *Practical Statistics for Data Scientists*, First Edition, O'Reilly
#
# [2] Wikipedia, [Truncated mean](https://en.wikipedia.org/wiki/Truncated_mean)
# ## Useful Links
# [1] <NAME> (2018), [Understanding Boxplots](https://towardsdatascience.com/understanding-boxplots-5e2df7bcbd51), Towards Data Science blog
| notebooks/A_Guide_to_Metrics_Estimates_in_EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# Lambda School Data Science
#
# *Unit 2, Sprint 3, Module 2*
#
# ---
#
#
# # Permutation & Boosting
#
# You will use your portfolio project dataset for all assignments this sprint.
#
# ## Assignment
#
# Complete these tasks for your project, and document your work.
#
# - [ ] If you haven't completed assignment #1, please do so first.
# - [ ] Continue to clean and explore your data. Make exploratory visualizations.
# - [ ] Fit a model. Does it beat your baseline?
# - [ ] Try xgboost.
# - [ ] Get your model's permutation importances.
#
# You should try to complete an initial model today, because the rest of the week, we're making model interpretation visualizations.
#
# But, if you aren't ready to try xgboost and permutation importances with your dataset today, that's okay. You can practice with another dataset instead. You may choose any dataset you've worked with previously.
#
# The data subdirectory includes the Titanic dataset for classification and the NYC apartments dataset for regression. You may want to choose one of these datasets, because example solutions will be available for each.
#
#
# ## Reading
#
# Top recommendations in _**bold italic:**_
#
# #### Permutation Importances
# - _**[Kaggle / <NAME>: Machine Learning Explainability](https://www.kaggle.com/dansbecker/permutation-importance)**_
# - [<NAME>: Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/feature-importance.html)
#
# #### (Default) Feature Importances
# - [<NAME>: Selecting good features, Part 3, Random Forests](https://blog.datadive.net/selecting-good-features-part-iii-random-forests/)
# - [<NAME>, et al: Beware Default Random Forest Importances](https://explained.ai/rf-importance/index.html)
#
# #### Gradient Boosting
# - [A Gentle Introduction to the Gradient Boosting Algorithm for Machine Learning](https://machinelearningmastery.com/gentle-introduction-gradient-boosting-algorithm-machine-learning/)
# - _**[A Kaggle Master Explains Gradient Boosting](http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/)**_
# - [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/ISLR%20Seventh%20Printing.pdf) Chapter 8
# - [Gradient Boosting Explained](http://arogozhnikov.github.io/2016/06/24/gradient_boosting_explained.html)
# - _**[Boosting](https://www.youtube.com/watch?v=GM3CDQfQ4sw) (2.5 minute video)**_
# -
import numpy as np
import pandas as pd
import os
import seaborn as sns
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.impute import SimpleImputer
import eli5
from eli5.sklearn import PermutationImportance
from xgboost import XGBClassifier
# %matplotlib inline
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# cd C:\Users\Hakuj\Documents\DataSets\Kickstarter
# ## Getting csv
def get_a_year(year):
    """Load and combine Kickstarter CSV scrapes for a given year.

    Walks Data\\<year>\\<month-folder>\\*.csv and concatenates the files into
    one DataFrame with a fixed expected column set.  Both loops are currently
    sliced with [:1] so only the first folder / first file are read while
    prototyping — widen the slices to load the whole year.

    Args:
        year: year sub-directory of 'Data' to read.

    Returns:
        pd.DataFrame of postings (has at least the expected columns; empty
        if no files were read).
    """
    expected_columns = ['backers_count', 'blurb', 'category', 'converted_pledged_amount',
                        'country', 'created_at', 'creator', 'currency', 'currency_symbol',
                        'currency_trailing_code', 'current_currency', 'deadline',
                        'disable_communication', 'fx_rate', 'goal', 'id', 'is_starrable',
                        'launched_at', 'name', 'photo', 'pledged', 'profile', 'slug',
                        'source_url', 'spotlight', 'staff_pick', 'state', 'state_changed_at',
                        'static_usd_rate', 'urls', 'usd_pledged', 'usd_type', 'location',
                        'friends', 'is_backing', 'is_starred', 'permissions']
    # Seed with an empty frame carrying the expected columns, collect each
    # month, then concatenate ONCE at the end.  (DataFrame.append was
    # deprecated in pandas 1.4 and removed in 2.0, and appending inside a
    # loop is quadratic.)
    frames = [pd.DataFrame(columns=expected_columns)]
    folders = os.listdir(f'Data\\{year}') # Get the monthly folders inside the year
    for folder in folders[:1]:
        files = os.listdir(f'Data\\{year}\\{folder}') # Get the filenames inside monthly folders
        monthly = pd.concat(
            [pd.read_csv(
                f'Data\\{year}\\{folder}\\{file}') for file in files[:1]] # Not getting a whole year for now
        ) # Reads in all the csv files in a given month
        frames.append(monthly)
    df = pd.concat(frames)
    return df
# Load one month of 2018 scrape data and inspect it
df = get_a_year(2018)
df.shape
df.describe()
df.head()
df.info()
df.isna().sum()
df['state'].value_counts()
# ## Assignment 1 redo
# ### Target selection and baseline
# - I will use funded as my target.
# - I want to have a classification of 'Funded' 'Failed' and 'Funded Early'
#     - I may not be able to do the last one
#     - I would also like to present probability
#         - suggestions on how to improve would be a good stretch goal for me.
# - I will have to engineer it from 'state'
# - I can also see if it is funded ahead of time by using 'goal' and (usd)'pledged'
# Majority-class baseline: predict 'successful' for every row and measure accuracy
base_preds = ['successful'] * len(df)
accuracy_score(base_preds, df['state'])
# ### Feature selection
# - There will be repeats as some campaigns run longer than the scrape periods, so I will have to mind that
# - I will have to be careful with time travel
# - There are some features that are mostly NaN
# - Pledged and usd_pledged are essentially the same.
# - I may not even include these in my project as I want to see if you will be funded before you start
# ## Assignment 2
def wrangle(df):
    """Convert the raw timestamp columns to datetimes and back to strings.

    Mutates `df` in place and also returns it.

    NOTE(review): format='%m%d%Y' assumes MMDDYYYY-formatted strings, but
    Kickstarter scrape timestamps are typically Unix epoch seconds, in which
    case pd.to_datetime would raise here — confirm against the raw CSVs.
    NOTE(review): this function is never called in the visible notebook.
    """
    #Time series data
    df['created_at'] = pd.to_datetime(df['created_at'], format='%m%d%Y').astype(str)
    df['deadline'] = pd.to_datetime(df['deadline'], format='%m%d%Y').astype(str)
    df['launched_at'] = pd.to_datetime(df['launched_at'], format='%m%d%Y').astype(str)
    df['state_changed_at'] = pd.to_datetime(df['state_changed_at'], format='%m%d%Y').astype(str)
    return df
# Features: drop the target ('state') plus post-outcome leakage columns
# ('pledged'/'usd_pledged' are only known after a campaign ends).
# The original listed 'state' twice in the drop list; once is enough.
X = df.drop(columns=['state', 'pledged', 'usd_pledged'])
y = df['state']
X_train, X_test,y_train, y_test = train_test_split(X, y, random_state=42)
# ### Base model
# Shallow decision tree as a first model above the majority-class baseline
pipeline1 = make_pipeline(
    SimpleImputer(strategy='most_frequent'),
    ce.OrdinalEncoder(),
    DecisionTreeClassifier(random_state=42, max_depth=3)
)
pipeline1.fit(X_train, y_train)
accuracy_score(pipeline1.predict(X_test), y_test)
# ### Permuter
# Fit preprocessing and model separately so PermutationImportance can work
# on the already-transformed feature matrix
transformer = make_pipeline(
    SimpleImputer(strategy='most_frequent'),
    ce.OrdinalEncoder()
)
model = RandomForestClassifier(random_state=42, max_depth=3)
X_train_transformed = transformer.fit_transform(X_train)
X_test_transformed = transformer.transform(X_test)
model.fit(X_train_transformed, y_train)
permuter = PermutationImportance(
    model,
    scoring='accuracy',
    n_iter=3,
    random_state=42
)
permuter.fit(X_test_transformed, y_test)
features = X_test.columns.tolist()
# NOTE(review): feature_importances_ has one entry per column; the [:30]
# slice only lines up if there are exactly 30 features — confirm len(features).
pd.Series(permuter.feature_importances_, features[:30])
eli5.show_weights(
    permuter,
    top=None,
    feature_names=features[:30]
)
# ### XGBoost
# +
# Gradient-boosted trees; no imputer needed as XGBoost handles NaNs natively
pipeline2 = make_pipeline(
    ce.OrdinalEncoder(),
    XGBClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
pipeline2.fit(X_train, y_train)
# -
accuracy_score(pipeline2.predict(X_test), y_test)
| module2/assignment_applied_modeling_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Import Pandas
# Import pandas
import pandas as pd
# # Create a Series
# Explore Series in Python at the following [link](https://pandas.pydata.org/pandas-docs/stable/user_guide/10min.html), in the first lines of the document
# + tags=[]
# Create a Series of the numbers 10, 20 and 10.
series = pd.Series([10,20,10])
series
# + tags=[]
# Create a Series with three objects: 'rojo', 'verde', 'azul'
colores = pd.Series(['rojo','verde','azul'])
colores
# -
# # Create a DataFrame
# + tags=[]
# Create an empty dataframe called 'df'
df = pd.DataFrame()
df
# + tags=[]
# Create a new column in the dataframe and assign it the first Series created above
import pandas as pd
columna = {'series':[10,20,10]}
df = pd.DataFrame(data=columna)
df
# + tags=[]
# Create another column in the dataframe and assign it the second Series created above
# NOTE(review): assign() returns a NEW DataFrame; the result is not stored
# back into df, so df itself is unchanged after this cell — confirm intended.
df.assign(colores=lambda x:['rojo','verde','azul'])
# -
# # Read a dataframe
# + tags=[]
# Read the file called 'avengers.csv' located in the "data" folder and create a DataFrame called 'avengers'.
# The file is located at "data/avengers.csv"
df_avengers = pd.read_csv("./src/pandas/avengers.csv")
# -
# # Inspect a dataframe
# + tags=[]
# Show the first 5 rows of the DataFrame.
df_avengers.head(5)
# + tags=[]
# Show the first 10 rows of the DataFrame.
df_avengers.head(10)
# + tags=[]
# Show the last 5 rows of the DataFrame.
df_avengers.tail(5)
# -
# # DataFrame size
# + tags=[]
# Show the size (total number of cells) of the DataFrame
df_avengers.size
# -
# # Data types in a DataFrame
# + tags=[]
# Show the data types of the dataframe
df_avengers = pd.read_csv("./src/pandas/avengers.csv")
df_avengers.dtypes
# -
# # Edit the index
# + tags=[]
# Change the index to the "fecha_inicio" column.
# NOTE(review): this RENAMES the column rather than setting it as the index;
# the exercise likely intends df_avengers.set_index('fecha_inicio') — confirm.
df_avengers = df_avengers.rename(columns={'fecha_inicio': 'inicio_fecha'})
# -
# # Sort the index
# + tags=[]
# Sort the index in descending order
# NOTE(review): this sorts by the 'URL' and 'nombre' COLUMNS, not by the
# index; sort_index(ascending=False) would match the instruction — confirm.
df_avengers.sort_values(by=['URL','nombre'], ascending=[False,True])
# -
# # Reset the index
# + tags=[]
# Reset the index
df_avengers = df_avengers.reset_index()
| 1.DataFrames y Series-ejercicio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 301 Regression
#
# View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
# My Youtube Channel: https://www.youtube.com/user/MorvanZhou
import torch
# NOTE: Variable is a legacy wrapper — since PyTorch 0.4, plain tensors
# support autograd and Variable is a no-op; kept here for the tutorial.
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
# %matplotlib inline
torch.manual_seed(1)    # reproducible
# +
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())                 # noisy y data (tensor), shape=(100, 1)
print(x, '\n', y)
# Legacy step: wrapping in Variable is unnecessary on PyTorch >= 0.4
x, y = Variable(x), Variable(y)
plt.scatter(x.data.numpy(), y.data.numpy())
plt.show()
# -
class Net(torch.nn.Module):
    """Two-layer fully connected network: Linear -> ReLU -> Linear."""

    def __init__(self, n_feature, n_hidden, n_output):
        super().__init__()
        # Attribute names kept stable so external code keeps working.
        self.hidden = torch.nn.Linear(n_feature, n_hidden)   # input -> hidden
        self.predict = torch.nn.Linear(n_hidden, n_output)   # hidden -> output

    def forward(self, x):
        # Hidden layer with ReLU non-linearity, then an unactivated
        # (linear) output — suitable for regression.
        return self.predict(F.relu(self.hidden(x)))
net = Net(n_feature=1, n_hidden=10, n_output=1)     # define the network
print(net)  # net architecture
optimizer = torch.optim.SGD(net.parameters(), lr=0.2)
loss_func = torch.nn.MSELoss()  # this is for regression mean squared loss
plt.ion()   # Turn interactive mode on so the plot updates during training
# +
# Train for 200 full-batch gradient steps, replotting every 10 steps
for t in range(200):
    prediction = net(x)     # input x and predict based on x
    loss = loss_func(prediction, y)     # must be (1. nn output, 2. target)
    optimizer.zero_grad()   # clear gradients for next train
    loss.backward()         # backpropagation, compute gradients
    optimizer.step()        # apply gradients
    if t % 10 == 0:  # plot and show learning process each 10 steps
        plt.cla()   # Clear the current axes
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.show()
        plt.pause(0.1)
plt.ioff()
# -
| tutorial-contents-notebooks/301_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="jL2nKxjS4S_r"
# # Start with a simple neural network for MNIST
# Note that there are 2 layers, one with 20 neurons, and one with 10.
#
# The 10-neuron layer is our final layer because we have 10 classes we want to classify.
#
# Train this, and you should see it get about 98% accuracy
# + colab={"base_uri": "https://localhost:8080/"} id="zrQ-DeQtybXZ" outputId="4b91eb0e-2ee3-499a-87cf-6523730d0089"
import tensorflow as tf
data = tf.keras.datasets.mnist
(training_images, training_labels), (val_images, val_labels) = data.load_data()
# Scale pixel values from [0, 255] to [0, 1]
training_images = training_images / 255.0
val_images = val_images / 255.0
# Flatten 28x28 images, one 20-neuron hidden layer, 10-way softmax output
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape=(28,28)),
                                    tf.keras.layers.Dense(20, activation=tf.nn.relu),
                                    tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=20, validation_data=(val_images, val_labels))
# + [markdown] id="JN92xbwK4i_S"
# ## Examine the test data
#
# Using model.evaluate, you can get metrics for a test set. In this case we only have a training set and a validation set, so we can try it out with the validation set. The accuracy will be slightly lower, at maybe 96%. This is because the model hasn't previously seen this data and may not be fully generalized for all data. Still it's a pretty good score.
#
# You can also predict images, and compare against their actual label. The [0] image in the set is a number 7, and here you can see that neuron 7 has a 9.9e-1 (99%+) probability, so it got it right!
#
# + colab={"base_uri": "https://localhost:8080/"} id="Rzit5Te-4lT6" outputId="6ec95776-14b7-4a53-f00e-3435ecb4b704"
# Score on held-out data, then inspect the per-class probabilities for the
# first validation image against its true label
model.evaluate(val_images, val_labels)
classifications = model.predict(val_images)
print(classifications[0])
print(val_labels[0])
# + [markdown] id="6LkJGAiI5Cr3"
# ## Modify to inspect learned values
#
# This code is identical, except that the layers are named prior to adding to the sequential. This allows us to inspect their learned parameters later.
# + colab={"base_uri": "https://localhost:8080/"} id="eyyJ3RMYpFXR" outputId="47744159-5324-44c6-9e6a-88a065e8d191"
import tensorflow as tf

# Same model as the previous cell, but the Dense layers are bound to names
# first so their learned parameters can be inspected later with
# layer_1.get_weights() / layer_2.get_weights().
data = tf.keras.datasets.mnist
(training_images, training_labels), (val_images, val_labels) = data.load_data()
training_images = training_images / 255.0  # scale pixels to [0, 1]
val_images = val_images / 255.0
layer_1 = tf.keras.layers.Dense(20, activation=tf.nn.relu)
layer_2 = tf.keras.layers.Dense(10, activation=tf.nn.softmax)
model = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape=(28,28)),
                                    layer_1,
                                    layer_2])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Pass validation_data so this run really is identical to the previous cell,
# as the markdown above states (the original omitted it here, so no
# validation metrics were reported during training).
model.fit(training_images, training_labels, epochs=20, validation_data=(val_images, val_labels))
model.evaluate(val_images, val_labels)
classifications = model.predict(val_images)
print(classifications[0])
print(val_labels[0])
# + [markdown] id="7pNYFF935PlE"
# # Inspect weights
#
# If you print layer_1.get_weights(), you'll see a lot of data. Let's unpack it. First, there are 2 arrays in the result, so let's look at the first one. In particular let's look at its size.
# + colab={"base_uri": "https://localhost:8080/"} id="QACivjNKxFWW" outputId="37145da6-2bfd-45e2-b063-f0d6ceb998c4"
# 784 input pixels x 20 neurons = 15680 weights in layer_1.
print(layer_1.get_weights()[0].size)
# + [markdown] id="lqpHrDyp5acs"
# The above code should print 15680. Why?
#
# Recall that there are 20 neurons in the first layer.
#
# Recall also that the images are 28x28, which is 784.
#
# If you multiply 784 x 20 you get 15680.
#
# So...this layer has 20 neurons, and each neuron learns a W parameter for each pixel. So instead of y=Mx+c, we have
# y=M1X1+M2X2+M3X3+....+M784X784+C in every neuron!
#
# Every pixel has a weight in every neuron. Those weights are multiplied by the pixel value, summed up, and given a bias.
#
# + colab={"base_uri": "https://localhost:8080/"} id="TdXrHDEw6ACm" outputId="01bed7be-c081-4108-d5d4-0683abf00763"
# get_weights()[1] is the bias vector: one value per neuron -> 20.
print(layer_1.get_weights()[1].size)
# + [markdown] id="EIOZ7rJy6Eg1"
# The above code will give you 20 -- the get_weights()[1] contains the biases for each of the 20 neurons in this layer.
# + [markdown] id="yyFKpzxN6T-N"
# ## Inspecting layer 2
#
# Now let's look at layer 2. Printing the get_weights will give us 2 lists, the first a list of weights for the 10 neurons, and the second a list of biases for the 10 neurons
#
# Let's look first at the weights:
# + colab={"base_uri": "https://localhost:8080/"} id="o9P_PVwXyKXJ" outputId="dbce1584-8652-4f56-e468-467973013d23"
# 20 incoming activations x 10 neurons = 200 weights in layer_2.
print(layer_2.get_weights()[0].size)
# + [markdown] id="daah2gq56fdb"
# This should return 200. Again, consider why?
#
# There are 10 neurons in this layer, but there are 20 neurons in the previous layer. So, each neuron in this layer will learn a weight for the incoming value from the previous layer. So, for example, if the first neuron in this layer is N21, and the neurons output from the previous layer are N11-N120, then this neuron will have 20 weights (W1-W20) and it will calculate its output to be:
#
# W1N11+W2N12+W3N13+...+W20N120+Bias
#
# So each of these weights will be learned as will the bias, for every neuron.
#
# + colab={"base_uri": "https://localhost:8080/"} id="Reyw9wC65o8z" outputId="3d936af0-a5b2-4ddf-c5a7-b4edcf519ea6"
# One bias per output neuron -> 10 values.
print(layer_2.get_weights()[1].size)
# + [markdown] id="DS89WZag7GlB"
# ...and as expected there are 10 elements in this array, representing the 10 biases for the 10 neurons.
#
# Hopefully this helps you see how the element of a simple neuron containing y=mx+c can be expanded greatly into a deep neural network, and that DNN can learn the parameters that match the 784 pixels of an image to their output!
| ML/Exploring_Categorical.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pyspark.sql import SparkSession
from pyspark.sql.functions import *

# Build (or reuse) a session pinned to the Seoul timezone.
spark = SparkSession \
    .builder \
    .config("spark.sql.session.timeZone", "Asia/Seoul") \
    .getOrCreate()
# +
# NOTE(review): getOrCreate() here returns the already-running session built
# above (the appName is ignored for an existing session) -- this second
# builder call looks redundant; confirm intent before removing.
spark = SparkSession.builder.appName("종족별 랭킹").getOrCreate()

# Toy leaderboard rows: (server, race, actor, points).
abyss = (spark.createDataFrame([
    (1, "천족", "Cat", 30),
    (1, "천족", "Dog", 28),
    (1, "마족", "Monkey", 28),
    (1, "마족", "Cat", 24),
    (1, "마족", "Dog", 10)
], ["Server", "Race", "Actor", "Point"]))

abyss.show(truncate=False)
abyss.createOrReplaceTempView("abyss")

# CASE WHEN key = 1 THEN 1 ELSE 2 END
# Display titles to attach per race; spliced into the SQL text below.
light="천족_타이틀"
dark="마족_타이틀"

# Rank actors by points within each (server, race) partition, then label each
# row with the race's title. str.format into SQL is safe here only because
# `light`/`dark` are hard-coded constants, not user input.
res = spark.sql("""
with ranked_table as ( select *, row_number() over (partition by server, race order by point desc) as rank from abyss)
select case when race = '천족' then '{}' else '{}' end as cat_name, * from ranked_table
""".format(light, dark))
res.show(truncate=False)
# -
| spark/notebooks/pyspark-foo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.1 64-bit (''.venv'': venv)'
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
# -
import warnings
warnings.filterwarnings("ignore")
# # Data
train = pd.read_csv("voting_train.csv")
test = pd.read_csv("voting_test.csv")

# Quick sanity checks on both frames: dtypes, null counts, first rows.
train.info()
train.head()
test.info()
test.head()
# +
# Features are every column except the row id and the target label.
X_train = train.drop(["ID", "class"], axis=1)
y_train = train["class"]
X_test = test.drop(["ID"], axis=1)
# -
# # Pre-processing
# ## Encoding
# One-hot encode the categorical features.
# The encoder must be fit on the training data ONLY and then applied to the
# test data with `transform`: the original called `fit_transform` on the test
# set too, which learns a separate category->column mapping, so the test
# matrix columns would not line up with the ones the model was trained on.
# handle_unknown="ignore" keeps `transform` from raising if the test set
# contains a category never seen during training.
ohe = OneHotEncoder(handle_unknown="ignore")
X_train_enc = ohe.fit_transform(X_train)
X_test_enc = ohe.transform(X_test)
# # Model selection
# +
# Grid Search
# Stratified folds keep the class ratio equal across the 5 splits; the fixed
# seed makes fold assignment reproducible.
cv = StratifiedKFold(5, shuffle=True, random_state=1987)
verbose = 1
# -
# ## Logistic Regression
# +
# Hyperparameter grid. Some penalty/solver combinations are incompatible
# (e.g. "l1" with "lbfgs"); sklearn warns/errors on those fits, which is why
# warnings were silenced earlier in the notebook.
parameters = {
    "penalty":["l1", "l2", "elasticnet", "none"],
    "C":[0.001, 0.01 ,0.1, 1, 5, 10, 100],
    "class_weight":["balanced"],
    "solver":["liblinear", "saga", "lbfgs", "newton-cg"]}
model = GridSearchCV(LogisticRegression(), parameters, cv=cv, verbose=verbose, scoring="accuracy")
model.fit(X_train_enc, y_train)

# Show the 10 best parameter combinations by mean cross-validated accuracy.
results = pd.DataFrame(model.cv_results_)
results = results[["param_penalty", "param_C", "param_solver", "mean_test_score", "std_test_score"]]
results.sort_values(["mean_test_score"], ascending=False).head(10)
# -
# # Final model
# Refit the best grid-search configuration on the full training set, then
# write the test-set predictions in the submission format (ID, class).
best_model = model.best_estimator_
best_model.fit(X_train_enc, y_train)
predictions = pd.DataFrame(test["ID"])
predictions["class"] = best_model.predict(X_test_enc)
predictions.to_csv("submission.csv", index=False)
| kaggle_voting/voting_log.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#python 2/3 compatibility
from __future__ import print_function
#simplified interface for building models
import keras
#our handwritten character labeled dataset
from keras.datasets import mnist
#because our models are simple
from keras.models import Sequential
#dense means fully connected layers, dropout is a technique to improve convergence, flatten to reshape our matrices for feeding
#into respective layers
from keras.layers import Dense, Dropout, Flatten
#for convolution (images) and pooling is a technique to help choose the most relevant features in an image
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# +
# mini-batch gradient descent: 128 samples per parameter update
batch_size = 128
# 10 different digit classes (0-9)
num_classes = 10
# very short training time
epochs = 12

# input image dimensions: 28x28 pixel images.
img_rows, img_cols = 28, 28
# -
# the data, downloaded, shuffled and split between train and test sets
# (if only all datasets were this easy to import and format)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train.shape

# Keras backends disagree on where the channel axis lives:
# "channels_first" expects (channels, rows, cols) while "channels_last"
# expects (rows, cols, channels). MNIST is grayscale, so channels == 1.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Cast to float and scale pixel intensities from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert integer class vectors to binary (one-hot) class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# build our model: conv -> conv -> pool -> dropout -> dense classifier
model = Sequential()
# convolutional layer with rectified linear unit activation
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
# again, with more filters to combine lower-level features
model.add(Conv2D(64, (3, 3), activation='relu'))
# choose the best features via pooling (2x2 downsampling)
model.add(MaxPooling2D(pool_size=(2, 2)))
# randomly turn neurons on and off to improve convergence (reduces overfitting)
model.add(Dropout(0.25))
# flatten since too many dimensions, we only want a classification output
model.add(Flatten())
# fully connected to get all relevant data
model.add(Dense(128, activation='relu'))
# one more dropout for convergence' sake :)
model.add(Dropout(0.5))
# output a softmax to squash the matrix into output probabilities
model.add(Dense(num_classes, activation='softmax'))

# Adaptive learning rate (adaDelta) is a popular form of gradient descent
# rivaled only by adam and adagrad; categorical cross-entropy since we have
# multiple classes (10) with one-hot labels
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

# train, reporting held-out performance after every epoch
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

# how well did it do? score = [test loss, test accuracy]
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# Save the model
# serialize model architecture to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
| doc/code/model_deployment/train/model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D4_GeneralizedLinearModels/W1D4_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Tutorial 2: Classifiers and regularizers
# **Week 1, Day 4: Generalized Linear Models**
#
# **By Neuromatch Academy**
#
# __Content creators:__ <NAME>, <NAME>, <NAME>
#
# __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>
#
# **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
#
# <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
# # Tutorial Objectives
#
# *Estimated timing of tutorial: 1 hour, 35 minutes*
#
# This is part 2 of a 2-part series about Generalized Linear Models (GLMs), which are a fundamental framework for supervised learning. In part 1, we learned about and implemented GLMs. In this tutorial, we’ll implement logistic regression, a special case of GLMs used to model binary outcomes.
# Oftentimes the variable you would like to predict takes only one of two possible values. Left or right? Awake or asleep? Car or bus? In this tutorial, we will decode a mouse's left/right decisions from spike train data. Our objectives are to:
# 1. Learn about logistic regression, how it is derived within the GLM theory, and how it is implemented in scikit-learn
# 2. Apply logistic regression to decode choices from neural responses
# 3. Learn about regularization, including the different approaches and the influence of hyperparameters
#
# ---
# We would like to acknowledge [Steinmetz _et al._ (2019)](https://www.nature.com/articles/s41586-019-1787-x) for sharing their data, a subset of which is used here.
#
# + cellView="form" tags=[]
# @title Tutorial slides
# @markdown These are the slides for the videos in all tutorials today
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/upyjz/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# -
# # Setup
#
# + cellView="both"
# Imports
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
# + cellView="form"
#@title Figure settings
import ipywidgets as widgets
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# + cellView="form"
# @title Plotting Functions
def plot_weights(models, sharey=True):
    """Draw a stem plot of weights for each model in models dict.

    Args:
        models (dict): Mapping of panel title -> fitted linear model
            (anything exposing a `coef_` attribute).
        sharey (bool): If True, all panels share one y-axis scale so weight
            magnitudes can be compared across models.
    """
    n = len(models)
    f = plt.figure(figsize=(10, 2.5 * n))
    axs = f.subplots(n, sharex=True, sharey=sharey)
    axs = np.atleast_1d(axs)  # with a single model, subplots returns a bare Axes

    for ax, (title, model) in zip(axs, models.items()):

        ax.margins(x=.02)
        # NOTE: the original passed use_line_collection=True; that keyword was
        # deprecated in Matplotlib 3.6 and removed in 3.8 (its behaviour is the
        # default since 3.3), so it is omitted here for forward compatibility.
        stem = ax.stem(model.coef_.squeeze())
        stem[0].set_marker(".")
        stem[0].set_color(".2")
        stem[1].set_linewidths(.5)
        stem[1].set_color(".2")
        stem[2].set_visible(False)
        ax.axhline(0, color="C3", lw=3)
        ax.set(ylabel="Weight", title=title)
    # Only the bottom axis gets the shared x label (sharex=True).
    ax.set(xlabel="Neuron (a.k.a. feature)")
    f.tight_layout()
def plot_function(f, name, var, points=(-10, 10)):
    """Plot f evaluated on an evenly spaced grid between two endpoints.

    Args:
        f (callable): function that maps scalar -> scalar
        name (string): Function name for axis labels
        var (string): Variable name for axis labels.
        points (tuple): Args for np.linspace to create eval grid.
    """
    grid = np.linspace(*points)
    ax = plt.figure().subplots()
    ax.plot(grid, f(grid))
    ax.set(xlabel=f'${var}$', ylabel=f'${name}({var})$')
def plot_model_selection(C_values, accuracies):
    """Plot the accuracy curve over log-spaced C values.

    Args:
        C_values (array-like): Inverse-regularization strengths that were tried.
        accuracies (array-like): Cross-validated accuracy for each C.
    """
    ax = plt.figure().subplots()
    ax.set_xscale("log")  # C values are log-spaced, so use a log x-axis
    ax.plot(C_values, accuracies, marker="o")
    best_C = C_values[np.argmax(accuracies)]
    ax.set(
        xticks=C_values,
        xlabel="$C$",
        ylabel="Cross-validated accuracy",
        title=f"Best C: {best_C:1g} ({np.max(accuracies):.2%})",
    )
def plot_non_zero_coefs(C_values, non_zero_l1, n_voxels):
    """Plot the number of non-zero L1 coefficients over log-spaced C values.

    (The original docstring was copy-pasted from plot_model_selection.)

    Args:
        C_values (array-like): Inverse-regularization strengths that were tried.
        non_zero_l1 (array-like): Count of non-zero weights for each C.
        n_voxels (int): Total number of features, drawn as a reference line.
    """
    ax = plt.figure().subplots()
    ax.set_xscale("log")
    ax.plot(C_values, non_zero_l1, marker="o")
    ax.set(
        xticks=C_values,
        xlabel="$C$",
        ylabel="Number of non-zero coefficients",
    )
    # Dotted reference line at the total feature count, labelled just below it.
    ax.axhline(n_voxels, color=".1", linestyle=":")
    ax.annotate("Total\n# Neurons", (C_values[0], n_voxels * .98), va="top")
# + cellView="form"
#@title Data retrieval and loading
import os
import requests
import hashlib

url = "https://osf.io/r9gh8/download"
fname = "W1D4_steinmetz_data.npz"
expected_md5 = "d19716354fed0981267456b80db07ea8"

# Download once: skip if the file already exists locally, and verify the
# payload's MD5 checksum before writing it to disk.
if not os.path.isfile(fname):
    try:
        r = requests.get(url)
    except requests.ConnectionError:
        print("!!! Failed to download data !!!")
    else:
        if r.status_code != requests.codes.ok:
            print("!!! Failed to download data !!!")
        elif hashlib.md5(r.content).hexdigest() != expected_md5:
            print("!!! Data download appears corrupted !!!")
        else:
            with open(fname, "wb") as fid:
                fid.write(r.content)
def load_steinmetz_data(data_fname=fname):
    """Read the downloaded Steinmetz .npz archive into a dict of arrays."""
    with np.load(data_fname) as archive:
        contents = {key: archive[key] for key in archive.files}
    return contents
# -
# ---
#
# #Section 1: Logistic regression
# + cellView="form"
# @title Video 1: Logistic regression
from ipywidgets import widgets

# Two hosting options (YouTube / Bilibili) rendered as selectable tabs.
out2 = widgets.Output()
with out2:
    from IPython.display import IFrame

    class BiliVideo(IFrame):
        # Thin IFrame wrapper that builds a Bilibili player URL from a video id.
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id = id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1P54y1q7Qn", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)  # `display` is supplied by the IPython notebook environment

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="qfXFrUnLU0o", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# Logistic Regression is a binary classification model. It is a GLM with a *logistic* link function and a *Bernoulli* (i.e. coinflip) noise model.
#
# Like in the last notebook, logistic regression invokes a standard procedure:
#
# 1. Define a *model* of how inputs relate to outputs.
# 2. Adjust the parameters to maximize (log) probability of your data given your model
#
# ## Section 1.1: The logistic regression model
#
# *Estimated timing to here from start of tutorial: 8 min*
#
# <details>
# <summary> <font color='blue'>Click here for text recap of relevant part of video </font></summary>
#
# The fundamental input/output equation of logistic regression is:
#
# \begin{align}
# \hat{y} \equiv p(y=1|x,\theta) = \sigma(\theta^Tx)
# \end{align}
#
# Note that we interpret the output of logistic regression, $\hat{y}$, as the **probability that y = 1** given inputs $x$ and parameters $\theta$.
#
# Here $\sigma()$ is a "squashing" function called the **sigmoid function** or **logistic function**. Its output is in the range $0 \leq y \leq 1$. It looks like this:
#
# \begin{align}
# \sigma(z) = \frac{1}{1 + \textrm{exp}(-z)}
# \end{align}
#
# Recall that $z = \theta^T x$. The parameters decide whether $\theta^T x$ will be very negative, in which case $\sigma(\theta^T x)\approx 0$, or very positive, meaning $\sigma(\theta^T x)\approx 1$.
#
# ### Coding Exercise 1.1: Implement the sigmoid function
#
# + cellView="both"
def sigmoid(z):
    """Return the logistic transform of z."""
    ##############################################################################
    # TODO for students: Fill in the missing code (...) and remove the error
    raise NotImplementedError("Student exercise: implement the sigmoid function")
    ##############################################################################
    # Student exercise template: this cell raises until the ... is filled in.
    sigmoid = ...
    return sigmoid


# Visualize
plot_function(sigmoid, "\sigma", "z", (-10, 10))
# +
# to_remove solution
def sigmoid(z):
    """Squash z through the logistic function, 1 / (1 + e^-z)."""
    return 1.0 / (1.0 + np.exp(-z))
# Visualize; plt.xkcd() gives the hand-drawn look used for solution plots.
with plt.xkcd():
    plot_function(sigmoid, "\sigma", "z", (-10, 10))
# -
# ## Section 1.2: Using scikit-learn
#
# *Estimated timing to here from start of tutorial: 13 min*
#
# Unlike the previous notebook, we're not going to write the code that implements all of the Logistic Regression model itself. Instead, we're going to use the implementation in [scikit-learn](https://scikit-learn.org/stable/), a very popular library for Machine Learning.
#
# The goal of this next section is to introduce `scikit-learn` classifiers and understand how to apply it to real neural data.
# ---
# # Section 2: Decoding neural data with logistic regression
# ## Section 2.1: Setting up the data
#
# *Estimated timing to here from start of tutorial: 15 min*
#
# In this notebook we'll use the Steinmetz dataset that you have seen previously. Recall that this dataset includes recordings of neurons as mice perform a decision task.
#
# Mice had the task of turning a wheel to indicate whether they perceived a Gabor stimulus to the left, to the right, or not at all. Neuropixel probes measured spikes across the cortex. Check out the following task schematic below from the BiorXiv preprint.
#
#
#
# + cellView="form"
# @markdown Execute to see schematic
import IPython
IPython.display.Image("http://kordinglab.com/images/others/steinmetz-task.png")
# -
# Today we're going to **decode the decision from neural data** using Logistic Regression. We will only consider trials where the mouse chose "Left" or "Right" and ignore NoGo trials.
#
# ### Data format
#
# In the hidden `Data retrieval and loading` cell, there is a function that loads the data:
#
# - `spikes`: an array of normalized spike rates with shape `(n_trials, n_neurons)`
# - `choices`: a vector of 0s and 1s, indicating the animal's behavioral response, with length `n_trials`.
data = load_steinmetz_data()
# Show what the archive contains: array name -> shape.
for key, val in data.items():
    print(key, val.shape)

# As with the GLMs you've seen in the previous tutorial (Linear and Poisson Regression), we will need two data structures:
#
# - an `X` matrix with shape `(n_samples, n_features)`
# - a `y` vector with length `n_samples`.
#
# In the previous notebook, `y` corresponded to the neural data, and `X` corresponded to something about the experiment. Here, we are going to invert those relationships. That's what makes this a *decoding* model: we are going to predict behavior (`y`) from the neural responses (`X`):

# Decoding setup: predict the behavioral choice from the spike rates.
y = data["choices"]
X = data["spikes"]
# ## Section 2.2: Fitting the model
#
# *Estimated timing to here from start of tutorial: 25 min*
#
# Using a Logistic Regression model within `scikit-learn` is very simple.
# +
# Define the model (unpenalized logistic regression).
# NOTE(review): penalty="none" was deprecated in scikit-learn 1.2 and removed
# in 1.4, where unpenalized fits use penalty=None -- confirm the pinned
# sklearn version before running on a newer environment.
log_reg = LogisticRegression(penalty="none")

# Fit it to data (maximum-likelihood estimation of the weights)
log_reg.fit(X, y)
# -
# There's two steps here:
#
# - We *initialized* the model with a hyperparameter, telling it what penalty to use (we'll focus on this in the second part of the notebook)
# - We *fit* the model by passing it the `X` and `y` objects.
#
#
# ## Section 2.3: Classifying the training data
#
# *Estimated timing to here from start of tutorial: 27 min*
#
# Fitting the model performs maximum likelihood optimization, learning a set of *feature weights*. We can use those learned weights to *classify* new data, or predict the labels for each sample:
# Predicted label (0 or 1) for every trial, using the learned weights.
y_pred = log_reg.predict(X)
# ## Section 2.4: Evaluating the model
#
# *Estimated timing to here from start of tutorial: 30 min*
#
# Now we need to evaluate the model's predictions. We'll do that with an *accuracy* score. The accuracy of the classifier is the proportion of trials where the predicted label matches the true label.
#
# ### Coding Exercise 2.4: Classifier accuracy
#
# For the first exercise, implement a function to evaluate a classifier using the accuracy score. Use it to get the accuracy of the classifier on the *training* data.
# +
def compute_accuracy(X, y, model):
    """Compute accuracy of classifier predictions.

    Args:
        X (2D array): Data matrix
        y (1D array): Label vector
        model (sklearn estimator): Classifier with trained weights.

    Returns:
        accuracy (float): Proportion of correct predictions.
    """
    #############################################################################
    # TODO Complete the function, then remove the next line to test it
    raise NotImplementedError("Implement the compute_accuracy function")
    #############################################################################
    y_pred = model.predict(X)
    # Student exercise: replace ... with the fraction of y_pred matching y.
    accuracy = ...
    return accuracy


# Compute train accuracy
train_accuracy = compute_accuracy(X, y, log_reg)
print(f"Accuracy on the training data: {train_accuracy:.2%}")
# +
# to_remove solution
def compute_accuracy(X, y, model):
    """Fraction of samples in (X, y) that `model` labels correctly.

    Args:
        X (2D array): Data matrix
        y (1D array): Label vector
        model (sklearn estimator): Classifier with trained weights.

    Returns:
        accuracy (float): Proportion of correct predictions.
    """
    predicted = model.predict(X)
    return np.mean(predicted == y)
# Compute train accuracy (on the same data the model was fit on)
train_accuracy = compute_accuracy(X, y, log_reg)
print(f"Accuracy on the training data: {train_accuracy:.2%}")
# -
# ## Section 2.5: Cross-validating the classifer
#
# *Estimated timing to here from start of tutorial: 40 min*
#
# Classification accuracy on the training data is 100%! That might sound impressive, but you should recall from yesterday the concept of *overfitting*: the classifier may have learned something idiosyncratic about the training data. If that's the case, it won't have really learned the underlying data->decision function, and thus won't generalize well to new data.
#
# To check this, we can evaluate the *cross-validated* accuracy.
#
# + cellView="form"
# @markdown Execute to see schematic
import IPython
IPython.display.Image("http://kordinglab.com/images/others/justCV-01.png")
# -
# ### Cross-validating using `scikit-learn` helper functions
#
# Yesterday, we asked you to write your own functions for implementing cross-validation. In practice, this won't be necessary, because `scikit-learn` offers a number of [helpful functions](https://scikit-learn.org/stable/model_selection.html) that will do this for you. For example, you can cross-validate a classifer using `cross_val_score`.
#
# `cross_val_score` takes a `sklearn` model like `LogisticRegression`, as well as your `X` and `y` data. It then retrains your model on test/train splits of `X` and `y`, and returns the test accuracy on each of the test sets.
# Fit a fresh unpenalized model on each of 8 train splits and score each
# held-out fold; returns one accuracy per fold.
accuracies = cross_val_score(LogisticRegression(penalty='none'), X, y, cv=8)  # k=8 crossvalidation
# + cellView="form"
#@title
#@markdown Run to plot out these `k=8` accuracy scores.
f, ax = plt.subplots(figsize=(8, 3))
# One horizontal box summarizing the spread of the 8 fold accuracies, with
# the individual fold scores overlaid as points at y = 1.
ax.boxplot(accuracies, vert=False, widths=.7)
ax.scatter(accuracies, np.ones(8))
ax.set(
    xlabel="Accuracy",
    yticks=[],
    title=f"Average test accuracy: {accuracies.mean():.2%}"
)
ax.spines["left"].set_visible(False)
# -
# The lower cross-validated accuracy compared to the training accuracy (100%) suggests that the model is being *overfit*. Is this surprising? Think about the shape of the $X$ matrix:
X.shape
# The model has almost three times as many features as samples. This is a situation where overfitting is very likely (almost guaranteed).
#
# **Link to neuroscience**: Neuro data commonly has more features than samples. Having more neurons than independent trials is one example. In fMRI data, there are commonly more measured voxels than independent trials.
#
# ### Why more features than samples leads to overfitting
#
# In brief, the variance of model estimation increases when there are more features than samples. That is, you would get a very different model every time you get new data and run `.fit()`. This is very related to the *bias/variance tradeoff* you learned about on day 1.
#
# Why does this happen? Here's a tiny example to get your intuition going. Imagine trying to find a best-fit line in 2D when you only have 1 datapoint. There are simply an infinite number of lines that pass through that point. This is the situation we find ourselves in with more features than samples.
#
# ### What we can do about it
# As you learned on day 1, you can decrease model variance if you don't mind increasing its bias. Here, we will increase bias by assuming that the correct parameters are all small. In our 2D example, this is like prefering the horizontal line to all others. This is one example of *regularization*.
# -----
#
# #Section 3: Regularization
#
# *Estimated timing to here from start of tutorial: 50 min*
#
# + cellView="form"
# @title Video 2: Regularization
from ipywidgets import widgets

# Same tabbed YouTube/Bilibili player pattern as Video 1.
out2 = widgets.Output()
with out2:
    from IPython.display import IFrame

    class BiliVideo(IFrame):
        # Thin IFrame wrapper that builds a Bilibili player URL from a video id.
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id = id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1Tg4y1i773", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="b2IaUCZ91bo", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
#
# <details>
# <summary> <font color='blue'>Click here for text recap of video </font></summary>
#
# Regularization forces a model to learn a set solutions you *a priori* believe to be more correct, which reduces overfitting because it doesn't have as much flexibility to fit idiosyncracies in the training data. This adds model bias, but it's a good bias because you know (maybe) that parameters should be small or mostly 0.
#
# In a GLM, a common form of regularization is to *shrink* the classifier weights. In a linear model, you can see its effect by plotting the weights. We've defined a helper function, `plot_weights`, that we'll use extensively in this section.
#
# </details>
#
# Here is what the weights look like for a Logistic Regression model with no regularization:
# Unpenalized fit, then visualize one learned weight per neuron (feature).
# NOTE(review): penalty="none" was removed in scikit-learn 1.4 (use
# penalty=None there) -- confirm the pinned sklearn version.
log_reg = LogisticRegression(penalty="none").fit(X, y)
plot_weights({"No regularization": log_reg})
# It's important to understand this plot. Each dot visualizes a value in our parameter vector $\theta$. (It's the same style of plot as the one showing $\theta$ in the video). Since each feature is the time-averaged response of a neuron, each dot shows how the model uses each neuron to estimate a decision.
#
# Note the scale of the y-axis. Some neurons have values of about $20$, whereas others scale to $-20$.
# ## Section 3.1: $L_2$ regularization
#
# *Estimated timing to here from start of tutorial: 53 min*
#
# Regularization comes in different flavors. A very common one uses an $L_2$ or "ridge" penalty. This changes the objective function to
#
# \begin{align}
# -\log\mathcal{L}'(\theta | X, y)=
# -\log\mathcal{L}(\theta | X, y) +\frac\beta2\sum_i\theta_i^2,
# \end{align}
#
# where $\beta$ is a *hyperparameter* that sets the *strength* of the regularization.
#
# You can use regularization in `scikit-learn` by changing the `penalty`, and you can set the strength of the regularization with the `C` hyperparameter ($C = \frac{1}{\beta}$, so this sets the *inverse* regularization).
#
# Let's compare the unregularized classifier weights with the classifier weights when we use the default `C = 1`:
# +
# L2 ("ridge") penalty at the default strength C = 1 (recall C = 1/beta).
log_reg_l2 = LogisticRegression(penalty="l2", C=1).fit(X, y)

# now show the two models' weights side by side
models = {
    "No regularization": log_reg,
    "$L_2$ (C = 1)": log_reg_l2,
}
plot_weights(models)
# -
# Using the same scale for the two y axes, it's almost impossible to see the $L_2$ weights. Let's allow the y axis scales to adjust to each set of weights:
# Re-plot with independent y-axes so the much smaller L2 weights are visible.
plot_weights(models, sharey=False)
#
# Now you can see that the weights have the same basic pattern, but the regularized weights are an order-of-magnitude smaller.
#
# ### Interactive Demo 3.1: The effect of varying C on parameter size
#
# We can use this same approach to see how the weights depend on the *strength* of the regularization:
# + cellView="form"
# @markdown Execute this cell to enable the widget!

# Precompute the models so the widget is responsive
log_C_steps = 1, 11, 1
penalized_models = {}
for log_C in np.arange(*log_C_steps, dtype=int):
    # NOTE(review): the penalty is passed positionally here
    # (LogisticRegression("l2", ...)); newer scikit-learn versions require it
    # as a keyword argument -- confirm the pinned version.
    m = LogisticRegression("l2", C=10 ** log_C, max_iter=5000)
    penalized_models[log_C] = m.fit(X, y)


@widgets.interact
def plot_observed(log_C = widgets.FloatSlider(value=1, min=1, max=10, step=1)):
    # Compare the unregularized weights to the precomputed model at this C.
    models = {
        "No regularization": log_reg,
        f"$L_2$ (C = $10^{log_C}$)": penalized_models[log_C]
    }
    plot_weights(models)
# -
# Recall from above that $C=\frac1\beta$ so larger `C` is less regularization. The top panel corresponds to $C=\infty$.
# ## Section 3.2: $L_1$ regularization
#
# *Estimated timing to here from start of tutorial: 1 hr, 3 min*
#
# $L_2$ is not the only option for regularization. There is also the $L_1$, or "Lasso" penalty. This changes the objective function to
#
# \begin{align}
# -\log\mathcal{L}'(\theta | X, y)=
# -\log\mathcal{L}(\theta | X, y) +\frac\beta2\sum_i|\theta_i|
# \end{align}
#
# In practice, using the summed absolute values of the weights causes *sparsity*: instead of just getting smaller, some of the weights will get forced to $0$:
# Lasso-style fit: the saga solver supports the L1 penalty, and the raised
# max_iter lets it run long enough to converge without warnings
log_reg_l1 = LogisticRegression(penalty="l1", C=1, solver="saga", max_iter=5000).fit(X, y)
# Compare the two regularization flavors at the same strength
models = {
    "$L_2$ (C = 1)": log_reg_l2,
    "$L_1$ (C = 1)": log_reg_l1,
}
plot_weights(models)
# Note: You'll notice that we added two additional parameters: `solver="saga"` and `max_iter=5000`. The `LogisticRegression` class can use several different optimization algorithms ("solvers"), and not all of them support the $L_1$ penalty. At a certain point, the solver will give up if it hasn't found a minimum value. The `max_iter` parameter tells it to make more attempts; otherwise, we'd see an ugly warning about "convergence".
# ## Section 3.3: The key difference between $L_1$ and $L_2$ regularization: sparsity
#
# *Estimated timing to here from start of tutorial: 1 hr, 10 min*
#
# When should you use $L_1$ vs. $L_2$ regularization? Both penalties shrink parameters, and both will help reduce overfitting. However, the models they lead to are different.
#
# In particular, the $L_1$ penalty encourages *sparse* solutions in which most parameters are 0. Let's unpack the notion of sparsity.
#
# A "dense" vector has mostly nonzero elements:
# $\begin{bmatrix}
# 0.1 \\ -0.6\\-9.1\\0.07
# \end{bmatrix}$.
# A "sparse" vector has mostly zero elements:
# $\begin{bmatrix}
# 0 \\ -0.7\\ 0\\0
# \end{bmatrix}$.
#
# The same is true of matrices:
# + cellView="form"
# @markdown Execute to plot a dense and a sparse matrix
# Seeded so the example matrices are reproducible
np.random.seed(50)
n = 5
M = np.random.random((n, n))
M_sparse = np.random.choice([0, 1], size=(n, n), p=[0.8, 0.2])
# Show the two matrices side by side
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(10, 5))
for panel, mat, title in zip(axs, (M, M_sparse), ("A dense matrix", "A sparse matrix")):
    panel.imshow(mat)
    panel.axis('off')
    panel.set_title(title, fontsize=15)
# Print each entry on top of its cell; dark text on bright cells for contrast
text_kws = dict(ha="center", va="center")
for i in range(n):
    for j in range(n):
        for panel, mat, fmt in zip(axs, (M, M_sparse), ("{:.1f}", "{:d}")):
            val = mat[i, j]
            panel.text(j, i, fmt.format(val), c=".1" if val > .7 else "w", **text_kws)
# -
# ### Coding Exercise 3.3: The effect of $L_1$ regularization on parameter sparsity
#
# Please complete the following function to fit a regularized `LogisticRegression` model and return **the number of coefficients in the parameter vector that are not equal to 0**.
#
# Don't forget to check out the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
#
# +
def count_non_zero_coefs(X, y, C_values):
  """Fit models with different L1 penalty values and count non-zero coefficients.

  Args:
    X (2D array): Data matrix
    y (1D array): Label vector
    C_values (1D array): List of hyperparameter values

  Returns:
    non_zero_coefs (list): number of coefficients in each model that are nonzero
  """
  #############################################################################
  # TODO Complete the function and remove the error
  raise NotImplementedError("Implement the count_non_zero_coefs function")
  #############################################################################
  non_zero_coefs = []
  for C in C_values:
    # Initialize and fit the model
    # (Hint, you may need to set max_iter)
    model = ...
    ...
    # Get the coefs of the fit model (in sklearn, we can do this using model.coef_)
    coefs = ...
    # Count the number of non-zero elements in coefs
    # (Hint: comparing coefs != 0 gives a boolean array whose True entries you can sum)
    non_zero = ...
    non_zero_coefs.append(non_zero)
  return non_zero_coefs
# Use log-spaced values for C
C_values = np.logspace(-4, 4, 5)
# Count non zero coefficients
non_zero_l1 = count_non_zero_coefs(X, y, C_values)
# Visualize
plot_non_zero_coefs(C_values, non_zero_l1, n_voxels=X.shape[1])
# +
# to_remove solution
def count_non_zero_coefs(X, y, C_values):
  """Fit models with different L1 penalty values and count non-zero coefficients.

  Args:
    X (2D array): Data matrix
    y (1D array): Label vector
    C_values (1D array): List of hyperparameter values

  Returns:
    non_zero_coefs (list): number of coefficients in each model that are nonzero
  """
  non_zero_coefs = []
  for C in C_values:
    # saga supports the L1 penalty; max_iter=5000 avoids convergence warnings
    fitted = LogisticRegression(penalty="l1", C=C, solver="saga", max_iter=5000).fit(X, y)
    # Count the coefficients the penalty did not shrink all the way to zero
    non_zero_coefs.append(np.sum(fitted.coef_ != 0))
  return non_zero_coefs
# Log-spaced grid of regularization strengths
C_values = np.logspace(-4, 4, 5)
# Count non-zero coefficients for each C
non_zero_l1 = count_non_zero_coefs(X, y, C_values)
# Visualize
with plt.xkcd():
  plot_non_zero_coefs(C_values, non_zero_l1, n_voxels=X.shape[1])
# -
# Smaller `C` (bigger $\beta$) leads to sparser solutions.
#
# **Link to neuroscience**: When is it OK to assume that the parameter vector is sparse? Whenever it is true that most features don't affect the outcome. One use-case might be decoding low-level visual features from whole-brain fMRI: we may expect only voxels in V1 and thalamus should be used in the prediction.
#
# **WARNING**: be careful when interpreting $\theta$. Never interpret the nonzero coefficients as *evidence* that only those voxels/neurons/features carry information about the outcome. This is a product of our regularization scheme, and thus *our prior assumption that the solution is sparse*. Other regularization types or models may find very distributed relationships across the brain. Never use a model as evidence for a phenomenon when that phenomenon is encoded in the assumptions of the model.
# ## Section 3.4: Choosing the regularization penalty
#
# *Estimated timing to here from start of tutorial: 1 hr, 25 min*
#
# In the examples above, we just picked arbitrary numbers for the strength of regularization. How do you know what value of the hyperparameter to use?
#
# The answer is the same as when you want to know whether you have learned good parameter values: use cross-validation. The best hyperparameter will be the one that allows the model to generalize best to unseen data.
# ### Coding Exercise 3.4: Model selection
#
# In the final exercise, we will use cross-validation to evaluate a set of models, each with a different $L_2$ penalty. Your `model_selection` function should have a for-loop that gets the mean cross-validated accuracy for each penalty value (use the `cross_val_score` function that we introduced above).
# +
def model_selection(X, y, C_values):
  """Compute CV accuracy for each C value.

  Args:
    X (2D array): Data matrix
    y (1D array): Label vector
    C_values (1D array): Array of hyperparameter values

  Returns:
    accuracies (1D array): CV accuracy with each value of C
  """
  #############################################################################
  # TODO Complete the function and remove the error
  raise NotImplementedError("Implement the model_selection function")
  #############################################################################
  accuracies = []
  for C in C_values:
    # Initialize and fit the model
    # (Hint, you may need to set max_iter)
    model = ...
    # Get the accuracy for each test split using cross-validation
    # (Hint: `cross_val_score` returns one score per test split)
    accs = ...
    # Store the average test accuracy for this value of C
    accuracies.append(...)
  return accuracies
# Use log-spaced values for C
C_values = np.logspace(-4, 4, 9)
# Compute accuracies
accuracies = model_selection(X, y, C_values)
# Visualize
plot_model_selection(C_values, accuracies)
# +
# to_remove solution
def model_selection(X, y, C_values):
  """Compute CV accuracy for each C value.

  Args:
    X (2D array): Data matrix
    y (1D array): Label vector
    C_values (1D array): Array of hyperparameter values.

  Returns:
    accuracies (1D array): CV accuracy with each value of C.
  """
  accuracies = []
  for C in C_values:
    # L2-penalized model; max_iter raised so the solver converges
    estimator = LogisticRegression(penalty="l2", C=C, max_iter=5000)
    # Average the 8 per-fold test accuracies for this C
    accuracies.append(cross_val_score(estimator, X, y, cv=8).mean())
  return accuracies
# Log-spaced grid of regularization strengths
C_values = np.logspace(-4, 4, 9)
# Cross-validated accuracy for each C
accuracies = model_selection(X, y, C_values)
# Visualize
with plt.xkcd():
  plot_model_selection(C_values, accuracies)
# -
# This plot suggests that the right value of $C$ does matter — up to a point. Remember that C is the *inverse* regularization. The plot shows that models where the regularization was too strong (small C values) performed very poorly. For $C > 10^{-2}$, the differences are marginal, but the best performance was obtained with an intermediate value ($C \approx 10^1$).
# ---
# # Summary
#
# *Estimated timing of tutorial: 1 hour, 35 minutes*
#
# In this notebook, we learned about Logistic Regression, a fundamental algorithm for *classification*. We applied the algorithm to a *neural decoding* problem: we tried to predict an animal's behavioral choice from its neural activity. We saw again how important it is to use *cross-validation* to evaluate complex models that are at risk for *overfitting*, and we learned how *regularization* can be used to fit models that generalize better. Finally, we learned about some of the different options for regularization, and we saw how cross-validation can be useful for *model selection*.
# ---
# # Notation
#
# \begin{align}
# x &\quad \text{input}\\
# y &\quad \text{measurement, response}\\
# \theta &\quad \text{parameter}\\
# \sigma(z) &\quad \text{logistic function}\\
# C &\quad \text{inverse regularization strength parameter}\\
# \beta &\quad \text{regularization strength parameter}\\
# \hat{y} &\quad \text{estimated output}\\
# \mathcal{L}(\theta| y_i, x_i) &\quad \text{likelihood of that parameter } \theta \text{ producing response } y_i \text{ from input } x_i\\
# L_1 &\quad \text{Lasso regularization}\\
# L_2 &\quad \text{ridge regularization}\\
# \end{align}
# ---
# # Bonus
#
# ---
# ## Bonus Section 1: The Logistic Regression model in full
#
# The fundamental input/output equation of logistic regression is:
#
# \begin{align}
# p(y_i = 1 |x_i, \theta) = \sigma(\theta^Tx_i)
# \end{align}
#
# **The logistic link function**
#
# You've seen $\theta^T x_i$ before, but the $\sigma$ is new. It's the *sigmoidal* or *logistic* link function that "squashes" $\theta^T x_i$ to keep it between $0$ and $1$:
#
# \begin{align}
# \sigma(z) = \frac{1}{1 + \textrm{exp}(-z)}
# \end{align}
#
# **The Bernoulli likelihood**
#
# You might have noticed that the output of the sigmoid, $\hat{y}$ is not a binary value (0 or 1), even though the true data $y$ is! Instead, we interpret the value of $\hat{y}$ as the *probability that y = 1*:
#
# \begin{align}
# \hat{y_i} \equiv p(y_i=1|x_i,\theta) = \frac{1}{{1 + \textrm{exp}(-\theta^Tx_i)}}
# \end{align}
#
# To get the likelihood of the parameters, we need to define *the probability of seeing $y$ given $\hat{y}$*. In logistic regression, we do this using the Bernoulli distribution:
#
# \begin{align}
# P(y_i\ |\ \hat{y}_i) = \hat{y}_i^{y_i}(1 - \hat{y}_i)^{(1 - y_i)}
# \end{align}
#
# So plugging in the regression model:
#
# \begin{align}
# P(y_i\ |\ \theta, x_i) = \sigma(\theta^Tx_i)^{y_i}\left(1 - \sigma(\theta^Tx_i)\right)^{(1 - y_i)}.
# \end{align}
#
# This expression effectively measures how good our parameters $\theta$ are. We can also write it as the likelihood of the parameters given the data:
#
# \begin{align}
# \mathcal{L}(\theta\ |\ y_i, x_i) = P(y_i\ |\ \theta, x_i),
# \end{align}
#
# and then use this as a target of optimization, considering all of the trials independently:
#
# \begin{align}
# \log\mathcal{L}(\theta | X, y) = \sum_{i=1}^Ny_i\log\left(\sigma(\theta^Tx_i)\right)\ +\ (1-y_i)\log\left(1 - \sigma(\theta^Tx_i)\right).
# \end{align}
# ---
# ## Bonus Section 2: More detail about model selection
#
# In the final exercise, we used all of the data to choose the hyperparameters. That means we don't have any fresh data left over to evaluate the performance of the selected model. In practice, you would want to have two *nested* layers of cross-validation, where the final evaluation is performed on data that played no role in selecting or training the model.
#
# Indeed, the proper method for splitting your data to choose hyperparameters can get confusing. Here's a guide that the authors of this notebook developed while writing a tutorial on using machine learning for neural decoding (https://arxiv.org/abs/1708.00909).
#
#
#
# + cellView="form"
# @markdown Execute to see schematic
import IPython
IPython.display.Image("http://kordinglab.com/images/others/CV-01.png")  # data-splitting schematic from the linked decoding tutorial
| tutorials/W1D4_GeneralizedLinearModels/W1D4_Tutorial2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to perform PRF photometry?
#
# In previous tutorials we discussed [how aperture photometry works](1.03-what-are-lightcurves.html) and how you can [create your own custom masks](2.05-making-custom-apertures.html) to extract aperture photometry.
#
# As a reminder, aperture photometry works by summing up the flux values across all the pixels in a pre-defined mask. It tends to offer the fastest and most robust way to extract photometry for an isolated star, but it suffers from three limitations:
# 1. When two or more stars overlap, aperture photometry will return the sum of the flux of all stars. Separating their signals requires additional assumptions.
# 2. When the telescope experiences motion or focus changes, the fraction of the flux of a star captured by the pre-defined aperture mask will change. This imposes systematics on the data which need to be removed (e.g. using the CBV or SFF techniques).
# 3. It may be difficult to separate a background signal from the lightcurve of a star. In particular, this is a concern when time-varying background signals are present, such as the so-called "rolling bands" in Kepler.
#
# An alternative to aperture photometry which addresses the above limitations is called *Point Response Function (PRF) model fitting photometry*, also known as *Point Spread Function (PSF) fitting photometry* or *PRF (PSF) photometry* in short. In this method, a parameterized model is fit to the data. This method is significantly more complicated than aperture photometry and is prone to a different set of systematics. However it offers the ability to separate the signals of overlapping stars in very crowded regions or star clusters.
#
# This tutorial demonstrates how *lightkurve* can be used to extract lightcurves using the model fitting technique. We will start with an easy example and then show how you can tune the parameters of the model to create more careful analyses.
#
# ## Table of contents
#
# 1. Simple example: PRF photometry of Kepler-10
# 2. The `TPFModel` object
# 3. Visualizing and fitting a `TPFModel` object
# 4. Changing `TPFModel` priors
# 5. Creating lightcurves with a custom `TPFModel` object
#
#
# ## Simple example: PRF photometry of Kepler-10
#
# Let's start with a simple example by obtaining PRF photometry for planet system *Kepler-10*. We start by downloading the pixel data from the archive:
import lightkurve as lk
tpf = lk.search_targetpixelfile("Kepler-10", quarter=3).download(quality_bitmask='hardest')  # Quarter 3 pixels; 'hardest' applies the strictest quality filtering
# The easiest way to get PRF photometry is to simply call the `to_lightcurve` method with parameter `method='prf'`. This will fit the pixel data of all cadences with a default model and return the lightcurve.
lc = tpf.to_lightcurve(method='prf')  # fit the default PRF model to every cadence
lc.plot()
# A good way to verify that the model-fitting photometry produced a sensible output is by comparing it against standard aperture photometry. You can obtain aperture photometry using the same `to_lightcurve` method and the parameter `method='aperture'` (which is the default value of this parameter).
lc_aper = tpf.to_lightcurve(method='aperture')  # standard aperture photometry, for comparison
lc_aper.plot()
# There is a notable difference in the long-term trend in both lightcurves. This is because PRF-fitting photometry yields a local estimate of the background which is often different from the Kepler pipeline's global background level estimates.
#
# Let's fold both lightcurves on the period of the known planet *Kepler-10 b* to show that the transit can be recovered equally well:
import matplotlib.pyplot as plt
# Phase-fold both light curves on Kepler-10 b's 0.8376-day period and compare
_, axes = plt.subplots(nrows=1, ncols=2, figsize=(14, 4))
for panel, (title, curve) in zip(axes, (('PRF photometry', lc), ('Aperture photometry', lc_aper))):
    panel.set_title(title)
    curve.flatten().fold(period=0.8376).plot(ax=panel)
# Success! Note that the outliers between *phase* -0.4 and 0.0 are caused by the transits of another planet with a different period.
#
# Note that PRF photometry appears to offer a few percent less noise compared to its aperture counterpart:
print("""The CDPP noise metric equals:
{:.1f} ppm for PRF-fitting photometry;
{:.1f} ppm for aperture photometry.""".format(lc.estimate_cdpp(), lc_aper.estimate_cdpp()))
# This is all you need to know to get started with PRF photometry! In what follows, we explain how you can tune the parameters of the model fitting.
# ## The `TPFModel` object
#
# It is often possible to obtain better results by carefully tuning the model that is being fit to the data. This is true in particular when the data contain multiple overlapping stars. Let's have a look at how you can interact and change the model.
#
# *Lightkurve* uses objects of type `TPFModel` to specify the model.
# You can obtain the default model using the `get_model()` method:
model = tpf.get_model()  # default model for this pixel file
type(model)
# A `TPFModel` object encapsulates our beliefs about the stars contained in a pixel file. The key parameters are:
#
# * `model.star_priors` captures our prior beliefs about the positions and fluxes of the point sources in the data using a list of `StarPrior` objects. Each `StarPrior` object is parameterized by `col` and `row` in pixel coordinates and `flux` in electrons/second;
# * `model.background_prior` captures our beliefs about the per-pixel background, parameterized by `flux` in electrons/second;
# * `model.focus_prior` captures our beliefs on the focus of the spacecraft, parameterized by the unit-less stretch factors `scale_col` and `scale_row`, and `rotation_angle` in radians;
# * `model.motion_prior` captures our beliefs about spacecraft motion, parameterized by `shift_col` and `shift_row` in pixel coordinates;
# * `model.prfmodel` an object of type `KeplerPRF` which models the Point Response Function.
# * `model.fit_background`, `model.fit_focus`, and `model.fit_motion` are booleans which indicate whether or not the background, focus, and motion parameters should be considered free parameters, i.e. whether or not our beliefs should assumed to be perfect.
#
# The parameters of each of these components are prior distributions.
#
# We can inspect our beliefs by accessing the properties; for example, this is the prior on the column pixel coordinate of the star in our data:
model.star_priors[0].col  # prior on the first star's column position
# And this is the model's prior on the background flux:
model.background_prior.flux
# We can inspect all the parameters at once using the `print` function:
print(model)
# ## Visualizing and fitting a `TPFModel` object
#
# A `TPFModel` object contains several methods to interact with the model. For example, you can visualize the default prediction using the `plot()` method. This will visualize the model prediction using the prior means. Let's plot the model below (left panel) and compare it against the observed data (right panel):
import matplotlib.pyplot as plt
_, ax = plt.subplots(nrows=1, ncols=2, figsize=(14,4))
model.plot(ax=ax[0])  # model prediction using the prior means
tpf.plot(ax=ax[1]);  # observed pixel data, for comparison
# You can fit the model to a single cadence of pixel data using the `fit()` method, which returns an object of type `TPFModelParameters` which encapsulates the maximum likelihood values of the parameters for that cadence:
parameters = model.fit(tpf.flux[0] + tpf.flux_bkg[0])  # fit one cadence (background flux re-added)
print(parameters)
# For example, we can access the maximum likelihood value of the stellar flux as follows:
parameters.stars[0].flux  # maximum likelihood stellar flux
# And the background flux was found to be:
parameters.background.flux  # maximum likelihood background flux
# We can visualize the model again, this time passing the fitted parameters:
_, ax = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
model.plot(parameters, ax=ax[0])  # model evaluated at the fitted parameters this time
tpf.plot(ax=ax[1])
# ## Changing `TPFModel` priors
# We can change a `TPFModel` by manipulating its properties. For example, we can add a star to the lower right corner of the pixel file as follows:
from lightkurve.prf import StarPrior
model = tpf.get_model()
# Append an extra star prior at the lower-right corner of the pixel file
extra_star = StarPrior(col=660, row=243, flux=1e6)
model.star_priors.append(extra_star)
model.plot()
# Alternatively, you can build a `TPFModel` from scratch. In the example below, we use an arbitrary combination of Gaussian, Uniform, and Fixed priors to initialize a model. Note that the ideal choices for these assumptions depend strongly on your scientific objectives and may require trial-and-error.
from lightkurve.prf import TPFModel, StarPrior, BackgroundPrior, FocusPrior, MotionPrior
from lightkurve.prf import GaussianPrior, UniformPrior, FixedValuePrior
# Star 1: Gaussian priors on position, broad uniform prior on flux
bright_star = StarPrior(col=GaussianPrior(mean=656, var=1),
                        row=GaussianPrior(mean=247, var=1),
                        flux=UniformPrior(lb=0, ub=1e7))
# Star 2: position pinned to a fixed pixel, flux free within the same range
corner_star = StarPrior(col=FixedValuePrior(value=660),
                        row=FixedValuePrior(value=243),
                        flux=UniformPrior(lb=0, ub=1e7))
# Assemble the full model; only the background is a free parameter here
model = TPFModel(star_priors=[bright_star, corner_star],
                 background_prior=BackgroundPrior(flux=GaussianPrior(mean=100., var=10.)),
                 focus_prior=FocusPrior(scale_col=GaussianPrior(mean=1, var=0.0001),
                                        scale_row=GaussianPrior(mean=1, var=0.0001),
                                        rotation_angle=UniformPrior(lb=-3.1415, ub=3.1415)),
                 motion_prior=MotionPrior(shift_col=GaussianPrior(mean=0., var=0.01),
                                          shift_row=GaussianPrior(mean=0., var=0.01)),
                 prfmodel=tpf.get_prf_model(),
                 fit_background=True,
                 fit_focus=False,
                 fit_motion=False)
model.plot()
# ## Creating lightcurves with a custom `TPFModel` object
# Above we demonstrated how you can interact with a `TPFModel` and fit its parameters to a single cadence. Most users will be interested in obtaining a lightcurve by fitting the model to *all* the cadences rather than a single one.
#
# To make this easy, *lightkurve* provides a helper class called `PRFPhotometry`, which makes it easy to fit the model to all cadences and inspect the results. For example:
from lightkurve import PRFPhotometry
prfphot = PRFPhotometry(model=tpf.get_model())
prfphot.run(tpf.flux + tpf.flux_bkg)  # fit the model to every cadence (background flux re-added)
prfphot.plot_results()
# You can access the details of the fit in each cadence using the `prfphot.results` property, e.g.:
prfphot.results[0].stars[0].flux  # fitted flux of the first star in the first cadence
# Alternatively, you can access the fitted fluxes as a `LightCurve` object:
prfphot.lightcurves[0]
# Note that the focus and motion parameters in the example above are constant. This is because the `fit_focus` and `fit_motion` parameters were set to `False` in the model. By setting these to `True`, we can potentially improve the quality of the fit. Beware however, this comes at the cost of slowing down the fitting process and potentially introducing more complicated systematics and parameter degeneracies.
prfphot.model.fit_focus = True  # let the focus parameters vary per cadence
prfphot.model.fit_motion = True  # let the motion parameters vary per cadence
prfphot.run(tpf.flux + tpf.flux_bkg)  # refit all cadences with the extra freedom
prfphot.plot_results()
prfphot.lightcurves[0].plot()
# Uhoh, the increased model flexibility introduced complicated systematics. These systematics are caused in part by the degeneracy between the star's `col` and `row` position and the `shift_col` and `shift_row` parameters. The degeneracy will disappear when additional stars are present in the data, or when tighter priors are employed.
# ## Comments?
#
# At present we are evaluating the design and use of *lightkurve*'s PRF photometry for use in crowded fields. If you encounter issues, or would like to contribute a tutorial or working example, then please open an issue or pull request on GitHub!
| docs/source/tutorials/03-how-to-use-prf-photometry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib notebook
import control as c
import ipywidgets as w
import numpy as np
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
display(HTML('<script> $(document).ready(function() { $("div.input").hide(); }); </script>'))  # hide the input cells in the rendered notebook
# -
# ## Creating a P-controller using Operational Amplifiers
#
# In analog electronics, operational amplifiers are generally used for the realization of Proportional-Integral-Derivative (PID) controllers. While the mathematical models for Linear Time-Invariant (LTI) systems assume ideal conditions, the realistic circuits may not entirely match them.
# In the following example, we will take a look at two models used to describe operational amplifiers and compare their output to the idealized system in order to understand how closely they approximate the ideal model in a steady state.
#
# One of the most important parameters of an operational amplifier is its open-loop gain, a frequency-dependent parameter that is the ratio of the output voltage and the input voltage difference.
#
# <b>Select an open-loop gain value for the calculations!</b>
# +
# Model selector
opampGain = w.ToggleButtons(
    options=[('10 000', 10000), ('200 000', 200000),],
    description='Operational amplifier gain: ', style={'description_width':'30%'})
display(opampGain)  # the selected gain feeds both interactive models below
# -
# An ideal operational amplifier assumes the input voltage difference to be zero, and the open-loop gain to be infinite. This model allows for the calculation of a transfer function based on the complex impedance of the feedforward and feedback paths.
# In this example, only resistors are present in the circuit; therefore, the result will be a simple closed-loop gain:
# <br><br>
# $$\frac{V_{out}}{V_{in}}=-\frac{Z_F}{Z_G}$$
# <br>
# Some frequency characteristics can be included in the linear model by the following expansion:
# <br><br>
# $$\frac{V_{out}}{V_{in}}=\frac{\frac{-A\cdot Z_F}{Z_G+Z_F}}{1+\frac{A\cdot Z_G}{Z_G+Z_F}}$$
# <br>
# <b>Set up a system so that the results are approximating the ideal as much as possible! At what conditions does this happen?</b>
#
# <br><br>
# <img src="Images/gain.png" width="30%" />
# <br>
# +
# System model
def system_model(rg, rf, a):
    """Print the ideal vs. finite-gain closed-loop gains of the inverting amplifier.

    Args:
        rg: feedforward resistance (slider value, kOhm)
        rf: feedback resistance (slider value, kOhm)
        a: open-loop gain of the operational amplifier

    Only resistance ratios enter both formulas, so the common /1000
    scaling of the slider values cancels out of every printed result.
    """
    r_g = rg / 1000
    r_f = rf / 1000
    # Ideal op-amp: gain is set purely by the resistor ratio
    gain_ideal = -r_f / r_g
    # Finite open-loop gain A: closed-loop gain of the inverting amplifier
    gain_finite = (-a*r_f/(r_f+r_g)) / (1+a*r_g/(r_f+r_g))
    report = ('Ideal closed-loop gain:',
              '{0:.4g}'.format(gain_ideal),
              '\nNon-ideal closed-loop gain:',
              '{0:.4g}'.format(gain_finite),
              '\nDifference from ideal:',
              '{0:.4%}'.format((gain_finite - gain_ideal) / gain_ideal))
    for line in report:
        print(line)
# GUI widgets
# Log-scale resistance sliders (10^-3 to 10^3 kOhm); values update on release only
rg_slider = w.FloatLogSlider(value=1, base=10, min=-3, max=3, description=r'$R_g\ [k\Omega]\ :$',
                             continuous_update=False, layout=w.Layout(width='75%'))
rf_slider = w.FloatLogSlider(value=1, base=10, min=-3, max=3, description=r'$R_f\ [k\Omega]\ :$',
                             continuous_update=False, layout=w.Layout(width='75%'))
input_data = w.interactive_output(system_model, {'rg': rg_slider, 'rf': rf_slider, 'a': opampGain})
display(w.HBox([rg_slider, rf_slider]), input_data)
# -
# This model can be further refined by including the internal and load impedances of the operational amplifier. This realistic model, however, is still based on frequency-dependent components. When designing an analog control circuit, these parameters have to be chosen so that the amplifier most closely approximates the ideal at the controller's frequency range.
#
# <b>Adjust the system parameters, so that the system approximates the ideal value! What are your observations?</b>
# +
# Scene data
# Draw the static schematic used by the realistic amplifier demo below
anim_fig = plt.figure()
anim_fig.set_size_inches((9.8, 4))
anim_fig.set_tight_layout(True)
scene_ax = anim_fig.add_subplot(111)
scene_ax.set_xlim((-3, 4))
scene_ax.set_ylim((-1, 1.8))
scene_ax.axis('off')
# Gray triangle: the amplifier body, drawn on top of the wiring
scene_ax.add_patch(patches.Polygon(
    np.array([[-0.7, -0.7, 1.55], [-1, 1.7, 0.35]]).T,
    closed=True, fill=False, lw=2, ec='dimgray', joinstyle='round', zorder=20))
# Blue outlines: component symbols (NOTE(review): which box is which resistor
# is inferred from the nearby labels, not stated in the code)
scene_ax.add_patch(patches.Rectangle((-0.6, 0), 0.25, 0.7, fill=False, lw=1.5, ec='blue', zorder=10))
scene_ax.add_patch(patches.Rectangle((0.5, 0.225), 0.7, 0.25, fill=False, lw=1.5, ec='blue', zorder=10))
scene_ax.add_patch(patches.Circle((0.2, 0.35), 0.2, fill=False, lw=1.5, ec='blue', zorder=10))
# Red polylines: input-side wiring, plus two terminal circles
scene_ax.plot([-1.1, -0.475, -0.475], [1.2, 1.2, 0.7], color='red', lw=1.5, zorder=0)
scene_ax.plot([-1.1, -0.475, -0.475], [-0.5, -0.5, 0], color='red', lw=1.5, zorder=0)
scene_ax.add_patch(patches.Circle((-1.15, 1.2), 0.05, fill=False, lw=1.5, ec='blue', zorder=10))
scene_ax.add_patch(patches.Circle((-1.15, -0.5), 0.05, fill=False, lw=1.5, ec='blue', zorder=10))
scene_ax.plot([-0.15, -0.15, 0], [-0.1, 0.35, 0.35], color='red', lw=1.5, zorder=0)
# Three shrinking blue bars: ground symbol next to the source
for bar_x, bar_y in (([-0.275, -0.025], [-0.1, -0.1]),
                     ([-0.225, -0.075], [-0.175, -0.175]),
                     ([-0.175, -0.125], [-0.25, -0.25])):
    scene_ax.plot(bar_x, bar_y, color='blue', lw=1.5, zorder=10)
# Output-side wiring, terminal circle and load box
scene_ax.plot([0.4, 0.5], [0.35, 0.35], color='red', lw=1.5, zorder=0)
scene_ax.plot([1.2, 1.75], [0.35, 0.35], color='red', lw=1.5, zorder=0)
scene_ax.add_patch(patches.Circle((1.8, 0.35), 0.05, fill=False, lw=1.5, ec='blue', zorder=10))
scene_ax.add_patch(patches.Rectangle((2, -0.55), 0.25, 0.7, fill=False, lw=1.5, ec='blue', zorder=10))
scene_ax.plot([1.85, 2.125, 2.125], [0.35, 0.35, 0.15], color='red', lw=1.5, zorder=0)
scene_ax.plot([2.125, 2.125], [-0.55, -0.75], color='red', lw=1.5, zorder=0)
# Ground symbol below the load
for bar_x, bar_y in (([2, 2.25], [-0.75, -0.75]),
                     ([2.05, 2.2], [-0.825, -0.825]),
                     ([2.1, 2.15], [-0.9, -0.9])):
    scene_ax.plot(bar_x, bar_y, color='blue', lw=1.5, zorder=10)
# Fixed component labels
scene_ax.text(0.85, 0.6, '$R_{out}$', fontsize=15, color='black', va='center_baseline', ha='center', zorder=30)
scene_ax.text(1.9, -0.05, '$R_{load}$', fontsize=15, color='black', va='center_baseline', ha='center',
              rotation=90, zorder=30)
scene_ax.text(-0.3, 0.85, '$R_{in}$', fontsize=15, color='black', va='center_baseline', ha='center', zorder=30)
scene_ax.text(0.2, 0.375, '$A\\dot{}V_{in}$', fontsize=15, color='black', va='center_baseline', ha='center', zorder=30)
# Value labels rewritten by real_model() whenever the sliders move
Rin_text = scene_ax.text(-0.4, 0.35, '$R_{in}$', fontsize=11, color='black', va='center_baseline', ha='center',
                         rotation=90, rotation_mode='anchor', zorder=30)
Rout_text = scene_ax.text(0.85, 0.35, '$R_{out}$', fontsize=11, color='black', va='center_baseline', ha='center', zorder=30)
Rload_text = scene_ax.text(2.2, -0.2, '$R_{load}$', fontsize=11, color='black', va='center_baseline', ha='center',
                           rotation=90, rotation_mode='anchor', zorder=30)
# System parameters
def real_model(rg, rf, a, rin, rout, rload):
    """Print the realistic closed-loop gain of the inverting op-amp stage.

    Parameters are the resistances (in kOhm, as set by the sliders) and the
    open-loop gain ``a``.  Also refreshes the R_in / R_out / R_load labels on
    the schematic drawn in the previous cell.
    """
    global Rin_text, Rout_text, Rload_text
    # Raw strings keep the LaTeX escapes (\/ and \Omega) from being treated
    # as invalid Python string escapes (SyntaxWarning on Python >= 3.12);
    # the rendered label text is byte-for-byte unchanged.
    Rin_text.set_text(r'${0:.3g}\/k\Omega$'.format(rin))
    Rout_text.set_text(r'${0:.3g}\/k\Omega$'.format(rout))
    Rload_text.set_text(r'${0:.3g}\/k\Omega$'.format(rload))
    # Rescale; only resistance ratios enter the gain formulas, so the
    # common /1000 factor cancels out.
    Rg = rg / 1000
    Rf = rf / 1000
    Rin = rin / 1000
    Rout = rout / 1000
    Rload = rload / 1000
    G_ideal = -Rf / Rg  # Ideal closed-loop gain
    # Loop-gain correction term for finite open-loop gain and non-ideal
    # input/output/load resistances.
    mu = (1 + Rout/Rf + Rout/Rload) * (1 + Rf/Rg + Rf/Rin) / (a - Rout/Rf)
    G_real = (-Rf / Rg) / (1 + mu)  # Realistic closed-loop gain
    print('Real closed-loop gain:')
    print('{0:.4g}'.format(G_real))
    print('\nDifference from ideal:')
    print('{0:.4%}'.format((G_real-G_ideal)/G_ideal))
# Log-scale sliders (1 Ohm .. 1 MOhm, value in kOhm) for the non-ideal
# parameters; rg_slider, rf_slider and opampGain are defined in an earlier
# cell outside this excerpt.
rin_slider = w.FloatLogSlider(value=1, base=10, min=-3, max=3, description=r'$R_{in}\ [k\Omega]\ :$', continuous_update=False,
                              layout=w.Layout(width='75%'), style={'description_width':'30%'})
rout_slider = w.FloatLogSlider(value=1, base=10, min=-3, max=3, description=r'$R_{out}\ [k\Omega]\ :$', continuous_update=False,
                              layout=w.Layout(width='75%'), style={'description_width':'30%'})
rload_slider = w.FloatLogSlider(value=1, base=10, min=-3, max=3, description=r'$R_{load}\ [k\Omega]\ :$', continuous_update=False,
                              layout=w.Layout(width='75%'), style={'description_width':'30%'})
# Re-run real_model() whenever any slider changes.
input_data = w.interactive_output(real_model, {'rg':rg_slider, 'rf':rf_slider, 'a':opampGain,
                                  'rin':rin_slider, 'rout':rout_slider, 'rload':rload_slider})
display(w.HBox([rin_slider, rout_slider, rload_slider]), input_data)
| ICCT_en/examples/03/FD-16_OpAmp_P_Controller.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pymc3 as pm
import langevin
from scipy.stats import pearsonr
from scipy.optimize import minimize_scalar
import theano.tensor as tt
SMALL_SIZE = 16
MEDIUM_SIZE = 18
BIGGER_SIZE = 20
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
SEED = 35010732 # from random.org
np.random.seed(SEED)
print(plt.style.available)
plt.style.use('seaborn-white')
# +
# function to calculate A and B from the dataset
def OUanalytic1(data):
    """Analytic maximum-likelihood estimate of the OU parameters.

    Given one evenly sampled Ornstein-Uhlenbeck time series, returns the
    amplitude A, its error dA, the one-step autocorrelation B and its
    error dB (errors from the inverse Hessian of the log-likelihood).
    """
    N = data.size
    first_sq = data[0]**2
    last_sq = data[-1]**2
    inner_sq = np.sum(data[1:-1]**2)
    corr = np.sum(data[0:-1]*data[1:])
    # B is a root of the cubic stationarity condition of the likelihood.
    cubic = [(N-1)*inner_sq,
             (2.0-N)*corr,
             -first_sq-(N+1)*inner_sq-last_sq,
             N*corr]
    B = np.roots(cubic)[-1]
    # Accumulate the quadratic form Q, then A = Q/N.
    Q = (first_sq+last_sq)/(1-B**2)
    Q = Q + inner_sq*(1+B**2)/(1-B**2)
    Q = Q - corr*2*B/(1-B**2)
    A = Q/N
    # Second derivatives of the log-likelihood at the optimum.
    P2A = -N/2/A**2
    Btmp = (N-1)*(1+B**2)/(1-B**2)**2
    tmp = (2+6*B**2)*(first_sq+last_sq) + (4+12*B**2)*inner_sq - (12*B+4*B**3)*corr
    P2B = Btmp - tmp/A/2/(1-B**2)**3
    PAB = (N-1)*B/A/(1-B**2)
    # Gaussian error bars from the inverse of the 2x2 Hessian.
    det = P2A*P2B-PAB**2
    dA = np.sqrt(-P2B/det)
    dB = np.sqrt(-P2A/det)
    return A, dA, B, dB
def OUresult1(data, deltat):
    """Estimate OU amplitude A and correlation time tau (with errors).

    Wraps OUanalytic1 and converts the autocorrelation B into
    tau = -deltat/log(B), propagating dB into dtau.
    """
    A, dA, B, dB = OUanalytic1(data)
    logB = np.log(B)
    tau = -deltat/logB
    dtau = deltat*dB/B/logB**2
    return A, dA, tau, dtau
# +
# function to calculate A and B from the dataset
def OUanalytic2(data):
    """Analytic OU parameter estimate, second error-formula variant.

    Same point estimates for A and B as OUanalytic1; differs only in the
    expression used for the curvature P2B (hence in dA/dB).
    """
    N = data.size
    first_sq = data[0]**2
    last_sq = data[-1]**2
    inner_sq = np.sum(data[1:-1]**2)
    corr = np.sum(data[0:-1]*data[1:])
    # B is a root of the cubic stationarity condition of the likelihood.
    cubic = [(N-1)*inner_sq,
             (2.0-N)*corr,
             -first_sq-(N+1)*inner_sq-last_sq,
             N*corr]
    B = np.roots(cubic)[-1]
    # Accumulate the quadratic form Q, then A = Q/N.
    Q = (first_sq+last_sq)/(1-B**2)
    Q = Q + inner_sq*(1+B**2)/(1-B**2)
    Q = Q - corr*2*B/(1-B**2)
    A = Q/N
    # Second derivatives of the log-likelihood (variant formula for P2B).
    P2A = -N/A**2/2
    Bfac = B**2*(1+2*N)
    tmp = (1+Bfac)*(first_sq+last_sq) + (2*Bfac + N + 1 -B**4*(N-1))*inner_sq - 2*B*(1+B**2+2*N)*corr
    P2B = -tmp/((1-B**2)**2*(first_sq+last_sq + (1+B**2)*inner_sq - 2*B*corr))
    PAB = (N-1)*B/A/(1-B**2)
    # Gaussian error bars from the inverse of the 2x2 Hessian.
    det = P2A*P2B-PAB**2
    dA = np.sqrt(-P2B/det)
    dB = np.sqrt(-P2A/det)
    return A, dA, B, dB
def OUresult2(data, deltat):
    """Estimate OU amplitude A and correlation time tau (with errors).

    Wraps OUanalytic2 and converts the autocorrelation B into
    tau = -deltat/log(B), propagating dB into dtau.
    """
    A, dA, B, dB = OUanalytic2(data)
    logB = np.log(B)
    tau = -deltat/logB
    dtau = deltat*dB/B/logB**2
    return A, dA, tau, dtau
# -
class Ornstein_Uhlenbeck(pm.Continuous):
    """
    Ornstein-Uhlenbeck process prior for an evenly sampled series.

    Parameters
    ----------
    B : tensor
        B > 0, B = exp(-(D/A)*delta_t) -- one-step autocorrelation
    A : tensor
        A > 0, amplitude of fluctuation <x**2> = A
    delta_t: scalar
        delta_t > 0, time step
    """

    def __init__(self, A=None, B=None, *args, **kwargs):
        super(Ornstein_Uhlenbeck, self).__init__(*args, **kwargs)
        self.A = A
        self.B = B
        self.mean = 0.

    def logp(self, x):
        # Stationary start x[0] ~ N(0, A); each transition
        # x[i] | x[i-1] ~ N(B*x[i-1], A*(1-B**2)).
        A = self.A
        B = self.B
        previous = x[:-1]
        current = x[1:]
        step_logp = pm.Normal.dist(mu=previous*B, tau=1.0/A/(1-B**2)).logp(current)
        initial_logp = pm.Normal.dist(mu=0.0, tau=1.0/A).logp(x[0])
        return initial_logp + tt.sum(step_logp)
# Ground-truth OU parameters: amplitude A = <x^2> and diffusion constant D.
A,D = 1.0,1.0
sN = 0.5  # std-dev of the measurement noise added to the observed copy
delta_t = 0.01  # sampling interval
#M=1000 # number of data sets
N=2000 # length of data set
#print(np.exp(-delta_t*D/A))
# Simulate one noiseless OU trajectory and a noisy observed copy.
data = langevin.time_series(A=A, D=D, delta_t=delta_t, N=N)
dataN = data + np.random.normal(loc=0.0, scale=sN, size=N)
plt.plot(dataN)
plt.plot(data)
# %%timeit
a_bound=20  # upper bound of the uniform prior on A
# compile model for reuse
with pm.Model() as model:
    B = pm.Beta('B', alpha=5.0,beta=1.0)
    A = pm.Uniform('A', lower=0, upper=a_bound)
    # Noise-free model: the OU path itself is observed directly.
    path = Ornstein_Uhlenbeck('path',A=A, B=B, observed=data)
    trace = pm.sample(2000)
pm.summary(trace)
pm.traceplot(trace)
# %%timeit
a_bound=20  # upper bound of the uniform prior on A
# compile model for reuse
with pm.Model() as model:
    B = pm.Beta('B', alpha=5.0,beta=1.0)
    A = pm.Uniform('A', lower=0, upper=a_bound)
    sigma = pm.Uniform('sigma',lower=0,upper=5)  # measurement-noise scale
    # Latent OU path with Gaussian observation noise on top of it.
    path = Ornstein_Uhlenbeck('path',A=A, B=B,shape=len(dataN))
    dataObs = pm.Normal('dataObs',mu=path,sigma=sigma,observed=dataN)
    trace = pm.sample(2000)
pm.summary(trace)
pm.traceplot(trace)
# Posterior mean/std of the inferred latent path, compared against the
# noise-free ground truth and against the injected noise level.
avgpath = np.mean(trace['path'],axis=0)
stdpath = np.std(trace['path'],axis=0)
print(avgpath.shape)
plt.plot(data-avgpath)
print(np.std(data-avgpath))
plt.plot(dataN-data)
print(np.std(dataN-data))
plt.plot(stdpath)
print(stdpath.mean())
# Sweep the sampling interval delta_t and re-fit the noisy OU model at each
# value, recording posterior means/stds of A, B and sigma.
AA,DD = 1.0,1.0
sN = 0.5
total_noise = np.sqrt(AA+sN**2)  # combined std-dev of signal + noise
print("total noise: ",total_noise)
delta_t_list=np.linspace(0.01,4,50)
#M=1000 # number of data sets
N=2000 # length of data set
#print(np.exp(-delta_t*D/A))
result_array = None  # becomes a 2-D array, one row per delta_t
for delta_t in delta_t_list:
    print(delta_t)
    data = langevin.time_series(A=AA, D=DD, delta_t=delta_t, N=N)
    dataN = data + np.random.normal(loc=0.0, scale=sN, size=N)
    with pm.Model() as model:
        B = pm.Beta('B', alpha=5.0,beta=1.0)
        A = pm.Uniform('A', lower=0, upper=a_bound)
        sigma = pm.Uniform('sigma',lower=0,upper=5)
        path = Ornstein_Uhlenbeck('path',A=A, B=B,shape=len(dataN))
        dataObs = pm.Normal('dataObs',mu=path,sigma=sigma,observed=dataN)
        trace = pm.sample(2000)
    # Posterior summaries for this delta_t.
    a_mean = trace['A'].mean()
    b_mean = trace['B'].mean()
    a_std = trace['A'].std()
    b_std = trace['B'].std()
    sigma_mean = trace['sigma'].mean()
    sigma_std = trace['sigma'].std()
    avgpath = np.mean(trace['path'],axis=0)
    stddiff = np.std(data-avgpath)  # residual of path estimate vs ground truth
    stdpath = np.std(trace['path'],axis=0).mean()  # mean posterior path uncertainty
    results = [a_mean,a_std,b_mean,b_std,sigma_mean,sigma_std,stddiff,stdpath]
    if result_array is None:
        result_array = results
    else:
        result_array = np.vstack((result_array, results))
# Convert B estimates to correlation times tau, with propagated errors.
tau = -delta_t_list/np.log(result_array.T[2])
dtau = delta_t_list*result_array.T[3]/result_array.T[2]/np.log(result_array.T[2])**2
plt.plot(delta_t_list,result_array.T[6],"o")
plt.xlabel(r'$\Delta t/\tau$')
plt.ylabel(r'$\sigma_{GT-model}$')
plt.errorbar(delta_t_list,result_array.T[0],yerr=result_array.T[1],fmt="o",label="A")
plt.errorbar(delta_t_list,tau,dtau,fmt="o",label=r'$\tau$')
plt.legend(loc="upper left")
plt.errorbar(delta_t_list,result_array.T[4],yerr=result_array.T[5],fmt="o")
plt.xlabel(r'$\Delta t/\tau$')
plt.ylabel(r'$\sigma_{noise}$')
| Simulating datasets MCMC noise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # mzcn
# 中文版本的matchzoo-py
# 本库包是基于matchzoo-py的库包做的二次开发开源项目,MatchZoo 是一个通用的文本匹配工具包,它旨在方便大家快速的实现、比较、以及分享最新的深度文本匹配模型。
# <br>
# 由于matchzoo-py面向英文预处理较为容易,中文处理则需要进行一定的预处理。为此本人在借鉴学习他人成功的基础上,改进了matchzoo-py包,开发mzcn库包。
# <br>
# mzcn库包对中文文本语料进行只保留文本、去除表情、去除空格、去除停用词等操作,使得使用者可以快速进行中文文本语料进行预处理,使用方法和matchzoo-py基本一致。
# # 快速入手
# ## 定义损失函数和指标
import torch
import numpy as np
import pandas as pd
import mzcn as mz
print('matchzoo version', mz.__version__)
# Ranking task trained with a pairwise hinge loss.
ranking_task = mz.tasks.Ranking(losses=mz.losses.RankHingeLoss())
# Evaluation metrics: NDCG@3, NDCG@5 and mean average precision.
ranking_task.metrics = [
    mz.metrics.NormalizedDiscountedCumulativeGain(k=3),
    mz.metrics.NormalizedDiscountedCumulativeGain(k=5),
    mz.metrics.MeanAveragePrecision()
]
print("`ranking_task` initialized with metrics", ranking_task.metrics)
# ## 配置停用表
# +
import os
# Path of the Chinese stop-word list inside the installed mzcn package.
# os.path.join keeps this portable: the original hard-coded Windows '\\'
# separators, which produce a broken path on Linux/macOS (the joined value
# is identical on Windows).
folder = os.path.join(str(mz.__path__[0]), 'preprocessors', 'units', 'stopwords.txt')
if not os.path.exists(folder):
    # Ask the user to place stopwords.txt at the expected location.
    print('请将stopwords.txt放置到'+folder+'下')
else:
    # The stop-word list is already in place.
    print('停用表已经配置好')
# -
folder
# ## 准备输入数据
def load_data(tmp_data, tmp_task):
    """Wrap a pandas DataFrame into a MatchZoo DataPack for the given task."""
    return mz.pack(tmp_data, task=tmp_task)
## Load the train/dev/test CSVs (sub-sampled for this demo) and pack them.
train=pd.read_csv('./data/train_data.csv').sample(100)
dev=pd.read_csv('./data/dev_data.csv').sample(50)
test=pd.read_csv('./data/test_data.csv').sample(30)
train_pack_raw = load_data(train,ranking_task)
dev_pack_raw = load_data(dev,ranking_task)
test_pack_raw=load_data(test,ranking_task)
# ## Dataset preprocessing
# Fit the aNMM default preprocessor on train, then transform dev/test.
preprocessor = mz.models.aNMM.get_default_preprocessor()
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
# ## Build the training data
# Pair mode generates (positive, negative) training pairs; num_dup duplicates
# positives, num_neg negatives per positive.
trainset = mz.dataloader.Dataset(
    data_pack=train_pack_processed,
    mode='pair',
    num_dup=2,
    num_neg=1
)
devset = mz.dataloader.Dataset(
    data_pack=dev_pack_processed
)
# ## Build the data pipelines
# +
# Pad batches with the aNMM default padding callback.
padding_callback = mz.models.aNMM.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
    dataset=trainset,
    stage='train',
    callback=padding_callback,
)
devloader = mz.dataloader.DataLoader(
    dataset=devset,
    stage='dev',
    callback=padding_callback,
)
# -
# ## Define the model
# aNMM ranking model; embedding vocabulary size comes from the preprocessor.
model = mz.models.aNMM()
model.params['task'] = ranking_task
model.params["embedding_output_dim"]=100
model.params["embedding_input_dim"]=preprocessor.context["embedding_input_dim"]
model.params['dropout_rate'] = 0.1
model.build()
print(model)
# ## Train the model
# +
optimizer = torch.optim.Adam(model.parameters(), lr = 3e-4)
# validate_interval=None validates once per epoch.
trainer = mz.trainers.Trainer(
    model=model,
    optimizer=optimizer,
    trainloader=trainloader,
    validloader=devloader,
    validate_interval=None,
    epochs=10
)
trainer.run()
# -
# # Install
# 由于mzcn是依赖于matchzoo-py模型,所以一共有两种途径安装mzcn
# ### Install MatchZoo-py from Pypi:
# pip install mzcn
# ### Install MatchZoo-py from the Github source:
# git clone https://github.com/yingdajun/mzcn.git
# <br>
# # cd mzcn
# <br>
# python setup.py install
# # Citation
# 本人是第一次写库包,水平有限,希望能给大家带来使用的帮助,如果有不足的地方请多指教
# 这里是所有引用过的库包
# ## matchzoo-py
# @inproceedings{Guo:2019:MLP:3331184.3331403,
# author = {<NAME> and <NAME> and <NAME> and <NAME>},
# title = {MatchZoo: A Learning, Practicing, and Developing System for Neural Text Matching},
# booktitle = {Proceedings of the 42Nd International ACM SIGIR Conference on Research and Development in Information Retrieval},
# series = {SIGIR'19},
# year = {2019},
# isbn = {978-1-4503-6172-9},
# location = {Paris, France},
# pages = {1297--1300},
# numpages = {4},
# url = {http://doi.acm.org/10.1145/3331184.3331403},
# doi = {10.1145/3331184.3331403},
# acmid = {3331403},
# publisher = {ACM},
# address = {New York, NY, USA},
# keywords = {matchzoo, neural network, text matching},
# }
# ## CSDN的作者:SK-Berry的博文
# https://blog.csdn.net/sk_berry/article/details/104984599
| test/ranking/README.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.5 64-bit
# name: python37564bit6c0c4de1db534a08968024907c735ed7
# ---
import os
hostname = os.popen("hostname").read().split("\n")[0]
if(hostname != "reckoner1429-Predator-PH315-52"):
from google.colab import drive
from google.colab import drive
drive.mount('/content/gdrive')
# ! chmod 755 "/content/gdrive/My Drive/collab-var.sh"
! "/content/gdrive/My Drive/collab-var.sh"
# %cd "/content/gdrive/My Drive/github/video-emotion-recognition"
import utils.preprocess_util as preprocess_util
# + tags=["outputPrepend"]
dataset = preprocess_util.RML()
dataset.process_video()
# -
| preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Programming Exercise 5 - Regularized Linear Regression and Bias v.s. Variance
# +
# # %load ../../../standard_import.txt
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import PolynomialFeatures
# Pandas display options for notebook-friendly, full-width output.
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 150)
pd.set_option('display.max_seq_items', None)
# #%config InlineBackend.figure_formats = {'pdf',}
# %matplotlib inline
import seaborn as sns
sns.set_context('notebook')
sns.set_style('white')
# -
# Load the MATLAB dataset (water level vs. dam outflow example).
data = loadmat('data/ex5data1.mat')
data.keys()
# +
# Prepend a column of ones (intercept term) to train and validation splits.
y_train = data['y']
X_train = np.c_[np.ones_like(data['X']), data['X']]
yval = data['yval']
Xval = np.c_[np.ones_like(data['Xval']), data['Xval']]
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('Xval:', Xval.shape)
print('yval:', yval.shape)
# -
# ### Regularized Linear Regression
# Scatter plot of the raw training data (skip the intercept column).
plt.scatter(X_train[:,1], y_train, s=50, c='r', marker='x', linewidths=1)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.ylim(ymin=0);
# #### Regularized Cost function
def linearRegCostFunction(theta, X, y, reg):
    """Regularized linear-regression cost; theta[0] is not penalized."""
    m = y.size
    residual = X.dot(theta) - y
    data_term = (1/(2*m))*np.sum(np.square(residual))
    penalty = (reg/(2*m))*np.sum(np.square(theta[1:]))
    return data_term + penalty
# #### Gradient
def lrgradientReg(theta, X, y, reg):
    """Gradient of the regularized cost, returned flattened for minimize()."""
    m = y.size
    residual = X.dot(theta.reshape(-1,1)) - y
    # First component of theta (intercept) is excluded from regularization.
    grad = (1/m)*(X.T.dot(residual))+ (reg/m)*np.r_[[[0]],theta[1:].reshape(-1,1)]
    return grad.flatten()
# Sanity check: cost and gradient at theta = ones, no regularization.
initial_theta = np.ones((X_train.shape[1],1))
cost = linearRegCostFunction(initial_theta, X_train, y_train, 0)
gradient = lrgradientReg(initial_theta, X_train, y_train, 0)
print(cost)
print(gradient)
def trainLinearReg(X, y, reg):
    """Fit theta by minimizing the regularized cost with its analytic gradient.

    Returns the scipy OptimizeResult (fitted theta in ``res.x``).
    """
    # Starting from zeros makes minimize() fail to converge on this data,
    # so an arbitrary non-zero starting point is used instead.
    initial_theta = np.array([[15],[15]])
    return minimize(linearRegCostFunction, initial_theta,
                    args=(X, y, reg), method=None, jac=lrgradientReg,
                    options={'maxiter': 5000})
# Fit the unregularized model and show the optimizer result.
fit = trainLinearReg(X_train, y_train, 0)
fit
# #### Comparison: coefficients and cost obtained with LinearRegression in Scikit-learn
# fit_intercept=False because X_train already carries the ones column.
regr = LinearRegression(fit_intercept=False)
regr.fit(X_train, y_train.ravel())
print(regr.coef_)
print(linearRegCostFunction(regr.coef_, X_train, y_train, 0))
# Plot the fitted line over the data.
plt.plot(np.linspace(-50,40), (fit.x[0]+ (fit.x[1]*np.linspace(-50,40))), label='Scipy optimize')
#plt.plot(np.linspace(-50,40), (regr.coef_[0]+ (regr.coef_[1]*np.linspace(-50,40))), label='Scikit-learn')
plt.scatter(X_train[:,1], y_train, s=50, c='r', marker='x', linewidths=1)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.ylim(ymin=-5)
plt.xlim(xmin=-50)
plt.legend(loc=4);
def learningCurve(X, y, Xval, yval, reg):
    """Training and validation error as the training-set size grows.

    For each prefix size i+1, fits on the first i+1 examples and evaluates
    the (unregularized-style) cost on that prefix and on the full
    validation set. Returns (error_train, error_val), each shaped (m, 1).
    """
    m = y.size
    error_train = np.zeros((m, 1))
    error_val = np.zeros((m, 1))
    for i in np.arange(m):
        subset_X = X[:i+1]
        subset_y = y[:i+1]
        fit = trainLinearReg(subset_X, subset_y, reg)
        error_train[i] = linearRegCostFunction(fit.x, subset_X, subset_y, reg)
        error_val[i] = linearRegCostFunction(fit.x, Xval, yval, reg)
    return error_train, error_val
# Learning curve over the 12 training examples, no regularization.
t_error, v_error = learningCurve(X_train, y_train, Xval, yval, 0)
plt.plot(np.arange(1,13), t_error, label='Training error')
plt.plot(np.arange(1,13), v_error, label='Validation error')
plt.title('Learning curve for linear regression')
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.legend();
# ### Polynomial regression (Scikit-learn)
# +
# Degree-8 polynomial features on the raw water-level column.
poly = PolynomialFeatures(degree=8)
X_train_poly = poly.fit_transform(X_train[:,1].reshape(-1,1))
# Unregularized fit (overfits) vs. ridge-regularized fit.
regr2 = LinearRegression()
regr2.fit(X_train_poly, y_train)
regr3 = Ridge(alpha=20)
regr3.fit(X_train_poly, y_train)
# plot range for x
plot_x = np.linspace(-60,45)
# using coefficients to calculate y
plot_y = regr2.intercept_+ np.sum(regr2.coef_*poly.fit_transform(plot_x.reshape(-1,1)), axis=1)
plot_y2 = regr3.intercept_ + np.sum(regr3.coef_*poly.fit_transform(plot_x.reshape(-1,1)), axis=1)
plt.plot(plot_x, plot_y, label='Scikit-learn LinearRegression')
plt.plot(plot_x, plot_y2, label='Scikit-learn Ridge (alpha={})'.format(regr3.alpha))
plt.scatter(X_train[:,1], y_train, s=50, c='r', marker='x', linewidths=1)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.title('Polynomial regression degree 8')
plt.legend(loc=4);
| notebooks/.ipynb_checkpoints/Programming Exercise 5 - Regularized Linear Regression and Bias v.s. Variance-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import datetime
print(datetime.datetime.now())
from pygentoolbox import TrimFastaToSeqLength
#dir(pygentoolbox.Tools)
# %matplotlib inline
import matplotlib.pyplot as plt
# +
# All inputs share one target length, so trim each FASTA in a loop instead
# of repeating the same three-line stanza once per file. Call order and
# arguments are identical to the original cell-by-cell version.
seqlength = 27
fastafiles = [
    '/media/sf_LinuxShare/Projects/Lyna/CharSeqPipe/EV_E/pear/MacAndIESDNABuffer2000RNABuffer15/ContactsInMDSs/WT_E_L1_R1R2.trim.AssUnFUnR.RNAs.ContactsNearIES.dnabuffer0.rnabuffer0.Short.fa',
    '/media/sf_LinuxShare/Projects/Lyna/CharSeqPipe/EV_E/pear/MacAndIESDNABuffer2000RNABuffer15/ContactsInIESs/WT_E_L1_R1R2.trim.AssUnFUnR.RNAs.ContactsNearIES.dnabuffer2000.rnabuffer15.Short.fa',
    '/media/sf_LinuxShare/Projects/Lyna/CharSeqPipe/EV_L/pear/MacAndIESDNABuffer2000RNABuffer15/ContactsInMDSs/WT_L_L1_R1R2.trim.AssUnFUnR.RNAs.ContactsNearIES.dnabuffer0.rnabuffer0.Short.fa',
    '/media/sf_LinuxShare/Projects/Lyna/CharSeqPipe/EV_L/pear/MacAndIESDNABuffer2000RNABuffer15/ContactsInIESs/WT_L_L1_R1R2.trim.AssUnFUnR.RNAs.ContactsNearIES.dnabuffer2000.rnabuffer15.Short.fa',
    '/media/sf_LinuxShare/Projects/Lyna/CharSeqPipe/PGM_L/pear/MacAndIESDNABuffer2000RNABuffer15/ContactsInMDSs/PGM_L_S2_L002_R1_R1R2.trim.AssUnFUnR.RNAs.ContactsNearIES.dnabuffer0.rnabuffer0.Short.fa',
    '/media/sf_LinuxShare/Projects/Lyna/CharSeqPipe/PGM_L/pear/MacAndIESDNABuffer2000RNABuffer15/ContactsInIESs/PGM_L_S2_L002_R1_R1R2.trim.AssUnFUnR.RNAs.ContactsNearIES.dnabuffer2000.rnabuffer15.Short.fa',
    '/media/sf_LinuxShare/Projects/Lyna/CharSeqPipe/Nowa1_E/pear/MacAndIESDNABuffer2000RNABuffer15/ContactsInMDSs/NOWA_E_S1_L002_R1_R1R2.trim.AssUnFUnR.RNAs.ContactsNearIES.dnabuffer0.rnabuffer0.Short.fa',
    '/media/sf_LinuxShare/Projects/Lyna/CharSeqPipe/Nowa1_E/pear/MacAndIESDNABuffer2000RNABuffer15/ContactsInIESs/NOWA_E_S1_L002_R1_R1R2.trim.AssUnFUnR.RNAs.ContactsNearIES.dnabuffer2000.rnabuffer15.Short.fa',
]
for fastafile in fastafiles:
    # Output name: replace the trailing '.fa' with '.<seqlength>bp.fa'.
    outfile = fastafile[:-len('.fa')] + f'.{seqlength}bp.fa'
    TrimFastaToSeqLength.main(fastafile, outfile, seqlength)
# -
| notebooks/Lyna/TrimFastaToSeqLength_27bp_iesRNAContacts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scrapping Market Info in Argentina using Selenium and lxml
# +
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import requests
import re
import pandas as pd
import datetime
import investpy
import numpy as np
import lxml
#For Options
def options_chain():
    """Scrape the equity-options board from Allaria Ledesma into a DataFrame.

    Only odd table rows carry quotes (even rows are detail rows), so the loop
    skips even indices. Iteration stops at the first missing row: the xpath
    lookup returns an empty list and ``[0]`` raises IndexError, caught below.
    Prices come formatted with separators and two decimals, hence the
    re.sub + /100 parsing.
    """
    # BUG FIX: 'import lxml' at module level does NOT load the lxml.html
    # submodule, so lxml.html.fromstring() raised AttributeError at runtime.
    import lxml.html
    # Data from Allaria Ledesma
    page = requests.get('https://www.allaria.com.ar/es/Opcion#0')
    doc = lxml.html.fromstring(page.content)
    stock = []
    ticker = []
    opt_type = []
    exp_date = []
    strike = []
    price = []
    bid_q = []
    bid_p = []
    ask_p = []
    ask_q = []
    volume_q = []
    volume_money = []
    try:
        for t in range(1, 1000):
            if t % 2 != 0:
                # XPaths of the cells of table row t.
                stock_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[1]/text()'
                ticker_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[2]/a/strong/text()'
                type_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[3]/text()'
                exp_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[4]/text()'
                strike_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[5]/text()'
                price_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[6]/strong/text()'
                bidq_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[8]/text()'
                bidp_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[9]/text()'
                askp_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[10]/text()'
                askq_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[11]/text()'
                volumeq_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[12]/text()'
                volmoney_path = '//*[@id="tableOpcionesAcciones"]/tbody/tr['+str(t)+']/td[13]/text()'
                # Underlying stock (strip commas, dots and spaces).
                stock.append(re.sub(r"[,. ]", "", doc.xpath(stock_path)[0]))
                ticker.append(doc.xpath(ticker_path)[0])
                # Option type (call/put).
                opt_type.append(re.sub(r" ", "", doc.xpath(type_path)[0]))
                exp_date.append(re.sub(r" ", "", doc.xpath(exp_path)[0]))
                strike.append(float(re.sub(r"[\r\n ]", "", doc.xpath(strike_path)[0])))
                # Published with thousands separators and two decimals.
                price.append(float(re.sub(r"[,. ]", "", doc.xpath(price_path)[0]))/100)
                bid_q.append(float(re.sub(r"[.,]", "", doc.xpath(bidq_path)[0]))/100)
                bid_p.append(float(re.sub(r"[.,]", "", doc.xpath(bidp_path)[0]))/100)
                ask_p.append(float(re.sub(r"[.,]", "", doc.xpath(askp_path)[0]))/100)
                ask_q.append(float(re.sub(r"[.,]", "", doc.xpath(askq_path)[0]))/100)
                # Volume cells may be empty -> record 0.
                if re.sub(r"[\r\n., ]", "", doc.xpath(volumeq_path)[0]) != '':
                    volume_q.append(float(re.sub(r"[\r\n., ]", "", doc.xpath(volumeq_path)[0]))/100)
                else:
                    volume_q.append(0)
                if re.sub(r"[\r\n., ]", "", doc.xpath(volmoney_path)[0]) != '':
                    volume_money.append(float(re.sub(r"[\r\n., ]", "", doc.xpath(volmoney_path)[0]))/100)
                else:
                    volume_money.append(0)
    except (IndexError, ValueError):
        # IndexError: ran past the last populated row (normal termination).
        # ValueError: a malformed numeric cell; stop collecting, keep rows so far.
        pass
    chain_df = pd.DataFrame({'Ticker':ticker,'Stock':stock,'Type':opt_type,'Expiration':exp_date,'Strike':strike,
                             'Last price':price,'Bid Q':bid_q,'Bid price':bid_p,'Ask price':ask_p,'Ask Q':ask_q,
                             'Volume(Q)':volume_q,'Volume($)':volume_money})
    return chain_df
# -
#For local Stocks and Cedears historical data
def get_historical(stock, start, end=None, frecuency='Daily'):
    """Historical OHLC data for an Argentine stock/CEDEAR via investpy.

    Dates are 'dd/mm/YYYY' strings. ``end`` now defaults to *today at call
    time*: the original default expression was evaluated once at import
    time, so a long-running session would silently keep using a stale end
    date. Passing an explicit ``end`` behaves exactly as before.
    """
    if end is None:
        end = datetime.datetime.strftime(datetime.datetime.today(), '%d/%m/%Y')
    df = investpy.get_stock_historical_data(stock=stock,
                                            country='argentina',
                                            from_date=start,
                                            to_date=end, interval=frecuency)
    return df
#For Interest Rates
def interest_rates():
    """Scrape three reference interest rates from cronista.com.

    Returns a DataFrame with columns 'Rate name' and 'Rate value' (values
    parsed from comma/dot-formatted strings and divided by 100).
    NOTE(review): the chromedriver path is hard-coded to the author's
    machine, and the except blocks silently swallow scraping failures --
    if everything fails, the DataFrame construction raises NameError.
    """
    option = Options()
    option.add_argument('headless')
    browser=webdriver.Chrome('/Users/federicoglancszpigel/Desktop/chromedriver',options=option)
    web_page='https://www.cronista.com/MercadosOnline/tasas.html'
    # Retry navigation until the page actually loads.
    while browser.current_url!=web_page:
        try:
            browser.get(web_page)
        except:
            pass
    if browser.current_url==web_page:
        try:
            rate_name=[]
            rate_value=[]
            # Rows 2..4 of the rates list hold the three rates of interest.
            for t in range(2,5):
                name_path='//*[@id="mercados-list"]/div['+str(t)+']/ul/li[1]'
                value_path='//*[@id="mercados-list"]/div['+str(t)+']/ul/li[3]'
                rate_name.append(browser.find_element_by_xpath(name_path).text)
                rate_value.append(float(re.sub(r"[.,]","",browser.find_element_by_xpath(value_path).text))/100)
        except:
            pass
    browser.close()
    rates=pd.DataFrame({'Rate name':rate_name,'Rate value':rate_value})
    return rates
#CEDEAR's Chain
def cedears_chain():
    """Scrape CEDEAR quotes (InvertirOnline) plus conversion-rate metadata.

    Merges the live quote table with conversion ratio / dividend frequency
    from sinelefantesblancos.com.ar on 'Ticker'. Row loops end when a
    find_element call raises past the last row (swallowed by except).
    NOTE(review): chromedriver path is hard-coded; if the second site never
    loads, ``df`` is undefined and the function implicitly returns None.
    """
    option = Options()
    option.add_argument('headless')
    browser=webdriver.Chrome('/Users/federicoglancszpigel/Desktop/chromedriver',options=option)
    web_page='https://www.invertironline.com/mercado/cotizaciones/argentina/acciones/cedears'
    # Retry navigation until the quotes page actually loads.
    while browser.current_url!=web_page:
        try:
            browser.get(web_page)
        except:
            pass
    if browser.current_url==web_page:
        # Switch the table to its largest page size so all rows are shown.
        browser.find_element_by_xpath('//*[@id="cotizaciones_length"]/label/select/option[4]').click()
        company=[]
        ticker=[]
        price=[]
        variation=[]
        p_open=[]
        p_low=[]
        p_high=[]
        volume_money=[]
        try:
            # Walk table rows until an index past the last row raises.
            for t in range(1,1000):
                ticker_path=browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[1]/a/b').text
                company_path=browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[1]/a/span').text
                price_path=browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[2]').text
                variation_path=browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[3]').text
                open_path=browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[8]').text
                low_path=browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[9]').text
                high_path=browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[10]').text
                volmoney_path=browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[12]').text
                ticker.append(ticker_path)
                company.append(company_path)
                # Quotes are formatted with separators and two decimals.
                price.append(float(re.sub(r"[,.]","",price_path))/100)
                variation.append(variation_path)
                p_open.append(float(re.sub(r"[,.]","",open_path))/100)
                p_low.append(float(re.sub(r"[,.]","",low_path))/100)
                p_high.append(float(re.sub(r"[,.]","",high_path))/100)
                volume_money.append(float(re.sub(r"[,.]","",volmoney_path))/100)
        except:
            pass
        cedears_chain=pd.DataFrame({'Ticker':ticker,'Company':company,'Last Price':price,
                                    'Change':variation,'Open':p_open,'High':p_high,
                                    'Low':p_low,'Volume($)':volume_money})
        # Second site: conversion ratios and dividend frequencies.
        while browser.current_url!='http://www.sinelefantesblancos.com.ar/inversiones/CEDEAR.php':
            try:
                browser.get('http://www.sinelefantesblancos.com.ar/inversiones/CEDEAR.php')
            except:
                pass
        if browser.current_url=='http://www.sinelefantesblancos.com.ar/inversiones/CEDEAR.php':
            cedear=[]
            conversion_rate=[]
            dividend_freq=[]
            try:
                for t in range(2,1000):
                    ticker_path=browser.find_element_by_xpath('/html/body/table[2]/tbody/tr['+str(t)+']/td[3]').text
                    conversion_path=browser.find_element_by_xpath('/html/body/table[2]/tbody/tr['+str(t)+']/td[7]').text
                    dividend_path=browser.find_element_by_xpath('/html/body/table[2]/tbody/tr['+str(t)+']/td[8]').text
                    cedear.append(ticker_path)
                    conversion_rate.append(conversion_path)
                    dividend_freq.append(dividend_path)
            except:
                pass
            browser.close()
            more_info=pd.DataFrame({'Ticker':cedear,'Conv.Rate':conversion_rate,'Dividend Freq':dividend_freq})
            df=pd.merge(cedears_chain,more_info,on='Ticker')
            return df
#Local Stocks Chain
def stocks_chain():
    """Scrape the general-panel Argentine stock quotes from InvertirOnline.

    Returns a DataFrame with ticker, company, last price, daily change,
    open/high/low and traded volume in pesos. Quotes are published with
    thousands separators and two decimals, hence the re.sub + /100 parsing.
    NOTE(review): the chromedriver path is hard-coded to the author's
    machine; the browser is intentionally left open (as in the sibling
    scrapers' first stage).
    """
    option = Options()
    option.add_argument('headless')
    browser = webdriver.Chrome('/Users/federicoglancszpigel/Desktop/chromedriver', options=option)
    web_page = 'https://www.invertironline.com/mercado/cotizaciones/argentina/acciones/panel-general'
    # Retry navigation until the page actually loads.
    while browser.current_url != web_page:
        try:
            browser.get(web_page)
        except:
            pass
    if browser.current_url == web_page:
        # Switch the table to its largest page size so all rows are shown.
        browser.find_element_by_xpath('//*[@id="cotizaciones_length"]/label/select/option[4]').click()
        company = []
        ticker = []
        price = []
        variation = []
        p_open = []
        p_low = []
        p_high = []
        volume_money = []
        # (an unused 'volume_q' list in the original was removed)
        try:
            # Walk table rows until an index past the last row raises.
            for t in range(1, 200):
                ticker_path = browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[1]/a/b').text
                company_path = browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[1]/a/span').text
                price_path = browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[2]').text
                variation_path = browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[3]').text
                open_path = browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[8]').text
                low_path = browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[9]').text
                high_path = browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[10]').text
                volmoney_path = browser.find_element_by_xpath('//*[@id="cotizaciones"]/tbody/tr['+str(t)+']/td[12]').text
                ticker.append(ticker_path)
                company.append(company_path)
                price.append(float(re.sub(r"[,.]", "", price_path))/100)
                variation.append(variation_path)
                p_open.append(float(re.sub(r"[,.]", "", open_path))/100)
                p_low.append(float(re.sub(r"[,.]", "", low_path))/100)
                p_high.append(float(re.sub(r"[,.]", "", high_path))/100)
                volume_money.append(float(re.sub(r"[,.]", "", volmoney_path))/100)
        except:
            pass
        # Local renamed from 'stocks_chain' so it no longer shadows the function.
        quotes = pd.DataFrame({'Ticker': ticker, 'Company': company, 'Last Price': price,
                               'Change': variation, 'Open': p_open, 'High': p_high,
                               'Low': p_low, 'Volume($)': volume_money})
        return quotes
#Bonds Chain
def bonds_chain():
    """Scrape the BYMA IAMC bonds board and return it as a DataFrame.

    Opens a Chrome browser, loads the bonds page, and reads the table row by
    row until no more rows are found.  Numeric cells showing '-' (no data)
    are recorded as 0.

    Returns:
        pandas.DataFrame with columns Bond, Ticker, Accrued Interest,
        Last traded, Technical Value, Parity, Price, YTM, VR, MD, Category.
    """
    def _num(text):
        # '-' means "no data" on the BYMA board; otherwise strip the
        # thousands separators and parse as float.
        return 0 if text == '-' else float(re.sub(r"[,]", "", text))

    option = Options()
    # option.add_argument('headless')  # enable to run without a visible window
    browser = webdriver.Chrome('/Users/federicoglancszpigel/Desktop/chromedriver',
                               options=option)
    web_page = 'https://www.byma.com.ar/bonos-iamc/'
    # Retry navigation until the page actually loads.
    while browser.current_url != web_page:
        browser.get(web_page)
        browser.implicitly_wait(10)
    if browser.current_url == web_page:
        bond, ticker, accrued_interest, last_traded = [], [], [], []
        technical_v, parity, price, ytm, vr, md, category = [], [], [], [], [], [], []
        # Column layout on the page: td[5] is skipped by design; note that
        # Category is td[11] and MD is td[12].
        try:
            for t in range(2, 200):
                row = '//*[@id="dataBonos"]/tbody/tr[' + str(t) + ']/td['
                bond.append(browser.find_element_by_xpath(row + '1]').text)
                ticker.append(browser.find_element_by_xpath(row + '2]').text)
                accrued_interest.append(_num(browser.find_element_by_xpath(row + '3]').text))
                last_traded.append(browser.find_element_by_xpath(row + '4]').text)
                technical_v.append(_num(browser.find_element_by_xpath(row + '6]').text))
                parity.append(_num(browser.find_element_by_xpath(row + '7]').text))
                price.append(_num(browser.find_element_by_xpath(row + '8]').text))
                ytm.append(_num(browser.find_element_by_xpath(row + '9]').text))
                vr.append(_num(browser.find_element_by_xpath(row + '10]').text))
                md.append(_num(browser.find_element_by_xpath(row + '12]').text))
                category.append(browser.find_element_by_xpath(row + '11]').text)
        except Exception:
            # The table has fewer than ~200 rows: a missing-element error
            # simply marks the end of the data.  (Was a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            pass
    browser.close()
    frame = pd.DataFrame({'Bond': bond, 'Ticker': ticker,
                          'Accrued Interest': accrued_interest,
                          'Last traded': last_traded,
                          'Technical Value': technical_v,
                          'Parity': parity, 'Price': price,
                          'YTM': ytm, 'VR': vr, 'MD': md,
                          'Category': category})
    return frame
| Code/Scrapper ARG Market Info.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # M² and Beam Quality Parameters
#
# **<NAME>**
#
# **July 2020**
#
# In this notebook, the basic definitions of the beam waist, beam divergence, beam product, and M² are introduced.
#
# As Ross points out in his book, *Laser Beam Quality Metrics*, describing a laser beam by a few numbers is an approximation that discards quite a lot of information.
#
# > Any attempt to reduce the behavior of a seven-dimensional object to a single number inevitably results in loss of information.
#
# where the seven dimensions consist of three-amplitude, three-phase, and time. Nevertheless, M² is a simple, widely-used metric for characterizing laser beams.
import numpy as np
import matplotlib.pyplot as plt
import laserbeamsize as lbs
# ## Minimum Beam Radius
#
# The minimum beam radius $w_0$ (and its location $z_0$) tell us a lot about a laser beam. We know that a laser beam must have a minimum beam radius somewhere. If we assume that the beam obeys Gaussian beam propagation rules then we can make a few observations.
#
# > It would seem that $w$ should stand for *width*, but it doesn't: $w$ denotes the beam radius, not the diameter. Go figure.
#
# A laser cavity with a flat mirror will have minimum beam radius at that mirror. For diode lasers, beam exits through a cleaved flat surface. Since the gain medium in a diode laser usually has a rectangular cross section, there are two different minimum beam radii associated with the exit aperture. These are often assumed to correspond to the dimensions of the gain medium.
#
# In general, though, the beam waist happens somewhere inside the laser and both its location and radius are unknown. To determine the beam waist, an aberration-free focusing lens is used to create a new beam waist external to the cavity that can be measured.
# ## Gaussian Beam Radius
#
# The parameter $w(z)$ represents the beam radius at an axial location $z$. When $z = z_0$, the beam reaches its minimum radius $w_0$,
#
# $$
# w^2(z)=w_0^2\left[1+\left(\frac{z-z_0}{z_R}\right)^2\right]
# $$
# where $z_R=\pi w_0^2/\lambda M^2$.
#
# Therefore, for a simple gaussian beam M²=1, the minimum radius $w_0$ and its location $z_0$ determine the beam size everywhere (assuming, of course, that the wavelength is known).
#
# As can be seen in the plot below, the beam reaches a minimum and then expands symmetrically about the axial location of the minimum.
# +
# Gaussian beam profile near the waist: shade the region |y| <= w(z).
w0 = 0.1e-3          # beam-waist radius [m]
lambda0 = 632.8e-9   # wavelength [m]
zR = lbs.z_rayleigh(w0, lambda0)   # Rayleigh distance [m]

z = np.linspace(-5 * zR, 5 * zR, 100)
r = lbs.beam_radius(w0, lambda0, z)

plt.fill_between(z, -r, r, color='pink')
plt.axhline(0, color='black', lw=1)
plt.vlines(0, 0, w0, color='black', lw=1)
plt.text(0, w0 / 2, ' $w_0$', va='center')
plt.xlabel("Position on optical axis, $z$")
plt.ylabel("Beam radius, $w(z)$")
plt.xticks([])
plt.yticks([])
plt.show()
# -
# ## Beam Divergence $\Theta$
#
# All beams diverge or spread out as the beam propagates along the $z$ direction. The far-field divergence is defined as the half-angle
#
# $$
# \theta=\lim_{z\rightarrow\infty}\frac{w(z)}{z} = \frac{w_0}{z_R}
# $$
#
# where $w(z)$ is the beam radius at a distance $z$. The full angle $\Theta$ is
#
# $$
# \Theta=\lim_{z\rightarrow\infty}\frac{d(z)}{z}= \frac{2 w_0}{z_R}
# $$
# +
# Beam divergence: shade the Gaussian envelope and overlay the far-field
# asymptotes w = ±(w0/zR)·z.
w0 = 0.1e-3          # beam-waist radius [m]
lambda0 = 632.8e-9   # wavelength [m]
zR = lbs.z_rayleigh(w0, lambda0)   # Rayleigh distance [m]
theta = w0 / zR      # far-field half-angle

z = np.linspace(-5 * zR, 5 * zR, 100)
r = lbs.beam_radius(w0, lambda0, z)

plt.fill_between(z, -r, r, color='pink')
plt.plot(z, theta * z, '--b')
plt.plot(z, -theta * z, '--b')
plt.xlabel("Position on optical axis, $z$")
plt.ylabel("Beam radius, $w(z)$")
plt.title("Beam Divergence")
plt.text(120e-3, 0e-3, r'$\Theta=2\theta$', fontsize=14, va='center', color='blue')
plt.annotate('', xy=(100e-3, 0.2e-3), xytext=(100e-3, -0.2e-3),
             arrowprops=dict(connectionstyle="arc3,rad=0.2", arrowstyle="<->", color='blue'))
#plt.xticks([])
#plt.yticks([])
plt.show()
# -
# For a perfect Gaussian beam, the beam divergence is completely determined by its minimum beam radius $w_{00}$
#
# $$
# \Theta_{00} = \frac{\lambda}{\pi w_{00}}
# $$
#
# where the 00 subscript indicates that these only apply to the TEM$_{00}$ or fundamental gaussian mode.
# ## Beam Parameter Product
#
# Laser beam quality can be described by combining the previous two metrics into a single beam parameter product (BPP) or
#
# $$
# \mathrm{BPP} = w \cdot \Theta
# $$
#
# where $w$ is the radius of the beam (at its waist/narrowest point) and $\Theta$ is the half-angle measure of the beam divergence in the far-field.
#
# This is not unlike the throughput parameter (area $\times$ solid angle) from radiometry which captures both the angular expansion of light and focusing into a single variable. The BPP represents, for instance, the amount of light that can be coupled into a fiber. For practical use of the BPP, see
#
# Wang, [Fiber coupled diode laser beam parameter product calculation and rules for optimized design](https://www.researchgate.net/publication/253527159_Fiber_Coupled_Diode_Laser_Beam_Parameter_Product_Calculation_and_Rules_for_Optimized_Design), *Proc. SPIE*, **7918**, 9 (2011)
#
#
# ## M² or the beam propagation factor
#
# It turns out that real beams differ from perfect Gaussian beams. Specifically, they diverge more quickly or don't focus to the same size spot.
#
# The beam propagation factor M² is a measure of how close a beam is to Gaussian (TEM$_{00}$ mode).
#
# Johnston and Sasnett write in their chapter "Characterization of Laser Beams: The M² Model" in the *Handbook of Optical and Laser Scanning*, Marcel Dekker, (2004):
#
# > Unlike the fundamental mode beam where the 1/e$^2$-diameter definition is universally understood and applied, for mixed modes a number of different diameter definitions have been employed. The different definitions have in common that they all reduce to the 1/e$^2$-diameter when applied to an $M^2=1$ fundamental mode beam, but when applied to a mixed mode with higher order mode content, they in general give different numerical values. As M² always depends on a product of two measured diameters, its numerical value changes also as the square of that for diameters. It is all the same beam, but different methods provide results in different currencies; one has to specify what currency is in use and know the exchange rate.
#
# M² is defined as the ratio of the beam parameter product (BPP)
#
# $$
# M^2 = \frac{\mathrm{BPP}}{\mathrm{BPP}_{00}} = \frac{\Theta \cdot w_0}{\Theta_{00}\cdot w_{00}}
# $$
#
# where $\Theta$ is the far-field beam divergence and $w_0$ is the minimum beam radius of the real beam. The beam divergence of a perfect Gaussian is
#
# $$
# \Theta_{00} = \frac{\lambda}{\pi w_{00}}
# $$
#
# and therefore the beam quality factor becomes
#
# $$
# M^2 = \frac{\Theta \cdot w_0}{\lambda\cdot \pi}
# $$
#
# where radius $w_0$ is the minimum radius for the real beam.
#
# A Gaussian beam has M²=1, while all other beams will have M²>1. Moreover,
#
# * for a given *beam radius*, the Gaussian beam has the smallest possible beam divergence
# * for a given *beam divergence*, the Gaussian beam has the smallest possible beam radius.
#
# A multimode beam has a beam waist which is M² times larger than a fundamental Gaussian beam with the same beam divergence, or a beam divergence which is M² times larger than that of a fundamental Gaussian beam with the same beam waist.
# ### Astigmatic or Elliptical Beams
#
# A simple stigmatic beam has rotational symmetry — any cross section will display a circular profile. However, a simple astigmatic beam will have elliptical cross-sections. It is *simple* because the major and minor axes of the ellipse remain in the same plane (a general astigmatic beam will have elliptical cross-sections that rotate with propagation distance).
#
# For an elliptical beam, the beam waist radius, beam waist location, and Rayleigh distance will differ on the semi-major and semi-minor axes. Unsurprisingly, the M² values may differ as well
#
# $$
# w_x^2(z) = w_{0x}^2\left[1 + \left(\frac{z-z_0}{z_{Rx}} \right)^2\right]
# $$
#
# and
#
# $$
# w_y^2(z) = w_{0y}^2\left[1 + \left(\frac{z-z_0}{z_{Ry}} \right)^2\right]
# $$
#
# Two different M² values for the major and minor axes of the elliptical beam shape arise from the two Rayleigh distances
#
# $$
# z_{Rx} = \frac{\pi w_{0x}^2}{\lambda M_x^2} \qquad\mbox{and}\qquad z_{Ry} = \frac{\pi w_{0y}^2}{\lambda M_y^2}
# $$
# ## Rayleigh Distance $z_R$
#
# The Rayleigh distance $z_R$ is the distance from the beam waist to the point where the beam area has doubled. This means that the irradiance (power/area) has dropped 50% or that beam radius has increased by a factor of $\sqrt{2}$.
#
# Interestingly, the radius of curvature of the beam is largest at one Rayleigh distance from the beam waist.
#
# The Rayleigh distance for a real beam is defined as
#
# $$
# z_R=\frac{\pi w_0^2}{\lambda M^2}
# $$
#
# where $w_0$ is the minimum beam radius of the beam.
# +
# Rayleigh distance: mark ±z_R, where the beam radius has grown to sqrt(2)·w0.
w0 = 0.1               # radius of beam waist [mm]
lambda0 = 0.6328/1000  # wavelength [mm]
zR = lbs.z_rayleigh(w0, lambda0)  # Rayleigh distance [mm]
z = np.linspace(-3*zR, 3*zR, 100)
r = lbs.beam_radius(w0, lambda0, z)
plt.fill_between(z, -r, r, color='pink')
plt.axhline(0, color='black', lw=1)
plt.axvline(zR, color='blue', linestyle=':')
plt.axvline(-zR, color='blue', linestyle=':')
plt.text(zR, -3*w0, ' $z_R$')
plt.text(-zR, -3*w0, '$-z_R$ ', ha='right')
plt.text(0, w0/2, ' $w_0$', va='center')
# raw string: '\s' is an invalid escape sequence in a normal string literal
plt.text(zR, w0/2, r' $\sqrt{2}w_0$', va='center')
plt.vlines(0, 0, w0, color='black', lw=2)
plt.vlines(zR, 0, np.sqrt(2)*w0, color='black', lw=2)
plt.xlabel("Position on optical axis, $z$")
plt.ylabel("Beam radius, $w(z)$")
plt.title("Rayleigh Distance")
plt.xticks([])
plt.yticks([])
plt.show()
# -
| docs/07a-M2-Parameters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.3 64-bit ('base')
# language: python
# name: python37364bitbase6782713bed674ed7a26121bd52530782
# ---
# # Laboratório 6: Pesca
#
# ### Referente ao capítulo 11
#
# Suponha que uma população de peixes é introduzida em um tanque artificial ou em uma região de água com redes. Seja $x(t)$ o nível de peixes escalado em $t$, com $x(0) = x_0 > 0$. Os peixes inicialmente são pequenos e têm massa média quase nula: trataremos como $0$. Após, a massa média é uma função
# $$
# f_{massa}(t) = k\frac{t}{t+1},
# $$
# onde $k$ é o máximo de massa possivelmente atingido. Consideraremos $T$ suficientemente pequeno de forma que não haja reprodução de peixes. Seja $u(t)$ a taxa de colheita e $m$ a taxa de morte natural do peixe. Queremos maximizar a massa apanhada no intervalo, mas minimizando os custos envolvidos. Assim o problema é
#
# $$
# \max_u \int_0^T Ak\frac{t}{t+1}x(t)u(t) - u(t)^2 dt, A \ge 0
# $$
# $$
# \text{sujeito a }x'(t) = -(m + u(t))x(t), x(0) = x_0,
# $$
# $$
# 0 \le u(t) \le M,
# $$
# onde $M$ é o limite físico da colheita.
# ## Condições Necessárias
#
# ### Hamiltoniano
#
# $$
# H = Ak\frac{t}{t+1}x(t)u(t) - u(t)^2 - \lambda(t)\left(m + u(t)\right)x(t)
# $$
#
# ### Equação adjunta
#
# $$
# \lambda '(t) = - Ak\frac{t}{t+1}u(t) + \lambda(t)\left(m + u(t)\right)
# $$
#
# ### Condição de transversalidade
#
# $$
# \lambda(T) = 0
# $$
#
# ### Condição de otimalidade
#
# $$
# H_u = Ak\frac{t}{t+1}x(t) - 2u(t) - \lambda(t)x(t)
# $$
#
# $$
# H_u < 0 \implies u^*(t) = 0 \implies x(t)\left(Ak\frac{t}{t+1} - \lambda(t)\right) < 0
# $$
#
# $$
# H_u = 0 \implies 0 \le u^*(t) = 0.5x(t)\left(Ak\frac{t}{t+1} - \lambda(t)\right) \le M
# $$
#
# $$
# H_u > 0 \implies u^*(t) = M \implies 0.5x(t)\left(Ak\frac{t}{t+1} - \lambda(t)\right) > M
# $$
#
# Assim $u^*(t) = \min\left\{M, \max\left\{0, 0.5x(t)\left(Ak\frac{t}{t+1} - \lambda(t)\right)\right\}\right\}$
#
# ### Importanto as bibliotecas
# +
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import sympy as sp
import sys
sys.path.insert(0, '../pyscripts/')
from optimal_control_class import OptimalControl
# -
# ### Usando a biblioteca sympy
# Declare the symbols appearing in the Hamiltonian.
t_sp, x_sp, u_sp, lambda_sp, k_sp, A_sp, m_sp = sp.symbols('t x u lambda k A m')
# Hamiltonian: H = A k t/(t+1) x u - u^2 - lambda (m + u) x
H = (A_sp * k_sp * (t_sp / (t_sp + 1)) * x_sp * u_sp
     - u_sp ** 2
     - lambda_sp * (m_sp + u_sp) * x_sp)
H
# Partial derivatives used for the adjoint and optimality conditions.
print(f'H_x = {sp.diff(H, x_sp)}')
print(f'H_u = {sp.diff(H, u_sp)}')
print(f'H_lambda = {sp.diff(H, lambda_sp)}')
# Resolvendo para $H_u = 0$
# Solve H_u = 0 for the interior (unconstrained) optimal control u*.
eq = sp.Eq(sp.diff(H,u_sp), 0)
sp.solve(eq,u_sp)
# Aqui podemos descrever as funções necessárias para a classe.
# +
# Problem parameters, filled in before solving: A (objective weight),
# k (maximum fish mass), m (natural death rate), M (harvest-rate bound).
parameters = {'A': None, 'k': None, 'm': None, 'M': None}


# Named lambdas replaced by defs (PEP 8 E731); signatures unchanged.
def diff_state(t, x, u, par):
    """State equation: x'(t) = -(m + u) x."""
    return -x * (par['m'] + u)


def diff_lambda(t, x, u, lambda_, par):
    """Adjoint equation: lambda'(t) = -A k t u/(t+1) + lambda (m + u)."""
    return -par['A'] * par['k'] * t * u / (t + 1) + lambda_ * (par['m'] + u)


def update_u(t, x, lambda_, par):
    """Optimality condition: u* = clip(0.5 x (A k t/(t+1) - lambda), 0, M)."""
    interior = 0.5 * x * (par['A'] * par['k'] * t - lambda_ * t - lambda_) / (t + 1)
    return np.minimum(par['M'], np.maximum(0, interior))
# -
# ## Aplicando a classe ao exemplo
#
# Vamos fazer algumas experimentações. Sinta-se livre para variar os parâmetros. Nesse caso passaremos os limites como parâmetro do `solve`.
# Build the solver from the state, adjoint and control-update equations.
problem = OptimalControl(diff_state, diff_lambda, update_u)
x0 = 0.4   # initial (scaled) fish stock
T = 10     # time horizon (weeks)
parameters['A'] = 5    # weight of harvested mass in the objective
parameters['k'] = 10   # maximum average fish mass
parameters['m'] = 0.2  # natural death rate
parameters['M'] = 1    # physical upper bound on the harvest rate
# Solve the control problem; the control is clipped to [0, M].
t,x,u,lambda_ = problem.solve(x0, T, parameters, bounds = [(0, parameters['M'])])
ax = problem.plotting(t,x,u,lambda_)
for i in range(3):
    ax[i].set_xlabel('Semanas')
plt.show()
# A estratégia ótima nesse caso inicia em $0$ e logo aumenta muito rapidamente, com um declínio posterior suave. A população é praticamente extinta no período considerado. O limite superior não teve efeito, dado que foi bem alto. Por isso, podemos testar com outros valores.
# Re-solve with a tighter harvest-rate bound so the constraint binds.
parameters['M'] = 0.4
t,x,u,lambda_ = problem.solve(x0, T, parameters, bounds = [(0, parameters['M'])])
ax = problem.plotting(t,x,u,lambda_)
for i in range(3):
    ax[i].set_xlabel('Semanas')
plt.show()
# Sugerimos que experimente a variação dos outros parâmetros.
# ## Experimentação
# +
#N0 = 1
#T = 5
#parameters['r'] = 0.3
#parameters['a'] = 10
#parameters['delta'] = 0.4
#
#t,x,u,lambda_ = problem.solve(N0, T, parameters)
#problem.plotting(t,x,u,lambda_)
# -
# ### Este é o final do notebook
| notebooks/.ipynb_checkpoints/Laboratory6-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="haZmY-Z4IIx_" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
# + id="MCvnAOx7Jowe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3fa0fed1-22ca-4b08-b0c8-ba472f90557e" executionInfo={"status": "ok", "timestamp": 1581608102034, "user_tz": -60, "elapsed": 483, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# cd "/content/drive/My Drive/Colab Notebooks/matrix"
# + id="cTxJ_PzJKVOs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="005450ce-2514-48be-b8f5-75e8d0e73719" executionInfo={"status": "ok", "timestamp": 1581608117935, "user_tz": -60, "elapsed": 1780, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# ls data
# + id="tepI15EEMJ85" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5378b688-d35d-456d-e022-44f575ebecae" executionInfo={"status": "ok", "timestamp": 1581608317617, "user_tz": -60, "elapsed": 1195, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Load the men's shoes price dataset; low_memory=False parses the whole
# file at once to avoid mixed-dtype warnings on wide columns.
df = pd.read_csv('data/men_shoes.csv',low_memory=False)
df.shape
# + id="SfSmM39LMokp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="d36028ec-df08-433d-dbbd-652038bef5ad" executionInfo={"status": "ok", "timestamp": 1581608340855, "user_tz": -60, "elapsed": 555, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Inspect the available columns.
df.columns
# + id="y3PO4K5TNArt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fa44a5c3-979d-4361-eb77-5eb28a3431e2" executionInfo={"status": "ok", "timestamp": 1581608536889, "user_tz": -60, "elapsed": 2667, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Baseline statistic: global mean of the minimum listed price (the target).
mean_price = np.mean( df['prices_amountmin'] )
mean_price
# + id="JrLKji8fNsnt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="01ef5e84-123c-435e-8f32-dbf511722e34" executionInfo={"status": "ok", "timestamp": 1581608839417, "user_tz": -60, "elapsed": 535, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Baseline model: predict the mean price for every row and measure the MAE.
y_true = df['prices_amountmin']
y_pred = [mean_price] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)
# + id="9Olmb7OuOTuj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="3fbaeeed-1e09-49ad-b06c-25ee4c1e528a" executionInfo={"status": "ok", "timestamp": 1581609025259, "user_tz": -60, "elapsed": 970, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# The price distribution is heavy-tailed; inspect it on a log1p scale.
np.log1p(df['prices_amountmin']).hist(bins=100)
# + id="2r_HifBsPbIz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="003f99d6-1b6d-44b4-b21f-8f2bc97459d9" executionInfo={"status": "ok", "timestamp": 1581609194194, "user_tz": -60, "elapsed": 494, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Baseline: predict the median price (more robust than the mean for a skewed target).
y_true = df['prices_amountmin']
y_pred = [np.median(y_true)] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)
# + id="a1BHu3hOQRBa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6f2e300c-507c-40a6-8776-d5b696282085" executionInfo={"status": "ok", "timestamp": 1581610441112, "user_tz": -60, "elapsed": 552, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Baseline: mean computed in log-space and mapped back with expm1
# (a geometric-style mean, less sensitive to the long price tail).
y_true = df['prices_amountmin']
price_log_mean = np.expm1(np.mean(np.log1p(y_true)))
y_pred = [price_log_mean] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)
# + id="SqS6sp26RWxx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="987d57f5-6220-40ca-80d6-b80500f15c30" executionInfo={"status": "ok", "timestamp": 1581610505042, "user_tz": -60, "elapsed": 573, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Re-inspect columns before feature engineering.
df.columns
# + id="v4AXIuwvT1KK" colab_type="code" colab={}
# Encode brand as integer category codes (factorize returns (codes, uniques)).
df['brand_cat'] = df['brand'].factorize()[0]
# + id="l_4OfYoMVysb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7b57f68f-8064-4453-e87e-e45523e1fbd9" executionInfo={"status": "ok", "timestamp": 1581611264062, "user_tz": -60, "elapsed": 563, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Cross-validate a shallow decision tree on the brand feature alone;
# scores are negated MAE (higher is better).
feats = ['brand_cat']
X = df[feats].values
y = df['prices_amountmin'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model,X,y, scoring ='neg_mean_absolute_error')
np.mean(scores), np.std(scores)
# + id="c6vtMQOHXIYl" colab_type="code" colab={}
def run_model(feats):
    """Cross-validate a depth-5 decision tree on the given feature columns.

    Uses the module-level DataFrame `df`; returns (mean, std) of the
    negated-MAE cross-validation scores.
    """
    features = df[feats].values
    target = df['prices_amountmin'].values
    tree = DecisionTreeRegressor(max_depth=5)
    cv_scores = cross_val_score(tree, features, target,
                                scoring='neg_mean_absolute_error')
    return np.mean(cv_scores), np.std(cv_scores)
# + id="pCvSmWH0ZXA9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6419822f-dc2c-4d12-8599-ffc6a9c5ac7e" executionInfo={"status": "ok", "timestamp": 1581611871026, "user_tz": -60, "elapsed": 626, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Same evaluation as above, via the helper.
run_model(['brand_cat'])
# + id="Q3IhnP_2aegy" colab_type="code" colab={}
# Encode manufacturer as integer category codes.
df['manufacturer_cat'] = df['manufacturer'].factorize()[0]
# + id="iPK-x67yb7sw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d346e4e1-0641-40c7-c85c-c2ca6f66c42c" executionInfo={"status": "ok", "timestamp": 1581612291074, "user_tz": -60, "elapsed": 517, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Cross-validate on the manufacturer feature alone.
# NOTE(review): duplicates the run_model helper logic inline.
feats = ['manufacturer_cat']
X = df[feats].values
y = df['prices_amountmin'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model,X,y, scoring ='neg_mean_absolute_error')
np.mean(scores), np.std(scores)
# + id="sEwAGBSUcFGd" colab_type="code" colab={}
# NOTE(review): identical redefinition of run_model from an earlier cell;
# redundant but harmless (it rebinds the same implementation).
def run_model(feats):
    """Cross-validate a depth-5 decision tree on feats; return (mean, std) of neg-MAE scores."""
    X = df[ feats ].values
    y = df['prices_amountmin'].values
    model = DecisionTreeRegressor(max_depth=5)
    scores = cross_val_score(model,X,y, scoring ='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)
# + id="-GcfHmCzcYj9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="84ac6e55-642a-433b-fa54-f439fd070c75" executionInfo={"status": "ok", "timestamp": 1581612394784, "user_tz": -60, "elapsed": 1103, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Evaluate manufacturer alone via the helper.
run_model(['manufacturer_cat'])
# + id="mdg_V6WRceR1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ea895ffd-8577-478f-e207-e4bf1007bc35" executionInfo={"status": "ok", "timestamp": 1581612482047, "user_tz": -60, "elapsed": 1058, "user": {"displayName": "ZupaGrzybowa", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mDJBheFQvfQGwvLId5HrYzTz61IcAewxeKNEtDN=s64", "userId": "15273861130524040784"}}
# Evaluate both categorical features together.
run_model(['manufacturer_cat','brand_cat'])
# + id="wiFTYaGoczls" colab_type="code" colab={}
# NOTE(review): stray token left in the notebook; executing it raises
# NameError.  Commented out so the notebook runs end to end.
# pvd
| matrix_one/day4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_pytorch_p36)
# language: python
# name: conda_pytorch_p36
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import torch
import random
# Pick the GPU when available, otherwise fall back to the CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import os,sys
opj = os.path.join  # shorthand for building paths
from tqdm import tqdm
import acd
from copy import deepcopy
from model_fashion_mnist import Net, Net2c, FashionCNN
from visualize import *
import dset_fashion_mnist as dset
import foolbox
# Make the sibling `trim` package importable from this notebook.
sys.path.append('../trim')
from transforms_torch import transform_bandpass, tensor_t_augment, batch_fftshift2d, batch_ifftshift2d
from trim import *
from util import *
from attributions import *
from captum.attr import *
import warnings
# Silence library deprecation chatter in the notebook output.
warnings.filterwarnings("ignore")
# +
# set args
args = dset.get_args()
# NOTE(review): args.cuda is set to the *string* "True" — truthy either way,
# but confirm downstream code expects a string rather than a bool.
args.cuda = "True"
args.test_batch_size = 100
# load fashion-mnist dataset
train_loader, test_loader = dset.load_data(args.batch_size, args.test_batch_size, device)
# model: load pretrained Fashion-MNIST CNN weights and switch to eval mode
model = FashionCNN().to(device)
model.load_state_dict(torch.load('./fashion-mnist.model.pth', map_location=device))
model = model.eval().to(device)
# report test-set accuracy of the loaded model
dset.test(model, test_loader, args)
# +
# foolbox model: wrap the PyTorch net for attack generation (inputs in [0,1])
preprocessing = dict(mean=[0,], std=[1,], axis=-3)
fmodel = foolbox.models.PyTorchModel(model, bounds=(0, 1), num_classes=10, preprocessing=preprocessing)
# get a batch of images and labels and print the clean accuracy
# NOTE(review): `.next()` is the Python-2 iterator protocol; on Python 3 this
# relies on the loader's iterator exposing a .next() method — confirm, else
# prefer next(iter(test_loader)).
images, labels = iter(test_loader).next()
images = images.numpy()
labels = labels.numpy()
print(np.mean(fmodel.forward(images).argmax(axis=-1) == labels))
# apply the FGSM attack to the whole batch
attack = foolbox.attacks.FGSM(fmodel)
im_adversarials = attack(images, labels)
# if the i'th image is misclassfied without a perturbation, then adversarials[i] will be the same as images[i]
# if the attack fails to find an adversarial for the i'th image, then adversarials[i] will all be np.nan
# Foolbox guarantees that all returned adversarials are in fact in adversarials
print(np.mean(fmodel.forward(im_adversarials).argmax(axis=-1) == labels))
# # You can always get the actual adversarial class that was observed for that sample by Foolbox by
# # passing `unpack=False` to get the actual `Adversarial` objects:
attack = foolbox.attacks.FGSM(fmodel, distance=foolbox.distances.Linf)
adversarials = attack(images, labels, unpack=False)
adversarial_classes = np.asarray([a.adversarial_class for a in adversarials])
# print(labels)
# print(adversarial_classes)
# print(np.mean(adversarial_classes == labels)) # will always be 0.0
# The `Adversarial` objects also provide a `distance` attribute. Note that the distances
# can be 0 (misclassified without perturbation) and inf (attack failed).
distances = np.asarray([a.distance.value for a in adversarials])
print("{:.1e}, {:.1e}, {:.1e}".format(distances.min(), np.median(distances), distances.max()))
print("{} of {} attacks failed".format(sum(adv.distance.value == np.inf for adv in adversarials), len(adversarials)))
print("{} of {} inputs misclassified without perturbation".format(sum(adv.distance.value == 0 for adv in adversarials), len(adversarials)))
# -
# Index of the adversarial example closest to its original image among the
# successful perturbations (smallest nonzero distance).
min_idx = np.where(distances == np.min(distances[np.nonzero(distances)]))[0][0]
im = images[min_idx]
im_a = im_adversarials[min_idx]
viz_im_a(im.squeeze(), im_a.squeeze())
print('True and perturbed labels: ', dset.output_label(labels[min_idx]), dset.output_label(adversarial_classes[min_idx]))
# ## Interpret adversarial images
# +
# FFT: forward transform packs (real, imag) as a trailing dimension.
# NOTE(review): torch.fft/torch.ifft here are the legacy function API
# (removed in torch >= 1.8) — presumably this notebook pins an older torch.
t = lambda x: torch.fft(torch.stack((x, torch.zeros_like(x)),dim=4), 2)
transform_i = modularize(lambda x: torch.ifft(x, 2)[...,0])
# prepend the inverse transform so the model consumes frequency-domain input
model = model.to(device)
model_t = TrimModel(model, transform_i)
# interp methods
attr_methods = ['IG', 'DeepLift', 'SHAP', 'InputXGradient']
# band center and width (cycles per pixel)
band_centers = list(np.arange(1, 40) * 0.025)
band_width_lower = 0.025
band_width_upper = 0.025
# indexes
idx_adv = np.logical_and(distances > 0, distances < np.inf)  # attack succeeded
idx_fail = distances == np.inf                               # attack failed
# -
#
# #### images that succeed adversarial attack
# +
# Frequency-band attribution scores for the ORIGINAL images whose attack
# succeeded (idx_adv): for each band-pass mask, sum each method's
# attribution inside the band.
# NOTE(review): this cell is repeated below for adversarial and failed
# images — a shared helper would remove the triplication.
im_scores = {
    'IG': [],
    'DeepLift': [],
    'SHAP': [],
    # 'CD': [], no support on batch with different class labels
    'InputXGradient': []
}
im = torch.from_numpy(images[idx_adv]).to(device)
im_t = t(im)  # FFT of the batch
target = torch.from_numpy(labels[idx_adv]).to(device)
scores = {
    'IG': [],
    'DeepLift': [],
    'SHAP': [],
    'InputXGradient': []
}
# attr: attribution maps in the frequency domain for the true labels
results = get_attributions(im_t, model_t, class_num=target, attr_methods=attr_methods, device=device)
for band_center in band_centers:
    # ifftshift aligns the band mask with the unshifted FFT layout
    mask = ifftshift(freq_band(28, band_center, band_width_lower, band_width_upper))
    for name in attr_methods:
        im_attr = (results[name] * mask).sum(axis=(1,2))
        scores[name].append(im_attr)
im_scores['IG'] = np.array(scores['IG'])
im_scores['DeepLift'] = np.array(scores['DeepLift'])
im_scores['SHAP'] = np.array(scores['SHAP'])
im_scores['InputXGradient'] = np.array(scores['InputXGradient'])
# -
# #### adversarial images
# +
# Frequency-band attribution scores for the ADVERSARIAL images, attributed
# with respect to the (wrong) class the attack produced.
im_a_scores = {
    'IG': [],
    'DeepLift': [],
    'SHAP': [],
    # 'CD': [], no support on batch with different class labels
    'InputXGradient': []
}
im = torch.from_numpy(im_adversarials[idx_adv]).to(device)
im_t = t(im)
# dtype=int: np.int was deprecated in NumPy 1.20 and removed in 1.24
adv_labels = np.array(adversarial_classes[idx_adv], dtype=int)
target = torch.from_numpy(adv_labels).to(device)
scores = {
    'IG': [],
    'DeepLift': [],
    'SHAP': [],
    'InputXGradient': []
}
# attribution maps in the frequency domain for the adversarial labels
results = get_attributions(im_t, model_t, class_num=target, attr_methods=attr_methods, device=device)
for band_center in band_centers:
    mask = ifftshift(freq_band(28, band_center, band_width_lower, band_width_upper))
    for name in attr_methods:
        im_attr = (results[name] * mask).sum(axis=(1,2))
        scores[name].append(im_attr)
im_a_scores['IG'] = np.array(scores['IG'])
im_a_scores['DeepLift'] = np.array(scores['DeepLift'])
im_a_scores['SHAP'] = np.array(scores['SHAP'])
im_a_scores['InputXGradient'] = np.array(scores['InputXGradient'])
# -
# #### images that fail adversarial attack
# +
# Frequency-band attribution scores for the images where the attack FAILED
# (idx_fail), attributed with respect to their true labels.
im_f_scores = {
    'IG': [],
    'DeepLift': [],
    'SHAP': [],
    # 'CD': [], no support on batch with different class labels
    'InputXGradient': []
}
im = torch.from_numpy(images[idx_fail]).to(device)
im_t = t(im)
target = torch.from_numpy(labels[idx_fail]).to(device)
scores = {
    'IG': [],
    'DeepLift': [],
    'SHAP': [],
    'InputXGradient': []
}
# attr: attribution maps in the frequency domain for the true labels
results = get_attributions(im_t, model_t, class_num=target, attr_methods=attr_methods, device=device)
for band_center in band_centers:
    mask = ifftshift(freq_band(28, band_center, band_width_lower, band_width_upper))
    for name in attr_methods:
        im_attr = (results[name] * mask).sum(axis=(1,2))
        scores[name].append(im_attr)
im_f_scores['IG'] = np.array(scores['IG'])
im_f_scores['DeepLift'] = np.array(scores['DeepLift'])
im_f_scores['SHAP'] = np.array(scores['SHAP'])
im_f_scores['InputXGradient'] = np.array(scores['InputXGradient'])
# -
# #### plot
band_idx = 0  # NOTE(review): unused in this cell — leftover?
fig, ax = plt.subplots(1, 1, figsize=(13,5))
# Normalize each group's IG curve so the per-band means sum to 1.
im_avg = im_scores['IG'].mean(axis=1)/im_scores['IG'].mean(axis=1).sum()
im_sd = im_scores['IG'].std(axis=1)/im_scores['IG'].mean(axis=1).sum()
im_a_avg = im_a_scores['IG'].mean(axis=1)/im_a_scores['IG'].mean(axis=1).sum()
im_a_sd = im_a_scores['IG'].std(axis=1)/im_a_scores['IG'].mean(axis=1).sum()
im_f_avg = im_f_scores['IG'].mean(axis=1)/im_f_scores['IG'].mean(axis=1).sum()
im_f_sd = im_f_scores['IG'].std(axis=1)/im_f_scores['IG'].mean(axis=1).sum()
# Mean ± one standard deviation per group.
ax.plot(band_centers, im_avg, alpha=0.5, color='blue', label='image that succeeds adv. att.', linewidth=4.0)
ax.fill_between(band_centers, im_avg - im_sd,
                im_avg + im_sd, color='blue', alpha=0.1)
ax.plot(band_centers, im_a_avg, alpha=0.5, color='red', label='adv. image', linewidth=4.0)
ax.fill_between(band_centers, im_a_avg - im_a_sd,
                im_a_avg + im_a_sd, color='red', alpha=0.1)
ax.plot(band_centers, im_f_avg, alpha=0.5, color='green', label='image that fails adv. att.', linewidth=4.0)
ax.fill_between(band_centers, im_f_avg - im_f_sd,
                im_f_avg + im_f_sd, color='green', alpha=0.1)
ax.legend()
plt.show()
# Per-image curves (one line per image), normalized across bands.
plt.plot(band_centers, im_scores['IG']/im_scores['IG'].sum(axis=0),
         alpha=0.2, color='blue', label='image that succeeds adv. att.')
plt.plot(band_centers, im_a_scores['IG']/im_a_scores['IG'].sum(axis=0),
         alpha=0.2, color='red', label='adv. image')
plt.show()
| ex_fashion_mnist/interp_adv_batch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Load modules
# NOTE(review): `read_csv` is imported both bare and via pd; the bare import is unused.
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import pandas as pd
# Load dataset
df = pd.read_csv(r"C:\Users\root\PycharmProjects\iris\dataset\iris.csv")
# Split into training data and test data
X = df[['sepal_length','sepal_width','petal_length','petal_width']]
y = df['classification']
# Create training and testing vars, It’s usually around 80/20 or 70/30.
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.20, random_state=1)
# Now we’ll fit the model on the training data
model = SVC(gamma='auto')
model.fit(X_train, Y_train)
# Make predictions on validation dataset
predictions = model.predict(X_test)
# Pickle model
pd.to_pickle(model,r'C:\Users\root\PycharmProjects\iris\prediction_model\new_model.pickle')
# Unpickle model
# NOTE(review): unpickling executes arbitrary code — only load pickles you created.
model = pd.read_pickle(r'C:\Users\root\PycharmProjects\iris\prediction_model\new_model.pickle')
# read a pickle pd.read_pickle('model.pkl')
# Take input from user
sepal_length = float(input("Enter sepal_length: "))
sepal_width = float(input("Enter sepa_width: "))
petal_length = float(input("Enter petal_length: "))
petal_width = float(input("Enter petal_width: "))
result = model.predict([[sepal_length,sepal_width,petal_length,petal_width]]) # input must be 2D array
print(result)
# -
| iris/iris.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:522]
# language: python
# name: conda-env-522-py
# ---
# # Exploratory data analysis of the UCI Bank Marketing data set
# This is the exploratory data analysis for our data analysis [proposal](https://github.com/UBC-MDS/DSCI_522_Group_10).
# +
import warnings
warnings.filterwarnings('ignore')
from IPython.display import HTML, display
import numpy as np
import pandas as pd
bank_df = pd.read_csv("../data/raw/bank-additional/bank-additional-full.csv", sep=";")
train_df = pd.read_csv("../data/processed/bank-additional-full_train.csv")
test_df = pd.read_csv("../data/processed/bank-additional-full_test.csv")
# -
# ## Summary of the data set
# The data we are using for this project, [bank-additional-full.csv](https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip), was from a marketing campaign of a Portuguese bank. It was sourced from the UCI Learning Repository and can be found on this [website](https://archive.ics.uci.edu/ml/datasets/Bank+Marketing).
#
# Each row of the data is related to the direct telemarketing campaigns. They were using telemarketing to attempt to get customers to sign up for the bank's term deposit product. The target in this dataset is yes or no to subscribing to the term deposit product.
#
# There are also some values of 'unknown' in some categorical features like education. We are considering imputation but will re-assess this while preprocessing the features.
#
total_examples = len(bank_df)
total_features = len(bank_df.columns) - 1
count_NA = bank_df.isna().sum().sum()
print(f"There is a total of {total_examples} examples, {total_features} features and {str(count_NA)} observations with missing values in the dataset")
class_distribution = bank_df.y.value_counts().to_frame().T.rename(columns={"no":"Not Subscribed", "yes":"Subscribed"}, index={"y":"Class Distribution"})
class_distribution
# Table 1. Counts of observation for each class.
#
# The data used in this analysis is very similar to the data used in [Moro et al., 2014].
# ## Partition the data set into training and test sets
# Before proceeding further, we will split the data such that 80% of observations are in the training and 20% of observations are in the test set. Below we list the counts of observations for each class:
train_class_distribution = train_df.target.value_counts().to_frame().T.rename(columns={0:"Not Subscribed", 1:"Subscribed"}, index={"target":"Training Set Class Distribution"})
test_class_distribution = test_df.target.value_counts().to_frame().T.rename(columns={0:"Not Subscribed", 1:"Subscribed"}, index={"target":"Test Set Class Distribution"})
pd.concat([train_class_distribution, test_class_distribution])
# Table 2. Counts of observation for each class for each data partition.
#
# As shown above, there is class imbalance. Our positive for this analysis is for our customers to subscribe to the term deposit. We will try to spot this class so we make sure we capture as many customers as we can with this term deposit product. We care a bit more about recall than precision because we want to capture as many customers as we can. If we can tune our prediction model to minimize false negatives, the more customers we hope we can get signed up this term deposit product.
#
# We will first attempt to capture the right metrics to find and tune the best model. We note the class imbalance as shown in the table above. So in addition to our initial tuning, we are prepared to change the training procedures (ex. class weight) and maybe even changing the data (over/under sampling) as we continue our analysis. This will also be dependent on our initial tuning to see if any metrics identify any other problems.
#
#
#
# # Exploratory analysis on the training set
# To gain an understanding of which features could be helpful in predicting the positive class, we plotted histograms of numeric features (didn't subscribe: blue and subscribed: orange) and percent subscribed bar graphs for each of the categorical features for all observations in the training data set. Although the histogram distributions for all of the numeric features overlap to a certain degree, they do show a difference in their centres and spreads, for example, the `age` histogram. For the categorical features, some features are similar in the proportion subscribed, while others seem to be promising in predicting the positive class. The `poutcome` (previous outcome) feature seems to be the best, as previous success is highly associated with the positive class. In addition, the feature values (`contact`: cellphone, `education`: illiterate, `age_category`: older adults rather than young adults, and `job`: retired and student) seem to be associated with the positive class.
#
# +
display(HTML("<table>" +
"<tr><td><img src='../results/age.png'></td><td><img src='../results/last_contact_duration.png'></td></tr>" +
"<tr><td><img src='../results/contacts_during_campaign.png'></td><td><img src='../results/days_after_previous_contact.png'></td></tr>" +
"<tr><td><img src='../results/previous_contacts.png'></td><td><img src='../results/employment_variation_rate.png'></td></tr>" +
"<tr><td><img src='../results/consumer_price_index.png'></td><td><img src='../results/consumer_confidence_index.png'></td></tr>" +
"<tr><td><img src='../results/euribor_3_month_rate.png'></td><td><img src='../results/number_of_employees.png'></td></tr>" +
"</table>"))
# -
# Figure 1. Distribution of numeric features in the training set for subscribers and non-subscribers to the bank's term deposit product.
# +
display(HTML("<table>" +
"<tr><td><img src='../results/job.png'></td><td><img src='../results/month.png'></td></tr>" +
"<tr><td><img src='../results/education.png'></td><td><img src='../results/day_of_week.png'></td></tr>" +
"<tr><td><img src='../results/loan.png'></td><td><img src='../results/previous_outcome.png'></td></tr>" +
"<tr><td><img src='../results/marital_status.png'></td><td><img src='../results/housing.png'></td></tr>" +
"<tr><td><img src='../results/contact.png'></td><td><img src='../results/default.png'></td></tr>" +
"</table>"))
# -
# Figure 2. Distribution of categorical features in the training set for subscribers to the bank's term deposit product.
# # References
#
# [Moro et al., 2014] <NAME>, <NAME> and <NAME>. A Data-Driven Approach to Predict the Success of Bank Telemarketing. Decision Support Systems, Elsevier, 62:22-31, June 2014. https://archive.ics.uci.edu/ml/datasets/Bank+Marketing#.
#
| src/bank_marketing_data_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + init_cell=true
# %logstop
# %logstart -ortq ~/.logs/PY_Algorithms.py append
# %matplotlib inline
import matplotlib
import seaborn as sns
sns.set()
matplotlib.rcParams['figure.dpi'] = 144
# -
import expectexception
# # Algorithms
# We have learned a bit about how to program in Python and some ways in which we can make our code more Pythonic. However, programming is not only about making the computer do work for us, it's about optimizing the amount of work the computer needs to do. There are multiple types of work we can consider, but here we will consider three major bottlenecks in code:
#
# 1. _Computational Complexity_ - how many instructions are executed?
# 2. _Memory Needs_ - how much memory is needed?
# 3. _I/O_ - How many reads and writes or network requests do I need to make?
#
# An *Algorithm* is a procedure for solving a problem. It describes a sequence of operations that, when performed, will result in a solution to a problem. There are many types of algorithms; some are guaranteed to find a solution, some are not. Often we are interested in understanding the performance of an algorithm in terms of the three bottlenecks listed above (as well as others). In order to analyze these algorithms, we need to develop some tools to understand how algorithms behave as a function of the problem size.
#
# ## Big O
#
# In order to quantify the complexity of a particular algorithm, we can consider how the algorithm grows with respect to the size of the problem. For the purposes of this notebook we will only consider problems that are one dimensional, so we can quantify the algorithm with respect to a single number, which we will denote as $N$. Remember that a problem itself does not have a complexity, rather it is the algorithmic solution which has complexity. For example, let's consider the problem of summing all the numbers between 1 and $N$ (inclusive). One way to do this might be to take the sum of all of these numbers, one at a time.
def sum_num(N):
    """Sum the integers 0..N (inclusive) with an explicit O(N) loop."""
    total = 0
    for value in range(N + 1):
        total = total + value
    return total
# This algorithm will be $O(N)$ because we need to perform about $N$ operations. Note that we only care about the dominant function of $N$ in the expansion so for our purposes $O(N) \approx O(N+1) \approx O(2N)$.
#
# However, if we think a bit about how numbers sum, we can invoke a summation rule often attributed to Gauss which says that
# $$\sum_{n=1}^{N} n = \frac{N(N+1)}{2}$$
def sum_gauss(N):
    """Closed-form sum 0..N via Gauss's formula — O(1).

    Integer division is exact because N*N + N = N*(N+1) is always even.
    """
    return (N * N + N) // 2
# This algorithm is $O(1)$ because it does not depend on the size of $N$! Let's just check that it gives the same answer.
for N in range(100):
assert sum_num(N) == sum_gauss(N)
# Now lets plot the time it takes to compute these functions as a function of $N$. We will use a package called `matplotlib` to do some plotting, don't worry, we will learn about it later!
#
# We will time how long it takes to perform both of these algorithms. We will take the mean of several runs.
# +
import matplotlib.pyplot as plt
import time
def compute(n_avgs, func, N):
    """Return the mean wall-clock duration of func(N) over n_avgs runs, in ms."""
    total = 0.0
    for _ in range(n_avgs):
        start = time.time()
        func(N)
        total += time.time() - start
    return total / float(n_avgs) * 1000  # milliseconds
n_avgs = 100
time_sum = []
time_gauss = []
N_range = range(10,100000, 5000)
for N in N_range:
time_sum.append(compute(n_avgs, sum_num, N))
time_gauss.append(compute(n_avgs, sum_gauss, N))
# -
plt.plot(N_range, time_sum, 'o-', label='Sum Numbers')
plt.plot(N_range, time_gauss, 'o-', label='Gauss')
plt.xlabel('N')
plt.ylabel('Average time (ms)')
plt.legend()
# ## Computational Complexity
#
# Let's solve a version of a common problem you might find as a data scientist: how should I store my data? Let's take a very simple case where our data is just a list of numbers that we need to store in a list. Is there any way to optimize the storage?
#
# Lets consider the tradeoffs for various things we might want to do in the list.
#
# ### Finding an element
#
# If we want to find an element in a list and we know nothing about that list, then we need to check every element in the list to see if that element is there. Lets write a function to do this.
def find_ele(list_, ele):
    """Linear scan: True iff ele occurs in list_ (no ordering assumed)."""
    return any(item == ele for item in list_)
# In order to test these, lets use the `random` module to generate a list of random numbers between $0$ and $10 *N$ where $N$ is the length of the list we want.
import random
def random_list(N, sort=False):
    """Return N random ints drawn uniformly from [0, 10*N], optionally sorted."""
    draws = [random.randint(0, 10 * N) for _ in range(N)]
    if sort:
        return sorted(draws)
    return draws
random_list(5)
# +
import numpy as np
def time_func(func, *args):
    """Return the wall-clock seconds taken by a single call of func(*args)."""
    start = time.time()
    func(*args)
    return time.time() - start
def compute_with_list(n_avgs, N, sort, *funcs):
    """Mean per-function lookup time (ms) over n_avgs random lists of size N.

    Each trial builds a fresh (optionally sorted) random list and a random
    target, then times every candidate search function on the same inputs.
    """
    trials = []
    for _ in range(n_avgs):
        haystack = random_list(N, sort)
        needle = random.randint(0, 10 * N)
        trials.append([time_func(f, haystack, needle) for f in funcs])
    # Average across trials and convert seconds -> milliseconds.
    return np.array(trials).mean(axis=0) * 1000
n_avgs = 40
N_range = range(10, 100000, 10000)
time_list = np.array([compute_with_list(n_avgs, N, False, find_ele) for N in N_range])
# -
plt.plot(N_range, time_list, 'o-')
# Let us take a slightly different approach where we know that this list sorted. Note that sorting itself is $N\log(N)$ complexity, so although we will be able to perform optimized searches on a sorted list, its not in general faster to sort and then find the elements. However, if we know we will be searching often, we can build up the list as a sorted structure and for now we can assume that we have already done so.
#
# The most basic optimization we can perform is to only check until we have seen a number greater than what we are looking for. Since we know the list is sorted, we are guaranteed to not find the number in the rest of the list.
# +
def find_ele_sorted(list_, ele):
    """Scan a sorted list, bailing out as soon as values exceed ele."""
    for item in list_:
        if item >= ele:
            # Sorted input: the first item >= ele decides membership.
            return item == ele
    return False
n_avgs = 40
N_range = range(10, 100000, 10000)
time_list = np.array([compute_with_list(n_avgs, N, True, find_ele, find_ele_sorted) for N in N_range])
# -
plt.plot(N_range, time_list[:,0], 'o-', label='find_ele')
plt.plot(N_range, time_list[:,1], 'o-', label='find_ele_sorted')
plt.legend()
# This does better on average, but it still has the same $O(N)$ runtime. Such optimizations are useful, but we can do better. Lets implement what is sometimes known as binary search. This is a recursive algorithm that allows the list to be divided roughly in half on each recursive step. this will yield logarithmic asymptotic run time. Lets first illustrate the algorithm by walking through an example where `l_=[1,2,3,4,5,6,7,8,9,10,11]` and we want to check if 2 is contained in the list.
#
# First we check the midpoint of the list, which is 6. We know that 2 does not equal 6, but since the list is sorted, we can immediately rule out the part of the list containing numbers greater than 6. Thus we have already ruled out half the elements of the list.
#
# Now we can ask the question is 2 contained in list `[1,2,3,4,5]`. First we check the midpoint element of the list, which is 3. We know that 3 is not 2, but again, since $3>2$, we can eliminate half the list.
#
# Now we can check if 2 is contained in the list `[1,2]`. We will take midpoint of this list as the first element (since it has index $1=len(list)/2$), and this is equal to 2. Thus 2 is in the original list.
#
# We can see we have performed this search in only three steps and up to an extra step, this did not depend on where 2 was in the list, only that it was sorted. Since we are removing half the list each time, we expect that the number of steps will be roughly $log(N)$, where the logarithm is understood to be base 2. Lets make a plot of this function compared to $N$.
x = np.linspace(10, 2000, 200)
plt.plot(x, np.log(x)/x)
plt.xlabel('N')
plt.ylabel(r'$\log(x)/x$')
# Now we can compare this to our other search algorithms.
def find_ele_binary(l_, ele):
    """Binary search on a sorted list; True iff ele is present.

    Iterative half-interval search: each step discards the half of the
    remaining range that cannot contain ele, giving O(log N) comparisons.
    """
    lo, hi = 0, len(l_)
    while lo < hi:
        mid = (lo + hi) // 2
        if l_[mid] == ele:
            return True
        if l_[mid] > ele:
            hi = mid
        else:
            lo = mid + 1
    return False
n_avgs = 50
N_range = np.arange(1000, 70000, 8000)
time_list = np.array([compute_with_list(n_avgs, N, True, find_ele_sorted, find_ele_binary) for N in N_range])
# +
for i, func in enumerate(['find_ele_sorted', 'find_ele_binary']):
l, = plt.plot(N_range, 2**time_list[:, i], 'o-', label=func)
# fit a line to the exponent
p = np.polyfit(N_range, 2**time_list[:, i], 1)
plt.plot(N_range, N_range * p[0] + p[1], color=l.get_color())
plt.legend()
# -
# Of course, if we are only keeping track of what numbers we have seen, we can use something like a `set` which will be $O(1)$ access.
# ## Memoization
#
# Often we can get a performance increase just by not recomputing things we have already computed! Let's look again at our recursive Fibonacci sequence defined in a previous notebook.
def fibonacci_recursive(n):
    """Naive exponential-time Fibonacci (kept deliberately unmemoized).

    Base cases fib(0)=0 and fib(1)=1 collapse to `n` when n < 2.
    """
    if n < 2:
        return n
    return fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2)
# Lets make a slightly different version which keeps track of how many times we call the function on each element.
from collections import defaultdict
def fibonacci_count(n, d):
    """Naive Fibonacci that also tallies how often each argument is visited.

    d maps argument -> call count (a defaultdict(int) works well).
    Returns (fib(n), d); d is mutated in place and passed through.
    """
    d[n] += 1
    if n < 2:
        return n, d
    a, _ = fibonacci_count(n - 1, d)
    b, _ = fibonacci_count(n - 2, d)
    return a + b, d
# Lets see this in action for $N=5$.
N = 5
ans, d = fibonacci_count(N, defaultdict(int))
for i in range(N):
print(i, d[i])
#
# 5
# 4 3
# 3 2 2 1
# 2 1 1 0 1 0
# 1 0
# Now lets look for $N=25$.
N = 25
ans, d = fibonacci_count(N, defaultdict(int))
print(ans)
for i in range(N):
print(i, d[i])
# Notice that we are calling some of these functions with the same argument thousands of time. If we store the answer to the problem instead of recomputing it, can we do any better?
def fibonacci_mem(n, d):
    """Memoized Fibonacci: d caches argument -> value across recursive calls."""
    if n in d:
        return d[n]
    if n < 2:
        result = n
    else:
        result = fibonacci_mem(n - 1, d) + fibonacci_mem(n - 2, d)
    d[n] = result
    return result
# %%timeit
fibonacci_mem(33, {0:0,1:1})
# %%timeit
fibonacci_recursive(33)
fibonacci_mem(33, {}) == fibonacci_recursive(33)
# Our memoized solution does much better, it is several orders of magnitude faster than the bare recursive solution.
#
# However, it does come at a cost, although we save computation, we must use more memory to store the previous result. Often there will be a tradeoff between the two.
# ### Exercise
#
# 1. Write the factorial function $f(n) = n!$ as a recursive function.
# 2. Would memoization make this function faster?
# 3. Now what if we needed to calculate the factorial often (perhaps we were computing probabilities of different selections), would memoization be useful in this case?
# ## Memory
# As seen before memoization has a tradeoff in terms of memory. Lets try to describe that here for the case of the Fibonacci sequence. We have to keep track of a single element number (the computed solution) for all number less than $N$, the number we want to compute. Thus the memory we need grows with problem size as $O(N)$.
#
# We can analyze our algorithms in terms of memory in a similar way. Again remember, it is the algorithm (and its implementation) which has memory complexity, not the problem itself.
#
# For our first problem, we will again look at summing the numbers between 0 and $N$, and we will take two different approaches.
#
# For the first we will build a list of these elements and then sum them.
def sum_list(n):
    """Sum 0..n-1 by handing a range sequence straight to sum()."""
    return sum(range(n))
def sum_iter(n):
    """Sum 0..n-1 one term at a time, holding only the running total."""
    total = 0
    for k in range(n):
        total += k
    return total
sum_list(100), sum_iter(100)
# ## Choose a data structure wisely
#
# As we may have noticed in the sorting section, the type of data structure we use is often tied into our choice of algorithm. For example, if we don't already have sorted data, we probably don't want to use binary search because we would need to sort the data first and then would negate any search improvement (sorting is worse than $O(N)$).
#
# This can be mitigated by choosing our original structure wisely, especially when get to build it from raw data. For example when building a list, inserting elements in a sorted manner can be done in $O(log(N))$ time (with almost the same as binary search).
#
# Other data structures lend themselves to other algorithmic purposes.. For example, a `heap` (implemented in Python with the [`heapq`](https://docs.python.org/2/library/heapq.html) library) implements a tree like structure which is useful for order statistics, such as keeping track of the largest or smallest $N$ items in a collection. You can read more about it [here](https://en.wikipedia.org/wiki/Binary_heap).
#
# Even as you work through your miniprojects, sometimes choosing a dictionary instead of a list will be the difference between minutes or seconds of computation.
#
# ### Exercises
#
# 1. Explain why sorting and then using binary search is slower than just searching.
# 2. Implement insertion on a list using the same principles as binary search.
# *Copyright © 2019 The Data Incubator. All rights reserved.*
| python/PY_Algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py35]
# language: python
# name: conda-env-py35-py
# ---
# # Mine Common French Words from Wiktionary
# Sites:
url_list = ["https://en.wiktionary.org/wiki/Wiktionary:French_frequency_lists/1-2000",
"https://en.wiktionary.org/wiki/Wiktionary:French_frequency_lists/2001-4000",
"https://en.wiktionary.org/wiki/Wiktionary:French_frequency_lists/4001-6000",
"https://en.wiktionary.org/wiki/Wiktionary:French_frequency_lists/6001-8000",
"https://en.wiktionary.org/wiki/Wiktionary:French_frequency_lists/8001-10000"
]
from urllib.request import urlopen
from lxml import html
import unicodedata
french_words = list()
french_words_set = set()
# +
# Normalize a French word: lowercase and drop diacritics.
def strip_accents(s):
    """Remove combining accent marks via NFD decomposition."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')

def process_french_word(word):
    """Lowercase the word, then strip its accents."""
    return strip_accents(word.lower())
# -
# Scrape each Wiktionary frequency-list page, normalize every word, and
# collect first occurrences in order (set gives O(1) dedup, list keeps order).
for url in url_list:
    page_html = urlopen(url).read()
    tree = html.fromstring(page_html)
    # XPath targets the word links inside the frequency-table list items.
    word_list = tree.xpath('.//div/table//tr//li/span/a')
    for w in word_list:
        word = w.text
        proc_word = process_french_word(word)
        if proc_word not in french_words_set:
            french_words_set.add(proc_word)
            french_words.append(proc_word)
# Write words to a text file, one per line. A context manager guarantees the
# file is closed (and buffers flushed) even if a write fails; an explicit
# encoding avoids depending on the platform's locale default.
with open("french.txt", 'w', encoding='utf-8') as f_out:
    for word in french_words:
        f_out.write(word + "\n")
| word list scraping/Mining French Words.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Shantanu9326/Data-Science-Portfolio/blob/master/Latent_Dirichlet_Allocation_on_Articles.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="I6AJRZnqoi9i" colab_type="text"
# # Latent Dirichlet Allocation
# + [markdown] id="M6FGtZf6oi9i" colab_type="text"
# ## Data
#
# We will be using articles from NPR (National Public Radio), obtained from their website [www.npr.org](http://www.npr.org)
# + id="bCBAW567oi9j" colab_type="code" colab={}
import pandas as pd
# + id="gA3YsT-ao8df" colab_type="code" outputId="6e0b10a6-36aa-45bc-cdaa-18068d0401c6" colab={"base_uri": "https://localhost:8080/", "height": 34}
#Running or Importing .py Files with Google Colab
from google.colab import drive
drive.mount('/content/drive/')
# + id="UQpIxqV6oi9k" colab_type="code" colab={}
npr = pd.read_csv("/content/drive/My Drive/app/npr.csv")
# + id="AgbIMBtcoi9m" colab_type="code" outputId="caed0316-38f0-436f-fc2d-f7df780575a1" colab={"base_uri": "https://localhost:8080/", "height": 204}
npr.head()
# + [markdown] id="MDyoxoZ-oi9p" colab_type="text"
# Notice how we don't have the topic of the articles! Let's use LDA to attempt to figure out clusters of the articles.
# + [markdown] id="2eGnw3G1oi9p" colab_type="text"
# ## Preprocessing
# + id="6skz31_loi9q" colab_type="code" colab={}
from sklearn.feature_extraction.text import CountVectorizer
# + [markdown] id="un0zCGajoi9r" colab_type="text"
# **`max_df`**` : float in range [0.0, 1.0] or int, default=1.0`<br>
# When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None.
#
# **`min_df`**` : float in range [0.0, 1.0] or int, default=1`<br>
# When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None.
# + id="SkhHsnteoi9s" colab_type="code" colab={}
cv = CountVectorizer(max_df=0.95, min_df=2, stop_words='english')
# + id="j5yonZ8roi9t" colab_type="code" colab={}
dtm = cv.fit_transform(npr['Article'])
# + id="Dbn3ejluoi9v" colab_type="code" outputId="986356d4-8b16-47b8-83fa-3bb6bb6feeba" colab={"base_uri": "https://localhost:8080/", "height": 51}
dtm
# + [markdown] id="1luFG-zooi9y" colab_type="text"
# ## LDA
# + id="s5zdudFToi9z" colab_type="code" colab={}
from sklearn.decomposition import LatentDirichletAllocation
# + id="VNX7DDpQoi90" colab_type="code" colab={}
LDA = LatentDirichletAllocation(n_components=7,random_state=42)
# + id="Ygjy_01uoi92" colab_type="code" outputId="95b122ae-c649-48bf-8fe5-aca97da36d5c" colab={"base_uri": "https://localhost:8080/", "height": 136}
# This can take a while, we're dealing with a large amount of documents!
LDA.fit(dtm)
# + [markdown] id="yNf4usl2oi95" colab_type="text"
# ## Showing Stored Words
# + id="qAT7nAfBoi96" colab_type="code" outputId="f5e7099a-76c6-4f41-9357-6fa1c21fd6aa" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(cv.get_feature_names())
# + id="-01QBp41oi97" colab_type="code" colab={}
import random
# + id="ksmN1zJPoi99" colab_type="code" outputId="6a83c287-0076-4341-e9d6-92dfc997bfae" colab={"base_uri": "https://localhost:8080/", "height": 187}
for i in range(10):
random_word_id = random.randint(0,54776)
print(cv.get_feature_names()[random_word_id])
# + id="K7Ywf1Luoi9_" colab_type="code" outputId="6f6a1370-edee-44dc-b862-843901f9338d" colab={"base_uri": "https://localhost:8080/", "height": 187}
for i in range(10):
random_word_id = random.randint(0,54776)
print(cv.get_feature_names()[random_word_id])
# + [markdown] id="B2NZt_KSoi-B" colab_type="text"
# ### Showing Top Words Per Topic
# + id="H6eyvhGCoi-C" colab_type="code" outputId="e7b59918-4054-42b7-cd84-f581d2e57f67" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(LDA.components_)
# + id="zl9ICtaMoi-E" colab_type="code" outputId="36849a8a-173e-434e-f344-a70e3428840e" colab={"base_uri": "https://localhost:8080/", "height": 238}
LDA.components_
# + id="nXDZnGMboi-G" colab_type="code" outputId="930c490c-7c8f-42a9-c51a-6430d8cbfa38" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(LDA.components_[0])
# + id="H1CpNUGWoi-J" colab_type="code" outputId="a2fdd07f-21ae-48d3-d8c1-5d8b56379ee0" colab={"base_uri": "https://localhost:8080/", "height": 51}
single_topic = LDA.components_[0]
single_topic[0:7]
# + id="80VSJiRcoi-L" colab_type="code" outputId="93cc7971-c1f7-4bdc-f576-17b97db701cd" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Returns the indices that would sort this array.
single_topic.argsort()
# + id="Wdft5sEboi-O" colab_type="code" outputId="f4febbf1-560f-4910-cc67-b1c2412f670a" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Word least representative of this topic
single_topic[18302]
# + id="W_bJFb4soi-Q" colab_type="code" outputId="56d609b9-66dc-4891-96e9-823f0e8af12c" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Word most representative of this topic
single_topic[42993]
# + id="cHhR7zGpoi-S" colab_type="code" outputId="b8549707-17ed-472b-cc05-cd8c155eb79d" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Top 10 words for this topic:
single_topic.argsort()[-10:]
# + id="QXHaduMgoi-T" colab_type="code" colab={}
top_word_indices = single_topic.argsort()[-10:]
# + id="7pCdxYMyoi-V" colab_type="code" outputId="9e66d247-87a7-48aa-a09a-aac1bc52c208" colab={"base_uri": "https://localhost:8080/", "height": 187}
for index in top_word_indices:
print(cv.get_feature_names()[index])
# + [markdown] id="D8DT6Zs9oi-Y" colab_type="text"
# These look like business articles perhaps... Let's confirm by using .transform() on our vectorized articles to attach a label number. But first, let's view all 7 topics found.
# + id="ooQnxjINoi-Y" colab_type="code" outputId="d6f4b031-d4d9-4638-cc4d-d05b6a4972b2" colab={"base_uri": "https://localhost:8080/", "height": 493}
for index,topic in enumerate(LDA.components_):
print(f'THE TOP 15 WORDS FOR TOPIC #{index}')
print([cv.get_feature_names()[i] for i in topic.argsort()[-15:]])
print('\n')
# + [markdown] id="azxes_46oi-b" colab_type="text"
# ### Attaching Discovered Topic Labels to Original Articles
# + id="5Ly4YPAroi-b" colab_type="code" outputId="71d498b6-87c4-4a40-b8c9-d168fd11839d" colab={"base_uri": "https://localhost:8080/", "height": 51}
dtm
# + id="vzfZMMWUoi-d" colab_type="code" outputId="7244816c-30fd-4d85-9160-60d6375e3fe1" colab={"base_uri": "https://localhost:8080/", "height": 34}
dtm.shape
# + id="iCUeS5Bboi-e" colab_type="code" outputId="acf8b909-3a85-4f48-ec22-8fb9e04172e4" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(npr)
# + id="LmFcxGihoi-g" colab_type="code" colab={}
topic_results = LDA.transform(dtm)
# + id="cldzVYGloi-i" colab_type="code" outputId="21ae5a16-5777-4a87-a8c7-ab36a6d99acc" colab={"base_uri": "https://localhost:8080/", "height": 34}
topic_results.shape
# + id="uRzKffZ-oi-k" colab_type="code" outputId="f9af84b8-188b-4c45-a5f2-dcb8c788bc08" colab={"base_uri": "https://localhost:8080/", "height": 51}
topic_results[0]
# + id="WW7cJLJvoi-m" colab_type="code" outputId="88b9f630-8e87-4c36-de19-1d37861e1056" colab={"base_uri": "https://localhost:8080/", "height": 34}
topic_results[0].round(2)
# + id="YKbSfxMCoi-o" colab_type="code" outputId="95cd2182-794b-401d-d591-ea93f4d67073" colab={"base_uri": "https://localhost:8080/", "height": 34}
topic_results[0].argmax()
# + [markdown] id="WdI-CQ3xoi-q" colab_type="text"
# This means that our model thinks that the first article belongs to topic #1.
# + [markdown] id="AyaJESXSoi-q" colab_type="text"
# ### Combining with Original Data
# + id="bGQSKRw2oi-r" colab_type="code" outputId="824a4f76-08ee-4994-bb13-5da034d99dd3" colab={"base_uri": "https://localhost:8080/", "height": 204}
npr.head()
# + id="81shC1cFoi-s" colab_type="code" outputId="73b611ec-b9c8-41f6-ad7e-05828bc80eee" colab={"base_uri": "https://localhost:8080/", "height": 34}
topic_results.argmax(axis=1)
# + id="PB-v1ydeoi-v" colab_type="code" colab={}
npr['Topic'] = topic_results.argmax(axis=1)
# + id="mhTqevA_oi-x" colab_type="code" outputId="55ef7dea-ff53-4f8f-d252-1fda29cf5013" colab={"base_uri": "https://localhost:8080/", "height": 359}
npr.head(10)
# + [markdown] id="VCpb1qzKoi-z" colab_type="text"
# ## Great work!
| Text Analytics/Latent_Dirichlet_Allocation_on_Articles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Trust Region Policy Optimization (TRPO)
# ---
# In this notebook, we train TRPO with a plain pixel-wise perturbation environment.
#
# ### 1. Import the Necessary Packages
# +
import argparse
from itertools import count
import gym
import scipy.optimize
import torch
from models import *
from replay_memory import Memory
from running_state import ZFilter
from torch.autograd import Variable
from trpo import trpo_step
from utils_trpo import *
import matplotlib.pyplot as plt
# %matplotlib inline
from dynamics import Dynamics
# Enable legacy-compatibility warnings for broadcasting/keepdim behavior, and
# default to float64 tensors (matches the .double() conversions used by the
# L-BFGS value-function fit in update_params below).
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')
# -
# ### 2. Instantiate the Environment and Agent
# +
# Environment from dynamics.py -- presumably a perturbation environment built
# from a pretrained VAE and classifier on MNIST, with target class 9 (verify
# against dynamics.py).
env = Dynamics(dataset = 'mnist', vae = 'VAE_mnist', cls = 'CLS_mnist', target = 9)
env.reset()
state_size = env.state_space[0]
action_size = env.action_space[0]
# Gaussian policy and state-value baseline networks (defined in models.py).
policy_net = Policy(state_size, action_size)
value_net = Value(state_size)
class args:
    # Hyperparameter container standing in for an argparse namespace.
    gamma = 0.995        # discount factor used in the return/advantage recursion
    tau = 0.97           # smoothing coefficient in the advantage recursion (GAE-style)
    l2_reg = 1e-3        # weight decay for the value-function L-BFGS fit
    max_kl = 1e-2        # trust-region KL constraint passed to trpo_step
    damping = 1e-1       # damping passed to trpo_step
    seed = 543
    batch_size = 150     # minimum environment steps collected per policy update
    render = False
    log_interval = 100   # episodes between log lines / checkpoint saves
# +
def select_action(state):
    """Sample a stochastic action from the current Gaussian policy.

    The numpy observation is promoted to a batched torch tensor, passed
    through ``policy_net`` to obtain the per-dimension mean and standard
    deviation, and a sample is drawn from the resulting normal distribution.
    """
    obs = Variable(torch.from_numpy(state).unsqueeze(0))
    mean, _, std = policy_net(obs)
    return torch.normal(mean, std)
def update_params(batch):
    """Perform one TRPO update from a batch of sampled transitions.

    Fits the value network with L-BFGS, computes GAE-style advantages, then
    takes a single trust-region policy step via ``trpo_step``.

    Parameters:
        batch: transition record from ``Memory.sample()`` with fields
            ``state``, ``action``, ``mask``, ``reward`` (mask is 0 at episode
            boundaries, 1 otherwise).
    """
    rewards = torch.Tensor(batch.reward)
    masks = torch.Tensor(batch.mask)
    actions = torch.Tensor(np.concatenate(batch.action, 0))
    states = torch.Tensor(batch.state)
    values = value_net(Variable(states))
    # Buffers for discounted returns, TD residuals, and advantages.
    returns = torch.Tensor(actions.size(0),1)
    deltas = torch.Tensor(actions.size(0),1)
    advantages = torch.Tensor(actions.size(0),1)
    prev_return = 0
    prev_value = 0
    prev_advantage = 0
    # Backward recursion over the batch; masks zero the carry-over at
    # episode boundaries so episodes do not bleed into each other.
    for i in reversed(range(rewards.size(0))):
        returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
        deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
        advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
        prev_return = returns[i, 0]
        prev_value = values.data[i, 0]
        prev_advantage = advantages[i, 0]
    targets = Variable(returns)
    # Original code uses the same LBFGS to optimize the value loss
    def get_value_loss(flat_params):
        # Closure for scipy's L-BFGS: load the flat parameter vector, zero
        # stale grads, and return (loss, flat gradient) as float64 arrays.
        set_flat_params_to(value_net, torch.Tensor(flat_params))
        for param in value_net.parameters():
            if param.grad is not None:
                param.grad.data.fill_(0)
        values_ = value_net(Variable(states))
        value_loss = (values_ - targets).pow(2).mean()
        # weight decay
        for param in value_net.parameters():
            value_loss += param.pow(2).sum() * args.l2_reg
        value_loss.backward()
        return (value_loss.data.double().numpy(), get_flat_grad_from(value_net).data.double().numpy())
    flat_params, _, opt_info = scipy.optimize.fmin_l_bfgs_b(get_value_loss, get_flat_params_from(value_net).double().numpy(), maxiter=25)
    set_flat_params_to(value_net, torch.Tensor(flat_params))
    # Standardize advantages to zero mean / unit variance for the policy step.
    advantages = (advantages - advantages.mean()) / advantages.std()
    action_means, action_log_stds, action_stds = policy_net(Variable(states))
    # Log-probabilities under the pre-update policy, detached (.data.clone())
    # as the fixed reference for the surrogate-loss ratio below.
    fixed_log_prob = normal_log_density(Variable(actions), action_means, action_log_stds, action_stds).data.clone()
    def get_loss(volatile=False):
        # Surrogate loss: -E[advantage * exp(log pi_new - log pi_old)].
        # When volatile=True the policy outputs are computed under no_grad.
        if volatile:
            with torch.no_grad():
                action_means, action_log_stds, action_stds = policy_net(Variable(states))
        else:
            action_means, action_log_stds, action_stds = policy_net(Variable(states))
        log_prob = normal_log_density(Variable(actions), action_means, action_log_stds, action_stds)
        action_loss = -Variable(advantages) * torch.exp(log_prob - Variable(fixed_log_prob))
        return action_loss.mean()
    def get_kl():
        # KL(old || new) for diagonal Gaussians, with the current outputs
        # detached as the "old" distribution (standard TRPO construction).
        mean1, log_std1, std1 = policy_net(Variable(states))
        mean0 = Variable(mean1.data)
        log_std0 = Variable(log_std1.data)
        std0 = Variable(std1.data)
        kl = log_std1 - log_std0 + (std0.pow(2) + (mean0 - mean1).pow(2)) / (2.0 * std1.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)
    trpo_step(policy_net, get_loss, get_kl, args.max_kl, args.damping)

# Running filters from running_state.py -- presumably online normalizers:
# states are filtered with clipping at 5, rewards scale-only (demean=False)
# clipped at 10. Verify semantics against ZFilter's implementation.
running_state = ZFilter((state_size,), clip=5)
running_reward = ZFilter((1,), demean=False, clip=10)
# -
# ### 3. Train the Agent with TRPO
# +
num_epoch = 10000
# NOTE(review): itertools.count(num_epoch) starts the episode counter AT
# 10000 and iterates forever -- it does not cap training at num_epoch
# episodes. Likely intended `count(1)` plus an explicit stopping condition.
for i_episode in count(num_epoch):
    memory = Memory()
    num_steps = 0
    reward_batch = 0
    num_episodes = 0
    # Collect at least args.batch_size environment steps before updating.
    while num_steps < args.batch_size:
        print("batch %d" % (num_steps))
        state = env.reset()
        state = running_state(state)
        reward_sum = 0
        for t in range(1000): # Don't infinite loop while learning
            print("step %d" % (t))
            action = select_action(state)
            action = action.data[0].numpy()
            next_state, reward, done, _ = env.step(action)
            reward_sum += reward
            next_state = running_state(next_state)
            # mask=0 flags a terminal transition for the advantage recursion.
            mask = 1
            if done:
                mask = 0
            memory.push(state, np.array([action]), mask, next_state, reward)
            if args.render:
                env.render()
            if done:
                break
            state = next_state
        # NOTE(review): the episode actually took t+1 steps, so (t-1)
        # undercounts by two (pattern inherited from the reference
        # pytorch-trpo implementation) -- confirm whether this is intended.
        num_steps += (t-1)
        num_episodes += 1
        reward_batch += reward_sum
    reward_batch /= num_episodes
    batch = memory.sample()
    update_params(batch)
    # reward_sum appears to be a scalar here, so np.mean is effectively a no-op.
    print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(reward_sum)), end="")
    if i_episode % args.log_interval == 0:
        print('Episode {}\tLast reward: {}\tAverage reward {:.2f}'.format(
            i_episode, reward_sum, reward_batch))
        # Periodically checkpoint both networks.
        torch.save(policy_net.state_dict(), 'policy_checkpoint.pth')
        torch.save(value_net.state_dict(), 'value_checkpoint.pth')
# -
# ### 4. Watch a Smart Agent!
# +
# NOTE(review): `agent` is never defined in this notebook -- this cell looks
# copied from a DDPG notebook (it loads 'checkpoint_actor.pth' /
# 'checkpoint_critic.pth'), while the training loop above saves
# 'policy_checkpoint.pth' and 'value_checkpoint.pth'. Confirm intent before
# running; as written this cell will raise NameError.
agent.actor_local.load_state_dict(torch.load('checkpoint_actor.pth'))
agent.critic_local.load_state_dict(torch.load('checkpoint_critic.pth'))
state = env.reset()
for t in range(200):
    action = agent.act(state, add_noise=False)
    env.render()
    state, reward, done, _ = env.step(action)
    print(reward, done)
    if done:
        break
# Render the final frame; env.render() presumably returns an image object.
img = env.render()
img.show()
#env.close()
# -
# -
# ### 5. Explore
#
# In this exercise, we have provided a sample DDPG agent and demonstrated how to use it to solve an OpenAI Gym environment. To continue your learning, you are encouraged to complete any (or all!) of the following tasks:
# - Amend the various hyperparameters and network architecture to see if you can get your agent to solve the environment faster than this benchmark implementation. Once you build intuition for the hyperparameters that work well with this environment, try solving a different OpenAI Gym task!
# - Write your own DDPG implementation. Use this code as reference only when needed -- try as much as you can to write your own algorithm from scratch.
# - You may also like to implement prioritized experience replay, to see if it speeds learning.
# - The current implementation adds Ornstein-Uhlenbeck noise to the action space. However, it has [been shown](https://blog.openai.com/better-exploration-with-parameter-noise/) that adding noise to the parameters of the neural network policy can improve performance. Make this change to the code, to verify it for yourself!
# - Write a blog post explaining the intuition behind the DDPG algorithm and demonstrating how to use it to solve an RL environment of your choosing.
| exp_adv/TRPO/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Aggregate osu top 10k dump into MongoDB
# **Contributors:** <NAME>
#
# **Achievement:** Migrated osu_user_stats, osu_scores_high into separate db osu_top_db. Migrated osu_beatmaps & osu_beatmap_attribs into existing osu_random_db collections to reduce redundancy.
#
# **Requirements:**
#
# - External
# - MySQL DB has imported osu top 10k dump from https://data.ppy.sh/
# - Notebooks
# - *exploration/sql_migration/random_dump_migration.ipynb*
import sys
sys.path.append('../..')
from exploration.config import sql_inst, mongo_inst
from mlpp.data_collection.sql_migration import SqlDumpMigrator
# +
# Order of insertion matters
# SQL dump schemas to migrate, processed in list order.
SQL_DUMPS = [
    "osu_top_2021_01",
]
migrator = SqlDumpMigrator(sql_inst)
# Target Mongo databases: beatmap data is merged into the existing
# osu_random_db (to avoid duplication, per the notes above); user stats and
# high scores go into the separate osu_top_db.
osu_random_db, osu_top_db = mongo_inst["osu_random_db"], mongo_inst["osu_top_db"]
# +
# Create the score indexes up front so inserted documents are queryable by
# user, beatmap, and date without a post-hoc index build.
osu_top_db['osu_scores_high'].create_index('user_id')
osu_top_db['osu_scores_high'].create_index('beatmap_id')
osu_top_db['osu_scores_high'].create_index('date')
migrator.migrate_users_and_scores(SQL_DUMPS, osu_top_db)
# -
migrator.migrate_beatmaps(SQL_DUMPS, osu_random_db)
migrator.migrate_beatmap_attribs(SQL_DUMPS, osu_random_db)
| winter21/1_sql_migration/top_dump_migration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn import metrics
from matplotlib import pyplot as plt
import numpy as np
import itertools
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
# Load the labelled tweets and split off the sentiment target column.
data = pd.read_csv("../Data/train_data.csv")
data.columns  # notebook display of the available columns
data.dropna(axis=0, inplace=True)
target = data['target']
data = data.drop(columns=["target", 'Unnamed: 0'], axis=1)
print("{}\n{}".format(data.shape, target.shape))

# TF-IDF features over the raw tweet text.
vectorizer = TfidfVectorizer()
tweets = vectorizer.fit_transform(data["tweets"])
#print(vectorizer.get_feature_names())
print(tweets.shape)

# Persist the fitted vectorizer so inference reuses the same vocabulary.
# 'with' guarantees the handle is closed even if pickling raises (the
# original open()/close() pairs leaked the handle on error).
with open("../Pickle/Vectorizer.pickle", "wb") as vect:
    pickle.dump(vectorizer, vect)

x_train, x_test, y_train, y_test = train_test_split(tweets, target, test_size=0.333, random_state=42)


def _fit_report_save(model, name, pickle_path):
    """Fit *model* on the training split, print its train/test accuracy
    under *name*, pickle it to *pickle_path*, and return the fitted model."""
    model.fit(x_train, y_train)
    yhat = model.predict(x_test)
    print(name)
    print("Train set Accuracy: ", metrics.accuracy_score(y_train, model.predict(x_train)))
    print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat))
    with open(pickle_path, "wb") as fh:
        pickle.dump(model, fh)
    return model


# Train, evaluate, and persist each candidate classifier (same models,
# output, and pickle files as before; shared logic factored into the helper).
LR = _fit_report_save(LogisticRegression(), "LogisticRegression", "../Pickle/LogisticRegression.pickle")
BNB = _fit_report_save(BernoulliNB(), "BernoulliNB", "../Pickle/BernoulliNB.pickle")
RForest = _fit_report_save(RandomForestClassifier(), "RandomForestClassifier", "../Pickle/RandomForestClassifier.pickle")
| Notebook/Finalmodelling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["remove_cell"]
# <h1 style="font-size:35px;
# color:black;
# ">Lab 2 Quantum Measurements</h1>
# -
# Prerequisite
# - [Ch.1.4 Single Qubit Gates](https://qiskit.org/textbook/ch-states/single-qubit-gates.html)
# - [Ch.2.2 Multiple Qubits and Entangled States](https://qiskit.org/textbook/ch-gates/multiple-qubits-entangled-states.html)
# - [Mitigating Noise on Real Quantum Computers](https://www.youtube.com/watch?v=yuDxHJOKsVA&list=PLOFEBzvs-Vvp2xg9-POLJhQwtVktlYGbY&index=8)
#
# Other relevant materials
# - [Feynman Lectures Ch. III - 12](https://www.feynmanlectures.caltech.edu/III_12.html)
# - [Quantum Operation](https://qiskit.org/documentation/tutorials/circuits/3_summary_of_quantum_operations.html)
# - [Interactive Bloch Sphere](https://nonhermitian.org/kaleido/stubs/kaleidoscope.interactive.bloch_sphere.html#kaleidoscope.interactive.bloch_sphere)
# - [Ch.5.2 Measurement Error Mitigation](https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html)
from qiskit import *
import numpy as np
from numpy import linalg as la
from qiskit.tools.monitor import job_monitor
import qiskit.tools.jupyter
# <h2 style="font-size:24px;">Part 1: Measuring the state of a qubit</h2>
#
# <br>
# <div style="background: #E8E7EB; border-radius: 5px;
# -moz-border-radius: 5px;">
# <p style="background: #800080;
# border-radius: 5px 5px 0px 0px;
# padding: 10px 0px 10px 10px;
# font-size:18px;
# color:white;
# "><b>Goal</b></p>
# <p style=" padding: 0px 0px 10px 10px;
# font-size:16px;">Determine the Bloch components of a qubit.</p>
# </div>
#
# Fundamental to the operation of a quantum computer is the ability to compute the Bloch components of a qubit or qubits. These components correspond to the expectation values of the Pauli operators $X, Y, Z$, and are important quantities for applications such as quantum chemistry and optimization. Unfortunately, it is impossible to simultaneously compute these values, thus requiring many executions of the same circuit. In addition, measurements are restricted to the computational basis (Z-basis) so that each Pauli needs to be rotated to the standard basis to access the x and y components. Here we verify the methods by considering the case of a random vector on the Bloch sphere.
# <h3 style="font-size: 20px">📓 1. Express the expectation values of the Pauli operators for an arbitrary qubit state $|q\rangle$ in the computational basis. </h3>
#
# The case for the expectation value of Pauli Z gate is given as an example.
# Using the diagonal representation, also known as spectral form or orthonormal decomposition, of Pauli $Z$ gate and the relations among the Pauli gates (see [here](https://qiskit.org/textbook/ch-states/single-qubit-gates.html)), expectation values of $ X, Y, Z $ gates can be written as
#
# $$
# \begin{align}
# \langle Z \rangle &=\langle q | Z | q\rangle =\langle q|0\rangle\langle 0|q\rangle - \langle q|1\rangle\langle 1|q\rangle
# =|\langle 0 |q\rangle|^2 - |\langle 1 | q\rangle|^2\\\\
# \langle X \rangle &= \\\\
# \langle Y \rangle &=
# \end{align}
# \\
# $$
# , respectively.
#
# Therefore, the expectation values of the Paulis for a qubit state $|q\rangle$ can be obtained by making a measurement in the standard basis after rotating the standard basis frame to lie along the corresponding axis. The probabilities of obtaining the two possible outcomes 0 and 1 are used to evaluate the desired expectation value as the above equations show.
# <h3 style="font-size: 20px">2. Measure the Bloch sphere coordinates of a qubit using the qasm simulator and plot the vector on the Bloch sphere.</h3>
# <h4 style="font-size: 17px">📓Step A. Create a qubit state using the circuit method, <code>initialize</code> with two random complex numbers as the parameter.</h4>
#
# To learn how to use the function `initialize`, check [here](https://qiskit.org/documentation/tutorials/circuits/3_summary_of_quantum_operations.html#Arbitrary-initialization). (go to the `arbitrary initialization` section.)
# +
# Single-qubit circuit to hold the random state prepared in this exercise.
qc = QuantumCircuit(1)
#### your code goes here
# (exercise placeholder: call qc.initialize with two random complex
# amplitudes, normalized to unit length -- see the tutorial linked above)
# -
# <h4 style="font-size: 17px">📓 Step B. Build the circuits to measure the expectation values of $X, Y, Z$ gate based on your answers to the question 1. Run the cell below to estimate the Bloch sphere coordinates of the qubit from step A using the qasm simulator.</h4>
#
# The circuit for $Z$ gate measurement is given as an example.
# +
# z measurement of qubit 0
measure_z = QuantumCircuit(1,1)
measure_z.measure(0,0)
# x measurement of qubit 0
measure_x = QuantumCircuit(1,1)
# your code goes here
# y measurement of qubit 0
measure_y = QuantumCircuit(1,1)
# your code goes here
shots = 2**14 # number of samples used for statistics
sim = Aer.get_backend('aer_simulator')
bloch_vector_measure = []
for measure_circuit in [measure_x, measure_y, measure_z]:
# run the circuit with the selected measurement and get the number of samples that output each bit value
counts = execute(qc+measure_circuit, sim, shots=shots).result().get_counts()
# calculate the probabilities for each bit value
probs = {}
for output in ['0','1']:
if output in counts:
probs[output] = counts[output]/shots
else:
probs[output] = 0
bloch_vector_measure.append( probs['0'] - probs['1'] )
# normalizing the Bloch sphere vector
bloch_vector = bloch_vector_measure/la.norm(bloch_vector_measure)
print('The Bloch sphere coordinates are [{0:4.3f}, {1:4.3f}, {2:4.3f}]'
.format(*bloch_vector))
# -
# <h4 style="font-size: 17px">Step C. Plot the vector on the Bloch sphere.</h4>
#
# Note that the following cell for the interactive bloch_sphere would not run properly unless you work in [IQX](https://quantum-computing.ibm.com/login). You can either use `plot_bloch_vector` for the non-interactive version or install `kaleidoscope` by running
#
# ```
# pip install kaleidoscope
#
# ```
# in a terminal. You also need to restart your kernel after the installation. To learn more about how to use the interactive Bloch sphere, go [here](https://nonhermitian.org/kaleido/stubs/kaleidoscope.interactive.bloch_sphere.html#kaleidoscope.interactive.bloch_sphere).
# +
# Interactive Bloch sphere (requires the kaleidoscope package / IQX).
from kaleidoscope.interactive import bloch_sphere
bloch_sphere(bloch_vector, vectors_annotation=True)
# +
# Non-interactive fallback using Qiskit's built-in plotting.
from qiskit.visualization import plot_bloch_vector
plot_bloch_vector( bloch_vector )
# -
# <h2 style="font-size:24px;">Part 2: Measuring Energy</h2>
#
# <br>
# <div style="background: #E8E7EB; border-radius: 5px;
# -moz-border-radius: 5px;">
# <p style="background: #800080;
# border-radius: 5px 5px 0px 0px;
# padding: 10px 0px 10px 10px;
# font-size:18px;
# color:white;
# "><b>Goal</b></p>
# <p style=" padding: 0px 0px 10px 10px;
# font-size:16px;">Evaluate the energy levels of the hydrogen ground state using qasm simulator.</p>
# </div>
#
#
# The energy of a quantum system can be estimated by measuring the expectation value of its Hamiltonian, which is a Hermitian operator, through the procedure we mastered in part 1.
#
# The ground state of hydrogen is not defined as a single unique state but actually contains four different states due to the spins of the electron and proton. In part 2 of this lab, we evaluate the energy difference among these four states, which is from the `hyperfine splitting`, by computing the energy expectation value for the system of two spins with the Hamiltonian expressed in Pauli operators. For more information about `hyperfine structure`, see [here](https://www.feynmanlectures.caltech.edu/III_12.html)
# Consider the system with two qubit interaction Hamiltonian $H = A(XX+YY+ZZ)$ where $A = 1.47e^{-6} eV$ and $X, Y, Z$ are Pauli gates. Then the energy expectation value of the system can be evaluated by combining the expectation value of each term in the Hamiltonian.
# In this case, $E = \langle H\rangle = A( \langle XX\rangle + \langle YY\rangle + \langle ZZ\rangle )$.
# <h3 style="font-size: 20px">📓 1. Express the expectation value of each term in the Hamiltonian for an arbitrary two qubit state $|\psi \rangle$ in the computational basis.</h3>
#
# The case for the term $\langle ZZ\rangle$ is given as an example.
#
# $$
# \begin{align}
# \langle ZZ\rangle &=\langle \psi | ZZ | \psi\rangle =\langle \psi|(|0\rangle\langle 0| - |1\rangle\langle 1|)\otimes(|0\rangle\langle 0| - |1\rangle\langle 1|) |\psi\rangle
# =|\langle 00|\psi\rangle|^2 - |\langle 01 | \psi\rangle|^2 - |\langle 10 | \psi\rangle|^2 + |\langle 11|\psi\rangle|^2\\\\
# \langle XX\rangle &= \\\\
# \langle YY\rangle &=
# \end{align}
# $$
# <h3 style="font-size: 20px">2. Measure the expected energy of the system using the qasm simulator when two qubits are entangled. Regard the bell basis, four different entangled states.</h3>
# <h4 style="font-size: 17px">📓Step A. Construct the circuits to prepare four different bell states.</h4>
#
# Let's label each bell state as
# $$
# \begin{align}
# Tri1 &= \frac{1}{\sqrt2} (|00\rangle + |11\rangle)\\
# Tri2 &= \frac{1}{\sqrt2} (|00\rangle - |11\rangle)\\
# Tri3 &= \frac{1}{\sqrt2} (|01\rangle + |10\rangle)\\
# Sing &= \frac{1}{\sqrt2} (|10\rangle - |01\rangle)
# \end{align}
# $$
# +
# circuit for the state Tri1
# (exercise placeholder; the four Bell states are defined in the markdown above)
Tri1 = QuantumCircuit(2)
# your code goes here
# circuit for the state Tri2
Tri2 = QuantumCircuit(2)
# your code goes here
# circuit for the state Tri3
Tri3 = QuantumCircuit(2)
# your code goes here
# circuit for the state Sing
Sing = QuantumCircuit(2)
# your code goes here
# -
# <h4 style="font-size: 17px">📓Step B. Create the circuits to measure the expectation value of each term in the Hamiltonian based on your answer to the question 1.</h4>
# +
# <ZZ>
# Standard-basis measurement on both qubits directly gives <ZZ>.
measure_ZZ = QuantumCircuit(2)
measure_ZZ.measure_all()
# <XX>
# (exercise: rotate each qubit from the X basis onto Z before measure_all)
measure_XX = QuantumCircuit(2)
# your code goes here
# <YY>
# (exercise: rotate each qubit from the Y basis onto Z before measure_all)
measure_YY = QuantumCircuit(2)
# your code goes here
# -
# <h4 style="font-size: 17px">Step C. Execute the circuits on qasm simulator by running the cell below and evaluate the energy expectation value for each state.</h4>
# +
shots = 2**14 # number of samples used for statistics
A = 1.47e-6 #unit of A is eV
E_sim = []
for state_init in [Tri1,Tri2,Tri3,Sing]:
    Energy_meas = []
    for measure_circuit in [measure_XX, measure_YY, measure_ZZ]:
        # run the circuit with the selected measurement and get the number of samples that output each bit value
        # (note: this rebinds the module-level `qc` from Part 1)
        qc = state_init+measure_circuit
        counts = execute(qc, sim, shots=shots).result().get_counts()
        # calculate the probabilities for each computational basis
        probs = {}
        for output in ['00','01', '10', '11']:
            if output in counts:
                probs[output] = counts[output]/shots
            else:
                probs[output] = 0
        # Two-qubit Pauli expectation: <PP> = P(00) - P(01) - P(10) + P(11).
        Energy_meas.append( probs['00'] - probs['01'] - probs['10'] + probs['11'] )
    # E = A(<XX> + <YY> + <ZZ>) for this Bell state.
    E_sim.append(A * np.sum(np.array(Energy_meas)))
# +
# Run this cell to print out your results
print('Energy expectation value of the state Tri1 : {:.3e} eV'.format(E_sim[0]))
print('Energy expectation value of the state Tri2 : {:.3e} eV'.format(E_sim[1]))
print('Energy expectation value of the state Tri3 : {:.3e} eV'.format(E_sim[2]))
print('Energy expectation value of the state Sing : {:.3e} eV'.format(E_sim[3]))
# -
# <h4 style="font-size: 17px">Step D. Understanding the result. </h4>
#
# If you found the energy expectation values successfully, you would have obtained exactly the same value, $A (= 1.47e^{-6} eV)$, for the triplet states, $|Tri1\rangle, |Tri2\rangle, |Tri3\rangle$ and one lower energy level, $-3A (= -4.41e^{-6} eV)$ for the singlet state $|Sing\rangle$.
#
# What we have done here is measuring the energies of the four different spin states corresponding to the ground state of hydrogen and observed `hyperfine structure` in the energy levels caused by spin-spin coupling. This tiny energy difference between the singlet and triplet states is the reason for the famous 21-cm wavelength radiation used to map the structure of the galaxy.
# In the cell below, we verify the wavelength of the emission from the transition between the triplet states and singlet state.
# +
# reduced plank constant in (eV) and the speed of light(cgs units)
hbar, c = 4.1357e-15, 3e10
# energy difference between the triplets and singlet
E_del = abs(E_sim[0] - E_sim[3])
# frequency associated with the energy difference
f = E_del/hbar
# convert frequency to wavelength in (cm)
wavelength = c/f
print('The wavelength of the radiation from the transition\
in the hyperfine structure is : {:.1f} cm'.format(wavelength))
# -
# <h2 style="font-size:24px;">Part 3: Execute the circuits on Quantum Computer</h2>
#
# <br>
# <div style="background: #E8E7EB; border-radius: 5px;
# -moz-border-radius: 5px;">
# <p style="background: #800080;
# border-radius: 5px 5px 0px 0px;
# padding: 10px 0px 10px 10px;
# font-size:18px;
# color:white;
# "><b>Goal</b></p>
# <p style=" padding: 0px 0px 10px 10px;
# font-size:16px;"> Re-run the circuits on a IBM quantum system. Perform measurement error mitigations on the result to improve the accuracy in the energy estimation.</p>
# </div>
# <h4 style="font-size: 17px">Step A. Run the following cells to load your account and select the backend </h4>
# + tags=["uses-hardware"]
# Load the stored IBM Quantum credentials.
provider = IBMQ.load_account()
# + tags=["uses-hardware"]
# Select the 5-qubit Athens device (substitute any backend your account can access).
backend = provider.get_backend('ibmq_athens')
# -
# <h4 style="font-size: 17px">Step B. Execute the circuits on the quantum system. </h4>
#
#
# In Lab1 when we executed multiple circuits on a real quantum system, we submitted each circuit as a separate job which produces the multiple job ids. This time, we put all the circuits in a list and execute the list of the circuits as one job. In this way, all the circuit executions can happen at once, which would possibly decrease your wait time in the queue.
#
# In addition, `transpile` is not used here as all the circuits that we run consist of one or two qubit gates. We can still specify the initial_layout and optimization_level through `execute` function. Without using `transpile`, the transpiled circuits are not accessible which is not a concern for this case.
# <p>📓 Check the backend configuration information and error map through the widget to determine your <code>initial_layout</code>.
# + tags=["uses-hardware"]
# run this cell to get the backend information through the widget
backend
# + tags=["uses-hardware"]
# assign your choice for the initial layout to the list variable `initial_layout`.
# (exercise placeholder: the bare `=` is intentionally incomplete)
initial_layout =
# -
# Run the following cell to execute the circuits with the initial_layout on the backend.
# + tags=["uses-hardware"]
# All twelve circuits (4 Bell states x 3 measurement bases) submitted as one job.
qc_all = [state_init+measure_circuit for state_init in [Tri1,Tri2,Tri3,Sing]
          for measure_circuit in [measure_XX, measure_YY, measure_ZZ] ]
shots = 8192
job = execute(qc_all, backend, initial_layout=initial_layout, optimization_level=3, shots=shots)
print(job.job_id())
job_monitor(job)
# + tags=["uses-hardware"]
# getting the results of your job
results = job.result()
# + tags=["uses-hardware"]
## To access the results of the completed job
#results = backend.retrieve_job('job_id').result()
# -
# <h4 style="font-size: 17px">Step C. Estimate the ground state energy levels from the results of the previous step by executing the cells below. </h4>
# + tags=["uses-hardware"]
def Energy(results, shots):
    """Compute the energy levels of the hydrogen ground state.

    Parameters:
        results (obj): execution results for the twelve circuits measuring
            the Hamiltonian (three Pauli bases for each of the four states).
        shots (int): number of shots used for each circuit execution.

    Returns:
        list: energy expectation values (eV) of the four hydrogen ground states.
    """
    coupling = 1.47e-6  # hyperfine coupling constant A, in eV
    energies = []
    for state_idx in range(4):
        # One expectation value per Pauli term (XX, YY, ZZ) of this state.
        terms = []
        for comp_idx in range(3):
            counts = results.get_counts(3 * state_idx + comp_idx)
            probs = {key: counts.get(key, 0) / shots for key in ('00', '01', '10', '11')}
            terms.append(probs['00'] - probs['01'] - probs['10'] + probs['11'])
        # E = A(<XX> + <YY> + <ZZ>).
        energies.append(coupling * np.sum(np.array(terms)))
    return energies
# + tags=["uses-hardware"]
# Raw (unmitigated) hardware estimates for the four ground-state energies.
E = Energy(results, shots)
print('Energy expectation value of the state Tri1 : {:.3e} eV'.format(E[0]))
print('Energy expectation value of the state Tri2 : {:.3e} eV'.format(E[1]))
print('Energy expectation value of the state Tri3 : {:.3e} eV'.format(E[2]))
print('Energy expectation value of the state Sing : {:.3e} eV'.format(E[3]))
# -
# <h4 style="font-size: 17px">Step D. Measurement error mitigation. </h4>
#
# The results you obtained from running the circuits on the quantum system are not exact due to the noise from the various sources such as energy relaxation, dephasing, crosstalk between qubits, etc. In this step, we will alleviate the effects of the noise through the measurement error mitigation. Before we start, watch this [video](https://www.youtube.com/watch?v=yuDxHJOKsVA&list=PLOFEBzvs-Vvp2xg9-POLJhQwtVktlYGbY&index=8).
# + tags=["uses-hardware"]
from qiskit.ignis.mitigation.measurement import *
# -
# <p>📓Construct the circuits to profile the measurement errors of all basis states using the function 'complete_meas_cal'. Obtain the measurement filter object, 'meas_filter', which will be applied to the noisy results to mitigate readout (measurement) error.
# For further helpful information to complete this task, check [here](https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html).
# + tags=["uses-hardware"]
# your code to create the circuits, meas_calibs, goes here
# (exercise placeholder: the bare `=` lines below are intentionally
# incomplete until the calibration code is filled in)
meas_calibs, state_labels =
# execute meas_calibs on your choice of the backend
job = execute(meas_calibs, backend, shots = shots)
print(job.job_id())
job_monitor(job)
cal_results = job.result()
## To access the results of the completed job
#cal_results = backend.retrieve_job('job_id').result()
# your code to obtain the measurement filter object, 'meas_filter', goes here
# + tags=["uses-hardware"]
# Apply the calibration filter to correct the raw hardware counts.
results_new = meas_filter.apply(results)
# + tags=["uses-hardware"]
# Mitigated energy estimates for comparison against E above.
E_new = Energy(results_new, shots)
print('Energy expectation value of the state Tri1 : {:.3e} eV'.format(E_new[0]))
print('Energy expectation value of the state Tri2 : {:.3e} eV'.format(E_new[1]))
print('Energy expectation value of the state Tri3 : {:.3e} eV'.format(E_new[2]))
print('Energy expectation value of the state Sing : {:.3e} eV'.format(E_new[3]))
# -
# <h4 style="font-size: 17px">Step E. Interpret the result. </h4>
# <p>📓 Compute the relative errors (or the fractional error) of the energy values for all four states with and without measurement error mitigation.
# + tags=["uses-hardware"]
# results for the energy estimation from the simulation,
# execution on a quantum system without error mitigation and
# with error mitigation in numpy array format
Energy_exact, Energy_exp_orig, Energy_exp_new = np.array(E_sim), np.array(E), np.array(E_new)
# + tags=["uses-hardware"]
# Calculate the relative errors of the energy values without error mitigation
# and assign to the numpy array variable `Err_rel_orig` of size 4
# (exercise placeholder: intentionally incomplete assignment)
Err_rel_orig =
# + tags=["uses-hardware"]
# Calculate the relative errors of the energy values with error mitigation
# and assign to the numpy array variable `Err_rel_new` of size 4
# (exercise placeholder: intentionally incomplete assignment)
Err_rel_new =
# + tags=["uses-hardware"]
np.set_printoptions(precision=3)
print('The relative errors of the energy values for four bell basis\
 without measurement error mitigation : {}'.format(Err_rel_orig))
# + tags=["uses-hardware"]
np.set_printoptions(precision=3)
print('The relative errors of the energy values for four bell basis\
 with measurement error mitigation : {}'.format(Err_rel_new))
# -
# <p>📓 Compare the size of the errors before and after the measurement error mitigation and discuss about the effect of the readout error regarding the error map information of the backend that you selected.
# **Your answer:**
| content/ch-labs/Lab02_QuantumMeasurement.ipynb |