code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import tempfile
import json
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
import warnings
warnings.simplefilter('ignore')
import logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
from banditpylib import trials_to_dataframe
from banditpylib.arms import GaussianArm
from banditpylib.bandits import MultiArmedBandit
from banditpylib.protocols import SinglePlayerProtocol
from banditpylib.learners.mab_fcbai_learner import ExpGap, LilUCBHeuristic
# -
# Fixed-confidence best-arm identification on a 3-armed Gaussian bandit:
# both learners must identify the best arm with 95% confidence, and we
# compare how many pulls each needs.
confidence = 0.95
means = [0.3, 0.5, 0.7]
arms = [GaussianArm(mu=mean, std=1) for mean in means]
bandit = MultiArmedBandit(arms=arms)
learners = [ExpGap(arm_num=len(arms), confidence=confidence, threshold=3, name='Exponential-Gap Elimination'),
            LilUCBHeuristic(arm_num=len(arms), confidence=confidence, name='Heuristic lilUCB')]
# for each setup we run 20 trials
trials = 20
# results are streamed to a throw-away file and read back as a DataFrame
temp_file = tempfile.NamedTemporaryFile()
# simulator
game = SinglePlayerProtocol(bandit=bandit, learners=learners)
# start playing the game
# add `debug=True` for debugging purpose
game.play(trials=trials, output_filename=temp_file.name)
trials_df = trials_to_dataframe(temp_file.name)
# drop the free-form 'other' column; not needed for the plot
del trials_df['other']
trials_df.head()
trials_df['confidence'] = confidence
# Bar plot: total pulls per learner at the fixed confidence level.
fig = plt.figure()
ax = plt.subplot(111)
sns.barplot(x='confidence', y='total_actions', hue='learner', data=trials_df)
plt.ylabel('pulls')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
| examples/fix_confidence_bai_multi_armed_bandit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <center>
# <font size="+3">BobbleBot Controller Analysis Report<br><br></font>
# <font size="+2">Sensitivity to Mass Properties<br><br></font>
# <i><NAME><br>
# <i>10/26/18<br>
# <img src="imgs/BobbleCAD.png" alt="BobbleBot CAD" style="height: 350px; width: 250px;"/>
# </center>
# ## Introduction
# This document outlines an analysis of BobbleBot controller performance under varying mass properties. The BobbleBot simulator was used to collect the data. The simulated BobbleBot was subjected to impulse forces applied at a fixed location and in the +/- X direction. The controller response is analyzed as the BobbleBot CG is varied along the x-direction. The result is a set of data that captures the controller's ability to maintain adequate balance control within a bounding volume of CG locations.
# <center>
# <br>
# <img src="imgs/BobbleBotSimCg.png" alt="BobbleBot CG" style="height: 350px; width: 250px;"/>
# </center>
# ## Loading BobbleBot Simulation Data
#
# The BobbleBot simulator runs in Gazebo. Using the gazebo-ros packages, one can log data as the simulator runs and store it in a ros bag format. The simulation data can then be analyzed with Python using [Pandas](https://pandas.pydata.org/). This article discusses how to [load ROS bag files into Pandas](https://nimbus.unl.edu/2014/11/using-rosbag_pandas-to-analyze-rosbag-files/).
# Load analysis environment file. This file defines data directories
# and imports all needed Python packages for this notebook.
# Load the analysis environment (defines data directories and imports all
# needed packages into this namespace).
# FIX: use a context manager so the file handle is closed promptly instead
# of leaking until GC.
with open("env.py") as _env_file:
    exec(_env_file.read())
# ### Print sim data in tabular form
# All the sim data was loaded when the analysis env file was sourced. We can get the data for a run in tabular form like so.
# NOTE(review): df_x is presumably a dict of per-run DataFrames created by
# env.py, keyed by CG-shift ('cgx_0.0' = nominal x CG) — confirm in env.py.
n_rows = 10
df_x['cgx_0.0'].head(n_rows)
# ### Search for a column
# Here's how to search for a column(s) in a data frame.
#
search_string = 'Velocity'
# filter(regex=...) keeps only columns whose names match the pattern.
found_data = df_x['cgx_0.0'].filter(regex=search_string)
found_data.head()
# ## CG Shift X
#
# ### Tilt Plot
# %matplotlib inline
import matplotlib.pyplot as plt

# All six figures below share the same recipe: one 20x10-inch axes, a
# run-comparison plot drawn by a helper from env.py, despined and saved as
# a PNG. Factor that out once instead of repeating the 7-line cell body.
def make_run_figure(plot_fn, run_data, plot_cfg, out_png):
    """Draw `plot_fn(ax, run_data, plot_cfg)` on a standard figure and save it.

    plot_fn  -- coplot_var_for_runs or desired_vs_actual_for_runs (from env.py)
    run_data -- per-run data (df_x or df_z, loaded by env.py)
    plot_cfg -- plot configuration entry (pc_x[...] / pc_z[...])
    out_png  -- output image filename
    """
    fig = plt.figure(figsize=(20, 10), dpi=40)
    ax1 = fig.add_subplot(111)
    plot_fn(ax1, run_data, plot_cfg)
    fig.tight_layout()
    sns.despine()
    plt.savefig(out_png, bbox_inches='tight')

make_run_figure(coplot_var_for_runs, df_x, pc_x['measured_tilt'], 'TiltsX.png')
# ### Velocity Plot
make_run_figure(coplot_var_for_runs, df_x, pc_x['velocity'], 'VelocitiesX.png')
# ## CG Shift Z
#
# ### Desired Tilt vs Actual (nominal z cg 0.180 m)
make_run_figure(desired_vs_actual_for_runs, df_z, pc_z['desired_tilt_vs_actual'], 'DesiredTiltVsActualZ.png')
# ### Desired Velocity vs Actual (nominal z cg 0.180 m)
make_run_figure(desired_vs_actual_for_runs, df_z, pc_z['desired_velocity_vs_actual'], 'DesiredVelocityVsActualZ.png')
# ### Tilt Plot
make_run_figure(coplot_var_for_runs, df_z, pc_z['measured_tilt'], 'TiltsZ.png')
# ### Velocity Plot
make_run_figure(coplot_var_for_runs, df_z, pc_z['velocity'], 'VelocitiesZ.png')
| analysis/notebooks/CgPlacement/CgAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import xarray as xr
import matplotlib.pyplot as plt
from src.data_generator import *
from src.train import *
from src.utils import *
from src.networks import *
# Pin the run to GPU 7 and cap TF memory growth (limit_mem from src.utils).
os.environ["CUDA_VISIBLE_DEVICES"]=str(7)
limit_mem()
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
# Load the resnet config and override it for this probabilistic experiment.
args = load_args('../nn_configs/B/81-resnet_d3_dr_0.1.yml')
args['exp_id'] = '81.1-resnet_d3_dr_0.1'
args['train_years'] = ['2015', '2015']  # single-year subset for quick iteration
dg_train, dg_valid, dg_test = load_data(**args)
# Final layer has 4 channels — presumably [mu, sigma] pairs for the 2
# output variables, matching the `n_vars=2` used by the losses below.
# TODO confirm against build_resnet.
args['filters'] = [128, 128, 128, 128, 128, 128, 128, 128,
                   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 4]
model = build_resnet(
    **args, input_shape=dg_train.shape,
)
# + jupyter={"outputs_hidden": true}
model.summary()
# -
X, y = dg_train[0]
model.output
y.shape
# ## Combined CRPS MAE
crps_mae = create_lat_crps_mae(dg_train.data.lat, 2)
preds = model(X)
crps_mae(y, preds)
# ## Log
def pdf(y, mu, sigma):
    """Gaussian probability density of y under N(mu, sigma).

    sigma is floored at 1e-7 so a degenerate (zero) scale cannot divide
    by zero.
    """
    safe_sigma = np.maximum(1e-7, sigma)
    z = (y - mu) / safe_sigma
    return 1 / safe_sigma / np.sqrt(2 * np.pi) * np.exp(-0.5 * z ** 2)
from scipy.stats import norm
# Sanity check against scipy's Gaussian pdf.
# NOTE(review): the arguments are not aligned — pdf(5, 0.1, 3) is
# p(y=5 | mu=0.1, sigma=3) while norm.pdf(3, loc=5, scale=0.1) is
# p(3 | mu=5, sigma=0.1); the two printed values will differ. Confirm which
# comparison was intended.
pdf(5, 0.1, 3), norm.pdf(3, loc=5, scale=0.1)
def log_loss(y_true, mu, sigma):
    """Negative Gaussian log-likelihood of y_true under N(mu, sigma)."""
    prob = pdf(y_true, mu, sigma)
    ll = - np.log(prob)
    return ll
mu = 3
sigma = 5
y = 3
log_loss(y, mu, sigma)
def create_lat_log_loss(lat, n_vars):
    """Build a latitude-weighted Gaussian log-loss closure for a tf model.

    lat appears to be an xarray coordinate (it has .values) — TODO confirm.
    The returned closure expects y_pred with [mu, sigma] stacked along the
    last (channel) axis: the first n_vars channels are means, the rest scales.
    """
    # Cosine-of-latitude area weights, normalized to mean 1.
    weights_lat = np.cos(np.deg2rad(lat)).values
    weights_lat /= weights_lat.mean()
    def log_loss(y_true, y_pred):
        # Split input
        mu = y_pred[:, :, :, :n_vars]
        sigma = y_pred[:, :, :, n_vars:]
        sigma = tf.nn.relu(sigma)
        # Compute PDF
        eps = 1e-7
        sigma = tf.maximum(eps, sigma)  # floor sigma so the pdf stays finite
        prob = 1 / sigma / np.sqrt(2*np.pi) * tf.math.exp(
            -0.5 * ((y_true - mu) / sigma)**2
        )
        # Compute log loss (prob also floored to keep the log finite)
        ll = - tf.math.log(tf.maximum(prob, eps))
        # latitude weighting; axis 1 assumed to be lat — TODO confirm
        ll = ll * weights_lat[None, : , None, None]
        return tf.reduce_mean(ll)
    return log_loss
ll = create_lat_log_loss(dg_train.data.lat, 2)
ll(y, preds)
# ## CRPS
# Step-by-step construction of the closed-form Gaussian CRPS before
# wrapping it into a loss function below.
type(y)
pred = model(X)
type(pred)
type(tf.math.sqrt(pred))
y.shape[-1]
y_true, y_pred = y, pred
n_vars = y_true.shape[-1]
# First n_vars channels are means, remaining channels are scales.
mu = y_pred[:, :, :, :n_vars]
sigma = y_pred[:, :, :, n_vars:]
mu.shape, sigma.shape
np.min(sigma)
# sqrt(square(x)) == |x|: forces sigma non-negative.
sigma = tf.math.sqrt(tf.math.square(sigma))
np.min(sigma)
# Standardized error, then standard-normal pdf/cdf at that point.
loc = (y_true - mu) / sigma
loc.shape
phi = 1.0 / np.sqrt(2.0 * np.pi) * tf.math.exp(-tf.math.square(loc) / 2.0)
phi.shape
Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))
crps = sigma * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))
crps.shape
tf.reduce_mean(crps)
def crps_cost_function(y_true, y_pred):
    """Mean closed-form CRPS of a Gaussian forecast.

    y_pred carries [mu, sigma] stacked along the last axis (2 * n_vars
    channels); y_true has n_vars channels. Returns a scalar tensor.
    """
    n_vars = y_true.shape[-1]
    # Split input
    mu = y_pred[:, :, :, :n_vars]
    sigma = y_pred[:, :, :, n_vars:]
    # To stop sigma from becoming negative we first have to
    # convert it to the variance and then take the square
    # root again.
    sigma = tf.math.sqrt(tf.math.square(sigma))
    # The following three variables are just for convenience
    # FIX: guard the division against sigma == 0 — the sanity-check cell
    # below feeds an all-zero sigma, which previously produced NaNs. This
    # matches the epsilon already used in create_lat_crps further down.
    loc = (y_true - mu) / tf.maximum(1e-7, sigma)
    phi = 1.0 / np.sqrt(2.0 * np.pi) * tf.math.exp(-tf.math.square(loc) / 2.0)
    Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))
    # First we will compute the crps for each input/target pair
    crps = sigma * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))
    # Then we take the mean. The cost is now a scalar
    return tf.reduce_mean(crps)
# Sanity check: a deterministic forecast (mu = -1, sigma = 0) against
# all-zero targets. With sigma == 0 the standardization divides by zero.
y_test = np.zeros((32, 32, 64, 2))
pred_test = np.concatenate([-np.ones_like(y_test), np.zeros_like(y_test)], axis=-1)
pred_test = tf.Variable(pred_test)
y_test.shape, pred_test.shape
crps_cost_function(y_test, pred_test)
dg_train.data.lat
import pdb
def create_lat_crps(lat, n_vars):
    """Build a latitude-weighted Gaussian CRPS loss closure.

    lat appears to be an xarray coordinate (it has .values) — TODO confirm.
    The returned closure expects y_pred with [mu, sigma] stacked along the
    last axis (2 * n_vars channels).
    """
    # Cosine-of-latitude area weights, normalized to mean 1.
    weights_lat = np.cos(np.deg2rad(lat)).values
    weights_lat /= weights_lat.mean()
    def crps_loss(y_true, y_pred):
        # pdb.set_trace()
        # Split input
        mu = y_pred[:, :, :, :n_vars]
        sigma = y_pred[:, :, :, n_vars:]
        # To stop sigma from becoming negative we first have to
        # convert it to the variance and then take the square
        # root again.
        sigma = tf.math.sqrt(tf.math.square(sigma))
        # The following three variables are just for convenience
        loc = (y_true - mu) / tf.maximum(1e-7, sigma)  # guarded against sigma == 0
        phi = 1.0 / np.sqrt(2.0 * np.pi) * tf.math.exp(-tf.math.square(loc) / 2.0)
        Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))
        # First we will compute the crps for each input/target pair
        crps = sigma * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))
        # latitude weighting; axis 1 assumed to be lat — TODO confirm
        crps = crps * weights_lat[None, : , None, None]
        # Then we take the mean. The cost is now a scalar
        return tf.reduce_mean(crps)
    return crps_loss
dg_train.output_idxs
crps_test = create_lat_crps(dg_train.data.lat, 2)
crps_test(y_test, pred_test)
# Rebuild the model and run one epoch with the LR-finder callback to pick a
# learning rate.
model = build_resnet(
    **args, input_shape=dg_train.shape,
)
model.compile(keras.optimizers.Adam(1e-3), crps_test)
from src.clr import LRFinder
lrf = LRFinder(
    dg_train.n_samples, args['batch_size'],
    minimum_lr=1e-5, maximum_lr=10,
    lr_scale='exp', save_dir='./', verbose=0)
model.fit(dg_train, epochs=1,
          callbacks=[lrf], shuffle=False)
plot_lr_find(lrf, log=True)
plt.axvline(2.5e-4)
# + jupyter={"outputs_hidden": true}
X, y = dg_train[31]
crps_test(y, model(X))
# + jupyter={"outputs_hidden": true}
# Scan every batch to look for anomalous losses.
for i, (X, y) in enumerate(dg_train):
    loss = crps_test(y, model(X))
    print(loss)
# -
dg_valid.data
# NOTE(review): `dg` is only assigned a few lines below — running this cell
# top-to-bottom would raise NameError here.
np.concatenate([dg.std.isel(level=dg.output_idxs).values]*2)
preds = create_predictions(model, dg_valid, parametric=True)
preds
preds = model.predict(dg_test)
preds.shape
dg = dg_train
level = dg.data.isel(level=dg.output_idxs).level
level
# Duplicate the level coordinate for the [mu, sigma] channel stacking.
xr.concat([level]*2, dim='level')
level_names = dg.data.isel(level=dg.output_idxs).level_names
level_names
list(level_names.level.values) * 2
xr.DataArray(['a', 'b', 'c', 'd'], dims=['level'],
             coords={'level': list(level_names.level.values) *2})
# Scratch: derive a '-mean' suffixed variant of the first level name.
level_names[:] = ['a', 'b']
l = level_names[0]
l = l.split('_')
l[0] += '-mean'
'_'.join(l)
# FIX: the original line `level_names + [l.]` was a syntax error. The
# apparent intent is to extend the plain list of names with the new
# suffixed name — TODO confirm against how level_names is consumed
# downstream.
level_names = list(level_names.values) + ['_'.join(l)]
level_names
| nbs_probabilistic/04-implement-parametric.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# +
import numpy as np
from sklearn.datasets import load_iris
from sklearn import tree
iris = load_iris()
# NOTE(review): Python 2 print statements throughout (python2 kernel).
print iris.feature_names
# -
print iris.target_names
print iris.data[0:5]
print iris.target[0:5]
# +
#for i in range(len(iris.target)):
#    print "Example %d: label %s, features %s" % (i, iris.target[i], iris.data[i])
# +
# Hold out one example of each class (indices 0/50/100) for testing.
test_idx = [0, 50, 100]
# training data
train_target = np.delete(iris.target, test_idx)
train_data = np.delete(iris.data, test_idx, axis=0)
# testing data
test_target = iris.target[test_idx]
test_data = iris.data[test_idx]
print test_target
# -
clf = tree.DecisionTreeClassifier()
clf.fit(train_data, train_target)
print clf.predict(test_data)
# viz code
# NOTE(review): sklearn.externals.six was removed in modern scikit-learn —
# presumably pinned to an old version here; confirm before upgrading.
from sklearn.externals.six import StringIO
import pydot
dot_data = StringIO()
tree.export_graphviz(clf,
                     out_file=dot_data,
                     feature_names=iris.feature_names,
                     class_names=iris.target_names,
                     filled=True, rounded=True,
                     impurity=False)
# +
import os
# Point PATH at a local Graphviz install so pydot can find `dot`.
os.environ["PATH"] += os.pathsep + r'C:\ProgramData\Anaconda2\pkgs\graphviz-2.38-h308b129_2\Library\bin\graphviz'
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("iris.pdf")
# -
| src/2. Visualizing a decision tree.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Import Modules
# +
# Project helpers (methods / local_methods) plus pymatgen's ASE adaptor.
import os
print(os.getcwd())
import sys
import pandas as pd
from pymatgen.io.ase import AseAtomsAdaptor
# #########################################################
from methods import get_df_dft
# #########################################################
sys.path.insert(0, "..")
from local_methods import XRDCalculator
from local_methods import get_top_xrd_facets
# -
# # Script Inputs
# +
verbose = True
# verbose = False
# bulk_id_i = "8ymh8qnl6o"
# bulk_id_i = "8p8evt9pcg"
# bulk_id_i = "8l919k6s7p"
bulk_id_i = "64cg6j9any"
# -
# # Read Data
# +
df_dft = get_df_dft()
print("df_dft.shape:", df_dft.shape[0])
from methods import get_df_xrd
df_xrd = get_df_xrd()
df_xrd = df_xrd.set_index("id_unique", drop=False)
# + active=""
#
#
#
# +
# #########################################################
row_i = df_dft.loc[bulk_id_i]
# #########################################################
atoms_i = row_i.atoms
atoms_stan_prim_i = row_i.atoms_stan_prim
# #########################################################
# Writing bulk facets
atoms_i.write("out_data/bulk.traj")
atoms_i.write("out_data/bulk.cif")
# #########################################################
row_xrd_i = df_xrd.loc[bulk_id_i]
# #########################################################
top_facets_i = row_xrd_i.top_facets
# #########################################################
print(
"top_facets:",
top_facets_i
)
# +
# assert False
# +
# atoms = atoms_i
atoms = atoms_stan_prim_i
AAA = AseAtomsAdaptor()
struct_i = AAA.get_structure(atoms)
XRDCalc = XRDCalculator(
wavelength='CuKa',
symprec=0,
debye_waller_factors=None,
)
# XRDCalc.get_plot(structure=struct_i)
# # XRDCalc.get_plot?
plt = XRDCalc.plot_structures([struct_i])
# -
# # Saving plot to file
file_name_i = os.path.join(
"out_plot",
bulk_id_i + ".png",
)
plt.savefig(
file_name_i,
dpi=1600,
)
| workflow/xrd_bulks/plot_xrd_patterns/plot_xrd_patterns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Solution Notebook
# ## Problem: Format license keys.
#
# See the [LeetCode](https://leetcode.com/problems/license-key-formatting/) problem page.
#
# <pre>
# Now you are given a string S, which represents a software license key which we would like to format. The string S is composed of alphanumerical characters and dashes. The dashes split the alphanumerical characters within the string into groups. (i.e. if there are M dashes, the string is split into M+1 groups). The dashes in the given string are possibly misplaced.
#
# We want each group of characters to be of length K (except for possibly the first group, which could be shorter, but still must contain at least one character). To satisfy this requirement, we will reinsert dashes. Additionally, all the lower case letters in the string must be converted to upper case.
#
# So, you are given a non-empty string S, representing a license key to format, and an integer K. And you need to return the license key formatted according to the description above.
#
# Example 1:
# Input: S = "2-4A0r7-4k", K = 4
#
# Output: "24A0-R74K"
#
# Explanation: The string S has been split into two parts, each part has 4 characters.
# Example 2:
# Input: S = "2-4A0r7-4k", K = 3
#
# Output: "24-A0R-74K"
#
# Explanation: The string S has been split into three parts, each part has 3 characters except the first part as it could be shorter as said above.
#
# Note:
# The length of string S will not exceed 12,000, and K is a positive integer.
# String S consists only of alphanumerical characters (a-z and/or A-Z and/or 0-9) and dashes(-).
# String S is non-empty.
# </pre>
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# ## Constraints
#
# * Is the output a string?
# * Yes
# * Can we change the input string?
# * No, you can't modify the input string
# * Can we assume the inputs are valid?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * None -> TypeError
# * '---', k=3 -> ''
# * '2-4A0r7-4k', k=3 -> '24-A0R-74K'
# * '2-4A0r7-4k', k=4 -> '24A0-R74K'
# ## Algorithm
#
# * Loop through each character in the license key backwards, keeping a count of the number of chars we've reached so far, while inserting each character into a result list (convert to upper case)
# * If we reach a '-', skip it
# * Whenever we reach a char count of k, append a '-' character to the result list, reset the char count
# * Careful that we don't have a leading '-', which we might hit with test case: '2-4A0r7-4k', k=4 -> '24A0-R74K'
# * Reverse the result list and return it
#
# Complexity:
# * Time: O(n)
# * Space: O(n)
# ## Code
class Solution(object):

    def format_license_key(self, license_key, k):
        """Reformat license_key into '-'-separated, upper-cased groups of k.

        Only the first group may be shorter than k. Raises TypeError for
        None input and ValueError for an empty string.
        """
        if license_key is None:
            raise TypeError('license_key must be a str')
        if not license_key:
            raise ValueError('license_key must not be empty')
        # Walk the key right-to-left so only the FIRST group may come up short.
        chunks = []
        count = 0
        for ch in reversed(license_key):
            if ch == '-':
                # Existing dashes are discarded; we reinsert our own.
                continue
            chunks.append(ch.upper())
            count += 1
            if count == k:
                chunks.append('-')
                count = 0
        # A trailing separator here would become a leading '-' after reversal.
        if chunks and chunks[-1] == '-':
            chunks.pop()
        chunks.reverse()
        return ''.join(chunks)
# ## Unit Test
# +
# %%writefile test_format_license_key.py
import unittest


class TestSolution(unittest.TestCase):
    """Exercises Solution.format_license_key on the documented test cases."""

    def test_format_license_key(self):
        solution = Solution()
        # None input -> TypeError
        self.assertRaises(TypeError, solution.format_license_key, None, None)
        # Dashes only -> empty result
        license_key = '---'
        k = 3
        expected = ''
        self.assertEqual(solution.format_license_key(license_key, k), expected)
        license_key = '2-4A0r7-4k'
        k = 3
        expected = '24-A0R-74K'
        self.assertEqual(solution.format_license_key(license_key, k), expected)
        license_key = '2-4A0r7-4k'
        k = 4
        expected = '24A0-R74K'
        self.assertEqual(solution.format_license_key(license_key, k), expected)
        print('Success: test_format_license_key')


def main():
    # Notebook convention: run the single test directly, without a unittest
    # runner.
    test = TestSolution()
    test.test_format_license_key()


if __name__ == '__main__':
    main()
# -
# %run -i test_format_license_key.py
| online_judges/license_key/format_license_key_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Final project: StackOverflow assistant bot
#
# Congratulations on coming this far and solving the programming assignments! In this final project, we will combine everything we have learned about Natural Language Processing to construct a *dialogue chat bot*, which will be able to:
# * answer programming-related questions (using StackOverflow dataset);
# * chit-chat and simulate dialogue on all non programming-related questions.
#
# For a chit-chat mode we will use a pre-trained neural network engine available from [ChatterBot](https://github.com/gunthercox/ChatterBot).
# Those who aim at honor certificates for our course or are just curious, will train their own models for chit-chat.
# 
# ©[xkcd](https://xkcd.com)
# ### Data description
#
# To detect *intent* of users questions we will need two text collections:
# - `tagged_posts.tsv` — StackOverflow posts, tagged with one programming language (*positive samples*).
# - `dialogues.tsv` — dialogue phrases from movie subtitles (*negative samples*).
#
# +
# Detect whether we are running inside Google Colab: the google.colab
# package only exists there.
try:
    import google.colab
    IN_COLAB = True
except ImportError:  # FIX: catch only ImportError, not every exception
    IN_COLAB = False

if IN_COLAB:
    # ! wget https://raw.githubusercontent.com/hse-aml/natural-language-processing/master/setup_google_colab.py -O setup_google_colab.py
    import setup_google_colab
    setup_google_colab.setup_project()

import sys
sys.path.append("..")
from common.download_utils import download_project_resources

download_project_resources()
# -
# For those questions that have programming-related intent, we will proceed as follows: predict the programming language (only one tag per question allowed here) and rank candidates within the tag using embeddings.
# For the ranking part, you will need:
# - `word_embeddings.tsv` — word embeddings, that you trained with StarSpace in the 3rd assignment. It's not a problem if you didn't do it, because we can offer an alternative solution for you.
# As a result of this notebook, you should obtain the following new objects that you will then use in the running bot:
#
# - `intent_recognizer.pkl` — intent recognition model;
# - `tag_classifier.pkl` — programming language classification model;
# - `tfidf_vectorizer.pkl` — vectorizer used during training;
# - `thread_embeddings_by_tags` — folder with thread embeddings, arranged by tags.
#
# Some functions will be reused by this notebook and the scripts, so we put them into *utils.py* file. Don't forget to open it and fill in the gaps!
from utils import *
# ## Part I. Intent and language recognition
# We want to write a bot, which will not only **answer programming-related questions**, but also will be able to **maintain a dialogue**. We would also like to detect the *intent* of the user from the question (we could have had a 'Question answering mode' check-box in the bot, but it wouldn't be fun at all, would it?). So the first thing we need to do is to **distinguish programming-related questions from general ones**.
#
# It would also be good to predict which programming language a particular question refers to. By doing so, we will speed up question search by a factor of the number of languages (10 here), and exercise our *text classification* skill a bit. :)
# +
import numpy as np
import pandas as pd
import pickle
import re
from sklearn.feature_extraction.text import TfidfVectorizer
# -
# ### Data preparation
# In the first assignment (Predict tags on StackOverflow with linear models), you have already learnt how to preprocess texts and do TF-IDF transformations. Reuse your code here. In addition, you will also need to [dump](https://docs.python.org/3/library/pickle.html#pickle.dump) the TF-IDF vectorizer with pickle to use it later in the running bot.
def tfidf_features(X_train, X_test, vectorizer_path):
    """Performs TF-IDF transformation and dumps the model.

    Trains a TfidfVectorizer on X_train, transforms both splits, and
    pickles the fitted vectorizer to `vectorizer_path` so the running bot
    can reuse it.
    """
    # FIX: the original body was an unfilled assignment placeholder that
    # returned the raw inputs. ngram/df settings follow the course
    # assignment's recommendation — TODO confirm against the grader.
    tfidf_vectorizer = TfidfVectorizer(min_df=5, max_df=0.9,
                                       ngram_range=(1, 2),
                                       token_pattern=r'(\S+)')
    X_train = tfidf_vectorizer.fit_transform(X_train)
    X_test = tfidf_vectorizer.transform(X_test)
    # Open in writing-bytes mode; context manager closes the handle.
    with open(vectorizer_path, 'wb') as f:
        pickle.dump(tfidf_vectorizer, f)
    return X_train, X_test
# Now, load examples of two classes. Use a subsample of stackoverflow data to balance the classes. You will need the full data later.
# +
# Subsample both corpora to the same size so the two intent classes are
# balanced; fixed seed for reproducibility.
sample_size = 200000
dialogue_df = pd.read_csv('data/dialogues.tsv', sep='\t').sample(sample_size, random_state=0)
stackoverflow_df = pd.read_csv('data/tagged_posts.tsv', sep='\t').sample(sample_size, random_state=0)
# -
# Check how the data look like:
dialogue_df.head()
stackoverflow_df.head()
# Apply *text_prepare* function to preprocess the data.
#
# If you filled in the file, but NotImplementedError is still displayed, please refer to [this thread](https://github.com/hse-aml/natural-language-processing/issues/27).
from utils import text_prepare
# FIX: the original lines were unfilled `= ### YOUR CODE HERE` placeholders
# (syntax errors in the .py form). Preprocess both corpora with the shared
# text_prepare helper from utils.py.
dialogue_df['text'] = dialogue_df['text'].apply(text_prepare)
stackoverflow_df['title'] = stackoverflow_df['title'].apply(text_prepare)
# ### Intent recognition
# We will do a binary classification on TF-IDF representations of texts. Labels will be either `dialogue` for general questions or `stackoverflow` for programming-related questions. First, prepare the data for this task:
# - concatenate `dialogue` and `stackoverflow` examples into one sample
# - split it into train and test in proportion 9:1, use *random_state=0* for reproducibility
# - transform it into TF-IDF features
from sklearn.model_selection import train_test_split
# +
X = np.concatenate([dialogue_df['text'].values, stackoverflow_df['title'].values])
y = ['dialogue'] * dialogue_df.shape[0] + ['stackoverflow'] * stackoverflow_df.shape[0]
# FIX: fill the placeholder — 9:1 train/test split with fixed seed, per the
# instructions above.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
print('Train size = {}, test size = {}'.format(len(X_train), len(X_test)))
# Fit TF-IDF on the train split and dump the vectorizer where the bot
# expects it (RESOURCE_PATH comes from `from utils import *`).
X_train_tfidf, X_test_tfidf = tfidf_features(X_train, X_test, RESOURCE_PATH['TFIDF_VECTORIZER'])
# -
# Train the **intent recognizer** using LogisticRegression on the train set with the following parameters: *penalty='l2'*, *C=10*, *random_state=0*. Print out the accuracy on the test set to check whether everything looks good.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# +
# FIX: fill the placeholder — LogisticRegression with the parameters stated
# in the instructions above.
intent_recognizer = LogisticRegression(penalty='l2', C=10, random_state=0)
intent_recognizer.fit(X_train_tfidf, y_train)
# -
# Check test accuracy.
# Evaluate the intent recognizer on the held-out split.
y_test_pred = intent_recognizer.predict(X_test_tfidf)
test_accuracy = accuracy_score(y_test, y_test_pred)
print('Test accuracy = {}'.format(test_accuracy))
# Dump the classifier to use it in the running bot.
# FIX: context manager instead of an unclosed open() passed to pickle.dump.
with open(RESOURCE_PATH['INTENT_RECOGNIZER'], 'wb') as f:
    pickle.dump(intent_recognizer, f)
# ### Programming language classification
# We will train one more classifier for the programming-related questions. It will predict exactly one tag (=programming language) and will be also based on Logistic Regression with TF-IDF features.
#
# First, let us prepare the data for this task.
# Programming-language classification: one tag label per StackOverflow title.
X = stackoverflow_df['title'].values
y = stackoverflow_df['tag'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
print('Train size = {}, test size = {}'.format(len(X_train), len(X_test)))
# Let us reuse the TF-IDF vectorizer that we have already created above. It should not make a huge difference which data was used to train it.
# +
# FIX: context manager instead of an unclosed open() passed to pickle.load.
with open(RESOURCE_PATH['TFIDF_VECTORIZER'], 'rb') as f:
    vectorizer = pickle.load(f)
X_train_tfidf, X_test_tfidf = vectorizer.transform(X_train), vectorizer.transform(X_test)
# -
# Train the **tag classifier** using OneVsRestClassifier wrapper over LogisticRegression. Use the following parameters: *penalty='l2'*, *C=5*, *random_state=0*.
from sklearn.multiclass import OneVsRestClassifier
# +
# FIX: fill the placeholder — one-vs-rest wrapper over LogisticRegression
# (one binary classifier per programming-language tag), parameters per the
# instructions above.
tag_classifier = OneVsRestClassifier(LogisticRegression(penalty='l2', C=5, random_state=0))
tag_classifier.fit(X_train_tfidf, y_train)
# -
# Check test accuracy.
# Evaluate the tag classifier on the held-out split.
y_test_pred = tag_classifier.predict(X_test_tfidf)
test_accuracy = accuracy_score(y_test, y_test_pred)
print('Test accuracy = {}'.format(test_accuracy))
# Dump the classifier to use it in the running bot.
# FIX: context manager instead of an unclosed open() passed to pickle.dump.
with open(RESOURCE_PATH['TAG_CLASSIFIER'], 'wb') as f:
    pickle.dump(tag_classifier, f)
# ## Part II. Ranking questions with embeddings
# To find a relevant answer (a thread from StackOverflow) on a question you will use vector representations to calculate similarity between the question and existing threads. We already had `question_to_vec` function from the assignment 3, which can create such a representation based on word vectors.
#
# However, it would be costly to compute such a representation for all possible answers in *online mode* of the bot (e.g. when bot is running and answering questions from many users). This is the reason why you will create a *database* with pre-computed representations. These representations will be arranged by non-overlapping tags (programming languages), so that the search of the answer can be performed only within one tag each time. This will make our bot even more efficient and allow us not to store the whole database in RAM.
# Load StarSpace embeddings which were trained on Stack Overflow posts. These embeddings were trained in *supervised mode* for duplicates detection on the same corpus that is used in search. We can count on these representations to allow us to find closely related answers for a question.
#
# If for some reasons you didn't train StarSpace embeddings in the assignment 3, you can use [pre-trained word vectors](https://code.google.com/archive/p/word2vec/) from Google. All instructions about how to work with these vectors were provided in the same assignment. However, we highly recommend to use StarSpace's embeddings, because it contains more appropriate embeddings. If you chose to use Google's embeddings, delete the words, which are not in Stackoverflow data.
# StarSpace embeddings trained on StackOverflow duplicates (assignment 3);
# load_embeddings comes from `from utils import *`.
starspace_embeddings, embeddings_dim = load_embeddings('data/word_embeddings.tsv')
# Since we want to precompute representations for all possible answers, we need to load the whole posts dataset, unlike we did for the intent classifier:
posts_df = pd.read_csv('data/tagged_posts.tsv', sep='\t')
# Look at the distribution of posts for programming languages (tags) and find the most common ones.
# You might want to use pandas [groupby](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.groupby.html) and [count](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.count.html) methods:
# FIX: fill the placeholder — post count per programming-language tag;
# used below to pre-size the per-tag embedding matrices.
counts_by_tag = posts_df.groupby('tag')['tag'].count()
# Now for each `tag` you need to create two data structures, which will serve as online search index:
# * `tag_post_ids` — a list of post_ids with shape `(counts_by_tag[tag],)`. It will be needed to show the title and link to the thread;
# * `tag_vectors` — a matrix with shape `(counts_by_tag[tag], embeddings_dim)` where embeddings for each answer are stored.
#
# Implement the code which will calculate the mentioned structures and dump it to files. It should take several minutes to compute it.
# +
import os
os.makedirs(RESOURCE_PATH['THREAD_EMBEDDINGS_FOLDER'], exist_ok=True)

for tag, count in counts_by_tag.items():
    tag_posts = posts_df[posts_df['tag'] == tag]

    # FIX: fill the placeholder — post ids aligned row-for-row with
    # tag_vectors below, so a ranked row maps back to its thread.
    tag_post_ids = tag_posts['post_id'].tolist()

    tag_vectors = np.zeros((count, embeddings_dim), dtype=np.float32)
    for i, title in enumerate(tag_posts['title']):
        # question_to_vec (from utils) averages StarSpace word vectors —
        # TODO confirm signature against utils.py.
        tag_vectors[i, :] = question_to_vec(title, starspace_embeddings, embeddings_dim)

    # Dump post ids and vectors to a file.
    filename = os.path.join(RESOURCE_PATH['THREAD_EMBEDDINGS_FOLDER'], os.path.normpath('%s.pkl' % tag))
    # FIX: context manager instead of an unclosed open() passed to pickle.dump.
    with open(filename, 'wb') as f:
        pickle.dump((tag_post_ids, tag_vectors), f)
# -
# ## Part III. Putting all together
# Now let's combine everything that we have done and enable the bot to maintain a dialogue. We will teach the bot to sequentially determine the intent and, depending on the intent, select the best answer. As soon as we do this, we will have the opportunity to chat with the bot and check how well it answers questions.
# Implement Dialogue Manager that will generate the best answer. In order to do this, you should open *dialogue_manager.py* and fill in the gaps.
from dialogue_manager import DialogueManager
dialogue_manager = ######### YOUR CODE HERE #############
# Now we are ready to test our chat bot! Let's chat with the bot and ask it some questions. Check that the answers are reasonable.
# +
questions = [
"Hey",
"How are you doing?",
"What's your hobby?",
"How to write a loop in python?",
"How to delete rows in pandas?",
"python3 re",
"What is the difference between c and c++",
"Multithreading in Java",
"Catch exceptions C++",
"What is AI?",
]
for question in questions:
answer = ######### YOUR CODE HERE #############
print('Q: %s\nA: %s \n' % (question, answer))
| week5/week5-project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### OSKAR System noise testing
#
# This is an simple test looking at the expected RMS and mean of the uncorrelated system noise
# on auto and cross correlations.
#
# In this example $n=10^{7}$ samples are generated and correlated in blocks of $m=1000$ with the blocks then averaged before measuring the mean and STD.
# +
# %matplotlib inline
from IPython.display import display
import numpy as np
import matplotlib.pyplot as pp
import time

# Generate n complex noise samples per antenna signal, correlate them in
# blocks of m samples, and compare the block-averaged statistics against
# the analytic mean/STD predictions derived below.
n = 10**7  # total number of samples (an int, so it can be an array dimension)
m = 1000   # number of samples averaged per correlation block
s = 15     # per-component standard deviation of the complex noise

# Complex Gaussian noise for two antennas p and q.
# NOTE: the original used np.random.randn(n/m, m); under Python 3 (true
# division) n/m is a float, which modern NumPy rejects as a dimension.
Xp = (np.random.randn(n // m, m) + 1.j*np.random.randn(n // m, m))*s
Xq = (np.random.randn(n // m, m) + 1.j*np.random.randn(n // m, m))*s
ac = np.sum(Xp*np.conj(Xp), 1)/m  # Auto-correlation (one value per block)
xc = np.sum(Xp*np.conj(Xq), 1)/m  # Cross-correlation (one value per block)
# print() with a single argument works under both Python 2 and Python 3,
# unlike the original Python-2-only `print 'x'` statements.
print('Cross-correlation: measured (predicted)')
print(' mean : %.4f%+.4fi (0+0i)' % (np.real(np.mean(xc)), np.imag(np.mean(xc))))
print(' STD  : %.4f (%.4f)' % (np.std(xc), 2*s**2/(m**0.5)))
print('Auto-correlation: measured (predicted)')
print(' mean : %.4f (%.4f)' % (np.real(np.mean(ac)), 2*s**2))
print(' STD  : %.4f (%.4f)' % (np.std(ac), 2*s**2/(m**0.5)))
# -
# #### Cross-correlation
# Has a mean of $0$ and a STD of $\frac{2\mathrm{s}^{2}}{\sqrt{m}}$.
#
# #### Auto-correlation
# Has a mean of $2\mathrm{s}^{2}$ and a STD of $\frac{2\mathrm{s}^{2}}{\sqrt{m}}$.
#
# #### In terms of OSKAR parameters
#
# The number of independent samples in an integration is $m = 2\Delta\nu\tau$.
# The System equivalent flux density of one polarisation of an antenna from a unpolarised source is:
# $$S = \frac{2k_{\mathrm{B}}T_{\mathrm{sys}}}{A_{\mathrm{eff}}\eta}$$
# And the RMS from this SEFD is:
# $$ \sigma_{p,q} = \frac{ \sqrt{ S_{p} S_{q}} } { \sqrt{2\Delta\nu\tau} } $$
# if $S_{p} = S_{q} = S$
# $$ \sigma_{p,q} = \frac{S} { \sqrt{2\Delta\nu\tau} } $$
# As Visibilities are complex the measured STD (or RMS) will be
# $$\varepsilon = \sqrt{2}\sigma_{p,q}$$
# That is we would expect to measure an RMS of
# $$ \varepsilon = \frac{\sqrt{2} S} { \sqrt{2\Delta\nu\tau} } = \frac{\sqrt{2} S} { \sqrt{m} }$$
# If we relate this to the parameter s in the script above
# $$ \frac{2\mathrm{s}^{2}}{\sqrt{m}} = \frac{\sqrt{2} S} { \sqrt{m} } $$
# and therefore,
# $$ s = \sqrt{\frac{\sqrt{m}\sigma_{p,q}} {\sqrt{2}} } = \sqrt{\frac{S}{\sqrt{2}}}$$
# or
# $$ \sigma_{p,q} = \sqrt{\frac{2}{m}}s^{2} $$
| doc/ipython_notebooks/uncorrelated_system_noise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Examine the Evaluation Metrics
#
# Examine the resulting model evaluation after the pipeline completes. Download the resulting evaluation.json file from S3 and print the report.
# +
# Set up the SageMaker / Boto3 session objects used throughout the notebook.
from botocore.exceptions import ClientError
import os
import sagemaker
import logging
import boto3
import sagemaker
# NOTE(review): `import sagemaker` appears twice above; harmless but redundant.
import pandas as pd
sess = sagemaker.Session()
# Default S3 bucket for this SageMaker session.
bucket = sess.default_bucket()
# IAM execution role the notebook runs under; reused by SageMaker jobs.
role = sagemaker.get_execution_role()
region = boto3.Session().region_name
# Low-level SageMaker API client.
sm = boto3.Session().client(service_name="sagemaker", region_name=region)
# -
# Restore variables persisted by earlier notebooks via %store.
# %store -r pipeline_name
print(pipeline_name)
# %store -r pipeline_experiment_name
print(pipeline_experiment_name)
# +
# %%time
import time
from pprint import pprint
# Poll the most recent execution of the pipeline until it leaves the
# "Executing" state, sleeping 30 s between polls.
executions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)["PipelineExecutionSummaries"]
pipeline_execution_status = executions_response[0]["PipelineExecutionStatus"]
print(pipeline_execution_status)
while pipeline_execution_status == "Executing":
    try:
        executions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)["PipelineExecutionSummaries"]
        pipeline_execution_status = executions_response[0]["PipelineExecutionStatus"]
        # print('Executions for our pipeline...')
        # print(pipeline_execution_status)
    # Deliberate best-effort: API errors (e.g. throttling) are swallowed and
    # the loop simply waits and retries with the last-seen status.
    except Exception as e:
        print("Please wait...")
    time.sleep(30)
pprint(executions_response)
# -
# # List Pipeline Execution Steps
# Status and ARN of the most recent pipeline execution.
pipeline_execution_status = executions_response[0]["PipelineExecutionStatus"]
print(pipeline_execution_status)
pipeline_execution_arn = executions_response[0]["PipelineExecutionArn"]
print(pipeline_execution_arn)
# +
from pprint import pprint
steps = sm.list_pipeline_execution_steps(PipelineExecutionArn=pipeline_execution_arn)
pprint(steps)
# -
# # Retrieve Evaluation Metrics
# +
# for execution_step in reversed(execution.list_steps()):
# Walk the steps (reversed into execution order) and pull the S3 output
# location of the "EvaluateModel" processing job.
# NOTE(review): if no step is named "EvaluateModel", the variable below is
# never bound and the trailing expression raises NameError — confirm the
# step name matches the pipeline definition.
for execution_step in reversed(steps["PipelineExecutionSteps"]):
    if execution_step["StepName"] == "EvaluateModel":
        processing_job_name = execution_step["Metadata"]["ProcessingJob"]["Arn"].split("/")[-1]
        describe_evaluation_processing_job_response = sm.describe_processing_job(ProcessingJobName=processing_job_name)
        evaluation_metrics_s3_uri = describe_evaluation_processing_job_response["ProcessingOutputConfig"]["Outputs"][0][
            "S3Output"
        ]["S3Uri"]
evaluation_metrics_s3_uri
# +
import json
from pprint import pprint
# Download and pretty-print the evaluation report produced by the pipeline.
evaluation_json = sagemaker.s3.S3Downloader.read_file("{}/evaluation.json".format(evaluation_metrics_s3_uri))
pprint(json.loads(evaluation_json))
# -
# # Download and Analyze the Trained Model from S3
# +
# Find the training job that the "Train" step launched.
training_job_arn = None
for execution_step in steps["PipelineExecutionSteps"]:
    if execution_step["StepName"] == "Train":
        training_job_arn = execution_step["Metadata"]["TrainingJob"]["Arn"]
        break
training_job_name = training_job_arn.split("/")[-1]
print(training_job_name)
# -
# S3 location of the model.tar.gz artifact produced by that training job.
model_tar_s3_uri = sm.describe_training_job(TrainingJobName=training_job_name)["ModelArtifacts"]["S3ModelArtifacts"]
# !aws s3 cp $model_tar_s3_uri ./
# !mkdir -p ./model
# !tar -zxvf model.tar.gz -C ./model
# !saved_model_cli show --all --dir ./model/tensorflow/saved_model/0/
# !saved_model_cli run --dir ./model/tensorflow/saved_model/0/ --tag_set serve --signature_def serving_default \
#     --input_exprs 'input_ids=np.zeros((1,64));input_mask=np.zeros((1,64))'
# # List All Artifacts Generated By The Pipeline
processing_job_name = None
training_job_name = None
# +
import time
from sagemaker.lineage.visualizer import LineageTableVisualizer
viz = LineageTableVisualizer(sagemaker.session.Session())
# Show the lineage table for every step; sleep to avoid API throttling.
for execution_step in reversed(steps["PipelineExecutionSteps"]):
    print(execution_step)
    # We are doing this because there appears to be a bug of this LineageTableVisualizer handling the Processing Step
    if execution_step["StepName"] == "Processing":
        processing_job_name = execution_step["Metadata"]["ProcessingJob"]["Arn"].split("/")[-1]
        print(processing_job_name)
        display(viz.show(processing_job_name=processing_job_name))
    elif execution_step["StepName"] == "Train":
        training_job_name = execution_step["Metadata"]["TrainingJob"]["Arn"].split("/")[-1]
        print(training_job_name)
        display(viz.show(training_job_name=training_job_name))
    else:
        display(viz.show(pipeline_execution_step=execution_step))
    time.sleep(5)
# -
# # Analyze Experiment
# +
from sagemaker.analytics import ExperimentAnalytics
time.sleep(30)  # avoid throttling exception
import pandas as pd
# NOTE(review): newer pandas expects the "display.max_colwidth" key here;
# the bare "max_colwidth" form is deprecated — confirm the pandas version.
pd.set_option("max_colwidth", 500)
experiment_analytics = ExperimentAnalytics(
    experiment_name=pipeline_experiment_name,
)
experiment_analytics.dataframe()
# -
# # Release Resources
# + language="html"
#
# <p><b>Shutting down your kernel for this notebook to release resources.</b></p>
# <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>
#
# <script>
# try {
# els = document.getElementsByClassName("sm-command-button");
# els[0].click();
# }
# catch(err) {
# // NoOp
# }
# </script>
# + language="javascript"
#
# try {
# Jupyter.notebook.save_checkpoint();
# Jupyter.notebook.session.delete();
# }
# catch(err) {
# // NoOp
# }
| 10_pipeline/02_Evaluate_Pipeline_Execution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="vLSN7SzBJNm_"
# # 4. Data Preprocessing
# In this part, we will learn how to process data that will be fed to a machine learning algorithm
# + [markdown] id="HuDN2B9BNlGH"
# ##### Using [Iris Dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set), that has missing value for the necessity of this module
# + id="YmKtRQ1YV_sp"
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# + id="guinmynAKPcM"
import pandas as pd
# Iris dataset variant with missing values injected for this module.
df = pd.read_csv("https://raw.githubusercontent.com/henseljahja/learn-ml/main/Dataset/iris_2_class_modified.csv")
# + [markdown] id="XWWA9Z-ibfJO"
# We don't need the class column for now, so we drop it.
# + id="AM_brAD8bmNR"
df.drop(columns="species",inplace=True)
# + [markdown] id="Ox9c5mAXJpev"
# ## 4.1 Data Cleaning
# + [markdown] id="xG55T8LBJtFF"
# ### 4.1.1 Simple Imputer
# + [markdown] id="7oHE-exLJwVn"
# Simple imputer Imputation transformer for completing missing values.
# the documentation can be found [here](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html)
# + id="RreybPG9Gx6m" colab={"base_uri": "https://localhost:8080/", "height": 488} outputId="86a74795-0eff-408c-b826-60f7a7891e37"
import numpy as np
# Row indices of every cell containing NaN. NOTE(review): `== True` is
# redundant, and a row with several NaNs appears once per missing cell, so
# the df.loc view below can show duplicated rows.
df_nan_value = np.where(df.isnull() == True)[0]
df.loc[df_nan_value]
# + id="c6bX-G5bLjOs"
# Import SimpleImputer for the missing-value strategies demonstrated below.
from sklearn.impute import SimpleImputer
# + [markdown] id="x8O9ZoPtR2jJ"
#
#
# If “mean”, then replace missing values using the mean along each column. Can only be used with numeric data.
#
# If “median”, then replace missing values using the median along each column. Can only be used with numeric data.
#
# If “most_frequent”, then replace missing using the most frequent value along each column. Can be used with strings or numeric data. If there is more than one such value, only the smallest is returned.
#
# If “constant”, then replace missing values with fill_value. Can be used with strings or numeric data.
#
# + id="5mesDoxuRLKI"
# si = SimpleImputer; one instance per fill strategy.
# Fill NaNs with the column mean.
si_mean = SimpleImputer(strategy="mean")
# Fill NaNs with the column median.
si_median = SimpleImputer(strategy="median")
# Fill NaNs with the most frequent value in the column.
si_most_frequent = SimpleImputer(strategy="most_frequent")
# Fill NaNs with a fixed constant (999 here, chosen to be clearly visible).
si_constant = SimpleImputer(strategy="constant", fill_value=999)
# + colab={"base_uri": "https://localhost:8080/", "height": 488} id="bfW5rZhkSrI6" outputId="21fe4f97-d2b2-44d1-de6e-8c4086c113b7"
# fit_transform returns a NumPy array; wrap it in a DataFrame and show the
# rows that previously contained NaNs to inspect the imputed values.
df_si_mean = si_mean.fit_transform(df)
pd.DataFrame(data=df_si_mean[df_nan_value],columns=df.columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 488} id="F3gapqstUXuy" outputId="5743a907-c785-4cdf-b052-9a2c26d90b6a"
# Same inspection for the median strategy.
df_si_median = si_median.fit_transform(df)
pd.DataFrame(data=df_si_median[df_nan_value],columns=df.columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 488} id="h3gSMzrZU3Vn" outputId="a62d79c1-3507-4329-d1b9-a62b7ff006d3"
# Same inspection for the most-frequent strategy.
df_si_most_frequent = si_most_frequent.fit_transform(df)
pd.DataFrame(data=df_si_most_frequent[df_nan_value],columns=df.columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 488} id="nXg7eXeTVTV0" outputId="8ad67e09-c673-4a64-bbd1-d8de8e312045"
# Same inspection for the constant (999) strategy.
df_si_constant = si_constant.fit_transform(df)
pd.DataFrame(data=df_si_constant[df_nan_value],columns=df.columns)
# + [markdown] id="KTLnUYs0Vl-b"
# ## 4.2 Feature Scaling
# + [markdown] id="_vFlQssCahSe"
# ##### For this we will use the dataframe that contains no missing value,
# + id="pW0uXungamJP"
# Reload the clean (no missing values) Iris variant for the scaling demos.
df = pd.read_csv("https://raw.githubusercontent.com/henseljahja/learn-ml/main/Dataset/iris_2_class.csv")
# + id="VvOLg4oWcpL1"
df.drop("species",axis=1,inplace=True)
# + [markdown] id="5raEwT3jVpZ1"
# ### 4.2.1 Standardization (Z-score Normalization)
# 
#
# In machine learning, we can handle various types of data, e.g. audio signals and pixel values for image data, and this data can include multiple dimensions. Feature standardization makes the values of each feature in the data have zero-mean (when subtracting the mean in the numerator) and unit-variance. This method is widely used for normalization in many machine learning algorithms (e.g., support vector machines, logistic regression, and artificial neural networks). The general method of calculation is to determine the distribution mean and standard deviation for each feature. Next we subtract the mean from each feature. Then we divide the values (mean is already subtracted) of each feature by its standard deviation. Source : [Wikipedia](https://en.wikipedia.org/wiki/Feature_scaling)
# + id="BQwwIpmTVoTD"
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# fit_transform returns a NumPy array of standardized values.
df_sc = sc.fit_transform(df)
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="cR-dAjknWonQ" outputId="30c4fe08-7b94-4f3c-d2f4-edfe172a0fa9"
# Display the original (unscaled) data for comparison.
pd.DataFrame(df, columns = df.columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="I7d3_WIfW_Ef" outputId="e4f5c683-b8cb-4441-916c-80e920a3cba3"
# Before standard scaling: feature density estimates on the raw scale.
df.plot.kde()
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="q9weZ9CXXCNX" outputId="a2611bac-2ad0-4933-b2dc-4b1f8e3afb7e"
# After standard scaling: densities centred on zero with unit variance.
pd.DataFrame(df_sc, columns = df.columns).plot.kde()
# + [markdown] id="ZaDuOe76Ynq1"
# ### 4.2.2 Rescaling (min-max normalization)
# + [markdown] id="ZM2bdupIZMbM"
# 
#
# Also known as min-max scaling or min-max normalization, is the simplest method and consists in rescaling the range of features to scale the range in [0, 1] or [−1, 1]. Selecting the target range depends on the nature of the data. Source : [Wikipedia](https://en.wikipedia.org/wiki/Feature_scaling)
# + id="xUAXgIvcXlxE"
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
# Rescale each feature to the [0, 1] range.
df_mms = mms.fit_transform(df)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="Y6-KPJqca3ti" outputId="7dbaa32d-c702-4e6c-b368-6d77f3e7678d"
# Before min-max scaling.
df.plot.kde()
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="N4prbim7Zzfd" outputId="47b15926-fd3e-409a-9c5d-795d51ab3ad1"
# After min-max scaling.
pd.DataFrame(df_mms, columns = df.columns).plot.kde()
# + [markdown] id="UVw5cR1e5p_3"
# ## 4.3 Pipeline
# + [markdown] id="dyKM3bjO5rdZ"
# Let's say you have created a perfect preprocessing step, but you got a new data for preprocessing, repeating each would be tiring, so the solution is to set a Pipeline
# + [markdown] id="5wpHHnRt5uOg"
# This is a Pipeline with Imputer, Feature Scalling, and simple predictor
# + id="n4Mn1z3TazZR"
from sklearn.pipeline import Pipeline
# Define the preprocessing steps: median imputation followed by z-scoring.
df_pipeline = Pipeline([
    ("imputer" , SimpleImputer(strategy="median")),
    ("scaler" , StandardScaler())
])
# + colab={"base_uri": "https://localhost:8080/"} id="p-YYnjxv5zYt" outputId="187f753a-930a-4bcc-a01f-074bb5fd8d00"
# Run the whole pipeline in one call; returns a NumPy array.
df_pipeline.fit_transform(df)
# + id="K6HFZD2M6Fby"
# Wrap the pipeline output back into a DataFrame with the original columns.
df_after_pipeline = pd.DataFrame(df_pipeline.fit_transform(df), columns = df.columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="_LINK53460QB" outputId="43167e51-3bc2-4f49-b177-e03e38de56f8"
df_after_pipeline
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="Rp4c2vxA640E" outputId="e0e1a248-f285-40f0-e2b4-0c8f046bb11c"
# Before the pipeline: raw feature densities.
df.plot.kde()
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="9Cpz1ciK8dIg" outputId="9fa4f64f-cbd0-423d-ce76-e1742795b2a6"
# After the pipeline: imputed and standardized densities.
df_after_pipeline.plot.kde()
# + id="I6Kab2Xk8hrW"
| 4_data_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import importlib
import pickle
import pandas as pd
import torch
import sys
sys.path.append("/home/jarobyte/guemes/lib")
from pytorch_decoding import seq2seq
import ocr_correction
from timeit import default_timer as t
from tqdm.notebook import tqdm
from ocr_correction import *
# +
# Pick up any local edits to the correction library without restarting the kernel.
importlib.reload(ocr_correction)
# Language and trained-model identifiers for this evaluation run.
language = "de"
model_id = "22694260_9"
scratch = f"/home/jarobyte/scratch/guemes/icdar/{language}/"
print(f"Evaluating {language}...")
# Test split: OCR text paired with aligned ground truth.
data = pd.read_pickle(f"/home/jarobyte/scratch/guemes/icdar/{language}/data/test.pkl")
device = torch.device("cuda")
# Rebuild the seq2seq architecture, then load the trained weights.
model = seq2seq.load_architecture(scratch + f"baseline/models/{model_id}.arch")
model.to(device)
model.eval()
model.load_state_dict(torch.load(scratch + f"baseline/checkpoints/{model_id}.pt"))
# Character vocabulary used by the model.
with open(scratch + "data/vocabulary.pkl", "rb") as file:
    vocabulary = pickle.load(file)
len(vocabulary)
# The corpus to correct (raw OCR output) and its aligned ground truth.
raw = data.ocr_to_input
gs = data.gs_aligned
document_progress_bar = 0
window_size = 50
metrics = []
# Character error rate before any correction (shared by every row below).
old = levenshtein(reference = gs, hypothesis = raw).cer.mean()


def _evaluate(correct, config):
    """Run one correction configuration over the whole corpus and append a
    metrics row to the global ``metrics`` list.

    correct: callable mapping one raw document string to its corrected string.
    config:  dict of configuration labels merged into the metrics row.
    """
    start = t()
    corrections = [correct(doc) for doc in tqdm(raw)]
    row = dict(config)  # labels first so DataFrame column order matches
    row["inference_seconds"] = t() - start
    row["cer_before"] = old
    row["cer_after"] = levenshtein(gs, corrections).cer.mean()
    metrics.append(row)


# Disjoint windows, greedy decoding, two window sizes.
# NOTE(review): the original cells recorded window_size * 2 for the run that
# passed window_size (and vice versa); each row now records the size that was
# actually passed to the correction function.
_evaluate(lambda doc: correct_by_disjoint_window(doc,
                                                 model,
                                                 vocabulary,
                                                 document_progress_bar = document_progress_bar,
                                                 window_size = window_size),
          {"window": "disjoint", "decoding": "greedy", "window_size": window_size})
_evaluate(lambda doc: correct_by_disjoint_window(doc,
                                                 model,
                                                 vocabulary,
                                                 document_progress_bar = document_progress_bar,
                                                 window_size = window_size * 2),
          {"window": "disjoint", "decoding": "greedy", "window_size": window_size * 2})
# Disjoint windows, beam-search decoding.
_evaluate(lambda doc: correct_by_disjoint_window(doc,
                                                 model,
                                                 vocabulary,
                                                 decoding_method = "beam_search",
                                                 document_progress_bar = document_progress_bar,
                                                 window_size = window_size),
          {"window": "disjoint", "decoding": "beam", "window_size": window_size})
_evaluate(lambda doc: correct_by_disjoint_window(doc,
                                                 model,
                                                 vocabulary,
                                                 decoding_method = "beam_search",
                                                 document_progress_bar = document_progress_bar,
                                                 window_size = window_size * 2),
          {"window": "disjoint", "decoding": "beam", "window_size": window_size * 2})
# Sliding windows with uniform weighting; correct_by_sliding_window returns
# a pair whose second element is the corrected document.
_evaluate(lambda doc: correct_by_sliding_window(doc, model, vocabulary,
                                                weighting = uniform,
                                                document_progress_bar = document_progress_bar,
                                                window_size = window_size)[1],
          {"window": "sliding", "decoding": "greedy", "weighting": "uniform",
           "window_size": window_size})
_evaluate(lambda doc: correct_by_sliding_window(doc, model, vocabulary,
                                                decoding_method = "beam_search",
                                                weighting = uniform,
                                                document_progress_bar = document_progress_bar,
                                                window_size = window_size)[1],
          {"window": "sliding", "decoding": "beam", "weighting": "uniform",
           "window_size": window_size})
# Summary table with the relative CER improvement per configuration.
pd.DataFrame(metrics).assign(improvement = lambda df: 100 * (1 - df.cer_after / df.cer_before))
| notebooks/de/6_evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import division, print_function
# %matplotlib inline
from importlib import reload # Python 3
import utils; reload(utils)
from utils import *
# + [markdown] heading_collapsed=true
# ## Setup
# + hidden=true
batch_size=64
# + hidden=true
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# + hidden=true
# Add a channel axis: (n, 28, 28) -> (n, 1, 28, 28), channels-first layout.
X_test = np.expand_dims(X_test,1)
X_train = np.expand_dims(X_train,1)
# + hidden=true
X_train.shape
# + hidden=true
y_train[:5]
# + hidden=true
# One-hot encode the integer class labels (onehot comes from utils).
y_train = onehot(y_train)
y_test = onehot(y_test)
# + hidden=true
y_train[:5]
# + hidden=true
# Pixel statistics of the training set, used to normalise every input below.
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)
# + hidden=true
# Normalise a batch of images to zero mean / unit variance (training stats).
def norm_input(x): return (x-mean_px)/std_px
# + [markdown] heading_collapsed=true
# ## Linear model
# + hidden=true
def get_lin_model():
    """Build and compile a linear classifier: normalisation, flatten, and a
    single 10-way softmax layer."""
    net = Sequential()
    net.add(Lambda(norm_input, input_shape=(1,28,28)))  # per-pixel normalisation
    net.add(Flatten())
    net.add(Dense(10, activation='softmax'))  # one output unit per digit class
    net.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return net
# + hidden=true
lm = get_lin_model()
# + hidden=true
# ImageDataGenerator with no arguments: plain batching, no augmentation.
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, y_train, batch_size=batch_size)
test_batches = gen.flow(X_test, y_test, batch_size=batch_size)
steps_per_epoch = int(np.ceil(batches.n/batch_size))
validation_steps = int(np.ceil(test_batches.n/batch_size))
# + hidden=true
lm.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                 validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
# NOTE(review): assigning to `optimizer.lr` replaces the attribute with a
# plain float; depending on the Keras version this may not change the rate
# the optimizer actually uses (prefer K.set_value) — confirm.
lm.optimizer.lr=0.1
# + hidden=true
lm.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                 validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
lm.optimizer.lr=0.01
# + hidden=true
lm.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4,
                 validation_data=test_batches, validation_steps=validation_steps)
# + [markdown] heading_collapsed=true
# ## Single dense layer
# + hidden=true
def get_fc_model():
    """Build and compile a fully connected net: input normalisation, one
    512-unit hidden layer, and a 10-way softmax output.

    Returns a compiled Keras ``Sequential`` model.
    """
    model = Sequential([
        Lambda(norm_input, input_shape=(1,28,28)),
        Flatten(),
        # Hidden layer uses ReLU. The original notebook had activation='softmax'
        # here, which forces the 512 hidden activations to sum to one and
        # severely limits what the layer can represent.
        Dense(512, activation='relu'),
        Dense(10, activation='softmax')
        ])
    model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# + hidden=true
fc = get_fc_model()
# + hidden=true
fc.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                 validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
# NOTE(review): direct assignment to optimizer.lr may not take effect in all
# Keras versions — confirm (prefer K.set_value).
fc.optimizer.lr=0.1
# + hidden=true
fc.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4,
                 validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
fc.optimizer.lr=0.01
# + hidden=true
fc.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4,
                 validation_data=test_batches, validation_steps=validation_steps)
# + [markdown] heading_collapsed=true
# ## Basic 'VGG-style' CNN
# + hidden=true
def get_model():
    """Build and compile a small VGG-style CNN: two conv/pool blocks
    followed by a dense head with a 10-way softmax output."""
    net = Sequential()
    net.add(Lambda(norm_input, input_shape=(1,28,28)))
    # Block 1: two 3x3 convolutions, then 2x2 max-pooling.
    net.add(Conv2D(32,(3,3), activation='relu'))
    net.add(Conv2D(32,(3,3), activation='relu'))
    net.add(MaxPooling2D())
    # Block 2: same structure with twice the filters.
    net.add(Conv2D(64,(3,3), activation='relu'))
    net.add(Conv2D(64,(3,3), activation='relu'))
    net.add(MaxPooling2D())
    # Dense classification head.
    net.add(Flatten())
    net.add(Dense(512, activation='relu'))
    net.add(Dense(10, activation='softmax'))
    net.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return net
# + hidden=true
model = get_model()
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
# NOTE(review): direct assignment to optimizer.lr may not take effect in all
# Keras versions — confirm (prefer K.set_value).
model.optimizer.lr=0.1
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.01
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=8,
                    validation_data=test_batches, validation_steps=validation_steps)
# + [markdown] heading_collapsed=true
# ## Data augmentation
# + hidden=true
model = get_model()
# + hidden=true
# Augmented training data: small random rotations, shifts, shear and zoom.
gen = image.ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
                               height_shift_range=0.08, zoom_range=0.08)
batches = gen.flow(X_train, y_train, batch_size=batch_size)
# NOTE(review): the same augmenting generator is also applied to the test
# set; validation is usually done on un-augmented data — confirm intent.
test_batches = gen.flow(X_test, y_test, batch_size=batch_size)
steps_per_epoch = int(np.ceil(batches.n/batch_size))
validation_steps = int(np.ceil(test_batches.n/batch_size))
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.1
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.01
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=8,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.001
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=14,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.0001
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=10,
                    validation_data=test_batches, validation_steps=validation_steps)
# + [markdown] heading_collapsed=true
# ## Batchnorm + data augmentation
# + hidden=true
def get_model_bn():
    """Build and compile the VGG-style CNN with batch normalisation after
    each conv block and each dense layer (axis=1 = channels-first)."""
    net = Sequential()
    net.add(Lambda(norm_input, input_shape=(1,28,28)))
    # Block 1.
    net.add(Conv2D(32,(3,3), activation='relu'))
    net.add(BatchNormalization(axis=1))
    net.add(Conv2D(32,(3,3), activation='relu'))
    net.add(MaxPooling2D())
    net.add(BatchNormalization(axis=1))
    # Block 2.
    net.add(Conv2D(64,(3,3), activation='relu'))
    net.add(BatchNormalization(axis=1))
    net.add(Conv2D(64,(3,3), activation='relu'))
    net.add(MaxPooling2D())
    # Dense head with batchnorm on the flattened features and hidden layer.
    net.add(Flatten())
    net.add(BatchNormalization())
    net.add(Dense(512, activation='relu'))
    net.add(BatchNormalization())
    net.add(Dense(10, activation='softmax'))
    net.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return net
# + hidden=true
model = get_model_bn()
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
# NOTE(review): direct assignment to optimizer.lr may not take effect in all
# Keras versions — confirm (prefer K.set_value).
model.optimizer.lr=0.1
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.01
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=12,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.001
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=12,
                    validation_data=test_batches, validation_steps=validation_steps)
# + [markdown] heading_collapsed=true
# ## Batchnorm + dropout + data augmentation
# + hidden=true
def get_model_bn_do():
    """Build and compile the batchnorm VGG-style CNN with 50% dropout before
    the output layer for regularisation."""
    net = Sequential()
    net.add(Lambda(norm_input, input_shape=(1,28,28)))
    # Block 1.
    net.add(Conv2D(32,(3,3), activation='relu'))
    net.add(BatchNormalization(axis=1))
    net.add(Conv2D(32,(3,3), activation='relu'))
    net.add(MaxPooling2D())
    net.add(BatchNormalization(axis=1))
    # Block 2.
    net.add(Conv2D(64,(3,3), activation='relu'))
    net.add(BatchNormalization(axis=1))
    net.add(Conv2D(64,(3,3), activation='relu'))
    net.add(MaxPooling2D())
    # Dense head: batchnorm around the hidden layer, dropout before output.
    net.add(Flatten())
    net.add(BatchNormalization())
    net.add(Dense(512, activation='relu'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(10, activation='softmax'))
    net.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return net
# + hidden=true
model = get_model_bn_do()
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
# NOTE(review): direct assignment to optimizer.lr may not take effect in all
# Keras versions — confirm (prefer K.set_value).
model.optimizer.lr=0.1
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=4,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.01
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=12,
                    validation_data=test_batches, validation_steps=validation_steps)
# + hidden=true
model.optimizer.lr=0.001
# + hidden=true
model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1,
                    validation_data=test_batches, validation_steps=validation_steps)
# -
# ## Ensembling
def fit_model():
    """Train a fresh batchnorm+dropout CNN (with data augmentation) for 35
    epochs total, stepping the learning rate down as training progresses.

    Returns the trained Keras model.
    """
    model = get_model_bn_do()
    # Warm-up epoch at the optimizer's default learning rate.
    model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=1, verbose=0,
                        validation_data=test_batches, validation_steps=validation_steps)
    # Remaining 34 epochs with a decreasing learning-rate schedule.
    for rate, n_epochs in ((0.1, 4), (0.01, 12), (0.001, 18)):
        model.optimizer.lr = rate
        model.fit_generator(batches, steps_per_epoch=steps_per_epoch, epochs=n_epochs, verbose=0,
                            validation_data=test_batches, validation_steps=validation_steps)
    return model
# Train 6 independent copies of the model for the ensemble.
models = [fit_model() for i in range(6)]
# +
import os
user_home = os.path.expanduser('~')
path = os.path.join(user_home, "pj/fastai/data/MNIST_data/")
model_path = path + 'models/'
# path = "data/mnist/"
# model_path = path + 'models/'
# -
# Save the weights of each of the 6 models to its own file.
for i,m in enumerate(models):
    m.save_weights(model_path+'cnn-mnist23-'+str(i)+'.pkl')
eval_batch_size = 256
# +
# Per-model (loss, accuracy) on the test set.
evals = np.array([m.evaluate(X_test, y_test, batch_size=eval_batch_size) for m in models])
# -
evals.mean(axis=0)
# For each model, predict on the test set and stack all predictions together.
all_preds = np.stack([m.predict(X_test, batch_size=eval_batch_size) for m in models])
all_preds.shape
# Ensemble prediction: average the 6 probability distributions.
avg_preds = all_preds.mean(axis=0)
# NOTE(review): `.eval()` on the metric tensor needs an active TF1/Keras
# backend session; under TF2 the metric returns a value directly — confirm
# the runtime this notebook targets.
keras.metrics.categorical_accuracy(y_test, avg_preds).eval()
| deeplearning1/nbs/mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# -*- coding: utf-8 -*-
# In this script we use a simple classifer called naive bayes and try to predict the violations. But before that we use
# some methods to tackle the problem of our skewed dataset. :)
# 11 May 2016
# @author: reyhane_askari
# Universite de Montreal, DIRO
import csv
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
from sklearn import metrics
import pandas as pd
from os import chdir, listdir
from pandas import read_csv
from os import path
from random import randint, sample, seed
from collections import OrderedDict
from pandas import DataFrame, Series
import numpy as np
import csv
import codecs
import matplotlib as mpl
import seaborn as sns
sns.set()
import itertools
from sklearn.decomposition import PCA
from unbalanced_dataset import UnderSampler, NearMiss, CondensedNearestNeighbour, OneSidedSelection,\
NeighbourhoodCleaningRule, TomekLinks, ClusterCentroids, OverSampler, SMOTE,\
SMOTETomek, SMOTEENN, EasyEnsemble, BalanceCascade
almost_black = '#262626'
# %matplotlib inline
# +
# Load the Google cluster-trace features (columns 3-7) and the binary
# 'violation' target (column 8) from the pre-built CSV database.
colnames = ['old_index','job_id', 'task_idx','sched_cls', 'priority', 'cpu_requested',
            'mem_requested', 'disk', 'violation']
tain_path = r'/Users/reyhane.askari/Dropbox/Project_step_by_step/3_create_database/csvs/frull_db_2.csv'
X = pd.read_csv(tain_path, header = None, index_col = False ,names = colnames, skiprows = [0], usecols = [3,4,5,6,7])
y = pd.read_csv(tain_path, header = None, index_col = False ,names = colnames, skiprows = [0], usecols = [8])
y = y['violation'].values
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.333, random_state=0)
x = X.values
# +
# Instanciate a PCA object for the sake of easy visualisation
pca = PCA(n_components = 2)
# Fit and transform x to visualise inside a 2D feature space
x_vis = pca.fit_transform(x)
# Plot the original data
# Plot the two classes
palette = sns.color_palette()
# +
# Scatter the two classes in PCA space; very low alpha because the
# dataset is large and heavily overlapping.
plt.scatter(x_vis[y==0, 0], x_vis[y==0, 1], label="Class #0", alpha=0.009,
            edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
plt.scatter(x_vis[y==1, 0], x_vis[y==1, 1], label="Class #1", alpha=0.009,
            edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
plt.legend()
plt.show()
# -
# Generate the new dataset using under-sampling method
# NOTE(review): every resampler below is applied SEQUENTIALLY, each one
# overwriting (x, y) with the output of the previous one. Normally one
# method is chosen per experiment; verify this chaining is intentional.
verbose = False
# 'Random under-sampling'
US = UnderSampler(verbose=verbose)
x, y = US.fit_transform(x, y)
# 'Clustering centroids'
CC = ClusterCentroids(verbose=verbose)
x, y = CC.fit_transform(x, y)
# 'NearMiss-1'
NM1 = NearMiss(version=1, verbose=verbose)
x, y = NM1.fit_transform(x, y)
# 'NearMiss-2'
NM2 = NearMiss(version=2, verbose=verbose)
x, y = NM2.fit_transform(x, y)
# 'NearMiss-3'
NM3 = NearMiss(version=3, verbose=verbose)
x, y = NM3.fit_transform(x, y)
# 'One-Sided Selection'
OSS = OneSidedSelection(size_ngh=51, n_seeds_S=51, verbose=verbose)
x, y = OSS.fit_transform(x, y)
# 'Neighboorhood Cleaning Rule'
NCR = NeighbourhoodCleaningRule(size_ngh=51, verbose=verbose)
x, y = NCR.fit_transform(x, y)
# Minority/majority ratio used by the over-samplers below.
ratio = float(np.count_nonzero(y==1)) / float(np.count_nonzero(y==0))
# 'Random over-sampling'
OS = OverSampler(ratio=ratio, verbose=verbose)
x, y = OS.fit_transform(x, y)
# 'SMOTE'
smote = SMOTE(ratio=ratio, verbose=verbose, kind='regular')
x, y = smote.fit_transform(x, y)
# 'SMOTE bordeline 1'
bsmote1 = SMOTE(ratio=ratio, verbose=verbose, kind='borderline1')
x, y = bsmote1.fit_transform(x, y)
# 'SMOTE bordeline 2'
bsmote2 = SMOTE(ratio=ratio, verbose=verbose, kind='borderline2')
x, y = bsmote2.fit_transform(x, y)
# 'SMOTE SVM'
svm_args={'class_weight' : 'auto'}
svmsmote = SMOTE(ratio=ratio, verbose=verbose, kind='svm', **svm_args)
x, y = svmsmote.fit_transform(x, y)
# 'SMOTE Tomek links'
STK = SMOTETomek(ratio=ratio, verbose=verbose)
x, y = STK.fit_transform(x, y)
# 'SMOTE ENN'
SENN = SMOTEENN(ratio=ratio, verbose=verbose)
x, y = SENN.fit_transform(x, y)
# 'EasyEnsemble'
# NOTE(review): EasyEnsemble/BalanceCascade return ensembles of subsets
# (an extra leading dimension), not a single (x, y) pair — confirm the
# downstream train_test_split still receives 2-D features here.
EE = EasyEnsemble(verbose=verbose)
x, y = EE.fit_transform(x, y)
# 'BalanceCascade'
BS = BalanceCascade(verbose=verbose)
x, y = BS.fit_transform(x, y)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.333, random_state=0)
# +
from sklearn.naive_bayes import GaussianNB, BernoulliNB
gnb = GaussianNB() #Guassian Naive Bayes
# gnb = BernoulliNB() #Bernoulli Naive Bayes
# Fit once per quantity of interest (labels, probabilities, accuracy).
y_pred = gnb.fit(X_train, y_train).predict(X_test)
y_score = gnb.fit(X_train, y_train).predict_proba(X_test)[:,1]
mean_accuracy = gnb.fit(X_train, y_train).score(X_test,y_test,sample_weight=None)
# print(y_score)
print(mean_accuracy)
# ROC curve from the positive-class probabilities.
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.savefig('/Users/reyhane.askari/Dropbox/Project_step_by_step/3_create_database/naive_bays_guassian.png')
plt.show()
print("Number of mislabeled points out of a total %d points : %d"
      % (X_test.shape[0],(y_test != y_pred).sum()))
from sklearn import metrics
# metrics.precision_score(y_test, y_pred)
# -
from sklearn import metrics
# Precision / recall / F-beta summaries for the positive class.
metrics.precision_score(y_test, y_pred)
metrics.recall_score(y_test, y_pred)
metrics.fbeta_score(y_test, y_pred, beta=0.5)
metrics.fbeta_score(y_test, y_pred, beta=1)
metrics.fbeta_score(y_test, y_pred, beta=2)
metrics.precision_recall_fscore_support(y_test, y_pred, beta=0.5)
| 4_simple_models/interactive_scripts/naive_bayes_guassian_under_sampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Importing data
df = pd.read_csv('train.csv')
#Printing head
df.head()
#Printing tail
df.tail()
# +
#Subsetting the dataset
#Index 11856 marks the end of year 2013
df = pd.read_csv('train.csv', nrows = 11856)
#Creating train and test set
#Index 10392 marks the end of October 2013
train=df[0:10392]
test=df[10392:]
#Aggregating the dataset at daily level
df.Timestamp = pd.to_datetime(df.Datetime,format='%d-%m-%Y %H:%M')
df.index = df.Timestamp
df = df.resample('D').mean()
# NOTE(review): train/test are slices of df, so the assignments below
# trigger pandas SettingWithCopyWarning; .copy() the slices to be safe.
train.Timestamp = pd.to_datetime(train.Datetime,format='%d-%m-%Y %H:%M')
train.index = train.Timestamp
train = train.resample('D').mean()
test.Timestamp = pd.to_datetime(test.Datetime,format='%d-%m-%Y %H:%M')
test.index = test.Timestamp
test = test.resample('D').mean()
#Plotting data
train.Count.plot(figsize=(15,8), title= 'Daily Ridership', fontsize=14)
test.Count.plot(figsize=(15,8), title= 'Daily Ridership', fontsize=14)
plt.show()
# -
# ## Method 1: Start with a Naive Approach
# Forecast every future day as the last observed training value.
dd= np.asarray(train.Count)
y_hat = test.copy()
y_hat['naive'] = dd[len(dd)-1]
plt.figure(figsize=(12,8))
plt.plot(train.index, train['Count'], label='Train')
plt.plot(test.index,test['Count'], label='Test')
plt.plot(y_hat.index,y_hat['naive'], label='Naive Forecast')
plt.legend(loc='best')
plt.title("Naive Forecast")
plt.show()
from sklearn.metrics import mean_squared_error
from math import sqrt
# RMSE of the naive forecast against the test set.
rms = sqrt(mean_squared_error(test.Count, y_hat.naive))
print(rms)
# ## Method 2: – Simple Average
# Forecast every future day as the mean of the whole training series.
y_hat_avg = test.copy()
y_hat_avg['avg_forecast'] = train['Count'].mean()
plt.figure(figsize=(12,8))
plt.plot(train['Count'], label='Train')
plt.plot(test['Count'], label='Test')
plt.plot(y_hat_avg['avg_forecast'], label='Average Forecast')
plt.legend(loc='best')
plt.show()
rms = sqrt(mean_squared_error(test.Count, y_hat_avg.avg_forecast))
print(rms)
# ## Method 3 – Moving Average
# Single flat forecast: the last 60-day rolling mean of the training data.
y_hat_avg = test.copy()
y_hat_avg['moving_avg_forecast'] = train['Count'].rolling(60).mean().iloc[-1]
plt.figure(figsize=(16,8))
plt.plot(train['Count'], label='Train')
plt.plot(test['Count'], label='Test')
plt.plot(y_hat_avg['moving_avg_forecast'], label='Moving Average Forecast')
plt.legend(loc='best')
plt.show()
rms = sqrt(mean_squared_error(test.Count, y_hat_avg.moving_avg_forecast))
print(rms)
# ## Method 4 – Simple Exponential Smoothing
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
y_hat_avg = test.copy()
# Fixed smoothing level (alpha=0.6); optimized=False keeps it from being re-fit.
fit2 = SimpleExpSmoothing(np.asarray(train['Count'])).fit(smoothing_level=0.6,optimized=False)
y_hat_avg['SES'] = fit2.forecast(len(test))
plt.figure(figsize=(16,8))
plt.plot(train['Count'], label='Train')
plt.plot(test['Count'], label='Test')
plt.plot(y_hat_avg['SES'], label='SES')
plt.legend(loc='best')
plt.show()
rms = sqrt(mean_squared_error(test.Count, y_hat_avg.SES))
print(rms)
# ## Method 5 – Holt’s Linear Trend method
import statsmodels.api as sm
# Decompose first to eyeball trend/seasonality; adfuller tests stationarity
# (its result is computed but not printed here).
sm.tsa.seasonal_decompose(train.Count).plot()
result = sm.tsa.stattools.adfuller(train.Count)
plt.show()
# +
y_hat_avg = test.copy()
fit1 = Holt(np.asarray(train['Count'])).fit(smoothing_level = 0.3,smoothing_slope = 0.1)
y_hat_avg['Holt_linear'] = fit1.forecast(len(test))
plt.figure(figsize=(16,8))
plt.plot(train['Count'], label='Train')
plt.plot(test['Count'], label='Test')
plt.plot(y_hat_avg['Holt_linear'], label='Holt_linear')
plt.legend(loc='best')
plt.show()
# -
rms = sqrt(mean_squared_error(test.Count, y_hat_avg.Holt_linear))
print(rms)
# ## Method 6 – Holt-Winters Method
# Additive trend + additive weekly seasonality (period 7 on daily data).
y_hat_avg = test.copy()
fit1 = ExponentialSmoothing(np.asarray(train['Count']) ,seasonal_periods=7 ,trend='add', seasonal='add',).fit()
y_hat_avg['Holt_Winter'] = fit1.forecast(len(test))
plt.figure(figsize=(16,8))
plt.plot( train['Count'], label='Train')
plt.plot(test['Count'], label='Test')
plt.plot(y_hat_avg['Holt_Winter'], label='Holt_Winter')
plt.legend(loc='best')
plt.show()
rms = sqrt(mean_squared_error(test.Count, y_hat_avg.Holt_Winter))
print(rms)
# ## Method 7 – ARIMA
# Seasonal ARIMA(2,1,4)x(0,1,1,7); dynamic=True makes the prediction
# out-of-sample (uses its own forecasts, not observed test values).
y_hat_avg = test.copy()
fit1 = sm.tsa.statespace.SARIMAX(train.Count, order=(2, 1, 4),seasonal_order=(0,1,1,7)).fit()
y_hat_avg['SARIMA'] = fit1.predict(start="2013-11-1", end="2013-12-31", dynamic=True)
plt.figure(figsize=(16,8))
plt.plot( train['Count'], label='Train')
plt.plot(test['Count'], label='Test')
plt.plot(y_hat_avg['SARIMA'], label='SARIMA')
plt.legend(loc='best')
plt.show()
rms = sqrt(mean_squared_error(test.Count, y_hat_avg.SARIMA))
print(rms)
| time_series_forecast/7 methods to perform Time Series forecasting (with Python codes).ipynb |
# ---
# title: "7. Data Viz 3 - Visualizing Point Patterns"
# output: html_document
# ---
#
# ## Assignment Objectives
#
# By the end of this practical lab you will be able to:
# * Create
# - binning
# - kde
#
# We will first read some crime data into R for the [City of Chicago, USA](https://data.cityofchicago.org/view/5cd6-ry5g). This contains the recorded crimes during the year 2016 as of 24th December 2016; with the attributes including a category of the crime and a variety of other information such as location and date / time. The following code imports the data, parses the date and time stamp using the base R function strptime() and also ymd_hms() from the package lubridate. We then remove unwanted columns and restrict the results to "BURGLARY".
#
# +
# Import Crimes
crimes <- read.csv("./data/chicago_crimes_2016.csv")
# Append days, months
install.packages("lubridate")
library(lubridate)
#Parse date & time
crimes$New_Date <- ymd_hms((strptime(crimes$Date, "%m/%d/%Y %I:%M:%S %p",tz="UTC")))
#Subset the data to remove unwanted columns
crimes <- crimes[crimes$Primary.Type == "BURGLARY",c("ID","Latitude","Longitude","New_Date")]
#View the top of the data
head(crimes)
# -
# We can then see how the burglaries are distributed by day of the week:
ggplot(data=crimes, aes(wday(crimes$New_Date,label = TRUE))) +
  geom_bar() +
  xlab("Day") +
  ylab("Burglaries (count)")
# Or by month:
ggplot(data=crimes, aes(month(crimes$New_Date,label = TRUE))) +
  geom_bar() +
  xlab("Month") +
  ylab("Burglaries (count)")
# We can also use the facet_grid option within ggplot to produce separate plots for each month; and additionally add an aesthetic to the geom_bar which colors each day differently.
ggplot(data=crimes, aes(wday(crimes$New_Date,label = TRUE))) +
  geom_bar(aes(,fill=wday(crimes$New_Date,label = TRUE))) +
  xlab("Day") +
  ylab("Burglaries (count)") +
  facet_grid(~month(crimes$New_Date,label = TRUE)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),legend.position="none")
# Using a similar technique we can also explore the time of the day in which burglaries are recorded broken down into:
# +
#Create a summary data frame of the counts of burglaries by hour time band and quarter of the year
t <- data.frame(table(hour(crimes$New_Date),quarter(crimes$New_Date)))
colnames(t) <- c("Time","Quarter","Freq") # Name columns as something sensible
#Create plot
p <- ggplot(t, aes(x=Time, y=Freq, group=Quarter, colour = Quarter))
p + geom_line()
# -
#
#
#
#
#
# We will now consider
# +
library(ggmap)
#Plot: fetch a basemap tile for Chicago and overlay raw crime points
chicago <- get_map(location = "chicago", zoom = 11)
ggmap(chicago) + geom_point(data = crimes, aes(x = Longitude, y = Latitude))
# + eval=false
install.packages("pointdensityP")
# -
library(pointdensityP)
#
#
#
# +
# Remove crimes with no lat / lon
crimes <- crimes[!is.na(crimes$Latitude),]
chicago <- get_map(location = "chicago", zoom = 11)
# Hexagonal binning of the point pattern; the theme() call strips all
# axes/gridlines so only the map and hexbins remain.
ggmap(chicago, base_layer = ggplot(crimes, aes(x=Longitude, y=Latitude))) +
  coord_cartesian(xlim = c(-87.84918,-87.3)) +
  stat_binhex(bins=20) +
  theme_bw() +
  theme(axis.line = element_blank(),
        axis.text = element_blank(),
        axis.title=element_blank(),
        axis.ticks = element_blank(),
        legend.key = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
# +
# Rectangular binning for comparison with the hexagonal version above.
ggmap(chicago, base_layer = ggplot(crimes, aes(x=Longitude, y=Latitude))) +
  stat_bin2d(bins = 20)
# +
# 2D kernel density estimate drawn as filled contour polygons.
ggmap(chicago, base_layer = ggplot(crimes, aes(x=Longitude, y=Latitude))) +
  stat_density2d(aes(x = Longitude, y = Latitude,fill = ..level..,alpha=..level..), bins = 10, geom = "polygon", data = crimes) +
  scale_fill_gradient(low = "black", high = "red")
# -
#
#
# +
# Calculate Density
# pointdensity() returns a per-grid-cell point count within the radius.
density <- pointdensity(crimes,lat_col="Latitude",lon_col="Longitude",grid_size=1,radius=3)
map_base <- qmap('Chicago', zoom = 11, darken=0.2)
map_base + geom_point(aes(x = lon, y = lat, colour = count,alpha=count),
                      shape = 16, size = 1, data =density) +
  scale_colour_gradient(low = "yellow", high = "red")
# -
#
| notebooks/P7_Mapping_Points.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Implicit Backward Time Centered Space (BTCS) Difference Equation for the Heat Equation
# ## The Heat Equation
# The Heat Equation is the first order in time ($t$) and second order in space ($x$) Partial Differential Equation:
# $$ \frac{\partial u}{\partial t} = \frac{\partial^2 u}{\partial x^2},$$
# The equation describes heat transfer on a domain
# $$ \Omega = \{ t \geq 0, \ 0\leq x \leq 1\}. $$
# with an initial condition at time $t=0$ for all $x$ and boundary condition on the left ($x=0$) and right side ($x=1$).
#
# ## Backward Time Centered Space (BTCS) Difference method
# This note book will illustrate the Backward Time Centered Space (BTCS) Difference method for the Heat Equation with the __initial conditions__
# $$ u(x,0)=2x, \ \ 0 \leq x \leq \frac{1}{2}, $$
# $$ u(x,0)=2(1-x), \ \ \frac{1}{2} \leq x \leq 1, $$
# and __boundary condition__
# $$ u(0,t)=0, u(1,t)=0. $$
#
# +
# LIBRARY
# vector manipulation
import numpy as np
# math functions
import math
# THIS IS FOR PLOTTING
# %matplotlib inline
import matplotlib.pyplot as plt # side-stepping mpl backend
import warnings
warnings.filterwarnings("ignore")
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
# -
# ## Discete Grid
# The region $\Omega$ is discretised into a uniform mesh $\Omega_h$. In the space $x$ direction into $N$ steps giving a stepsize of
# $$ h=\frac{1-0}{N},$$
# resulting in
# $$x[i]=0+ih,$$
# and into $N_t$ steps in the time $t$ direction giving a stepsize of
# $$ k=\frac{1-0}{N_t},$$
# resulting in
# $$t[i]=0+ik.$$
# The Figure below shows the discrete grid points for $N=10$ and $N_t=15$, the red dots are the unknown values, the green dots are the known boundary conditions and the blue dots are the known initial conditions of the Heat Equation.
# Discretisation parameters: N space steps, Nt time steps.
N=10
Nt=200
h=1/N          # space step
k=1/Nt         # time step
r=k/(h*h)      # mesh ratio r = k/h^2 used by the BTCS scheme
time_steps=15  # number of time rows actually computed/plotted
time=np.arange(0,(time_steps+.5)*k,k)
x=np.arange(0,1.0001,h)
X, Y = np.meshgrid(x, time)
# Plot the grid: red = unknown interior values, blue = initial condition,
# green = boundary conditions.
fig = plt.figure()
plt.plot(X,Y,'ro');
plt.plot(x,0*x,'bo',label='Initial Condition');
plt.plot(np.ones(time_steps+1),time,'go',label='Boundary Condition');
plt.plot(x,0*x,'bo');
plt.plot(0*time,time,'go');
plt.xlim((-0.02,1.02))
plt.xlabel('x')
plt.ylabel('time (ms)')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title(r'Discrete Grid $\Omega_h,$ h= %s, k=%s'%(h,k),fontsize=24,y=1.08)
plt.show();
# ## Discrete Initial and Boundary Conditions
#
# The discrete initial conditions are
# $$ w[i,0]=2x[i], \ \ 0 \leq x[i] \leq \frac{1}{2} $$
# $$ w[i,0]=2(1-x[i]), \ \ \frac{1}{2} \leq x[i] \leq 1 $$
# and the discete boundary conditions are
# $$ w[0,j]=0, w[10,j]=0, $$
# where $w[i,j]$ is the numerical approximation of $U(x[i],t[j])$.
#
# The Figure below plots the inital and boundary conditions for $t[0]=0.$
# +
# w[i,j] approximates u(x[i], t[j]); column 0 holds the initial condition.
w=np.zeros((N+1,time_steps+1))
b=np.zeros(N-1)
# Initial Condition
for i in range (1,N):
    w[i,0]=2*x[i]
    if x[i]>0.5:
        w[i,0]=2*(1-x[i])
# Boundary Condition
# WARNING(review): this loop variable shadows the time step k = 1/Nt defined
# above; r was computed earlier so results are unaffected, but k is an int
# after this cell. It also stops at time_steps-1; w is zero-initialised so
# the last boundary column is still 0.
for k in range (0,time_steps):
    w[0,k]=0
    w[N,k]=0
fig = plt.figure(figsize=(8,4))
plt.plot(x,w[:,0],'o:',label='Initial Condition')
plt.plot(x[[0,N]],w[[0,N],0],'go',label='Boundary Condition t[0]=0')
#plt.plot(x[N],w[N,0],'go')
plt.xlim([-0.1,1.1])
plt.ylim([-0.1,1.1])
plt.title('Intitial and Boundary Condition',fontsize=24)
plt.xlabel('x')
plt.ylabel('w')
plt.legend(loc='best')
plt.show()
# -
# ## The Implicit Backward Time Centered Space (BTCS) Difference Equation
# The implicit Backward Time Centered Space (BTCS) difference equation of the Heat Equation is
# \begin{equation*}
# \frac{w_{ij+1}-w_{ij}}{k}=\frac{w_{i+1j+1}-2w_{ij+1}+w_{i-1j+1}}{h^2}
# \end{equation*}
# Rearranging the equation we get
# \begin{equation}
# -rw_{i-1j+1}+(1+2r)w_{ij+1}-rw_{i+1j+1}=w_{ij}
# \end{equation}
# for $i=1,...9$ where $r=\frac{k}{h^2}$.
#
# This gives the formula for the unknown term $w_{ij+1}$ at the $(ij+1)$ mesh points
# in terms of $x[i]$ along the jth time row.
#
# Hence we can calculate the unknown pivotal values of $w$ along the first row of $j=1$ in terms of the known boundary conditions.
#
# This can be written in matrix form
# $$ A\mathbf{w}_{j+1}=\mathbf{w}_{j} +\mathbf{b}_{j+1} $$
# for which $A$ is a $9\times9$ matrix:
# $$
# \left(\begin{array}{cccc cccc}
# 1+2r&-r& 0&0&0 &0&0&0\\
# -r&1+2r&-r&0&0&0 &0&0&0\\
# 0&-r&1+2r &-r&0&0& 0&0&0\\
# 0&0&-r&1+2r &-r&0&0& 0&0\\
# 0&0&0&-r&1+2r &-r&0&0& 0\\
# 0&0&0&0&-r&1+2r &-r&0&0\\
# 0&0&0&0&0&-r&1+2r &-r&0\\
# 0&0&0&0&0&0&-r&1+2r&-r\\
# 0&0&0&0&0&0&0&-r&1+2r\\
# \end{array}\right)
# \left(\begin{array}{c}
# w_{1j+1}\\
# w_{2j+1}\\
# w_{3j+1}\\
# w_{4j+1}\\
# w_{5j+1}\\
# w_{6j+1}\\
# w_{7j+1}\\
# w_{8j+1}\\
# w_{9j+1}\\
# \end{array}\right)=
# \left(\begin{array}{c}
# w_{1j}\\
# w_{2j}\\
# w_{3j}\\
# w_{4j}\\
# w_{5j}\\
# w_{6j}\\
# w_{7j}\\
# w_{8j}\\
# w_{9j}\\
# \end{array}\right)+
# \left(\begin{array}{c}
# rw_{0j+1}\\
# 0\\
# 0\\
# 0\\
# 0\\
# 0\\
# 0\\
# 0\\
# rw_{10j+1}\\
# \end{array}\right).
# $$
# It is assumed that the boundary values $w_{0j}$ and $w_{10j}$ are known for $j=1,2,...$, and $w_{i0}$ for $i=0,...,10$ is the initial condition.
#
# The Figure below shows the matrix $A$ and its inverse $A^{-1}$ in color plot form for $r$.
# +
# Assemble the (N-1)x(N-1) tridiagonal BTCS matrix A with 1+2r on the
# diagonal and -r on the sub/super-diagonals, then invert it once —
# every time step reuses Ainv.
A=np.zeros((N-1,N-1))
for i in range (0,N-1):
    A[i,i]=1+2*r
for i in range (0,N-2):
    A[i+1,i]=-r
    A[i,i+1]=-r
Ainv=np.linalg.inv(A)
# Visualise A and its inverse side by side.
fig = plt.figure(figsize=(12,4));
plt.subplot(121)
plt.imshow(A,interpolation='none');
plt.xticks(np.arange(N-1), np.arange(1,N-0.9,1));
plt.yticks(np.arange(N-1), np.arange(1,N-0.9,1));
clb=plt.colorbar();
clb.set_label('Matrix elements values');
clb.set_clim((-1,1));
plt.title('Matrix A r=%s'%(np.round(r,3)),fontsize=24)
plt.subplot(122)
plt.imshow(Ainv,interpolation='none');
plt.xticks(np.arange(N-1), np.arange(1,N-0.9,1));
plt.yticks(np.arange(N-1), np.arange(1,N-0.9,1));
clb=plt.colorbar();
clb.set_label('Matrix elements values');
clb.set_clim((-1,1));
plt.title(r'Matrix $A^{-1}$ r=%s'%(np.round(r,3)),fontsize=24)
fig.tight_layout()
plt.show();
# -
# ## Results
# To numerically approximate the solution at $t[1]$ the matrix equation becomes
# $$ A\mathbf{w}_{1}=\mathbf{w}_{0} +\mathbf{b}_{1}, \qquad \mathbf{w}_{1}=A^{-1}\left(\mathbf{w}_{0} +\mathbf{b}_{1}\right), $$
# where all the right hand side is known.
# To approximate solution at time $t[2]$ we use the matrix equation
# $$ A\mathbf{w}_{2}=\mathbf{w}_{1} +\mathbf{b}_{2}. $$
# Each set of numerical solutions $w[i,j]$ for all $i$ at the previous time step is used to approximate the solution $w[i,j+1]$.
# The left and right plot below show the numerical approximation $w[i,j]$ of the Heat Equation using the BTCS method for $x[i]$ for $i=0,...,10$ and time steps $t[j]$ for $j=1,...,15$.
# +
# Implicit time stepping: each column is obtained by solving the BTCS
# system, i.e. w_{j} = Ainv @ w_{j-1} (the boundary vector b is zero here
# because both boundary values are 0).
fig = plt.figure(figsize=(12,6))
plt.subplot(121)
for j in range (1,time_steps+1):
    w[1:(N),j]=np.dot(Ainv,w[1:(N),j-1])
    plt.plot(x,w[:,j],'o:',label='t[%s]=%s'%(j,time[j]))
plt.xlabel('x')
plt.ylabel('w')
#plt.legend(loc='bottom', bbox_to_anchor=(0.5, -0.1))
plt.legend(bbox_to_anchor=(-.4, 1), loc=2, borderaxespad=0.)
# Right panel: the full space-time solution as a heat map.
plt.subplot(122)
plt.imshow(w.transpose())
plt.xticks(np.arange(len(x)), x)
plt.yticks(np.arange(len(time)), time)
plt.xlabel('x')
plt.ylabel('time')
clb=plt.colorbar()
clb.set_label('Temperature (w)')
plt.suptitle('Numerical Solution of the Heat Equation r=%s'%(np.round(r,3)),fontsize=24,y=1.08)
fig.tight_layout()
plt.show()
# -
| Chapter 08 - Heat Equations/.ipynb_checkpoints/03_Heat Equation-Crank_Nicolson-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import seaborn as sns
from db.DataAccessObject import DataAccessObject
from sklearn.metrics import roc_curve, roc_auc_score, classification_report
from sklearn.metrics import confusion_matrix as cm
from sklearn.linear_model import LogisticRegression
import pickle
PATH = "./analysis/"
IMG_PATH = "./analysis/"
# +
# Preparing data and trained models.
# Load the three pre-trained classifiers. `with` closes each file handle
# immediately; the original `pickle.load(open(...))` leaked the handles.
with open(PATH + "nn_gender.dat", "rb") as f:
    nn_gender = pickle.load(f)
with open(PATH + "nn_age.dat", "rb") as f:
    nn_age = pickle.load(f)
with open(PATH + "nn_ethnicity.dat", "rb") as f:
    nn_ethnicity = pickle.load(f)
# Pull 20k labelled grayscale samples out of the local database.
dataset = DataAccessObject(database="local", collection="grayscale")
cursor = dataset.get_X_y_pair(20000, 0, **{"gender": True, "age": True, "ethnicity": True})
X = []
y_gender = []
y_age = []
y_ethnicity = []
for raw_data in cursor:
    X.append(raw_data["pixels"])
    y_gender.append(raw_data["gender"])
    y_age.append(raw_data["age"])
    y_ethnicity.append(raw_data["ethnicity"])
X = np.array(X)
# Predict once per model; the original called nn_gender.predict(X) twice
# for the two aliases below.
y_hat_gender = nn_gender.predict(X)
y_hat_gender_label = y_hat_gender
y_hat_age = nn_age.predict(X)
y_hat_ethnicity = nn_ethnicity.predict(X)
# -
# Confusion matrix and heatmap.
def confusion_matrix(y_hat, y_truth, criterion):
    """Compute binary confusion-matrix counts, treating `criterion` as the
    positive label.

    Args:
        y_hat: predicted labels.
        y_truth: ground-truth labels (same length as y_hat).
        criterion: the label value considered "positive".

    Returns:
        (tp, fp, tn, fn) counts.

    Fix: the original swapped FP and FN — a positive sample predicted as
    negative is a FALSE NEGATIVE, and a negative sample predicted as
    positive is a FALSE POSITIVE.
    """
    tp = tn = fp = fn = 0
    for idx in range(len(y_hat)):
        if y_truth[idx] == criterion:
            # Actual positive: correct prediction -> TP, miss -> FN.
            if y_hat[idx] == y_truth[idx]:
                tp += 1
            else:
                fn += 1
        else:
            # Actual negative: correct prediction -> TN, false alarm -> FP.
            if y_hat[idx] == y_truth[idx]:
                tn += 1
            else:
                fp += 1
    print(f"----- Confusion Matrix -----")
    print(f"True Negative: {tn} False Positive: {fp}")
    print(f"False Negative: {fn} True Positive: {tp}")
    return tp, fp, tn, fn
def plot_confusion_matrix(true_negative, false_positive, false_negative, true_positive, axis_labels=[]):
    """Render the four confusion-matrix counts as an annotated seaborn
    heatmap (counts + percentage of total per cell) and save it to
    IMG_PATH/confusion_matrix.png.

    NOTE: axis_labels uses a mutable default ([]); harmless here because it
    is never mutated, but pass an explicit list for clarity.
    """
    total = true_negative + false_positive + false_negative + true_positive
    # 2x2 layout: row 0 = actual negative, row 1 = actual positive.
    data = np.array([[true_negative, false_positive], [false_negative, true_positive]])
    labels = np.asarray([f"True Neg\n{true_negative}\n{true_negative / total:.2%}",
                         f"False Pos\n{false_positive}\n{false_positive / total:.2%}",
                         f"False Neg\n{false_negative}\n{false_negative / total:.2%}",
                         f"True Pos\n{true_positive}\n{true_positive / total:.2%}"]
                        ).reshape((2, 2))
    fig = sns.heatmap(data, annot=labels, xticklabels=axis_labels, yticklabels=axis_labels,
                      fmt="", cmap="Blues").get_figure()
    fig.savefig(IMG_PATH + "confusion_matrix.png")
# +
# Calculate for McFadden's pseudo R-squared
def logit(x, w):
    """Logistic sigmoid of the linear score, 1 / (1 + exp(-x @ w.T))."""
    return 1 / (1 + np.exp(-np.dot(x, w.T)))


def log_likelihood(w, X, y):
    """Return the Bernoulli log-likelihood of a logistic model.

    Computes sum_i [ y_i*log(p_i) + (1-y_i)*log(1-p_i) ] with
    p_i = logit(X[i], w).

    Fix: the original summed p^y + (1-p)^(1-y) — adding the two Bernoulli
    terms instead of multiplying them, and never taking a logarithm — so it
    was not a log-likelihood and made the pseudo R-squared meaningless.
    NOTE: log(0) yields -inf for saturated predictions; clip probabilities
    upstream if that becomes an issue.
    """
    ll = 0
    for idx in range(len(y)):
        proba = logit(X[idx], w)
        ll += y[idx] * np.log(proba) + (1 - y[idx]) * np.log(1 - proba)
    return ll


def mcfadden_rsquared(trained_weight, null_weight, X, y):
    """McFadden's pseudo R-squared: 1 - LL(trained) / LL(null)."""
    trained_log_likelihood = log_likelihood(trained_weight, X, y)
    null_log_likelihood = log_likelihood(null_weight, X, y)
    return 1.0 - (trained_log_likelihood / null_log_likelihood)
# -
# ROC-AUC Curve
def plot_roc(nn: LogisticRegression, X, y_truth, pos_label, title):
    """Plot and save the ROC curve (with AUC in the title) for `nn` on X.

    NOTE(review): predict_proba(X)[:, 0] is the probability of the FIRST
    class in nn.classes_ — confirm column 0 corresponds to `pos_label`,
    otherwise the curve/AUC are computed on the complementary score.
    """
    # Calculate for score of this model based on X.
    y_predicted_prob = nn.predict_proba(X)
    y_predicted_prob = np.array([p[0] for p in y_predicted_prob])
    fpr, tpr, threshold = roc_curve(y_truth, y_predicted_prob, pos_label=pos_label)
    # Binarise the truth labels for roc_auc_score (1 = positive class).
    roc_auc = roc_auc_score([1 if p == pos_label else 0 for p in y_truth], y_predicted_prob)
    roc_plot = sns.lineplot(x="False Positive Rate", y="True Positive Rate", data={"False Positive Rate": fpr, "True Positive Rate": tpr})
    roc_plot.set_title(title + f"\nAUC: {roc_auc}")
    roc_plot.set_xlabel("False Positive Rate")
    roc_plot.set_ylabel("True Positive Rate")
    fig = roc_plot.get_figure()
    # The title doubles as the output file name under IMG_PATH.
    fig.savefig(IMG_PATH + f"{title}.png")
# Plot confusion matrix.
# confusion_matrix returns (tp, fp, tn, fn) with "female" as positive class.
true_positive, false_positive, true_negative, false_negative = confusion_matrix(y_hat_gender, y_gender, "female")
plot_confusion_matrix(true_negative, false_positive, false_negative, true_positive, ["male", "female"])
# Compute pseudo r-squared
# Encode gender numerically (male=0, female=1) for the likelihood computation.
y_numeric = [0 if g == "male" else 1 for g in y_gender]
# Null model: weight 1.0 on the first feature only, zeros elsewhere.
null_weight = np.array([1.0 if i == 0 else 0.0 for i in range(X.shape[1])])
print(f"McFadden's pseudo R-squared: {mcfadden_rsquared(nn_gender.coef_, null_weight, X, y_numeric)[0]}")
# value from 0.2-0.4 is excellent fit!!
# Create ROC
plot_roc(nn_gender, X, y_gender, pos_label="female", title="ROC for nn_gender")
# Model Report
print(classification_report(y_gender, y_hat_gender))
| NN Statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab
import pandas as pd
import glob as glob
# **Introduction**
#
# The dataset from MS Birka Stockholm is in .xls Excel-97 format. And the data was gathered in several steps during three different trips. Some of the data is overlapping in time-index, and same headers (data points) exist in several files. So to be able to filter and consolidate all the data it must be done in several steps. As the Excel-97 format is limited in 65k rows and also a limited amount of columns it was needed to divide into several files.
#
# Some of the data is in Boolean format, and some have data-points missing but the majority should be in numerical format.
#
# In all Excel files the metadata of each data point (header) is in the first 14 rows. The first step is to pre-process the .xls files: filter out non-Unicode characters, insert a split character between the metadata fields, and join everything into the data header, while keeping the index in time-series format.
#
# +
# Input/output locations: raw .xls exports in, cleaned CSVs and the HDF5
# database out.
csv_data_path = '/Users/fredde/Database/csv-1year/'
xls_data_path = '/Users/fredde/Database/data-files/1year/'
database_path = '/Users/fredde/Database/'
xlsfiles = glob.glob(xls_data_path + '*.xls')
print(xlsfiles)
df = pd.DataFrame()
all_data = pd.DataFrame()
# -
# Clean up csv
# +
# Make CSV-files and clean up headers.
# For each .xls export: read it, build a compact column header of the form
# "name:id:unit:data_type:sample_interval" from the 14 metadata rows, keep
# only the data rows, and write the result next to the other CSVs.
for i in range(len(xlsfiles)):
    df = pd.DataFrame()
    df2 = pd.DataFrame()
    print('Processing: '+str(xlsfiles[i].split('/')[-1].split('.')[0]))
    df = pd.read_excel(xlsfiles[i],index_col=0)
    headers = list(df)
    headers_new = list()
    # And now extract the relevant meta-data in the first couple of rows.
    # Make a new list of headers in the file. Using ':' as a split.
    for head in headers:
        name = str(df[head].iloc[0])
        id_nr = str(head.split('.')[2].split(':')[1])
        unit = str(df[head].iloc[1])
        data_type = str(df[head].iloc[5])
        sample_interval = str(df[head].iloc[8])
        headers_new.append(str(name+':'+id_nr+':'+unit+':'+data_type+':'+sample_interval))
    for n in range(len(headers_new)):
        # Rows 0-12 are metadata; the data starts at positional row 13.
        # .iloc replaces the removed .ix accessor — the slice was positional
        # here anyway (the index holds non-integer timestamp/meta labels).
        series = df[headers[n]].iloc[13:]
        df2[headers_new[n]] = series
    # Save in .csv format.
    df2.to_csv(csv_data_path + xlsfiles[i].split('/')[-1].split('.')[0] + '.csv')
    #df2.to_excel(csv_data_path + xlsfiles[i].split('/')[-1].split('.')[0] + '.xls')
    # Clean up memory
    del df2
    del df
    print(str(i+1) + ' done of ' + str(len(xlsfiles)))
print('All done!')
# -
# Time to create a database, joining all the files into one master DataFrame.
# +
# Read every intermediate CSV and concatenate into one master frame.
# The frames are collected in a list and concatenated ONCE: calling
# DataFrame.append() inside the loop copies the accumulator every
# iteration (quadratic), and the method was removed in pandas 2.0.
frames = []
csvfiles = glob.glob(csv_data_path + '*.csv')
for i in range(len(csvfiles)):
    print('Processing: '+str(csvfiles[i].split('/')[-1].split('.')[0]))
    frames.append(pd.read_csv(csvfiles[i],header=0,index_col=0,dtype='a'))
    print(str(i+1) + ' done of ' + str(len(csvfiles)))
all_data = pd.concat(frames) if frames else pd.DataFrame()
del frames  # Clean up memory
df = all_data.sort_index()
del all_data # Clean memory
df_out = pd.DataFrame() # Make a new DataFrame so the process of converting to numeric goes faster
# Convert each column to numeric; errors='ignore' leaves non-numeric
# columns (booleans/strings) untouched.
for col in list(df):
    df_out[col] = pd.to_numeric(df[col],errors='ignore')
df_out.to_hdf(database_path + 'all_data_1year.h5','table')
print('All done!')
# -
# Load the data and see if it can be used..
# +
# Reload the uncompressed store to check the database is usable.
df = pd.read_hdf(database_path + 'all_data_1year.h5','table')
# -
# Write a blosc-compressed copy and compare access speed against the
# uncompressed version (the %timeit lines below).
df.to_hdf(database_path + 'all_data_1year_comp.h5','table',complevel=9,complib='blosc')
df2 = pd.read_hdf(database_path + 'all_data_1year_comp.h5','table')
df2.describe()
# %timeit df['AE SCR 2CFC20 ALARM:6854:-:Average:900']
# %timeit df2['AE SCR 2CFC20 ALARM:6854:-:Average:900']
| notebooks/create_db/create_birka_database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Filtering Comprehensions
#
# +
# checking prime or not
from math import sqrt


def is_prime(x):
    """Return True when x is a prime number, False otherwise (x < 2 included)."""
    if x < 2:
        return False
    # x is composite iff it has a divisor in [2, sqrt(x)].
    return all(x % candidate != 0 for candidate in range(2, int(sqrt(x)) + 1))
# -
# All primes up to 100, via a filtering list comprehension.
[x for x in range(101) if is_prime(x)]
# Dict comprehension: each prime p maps to its divisors of p^2, i.e. (1, p, p^2).
prime_square_divisors = { x: (1,x,x*x) for x in range(101) if is_prime(x)}
prime_square_divisors
| 06.Core Python - Getting Started/09.Filtering Comprehensions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.cuda.current_device()
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
import time
import os
import logging
import numpy as np
from my_layers import LatticeLocalSL2, GroupLocalSL2, GroupMaxPool, GroupReLU
logging.basicConfig(level=logging.INFO, filename='MNIST_test.log',
format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# + pycharm={"name": "#%%\n", "is_executing": false}
# Deterministic 50k/10k train/validation split over the 60k MNIST
# training indices (the shuffle is deliberately commented out).
indices = list(range(60_000))
# np.random.shuffle(indices)
train_indices = indices[:50_000]
valid_indices = indices[50_000:]
# + pycharm={"name": "#%%\n", "is_executing": false}
# Standard MNIST normalisation constants (mean 0.1307, std 0.3081).
transformation = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))])
# Both samplers draw from the same train=True dataset; the index lists
# above keep the two subsets disjoint.
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
train_set = torchvision.datasets.MNIST(root='./data', train=True,
                                       download=True, transform=transformation)
valid_set = torchvision.datasets.MNIST(root='./data', train=True,
                                       download=True, transform=transformation)
classes = ('0', '1', '2', '3',
           '4', '5', '6', '7', '8', '9')
NUM_CLASSES = 10
# + pycharm={"name": "#%%\n", "is_executing": false}
class DoubleMNIST(nn.Module):
    """Group-equivariant CNN for MNIST: two SL2 group convolutions with
    pooling, followed by a two-layer classifier head over 10 digit classes."""

    def __init__(self, first_channels=32, second_channels=64, fc_channels=128):
        super().__init__()
        # Lift from the pixel lattice onto the group, then convolve on the group.
        self.gconv1 = LatticeLocalSL2(1, first_channels, 5, 2, len_fun="len", group='SL2', pad_type='partial')
        self.gconv2 = GroupLocalSL2(first_channels, second_channels, 5, 2, len_fun="len")
        # 8 * 4 * 4 — presumably group elements x spatial grid after two pool
        # stages; TODO confirm against the custom layer output shapes.
        self.fc1 = nn.Linear(second_channels * 8 * 4 * 4, fc_channels)
        self.fc2 = nn.Linear(fc_channels, 10)
        self.pool = GroupMaxPool(2, 2)
        self.grelu = GroupReLU()

    def forward(self, x):
        # The first group conv returns an index structure consumed by the second.
        group_indices, out = self.gconv1(x)
        out = self.pool(self.grelu(out))
        _, out = self.gconv2(out, group_indices)
        out = self.pool(self.grelu(out))
        out = out.view(out.shape[0], -1)  # flatten per sample
        out = F.relu(self.fc1(out))
        return self.fc2(out)
# + pycharm={"name": "#%%\n", "is_executing": false}
import torch.optim as optim
predict_criterion = nn.CrossEntropyLoss()
# + pycharm={"name": "#%%\n", "is_executing": false}
def train(net):
    """Train `net` on MNIST and report per-epoch train/validation accuracy.

    Relies on the module-level `train_set`, `valid_set`, `train_sampler`,
    `valid_sampler`, `device` and `predict_criterion`. Runs at most 30
    epochs and stops one epoch after the training set is first classified
    perfectly (the extra epoch confirms the fit).

    Returns:
        (train_error_list, test_error_list): per-epoch accuracy percentages
        on the training and validation splits.
    """
    learned_train_set = False  # set once training accuracy first hits 100%
    true_epoch = 0
    total_time = 0
    train_error_list = []
    test_error_list = []
    testloader = torch.utils.data.DataLoader(valid_set, batch_size=128, sampler=valid_sampler,
                                             shuffle=False, num_workers=2, pin_memory=False)
    trainloader = torch.utils.data.DataLoader(train_set, batch_size=20, sampler=train_sampler,
                                              shuffle=False, num_workers=2, pin_memory=True)
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.5)
    for epoch in range(30):  # loop over the dataset multiple times
        start_time = time.time()
        true_epoch += 1
        running_loss = 0.0
        # *_total counters accumulate over the whole epoch; the plain ones are
        # reset every 500 mini-batches for the periodic progress print.
        true_train_total = 0.0
        correct_train_total = 0.0
        correct_train = 0.0
        total_train = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = predict_criterion(outputs, labels)
            _, predicted = torch.max(outputs.data, 1)
            total_train += labels.size(0)
            true_train_total += labels.size(0)
            correct_train += (predicted == labels).sum().item()
            correct_train_total += (predicted == labels).sum().item()
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 500 == 499:  # print every 500 mini-batches
                # BUGFIX: the running loss covers 500 batches, so average by
                # 500 (the old code divided by 100, overstating the loss 5x).
                print(f'[{true_epoch}, {i + 1}] loss: {running_loss / 500:.4f}, Correct Rate: {100 * correct_train / total_train:.2f}%, Cumulative time: {time.time() - start_time}s')
                running_loss = 0.0
                correct_train = 0.0
                total_train = 0.0
        train_error_list.append(100*correct_train_total / true_train_total)
        print(f"Correctness on training epoch {true_epoch}: {100*correct_train_total / true_train_total:.2f}%")
        logging.info(f"Correctness on training epoch {true_epoch}: {100*correct_train_total / true_train_total:.2f}%")
        # --- validation pass (no gradients) ---
        correct = 0
        total = 0
        with torch.no_grad():
            for data in testloader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = net(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        test_error_list.append(100 * correct / total)
        print(f'Accuracy of the network on the 10000 test images for epoch {true_epoch}: {100 * correct / total:.2f}%')
        logging.info(f'Accuracy of the network on the 10000 test images for epoch {true_epoch}: {100 * correct / total:.2f}%')
        total_time += time.time() - start_time
        print(f'Finished epoch {true_epoch}, cumulative time: {total_time}s')
        # Stop one epoch after the first perfect training epoch.
        if correct_train_total == true_train_total:
            if not learned_train_set:
                learned_train_set = True
            else:
                break
    print("Finished")
    logging.info("Finished Training")
    return train_error_list, test_error_list
# + pycharm={"name": "#%%\n", "is_executing": false}
logging.info("------------------------Beginning Test---------------------")
for i in range(6):
print(f'Starting test {i+1}')
logging.info(f'Starting test {i+1}')
net = DoubleMNIST(9,32,128)
net.to(device)
net.gconv1.flat_indices = net.gconv1.flat_indices.to(device)
logging.info(repr(net))
train(net)
logging.info("------------------------Ending Test---------------------")
# + pycharm={"name": "#%%\n", "is_executing": false}
| SL2Equivariance/training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import itertools
import emachine as EM
# -
np.random.seed(1)
s0 = np.loadtxt('s_test.dat')
l,n = s0.shape
print(l,n)
# find conserved variables
fc = 0.9
l,n = s0.shape
frequency = [max(np.unique(s0[:,i], return_counts=True)[1]) for i in range(n)]
cols_conserved = [i for i in range(n) if frequency[i]/float(l) > fc]
print(cols_conserved)
cols_active = np.delete(np.arange(0,n),cols_conserved)
print(len(cols_active))
# load predicted w from train set
#w_mle = np.loadtxt('w_mle.dat')
#w_ple = np.loadtxt('w_ple.dat')
w_eps1 = np.loadtxt('w_em_eps07.dat')
w_eps2 = np.loadtxt('w_em_eps09.dat')
w_em = np.loadtxt('w_em.dat')
#print(w_em.shape)
#w_HF = np.loadtxt('w_HF.dat')
# random missing position
# For each hidden-unit count, hide that many spins per sample, enumerate every
# hidden configuration, and score how often each inferred w recovers them.
for n_hidden in [2,4,6,8,10,12,14,16]:
#for n_hidden in [2]:
    # every possibilities of configurations of hiddens
    s_hidden_possibles = np.asarray(list(itertools.product([1.0, -1.0], repeat=n_hidden)))
    n_possibles = s_hidden_possibles.shape[0]
    #print(n_possibles)
    # consider a specific sample t:
    # acc rows: one per weight estimate being compared (eps1, eps2, EM-optimal)
    acc = np.zeros((3,l))
    for t in range(l):
        s = s0[t]
        #print(s)
        #hidden = np.random.choice(np.arange(17,n),n_hidden,replace=False)
        #hidden = np.random.choice(np.arange(0,n),n_hidden,replace=False)
        # hide positions only among the non-conserved (active) columns
        hidden = np.random.choice(cols_active,n_hidden,replace=False)
        #print(hidden)
        # replicate the sample and substitute every hidden configuration
        s_possibles = np.tile(s,(n_possibles,1))
        s_possibles[:,hidden] = s_hidden_possibles
        #print(s_possibles)
        # calculate energy of each possible configuration
        ops = EM.operators(s_possibles)
        """
        #----------------------------------------------
        # w from MLE
        energy0 = -ops.dot(w_mle)
        # select the best sequence that maximize probability
        #s_hidden_recover0 = s_hidden_possibles[np.argmin(energy0)]
        #print(s_hidden_recover)
        acc[0,t] = np.sum((s[hidden] == s_hidden_recover0))
        #print(acc[t])
        """
        #----------------------------------------------
        # w from HF
        #energy0 = -ops.dot(w_HF)
        energy0 = -ops.dot(w_eps1)
        # lowest energy = highest probability under the model
        s_hidden_recover0 = s_hidden_possibles[np.argmin(energy0)]
        acc[0,t] = np.sum((s[hidden] == s_hidden_recover0))
        # w from PLE
        #energy1 = -ops.dot(w_ple)
        energy1 = -ops.dot(w_eps2)
        s_hidden_recover1 = s_hidden_possibles[np.argmin(energy1)]
        acc[1,t] = np.sum((s[hidden] == s_hidden_recover1))
        #----------------------------------------------
        # w from EM
        energy2 = -ops.dot(w_em)
        s_hidden_recover2 = s_hidden_possibles[np.argmin(energy2)]
        acc[2,t] = np.sum((s[hidden] == s_hidden_recover2))
    # fraction of hidden spins recovered, averaged over all samples
    acc_av = acc.sum(axis=1)/(n_hidden*l)
    #print('accuracy of MLE, PLE, EM, HF:',n_hidden,acc_av)
    print('accuracy with eps1, eps2, eps_optimal:',n_hidden,acc_av)
| Ref/fig4_smoking_data_train30_test70/3predict_hidden_eps07eps09.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SSD300 Training Tutorial
#
# This tutorial explains how to train an SSD300 on the Pascal VOC datasets. The preset parameters reproduce the training of the original SSD300 "07+12" model. Training SSD512 works similarly, so there's no extra tutorial for that. The same goes for training on other datasets.
#
# You can find a summary of a full training here to get an impression of what it should look like:
# [SSD300 "07+12" training summary](https://github.com/pierluigiferrari/ssd_keras/blob/master/training_summaries/ssd300_pascal_07%2B12_training_summary.md)
# +
from keras.optimizers import Adam, SGD
from keras.callbacks import Callback, ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger, EarlyStopping, TensorBoard
from keras import backend as K
from keras.models import load_model
from math import ceil
import numpy as np
from matplotlib import pyplot as plt
from keras.models import Model
from matplotlib import pyplot as plt
from keras.preprocessing import image
from imageio import imread
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss_mod import SSDLoss
from keras_loss_function.keras_ssd_loss_proj import SSDLoss_proj
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from ssd_encoder_decoder.ssd_input_encoder_mod import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_geometric_ops import Resize_Modified
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels_Modified
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation_modified
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from bounding_box_utils.bounding_box_utils import iou, convert_coordinates
from ssd_encoder_decoder.matching_utils import match_bipartite_greedy, match_multi
import random
np.set_printoptions(precision=20)
import tensorflow as tf
np.random.seed(1337)
# %matplotlib inline
# -
# ## 0. Preliminary note
#
# All places in the code where you need to make any changes are marked `TODO` and explained accordingly. All code cells that don't contain `TODO` markers just need to be executed.
# ## 1. Set the model configuration parameters
#
# +
img_height = 300 # Height of the model input images
img_width = 600 # Width of the model input images
img_channels = 3 # Number of color channels of the model input images
mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
n_classes = 1 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets
scales = scales_pascal
aspect_ratios = [[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters
two_boxes_for_ar1 = True # print(y_encoded)
steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.
offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation
normalize_coords = True
# -
# ## 2. Build or load the model
#
# You will want to execute either of the two code cells in the subsequent two sub-sections, not both.
# +
# 1: Build the Keras model.
K.clear_session() # Clear previous models from memory.
model = ssd_300(image_size=(img_height, img_width, img_channels),
n_classes=n_classes,
mode='training',
l2_regularization=0.0005,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
normalize_coords=normalize_coords,
subtract_mean=mean_color,
swap_channels=swap_channels)
# 2: Load some weights into the model.
# TODO: Set the path to the weights you want to load.
weights_path = 'weights/VGG_ILSVRC_16_layers_fc_reduced.h5'
model.load_weights(weights_path, by_name=True)
# 3: Instantiate an optimizer and the SSD loss function and compile the model.
# If you want to follow the original Caffe implementation, use the preset SGD
# optimizer, otherwise I'd recommend the commented-out Adam optimizer.
# -
model.summary()
# +
def gt_rem(pred, gt):
    """Trim `gt` along axis 1 so it matches the (shorter) box count of `pred`.

    Assumes batch size 1 and 18 values per box (the slice sizes) -- TODO
    confirm against the encoder output layout. The original body also
    computed the length difference into an unused local; that dead
    statement has been removed.
    """
    return tf.slice(gt, [0, 0, 0], [1, tf.shape(pred)[1], 18], name="rem_slice")
def gt_add(pred, gt):
    """Pad `gt` along axis 1 up to the box count of `pred`.

    The first `val` ground-truth boxes are duplicated and prepended so both
    tensors end up the same length. Assumes batch size 1 and 18 values per
    box -- TODO confirm against the encoder output layout.
    """
    #add to gt
    # how many boxes gt is short of pred
    val = tf.subtract(tf.shape(pred)[1], tf.shape(gt)[1],name="gt_add_subtract")
    ext = tf.slice(gt, [0, 0, 0], [1, val, 18], name="add_slice")
    gt = K.concatenate([ext,gt], axis=1)
    return gt
# Identity branch for the already-equal case.
# NOTE(review): defined but not referenced by make_equal below -- verify intent.
def equalalready(gt, pred): return pred
def make_equal(pred, gt):
    """Return `gt` trimmed (gt_rem) or padded (gt_add) to the box count of `pred`."""
    equal_tensor = tf.cond(tf.shape(pred)[1] < tf.shape(gt)[1], lambda: gt_rem(pred, gt), lambda: gt_add(pred, gt), name="make_equal_cond")
    return equal_tensor
# ssd_loss3 = SSDLoss_proj(neg_pos_ratio=3, alpha=1.0)
# ssd_loss4 = SSDLoss_proj(neg_pos_ratio=3, alpha=1.0)
def Accuracy(y_true, y_pred):
    '''Calculates the mean accuracy rate across all predictions for
    multiclass classification problems.

    Only the first 18 values per anchor box are kept -- presumably the class
    scores plus box data for one head (TODO confirm against the encoder) --
    and the final 4 of those (box coordinates) are excluded from the argmax,
    so the comparison is over class columns only.
    '''
    # NOTE(review): debug prints of symbolic tensors; remove once verified.
    print("y_pred: ",y_pred)
    print("y_true: ",y_true)
    # keep the first 18 values per box
    y_true = y_true[:,:,:18]
    y_pred = y_pred[:,:,:18]
    # classification accuracy: argmax over class columns (coords dropped)
    return K.mean(K.equal(K.argmax(y_true[:,:,:-4], axis=-1),
                K.argmax(y_pred[:,:,:-4], axis=-1)))
def Accuracy_Proj(y_pred, y_true):
    """Accuracy metric for the projection heads.

    NOTE(review): the parameter order (y_pred, y_true) is reversed relative
    to the Keras metric convention (y_true, y_pred) -- confirm which way
    Keras actually feeds the tensors here.
    NOTE(review): `matcher` is not defined anywhere in this notebook, so this
    metric will fail at graph-build time unless it is supplied elsewhere.
    """
    #add to gt
    # split each box row into the first 18 values and the remainder
    y_true_1 = y_true[:,:,:18]
    y_pred_1 = y_pred[:,:,:18]
    y_true_2 = y_true[:,:,18:]
    y_pred_2 = y_pred[:,:,18:]
    acc = tf.constant(0)  # NOTE(review): unused
    y_pred, y_true = matcher(y_true_1,y_pred_1,y_true_2,y_pred_2,1)
    return K.mean(K.equal(K.argmax(y_true[:,:,:-4], axis=-1),
                K.argmax(y_pred[:,:,:-4], axis=-1)))
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss1 = SSDLoss(neg_pos_ratio=3, alpha=1.0)
ssd_loss2 = SSDLoss(neg_pos_ratio=3, alpha=1.0)
ssd_loss3 = SSDLoss_proj(neg_pos_ratio=3, alpha=1.0)
ssd_loss4 = SSDLoss_proj(neg_pos_ratio=3, alpha=1.0)
losses = {
"predictions_1": ssd_loss1.compute_loss,
"predictions_2": ssd_loss2.compute_loss,
"predictions_1_proj": ssd_loss3.compute_loss,
"predictions_2_proj": ssd_loss4.compute_loss
}
lossWeights = {"predictions_1": 1.0,"predictions_2": 1.0,"predictions_1_proj": 1.0,"predictions_2_proj": 1.0}
# MetricstDict = {"predictions_1": Accuracy,"predictions_2": Accuracy, "predictions_1_proj": Accuracy_Proj,"predictions_2_proj": Accuracy_Proj}
# lossWeights = {"predictions_1": 1.0,"predictions_2": 1.0}
MetricstDict = {"predictions_1": Accuracy,"predictions_2": Accuracy}
model.compile(optimizer=adam, loss=losses, loss_weights=lossWeights, metrics=MetricstDict)
# model.compile(optimizer=adam, loss=losses, loss_weights=lossWeights)
# -
model.summary()
# ### 2.2 Load a previously created model
#
# +
# train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path='dataset_pascal_voc_07+12_trainval.h5')
# val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path='dataset_pascal_voc_07_test.h5')
train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
VOC_2007_images_dir = '../datasets/Images/'
# The directories that contain the annotations.
VOC_2007_annotations_dir = '../datasets/VOC/Pasadena/Annotations_Multi/'
VOC_2007_trainval_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/reid_neu/train_few.txt'
VOC_2007_val_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/reid_neu/val_few.txt'
VOC_2007_test_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/reid_neu/test_few.txt'
# The pat[Accuracy]hs to the image sets.
# VOC_2007_trainval_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/trainval_sia.txt'
# VOC_2007_val_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/val_sia.txt'
# VOC_2007_test_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/test_sia.txt'
# VOC_2007_trainval_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/trainval_sia_same.txt'
# VOC_2007_val_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/val_sia_same.txt'
# VOC_2007_test_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/test_sia_same.txt'
# VOC_2007_trainval_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/trainval_sia_sub.txt'
# VOC_2007_val_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/val_sia_sub.txt'
# VOC_2007_test_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/test_sia_sub.txt'
# VOC_2007_trainval_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/trainval_one.txt'
# VOC_2007_val_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/val_one.txt'
# VOC_2007_test_image_set_filename = '../datasets/VOC/Pasadena/ImageSets/Main/siamese/test_one.txt'
# The XML parser needs to now what object class names to look for and in which order to map them to integers.
classes = ['background',
'tree']
train_dataset.parse_xml(images_dirs=[VOC_2007_images_dir],
image_set_filenames=[VOC_2007_trainval_image_set_filename],
annotations_dirs=[VOC_2007_annotations_dir],
classes=classes,
include_classes='all',
exclude_truncated=False,
exclude_difficult=False,
ret=False)
val_dataset.parse_xml(images_dirs=[VOC_2007_images_dir],
image_set_filenames=[VOC_2007_val_image_set_filename],
annotations_dirs=[VOC_2007_annotations_dir],
classes=classes,
include_classes='all',
exclude_truncated=False,
exclude_difficult=True,
ret=False)
# +
batch_size = 4
ssd_data_augmentation = SSDDataAugmentation_modified(img_height=img_height,
img_width=img_width,
background=mean_color)
# For the validation generator:
convert_to_3_channels = ConvertTo3Channels_Modified()
resize = Resize_Modified(height=img_height, width=img_width)
# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf__1').output_shape[1:3],
model.get_layer('fc7_mbox_conf__1').output_shape[1:3],
model.get_layer('conv6_2_mbox_conf__1').output_shape[1:3],
model.get_layer('conv7_2_mbox_conf__1').output_shape[1:3],
model.get_layer('conv8_2_mbox_conf__1').output_shape[1:3],
model.get_layer('conv9_2_mbox_conf__1').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
normalize_coords=normalize_coords)
# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
train_generator = train_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[ssd_data_augmentation],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[convert_to_3_channels,
resize],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
# -
# ## 4. Set the remaining training parameters
#
# We've already chosen an optimizer and set the batch size above, now let's set the remaining training parameters. I'll set one epoch to consist of 1,000 training steps. The next code cell defines a learning rate schedule that replicates the learning rate schedule of the original Caffe implementation for the training of the SSD300 Pascal VOC "07+12" model. That model was trained for 120,000 steps with a learning rate of 0.001 for the first 80,000 steps, 0.0001 for the next 20,000 steps, and 0.00001 for the last 20,000 steps. If you're training on a different dataset, define the learning rate schedule however you see fit.
#
# I'll set only a few essential Keras callbacks below, feel free to add more callbacks if you want TensorBoard summaries or whatever. We obviously need the learning rate scheduler and we want to save the best models during the training. It also makes sense to continuously stream our training history to a CSV log file after every epoch, because if we didn't do that, in case the training terminates with an exception at some point or if the kernel of this Jupyter notebook dies for some reason or anything like that happens, we would lose the entire history for the trained epochs. Finally, we'll also add a callback that makes sure that the training terminates if the loss becomes `NaN`. Depending on the optimizer you use, it can happen that the loss becomes `NaN` during the first iterations of the training. In later iterations it's less of a risk. For example, I've never seen a `NaN` loss when I trained SSD using an Adam optimizer, but I've seen a `NaN` loss a couple of times during the very first couple of hundred training steps of training a new model when I used an SGD optimizer.
# +
# Define a learning rate schedule.
def lr_schedule(epoch):
    """Piecewise-constant learning rate, replicating the original Caffe
    SSD schedule: 1e-3 before epoch 80, 1e-4 before epoch 100, 1e-5 after."""
    for boundary, rate in ((80, 0.001), (100, 0.0001)):
        if epoch < boundary:
            return rate
    return 0.00001
# +
class prediction_history(Callback):
    """Keras callback that, at the end of every epoch, runs the four
    prediction heads on a cached batch (outputs/predder.npy) and prints the
    loss of the first projection head."""

    def __init__(self):
        print("Predictor")

    def on_epoch_end(self, epoch, logs={}):
        # Cached batch: element [0] holds the four model inputs, element [1]
        # the dict of encoded labels keyed by output-layer name.
        predder = np.load('outputs/predder.npy')
        bX = predder[0][0]
        bZ = predder[0][1]
        gX = predder[0][2]
        gZ = predder[0][3]
        y_true = predder[1]['predictions_1_proj']
        ssd_loss_ = SSDLoss_proj(neg_pos_ratio=3, alpha=1.0)
        # One sub-model per prediction head so each can be evaluated on its own.
        intermediate_layer_model = Model(inputs=model.input,
                                         outputs=model.get_layer("predictions_1").output)
        intermediate_layer_model_1 = Model(inputs=model.input,
                                           outputs=model.get_layer("predictions_1_proj").output)
        intermediate_layer_model_2 = Model(inputs=model.input,
                                           outputs=model.get_layer("predictions_2").output)
        intermediate_layer_model_3 = Model(inputs=model.input,
                                           outputs=model.get_layer("predictions_2_proj").output)
        intermediate_output = intermediate_layer_model.predict([bX,bZ,gX,gZ])
        intermediate_output_1 = intermediate_layer_model_1.predict([bX,bZ,gX,gZ])
        intermediate_output_2 = intermediate_layer_model_2.predict([bX,bZ,gX,gZ])
        intermediate_output_3 = intermediate_layer_model_3.predict([bX,bZ,gX,gZ])
        # BUGFIX: compute_loss was called on the class with the prediction
        # tensor in the `self` slot and the arguments reversed; call it on
        # the instance built above with the (y_true, y_pred) order.
        print(ssd_loss_.compute_loss(y_true, intermediate_output_1))
        # np.save('outputs/predictions_1_'+str(epoch)+'.npy',intermediate_output)
        # np.save('outputs/predictions_1_proj_'+str(epoch)+'.npy',intermediate_output_1)
        # np.save('outputs/predictions_2_'+str(epoch)+'.npy',intermediate_output_2)
        # np.save('outputs/predictions_2_proj_'+str(epoch)+'.npy',intermediate_output_3)
# +
# Define model callbacks.
# TODO: Set the filepath under which you want to save the model.
model_checkpoint = ModelCheckpoint(filepath='checkpoints/double_ssd300_pascal_07+12_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
#model_checkpoint.best =
tbCallBack = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
csv_logger = CSVLogger(filename='ssd300_pascal_07+12_training_log.csv',
separator=',',
append=True)
learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule)
early_stopping = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=1,
verbose=0, mode='auto')
terminate_on_nan = TerminateOnNaN()
printer_callback = prediction_history()
# custom_los = custom_loss()
callbacks = [
# model_checkpoint,
# csv_logger,
# custom_los,
learning_rate_scheduler,
early_stopping,
terminate_on_nan,
printer_callback,
tbCallBack
]
# -
# ## 5. Train
#
# +
# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.
initial_epoch = 0
final_epoch = 500
steps_per_epoch = 1000
# history = model.fit_generator(generator=train_generator,
# steps_per_epoch=ceil(train_dataset_size/batch_size),
# epochs=final_epoch,
# callbacks=callbacks,
# verbose=1,
# validation_data=val_generator,
# validation_steps=ceil(val_dataset_size/batch_size),
# initial_epoch=initial_epoch)
history = model.fit_generator(generator=train_generator,
steps_per_epoch=ceil(train_dataset_size/batch_size),
epochs=final_epoch,
callbacks=callbacks,
verbose=1,
validation_data=val_generator,
validation_steps=ceil(val_dataset_size/batch_size),
initial_epoch=initial_epoch)
# -
| ssd300_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Analyzing bias web communities using word embeddings
# DATA 512 Project Report
# #### Introduction
#
# This project is an attempt to understand what word embedding models can tell us about the sources from which the models were trained. It builds on prior research on evaluating binary gender bias in word embeddings, using visualizations to compare several pre-trained word embeddings. The aim of the project is to understand whether it is possible to perform reverse inference from model bias to dataset bias and, if so, whether any conclusions can be drawn about the biases of the datasets. In this analysis, I've compared two independent sets of models: Facebook's fasttext models and Stanford University's GloVe models. Given the complexity of training a complete word embedding model from scratch, this project uses pre-trained word embeddings. As discussed in the limitations section below, this is potentially a source of noise which makes interpreting the results harder than expected. The 'future work' section suggests a few potential solutions which might improve on this analysis.
# #### Background / Prior Work
#
# Word embeddings are a way to encode the semantic structure of words using a high-dimensional vector space. Each word is mapped to a real-valued vector such that words that tend to co-occur in a sentence tend to have similar vectors.
# This representation is able to capture interesting semantic structure.
#
# Some common examples used to descibe the expressiveness of word embedding models are:
#
# 1. vec(King) - vec(man) + vec(woman) ~= vec(Queen)
# 2. vec(Mom) - vec(Dad) ~= vec(Grandma) - vec(Grandpa) ~= vec(Her) - vec(He)
#
# The second example above shows an example of an 'bias axis', which is used in this analysis. A bias axis is a pair of words (for example 'he' and 'she'). A more robust estimator for the bias axis (as described in the paper https://arxiv.org/pdf/1607.06520.pdf) is to collect a set of many word pairs that represent gender, and to compute the first principal component of their differences. An example of one such set of words is at (https://github.com/tolga-b/debiaswe/blob/master/data/definitional_pairs.json), (licensed under MIT, and collected by Amazon mechanical task workers)
# #### Methods
#
# The project contains two sets of comparisions:
#
# - between glove models on common crawl and twitter text
# - between fasttext models on wikipedia and common crawl text
#
# Note that all comparisons are between models within the same set (either fasttext or glove). The analysis avoids any comparison between a fasttext model and a glove model, since the difference in algorithm could introduce a bias into the word embeddings that might not be representative of the source text.
#
# However, even keeping the type of model constant, there are other factors that might affect the results and conclusions of this project - the exact type of preprocessing on the text and the model parameters - all of which need to be kept constant across the models in each set. For this report, I've tried to pick out pre-trained models that are as close to each other as possible, but given that the authors have not published any information on the exact parameters used, it is possible that they might be different.
#
# As described in the limitations section at the bottom of the report, given enough time, manually retraining each model from scratch, with the same parameters and pre-processing, would be ideal. Since this was not possible at this time, this report should be considered more of an experiment on whether model-based comparisons are possible (and not a claim that the exact conclusions drawn from visualizing bias in the set of pre-trained models I've chosen are representative of the sources.)
# +
import pickle
import numpy as np
import pandas as pd
from scipy import spatial
from tqdm import tqdm
from collections import namedtuple
# -
# #### Data load
#
# The pre-trained vector files are downloaded into the models folder. These files are linked below and must be downloaded and extracted before the script can be run.
#
# The models are located at
#
# Glove:
# - Large common crawl dataset: http://nlp.stanford.edu/data/glove.840B.300d.zip
# - Small common crawl dataset: http://nlp.stanford.edu/data/glove.42B.300d.zip
# - Twitter dataset: http://nlp.stanford.edu/data/glove.twitter.27B.zip
#
# The above links are distributed from the site (https://nlp.stanford.edu/projects/glove/), under the Public Domain Dedication and License v1.0 (http://opendatacommons.org/licenses/pddl/)
#
# Fasttext:
# - Common crawl dataset: https://s3-us-west-1.amazonaws.com/fasttext-vectors/crawl-300d-2M-subword.zip
# - Wikipedia dataset: https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.en.vec
#
# The links are from the site (https://fasttext.cc) and the models are distributed under the Creative Commons Attribution-Share-Alike License 3.0. (https://creativecommons.org/licenses/by-sa/3.0/)
#
# The analysis also uses a list of occupations, located at https://github.com/tolga-b/debiaswe/blob/master/data/professions.json, and is licensed under MIT.
def load_vectors(file_name, dim, has_header=True):
    """
    Reads word vectors from a text-format embedding file.

    Each line is expected to be "<word> <v1> ... <vdim>"; lines whose vector
    does not have exactly `dim` components are skipped.
    has_header controls if the first line (the "<count> <dim>" header used
    by fasttext .vec files) should be ignored.

    Returns:
        (words, vectors): dict mapping word -> row index into `vectors`,
        and a 2-D float16 array whose rows are the embeddings.
    """
    try:
        from tqdm import tqdm
    except ImportError:
        # Progress bar is cosmetic; degrade gracefully without tqdm.
        def tqdm(iterable):
            return iterable
    vectors = []
    words = {}
    with open(file_name) as f:
        if has_header:
            f.readline()  # skip the "<count> <dim>" header (values unused)
        for line in tqdm(f):
            parts = line.strip().split(' ')
            vec = np.array(parts[1:], dtype=np.float16)
            if vec.shape[0] == dim:
                # BUGFIX: index by the row actually appended, not the file
                # line number -- the old code used enumerate's counter, so a
                # single malformed/skipped line desynchronised the word->row
                # mapping for every subsequent word.
                words[parts[0]] = len(vectors)
                vectors.append(vec)
    return words, np.stack(vectors)
# Given that the model files are large (~6Gb), The following functions parse through each file and cache the model
# in the numpy format, which makes reading it into the notebook much faster.
# +
def cache_model(model_path, name, dim = 300):
    """
    Parses a text embedding file once and persists the result to disk:
    vectors to models/cache/<name>.vec.npy and the word index to
    models/cache/<name>.words.pkl, so later runs skip the slow parse.
    """
    vocab, matrix = load_vectors(model_path, dim)
    np.save(f'./models/cache/{name}.vec.npy', matrix)
    with open(f'./models/cache/{name}.words.pkl', 'wb+') as handle:
        pickle.dump(vocab, handle)
# the datastructure we use to represent a word embdding model.
EmbeddingModel = namedtuple('EmbeddingModel', ['words', 'vectors'])
def load_cached_model(name):
    """
    Loads a (words, vectors) EmbeddingModel previously written to the
    models/cache directory by cache_model.
    """
    with open(f'./models/cache/{name}.words.pkl', 'rb') as handle:
        vocab = pickle.load(handle)
    matrix = np.load(f'./models/cache/{name}.vec.npy')
    return EmbeddingModel(vocab, matrix)
# -
# We pickle all the models used in this analysis at the first run. Subsequent runs of this
# notebook only load in the picked varients.
# +
def cache():
    """
    Convenience helper: cache every model used in this analysis.
    Needs to be run only once; afterwards use load_cached_model.
    """
    model_specs = [
        ('./models/fasttext/wiki.en.vec', 'wiki', 300),
        ('./models/fasttext/crawl-300d-2M-subword.vec', 'cc', 300),
        ('./models/glove/glove.twitter.27B.200d.txt', 'twitter_glove', 200),
        ('./models/glove/glove.42B.300d.txt', 'cc_42_glove', 300),
        ('./models/glove/glove.840B.300d.txt', 'cc_840_glove', 300),
    ]
    for path, name, dim in model_specs:
        cache_model(path, name, dim)
# cache() # This needs to be run only once.
# Load the pre-cached embedding models (see cache()/cache_model above).
wiki = load_cached_model('wiki')                    # fasttext, Wikipedia
cc = load_cached_model('cc')                        # fasttext, Common Crawl (subword)
glove_twitter = load_cached_model('twitter_glove')  # GloVe, Twitter (200-d)
glove_cc1 = load_cached_model('cc_42_glove')        # GloVe, Common Crawl 42B tokens
glove_cc2 = load_cached_model('cc_840_glove')       # GloVe, Common Crawl 840B tokens
# -
# Helper functions to fetch a vector for a word and to compute similarities between words, given a model.
# +
def get_vector(model, word):
    """
    Return the L2-normalized embedding of `word` from `model`,
    or None when the word is out of vocabulary.
    """
    if word not in model.words:
        return None
    raw = model.vectors[model.words[word]]
    return raw / np.linalg.norm(raw)
def compare_vectors(model, word_a, word_b):
    """
    Absolute cosine similarity between two words under `model`.
    Returns None when either word is missing from the vocabulary.
    """
    vec_a = get_vector(model, word_a)
    vec_b = get_vector(model, word_b)
    if vec_a is None or vec_b is None:
        return None
    # spatial.distance.cosine returns the cosine *distance* (1 - similarity),
    # so invert it back before taking the absolute value.
    similarity = 1 - spatial.distance.cosine(vec_a, vec_b)
    return np.abs(similarity)
def bias(m, axis, w, scale=False):
    """
    Bias score of word `w` with respect to the two-word bias `axis`
    (e.g. ['she', 'he']), following the 'Direct Bias' definition (with C=1)
    of Bolukbasi et al., "Man is to Computer Programmer as Woman is to
    Homemaker? Debiasing Word Embeddings":
    http://papers.nips.cc/paper/6228-man-is-to-computer-programmer-as-woman-is-to-homemaker-debiasing-word-embeddings.pdf

    With scale=False (default) returns the raw 2-D point
    (similarity to axis[0], similarity to axis[1]) for plotting.
    With scale=True returns |sim(axis[0], w) - sim(axis[1], w)|,
    or None when either similarity is unavailable.
    """
    sim_first = compare_vectors(m, axis[0], w)
    sim_second = compare_vectors(m, axis[1], w)
    if not scale:
        return sim_first, sim_second
    if sim_first is None or sim_second is None:
        return None
    return np.abs(sim_first - sim_second)
# -
# The following code block contains helper function to create Plot.ly plots.
# +
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
# %matplotlib inline
init_notebook_mode(connected=True)
def scatter(x, y, words, label=False, title='Occupations w.r.t he-she axis'):
    """
    Render an interactive Plot.ly scatter plot of the points zip(x, y),
    one marker per word; hover (and optionally inline) text is the word.
    """
    marker_mode = 'markers+text' if label else 'markers'
    points = go.Scatter(
        x=x,
        y=y,
        text=words,
        mode=marker_mode,
        textposition='bottom center'
    )
    layout = go.Layout(
        title=title,
        hovermode='closest',
        xaxis=dict(
            title='Similarity to "she"',
            ticklen=5,
            zeroline=False,
            gridwidth=2,
        ),
        yaxis=dict(
            title='Similarity to "he"',
            ticklen=5,
            gridwidth=2,
        ),
        showlegend=False
    )
    iplot(go.Figure(data=[points], layout=layout))
def scatter_single_axis(points, words, label=True, title=""):
    """
    Plot bias scores on one categorical y-axis row per model, to compare
    the spread of scores between models.

    points: assumed shape (n_models, n_words, 2), where each entry is a
        (bias_score, model_label) pair — TODO confirm against the calling
        cells; the transpose below depends on this layout.
    words: one label per trace.
    """
    # Transpose to (n_words, n_models, 2) so each trace follows one word
    # across all models.
    a = np.array(list(zip(*points)))
    data = []
    for r, w in zip(a, words):
        trace = go.Scatter(
            x = r[:, 0],  # bias scores (one per model)
            y = r[:, 1],  # model labels -> categorical y axis
            text = w,
            name = w,
            mode = 'markers' + ('+text' if label else ''),
            textposition='bottom center'
        )
        data.append(trace)
    layout= go.Layout(
        title= title,
        hovermode= 'closest',
        xaxis= dict(
            title= 'Bias score',
            ticklen= 5,
            zeroline= False,
            gridwidth= 2,
        ),
        yaxis=dict(
            autorange = True,
            categoryorder = "category descending",
            title = "",
            type = "category"
        ),
        showlegend= True
    )
    fig = go.Figure(data=data, layout=layout)
    iplot(fig)
# -
# #### Findings
print(get_vector(wiki, 'he'))
print(get_vector(wiki, 'she'))
print(get_vector(wiki, 'programmer'))
(compare_vectors(wiki, 'he', 'programmer'),
compare_vectors(wiki, 'she', 'programmer'))
(compare_vectors(cc, 'he', 'programmer'),
compare_vectors(cc, 'she', 'programmer'))
# The comparision above already shows, that both the models associate the word 'programmer' with the word 'he'
# more than the word 'she'.
#
# Also, it is interesting that the common crawl model assigns a higher similarity in both cases, but the magnitude of the difference shows that there is a larger spread. The word 'programmer' is not equidistant from the words 'he' and 'she', and is more skewed in the common crawl model than in the wikipedia model.
profs = pd.read_json('./data/professions.json')
profs.head()
# ### Research question:
#
# How do web communities differ in their gender biases?
# - Comparing Wikipedia to Common crawl.
# - Comparing Twitter to Common crawl.
points = np.array([bias(wiki, ['she', 'he'], w) for w in profs[0].values])
scatter(points[:, 0], points[:, 1], profs[0].values, label = True, title='Fasttext Wikipedia')
# If the above plot is not visible on github, please use the following link:
# (github does not render plot.ly graphs)
#
# http://nbviewer.jupyter.org/github/viv-r/Data512-HCDS-Final-Project/blob/master/Report.ipynb
# The above visualizations show all occupations along with their similarities to both the axes.
# If the model were perfectly unbiased, we would expect all the words to lie the x=y line through the origin.
#
# The spread of the points around this line is an indication of bias, and for the wikipedia data, most of the words seem clustered at about the same location around (0.15, 0.12), which shows a slight bias towards 'she' for the list of occupations we've chosen.
points = np.array([bias(cc, ['she', 'he'], w) for w in profs[0].values])
scatter(points[:, 0], points[:, 1], profs[0].values, label= True, title='Fasttext Common crawl')
# Similar plots for the fasttext model trained on the Common Crawl data set show that the similarities have larger magnitudes in general. The cluster center in this case is very close to the x=y line, suggesting that most of the occupations we've chosen are equally biased towards 'he' and 'she'.
points = np.array([bias(glove_twitter, ['she', 'he'], w) for w in profs[0].values])
scatter(points[:, 0], points[:, 1], profs[0].values, label= True, title='Glove Twitter')
points = np.array([bias(glove_cc1, ['she', 'he'], w) for w in profs[0].values])
scatter(points[:, 0], points[:, 1], profs[0].values, label= True, title='Glove Common crawl 1')
points = np.array([bias(glove_cc2, ['she', 'he'], w) for w in profs[0].values])
scatter(points[:, 0], points[:, 1], profs[0].values, label= True, title='Glove Common crawl 2')
# The 3 plots above for the glove models show something different to the fast text models. The common crawl models both look relatively similar to each other. The twitter model looks clearly different to the other two, and seems to have two clusters - one close to the origin and one located at approximately (3.5,3). This suggests that, a subset of occupations are biased differently from the rest, but it not clear as to why this is so.
# ### Computing bias scores for words
#
# This section contains plots comparing two models based on the magnitude of difference between the similarites of the word to the bias axis.
# +
profs_subset = ['physician', 'boss', 'programmer', 'adventurer', 'trader', 'dancer', 'housekeeper', 'socialite']
# (label, model) pairs for each embedding family.
# BUG FIX: the 'glove_cc1' entry previously pointed at glove_cc2 (copy-paste
# error), so the 42B and 840B Common Crawl models were plotted as identical.
glove_models = [('glove_twitter', glove_twitter), ('glove_cc1', glove_cc1), ('glove_cc2', glove_cc2)]
ft_models = [('wiki', wiki), ('cc', cc)]
# Bias score (scale=True -> |sim_she - sim_he|) for each word under the first
# two glove models; entries are (score, model label) pairs.
points = np.array([[[bias(m, ['she', 'he'], w, scale=True), i] for w in profs_subset] for i, m in glove_models[:-1]])
scatter_single_axis(points, profs_subset)
# -
points = np.array([[[bias(m, ['she', 'he'], w, scale=True), i] for w in profs[0].values] for i, m in ft_models])
scatter_single_axis(points, profs_subset)
# While I expected the above visualizations to help understand if the models agree on the magnitude of bias, the results are not clear for most of the words due to overlap. Coming up with a better way to visualize this is listed in the 'future improvements' section below. Words like 'adventurer' and 'socialite' are placed at opposite ends in different models in each set, which could suggest that there is a significant different in how these communities use these words.
# #### Limitations
#
# Comparing machine learning models is a hard problem, there are many factors that affect what a model learns and keeping all of them consistent while varying just the data can be a challenge.
#
# - Pre-trained models are good for prototyping but ideally we’d want to train models from scratch just to ensure all the model parameters are being held constant. (so that the bias introduced by the model itself is held constant across datasets)
# - Original scope was to compare the models on multiple types of bias (religion, race, and gender), but I’ve had to reduce the scope to only gender with a binary gender model
# - Identification of bias axis is hard.
# In this project I’ve used 'he' and 'she' as the bias axis.
# A more general approach would be using the data from
# data from https://github.com/tolga-b/debiaswe/tree/master/data
# the authors have crowdsourced word pairs that define the binary gender axis.
# However, in general, this can be subjective and hard to define.
#
# #### Future work
#
# In the future, I would like to extend the comparisions performed in this notebook to other types of biases: religion, race, etc. In addition, it would also be interesting to explore if it is possible to use a non-binary bias axis to compare words against. It would also make the comparisions and results much more reliable if the models used were trained from scratch, so that it is possible to ensure constant parameters. Finally, a better way to interactively visualize the bias score for each word would make the analysis easy to understand.
#
# #### Conclusions
#
# The results of the analysis suggest that potentially significant differences exist between communities, and model-based comparisons might be able to extract that information. Given the fact that I had no control over the training of the exact models used in this analysis, I cannot claim that the results are conclusive. However, the methods used here could potentially be used in the following human centered applications:
#
# - Comparing across wikipedia articles to check if the writing style in one category of pages is different from other.
# - Validation of moderation policies to see if they result in changes to bias in text content, by comparing a model trained on text before introduction of policy to a model (with same parameters) trained on the text written after.
#
#
#
#
#
# #### References
#
# - <NAME> (2018) "Text Analytics Techniques in the Digital World: Word Embeddings and Bias," Irish Communication Review: Vol. 16: Iss. 1, Article 6. doi:10.21427/D7TJ05 Available at: https://arrow.dit.ie/icr/vol16/iss1/6
# - Demographic Word Embeddings for Racism Detection on Twitter, <NAME>, <NAME>, <NAME>: http://www.aclweb.org/anthology/I17-1093
# - Quantifying and Reducing Stereotypes in Word Embeddings, <NAME> <NAME> <NAME> <NAME> <NAME>: https://pdfs.semanticscholar.org/2558/231cadaf0b1a4ac79d1a5c79322c8fbd327f.pdf
# - Quantifying and Reducing Gender Stereotypes in Word Embeddings: https://drive.google.com/file/d/1IxIdmreH4qVYnx68QVkqCC9-_yyksoxR/view
# - Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>: http://papers.nips.cc/paper/6228-man-is-to-computer-programmer-as-woman-is-to-homemaker-debiasing-word-embeddings.pdf
# - <NAME>, <NAME>, and <NAME>. 2014. GloVe: Global Vectors for Word Representation.
| Report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Clustering for SP detection
#
# Will likely need multivew clustering
#
#
# +
import numpy as np
import pandas as pd
import seaborn as sns
import scipy.stats as stats
import scipy.special as scisp
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import mixture
import sklearn
import wiggum as wg
import sp_data_util as spdata
from sp_data_util import sp_plot,plot_clustermat
import itertools as itert
import string
# +
r_clusters = -.9 # correlation coefficient of clusters
cluster_spread = [.6,.8,.5] # pearson correlation of means
p_sp_clusters = .75 # portion of clusters with SP
k = [3, 2,5] # number of clusters
cluster_size = [7,1]
domain_range = [0, 20, 0, 20]
N = 200 # number of points
p_clusters = [[1.0/k_i]*k_i for k_i in k]
n_views = 3
many_sp_df_diff = spdata.geometric_indep_views_gmm_sp(n_views,r_clusters,cluster_size,cluster_spread,p_sp_clusters,
domain_range,k,N,p_clusters,numeric_categorical=True)
sp_design_list = [('x1','x2','A'),('x3','x4','B'), ('x5','x6','C')]
many_sp_df_diff.head()
# -
sp_plot(many_sp_df_diff,'x1','x2','A')
sp_plot(many_sp_df_diff,'x3','x4','B')
sp_plot(many_sp_df_diff,'x5','x6','C')
# We can represent the relationship between the categorical and continuous variables with a binary matrix that indicates which categorical variables represent known clusters in continuous dimensions. For the above data this is known and specified a priori, at least mostly. Since they are drawn fully independently, it is possible that there is a high degree of mutual information between two or more categorical variables, in which case there would be some errors in the matrix below.
z = [[1, 0, 0],[1, 0, 0],[0,1,0],[0,1,0],[0,0,1],[0,0,1]]
ax = plot_clustermat(z,'list')
plt.xlabel('categorical variables')
plt.gca().xaxis.set_label_position('top')
plt.xticks([0,1,2],['A','B','C'])
plt.ylabel('continuous variables')
plt.yticks(range(n_views*2),['x'+ str(i) for i in range(n_views*2)]);
# In the case where we know this underlying structure, it is straightforward to apply any clustering method, but if we do not know this strucutre, we need a method to detect this structure as well. To explore development of such algorithms we here consider different potential for more varied underlying structure and develop data generators for these.
# # A new clustering model
#
# We clearly want a way that we can cluster in only some dimensions at a time and also we might want a generator that allows more complex data than the binary matrix we had above. The generative model can also help derive the clustering algorithm.
#
# We at least want to for each categorical variable sample its values as a CRP- we don't want to have to set in advance. We may want to switch to pitman-yor for more control over rich get richer properties.
#
# Further, we can use a clustering model to assign which continuous variables to sample wrt each categorical variable.
#
# we can use a dpgmm/ crp model across the multiple dimenions and use worse case a sampler, but probably something like tamara's npb k-means-like formulation .
#
# an early stoppying criterion for some branches of the inference should be applied such that for views where there is confidence of no SP occurences should be stopped.
#
# Can we pose the mutliview clustering like a indian buffet process or feature allocation problem instead?
#
#
# I propose we have a varialbe structure model - that relates the categorical variables to continuous varialbe and a separate clustering model for each categorical variable. then after that, we will have models for the data generation for the
# ## Review of NPB models
#
# First, let's look at the models and see how they look. We'll discuss them first as options for the variable structure model.
#
# The CRP /Dirichlet process is a clustering model.
# +
D = 10      # number of continuous variables (CRP "customers")
alpha = 2   # CRP concentration: larger alpha -> more new clusters
pi = [1]    # seating distribution; starts with only the new-table option
z = []      # z[n] = cluster (table) assigned to variable n
for n in range(D):
    # sample from pi
    z.append(np.random.choice(len(pi),p=pi))
    K = max(z) +1  # number of tables instantiated so far
    # update counts
    counts,e = np.histogram(z,bins = np.arange(K+1)-.5)
    # append alpha and normalize to a distribution
    # (alpha + n + 1 is the total mass after seating n+1 customers)
    pi = np.append(counts,alpha)/(alpha + n +1.0)
# Visualize which categorical variable (cluster) each continuous
# variable was assigned to.
plot_clustermat(z,'crplist')
plt.xlabel('categorical variables')
plt.gca().xaxis.set_label_position('top')
plt.xticks(range(K),string.ascii_uppercase[:K])
plt.ylabel('continuous variables')
plt.yticks(range(D),['x'+ str(i) for i in range(D)]);
# +
# Pitman-Yor process sampler: generalizes the CRP with a discount d,
# giving more control over the rich-get-richer behaviour.
D = 10
alpha = 2 # concentration, must satisfy alpha > -d
d= .5 # discount, in [0,1); d = 0 recovers the plain CRP above
pi = [1]
z_py = []
for n in range(D):
    # sample from pi
    z_py.append(np.random.choice(len(pi),p=pi))
    K = max(z_py) +1
    # update counts
    counts,e = np.histogram(z_py,bins = np.arange(K+1)-.5)
    # Discount each occupied table by d and give the new-table option
    # mass alpha + d*K; the terms sum to alpha + n + 1, so pi normalizes.
    # denoms = np.append()
    pi = np.append(counts - d,alpha + d*K)/(alpha + n +1)
plot_clustermat(z_py,'crplist')
plt.xlabel('categorical variables')
plt.gca().xaxis.set_label_position('top')
plt.xticks(range(K),string.ascii_uppercase[:K])
plt.ylabel('continuous variables')
plt.yticks(range(D),['x'+ str(i) for i in range(D)]);
# NOTE(review): K above is max(z_py)+1 but K_py omits the +1, so the later
# `for k in range(K_py)` loop covers one cluster fewer — confirm intent.
K_py = max(z_py)
# -
# As a variable structure model this assumes no interaction among the categorical variables in how they influence continuous variables, because each continuous variable is assigned to exactly one categorical variable.
# +
#run CRP for each of K dims to sample categorical variables
group_by = []
for k in range(K_py):
sample_CRP()
# sample continuous variable
# -
# ## Feature Allocation
#
# We can sample from an indian buffet process to encode interaction between the different categorical varialbes in thier influence on continuous variables. This structure would be necessary, for example, to have race and gender jointly interact with some measurment (ie: income).
# +
# Samle from IBP
# # The first customer takes the first Poisson(gamma)
# dishes. The following customers try previously sampled
# dishes with probability mk/n, where mk is the
# number of people who tried dish k before customer
# n. Each customer also takes Poisson(gamma/n) new dishes.
# The value Znk records if customer n tried dish k.
def p_row(p):
    """Sample a binary vector whose entry i is 1 with probability p[i]."""
    draws = [np.random.choice([1, 0], p=[prob, 1 - prob]) for prob in p]
    return np.asarray(draws)
gamma = 2   # IBP rate: expected number of dishes per customer decays as gamma/n
z = []      # z[n] = binary row of dishes (features) taken by customer n
z_tmp = np.ones(np.random.poisson(gamma))  # first customer takes Poisson(gamma) dishes
m = np.zeros(z_tmp.shape)                  # m[k] = how many customers tried dish k so far
z.append(z_tmp)
for n in range(1,D):
    m += z_tmp
    # print(m)
    p = m/(n+1)   # probability of re-trying dish k: m_k / n (customers are 1-indexed here)
    # print(p)
    new = np.random.poisson(gamma/n)  # number of brand-new dishes for this customer
    z_tmp = np.concatenate((p_row(p),np.ones(new)))
    m = np.concatenate((m,np.zeros(new)))
    z.append(z_tmp)
K = len(z_tmp)  # total number of dishes (features) instantiated
plot_clustermat(z,'ibplist')
plt.xlabel('categorical variables')
plt.gca().xaxis.set_label_position('top')
plt.xticks(range(K),string.ascii_uppercase[:K])
plt.ylabel('continuous variables')
plt.yticks(range(D),['x'+ str(i) for i in range(D)]);
# -
# However, we might want a somewhat more sparse reltionship, without the strictness of exactly one that the CRP provides. For example, we might want both a small number of continuous dimensions to have intneractions and each one to have a small number of interactions. Instead of the classical IBP, we can try the 3IBP version from [<NAME> 201x](https://arxiv.org/pdf/1301.6647.pdf). Additionally, if we flip the role of columns and rows, combined with these new parameters we have enough control.
# +
# Three-parameter IBP (Teh & Gorur), adding a concentration theta and a
# discount alpha on top of the classic IBP rate gamma.
gamma = 3
theta = 5 # >0, set to 1 to recover above
alpha = .5 # in [0,1), set to 0 to recover above
z = []
z_tmp = np.ones(np.random.poisson(gamma))
m = np.zeros(z_tmp.shape)
z.append(z_tmp)
# NOTE(review): this loop starts at 2 while the classic IBP above starts
# at 1 — confirm whether the second customer is intentionally skipped.
for n in range(2,D):
    m += z_tmp
    # print(m)
    # Re-try probability for existing dish k is discounted by alpha.
    p = [(m_k- alpha)/(n + theta - 1) for m_k in m]
    # print(p)
    # Gamma-function ratios governing the rate of brand-new dishes.
    G1 = scisp.gamma(theta+1) /scisp.gamma(theta + n )
    G2 = scisp.gamma(theta+ alpha - 1 + n) /scisp.gamma(theta+ alpha)
    new = np.random.poisson(gamma*G1*G2)
    z_tmp = np.concatenate((p_row(p),np.ones(new)))
    m = np.concatenate((m,np.zeros(new)))
    z.append(z_tmp)
K = len(z_tmp)
# Axes are flipped relative to the plots above: categorical variables on y.
plot_clustermat(z,'ibplist')
plt.ylabel('categorical variables')
plt.gca().xaxis.set_label_position('top')
# NOTE(review): z has D-1 rows here (initial row + D-2 iterations) but the
# ticks label D rows — verify the intended range.
plt.yticks(range(D),string.ascii_uppercase[:D])
plt.xlabel('continuous variables')
plt.xticks(range(K),['x'+ str(i) for i in range(K)]);
# -
# This flip means that we directly control the number of categorical variables and only probabilisticallly control the number of continuous variables in sampling. In inference, it means we'll be learning the number of the cntinuos variables that are related to an unobserved categorical variable.
n_dim_cat = [sum(z_i) for z_i in z]
n_dim_cat
# The above is the number of dimensions for the means for each of the categorical variables.
#
# We now have to run the CRP sampler for each categorical variable, with additional adjustments in order to make the interactions happen. Alternatively, we have to specify the means so that this can be done, and sample some of them dependent on one another.
#
# Whenever two categorical variables influence the same output dimension, we need to either sample them conditionally or figure out how to construct the mean so that it reflects all of the influences.
| research_notebooks/generating_multiple_views.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from csynthpop import zone_synthesizer as zs
# -
# #### Specify sample data csv paths. See the files listed here for expected structure. Marginal tables require multi-indexed columns with category name and category value in levels 0 and 1 of the index. Sample file category columns should be labeled with corresponding category names and values in those columns should match the category value headers in the marginal table.
hh_marginal_file = './data/hh_marginals.csv'
person_marginal_file = './data/person_marginals.csv'
hh_sample_file = './data/household_sample.csv'
person_sample_file = './data/person_sample.csv'
def convert_to_str(margin, sample):
    """
    Cast every `sample` column that matches a marginal category to `str`.

    `margin` is expected to have MultiIndex columns whose level 0 holds
    the category names; any `sample` column with the same name is
    converted so its values compare equal to the category-value headers
    of the marginal table.

    Note: `sample` is modified in place and also returned for convenience.
    """
    # Unique category names from level 0 of the marginal's column index.
    category_names = np.unique([col[0] for col in margin.columns])
    for name in category_names:
        if name in sample.columns:
            sample[name] = sample[name].astype(str)
    return sample
# #### Load and process input marginals and samples and geography crosswalk
# +
hh_marg, p_marg, hh_sample, p_sample, xwalk = zs.load_data(hh_marginal_file, person_marginal_file, hh_sample_file, person_sample_file)
hh_sample = convert_to_str(hh_marg,hh_sample)
p_sample = convert_to_str(p_marg,p_sample)
# -
hh_sample
hh_marg = hh_marg.astype(int)
p_marg = p_marg.astype(int)
hh_marg.head()
p_marg.head()
p_sample.head()
p_sample.columns
hh_sample.columns
p_sample.dtypes
hh_sample.dtypes
xwalk
# #### Iterate over all marginals in the geography crosswalk and synthesize in-line
all_households, all_persons, all_stats = zs.synthesize_all_zones(hh_marg, p_marg, hh_sample, p_sample, xwalk)
all_households.head()
all_households.shape
all_persons.shape
# #### all_persons.household_id maps person records to all_households.index
all_persons.head()
all_stats
| silodemos/.ipynb_checkpoints/non_census_synthesis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TensorFlowと共に使ってみよう
#
# 以下のような宣言を一番先にしておくと便利。
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
# +
# Build the graph for a + b.
a = tf.constant(100)
b = tf.constant(50)
add_op = a + b
# Declare the variable v.
v = tf.Variable(0)
# Op that assigns the result of add_op to v.
let_op = tf.assign(v, add_op)
# Start a session.
sess = tf.Session()
# Initialize all variables.
# FIX: tf.initialize_all_variables() was deprecated in TF 0.12 and later
# removed; tf.global_variables_initializer() is the TF1 replacement.
sess.run(tf.global_variables_initializer())
# Run let_op.
sess.run(let_op)
# Print the value of v (expected: 150).
print(sess.run(v))
# -
| sample/ch5/tensorflow-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A hydro-thermal power system
# The Brazilian interconnected power system have four regions, SE, S, N and NE, denoted by 0,1,2,3 for simiplicity. In each region, there are one integrated reserviour and several thermal plants to provide energy. Energy exchange is allowed between regions and an additional transshipment station. If demand can not be satisfied, a deficit cost will be incur. The objective is to minimize the total cost over the designing period meanwhile meeting energy requirements and feasibility constraints.
# # Notation
# $v_{it}$: stored energy in subsystem (reservior) $i$ at the beginning of stage $t$
# $a_{it}$: energy inflow in subsystem $i$ during stage $t$
# $q_{it}$: turbined energy (hydro generation) in subsystem $i$ during stage $t$
# $s_{it}$: spilled energy in subsystem $i$ during stage $t$
# $g_{kt}$: thermal generation at stage $t$ of thermal plants $k$ in each subsystem $i$ during stage $t$
# $\textrm{ex}_{i\rightarrow j,t}$: energy exchange from subsystem $i$ to subsystem $j$
# $\textrm{ex}_{j\rightarrow i,t}$: energy exchange from subsystem $j$ to subsystem $i$
# $\textrm{df}_{ijt}$: deficit account for subsystem $i$ in subsystem $j$
# # Formulation
# Dynamics of each reservior $i$ is given by
# \begin{equation*}
# v_{i,t+1} + s_{it} + q_{it} - v_{it} = a_{it}
# \end{equation*}
# Thermal plant generated energy and the hydro generated energy are the sources to satisfy demand. Energy can be exchanged between reserviors. Energy deficit will incur if demand can't be met. The supply-demand equation is thus
# \begin{equation*}
# q_{it} + \sum_{k\in\omega_i} \textrm{g}_{kt} + \sum_{j} \textrm{df}_{ijt} -\sum_{j} \textrm{ex}_{i\rightarrow j,t} +\sum_{j} \textrm{ex}_{j\rightarrow i,t} = d_{it}
# \end{equation*}
# We assume there is no cost for hydro generation, $\$u_k$ for every megawatt thermal plant $k$ produces, $\$v_{ij}$ for every megawatt deficit account produces. Objective is to minimize energy generation cost (in thousands) over 12 months
# \begin{equation*}
# \sum_{t=1}^{12} \sum_i \big[\sum_k u_k g_{kt} + \sum_j v_{ij} \textrm{df}_{ijt}\big]
# \end{equation*}
import pandas
import numpy
import matplotlib.pyplot as plt
from msppy.msp import MSLP
from msppy.solver import Extensive
from msppy.solver import SDDP
from msppy.evaluation import Evaluation
import gurobipy
# # Data
hydro_ = pandas.read_csv("./data/hydro.csv", index_col=0)
demand = pandas.read_csv("./data/demand.csv", index_col=0)
deficit_ = pandas.read_csv("./data/deficit.csv", index_col=0)
exchange_ub = pandas.read_csv("./data/exchange.csv", index_col=0)
exchange_cost = pandas.read_csv("./data/exchange_cost.csv", index_col=0)
thermal_ = [pandas.read_csv("./data/thermal_{}.csv".format(i), index_col=0) for i in range(4)]
# The hydro_ dataframe gives the upper bounds of stored energy and hydro generation. It also gives the initial value of stored energy and inflow energy.
hydro_
# thermal_ is a list containing four dataframes. Each dataframe provides LB, UB, Obj for each thermal plants in a specific region.
thermal_[0].head()
# demand is a dataframe of monthly demand in each region. Demand is assumed to be deterministic.
demand
# deficit_ dataframe gives the maximum deficit energy each region can afford and its related costs. For example, to meet the demand for region 0, the maximum deficit energy each region can afford is 5%, 5%, 10%, 80% respectively.
deficit_
# The exchange_ub dataframe gives the upper bound of energy flows between four regions (0,1,2,3) and one transshipment station (4). Number 99999999 indicates no upper limit. The exchange_cost dataframe gives related costs.
exchange_ub
exchange_cost
# # Inflow modelling
# Inflow energy is assumed to be random. In this example, we use historical monthly data as scenarios.
# hist is a list containing four dataframes. Each dataframe gives historical monthly data for inflow energy.
hist = [pandas.read_csv("./data/hist_{}.csv".format(i), sep=";") for i in range(4)]
hist[0].head()
# The following plot is the first 200 months inflow data for reservior one. It clearly shows a seasonality pattern. While in this tutorial we will pretend scenarios are stage-wise independent discrete.
plt.plot(hist[0].values.flatten()[0:200])
# Concatenate the four dataframes and remove NA
hist = pandas.concat(hist, axis=1)
hist.dropna(inplace=True)
hist.drop(columns='YEAR', inplace=True)
# Each column of hist dataframe now gives scenarios of monthly inflow energy.
hist.head()
# Disjoin scenarios for each regions.
scenarios = [hist.iloc[:,12*i:12*(i+1)].transpose().values for i in range(4)]
# # Solution
T = 3
# ## STEP ONE: BUILD THE TRUE PROBLEM
# Build the multistage stochastic linear program: one model per stage t,
# with 4 subsystems (SE, S, N, NE = 0..3) and a transshipment node (index 4).
HydroThermal = MSLP(T=T, bound=0, discount=0.9906)
for t in range(T):
    m = HydroThermal[t]
    # State variables: stored energy per reservoir at the end (now) and
    # start (past) of the stage.
    stored_now, stored_past = m.addStateVars(4, ub=hydro_['UB'][:4], name="stored")
    # Small objective penalty on spillage discourages gratuitous spilling.
    spill = m.addVars(4, name="spill", obj=0.001)
    # Hydro generation is costless (appears only through the constraints).
    hydro = m.addVars(4, ub=hydro_['UB'][-4:], name="hydro")
    # deficit[i, j]: deficit account j for subsystem i, capped at a DEPTH
    # share of subsystem i's monthly demand and priced at the account cost.
    deficit = m.addVars(
        [(i,j) for i in range(4) for j in range(4)],
        ub = [demand.iloc[t%12][i] * deficit_['DEPTH'][j] for i in range(4) for j in range(4)],
        obj = [deficit_['OBJ'][j] for i in range(4) for j in range(4)],
        name = "deficit")
    # Thermal generation variables, one set per subsystem, with the
    # plant-specific bounds and unit costs.
    thermal = [None] * 4
    for i in range(4):
        thermal[i] = m.addVars(
            len(thermal_[i]),
            ub=thermal_[i]['UB'],
            lb=thermal_[i]['LB'],
            obj=thermal_[i]['OBJ'],
            name="thermal_{}".format(i)
        )
    # Energy exchange between the 4 regions and the transshipment station.
    exchange = m.addVars(5,5, obj=exchange_cost.values.flatten(),
                         ub=exchange_ub.values.flatten(), name="exchange")
    thermal_sum = m.addVars(4, name="thermal_sum")
    m.addConstrs(thermal_sum[i] == gurobipy.quicksum(thermal[i].values()) for i in range(4))
    # Supply-demand balance per subsystem:
    # thermal + deficit + hydro - exports + imports == demand.
    for i in range(4):
        m.addConstr(
            thermal_sum[i]
            + gurobipy.quicksum(deficit[(i,j)] for j in range(4))
            + hydro[i]
            - gurobipy.quicksum(exchange[(i,j)] for j in range(5))
            + gurobipy.quicksum(exchange[(j,i)] for j in range(5))
            == demand.iloc[t%12][i]
        )
    # The transshipment node neither produces nor consumes energy.
    m.addConstr(
        gurobipy.quicksum(exchange[(j,4)] for j in range(5))
        - gurobipy.quicksum(exchange[(4,j)] for j in range(5))
        == 0
    )
    # Reservoir dynamics: v_{t+1} + spill + hydro - v_t == inflow.
    # At t == 0 the inflow is the deterministic INITIAL value; at later
    # stages the right-hand side is random, drawn from the historical
    # monthly scenarios.
    for i in range(4):
        if t == 0:
            m.addConstr(
                stored_now[i] + spill[i] + hydro[i] - stored_past[i]
                == hydro_['INITIAL'][4:8][i]
            )
        else:
            m.addConstr(
                stored_now[i] + spill[i] + hydro[i] - stored_past[i] == 0,
                uncertainty={'rhs': scenarios[i][(t-1)%12]}
            )
    # Pin the incoming state at the first stage to the initial storage.
    if t == 0:
        m.addConstrs(stored_past[i] == hydro_['INITIAL'][:4][i] for i in range(4))
# ## STEP THREE: SOLVE AN APPROXIMATION MODEL
# ### Call an Extensive solver
# +
# Extensive(HydroThermal).solve(outputFlag=0)
# -
# ### Call an SDDP solver
HydroThermal_SDDP = SDDP(HydroThermal)
HydroThermal_SDDP.solve(logFile=0, n_processes=3, n_steps=12, max_iterations=40)
# ## STEP FOUR: EVALUATE THE COMPUTED POLICY
# We now evaluate the policy by computing the policy values exhaustively for every scenarios. Suppose we are interested in solutions of hydro-generation.
result = Evaluation(HydroThermal)
result.run(n_simulations=-1, query = ['hydro[{}]'.format(i) for i in range(4)])
# %matplotlib inline
result.solution['hydro[0]'].plot(legend = False)
# epv is the exact value of the expected policy value. pv is the list of computed policy values.
result.epv, numpy.std(result.pv)
| examples/hydro_thermal/quick_start.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from lib import transient
from imp import reload
reload(transient)
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
import matplotlib.cm as cmx
from mpl_toolkits.axes_grid1 import make_axes_locatable
# %matplotlib inline
import pandas as pd
# +
# Import every scan in the folder as a pump-power dependence, preprocess each
# trace (crop, shift t0, low-pass filter, remove DC, flip time axis), normalize
# each trace to its own peak, then plot all traces colored on a log scale.
tr = transient.Transient()
filepath = 'E:/steinn/new data/2017-11-28/'
files = os.listdir(filepath)
ax = []
trs = []
k_parameters = []
cutFreq = 0.01 # THz
t0 = 76  # time-zero position in the raw data [ps] -- presumably; confirm
key_parameter = 'pump_power'
description = ''
for i in range(len(files)):
    tr = transient.Transient()
    tr.import_file(filepath + files[i],cleanData=False, key_parameter = key_parameter, description=description)
    # print(tr.pump_power)
    tr.crop_time_scale()
    tr.shift_time(t0)
    tr.filter_low_pass(cutFreq)
    # tr.flip_trace()
    tr.remove_DC_offset()
    tr.flip_time()
    k_parameters.append(float(tr.pump_power))
    # normalize each trace to its own maximum
    tr.trace = np.divide(tr.trace,max(tr.trace))
    # tr.trace = np.divide(tr.trace,tr.pump_power)
    trs.append(tr)
print('Imported {0} scan(s) as {1} dependence'.format(len(trs),trs[0].key_parameter))
fig = plt.figure(figsize=(16,9))
ax = fig.add_subplot(111)
jet = cm = plt.get_cmap('jet')
cNormLog = colors.LogNorm(vmin=min(k_parameters), vmax=max(k_parameters))
cNorm = colors.Normalize(vmin=min(k_parameters), vmax=max(k_parameters))
scalarMap = cmx.ScalarMappable(norm=cNormLog, cmap=jet)
lines = []
for idx in range(len(trs)):
    colorVal = scalarMap.to_rgba(trs[idx].pump_power)
    retLine, = ax.plot(trs[idx].time, trs[idx].trace, color=colorVal)
    lines.append(retLine)
# make labels, sorted numerically by pump power so the legend reads in order
labels = []
for i in range(len(lines)):
    labels.append((lines[i],k_parameters[i]))
labels = sorted(labels, key=lambda kpar: float(kpar[1]))
lbLines=[]
lbVals=[]
for i in range(len(labels)):
    lbLines.append(labels[i][0])
    lbVals.append(labels[i][1])
ax.legend(lbLines, lbVals, loc='upper right')
# plt.xlim((-5,5))
# plt.ylim((-0.0002,0.0008))
ax.grid()
# +
# Same preprocessing as the previous cell, but for a temperature-dependence
# series: traces are colored by scan temperature and are NOT normalized.
tr = transient.Transient()
filepath = 'E:/steinn/RuCl3/Low Fluence_temperature/'
files = os.listdir(filepath)
ax = []
trs = []
k_parameters = []
cutFreq = 0.01 # THz
t0 = 76
# NOTE(review): key_parameter is 'pump_power' but the legend values below use
# tr.temperature -- confirm which parameter this series actually varies.
key_parameter = 'pump_power'
description = ''
for i in range(len(files)):
    tr = transient.Transient()
    tr.import_file(filepath + files[i],cleanData=False, key_parameter = key_parameter, description=description)
    print(tr.pump_power)
    tr.crop_time_scale()
    tr.shift_time(t0)
    tr.filter_low_pass(cutFreq)
    # tr.flip_trace()
    tr.remove_DC_offset()
    tr.flip_time()
    k_parameters.append(float(tr.temperature))
    # tr.trace = np.divide(tr.trace,max(tr.trace))
    # tr.trace = np.divide(tr.trace,tr.pump_power)
    trs.append(tr)
print('Imported {0} scan(s) as {1} dependence'.format(len(trs),trs[0].key_parameter))
fig = plt.figure(figsize=(16,9))
ax = fig.add_subplot(111)
jet = cm = plt.get_cmap('jet')
cNormLog = colors.LogNorm(vmin=min(k_parameters), vmax=max(k_parameters))
cNorm = colors.Normalize(vmin=min(k_parameters), vmax=max(k_parameters))
scalarMap = cmx.ScalarMappable(norm=cNormLog, cmap=jet)
lines = []
for idx in range(len(trs)):
    colorVal = scalarMap.to_rgba(trs[idx].temperature)
    retLine, = ax.plot(trs[idx].time, trs[idx].trace, color=colorVal)
    lines.append(retLine)
# make labels, sorted numerically by temperature
labels = []
for i in range(len(lines)):
    labels.append((lines[i],k_parameters[i]))
labels = sorted(labels, key=lambda kpar: float(kpar[1]))
lbLines=[]
lbVals=[]
for i in range(len(labels)):
    lbLines.append(labels[i][0])
    lbVals.append(labels[i][1])
ax.legend(lbLines, lbVals, loc='upper right')
# plt.xlim((-5,5))
# plt.ylim((-0.0002,0.0008))
ax.grid()
# -
trs[1].pump_energy
tr.filter_low_pass(cutHigh=0.05)
# +
# Pump-power series with spot sizes set so energy densities can be computed;
# each trace is divided by its pump energy before plotting.
filepath = 'E:/steinn/new data/2017-11-28/'
files = os.listdir(filepath)
pump_spot = 80
probe_spot = 50
cutFreq = 0.01 # THz
t0 = 80
key_parameter = 'pump_power'
description = ''
###########################
# init lists:
scanList = []
k_parameters = []
for i in range(len(files)):
    tr = transient.Transient()
    tr.import_file(filepath + files[i],cleanData=False, key_parameter = key_parameter, description=description)
    tr.pump_spot = pump_spot
    tr.probe_spot = probe_spot
    tr.calc_energy_densities()
    tr.crop_time_scale()
    tr.shift_time(t0)
    tr.filter_low_pass(cutFreq)
    # tr.flip_trace()
    tr.remove_DC_offset()
    tr.flip_time()
    k_parameters.append(float(getattr(tr,key_parameter)))
    # normalize by deposited pump energy rather than trace maximum
    tr.trace = np.divide(tr.trace,tr.pump_energy)
    scanList.append(tr)
print('Imported {0} scan(s) as {1} dependence'.format(len(scanList),scanList[0].key_parameter))
data1 = scanList
fig = plt.figure(figsize=(16,9))
ax = fig.add_subplot(111)
for idx in range(len(scanList)):
    ax.plot(scanList[idx].time, scanList[idx].trace)
# plt.xlim((-2,5))
# plt.ylim((-0.0003,0.002))
plt.grid()
# -
# NOTE(review): `data2` is never defined in this notebook -- this cell raises
# NameError as written; presumably it came from an earlier session.
data3 = data1[:4]
for i in data2:
    data3.append(i)
list(data3)
# +
# Collect all traces into a single DataFrame: one column per scan (named
# "<key_parameter value> <unit>"), indexed by the time axis of the first scan.
dataFrame = {}
key_parameter = 'pump_energy'
for tr in scanList:
    # tr.calc_energy_densities()
    name = str(getattr(tr, key_parameter)) + ' ' + str(tr.get_unit(key_parameter))
    dataFrame[name] = tr.trace
dataFrame = pd.DataFrame(dataFrame, index=scanList[0].time)
# -
# BUG FIX: the original `dataFrame[].plot(colormap='jet',)` was a SyntaxError
# (empty subscript). Plot the whole DataFrame instead.
dataFrame.plot(colormap='jet')
def plotScanList(data, key_parameter,
                 xlim=(0, 0), ylim=(0, 0),
                 norm=None,
                 save=True,
                 saveDir='e:/Steinn/Data/RuCl3/figs/',
                 title=None):
    """
    Plot a list of Transient scans, color-coded on a log scale by key_parameter,
    with a legend sorted by the parameter value.

    :param data: list of Transient scans (each providing .time, .trace and the
        key_parameter attribute)
    :param key_parameter: name of the scan attribute used for coloring/legend
    :param xlim: x-axis limits; (0, 0) means autoscale
    :param ylim: y-axis limits; (0, 0) means autoscale
    :param norm: None for raw traces, 'peak' to normalize each trace to its own
        maximum, or any attribute name to divide the trace by that attribute.
        NOTE: normalization mutates scan.trace in place.
    :param save: if True, save the figure under saveDir (created if missing)
    :param title: figure title, also used as the saved file name
    """
    kParList = []
    for scan in data:
        kParList.append(getattr(scan, key_parameter))
        if norm is not None:
            if norm == 'peak':
                scan.trace = np.divide(scan.trace, max(scan.trace))
            else:
                scan.trace = np.divide(scan.trace, getattr(scan, norm))
    fig = plt.figure(figsize=(16, 9))
    ax = fig.add_subplot(111)
    jet = plt.get_cmap('jet')
    # log-scale color mapping over the key-parameter range
    cNormLog = colors.LogNorm(vmin=min(kParList), vmax=max(kParList))
    scalarMap = cmx.ScalarMappable(norm=cNormLog, cmap=jet)
    lines = []
    for scan in data:
        colorVal = scalarMap.to_rgba(getattr(scan, key_parameter))
        retLine, = ax.plot(scan.time, scan.trace, color=colorVal)
        lines.append(retLine)
    # build the legend sorted numerically by the key parameter
    labels = sorted(zip(lines, kParList), key=lambda kpar: float(kpar[1]))
    lbLines = [lab[0] for lab in labels]
    lbVals = [lab[1] for lab in labels]
    ax.legend(lbLines, lbVals, loc='upper right')
    if xlim != (0, 0):
        plt.xlim(xlim)
    if ylim != (0, 0):
        plt.ylim(ylim)
    ax.grid()
    plt.title(title)
    # BUG FIX: the original assigned strings to plt.ylabel / plt.xlabel, which
    # clobbered the pyplot functions instead of labeling the axes.
    if norm is not None:
        plt.ylabel('dr/r [norm to {}]'.format(norm))
    else:
        plt.ylabel('dr/r')
    plt.xlabel('Time [ps]')
    if save:
        if not os.path.isdir(saveDir):
            os.makedirs(saveDir)
        plt.savefig(saveDir + title)
# +
# Same pipeline as the energy-normalized cell above, but each trace is
# normalized to its own peak instead of to the pump energy.
filepath = 'E:/steinn/new data/asdf2/'
files = os.listdir(filepath)
pump_spot = 80
probe_spot = 50
cutFreq = 0.01 # THz
t0 = 80
key_parameter = 'pump_power'
description = ''
###########################
# init lists:
scanList = []
k_parameters = []
for i in range(len(files)):
    tr = transient.Transient()
    tr.import_file(filepath + files[i],cleanData=False, key_parameter = key_parameter, description=description)
    tr.pump_spot = pump_spot
    tr.probe_spot = probe_spot
    tr.calc_energy_densities()
    tr.crop_time_scale()
    tr.shift_time(t0)
    tr.filter_low_pass(cutFreq)
    # tr.flip_trace()
    tr.remove_DC_offset()
    tr.flip_time()
    k_parameters.append(float(getattr(tr,key_parameter)))
    tr.trace = np.divide(tr.trace,max(tr.trace))
    scanList.append(tr)
print('Imported {0} scan(s) as {1} dependence'.format(len(scanList),scanList[0].key_parameter))
data1 = scanList
fig = plt.figure(figsize=(16,9))
ax = fig.add_subplot(111)
for idx in range(len(scanList)):
    ax.plot(scanList[idx].time, scanList[idx].trace)
# plt.xlim((-2,5))
# plt.ylim((-0.0003,0.002))
plt.grid()
# -
# # Export data from first power dependence:
# +
# Import the 0.1 mW temperature-dependence series, tag every scan with its
# temperature, and plot the raw traces.
filepath = 'E:/steinn/RuCl3/temperature dependence/p0,1mW/'
files = os.listdir(filepath)
pump_spot = 80
probe_spot = 50
cutFreq = 0.01 # THz
t0 = 80.5
key_parameter = 'pump_power'
description = ''
###########################
# init lists:
scanList = []
k_parameters = []
for i in range(len(files)):
    tr = transient.Transient()
    tr.import_file(filepath + files[i],cleanData=False, key_parameter = key_parameter, description=description)
    tr.pump_spot = pump_spot
    tr.probe_spot = probe_spot
    tr.calc_energy_densities()
    tr.crop_time_scale()
    tr.shift_time(t0)
    tr.filter_low_pass(cutFreq)
    # tr.flip_trace()
    tr.remove_DC_offset()
    tr.flip_time()
    k_parameters.append(float(getattr(tr,key_parameter)))
    # NOTE(review): temperature is hard-coded to 3.8 K for every scan here.
    tr.temperature = 3.8
    tr.description = str(tr.temperature) + 'K'
    tr.give_name()
    # tr.trace = np.divide(tr.trace,tr.pump_energy)
    scanList.append(tr)
print('Imported {0} scan(s) as {1} dependence'.format(len(scanList),scanList[0].key_parameter))
data1 = scanList
fig = plt.figure(figsize=(16,9))
ax = fig.add_subplot(111)
for idx in range(len(scanList)):
    ax.plot(scanList[idx].time, scanList[idx].trace)
# plt.xlim((-4,8))
# plt.ylim((-0.0003,0.006))
plt.grid()
# +
# Export every scan as CSV, creating the target directory if needed.
csvDir = 'e:/Steinn/Data/RuCl3/'
if not os.path.isdir(csvDir):
    os.makedirs(csvDir)
for scan in scanList:
    scan.export_file_csv(csvDir)
# +
# Re-import the exported CSVs as a sanity check (subdirectories are skipped).
csvScanList = []
for csv in os.listdir(csvDir):
    filePath = csvDir + csv
    if not os.path.isdir(filePath):
        tr = transient.Transient()
        tr.import_file(filePath,cleanData=False)
        # tr.filter_low_pass(cutHigh=0.01)
        csvScanList.append(tr)
# -
for scan in data1:
    print(scan.pump_energy)
title = '0,1mW-temperature-noNorm selected-01-12'
plotScanList(data1,'temperature',norm=None,title=title, save=True)
| Analysis Tests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Attacking PPD trained on MNIST
# ===
# This Notebook loads trained PPD models on MNIST dataset, Ensembles them and generates adversarial examples using different attacks.
#
# The models are loaded from saved_models >> mnistdense
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
from copy import deepcopy
# +
# This block is needed because before loading the trained models, the graph should be loaded with the same
# structure as it was trained.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from cleverhans.model import Model
# ______________________________________ content of my local utils_tf.py _______________________________
def fftshift(inputs):
    """
    Calculates and returns fftshift of the inputs tensor along second and third axes.
    :param inputs: Tensor with shape (None, ., ., .)
    :return: a Tensor which is fftshift of inputs along second and third axes with same dtype and shape
    """
    # All axes except the batch (first) and channel (last) axes.
    axes = range(1, len(inputs.shape) - 1)
    for k in axes:
        n = inputs.shape[k]
        p2 = (n + 1) // 2
        # Index order [p2..n-1, 0..p2-1] swaps the two halves along axis k,
        # mirroring numpy.fft.fftshift for a single axis.
        my_list = tf.concat((tf.range(p2, n), tf.range(p2)), axis=0)
        inputs = tf.gather(inputs, my_list, axis=k)
    return inputs
def pixel2phase(inputs):
    """
    convert the inputs images to the phase domain along each channel.
    :param inputs: Tensor with shape (None, height, width, channels)
    :return: Tensor with same shape and dtype as inputs
    """
    inputs_dtype = inputs.dtype
    dtype = tf.complex64
    inputs = tf.cast(inputs, dtype=dtype)
    # tf.fft2d transforms the two innermost axes, so move channels before the
    # spatial axes, transform, move them back, then center DC with fftshift.
    input_f = fftshift(tf.transpose(tf.fft2d(tf.transpose(inputs, perm=[0, 3, 1, 2])), perm=[0, 2, 3, 1]))
    # Zero out near-zero frequency components: their phase would be numerical noise.
    input_f = tf.where(tf.less(tf.abs(input_f), 1e-5), tf.zeros(tf.shape(input_f), dtype=dtype), input_f)
    return tf.cast(tf.angle(input_f), dtype=inputs_dtype)
#_______________________________________________________________________________________________________
#________________________________________content of my local model_structure.py ________________________
class ModelDense(Model):
    """
    Cleverhans MLP model that first maps pixel images to the Fourier phase
    domain (pixel2phase) and then applies two hidden ReLU layers (800, 300)
    followed by a linear softmax layer, all l2-regularized with `reg`.
    """
    def __init__(self, scope, nb_classes, reg, **kwargs):
        # Extra kwargs are deliberately discarded.
        del kwargs
        Model.__init__(self, scope, nb_classes, locals())
        self.reg = reg  # l2 regularization strength for all dense layers
    def fprop(self, x, **kwargs):
        """Forward pass; returns a dict with logits and softmax probabilities."""
        del kwargs
        my_conv = functools.partial(tf.layers.dense, activation=tf.nn.relu,
                                    kernel_regularizer=tf.contrib.layers.l2_regularizer(self.reg),
                                    kernel_initializer=HeReLuNormalInitializer,
                                    )
        # AUTO_REUSE lets repeated fprop calls share one set of variables.
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            y = pixel2phase(x)
            y = tf.layers.flatten(y)
            y = my_conv(y, 800)
            y = my_conv(y, 300)
            logits = tf.layers.dense(y, self.nb_classes,
                                     kernel_regularizer=tf.contrib.layers.l2_regularizer(self.reg),
                                     kernel_initializer=HeReLuNormalInitializer)
        return {self.O_LOGITS: logits,
                self.O_PROBS: tf.nn.softmax(logits=logits)}
class HeReLuNormalInitializer(tf.initializers.random_normal):
    """He-style normal initializer for ReLU layers: stddev = 1/sqrt(fan_in)."""
    def __init__(self, dtype=tf.float32):
        # NOTE(review): the parent __init__ is intentionally not called; only
        # dtype is stored and __call__ draws the samples directly.
        self.dtype = tf.as_dtype(dtype)
    def get_config(self):
        return dict(dtype=self.dtype.name)
    def __call__(self, shape, dtype=None, partition_info=None):
        del partition_info
        dtype = self.dtype if dtype is None else dtype
        # fan_in = product of all but the last dimension; +1e-7 guards rsqrt(0).
        std = tf.rsqrt(tf.cast(tf.reduce_prod(shape[:-1]), tf.float32) + 1e-7)
        return tf.random_normal(shape, stddev=std, dtype=dtype)
# +
# _________________________________________ content of utils.py ______________________________
def _permute_index(l, seed):
    """
    Creates a permutation of np.array([0, ..., l-1]) and its inverse.
    The global NumPy RNG state is saved and restored, so calling this does not
    disturb other random draws.

    :param l: length of the array to permute
    :param seed: permutation seed
    :return: (s, s_inverse) where s is a permutation of np.arange(l) and
        s_inverse is its inverse (s[s_inverse] == np.arange(l))
    """
    saved_state = np.random.get_state()
    np.random.seed(seed)
    perm = np.arange(l)
    np.random.shuffle(perm)
    np.random.set_state(saved_state)
    return perm, np.argsort(perm)
def permute(data, seed):
    """
    Permutes the pixels of each image in `data` with the given seed, applying
    the same spatial permutation to every channel independently.

    :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
    :param seed: permutation seed. If seed=None returns data without permutation
    :return: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
        of permuted images (dtype float64 because np.zeros is used -- this
        matches the historical behavior)
    """
    # Fix: the original had a second, redundant triple-quoted string after the
    # docstring; it was a dead no-op statement and has been removed.
    nb_images, img_rows, img_cols, nb_channels = data.shape
    if seed is None:
        return data
    l = img_rows * img_cols  # length of the permutation array
    s, _ = _permute_index(l, seed)
    output = np.zeros(data.shape)
    for ch in range(nb_channels):
        # flatten the spatial dims, reorder columns by s, restore the shape
        output[:, :, :, ch] = data[:, :, :, ch].reshape(-1, l)[:, s].reshape(-1, img_rows, img_cols)
    return output
def ipermute(data, seed):
    """
    Inverse of permute: undoes the pixel permutation applied with `seed`.

    :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
    :param seed: permutation seed. If seed=None returns data without permutation
    :return: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
        of inverse-permuted images
    """
    nb_images, img_rows, img_cols, nb_channels = data.shape
    if seed is None:
        return data
    flat_len = img_rows * img_cols  # length of the permutation array
    _, inverse_idx = _permute_index(flat_len, seed)
    result = np.zeros(data.shape)
    for ch in range(nb_channels):
        flat = data[:, :, :, ch].reshape(-1, flat_len)
        result[:, :, :, ch] = flat[:, inverse_idx].reshape(-1, img_rows, img_cols)
    return result
# +
def log_attack(attack_name, adv_x, perturbation_strength, attack_params):
    """
    Saves the adversarial examples and the attack parameters under
    'Attack Logs/<attack_name>/', using the perturbation strength as the
    file-name stem.

    :param attack_name: string of attack name used as the subfolder name
    :param adv_x: numpy array of adversarial examples (saved as <strength>.npy)
    :param perturbation_strength: scalar used to name the saved files
    :param attack_params: dict of attack parameters, dumped as JSON into
        params<strength>.txt
    """
    import json  # local import kept: json is only needed for parameter logging
    directory = os.path.join('Attack Logs', attack_name)
    if not os.path.exists(directory):
        os.makedirs(directory)
    params_path = os.path.join(directory, 'params' + str(perturbation_strength) + '.txt')
    # Fix: renamed the handle -- the original shadowed the builtin `file`.
    with open(params_path, 'w') as fh:
        fh.write(json.dumps(attack_params))  # use `json.loads` to do the reverse
    np.save(os.path.join(directory, str(perturbation_strength)), adv_x)
def _read_attack(attack_name, perturbation_strength):
    """
    Loads the adversarial examples saved under the given attack subfolder for
    one perturbation strength.

    :param attack_name: string of attack name used for the folder to read from
    :param perturbation_strength: a float or string identifying the attack file
    :return: numpy array of adversarial examples
    """
    fname = str(perturbation_strength) + '.npy'
    return np.load(os.path.join('Attack Logs', attack_name, fname))
def read_attack(attack_name):
    """
    Loads every saved adversarial-example array for an attack.

    :param attack_name: string of attack name (subfolder of 'Attack Logs')
    :return: dict mapping perturbation strength (float, parsed from the file
        name stem) to the loaded numpy array
    """
    directory = os.path.join('Attack Logs', attack_name)
    out = dict()
    for filename in os.listdir(directory):
        if filename.endswith('.npy'):
            path_to_file = os.path.join(directory, filename)
            # Fix: np.float was removed in NumPy 1.24; the builtin float is
            # what the original alias resolved to anyway.
            out[float(os.path.splitext(filename)[0])] = np.load(path_to_file)
    return out
# +
def measure_perturbation(x, adv_x, order):
    """
    Average perturbation between x and adv_x. Each image is flattened to a
    vector of length img_rows*img_cols*nb_channels before the norm is taken.

    :param x: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
    :param adv_x: numpy array with same shape as x
    :param order: order of the norm (mimics numpy): np.inf, 1 or 2
    :return: scalar perturbation between x and adv_x averaged over images
    """
    nb_images = x.shape[0]
    flat_diff = np.reshape(x - adv_x, (nb_images, -1))
    per_image_norms = np.linalg.norm(flat_diff, order, axis=1)
    return per_image_norms.mean()
def random_perturb(x, perturbation_strength, order):
    """
    Randomly perturbs the pixels of x so that the per-image flattened norm of
    the perturbation equals perturbation_strength, i.e.
    measure_perturbation(x, random_perturb(x, s, order), order) == s.
    For order=np.inf each pixel moves by +-perturbation_strength; for order 1
    and 2 a uniform noise image is rescaled to the requested norm.

    :param x: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
    :param perturbation_strength: scalar strength of the noise
    :param order: order of the norm (mimics numpy): np.inf, 1 or 2
    :return: numpy array with same shape as x containing the perturbed images
    """
    nb_images = x.shape[0]
    if order == np.inf:
        # random +-1 sign per pixel, scaled to the requested strength
        signs = np.random.randint(0, 2, size=x.shape) * 2 - 1
        return x + signs * perturbation_strength
    if order in (1, 2):
        noise = np.random.rand(*x.shape).reshape(nb_images, -1) - 0.5
        if order == 1:
            norms = np.sum(np.abs(noise), axis=1)
        else:
            norms = np.linalg.norm(noise, 2, axis=1)
        scaled = noise * np.expand_dims(perturbation_strength / norms, axis=1)
        return x + scaled.reshape(x.shape)
    raise ValueError('order should be np.inf, 1 or 2')
# +
def log_plot_data(attack_name, header, arr):
    """
    Concatenates the 1-D arrays in `arr` column-wise and saves them as a CSV
    file named 'plot_data' under 'Attack Logs/<attack_name>/'.

    :param attack_name: string of attack name (the folder in which data is to be logged)
    :param header: list of strings denoting the column name for each element of arr
    :param arr: list of equal-length numpy arrays. For example: [strength, adv_acc, ...]
    """
    import pandas as pd
    directory = os.path.join('Attack Logs', attack_name)
    # Fix: create the folder if needed -- the original crashed when
    # log_attack had not been called for this attack_name first.
    if not os.path.isdir(directory):
        os.makedirs(directory)
    columns = [np.array(a).reshape(-1, 1) for a in arr]
    table = np.concatenate(tuple(columns), axis=1)
    df = pd.DataFrame(table, columns=header)
    df.to_csv(os.path.join(directory, 'plot_data'), index=False)
def load_plot_data(attack_name):
    """
    Reads back data saved with log_plot_data.

    :param attack_name: string of attack name (the folder to read from)
    :return: a pandas DataFrame containing the plot data
    """
    import pandas as pd
    csv_path = os.path.join('Attack Logs', attack_name, 'plot_data')
    return pd.read_csv(csv_path)
# +
from cleverhans.utils_tf import tf_model_load
class Ensemble(object):
    """
    Loads one trained model per permutation seed -- each in its own tf.Graph
    and tf.Session -- and aggregates their softmax outputs into an ensemble
    prediction. Every model sees its own pixel permutation of the input
    (see permute()).
    """
    def __init__(self, seeds, directory, model_class, **kwargs):
        # Dataset / model hyper-parameters with MNIST defaults.
        self.img_rows = kwargs.get('img_rows', 28)
        self.img_cols = kwargs.get('img_cols', 28)
        self.nb_channels = kwargs.get('nb_channels', 1)
        self.nb_classes = kwargs.get('nb_classes', 10)
        self.reg = kwargs.get('reg', 5e-3)
        self.scope = kwargs.get('scope', 'model1')
        self.seeds = seeds
        self.directory = directory
        self.model_class = model_class
        self.sessions_dict, self.models_dict, self.placeholders_dict = self.load_models()
    def load_models(self):
        """Builds one graph+session per seed and restores its checkpoint from
        <directory>/<seed>/mnist. Returns (sessions, models, placeholders)
        dicts keyed by seed."""
        sessions_dict = dict()
        models_dict = dict()
        placeholders_dict = dict()
        for seed in self.seeds:
            sess = tf.Session(graph=tf.Graph())
            with sess.graph.as_default():
                # ____________________ defining the model graph ________________________
                x = tf.placeholder(tf.float32, shape=(None, self.img_rows, self.img_cols, self.nb_channels))
                model = self.model_class(scope=self.scope, nb_classes=self.nb_classes, reg=self.reg)
                preds = model.get_logits(x)
                # ______________________________________________________________________
                model_path = os.path.join(self.directory, str(seed), 'mnist')
                # NOTE(review): if the checkpoint is missing, the model is kept
                # with uninitialized variables rather than skipped -- confirm
                # every seed listed has actually been trained.
                if os.path.exists(model_path + ".meta"):
                    tf_model_load(sess, model_path)
            sessions_dict[seed] = sess
            models_dict[seed] = model
            placeholders_dict[seed] = x
        return sessions_dict, models_dict, placeholders_dict
    def predict(self, unpermuted_pixel_data, seeds=None):
        """Average softmax prediction over the models in `seeds` (all models
        when None). Each model receives its own permutation of the input."""
        if seeds is None:
            seeds = self.seeds
        total_pred = 0
        for seed in seeds:
            sess = self.sessions_dict[seed]
            with sess.graph.as_default():
                model = self.models_dict[seed]
                x = self.placeholders_dict[seed]
                preds = model.get_probs(x)
                total_pred += sess.run(preds, feed_dict={x: permute(unpermuted_pixel_data, seed=seed)})
        return total_pred/len(seeds)
    def get_model(self, seed):
        # Accessors for attacking a single member model: (model, session, input placeholder).
        return self.models_dict[seed], self.sessions_dict[seed], self.placeholders_dict[seed]
    def accuracy_plot(self, unpermuted_pixel_data, true_labels, seeds=None):
        """Running ensemble accuracy: element i is the accuracy of the
        ensemble built from the first i+1 models in `seeds`."""
        if seeds is None:
            seeds = self.seeds
        out = []
        total_pred = 0
        for seed in seeds:
            sess = self.sessions_dict[seed]
            with sess.graph.as_default():
                model = self.models_dict[seed]
                x = self.placeholders_dict[seed]
                preds = model.get_probs(x)
                total_pred += sess.run(preds, feed_dict={x: permute(unpermuted_pixel_data, seed=seed)})
            p = total_pred/(len(out) + 1)
            out.append(np.mean(np.equal(np.argmax(p, axis=1), np.argmax(true_labels, axis=1))))
        return out
    def accuracy(self, unpermuted_pixel_data, true_labels, seeds=None):
        """Ensemble top-1 accuracy against one-hot `true_labels`."""
        return np.mean(np.equal(np.argmax(self.predict(unpermuted_pixel_data, seeds=seeds), axis=1), np.argmax(true_labels, axis=1)))
# +
# Experiment configuration: dataset shape, model hyper-parameters, and
# evaluation batch size shared by all attack cells below.
dataset_params = {
    'img_rows': 28,
    'img_cols': 28,
    'nb_channels': 1,
    'nb_classes': 10
}
model_params = {
    'scope': 'model1',
    'reg': 5e-3
}
eval_params = {
    'batch_size': 128
}
ensemble_params = deepcopy(dataset_params)
ensemble_params.update(model_params)
# +
# Load MNIST, scale pixels to [0, 1], add a channel axis, one-hot the labels.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, axis=3).astype('float32')/255
x_test = np.expand_dims(x_test, axis=3).astype('float32')/255
y_train = tf.keras.utils.to_categorical(y_train, dataset_params['nb_classes'])
y_test = tf.keras.utils.to_categorical(y_test, dataset_params['nb_classes'])
print("x_train shape =", x_train.shape)
print("y_train shape =", y_train.shape)
print ("x_test shape =", x_test.shape)
# -
seeds = range(100, 151) # before loading models they should be trained and saved using main_mnist.py
directory = os.path.join('saved_models', 'mnistdense')
ensemble = Ensemble(seeds=seeds, directory=directory, model_class=ModelDense, **ensemble_params)
out = ensemble.accuracy_plot(x_test, y_test)
# acc = ensemble.accuracy(x_test, y_test, seeds=range(100, 150)) # if seeds=None, accuracy on all the ensembled models is provided. if a list
# of models is provided as seeds (for example seeds=[100, 101, 102]) ensemble of accuracy on models in seeds is provided.
plt.plot(out)
plt.grid()
# FGSM
# ===
# +
# performing FGSM attack against a single member model; the crafted examples
# are mapped back to unpermuted pixel space with ipermute for logging.
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils_tf import model_eval
seed = 100 # model for which attack is performed
model, sess, x = ensemble.get_model(seed)
nb_attacked_images = 100
fgsm_params = {
    'eps': 3.992,
    'ord': 2
}
print('Attacking model {0} ...'.format(seed))
fgsm = FastGradientMethod(model, sess=sess)
with sess.graph.as_default():
    y = tf.placeholder(tf.float32, shape=(None, dataset_params['nb_classes']))
    adv_x_fgsm = ipermute(fgsm.generate_np(permute(x_test[0:nb_attacked_images], seed), **fgsm_params), seed)
    preds = model.get_logits(x)
    adv_acc = model_eval(sess, x, y, preds, permute(adv_x_fgsm, seed=seed), y_test[0:nb_attacked_images], args=eval_params)
print('Accuracy on model {0}:'.format(seed), adv_acc)
# -
order = 2
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x_fgsm, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
log_attack(attack_name='fgml2', adv_x=adv_x_fgsm, perturbation_strength=perturbation_strength, attack_params=fgsm_params)
# CW Attack
# ===
# +
# performing CW attack (Carlini-Wagner L2) on the same member model
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.utils_tf import model_eval
seed = 100 # model for which attack is performed
model, sess, x = ensemble.get_model(seed)
nb_attacked_images = 10000
cw_params = {'binary_search_steps': 1,
             'max_iterations': 300,
             'learning_rate': 0.1, # it was .005
             'initial_const': 10, # it was 0.01
             'batch_size' : 1,
             'confidence': 0,
             'abort_early': True}
cw = CarliniWagnerL2(model, sess=sess)
with sess.graph.as_default():
    y = tf.placeholder(tf.float32, shape=(None, dataset_params['nb_classes']))
    adv_x_cw = ipermute(cw.generate_np(permute(x_test[0:nb_attacked_images], seed=seed), **cw_params), seed=seed)
    preds = model.get_logits(x)
    adv_acc = model_eval(sess, x, y, preds, permute(adv_x_cw[0:nb_attacked_images], seed=seed), y_test[0:nb_attacked_images], args=eval_params)
print('Accuracy on model {0}:'.format(seed), adv_acc)
# -
order = 2
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x_cw, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
log_attack(attack_name='cwl2', adv_x=adv_x_cw, perturbation_strength=perturbation_strength, attack_params=cw_params)
# PGD
# ===
# +
# projected gradient descent (Madry et al.) with an L2 ball
from cleverhans.attacks import MadryEtAl
from cleverhans.utils_tf import model_eval
seed = 100 # model for which attack is performed
model, sess, x = ensemble.get_model(seed)
nb_attacked_images = 10000
pgd_params = {'eps': 1.074, # madry 0.3
              'eps_iter': 0.1, # madry 0.01
              'nb_iter': 200, # madry 100
              'ord': 2
              }
pgd = MadryEtAl(model, sess=sess)
with sess.graph.as_default():
    y = tf.placeholder(tf.float32, shape=(None, dataset_params['nb_classes']))
    adv_x_pgd = ipermute(pgd.generate_np(permute(x_test[0:nb_attacked_images], seed=seed), **pgd_params), seed=seed)
    preds = model.get_logits(x)
    adv_acc = model_eval(sess, x, y, preds, permute(adv_x_pgd, seed=seed), y_test[0:nb_attacked_images], args=eval_params)
print('Accuracy on model {0}:'.format(seed), adv_acc)
# -
order = pgd_params['ord']
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x_pgd, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
log_attack(attack_name='pgdl2', adv_x=adv_x_pgd, perturbation_strength=perturbation_strength, attack_params=pgd_params)
# MIM
# ===
# +
# momentum iterative method (default ord is inf in cleverhans)
from cleverhans.attacks import MomentumIterativeMethod
from cleverhans.utils_tf import model_eval
seed = 100 # model for which attack is performed
model, sess, x = ensemble.get_model(seed)
nb_attacked_images = 100
mim = MomentumIterativeMethod(model, sess=sess)
mim_params = {'eps_iter': 0.01,
              'nb_iter': 100,
              'decay_factor': 1,
              'eps': .3}
with sess.graph.as_default():
    y = tf.placeholder(tf.float32, shape=(None, dataset_params['nb_classes']))
    adv_x_mim = ipermute(mim.generate_np(permute(x_test[0:nb_attacked_images], seed=seed), **mim_params), seed=seed)
    preds = model.get_logits(x)
    adv_acc = model_eval(sess, x, y, preds, permute(adv_x_mim, seed=seed), y_test[0:nb_attacked_images], args=eval_params)
print('Accuracy on model {0}:'.format(seed), adv_acc)
# -
order = np.inf
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x_mim, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
log_attack(attack_name='miml2', adv_x=adv_x_mim, perturbation_strength=perturbation_strength, attack_params=mim_params)
# BIM
# ===
# +
# basic iterative method (iterated FGSM, L-inf ball)
from cleverhans.attacks import BasicIterativeMethod
from cleverhans.utils_tf import model_eval
seed = 100 # model for which attack is performed
model, sess, x = ensemble.get_model(seed)
nb_attacked_images = 100
bim = BasicIterativeMethod(model, sess=sess)
bim_params = {'eps': .3,
              'eps_iter': 0.01,
              'ord': np.inf,
              'nb_iter': 200}
with sess.graph.as_default():
    y = tf.placeholder(tf.float32, shape=(None, dataset_params['nb_classes']))
    adv_x_bim = ipermute(bim.generate_np(permute(x_test[0:nb_attacked_images], seed=seed), **bim_params), seed=seed)
    preds = model.get_logits(x)
    adv_acc = model_eval(sess, x, y, preds, permute(adv_x_bim, seed=seed), y_test[0:nb_attacked_images], args=eval_params)
print('Accuracy on model {0}:'.format(seed), adv_acc)
# -
order = bim_params['ord']
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x_bim, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
log_attack(attack_name='bim', adv_x=adv_x_bim, perturbation_strength=perturbation_strength, attack_params=bim_params)
# ENM
# ===
# +
# elastic-net method; beta=0 makes the l1 term vanish (pure l2 objective)
from cleverhans.attacks import ElasticNetMethod
from cleverhans.utils_tf import model_eval
seed = 100 # model for which attack is performed
model, sess, x = ensemble.get_model(seed)
nb_attacked_images = 10000
enm = ElasticNetMethod(model, sess=sess)
enm_params = {'binary_search_steps': 1,
              'max_iterations': 200,
              'initial_const': .5,
              'beta': 0,
              'batch_size': 1}
with sess.graph.as_default():
    y = tf.placeholder(tf.float32, shape=(None, dataset_params['nb_classes']))
    adv_x_enm = ipermute(enm.generate_np(permute(x_test[0:nb_attacked_images], seed=seed), **enm_params), seed=seed)
    preds = model.get_logits(x)
    adv_acc = model_eval(sess, x, y, preds, permute(adv_x_enm, seed=seed), y_test[0:nb_attacked_images], args=eval_params)
print('Accuracy on model {0}:'.format(seed), adv_acc)
# -
order = 1
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x_enm, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
log_attack(attack_name='enm_beta0', adv_x=adv_x_enm, perturbation_strength=perturbation_strength, attack_params=enm_params)
# Saliency Map Method
# ===
# +
# JSMA: perturbs the gamma-fraction of pixels with the largest saliency
from cleverhans.attacks import SaliencyMapMethod
from cleverhans.utils_tf import model_eval
seed = 100 # model for which attack is performed
model, sess, x = ensemble.get_model(seed)
nb_attacked_images = 1000
smm = SaliencyMapMethod(model, sess=sess)
smm_params = {
    'theta': 1.,
    'gamma': .03
}
with sess.graph.as_default():
    y = tf.placeholder(tf.float32, shape=(None, dataset_params['nb_classes']))
    adv_x_smm = ipermute(smm.generate_np(permute(x_test[0:nb_attacked_images], seed=seed), **smm_params), seed=seed)
    preds = model.get_logits(x)
    adv_acc = model_eval(sess, x, y, preds, permute(adv_x_smm, seed=seed), y_test[0:nb_attacked_images], args=eval_params)
print('Accuracy on model {0}:'.format(seed), adv_acc)
# -
order = 2
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x_smm, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
log_attack(attack_name='saliencyl2', adv_x=adv_x_smm, perturbation_strength=perturbation_strength, attack_params=smm_params)
# DeepFool
# ===
# +
# DeepFool: iteratively steps toward the nearest decision boundary
from cleverhans.attacks import DeepFool
from cleverhans.utils_tf import model_eval
seed = 100 # model for which attack is performed
model, sess, x = ensemble.get_model(seed)
nb_attacked_images = 10000
deep_fool = DeepFool(model, sess=sess)
deep_fool_params = {
    'nb_candidate': dataset_params['nb_classes'],
    'overshoot': 0.1,
    'max_iter': 10
}
with sess.graph.as_default():
    y = tf.placeholder(tf.float32, shape=(None, dataset_params['nb_classes']))
    adv_x_deep_fool = ipermute(deep_fool.generate_np(permute(x_test[0:nb_attacked_images], seed=seed), **deep_fool_params), seed=seed)
    preds = model.get_logits(x)
    adv_acc = model_eval(sess, x, y, preds, permute(adv_x_deep_fool, seed=seed), y_test[0:nb_attacked_images], args=eval_params)
print('Accuracy on model {0}:'.format(seed), adv_acc)
# -
order = 2
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x_deep_fool, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
log_attack(attack_name='deepfooll2', adv_x=adv_x_deep_fool, perturbation_strength=perturbation_strength, attack_params=deep_fool_params)
# EOT with PGD
# ===
# +
# Build one TF graph that evaluates 30 ensemble members (seeds 100-129) on
# the same single input image and averages a targeted cross-entropy loss
# over all of them; the gradient of that mean loss drives the PGD loop below.
attacked_models = range(100, 130)
TARGET = 0
ensemble_logits = []
# Single-image placeholder (batch size fixed to 1).
x = tf.placeholder(tf.float32, shape=(1, dataset_params['img_rows'], dataset_params['img_cols'], dataset_params['nb_channels']))
for seed in attacked_models:
    model, _, _ = ensemble.get_model(seed)
    ensemble_logits.append(model.get_logits(x))
# One row of logits per ensemble member.
ensemble_logits = tf.concat(ensemble_logits, axis=0)
ensemble_preds = tf.nn.softmax(ensemble_logits)
# The same one-hot target label (class TARGET) replicated for every member.
ensemble_labels = tf.tile(tf.expand_dims(tf.one_hot(TARGET, dataset_params['nb_classes']), axis=0), (ensemble_logits.shape[0], 1))
ensemble_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=ensemble_logits, labels=ensemble_labels))
# Gradient of the mean targeted loss w.r.t. the input image.
ensemble_grad, = tf.gradients(ensemble_loss, x)
# +
# PGD: for each test image, take 50 gradient-descent steps on the targeted
# ensemble loss, projecting back into the EPSILON l_inf ball (intersected
# with the valid pixel range [0, 1]) after every step.
LR = .1
EPSILON = .3
nb_attacked_images = 100
tmp = []
for j in range(nb_attacked_images):
    print(j)
    orig = x_test[j:j+1]
    adv = np.copy(orig)
    # Per-pixel projection bounds: l_inf box around the original, clipped to [0, 1].
    lower = np.clip(orig-EPSILON, 0, 1)
    upper = np.clip(orig+EPSILON, 0, 1)
    # Alternative (commented out): rebuild the graph per image with a
    # per-image target class instead of the fixed TARGET above.
    # TARGET = (np.argmax(y_test[j]) + 1) % 10
    # ensemble_logits = []
    # x = tf.placeholder(tf.float32, shape=(1, dataset_params['img_rows'], dataset_params['img_cols'], dataset_params['nb_channels']))
    # for seed in attacked_models:
    #     model, _, _ = ensemble.get_model(seed)
    #     ensemble_logits.append(model.get_logits(x))
    # ensemble_logits = tf.concat(ensemble_logits, axis=0)
    # ensemble_preds = tf.nn.softmax(ensemble_logits)
    # ensemble_labels = tf.tile(tf.expand_dims(tf.one_hot(TARGET, dataset_params['nb_classes']), axis=0), (ensemble_logits.shape[0], 1))
    # ensemble_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=ensemble_logits, labels=ensemble_labels))
    # ensemble_grad, = tf.gradients(ensemble_loss, x)
    with tf.Session() as sess:
        for i in range(50):
            # NOTE(review): global_variables_initializer() is run on EVERY
            # step; if the ensemble's trained weights live in these graph
            # variables this re-initializes them each iteration — confirm the
            # weights are restored elsewhere, and consider hoisting this call
            # out of the loop (it is at minimum redundant work).
            sess.run(tf.global_variables_initializer())
            g, p= sess.run([ensemble_grad, ensemble_preds], {x: adv})
            if i % 10 == 0:
                print('step %d, preds=%s' % (i, np.argmax(p, axis=1)))
            # step: descend the targeted loss (moves predictions toward TARGET)
            adv -= LR * g
            # project back into the l_inf ball intersected with [0, 1]
            adv = np.clip(adv, lower, upper)
    tmp.append(adv)
# Stack all single-image adversarial batches into one array.
adv_x = np.concatenate(tmp, axis=0)
# -
# Evaluate the PGD attack: l_inf perturbation size, a visual sample, and
# transfer accuracy on 20 held-out ensemble members (seeds 130-149).
order = np.inf
perturbation_strength = measure_perturbation(x_test[0:nb_attacked_images], adv_x, order=order)
print('Average l_{0} norm perturbation of adversarial examples:'.format(order), perturbation_strength)
# Visual sanity check of the first adversarial image (28x28 — MNIST-shaped).
plt.imshow(adv_x[0].reshape(28, 28), cmap='gray')
# Ensemble predictions over all adversarial images for the attacked models.
# NOTE(review): np.argmax without an axis returns a single flat index over
# the whole prediction array — likely axis=1 was intended; confirm.
p = ensemble.predict(adv_x[0:], seeds=attacked_models)
print(np.argmax(p))
# Accuracy of models NOT used in the attack (transferability check).
acc = ensemble.accuracy(adv_x, y_test[0:nb_attacked_images], seeds=range(130,150))
print(acc)
# Log Perturbation Plot Data
# ===
# +
# Read back a previously logged attack and compare its effect against
# random perturbations of equal strength.
attack_name = 'pgdl2'
order = 2 # this should be consistent with attack parameters
seed = 100 # model for which attack was performed
seeds = range(101, 151) # other models used for ensemble
nb_attacked_images = 10000
# d maps perturbation strength -> adversarial examples logged at that strength.
d = read_attack(attack_name = attack_name)
# +
# generating random perturbation of x with same power as adversary
# Accumulators, one entry per perturbation strength:
#   adv_acc                 - accuracy of the attacked model on the adv. examples
#   adv_acc_mean_other      - mean accuracy of the other models, each evaluated alone
#   adv_acc_ensemble_other  - accuracy of the other models evaluated jointly
#                             (presumably averaged/voted — see the Ensemble class)
#   rand_acc_*              - the same quantities for random noise of equal strength
strength = []
adv_acc = []
adv_acc_mean_other = []
adv_acc_ensemble_other = []
rand_acc_mean_other = []
rand_acc_ensemble_other = []
for perturbation_strength in sorted(d.keys()):
    print('calculating stuff for perturbation = ', perturbation_strength)
    strength.append(perturbation_strength)
    adv_acc.append(ensemble.accuracy(d[perturbation_strength][0:nb_attacked_images], y_test[0:nb_attacked_images], seeds=[seed]))
    adv_acc_mean_other.append(np.mean([ensemble.accuracy(d[perturbation_strength][0:nb_attacked_images], y_test[0:nb_attacked_images], seeds=[a]) for a in seeds]))
    adv_acc_ensemble_other.append(ensemble.accuracy(d[perturbation_strength][0:nb_attacked_images], y_test[0:nb_attacked_images], seeds=seeds))
    # Random perturbation with the same norm (order) as the adversarial one.
    x_random_perturbed = random_perturb(x_test[0:nb_attacked_images], perturbation_strength=perturbation_strength, order=order)
    rand_acc_mean_other.append(np.mean([ensemble.accuracy(x_random_perturbed, y_test[0:nb_attacked_images], seeds=[a]) for a in seeds]))
    rand_acc_ensemble_other.append(ensemble.accuracy(x_random_perturbed, y_test[0:nb_attacked_images], seeds=seeds))
# -
# Plot every accuracy curve vs perturbation strength, then persist the data.
plt.plot(strength, adv_acc_mean_other, label='adv_acc_mean_other')
plt.plot(strength, adv_acc_ensemble_other, label='adv_acc_ensemble_other')
plt.plot(strength, rand_acc_mean_other, label='rand_acc_mean_other')
plt.plot(strength, rand_acc_ensemble_other, label='rand_acc_ensemble_other')
plt.plot(strength, adv_acc, label='adv_acc')
plt.legend()
header=['strength', 'adv_acc', 'adv_acc_mean_other', 'adv_acc_ensemble_other', 'rand_acc_mean_other', 'rand_acc_ensemble_other']
arr = [strength, adv_acc, adv_acc_mean_other, adv_acc_ensemble_other, rand_acc_mean_other, rand_acc_ensemble_other]
log_plot_data(attack_name=attack_name, header=header, arr=arr)
| Adversarial Attacks/MNIST/new_dense_ensemble_load.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Water Interoperability Similarity
#
# * **Products used:** [ls8_usgs_sr_scene](https://explorer.digitalearth.africa/ls8_usgs_sr_scene), [sentinel1_ghana_monthly](https://explorer.digitalearth.africa/sentinel1_ghana_monthly), **s2a_msil2a**, **s2b_msil2a**
# ## Background
#
# There are a few water classifiers for Landsat, Sentinel-1, and Sentinel-2. We will examine WOfS for Landsat, thresholding for Sentinel-1, and WOfS for Sentinel-2.
#
# Although WOfS performs well on clear water bodies, it can misclassify murky water bodies as not water. WASARD or Sentinel-1 thresholding generally perform equally well or better than WOfS – especially on murky water bodies.
#
# Because WOfS uses an optical data source (Landsat), it often does not have data to make water classifications due to cloud occlusion. The same limitation applies to Sentinel-2 water detection.
#
# The main reasons to use multiple data sources in the same water detection analysis are to increase temporal resolution and account for missing data.
#
# ## Description
#
# This notebook checks how similar water classifications are among a selected set of sources (e.g. WOfS for Landsat, thresholding for Sentinel-1, etc.).
# These are the steps followed:
#
# 1. Determine the dates of coincidence of data for the selected sensors using the CEOS COVE tool.
# 1. Acquire water classifications for each sensor.
# 1. Show the RGB representation of Time Slices and Water Classifications
# 1. Obtain the intersected clean mask for the sensors.
# 1. Show the per-time-slice percent of water (masked with the intersected clean mask) according to each sensor as a line plot.
# 1. Show the per-time-slice similarity (% of matching pixels) of each pair of sensors as a line plot.
# ***
# ## Getting started
#
# **To run this analysis**, run all the cells in the notebook, starting with the "Load packages" cell.
#
# **After finishing the analysis**, return to the "Analysis parameters" cell, modify some values (e.g. choose a different location or time period to analyse) and re-run the analysis.
# There are additional instructions on modifying the notebook at the end.
# ### Load packages
# Load key Python packages and supporting functions for the analysis.
# +
# %matplotlib inline
import sys
import datacube
import numpy
import numpy as np
import xarray as xr
from xarray.ufuncs import isnan as xr_nan
import pandas as pd
import matplotlib.pyplot as plt
sys.path.append("../Scripts")
from deafrica_datahandling import load_ard
from deafrica_plotting import display_map
# -
# ### Connect to the datacube
# Activate the datacube database, which provides functionality for loading and displaying stored Earth observation data.
dc = datacube.Datacube(app="water_interoperability_similarity")
# ### Analysis parameters
#
# The following cell sets the parameters, which define the area of interest and the length of time to conduct the analysis over.
# The parameters are
#
# * `latitude`: The latitude range to analyse (e.g. `(-11.288, -11.086)`).
# For reasonable loading times, make sure the range spans less than ~0.1 degrees.
# * `longitude`: The longitude range to analyse (e.g. `(130.324, 130.453)`).
# For reasonable loading times, make sure the range spans less than ~0.1 degrees.
#
# **If running the notebook for the first time**, keep the default settings below.
# This will demonstrate how the analysis works and provide meaningful results.
# The example covers an area around Obuasi, Ghana.
#
# **To run the notebook for a different area**, make sure Landsat 8, Sentinel-1, and Sentinel-2 data is available for the chosen area using the [DE Africa Sandbox Explorer](https://explorer.digitalearth.africa/ga_ls8c_gm_2_annual).
#
# +
# Define the area of interest
# Obuasi, Ghana
# A larger extent (commented out); the active values select a smaller
# sub-region for faster loading.
# latitude = (6.10, 6.26)
# longitude = (-1.82, -1.66)
latitude = (6.1582, 6.2028)
longitude = (-1.7295, -1.6914)
# The time range in which we want to determine
# dates of close scenes among sensors.
time_extents = ('2014-01-01', '2018-12-31')
# +
from deafrica_plotting import display_map
# Render an interactive map of the area of interest as a sanity check.
display_map(longitude, latitude)
# -
# ## Determine the dates of coincidence of data for the selected sensors using the COVE tool.
# We used a tool from the Committee on Earth Observations (CEOS) called the CEOS Visualization Environment (COVE). This tool has several applications, such as Acquisition Forecaster for determining what scenes areas will have and when, and Coverage Analyzer for determining what scenes areas have and when.
#
# For this analysis, we used the Coincident Calculator to determine when Landsat 8, Sentinel-1, and Sentinel-2 have close dates so we can compare them on a per-time-slice basis.
#
# The COVE Coincident Calculator (https://ceos-cove.org/en/coincident_calculator/) allows users to specify the sensors to determine coincidence for. For this analysis, we first determined the dates of coincidence of Landsat 8 and Sentinel-2. We then determined dates which are close to those which have Sentinel-1 data.
#
# We first found dates for which both Landsat 8 and Sentinel-2 data is available for the time range and area of interest, which were the following 8 dates:
# **[04-22-2017, 07-11-2017, 09-29-2017, 12-18-2017, 03-08-2018, 05-27-2018, 08-15-2018, 11-03-2018]**
#
# Then we found dates for which Landsat 8 and Sentinel-1 data is available for the time range and area of interest, and then found the subset of closely matching dates, which were the following 6 dates: **[07-12-2017 (off 1), 09-29-2017, 12-15-2017 (off 3), 03-09-2018 (off 1), 05-27-2018, 08-12-2018 (off 3)]** These are the dates we use in this analysis.
# ## Acquire water classifications for each sensor.
# +
common_load_params = dict(latitude=latitude, longitude=longitude,
group_by='solar_day',
output_crs="epsg:4326",
resolution=(-0.00027,0.00027))
# The minimum fraction of data that a time slice must have
# to be kept in this analysis
MIN_FRAC_DATA = 0.5
# -
# ### Determine the time range of overlapping data for all sensors.
# Lazily load (dask-backed) each product's full time stack; only the time
# coordinates are inspected here, so no pixel data is actually computed.
metadata = {}
metadata['Landsat 8'] = dc.load(**common_load_params,
                                product='ls8_usgs_sr_scene',
                                time=time_extents,
                                dask_chunks={'time':1})
metadata['Sentinel-1'] = dc.load(**common_load_params,
                                 product='sentinel1_ghana_monthly',
                                 time=time_extents,
                                 dask_chunks={'time':1})
s2a_meta = dc.load(**common_load_params,
                   product='s2a_msil2a',
                   time=time_extents,
                   dask_chunks={'time':1})
s2b_meta = dc.load(**common_load_params,
                   product='s2b_msil2a',
                   time=time_extents,
                   dask_chunks={'time':1})
# Sentinel-2 = the merged A and B constellations, sorted chronologically.
metadata['Sentinel-2'] = xr.concat((s2a_meta, s2b_meta), dim='time').sortby('time')
del s2a_meta, s2b_meta
# +
# Overlapping time range = latest first acquisition to earliest last one;
# only Landsat 8 and Sentinel-2 are used to constrain it here.
ls8_time_rng = metadata['Landsat 8'].time.values[[0,-1]]
s2_time_rng = metadata['Sentinel-2'].time.values[[0,-1]]
time_rng = np.stack((ls8_time_rng, s2_time_rng))
overlapping_time = time_rng[:,0].max(), time_rng[:,1].min()
# -
overlapping_time
# **Limit the metadata to check for close scenes to the overlapping time range.**
for sensor in metadata:
    metadata[sensor] = metadata[sensor].sel(time=slice(*overlapping_time))
# ### Determine the dates of close scenes among the sensors
# +
# Constants #
# The maximum number of days of difference between scenes
# from sensors for those scenes to be considered approximately coincident.
# The Sentinel-1 max date diff is set high enough to allow any set of dates
# from the other sensors to match with one of its dates since we will
# select its matching dates with special logic later.
MAX_NUM_DAYS_DIFF = {'Landsat 8': 4, 'Sentinel-1':30, 'Sentinel-2':4}
# End Constants #
# all_times
# NOTE(review): num_datasets is never used below — possibly leftover.
num_datasets = len(metadata)
ds_names = list(metadata.keys())
first_ds_name = ds_names[0]
# All times for each dataset.
ds_times = {ds_name: metadata[ds_name].time.values for ds_name in ds_names}
# The time indices for each dataset's sorted time dimension
# currently being compared.
time_inds = {ds_name: 0 for ds_name in ds_names}
# Output: parallel per-sensor lists of matched acquisition times.
corresponding_times = {ds_name: [] for ds_name in ds_names}
# The index of the dataset in `metadata` to compare times against the first.
oth_ds_ind = 1
oth_ds_name = ds_names[oth_ds_ind]
oth_ds_time_ind = time_inds[oth_ds_name]
# For each time in the first dataset, find any
# closely matching dates in the other datasets.
# (Merge-join style sweep: the per-dataset time arrays are chronological,
# so every index only ever moves forward.)
for first_ds_time_ind, first_ds_time in enumerate(ds_times[first_ds_name]):
    time_inds[first_ds_name] = first_ds_time_ind
    # Find a corresponding time in this other dataset.
    while True:
        oth_ds_name = ds_names[oth_ds_ind]
        oth_ds_time_ind = time_inds[oth_ds_name]
        # If we've checked all dates for the other dataset,
        # check the next first dataset time.
        if oth_ds_time_ind == len(ds_times[oth_ds_name]):
            break
        oth_ds_time = metadata[ds_names[oth_ds_ind]].time.values[oth_ds_time_ind]
        # Signed difference in whole days (other minus first).
        time_diff = (oth_ds_time - first_ds_time).astype('timedelta64[D]').astype(int)
        # If this other dataset time is too long before this
        # first dataset time, check the next other dataset time.
        if time_diff <= -MAX_NUM_DAYS_DIFF[oth_ds_name]:
            oth_ds_time_ind += 1
            time_inds[ds_names[oth_ds_ind]] = oth_ds_time_ind
            continue
        # If this other dataset time is within the acceptable range
        # of the first dataset time...
        elif abs(time_diff) <= MAX_NUM_DAYS_DIFF[oth_ds_name]:
            # If there are more datasets to find a corresponding date for
            # these current corresponding dates, check those datasets.
            if oth_ds_ind < len(ds_names)-1:
                oth_ds_ind += 1
                continue
            else: # Otherwise, record this set of corresponding dates.
                for ds_name in ds_names:
                    corresponding_times[ds_name].append(ds_times[ds_name][time_inds[ds_name]])
                    # Don't use these times again.
                    time_inds[ds_name] = time_inds[ds_name] + 1
                oth_ds_ind = 1
                break
        # If this other dataset time is too long after this
        # first dataset time, go to the next first dataset time.
        # NOTE(review): oth_ds_ind is decremented here rather than reset to 1;
        # verify this backtracking is the intended behavior when more than two
        # sensors are being matched.
        else:
            oth_ds_ind -= 1
            break
# -
# convert to pandas datetime
for sensor in corresponding_times:
    for ind in range(len(corresponding_times[sensor])):
        corresponding_times[sensor][ind] = \
            pd.to_datetime(corresponding_times[sensor][ind])
# **The Sentinel-1 data is a monthly composite, so we need special logic for choosing data from it.**
ls8_pd_datetimes = corresponding_times['Landsat 8']
s1_pd_datetimes = pd.to_datetime(metadata['Sentinel-1'].time.values)
# For every matched Landsat 8 date, pick the Sentinel-1 monthly composite
# from the same calendar month.
# NOTE(review): the comparison matches on month only, ignoring the year, and
# [0] takes the FIRST composite with that month — over a multi-year time
# range this can select a composite from the wrong year; confirm intended.
for time_ind, ls8_time in enumerate(ls8_pd_datetimes):
    matching_s1_time_ind = [s1_time_ind for (s1_time_ind, s1_time)
                            in enumerate(s1_pd_datetimes) if
                            s1_time.month == ls8_time.month][0]
    matching_s1_time = metadata['Sentinel-1'].time.values[matching_s1_time_ind]
    corresponding_times['Sentinel-1'][time_ind] = pd.to_datetime(matching_s1_time)
# ### Landsat 8
# **Load the data**
# Matched acquisition times per sensor (computed above).
ls8_times = corresponding_times['Landsat 8']
s1_times = corresponding_times['Sentinel-1']
s2_times = corresponding_times['Sentinel-2']
# Load the Landsat 8 stack spanning the matched dates.
# (A dead `ls8_data = []` assignment that was immediately overwritten by
# dc.load() has been removed.)
ls8_data = dc.load(**common_load_params,
                   product='ls8_usgs_sr_scene',
                   time=(ls8_times[0], ls8_times[-1]),
                   dask_chunks = {'time': 1})
# Keep only the time slice nearest to each matched Landsat 8 date.
ls8_data = ls8_data.sel(time=corresponding_times['Landsat 8'], method='nearest')
print(f"Subset the data to {len(ls8_data.time)} times of near coincidence.")
# **Acquire the clean mask**
# A pixel/time is clean only if every band is non-NaN there (min over the
# per-band boolean "not NaN" array).
ls8_not_nan_da = ~xr_nan(ls8_data).to_array()
ls8_clean_mask = ls8_not_nan_da.min('variable')
del ls8_not_nan_da
# **Acquire water classifications**
# +
from water_interoperability_utils.dc_water_classifier import wofs_classify
import warnings
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=Warning)
    # WOfS classification of the Landsat data; keep the binary `wofs` band.
    ls8_water = wofs_classify(ls8_data).wofs
# -
# ### Sentinel-1
# **Load the data**
s1_data = dc.load(**common_load_params,
                  product='sentinel1_ghana_monthly',
                  time=(s1_times[0], s1_times[-1]),
                  dask_chunks = {'time': 1})
# Keep only the monthly composite nearest to each matched date.
s1_data = s1_data.sel(time=corresponding_times['Sentinel-1'], method='nearest')
print(f"Subset the data to {len(s1_data.time)} times of near coincidence.")
# **Acquire the clean mask**
# A pixel/time is clean only if every band (vv and vh) is non-NaN there.
s1_not_nan_da = ~xr_nan(s1_data).to_array()
s1_clean_mask = s1_not_nan_da.min('variable')
del s1_not_nan_da
# **Acquire water classifications**
# +
# NOTE(review): SimpleImputer and try_all_threshold are imported but not used
# in this cell — confirm they are needed elsewhere or drop the imports.
from sklearn.impute import SimpleImputer
from skimage.filters import try_all_threshold, threshold_otsu
# Otsu-threshold each polarization independently; pixels below BOTH the VV
# and VH thresholds are labeled water.
thresh_vv = threshold_otsu(s1_data.vv.values)
thresh_vh = threshold_otsu(s1_data.vh.values)
binary_vv = s1_data.vv.values < thresh_vv
binary_vh = s1_data.vh.values < thresh_vh
# Wrap the numpy result back into a DataArray with vv's coords/dims/attrs.
s1_water = xr.DataArray(binary_vv & binary_vh, coords=s1_data.vv.coords,
                        dims=s1_data.vv.dims, attrs=s1_data.vv.attrs)
# -
# ### Sentinel-2
# **Acquire the data**
s2a_data = dc.load(**common_load_params,
                   product='s2a_msil2a',
                   time=(s2_times[0], s2_times[-1]),
                   dask_chunks = {'time': 1})
s2b_data = dc.load(**common_load_params,
                   product='s2b_msil2a',
                   time=(s2_times[0], s2_times[-1]),
                   dask_chunks = {'time': 1})
# Merge the A and B constellations and sort chronologically, then keep only
# the time slice nearest to each matched Sentinel-2 date.
s2_data = xr.concat((s2a_data, s2b_data), dim='time').sortby('time')
s2_data = s2_data.sel(time=corresponding_times['Sentinel-2'], method='nearest')
print(f"Subsetting the data to {len(s2_data.time)} times of near coincidence.")
# **Acquire the clean mask**
# See figure 3 on this page for more information about the
# values of the scl data for Sentinel-2:
# https://earth.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-2a/algorithm
# Excludes SCL classes 0, 8 and 9 (per the ESA table: no-data and the
# medium/high cloud-probability classes).
s2_clean_mask = s2_data.scl.isin([1, 2, 3, 4, 5, 6, 7, 10, 11])
# Cast to float so masked-out pixels can be represented as NaN.
s2_data = s2_data.astype(np.float32).where(s2_clean_mask)
# **Acquire water classifications**
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=Warning)
    # Rename bands to the names wofs_classify expects before classifying.
    s2_water = wofs_classify(s2_data.rename(
        {'nir_1': 'nir', 'swir_1': 'swir1', 'swir_2': 'swir2'})).wofs
# +
# Materialize all dask-backed arrays now that subsetting is complete.
ls8_data = ls8_data.compute()
ls8_clean_mask = ls8_clean_mask.compute()
s1_data = s1_data.compute()
s1_clean_mask = s1_clean_mask.compute()
s2_data = s2_data.compute()
s2_clean_mask = s2_clean_mask.compute()
# -
# ## Show the RGB Representation of Time Slices and Water Classifications
# **Obtain the intersected clean mask for the sensors.**
# A pixel/time is usable for cross-sensor comparison only where ALL three
# sensors have clean data.
# NOTE(review): &-ing raw .values assumes the three masks have identical
# shapes (same matched-date count and grid) — they should, given the shared
# load params and date matching above, but verify.
intersected_clean_mask = xr.DataArray((ls8_clean_mask.values &
                                       s1_clean_mask.values &
                                       s2_clean_mask.values),
                                      coords=ls8_clean_mask.coords,
                                      dims=ls8_clean_mask.dims)
# **Show the data and water classifications for each sensor as the data will be compared among them (an intersection).**
# ## Show the per-time-slice percent of water according to each sensor as a line plot.
# ## Show the per-time-slice similarity (% of matching pixels) of each pair of sensors as a line plot.
| Water_Interoperability/water_interoperability_similarity_v1-clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center>Prepare Lab of 2nd Exercise</center>
# ## <center> Speech Recognition using HMMs and RNNs </center>
# ### Description
# Our goal is the implementation of a speech recognition system, that recognizes isolated words. The first part involves the extraction of the appropriate acoustic features from our recordings and their further analysis. These features are the cepstral coefficients, that are computed using a filterbank (inspired by psychoacoustic methods).
# More specifically, the system will recognize isolated digits in English. Our dataset contains dictations of 9 digits from 15 different speakers in separate .wav files. In total, there are 133 files, since 2 dictations are missing. The name of each file (e.g. eight8.wav) declares both the dictated digit (e.g. eight) and the speaker (speakers are numbered from 1 to 15). The sampling rate is Fs=16k and the duration of each dictation differs.
# ### Implementation
# Import necessary libraries
# +
import librosa as l
import os
import re
import IPython.display as ipd
import numpy as np
import random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# sklearn
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.naive_bayes import GaussianNB
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
# pytorch
from torch.utils.data import Dataset, DataLoader
import torch
from torch import nn
from torch import optim
# pandas
import pandas as pd
# -
import warnings
# Suppress FutureWarnings so the notebook output stays readable.
warnings.simplefilter(action='ignore', category=FutureWarning)
# __Step 1:__ Sound analysis using [Praat](http://www.fon.hum.uva.nl/praat/).
# - Open __onetwothree1.wav__ and __onetwothree8.wav__ using Praat. These files contain the phrase "one two three" from speakers 1 and 8 (male and female respectively).
# - Waveforms of male and female speaker
#
# <img src="outputs/waveform.png" alt="Waveforms" style="width: 700px;height: 700px;"/>
#
# - Spectogram of male speaker
# <img src="outputs/spec_male.png" alt="Male specrogram" style="width: 700px; height: 700px;"/>
# Spectogram of female speaker
# <img src="outputs/spec_female.png" alt="Female specrogram" style="width: 700px;height: 700px; "/>
# - Extract the mean value of the pitch for the vowels "α" "ου" and "ι" (in praat select the part of the waveform that the vowel exists and press "F5").
# | File | Gender | Vowel | Pitch (Hz) |
# | -----| ----- | ----- | ----- |
# | onetwothree1.wav | male | α | 133.66 |
# | onetwothree1.wav | male | ου | 128.51 |
# | onetwothree1.wav | male | ι | 130.3 |
# | onetwothree8.wav | female | α | 176.83 |
# | onetwothree8.wav | female | ου | 179.81 |
# | onetwothree8.wav | female | ι | 174.59 |
# We observe that the pitch of the female speaker is higher than the pitch of the male speaker in all vowels. As a result, we can use pitch in order to distinguish the gender of the speaker. However, we can't use pitch in a digit recognition task, since for a single speaker the pitch of different vowels is close to each other.
# - Extract the first 3 formants for each vowel (in praat select the central point in the part of the waveform that the vowel exists and press F1, F2 and F3 respectively).
# | File | Gender | Vowel | F1 | F2 | F3 |
# | -----| ----- | ----- | -- | -- | -- |
# | onetwothree1.wav | male | α | 782 | 1069 | 2243 |
# | onetwothree8.wav | female | α | 951 | 1584 | 2999 |
# | onetwothree1.wav | male | ου | 359 | 1818 | 2443 |
# | onetwothree8.wav | female | ου | 370 | 2150 | 2663 |
# | onetwothree1.wav | male | ι | 386 | 1981 | 2447 |
# | onetwothree8.wav | female | ι | 356 | 2456 | 2552 |
# We observe that the formants differ from each other in different vowels and are not influenced a lot when the speaker is changed. So, these peaks (F1, F2, F3) can be used in speech recognition to distinguish vowels.
# __Step 2:__ Create a function (data parser) that reads the sound files in __digits/__ folder and returns 3 Python lists: the .wav file read in librosa, the respective speaker and the digit.
# Define useful variables
# Total number of recordings (15 speakers x 9 digits, minus 2 missing
# dictations — see the dataset description above).
n_samples = 133
digits_dir = "../pr_lab2_2019-20_data/digits"
# Sampling rate of every recording, in Hz.
sr = 16000
# Dictionary to convert string digits in their numerical format.
str2num = {'one':1, 'two':2, 'three':3, 'four':4, 'five':5, 'six':6,
           'seven':7, 'eight':8, 'nine':9}
# Dictionary to convert digits from numerical format in their
# string representation.
num2str = {v: k for k, v in str2num.items()}
# +
# Split a filename stem such as "six13" into its leading word and the
# first run of digits that follows it.
def digit_split(s):
    """Return [word, digits] for *s*, e.g. "six13.wav" -> ['six', '13']."""
    tokens = re.split(r'(\d+)', s)
    return tokens[:2]

print(digit_split("six13.wav"))
# -
# Main data parser method
def data_parser(digits_dir):
    """Read every recording in *digits_dir*.

    Returns three parallel collections: the librosa waveforms, the
    speaker ids and the (numeric) dictated digits.
    """
    wav = [None] * n_samples
    speaker = np.zeros(n_samples, dtype=int)
    digit = np.zeros(n_samples, dtype=int)
    for idx, entry in enumerate(os.listdir(digits_dir)):
        fname = os.fsdecode(entry)
        # Load the recording at the dataset's native 16 kHz rate.
        wav[idx], _ = l.load(os.path.join(digits_dir, fname), sr=16000)
        # "eight8" -> ['eight', '8']: spoken digit and speaker id.
        parts = digit_split(fname.split('.')[0])
        if parts[0] in str2num:
            digit[idx] = str2num[parts[0]]
        else:
            print("Error in decoding " + str(parts[0]))
        speaker[idx] = int(parts[1])
    return wav, speaker, digit
wav, speaker, digit = data_parser(digits_dir)
# Check data parser before continue on step 3.
# Print first three files
# (spot-check: first waveform samples plus the parsed speaker/digit labels)
for i in range(3):
    print("Sample " + str(i))
    print("Waveform: " + str(wav[i][:3]))
    print("Speaker: " + str(speaker[i]))
    print("Digit: " + str(digit[i]))
    print()
# Sample a random file
sample = random.randint(0,n_samples-1)
# Construct its correct filename
# (rebuilt from the parsed digit/speaker pair, e.g. "eight8.wav")
name = num2str[digit[sample]] + str(speaker[sample]) + '.wav'
print(name)
# Play it and check that parser is correct.
ipd.Audio("../pr_lab2_2019-20_data/digits/" + name)
# __Step 3:__ Extract Mel-Frequency Cepstral Coefficients (MFCCs) from each sound file using librosa (13 features per file). Use 25 ms window size and 10 ms step size. Also, compute delta and delta-deltas of the features.
# +
# Convert window and step size from ms to number of points:
# 25 ms window and 10 ms hop at 16 kHz -> 400-sample frames, 160-sample hop.
n_fft = int(sr * 0.025)
hop_length = int(sr * 0.01)
mfccs = []
delta = []
delta_deltas = []
# For each sample compute the mfccs, the deltas and the delta-deltas.
for i in range(n_samples):
    # 13 coefficients per frame; result shape is (13, n_frames).
    mfcc = l.feature.mfcc(wav[i], sr=sr, n_mfcc=13, hop_length=hop_length, n_fft=n_fft)
    mfccs.append(mfcc)
    # First- and second-order temporal derivatives of the MFCCs.
    delta.append(l.feature.delta(mfcc))
    delta_deltas.append(l.feature.delta(mfcc, order=2))
# -
# __Step 4:__ Display a histogram for the 1st and the 2nd MFCC of digits n1 and n2 for all recordings. In my case, n1 = 0 and n2 = 9. Since n1 = 0, we define n1 = 9-1 = 8.
# +
# Extract 1st and 2nd mfcc of digit 8
# (each entry is the per-frame series of that coefficient for one recording)
mfcc1_d8 = [mfccs[i][0] for i in range(n_samples) if digit[i] == 8]
mfcc2_d8 = [mfccs[i][1] for i in range(n_samples) if digit[i] == 8]
# Extract 1st and 2nd mfcc of digit 9
mfcc1_d9 = [mfccs[i][0] for i in range(n_samples) if digit[i] == 9]
mfcc2_d9 = [mfccs[i][1] for i in range(n_samples) if digit[i] == 9]
# +
# Flatten one level of nesting: a list of lists becomes a single list.
# (The parameter name `l` shadows the module-level librosa alias inside
# this function only.)
def flat_list(l):
    """Return the concatenation of all sublists of *l*."""
    flattened = []
    for sublist in l:
        flattened.extend(sublist)
    return flattened
# Flattening
# Pool every frame value into one flat list per (coefficient, digit) pair.
mfcc1_d8_flat = flat_list(mfcc1_d8)
mfcc2_d8_flat = flat_list(mfcc2_d8)
mfcc1_d9_flat = flat_list(mfcc1_d9)
mfcc2_d9_flat = flat_list(mfcc2_d9)
# Plot the histograms
# Layout: top row = digit 8, bottom row = digit 9;
#         left column = 1st MFCC, right column = 2nd MFCC.
fig = plt.figure(figsize=(15,12))
fig.add_subplot(2, 2, 1)
plt.hist(mfcc1_d8_flat, bins=20)
fig.add_subplot(2, 2, 2)
plt.hist(mfcc2_d8_flat, bins=20)
fig.add_subplot(2, 2, 3)
plt.hist(mfcc1_d9_flat, bins=20)
fig.add_subplot(2, 2, 4)
plt.hist(mfcc2_d9_flat, bins=20)
plt.show()
# -
# The divergence between digits 8 and 9 is small in both features. In order to classify them, we need more MFCC features.
# Choose two recordings for each digit from two different speaker and compute Mel Filterbank Spectral Coefficients (MFSCs). Then, plot the correlation of the MFSCs and MFCCs for each recording.
# The two speakers and two digits whose recordings will be compared.
spk1 = 1
spk2 = 2
n1 = 8
n2 = 9
# Find the recording index of each (speaker, digit) combination.
for i in range(n_samples):
    if speaker[i] == spk1 and digit[i] == n1:
        s1_n1 = i
    if speaker[i] == spk1 and digit[i] == n2:
        s1_n2 = i
    if speaker[i] == spk2 and digit[i] == n1:
        s2_n1 = i
    if speaker[i] == spk2 and digit[i] == n2:
        s2_n2 = i
print("Speaker 1 - Digit 8: " + str(s1_n1))
print("Speaker 1 - Digit 9: " + str(s1_n2))
print("Speaker 2 - Digit 8: " + str(s2_n1))
print("Speaker 2 - Digit 9: " + str(s2_n2))
# 13-band mel spectrograms (MFSCs) using the same framing as the MFCCs above.
mfscs_1_1 = l.feature.melspectrogram(wav[s1_n1], sr=sr, hop_length=hop_length, n_fft=n_fft, n_mels=13)
mfscs_1_2 = l.feature.melspectrogram(wav[s1_n2], sr=sr, hop_length=hop_length, n_fft=n_fft, n_mels=13)
mfscs_2_1 = l.feature.melspectrogram(wav[s2_n1], sr=sr, hop_length=hop_length, n_fft=n_fft, n_mels=13)
mfscs_2_2 = l.feature.melspectrogram(wav[s2_n2], sr=sr, hop_length=hop_length, n_fft=n_fft, n_mels=13)
# In order to construct the correlation plot easily, we convert out data in a dataframe and use the function df.corr by pandas.
# +
# +
# Correlation heatmaps of the 13 MFSC bands, one recording per panel.
# Transpose so frames become dataframe rows and bands become columns
# before calling .corr() (which correlates columns).
fig = plt.figure(figsize=(15,12))
fig.add_subplot(2, 2, 1)
mfsc_df_1_1 = pd.DataFrame.from_records(mfscs_1_1.T)
plt.imshow(mfsc_df_1_1.corr())
fig.add_subplot(2, 2, 2)
mfsc_df_1_2 = pd.DataFrame.from_records(mfscs_1_2.T)
plt.imshow(mfsc_df_1_2.corr())
fig.add_subplot(2, 2, 3)
mfsc_df_2_1 = pd.DataFrame.from_records(mfscs_2_1.T)
plt.imshow(mfsc_df_2_1.corr())
fig.add_subplot(2, 2, 4)
mfsc_df_2_2 = pd.DataFrame.from_records(mfscs_2_2.T)
plt.imshow(mfsc_df_2_2.corr())
plt.show()
# +
# The same four panels for the MFCC coefficients of the same recordings.
fig = plt.figure(figsize=(15,12))
fig.add_subplot(2, 2, 1)
mfcc_df_1_1 = pd.DataFrame.from_records(mfccs[s1_n1].T)
plt.imshow(mfcc_df_1_1.corr())
fig.add_subplot(2, 2, 2)
mfcc_df_1_2 = pd.DataFrame.from_records(mfccs[s1_n2].T)
plt.imshow(mfcc_df_1_2.corr())
fig.add_subplot(2, 2, 3)
mfcc_df_2_1 = pd.DataFrame.from_records(mfccs[s2_n1].T)
plt.imshow(mfcc_df_2_1.corr())
fig.add_subplot(2, 2, 4)
mfcc_df_2_2 = pd.DataFrame.from_records(mfccs[s2_n2].T)
plt.imshow(mfcc_df_2_2.corr())
plt.show()
# -
# We observe that MFSCs coefficients are higly correlated, while MSCCs are not. This explains the fact that the Discrete Cosine Transform (DCT) is used to decorrelate the mel filter bank coefficients, a process also referred to as whitening.
# __Step 5:__ Extraction of a global feature vector for each recording, by combining the mean value and the variance of the mfccs – deltas – delta-deltas for all the windows.
# +
# X = mean mffc - mean delta - mean delta-deltas- var mfcc - var delta - var delta-deltas
# One 78-dim global feature vector per recording: the mean and the spread of
# each of the 13 MFCCs, deltas and delta-deltas taken over all frames.
# NOTE(review): the step text says "variance" but np.std (standard deviation)
# is computed — either works as a feature, but align the wording and code.
X = np.zeros((n_samples, 78))
for i in range(n_samples):
    X[i, :13] = np.mean(mfccs[i], axis=1)
    X[i, 13:26] = np.mean(delta[i], axis=1)
    X[i, 26:39] = np.mean(delta_deltas[i], axis=1)
    X[i, 39:52] = np.std(mfccs[i], axis=1)
    X[i, 52:65] = np.std(delta[i], axis=1)
    X[i, 65:] = np.std(delta_deltas[i], axis=1)
# -
# Plot the first two dimensions in a 2D scatter plot.
# Define a function that plots the decision surface of 2D-dimensional data
def scatter_2d(X, y, labels):
    """Scatter-plot the first two columns of X, colored by class label.

    Args:
        X: array of shape (n_samples, >=2); only columns 0 and 1 are drawn.
        y: array of integer class labels; values 1..9 are plotted.
        labels: sequence of 9 legend labels, one per class (classes 1..9).
    """
    fig, ax = plt.subplots()
    # Set-up grid for plotting.
    X0, X1 = X[:, 0], X[:, 1]
    # One color per class, in the same order as the original per-class calls;
    # a single loop replaces nine copy-pasted ax.scatter invocations.
    colors = ['red', 'purple', 'green', 'gray', 'orange',
              'black', 'pink', 'white', 'yellow']
    for cls, (color, label) in enumerate(zip(colors, labels), start=1):
        ax.scatter(
            X0[y == cls], X1[y == cls],
            c=color, label=label,
            s=50, alpha=0.9, edgecolors='k')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.legend()
    plt.show()
plt.rcParams['figure.figsize'] = [20, 15]
scatter_2d(X, digit, [i for i in range(1,10)])
# We observe that if we take into account only the first two dimensions of the global feature vector, the points of the same class are not always close to each other and the classes are not easily separable from one another. As a result, a classifier would perform poorly on these 2D data.
# __Step 6:__ Apply PCA to reduce the dimensions of the feature vector from 78 to 2 or 3 and plot the data in 2D and 3D respectively.
# Define PCA
pca_2d = PCA(n_components=2)
# Apply PCA on data
X_2d = pca_2d.fit_transform(X)
scatter_2d(X_2d, digit, [i for i in range(1, 10)])
# After applying PCA, the scatter plot is much better since we kept more information from all the 78 dimensions in our 2D data.
# Define a function that plots the decision surface of 3-dimensional data
def scatter_3d(X, y, labels):
    """Scatter-plot the first three columns of X in 3D, colored by class.

    Args:
        X: array of shape (n_samples, >=3); columns 0, 1 and 2 are drawn.
        y: array of integer class labels; values 1..9 are plotted.
        labels: sequence of 9 legend labels, one per class (classes 1..9).
    """
    # The original created a throwaway 2D axes via plt.subplots() and then
    # overlaid a 3D axes on the same figure; create the figure directly.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Set-up grid for plotting.
    X0, X1, X2 = X[:, 0], X[:, 1], X[:, 2]
    # One color per class, matching scatter_2d; a loop replaces nine
    # copy-pasted ax.scatter invocations.
    colors = ['red', 'purple', 'green', 'gray', 'orange',
              'black', 'pink', 'white', 'yellow']
    for cls, (color, label) in enumerate(zip(colors, labels), start=1):
        ax.scatter(
            X0[y == cls], X1[y == cls], X2[y == cls],
            c=color, label=label,
            s=50, alpha=0.9, edgecolors='k')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.legend()
    plt.show()
# Define PCA
pca_3d = PCA(n_components=3)
# Apply PCA on data
X_3d = pca_3d.fit_transform(X)
scatter_3d(X_3d, digit, [i for i in range(1,10)])
print(pca_2d.explained_variance_ratio_)
print(pca_3d.explained_variance_ratio_)
# We observe that only a small fraction of the total variance is retained after dimensionality reduction, which means the projection discarded useful information and the reduced representation should be interpreted with caution.
# __Step 7:__ Classification
# - Split data in train and test set in proportion 70%-30%
# Hold out 30% for testing; stratify on y so every digit keeps the same
# class proportions in both splits (random_state fixed for reproducibility).
y = digit
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
# - Normalize our data
# NOTE(review): preprocessing.normalize scales each sample to unit L2 norm
# independently (it is not a fitted scaler), so no train statistics leak
# into the test set here.
X_train_norm = preprocessing.normalize(X_train)
X_test_norm = preprocessing.normalize(X_test)
# - Define the numpy based Gaussian classifier used in the 1st Lab.
class GaussianNB_np(BaseEstimator, ClassifierMixin):
    """Gaussian Naive Bayes classifier implemented with NumPy.

    Fits one independent Gaussian per (class, feature) pair and predicts
    the class with the highest posterior probability.
    """

    def __init__(self):
        self.X_mean_ = None   # (n_classes, n_features) per-class feature means
        self.X_var_ = None    # (n_classes, n_features) per-class feature variances
        self.prior = None     # (n_classes,) class prior probabilities
        self.classes = None   # sorted unique class labels seen in fit()
        self.n_classes = None

    def fit(self, X, y):
        """Estimate per-class means, variances and priors from (X, y).

        Returns self, following the sklearn estimator convention.
        """
        # Coerce y to an array: the original computed priors with
        # np.count_nonzero(y == c), which silently yields 0 when y is a
        # plain Python list (list == scalar is False, not elementwise).
        y = np.asarray(y)
        self.classes = np.unique(y)
        train_size, n_features = X.shape
        self.n_classes = len(self.classes)
        self.X_mean_ = np.zeros((self.n_classes, n_features))
        self.X_var_ = np.zeros((self.n_classes, n_features))
        self.prior = np.zeros(self.n_classes)
        # Boolean-mask selection replaces the index-list + np.take scan,
        # and a single loop fills means, variances and priors together.
        for k, c in enumerate(self.classes):
            X_k = X[y == c]
            self.X_mean_[k, :] = np.mean(X_k, axis=0)
            self.X_var_[k, :] = np.var(X_k, axis=0)
            self.prior[k] = X_k.shape[0] / train_size
        return self

    def predict(self, X, smooth=None):
        """Return the most probable class label for each row of X.

        smooth: variance-smoothing constant; defaults to 1e-9.
        """
        e = smooth if smooth else 10**(-9)
        like = np.zeros((self.n_classes, len(X)))
        for i in range(self.n_classes):
            # NOTE(review): e is added after the 2*pi*var product inside the
            # sqrt but directly to var in the exponent; kept as-is to
            # preserve the original numerics.
            like[i] = np.prod(1/(np.sqrt(2*np.pi*self.X_var_[i]+ e)) * \
                np.exp(-0.5*((X - self.X_mean_[i])**2 / (self.X_var_[i] + e))), axis=1)
        # Map the argmax index back through the fitted labels; identical to
        # the original "+ 1" for labels 1..n, but also correct for arbitrary
        # label sets.
        return self.classes[np.argmax(like.T * self.prior, axis=1)]

    def score(self, X, y, smooth=None):
        """Return classification accuracy of predict(X) against y."""
        return np.mean(self.predict(X, smooth) == np.asarray(y))
# - Classify data using our custom Bayes from Lab 1.
# +
# Define a custom scorer
def my_scorer(clf, X, y_true):
return clf.score(X, y_true)
# Define the classifier
gaussNB_np = GaussianNB_np()
gaussNB_np.fit(X_train, y_train)
print("Accuracy of custom NumPy GaussianNB classifier")
print()
# Predict using default smoothing.
print("Smoothing 1e-9: " + str(gaussNB_np.score(X_test, y_test)))
# Predict using 1e-6 smoothing.
print("Smoothing 1e-6: " + str(gaussNB_np.score(X_test, y_test, smooth=10**(-6))))
# Predict using 1e-3 smoothing.
print("Smoothing 1e-3: " + str(gaussNB_np.score(X_test, y_test, smooth=10**(-3))))
# Predict using 1 smoothing.
print("Smoothing 1: " + str(gaussNB_np.score(X_test, y_test, smooth=1)))
# +
# Define a custom scorer
def my_scorer(clf, X, y_true):
return clf.score(X, y_true)
# Define the classifier
gaussNB_np = GaussianNB_np()
gaussNB_np.fit(X_train_norm, y_train)
print("Accuracy of custom NumPy GaussianNB classifier in normalized data")
print()
# Predict using default smoothing.
print("Smoothing 1e-9: " + str(gaussNB_np.score(X_test_norm, y_test)))
# Predict using 1e-6 smoothing.
print("Smoothing 1e-6: " + str(gaussNB_np.score(X_test_norm, y_test, smooth=10**(-6))))
# Predict using 1e-3 smoothing.
print("Smoothing 1e-3: " + str(gaussNB_np.score(X_test_norm, y_test, smooth=10**(-3))))
# Predict using 1 smoothing.
print("Smoothing 1: " + str(gaussNB_np.score(X_test_norm, y_test, smooth=1)))
# -
# - Classify data using Naive Bayes of sklearn.
# +
gaussNB = GaussianNB()
gaussNB.fit(X_train, y_train)
print("Accuracy of sklearn GaussianNB classifier")
print()
print(gaussNB.score(X_test, y_test))
# +
gaussNB = GaussianNB()
gaussNB.fit(X_train_norm, y_train)
print("Accuracy of sklearn GaussianNB classifier in normalized data")
print()
print(gaussNB.score(X_test_norm, y_test))
# -
# - Classify data using Nearest Neighbors classifier
# +
# Fit a 3-nearest-neighbor classifier on the raw features.
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(X_train, y_train)
# Typo fix in the printed message: "Neihbors" -> "Neighbors".
print("Accuracy of Nearest Neighbors classifier")
print()
print(neigh.score(X_test, y_test))
# +
# Same classifier on the L2-normalized features for comparison.
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(X_train_norm, y_train)
print("Accuracy of Nearest Neighbors classifier in normalized data")
print()
print(neigh.score(X_test_norm, y_test))
# -
# - Classify data using SVM classifier with linear kernel.
# +
svm_linear = SVC(kernel="linear", probability=True)
svm_linear.fit(X_train, y_train)
print("Accuracy of SVM classifier, using linear kernel")
print()
print(svm_linear.score(X_test, y_test))
# +
svm_linear = SVC(kernel="linear", probability=True)
svm_linear.fit(X_train_norm, y_train)
print("Accuracy of SVM classifier, using linear kernel in normalized data")
print()
print(svm_linear.score(X_test_norm, y_test))
# -
# - Classify data using SVM classifier with poly kernel.
# +
svm_poly = SVC(kernel="poly", probability=True)
svm_poly.fit(X_train, y_train)
print("Accuracy of SVM classifier, using poly kernel")
print()
print(svm_poly.score(X_test, y_test))
# +
svm_poly = SVC(kernel="poly", probability=True)
svm_poly.fit(X_train_norm, y_train)
print("Accuracy of SVM classifier, using poly kernel in normalized data")
print()
print(svm_poly.score(X_test_norm, y_test))
# -
# - Classify data using Logistic Regression.
# +
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
print("Accuracy of Logistic Regression classifier")
print()
print(log_reg.score(X_test, y_test))
# +
log_reg = LogisticRegression()
log_reg.fit(X_train_norm, y_train)
print("Accuracy of Logistic Regression classifier in normalized data")
print()
print(log_reg.score(X_test_norm, y_test))
# -
# - Summarize our results in the following table:
# | Classifier | Parameter | Normalized | Accuracy |
# | ---------------- | --------------- | ---------- | -------- |
# | Bayes_np | smooth=1e-6 | No | 65 |
# | Bayes_np | smooth=1e-6 | Yes | 67.5 |
# | Bayes_sklearn | smooth=1e-9 | No | 65 |
# | Bayes_sklearn | smooth=1e-9 | Yes | 60 |
# | Nearest Neighbor | n = 3 | No | 62.5 |
# | Nearest Neighbor | n = 3 | Yes | 55 |
# | SVM | kernel = linear | No | 87.5 |
# | SVM | kernel = linear | Yes | 25 |
# | SVM | kernel = poly | No | 85 |
# | SVM | kernel = poly | Yes | 25 |
# | Logistic | - | No | 82.5 |
# | Logistic | - | Yes | 25 |
# __Step 8:__ Pytorch introduction
# - Generate 10-point sin and cosine waves with f = 40 Hz and random amplitude.
# +
# Build 1000 paired 10-sample waveforms: X[i] is a sine snippet and y[i] the
# matching cosine snippet, sharing the same random amplitude and phase.
f = 40          # signal frequency in Hz
step = 0.001    # nominal sampling step in seconds
X = np.zeros((1000, 10))
y = np.zeros((1000, 10))
for i in range(1000):
    # Random amplitude in range [0, 10]
    A = np.random.rand()*10
    # Random starting point in range [0, 40T]
    start = np.random.rand() * (40/f)
    # NOTE(review): linspace includes the endpoint, so the actual sample
    # spacing is step*10/9, not step — confirm whether endpoint=False was
    # intended.
    time = np.linspace(start, start+step*10, num=10)
    X[i] = A*np.sin(2*np.pi*f*time)
    y[i] = A*np.cos(2*np.pi*f*time)
# -
# - Plot some samples from our generated dataset
# +
# Define a figure with 10 plots.
fig = plt.figure(figsize=(25,6))
columns = 9
samples = [100, 200, 300, 400, 500, 600, 700, 800, 900]
for i in range(9):
# Display the randomly selected image in a subplot
fig.add_subplot(2, columns, i+1)
plt.plot(np.arange(10), X[samples[i]])
for i in range(9):
# Display the randomly selected image in a subplot
fig.add_subplot(2, columns, i+10)
plt.plot(np.arange(10), y[samples[i]])
plt.show()
# -
# - Split data in train and test set and convert them in tensors.
# +
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train = torch.from_numpy(X_train)
y_train = torch.from_numpy(y_train)
X_test = torch.from_numpy(X_test)
y_test = torch.from_numpy(y_test)
# -
# - Define an LSTM
class LSTMNet(nn.Module):
    """Single-cell LSTM that maps each input timestep to one output value.

    Fixes the original defect where ``input_size`` and ``output_size`` were
    accepted by the constructor but ignored (both hard-coded to 1).
    """

    def __init__(self, input_size=1, hidden_layer_size=100, output_size=1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size
        # Honor the constructor arguments instead of hard-coded 1s; the
        # defaults reproduce the original architecture exactly.
        self.lstm = nn.LSTMCell(input_size, hidden_layer_size)
        self.linear = nn.Linear(hidden_layer_size, output_size)

    def forward(self, input, future = 0):
        """Run the LSTM over the sequence dimension (dim 1) of ``input``.

        ``future`` is kept for interface compatibility; it is unused here.
        Assumes input_size == 1, i.e. input of shape (batch, seq_len);
        returns a tensor of shape (batch, seq_len) for output_size == 1.
        """
        outputs = []
        # Hidden and cell states start at zero for each forward pass.
        h_t = torch.zeros(input.size(0), self.hidden_layer_size, dtype=torch.double)
        c_t = torch.zeros(input.size(0), self.hidden_layer_size, dtype=torch.double)
        # Feed the sequence one timestep (one column) at a time.
        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm(input_t, (h_t, c_t))
            output = self.linear(h_t)
            outputs += [output]
        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs
# - Define model parameters
model = LSTMNet().double()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# - Train the model
# +
train_losses = []
test_losses = []
epochs = 1500
for i in range(epochs):
optimizer.zero_grad()
out = model(X_train)
loss = criterion(out, y_train)
if i%100 == 0:
print('Train loss', loss.item())
loss.backward()
optimizer.step()
train_losses.append(loss.item())
with torch.no_grad():
pred = model(X_test)
loss = criterion(pred, y_test)
if i%100 == 0:
print('Test loss:', loss.item())
print()
test_losses.append(loss.item())
# -
train_losses
plt.rcParams['figure.figsize'] = [10, 5]
plt.plot(np.arange(len(train_losses)), train_losses)
plt.plot(np.arange(len(test_losses)), test_losses)
# - Plot some test predictions
# +
# Define a figure with 10 plots.
fig = plt.figure(figsize=(25,10))
columns = 5
rows = 3
samples = np.random.randint(0, 200, 15)
for i in range(15):
# Display the randomly selected image in a subplot
fig.add_subplot(rows, columns, i+1)
plt.axis('off')
with torch.no_grad():
pred = model(X_test[samples[i]].view(1,-1))
plt.plot(np.arange(10), pred[0])
plt.plot(np.arange(10), y_test[samples[i]])
# -
# We observe that the cosine prediction is very successful. The only point that is not predicted properly is the first, because the model has not enough data at this time.
| Lab2/prepare_lab/prepare_lab2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VAE on MNIST dataset
# In this homework, we play with VAE model on the MNIST dataset. The materials are from https://github.com/pytorch/examples/blob/master/vae/main.py.
#
# - We use python 3.0
# - With more explanations
# +
# load the modules
## load pytorch module
import torch
## load torch data manager: Dataset, DataLoader
from torch.utils.data import Dataset, DataLoader
## load neural network module
from torch import nn
from torch.nn import functional as F
## load optimizer
from torch import optim
## load modules for MNIST dataset
from torchvision import datasets, transforms
from torchvision.utils import save_image
## load other python modules
### plt to view the image
import matplotlib.pyplot as plt
# -
# ### load MNIST dataset
# +
## download MNIST trainign dataset
train_dataset: Dataset = datasets.MNIST("./data", train = True, download = True,
transform = transforms.ToTensor())
## class Dataset provides:
## - function "__len__": to get the size of the dataset
## - function "__getitem__": to get the data point by the index
## show the data size
print(train_dataset)
## show one data point
## each data point has two element: one is the image, one is which number it is.
## print the tensor size: (1, 28, 28):
## the first dim: 1 is the number of channel (only one) here
## the second and third dim: the width and height for this channel.
print(train_dataset[0][0].shape)
## print the number
print(train_dataset[0][1])
# -
## let's view this image
plt.imshow(train_dataset[0][0][0])
# ### Use Dataloader to wrap up the dataset
#
# During the learning process, we will use stochastic optimization, which means we will shuffle the samples and feed a small part of the dataset into the optimizer at a time. You can do this by yourself, but pytorch actually provides the data manager named Dataloader to simplify this process, and it is usually used together with the Dataset class.
## you can choose whatever batch_size you want.
## shuffle = True, means every epoch, we will shuffle the samplers
## by the sampling algorithm you or the default one.
train_loader = DataLoader(train_dataset, batch_size = 32, shuffle = True)
## BUG FIX: test_dataset was never defined, so the next line raised a
## NameError. Load the held-out MNIST split (train=False) before wrapping it.
test_dataset: Dataset = datasets.MNIST("./data", train = False, download = True,
                                       transform = transforms.ToTensor())
## we don't need to shuffle the samples during test.
test_loader = DataLoader(test_dataset, batch_size = 32, shuffle = False)
# ## VAE
# +
## All the neural networks are implemented as the class of nn.Module in pytorch.
## It need to provide the function named "forward" to declare how it transform a tensor.
class VAE(nn.Module):
    """Variational autoencoder for flattened 28x28 MNIST digits.

    Encoder: 784 -> 400 -> (mu, logvar), each of latent size 20.
    Decoder: 20 -> 400 -> 784 with a sigmoid output in (0, 1).
    """

    def __init__(self):
        super().__init__()
        # Encoder layers; fc21 and fc22 emit the mean and log-variance of
        # the approximate posterior q(z|x). 784 = 28 * 28 flattened pixels.
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)
        # Decoder layers, mirroring the encoder back to pixel space.
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)

    def encode(self, x):
        """Map a flattened image batch to (mu, logvar) of q(z|x).

        The latent coordinates are modeled as mutually independent
        Gaussians, so both outputs have shape (batch, 20).
        """
        hidden = F.relu(self.fc1(x))
        return self.fc21(hidden), self.fc22(hidden)

    def reparameterize(self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I).

        A single sample per data point approximates the ELBO gradient; the
        reparameterization keeps the sampling step differentiable with
        respect to mu and logvar.
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        """Map a latent batch back to per-pixel Bernoulli means p(x|z)."""
        hidden = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(hidden))

    def forward(self, x):
        """Encode, sample, decode; returns (reconstruction, mu, logvar)."""
        # Flatten whatever image shape arrives into (batch, 784) first.
        mu, logvar = self.encode(x.view(-1, 784))
        return self.decode(self.reparameterize(mu, logvar)), mu, logvar
model = VAE()
print(model)
# -
# ## Define the loss function
# +
## Now let's define the loss function of VAE.
## There are two parts:
## - the reconstruction loss, i.e., -log p(x|z)
## - the KL divergence towards the prior, i.e., KL(q(z|x) || p(z))
## binary cross entropy loss
# Summed binary cross-entropy acts as the negative reconstruction
# log-likelihood -log p(x|z) for Bernoulli pixel outputs.
reconst_loss = nn.BCELoss(reduction = 'sum')

def loss_function(recon_x, x, mu, logvar):
    """Return the negative ELBO: reconstruction loss + KL(q(z|x) || p(z))."""
    reconstruction = reconst_loss(recon_x, x.view(-1, 784))
    # Closed-form KL divergence between N(mu, sigma^2) and the N(0, I) prior.
    divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return reconstruction + divergence
# -
# ## Select the optimizer
## lots of the optimizers you can select in torch.
## nowadays, people use Adam a lot.
## An interesting thing: the first author of Adam is the same first author of VAE.
optimizer = optim.Adam(model.parameters(), lr = 1e-3)
# ## Let's start to train VAE!
# +
## Epoch: once we run through all the samples in the training dataset, we finish one epoch.
def train(epoch):
    """Run one full pass (epoch) of VAE training over train_loader.

    Prints the per-sample batch loss every 500 batches and the epoch's
    average per-sample loss at the end. Relies on the module-level
    model, optimizer, loss_function and train_loader.
    """
    ## [IMPORTANT]: in pytorch, the model only updates parameters while in training mode.
    model.train()
    train_loss = 0
    ## we ignore the labels of MNIST, and only use the images.
    for batch_id, (x, _) in enumerate(train_loader):
        ## [IMPORTANT]: gradients accumulate across backward() calls in pytorch,
        ## so they must be zeroed before computing the next batch's gradients.
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(x)
        # Negative ELBO: reconstruction loss + KL divergence (see loss_function).
        loss = loss_function(recon_batch, x, mu, logvar)
        ## back-propagate through the graph rooted at the loss tensor.
        loss.backward()
        ## then update the parameters via the optimizer.
        optimizer.step()
        ## accumulate the summed batch loss for the epoch average below.
        train_loss += loss.item()
        ## print the per-sample loss periodically.
        if batch_id % 500 == 0:
            print(f"Train Epoch {epoch} [{batch_id}]: loss {loss.item() / len(x)}")
    print(f"Epoch: {epoch} average loss: {train_loss / len(train_loader.dataset)}")
# -
for epoch in range(10):
train(epoch)
# ## Let's view the results
# +
## no grad means the computations below will not be used for optimization.
# %matplotlib inline
with torch.no_grad():
for _ in range(10):
sample = torch.randn(1, 20)
rdm_image = model.decode(sample).view(28, 28)
plt.imshow(rdm_image)
## [QUESTION*]: can you show the generated images given a specific number?
| homework/vae_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import burdock
import sklearn.datasets
import pandas as pd
from burdock.sql import (PandasReader,
execute_private_query)
from burdock.metadata.collection import CollectionMetadata, Table, Float
# Load the iris data into a DataFrame keyed by the sklearn feature names.
sklearn_dataset = sklearn.datasets.load_iris()
sklearn_df = pd.DataFrame(data=sklearn_dataset.data,
                          columns=sklearn_dataset.feature_names)
# Describe the table: 150 rows, each float column with its value bounds.
iris = Table("dbo", "iris", 150, [
    Float("sepal length (cm)", 4, 8),
    Float("sepal width (cm)", 2, 5),
    Float("petal length (cm)", 1, 7),
    Float("petal width (cm)", 0, 3)
])
# BUG FIX: `Collection` was never imported; the class imported above is
# CollectionMetadata, so use it to build the schema.
schema = CollectionMetadata([iris], "csv")
reader = PandasReader(schema, sklearn_df)
# Run a differentially private query with epsilon = 0.3.
rowset = execute_private_query(reader, schema, 0.3,
        'SELECT AVG("petal width (cm)") FROM dbo.iris')
# First row of the result is the header; the rest are values.
pd.DataFrame(rowset[1:], columns=rowset[0])
| samples/DP_SQL_sklearn_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/oniolalekan/CDL-Mobile-/blob/master/SpellChecker.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="FokfgGtew0sK" colab_type="text"
# # Creating a Spell Checker
# + [markdown] id="b93sl1LQw0sN" colab_type="text"
# The objective of this project is to build a model that can take a sentence with spelling mistakes as input, and output the same sentence, but with the mistakes corrected. The data that we will use for this project will be twenty popular books from [Project Gutenberg](http://www.gutenberg.org/ebooks/search/?sort_order=downloads). Our model is designed using grid search to find the optimal architecture, and hyperparameter values. The best results, as measured by sequence loss with 15% of our data, were created using a two-layered network with a bi-direction RNN in the encoding layer and Bahdanau Attention in the decoding layer. [FloydHub's](https://www.floydhub.com/) GPU service was used to train the model.
#
# The sections of the project are:
# - Loading the Data
# - Preparing the Data
# - Building the Model
# - Training the Model
# - Fixing Custom Sentences
# - Summary
# + id="u-G1w5GOw0sP" colab_type="code" colab={}
import pandas as pd
import numpy as np
import tensorflow as tf
import os
from os import listdir
from os.path import isfile, join
from collections import namedtuple
from tensorflow.python.layers.core import Dense
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
import time
import re
from sklearn.model_selection import train_test_split
# + [markdown] id="ouEtx5Daw0sV" colab_type="text"
# ## Loading the Data
# + id="RxDhz66PxVE9" colab_type="code" outputId="159dc3af-cb24-4097-c430-edee97429d94" colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import drive
drive.mount('/content/gdrive')
# + id="5k6jtljmw0sW" colab_type="code" colab={}
def load_book(path):
    """Return the full text of the book stored at *path* as one string."""
    book_file = os.path.join(path)
    with open(book_file) as handle:
        return handle.read()
# + id="E6t-MvH4w0sZ" colab_type="code" colab={}
# Collect all of the book file names
path = '/content/gdrive/My Drive/books/'
book_files = [f for f in listdir(path) if isfile(join(path, f))]
book_files = book_files[0:]
# + id="a0OIjs3gw0sc" colab_type="code" colab={}
# Load the books using the file names
books = []
for book in book_files:
books.append(load_book(path+book))
# + id="CIru3BS2w0sf" colab_type="code" outputId="4c8765d5-1bef-4b4c-e910-68d620865384" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Compare the number of words in each book
for i in range(len(books)):
print("There are {} words in {}.".format(len(books[i].split()), book_files[i]))
# + id="UMo6Lm7bw0sk" colab_type="code" outputId="96d325d6-2fab-4489-ab89-cc4f7ad47020" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Check to ensure the text looks alright
books[0][:500]
# + [markdown] id="pQUtDWbjw0sm" colab_type="text"
# ## Preparing the Data
# + id="sp8rV8NVw0sn" colab_type="code" colab={}
def clean_text(text):
    '''Remove unwanted characters and extra spaces from the text.

    Reproduces the original chain of re.sub calls: the sequential
    multi-character fixes keep their order, while the purely
    single-character maps (which have disjoint source characters and
    therefore commute) are folded into str.translate tables.
    '''
    text = re.sub(r'\n', ' ', text)
    text = re.sub(r'[{}@_*>()\\#%+=\[\]]', '', text)
    # RTF-style artefacts left over from the source files, in the original
    # substitution order (specific '92x forms before the generic '9x codes).
    for artefact, repl in (('a0', ''),
                           ("'92t", "'t"), ("'92s", "'s"),
                           ("'92m", "'m"), ("'92ll", "'ll"),
                           ("'91", ''), ("'92", ''),
                           ("'93", ''), ("'94", '')):
        text = text.replace(artefact, repl)
    # Pad sentence-ending punctuation with a space, then collapse space runs.
    for mark in '.!?':
        text = text.replace(mark, mark + ' ')
    text = re.sub(' +', ' ', text)
    text = re.sub('[0-9]', '', text)
    text = text.replace('\t', '')
    text = text.replace('!', '')
    # Lower-case exactly the letters the original covered (Q, X and Z were
    # not mapped in the original and remain untouched here).
    text = text.translate(str.maketrans('ABCDEFGHIJKLMNOPRSTUVWY',
                                        'abcdefghijklmnoprstuvwy'))
    text = re.sub(' +', ' ', text)
    # Accent folds plus outright deletions, all single-character maps.
    fold = {'À': 'à', 'Á': 'á', 'È': 'è', 'É': 'é', 'Ì': 'ì', 'Í': 'í',
            'Ò': 'ò', 'Ó': 'ó', 'Ù': 'ù', 'Ú': 'ú', 'ô': 'o', 'ń': 'n',
            'Ń': 'n', 'Ǹ': 'n', 'ǹ': 'n', 'Ẹ': 'ẹ'}
    drop = 'ʒ—-"\'א/\xa0\xad´’“”…\u2060∫\uf08d\ufeff'
    table = str.maketrans(fold)
    table.update({ord(ch): None for ch in drop})
    return text.translate(table)
# + id="JCkX8kV9w0sp" colab_type="code" colab={}
# Clean the text of the books
clean_books = []
for book in books:
clean_books.append(clean_text(book))
# + id="5h8SqjIfw0sr" colab_type="code" outputId="10b1f600-dddc-46e3-f133-7a265c8fcd6a" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Check to ensure the text has been cleaned properly
clean_books[0][:500]
# + id="vUFx2CYRw0su" colab_type="code" colab={}
# Create a dictionary to convert the vocabulary (characters) to integers
vocab_to_int = {}
count = 0
for book in clean_books:
for character in book:
if character not in vocab_to_int:
vocab_to_int[character] = count
count += 1
# Add special tokens to vocab_to_int
codes = ['<PAD>','<EOS>','<GO>']
for code in codes:
vocab_to_int[code] = count
count += 1
# + id="08VtqyAbw0sx" colab_type="code" outputId="5d00e02e-4140-4a3f-926c-c6c97c319a32" colab={"base_uri": "https://localhost:8080/", "height": 71}
# Check the size of vocabulary and all of the values
vocab_size = len(vocab_to_int)
print("The vocabulary contains {} characters.".format(vocab_size))
print(sorted(vocab_to_int))
# + [markdown] id="_hBSdBeuw0sz" colab_type="text"
# *Note: We could have made this project a little easier by using only lower case words and fewer special characters ($,&,-...), but I want to make this spell checker as useful as possible.*
# + id="48O2rITtw0s0" colab_type="code" colab={}
# Create another dictionary to convert integers to their respective characters
int_to_vocab = {}
for character, value in vocab_to_int.items():
int_to_vocab[value] = character
# + id="XzLi7uZtw0s2" colab_type="code" outputId="cb3cfcd2-bdc8-4478-83f7-d42e4682de1b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Split the text from the books into sentences.
sentences = []
for book in clean_books:
for sentence in book.split('. '):
sentences.append(sentence + '.')
print("There are {} sentences.".format(len(sentences)))
# + id="nVu0FA57w0s4" colab_type="code" outputId="75f507a1-380a-4aeb-b03b-fa9f5b219a5b" colab={"base_uri": "https://localhost:8080/", "height": 122}
# Check to ensure the text has been split correctly.
sentences[:5]
# + [markdown] id="VFvMy17dw0s7" colab_type="text"
# *Note: I expect that you have noticed the very ugly text in the first sentence. We do not need to worry about removing it from any of the books because we will be limiting our data to sentences that are shorter than it.*
# + id="1X-U0G-uw0s8" colab_type="code" colab={}
# Convert sentences to integers
int_sentences = []
for sentence in sentences:
int_sentence = []
for character in sentence:
int_sentence.append(vocab_to_int[character])
int_sentences.append(int_sentence)
# + id="n8LojWpKw0s-" colab_type="code" colab={}
# Find the length of each sentence
lengths = []
for sentence in int_sentences:
lengths.append(len(sentence))
lengths = pd.DataFrame(lengths, columns=["counts"])
# + id="2kzPC-C-w0tB" colab_type="code" outputId="be88c400-eb64-43b1-c40c-d885a6210d21" colab={"base_uri": "https://localhost:8080/", "height": 297}
lengths.describe()
# + id="BPyCU7DNw0tE" colab_type="code" outputId="df41f9e7-b745-4f2a-e027-cea536f9a614" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Limit the data we will use to train our model
max_length = 92
min_length = 10
good_sentences = []
for sentence in int_sentences:
if len(sentence) <= max_length and len(sentence) >= min_length:
good_sentences.append(sentence)
print("We will use {} to train and test our model.".format(len(good_sentences)))
# + [markdown] id="yS0WlQ9mw0tH" colab_type="text"
# *Note: I decided to not use very long or short sentences because they are not as useful for training our model. Shorter sentences are less likely to include an error and the text is more likely to be repetitive. Longer sentences are more difficult to learn due to their length and increase the training time quite a bit. If you are interested in using this model for more than just a personal project, it would be worth using these longer sentences, and much more training data, to create a more accurate model.*
# + id="F9pk7MgIw0tJ" colab_type="code" outputId="6be85d12-27e1-4768-afdf-f59530addf36" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Split the data into training and testing sentences
training, testing = train_test_split(good_sentences, test_size = 0.15, random_state = 2)
print("Number of training sentences:", len(training))
print("Number of testing sentences:", len(testing))
# + id="Fn7cH8ohw0tN" colab_type="code" colab={}
# Sort the sentences by length to reduce padding, which will allow the model to train faster
# sorted() with key=len is stable: sentences of equal length keep their
# original relative order, so this produces exactly the same lists as the
# original O(n * max_length) re-scan over every candidate length, but in
# O(n log n). All sentences already satisfy
# min_length <= len <= max_length thanks to the good_sentences filter.
training_sorted = sorted(training, key=len)
testing_sorted = sorted(testing, key=len)
# + id="MGOLVhjGw0tP" colab_type="code" outputId="f5243697-67f5-4981-d92e-da7110e9b891" colab={"base_uri": "https://localhost:8080/", "height": 102}
# Check to ensure the sentences have been selected and sorted correctly
for i in range(5):
print(training_sorted[i], len(training_sorted[i]))
# + id="ZL75Fia2w0tS" colab_type="code" colab={}
# Lower-case letters used for random insertions ('q' and 'z' were absent
# from the original list and are kept absent here).
letters = ['a','b','c','d','e','f','g','h','i','j','k','l','m',
           'n','o','p','r','s','t','u','v','w','x','y',]

def noise_maker(sentence, threshold):
    '''Relocate, remove, or add characters to create spelling mistakes.

    sentence: list of integer-encoded characters.
    threshold: probability in [0, 1] that each character is typed
        correctly; values >= 1 return the sentence unchanged.
    '''
    noisy_sentence = []
    i = 0
    while i < len(sentence):
        random = np.random.uniform(0, 1, 1)
        # Most characters will be correct since the threshold value is high
        if random < threshold:
            noisy_sentence.append(sentence[i])
        else:
            new_random = np.random.uniform(0, 1, 1)
            # ~33% chance characters will swap locations
            if new_random > 0.67:
                if i == (len(sentence) - 1):
                    # BUG FIX: the original used a bare `continue` here, which
                    # never advanced i and simply re-drew the random number,
                    # so the last character was usually typed anyway. Advance
                    # past it so it is genuinely dropped, as the comment says.
                    i += 1
                    continue
                else:
                    # if any other character, swap order with following character
                    noisy_sentence.append(sentence[i+1])
                    noisy_sentence.append(sentence[i])
                    i += 1
            # ~33% chance an extra lower case letter will be added to the sentence
            elif new_random < 0.33:
                random_letter = np.random.choice(letters, 1)[0]
                noisy_sentence.append(vocab_to_int[random_letter])
                noisy_sentence.append(sentence[i])
            # ~33% chance a character will not be typed
            else:
                pass
        i += 1
    return noisy_sentence
# + [markdown] id="iK1x_i4Qw0tU" colab_type="text"
# *Note: The noise_maker function is used to create spelling mistakes that are similar to those we would make. Sometimes we forget to type a letter, type a letter in the wrong location, or add an extra letter.*
# + id="7lJiKF_8w0tV" colab_type="code" outputId="c5528c73-83a1-4064-eddf-98ec0e5f2da0" colab={"base_uri": "https://localhost:8080/", "height": 272}
# Check to ensure noise_maker is making mistakes correctly.
# With threshold = 0.9, roughly 10% of the characters should be altered.
threshold = 0.9
for sentence in training_sorted[:5]:
    print(sentence)
    print(noise_maker(sentence, threshold))
    print()
# + [markdown] id="htwbHNF9w0tY" colab_type="text"
# # Building the Model
# + id="kt6Yhwohw0tY" colab_type="code" colab={}
def model_inputs():
    '''Create placeholders for the model's inputs, targets and hyperparameters.

    Returns:
        inputs: int32 [batch, time] noisy input sequences.
        targets: int32 [batch, time] correct target sequences.
        keep_prob: scalar dropout keep probability.
        inputs_length, targets_length: int32 [batch] true sequence lengths.
        max_target_length: scalar, longest target length in the batch.
    '''
    with tf.name_scope('inputs'):
        inputs = tf.placeholder(tf.int32, [None, None], name='inputs')
    with tf.name_scope('targets'):
        targets = tf.placeholder(tf.int32, [None, None], name='targets')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    inputs_length = tf.placeholder(tf.int32, (None,), name='inputs_length')
    targets_length = tf.placeholder(tf.int32, (None,), name='targets_length')
    # Longest target in the current batch; bounds the decoding loop.
    max_target_length = tf.reduce_max(targets_length, name='max_target_len')
    return inputs, targets, keep_prob, inputs_length, targets_length, max_target_length
# + id="hKcqcrPSw0tb" colab_type="code" colab={}
def process_encoding_input(targets, vocab_to_int, batch_size):
    '''Remove the last word id from each batch and concat the <GO> to the beginning of each batch.

    Produces the decoder input: the target sequence shifted right by one
    position, starting with the <GO> token.
    '''
    with tf.name_scope("process_encoding"):
        # Drop the final time step of every target sequence.
        ending = tf.strided_slice(targets, [0, 0], [batch_size, -1], [1, 1])
        # Prepend a column of <GO> tokens.
        dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)
    return dec_input
# + id="ZhoIQ6gow0td" colab_type="code" colab={}
def encoding_layer(rnn_size, sequence_length, num_layers, rnn_inputs, keep_prob, direction):
    '''Create the encoding layer.

    direction == 1 builds a unidirectional LSTM encoder; direction == 2 builds
    a bidirectional one whose forward/backward outputs are concatenated.

    NOTE(review): each iteration of the num_layers loop feeds rnn_inputs (not
    the previous layer's output), so only the last iteration's output/state is
    returned -- confirm this is the intended stacking behavior.
    '''
    if direction == 1:
        with tf.name_scope("RNN_Encoder_Cell_1D"):
            for layer in range(num_layers):
                with tf.variable_scope('encoder_{}'.format(layer)):
                    lstm = tf.contrib.rnn.LSTMCell(rnn_size)
                    # Dropout is applied to the cell inputs only.
                    drop = tf.contrib.rnn.DropoutWrapper(lstm,
                                                         input_keep_prob = keep_prob)
                    enc_output, enc_state = tf.nn.dynamic_rnn(drop,
                                                              rnn_inputs,
                                                              sequence_length,
                                                              dtype=tf.float32)
            return enc_output, enc_state
    if direction == 2:
        with tf.name_scope("RNN_Encoder_Cell_2D"):
            for layer in range(num_layers):
                with tf.variable_scope('encoder_{}'.format(layer)):
                    cell_fw = tf.contrib.rnn.LSTMCell(rnn_size)
                    cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw,
                                                            input_keep_prob = keep_prob)
                    cell_bw = tf.contrib.rnn.LSTMCell(rnn_size)
                    cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw,
                                                            input_keep_prob = keep_prob)
                    enc_output, enc_state = tf.nn.bidirectional_dynamic_rnn(cell_fw,
                                                                            cell_bw,
                                                                            rnn_inputs,
                                                                            sequence_length,
                                                                            dtype=tf.float32)
            # Join outputs since we are using a bidirectional RNN
            enc_output = tf.concat(enc_output,2)
            # Use only the forward state because the model can't use both states at once
            return enc_output, enc_state[0]
# + id="USVJJmQxw0tf" colab_type="code" colab={}
def training_decoding_layer(dec_embed_input, targets_length, dec_cell, initial_state, output_layer,
                            vocab_size, max_target_length):
    '''Create the training logits.

    Uses teacher forcing: TrainingHelper feeds the ground-truth target
    embeddings to the decoder at every time step.
    '''
    with tf.name_scope("Training_Decoder"):
        training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
                                                            sequence_length=targets_length,
                                                            time_major=False)
        training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
                                                           training_helper,
                                                           initial_state,
                                                           output_layer)
        # impute_finished zeros outputs past each sequence's end.
        training_logits, _ ,_ = tf.contrib.seq2seq.dynamic_decode(training_decoder,
                                                                  output_time_major=False, impute_finished=True,
                                                                  maximum_iterations=max_target_length)
        return training_logits
# + id="7-xMiwyJw0ti" colab_type="code" colab={}
def inference_decoding_layer(embeddings, start_token, end_token, dec_cell, initial_state, output_layer,
                             max_target_length, batch_size):
    '''Create the inference logits.

    GreedyEmbeddingHelper feeds the decoder its own (embedded) argmax
    prediction from the previous step, starting from the start token and
    stopping when end_token is produced.
    '''
    with tf.name_scope("Inference_Decoder"):
        # One start token per batch element.
        start_tokens = tf.tile(tf.constant([start_token], dtype=tf.int32), [batch_size], name='start_tokens')
        inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embeddings,
                                                                    start_tokens,
                                                                    end_token)
        inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
                                                            inference_helper,
                                                            initial_state,
                                                            output_layer)
        inference_logits, _ ,_ = tf.contrib.seq2seq.dynamic_decode(inference_decoder, output_time_major=False,
                                                                   impute_finished=True, maximum_iterations=max_target_length)
        return inference_logits
# + id="6oHGNfatw0tm" colab_type="code" colab={}
def decoding_layer(dec_embed_input, embeddings, enc_output, enc_state, vocab_size, inputs_length, targets_length,
                   max_target_length, rnn_size, vocab_to_int, keep_prob, batch_size, num_layers, direction):
    '''Create the decoding cell and attention for the training and inference decoding layers.

    Builds an LSTM decoder with Bahdanau attention over the encoder outputs,
    then creates two decoders that share weights (variable scope "decode"):
    one teacher-forced for training, one greedy for inference.
    '''
    with tf.name_scope("RNN_Decoder_Cell"):
        # NOTE(review): as in the encoder, each iteration rebinds dec_cell,
        # so only the last layer's cell survives the loop -- confirm intended.
        for layer in range(num_layers):
            with tf.variable_scope('decoder_{}'.format(layer)):
                lstm = tf.contrib.rnn.LSTMCell(rnn_size)
                dec_cell = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob = keep_prob)
    # Projects decoder outputs to vocabulary-sized logits.
    output_layer = Dense(vocab_size,
                         kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))
    attn_mech = tf.contrib.seq2seq.BahdanauAttention(rnn_size,
                                                     enc_output,
                                                     inputs_length,
                                                     normalize=False,
                                                     name='BahdanauAttention')
    with tf.name_scope("Attention_Wrapper"):
        dec_cell = tf.contrib.seq2seq.AttentionWrapper(dec_cell,
                                                       attn_mech,
                                                       rnn_size)
    # Start decoding from the encoder's final state.
    initial_state = dec_cell.zero_state(batch_size=batch_size,dtype=tf.float32).clone(cell_state=enc_state)
    with tf.variable_scope("decode"):
        training_logits = training_decoding_layer(dec_embed_input,
                                                  targets_length,
                                                  dec_cell,
                                                  initial_state,
                                                  output_layer,
                                                  vocab_size,
                                                  max_target_length)
    # reuse=True so inference shares the training decoder's weights.
    with tf.variable_scope("decode", reuse=True):
        inference_logits = inference_decoding_layer(embeddings,
                                                    vocab_to_int['<GO>'],
                                                    vocab_to_int['<EOS>'],
                                                    dec_cell,
                                                    initial_state,
                                                    output_layer,
                                                    max_target_length,
                                                    batch_size)
    return training_logits, inference_logits
# + id="LMfUQbcCw0to" colab_type="code" colab={}
def seq2seq_model(inputs, targets, keep_prob, inputs_length, targets_length, max_target_length,
                  vocab_size, rnn_size, num_layers, vocab_to_int, batch_size, embedding_size, direction):
    '''Use the previous functions to create the training and inference logits.

    Embeds the character-id inputs and targets, runs the encoder, then builds
    the attention decoder on top of the encoder outputs.
    '''
    # Encoder-side embedding lookup.
    enc_embeddings = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1, 1))
    enc_embed_input = tf.nn.embedding_lookup(enc_embeddings, inputs)
    enc_output, enc_state = encoding_layer(rnn_size, inputs_length, num_layers,
                                           enc_embed_input, keep_prob, direction)
    # Decoder-side embedding; decoder input is targets shifted right with <GO>.
    dec_embeddings = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1, 1))
    dec_input = process_encoding_input(targets, vocab_to_int, batch_size)
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
    training_logits, inference_logits = decoding_layer(dec_embed_input,
                                                       dec_embeddings,
                                                       enc_output,
                                                       enc_state,
                                                       vocab_size,
                                                       inputs_length,
                                                       targets_length,
                                                       max_target_length,
                                                       rnn_size,
                                                       vocab_to_int,
                                                       keep_prob,
                                                       batch_size,
                                                       num_layers,
                                                       direction)
    return training_logits, inference_logits
# + id="ZdgrBxihw0tq" colab_type="code" colab={}
def pad_sentence_batch(sentence_batch):
    """Pad sentences with <PAD> so that each sentence of a batch has the same length"""
    longest = max(len(s) for s in sentence_batch)
    pad_id = vocab_to_int['<PAD>']
    return [s + [pad_id] * (longest - len(s)) for s in sentence_batch]
# + id="1CA_67ugw0tw" colab_type="code" colab={}
def get_batches(sentences, batch_size, threshold):
    """Batch sentences, noisy sentences, and the lengths of their sentences together.
    With each epoch, sentences will receive new mistakes.

    Yields tuples (noisy_batch, target_batch, noisy_lengths, target_lengths),
    where both batches are <PAD>-padded numpy arrays.
    """
    for batch_i in range(0, len(sentences)//batch_size):
        start_i = batch_i * batch_size
        sentences_batch = sentences[start_i:start_i + batch_size]
        # Inject fresh spelling mistakes for this epoch.
        sentences_batch_noisy = [noise_maker(sentence, threshold) for sentence in sentences_batch]
        # BUGFIX: build the <EOS>-terminated targets with list concatenation.
        # The original used sentence.append(vocab_to_int['<EOS>']), which
        # mutated the caller's sentences in place and stacked one extra <EOS>
        # onto every sentence each epoch.
        sentences_batch_eos = [sentence + [vocab_to_int['<EOS>']] for sentence in sentences_batch]
        pad_sentences_batch = np.array(pad_sentence_batch(sentences_batch_eos))
        pad_sentences_noisy_batch = np.array(pad_sentence_batch(sentences_batch_noisy))
        # Need the lengths for the *_length placeholder parameters.
        pad_sentences_lengths = [len(sentence) for sentence in pad_sentences_batch]
        pad_sentences_noisy_lengths = [len(sentence) for sentence in pad_sentences_noisy_batch]
        yield pad_sentences_noisy_batch, pad_sentences_batch, pad_sentences_noisy_lengths, pad_sentences_lengths
# + [markdown] id="9LgCvdgew0ty" colab_type="text"
# *Note: This set of values achieved the best results.*
# + id="P3C0YXc4w0tz" colab_type="code" colab={}
# The default parameters (this combination achieved the best results)
epochs = 100             # maximum passes over the training data
batch_size = 128
num_layers = 2
rnn_size = 512           # LSTM hidden units
embedding_size = 128     # character embedding dimension
learning_rate = 0.0005
direction = 2            # 2 = bidirectional encoder, 1 = unidirectional
threshold = 0.95         # probability a character is typed correctly (noise_maker)
keep_probability = 0.75  # dropout keep probability during training
# + id="MxHfmWmOw0t1" colab_type="code" colab={}
def build_graph(keep_prob, rnn_size, num_layers, batch_size, learning_rate, embedding_size, direction):
    '''Build the full training graph and return its key nodes as a namedtuple.'''
    tf.reset_default_graph()
    # Load the model inputs
    inputs, targets, keep_prob, inputs_length, targets_length, max_target_length = model_inputs()
    # Create the training and inference logits.
    # Inputs are reversed along the time axis (a common seq2seq trick).
    training_logits, inference_logits = seq2seq_model(tf.reverse(inputs, [-1]),
                                                      targets,
                                                      keep_prob,
                                                      inputs_length,
                                                      targets_length,
                                                      max_target_length,
                                                      len(vocab_to_int)+1,
                                                      rnn_size,
                                                      num_layers,
                                                      vocab_to_int,
                                                      batch_size,
                                                      embedding_size,
                                                      direction)
    # Create tensors for the training logits and inference logits
    training_logits = tf.identity(training_logits.rnn_output, 'logits')
    with tf.name_scope('predictions'):
        predictions = tf.identity(inference_logits.sample_id, name='predictions')
        tf.summary.histogram('predictions', predictions)
    # Create the weights for sequence_loss: mask out positions past each target's end.
    masks = tf.sequence_mask(targets_length, max_target_length, dtype=tf.float32, name='masks')
    with tf.name_scope("cost"):
        # Loss function
        cost = tf.contrib.seq2seq.sequence_loss(training_logits,
                                                targets,
                                                masks)
        tf.summary.scalar('cost', cost)
    with tf.name_scope("optimze"):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        # Gradient Clipping to [-5, 5] to stabilize training
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)
    # Merge all of the summaries
    merged = tf.summary.merge_all()
    # Export the nodes as a namedtuple so callers can access them as graph.<node>
    export_nodes = ['inputs', 'targets', 'keep_prob', 'cost', 'inputs_length', 'targets_length',
                    'predictions', 'merged', 'train_op','optimizer']
    Graph = namedtuple('Graph', export_nodes)
    local_dict = locals()
    graph = Graph(*[local_dict[each] for each in export_nodes])
    return graph
# + [markdown] id="US36orVHw0t3" colab_type="text"
# ## Training the Model
# + id="97GWB7yXw0t4" colab_type="code" colab={}
def train(model, epochs, log_string):
    '''Train the RNN.

    Runs up to `epochs` passes over training_sorted, logging TensorBoard
    summaries, evaluating on testing_sorted several times per epoch, and
    checkpointing whenever the testing loss reaches a new minimum.
    Training stops early after `stop` evaluations without improvement.
    '''
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Used to determine when to stop the training early
        testing_loss_summary = []
        # Keep track of which batch iteration is being trained
        iteration = 0
        display_step = 30 # The progress of the training will be displayed after every 30 batches
        stop_early = 0
        stop = 5 # If the batch_loss_testing does not decrease in 5 consecutive checks, stop training
        per_epoch = 3 # Test the model 3 times per epoch
        testing_check = (len(training_sorted)//batch_size//per_epoch)-1
        print()
        print("Training Model: {}".format(log_string))
        train_writer = tf.summary.FileWriter('./logs/1/train/{}'.format(log_string), sess.graph)
        test_writer = tf.summary.FileWriter('./logs/1/test/{}'.format(log_string))
        for epoch_i in range(1, epochs+1):
            batch_loss = 0
            batch_time = 0
            for batch_i, (input_batch, target_batch, input_length, target_length) in enumerate(
                    get_batches(training_sorted, batch_size, threshold)):
                start_time = time.time()
                summary, loss, _ = sess.run([model.merged,
                                             model.cost,
                                             model.train_op],
                                            {model.inputs: input_batch,
                                             model.targets: target_batch,
                                             model.inputs_length: input_length,
                                             model.targets_length: target_length,
                                             model.keep_prob: keep_probability})
                batch_loss += loss
                end_time = time.time()
                batch_time += end_time - start_time
                # Record the progress of training
                train_writer.add_summary(summary, iteration)
                iteration += 1
                if batch_i % display_step == 0 and batch_i > 0:
                    print('Epoch {:>3}/{} Batch {:>4}/{} - Loss: {:>6.3f}, Seconds: {:>4.2f}'
                          .format(epoch_i,
                                  epochs,
                                  batch_i,
                                  len(training_sorted) // batch_size,
                                  batch_loss / display_step,
                                  batch_time))
                    batch_loss = 0
                    batch_time = 0
                #### Testing ####
                # NOTE(review): the inner loop below reuses the name batch_i,
                # which also feeds n_batches_testing afterwards -- confirm intended.
                if batch_i % testing_check == 0 and batch_i > 0:
                    batch_loss_testing = 0
                    batch_time_testing = 0
                    for batch_i, (input_batch, target_batch, input_length, target_length) in enumerate(
                            get_batches(testing_sorted, batch_size, threshold)):
                        start_time_testing = time.time()
                        # No train_op and keep_prob=1: evaluation only, no dropout.
                        summary, loss = sess.run([model.merged,
                                                  model.cost],
                                                 {model.inputs: input_batch,
                                                  model.targets: target_batch,
                                                  model.inputs_length: input_length,
                                                  model.targets_length: target_length,
                                                  model.keep_prob: 1})
                        batch_loss_testing += loss
                        end_time_testing = time.time()
                        batch_time_testing += end_time_testing - start_time_testing
                        # Record the progress of testing
                        test_writer.add_summary(summary, iteration)
                    n_batches_testing = batch_i + 1
                    print('Testing Loss: {:>6.3f}, Seconds: {:>4.2f}'
                          .format(batch_loss_testing / n_batches_testing,
                                  batch_time_testing))
                    batch_time_testing = 0
                    # If the batch_loss_testing is at a new minimum, save the model
                    testing_loss_summary.append(batch_loss_testing)
                    if batch_loss_testing <= min(testing_loss_summary):
                        print('New Record!')
                        stop_early = 0
                        checkpoint = "./{}.ckpt".format(log_string)
                        saver = tf.train.Saver()
                        saver.save(sess, checkpoint)
                    else:
                        print("No Improvement.")
                        stop_early += 1
                        if stop_early == stop:
                            break
            if stop_early == stop:
                print("Stopping Training.")
                break
# + id="bNZirZ93w0t6" colab_type="code" outputId="5df9d056-f301-43f6-beea-fa0b6c773abb" colab={"base_uri": "https://localhost:8080/", "height": 513}
# Train the model with the desired tuning parameters.
# The nested loops make it easy to grid-search several values;
# here each list holds the single best value found.
for keep_probability in [0.75]:
    for num_layers in [2]:
        for threshold in [0.95]:
            log_string = 'kp={},nl={},th={}'.format(keep_probability,
                                                    num_layers,
                                                    threshold)
            model = build_graph(keep_probability, rnn_size, num_layers, batch_size,
                                learning_rate, embedding_size, direction)
            train(model, epochs, log_string)
# + [markdown] id="NUiGRAhrw0t9" colab_type="text"
# ## Fixing Custom Sentences
# + id="X_Z8pek9w0t-" colab_type="code" colab={}
def text_to_ints(text):
    '''Prepare the text for the model: clean it, then map each character to its id.'''
    cleaned = clean_text(text)
    return [vocab_to_int[character] for character in cleaned]
# + id="wb0vIxLBw0uB" colab_type="code" outputId="40a6e733-e4b0-4564-be32-33941efbdca6" colab={"base_uri": "https://localhost:8080/", "height": 190}
# Create your own sentence or use one from the dataset
text = "Spellin is difficult, whch is wyh you need to study everyday."
text = text_to_ints(text)
#random = np.random.randint(0,len(testing_sorted))
#text = testing_sorted[random]
#text = noise_maker(text, 0.95)
# Restore the best checkpoint saved by train() after rebuilding a matching graph.
checkpoint = "./kp=0.75,nl=2,th=0.95.ckpt"
model = build_graph(keep_probability, rnn_size, num_layers, batch_size, learning_rate, embedding_size, direction)
with tf.Session() as sess:
    # Load saved model
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint)
    # Multiply by batch_size to match the model's input parameters
    answer_logits = sess.run(model.predictions, {model.inputs: [text]*batch_size,
                                                 model.inputs_length: [len(text)]*batch_size,
                                                 model.targets_length: [len(text)+1],
                                                 model.keep_prob: [1.0]})[0]
# Remove the padding from the generated sentence
pad = vocab_to_int["<PAD>"]
print('\nText')
print(' Word Ids: {}'.format([i for i in text]))
print(' Input Words: {}'.format("".join([int_to_vocab[i] for i in text])))
print('\nSummary')
print(' Word Ids: {}'.format([i for i in answer_logits if i != pad]))
print(' Response Words: {}'.format("".join([int_to_vocab[i] for i in answer_logits if i != pad])))
# + [markdown] id="scz5YuOxw0uG" colab_type="text"
# Examples of corrected sentences:
# - Spellin is difficult, whch is wyh you need to study everyday.
# - Spelling is difficult, which is why you need to study everyday.
#
#
# - The first days of her existence in th country were vrey hard for Dolly.
# - The first days of her existence in the country were very hard for Dolly.
#
#
# - Thi is really something impressiv thaat we should look into right away!
# - This is really something impressive that we should look into right away!
# + [markdown] id="iUbV_Kclw0uI" colab_type="text"
# ## Summary
# + [markdown] id="I4zLA9qBw0uJ" colab_type="text"
# I hope that you have found this project to be rather interesting and useful. The example sentences that I have presented above were specifically chosen, and the model will not always be able to make corrections of this quality. Given the amount of data that we are working with, this model still struggles. For it to be more useful, it would require far more training data, and additional parameter tuning. The parameter values that I have above worked best for me, but I expect there are even better values that I was not able to find.
#
# Thanks for reading!
| SpellChecker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# Setting the random seed, feel free to change it and see different solutions.
np.random.seed(42)
# +
def stepFunction(t):
    """Heaviside step activation: 1 for t >= 0, otherwise 0."""
    return 1 if t >= 0 else 0
def prediction(X, W, b):
    """Predict the label of a single sample as step(X . W + b)."""
    score = np.matmul(X, W)[0] + b
    return stepFunction(score)
# TODO: Fill in the code below to implement the perceptron trick.
# The function should receive as inputs the data X, the labels y,
# the weights W (as an array), and the bias b,
# update the weights and bias W, b, according to the perceptron algorithm,
# and return W and b.
def perceptronStep(X, y, W, b, learn_rate = 0.01):
    """Run one pass of the perceptron update rule over the dataset.

    Misclassified positive points pull the boundary toward them (weights
    increased); misclassified negative points push it away (weights
    decreased). Correctly classified points leave W and b unchanged.
    """
    for features, label in zip(X, y):
        error = label - prediction(features, W, b)
        if error == 1:
            # False negative: nudge the boundary toward this point.
            W[0] += features[0] * learn_rate
            W[1] += features[1] * learn_rate
            b += learn_rate
        elif error == -1:
            # False positive: nudge the boundary away from this point.
            W[0] -= features[0] * learn_rate
            W[1] -= features[1] * learn_rate
            b -= learn_rate
    return W, b
# This function runs the perceptron algorithm repeatedly on the dataset,
# and returns a few of the boundary lines obtained in the iterations,
# for plotting purposes.
# Feel free to play with the learning rate and the num_epochs,
# and see your results plotted below.
def trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 25):
    """Repeatedly apply the perceptron step and collect the boundary lines.

    Returns a list of (slope, intercept) pairs -- the decision boundary after
    each epoch -- for plotting purposes.
    """
    x_min, x_max = min(X.T[0]), max(X.T[0])
    y_min, y_max = min(X.T[1]), max(X.T[1])
    # Random initial weights; the bias starts beyond the right edge of the data.
    W = np.array(np.random.rand(2, 1))
    b = np.random.rand(1)[0] + x_max
    # These are the solution lines that get plotted below.
    boundary_lines = []
    for _ in range(num_epochs):
        # In each epoch, we apply the perceptron step.
        W, b = perceptronStep(X, y, W, b, learn_rate)
        boundary_lines.append((-W[0] / W[1], -b / W[1]))
    print("Boundary Lines.")
    return boundary_lines
# -
X = np.array([[ 0.78051,-0.063669 ],
[ 0.28774, 0.29139 ],
[ 0.40714, 0.17878 ],
[ 0.2923, 0.4217],
[ 0.50922, 0.35256 ],
[ 0.27785, 0.10802 ],
[ 0.27527, 0.33223 ],
[ 0.43999, 0.31245 ],
[ 0.33557, 0.42984 ],
[ 0.23448, 0.24986 ],
[ 0.0084492, 0.13658 ],
[ 0.12419, 0.33595 ],
[ 0.25644, 0.42624 ],
[ 0.4591, 0.40426 ],
[ 0.44547, 0.45117 ],
[ 0.42218, 0.20118 ],
[ 0.49563, 0.21445 ],
[ 0.30848, 0.24306 ],
[ 0.39707, 0.44438 ],
[ 0.32945, 0.39217 ],
[ 0.40739, 0.40271 ],
[ 0.3106, 0.50702 ],
[ 0.49638, 0.45384 ],
[ 0.10073, 0.32053 ],
[ 0.69907, 0.37307 ],
[ 0.29767, 0.69648 ],
[ 0.15099, 0.57341 ],
[ 0.16427, 0.27759 ],
[ 0.33259, 0.055964 ],
[ 0.53741, 0.28637 ],
[ 0.19503, 0.36879 ],
[ 0.40278, 0.035148 ],
[ 0.21296, 0.55169 ],
[ 0.48447, 0.56991 ],
[ 0.25476, 0.34596 ],
[ 0.21726, 0.28641 ],
[ 0.67078, 0.46538 ],
[ 0.3815, 0.4622],
[ 0.53838, 0.32774 ],
[ 0.4849, 0.26071 ],
[ 0.37095, 0.38809 ],
[ 0.54527, 0.63911 ],
[ 0.32149, 0.12007 ],
[ 0.42216, 0.61666 ],
[ 0.10194, 0.060408 ],
[ 0.15254, 0.2168],
[ 0.45558, 0.43769 ],
[ 0.28488, 0.52142 ],
[ 0.27633, 0.21264 ],
[ 0.39748, 0.31902 ],
[ 0.5533, 1. ],
[ 0.44274, 0.59205 ],
[ 0.85176, 0.6612],
[ 0.60436, 0.86605 ],
[ 0.68243, 0.48301 ],
[ 1., 0.76815 ],
[ 0.72989, 0.8107],
[ 0.67377, 0.77975 ],
[ 0.78761, 0.58177 ],
[ 0.71442, 0.7668],
[ 0.49379, 0.54226 ],
[ 0.78974, 0.74233 ],
[ 0.67905, 0.60921 ],
[ 0.6642, 0.72519 ],
[ 0.79396, 0.56789 ],
[ 0.70758, 0.76022 ],
[ 0.59421, 0.61857 ],
[ 0.49364, 0.56224 ],
[ 0.77707, 0.35025 ],
[ 0.79785, 0.76921 ],
[ 0.70876, 0.96764 ],
[ 0.69176, 0.60865 ],
[ 0.66408, 0.92075 ],
[ 0.65973, 0.66666 ],
[ 0.64574, 0.56845 ],
[ 0.89639, 0.7085],
[ 0.85476, 0.63167 ],
[ 0.62091, 0.80424 ],
[ 0.79057, 0.56108 ],
[ 0.58935, 0.71582 ],
[ 0.56846, 0.7406],
[ 0.65912, 0.71548 ],
[ 0.70938, 0.74041 ],
[ 0.59154, 0.62927 ],
[ 0.45829, 0.4641],
[ 0.79982, 0.74847 ],
[ 0.60974, 0.54757 ],
[ 0.68127, 0.86985 ],
[ 0.76694, 0.64736 ],
[ 0.69048, 0.83058 ],
[ 0.68122, 0.96541 ],
[ 0.73229, 0.64245 ],
[ 0.76145, 0.60138 ],
[ 0.58985, 0.86955 ],
[ 0.73145, 0.74516 ],
[ 0.77029, 0.7014],
[ 0.73156, 0.71782 ],
[ 0.44556, 0.57991 ],
[ 0.85275, 0.85987 ],
[ 0.51912, 0.62359 ]])
y = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.
, 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.
, 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
, 0., 0., 0., 0.])
# Run the perceptron on the dataset defined above; returns 25 boundary lines.
trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 25)
# # Note: The plotting part is pending. The above boundary points make no sense without the plots to display them properly.
| lessons/neural_networks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import Modules
# +
import os
import glob
import pandas as pd
import numpy as np
import dabest
import datetime
print("We're using DABEST v{}".format(dabest.__version__))
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
# +
# Get analysis date (used to timestamp the exported results files)
now = datetime.datetime.now()
analysis_date = now.strftime("%Y%m%d")
path = os.path.abspath('')+'/CSVs/'
full_df = pd.DataFrame()
list_ = []
# For loop to bring in files and concatenate them into a single dataframe
for file_ in glob.glob(path + "/*.csv"):
    df = pd.read_csv(file_)
    # Determine Image name from file name
    df['Image'] = os.path.splitext(os.path.basename(file_))[0]
    # Split values in ROI label (format "Fluor:ROI")
    df['Fluor'], df['ROI'] = zip(*df['Label'].map(lambda x: x.split(':')))
    # Split values in Image name column
    # (underscore-delimited: ExptDate_Treatment_Dose_Stains_Embryo_Somites_Section)
    (df['ExptDate'], df['Treatment'], df['Dose'], df['Stains'], df['Embryo'],
     df['Somites'], df['Section']) = zip(*df['Image'].map(lambda x: x.split('_')))
    # Replace background ROI names
    # NOTE: I have updated the Fiji macro ('FluorIntensity_2Channel.ijm') to name all background ROIs as 'background',
    # so this step will be unnecessary with freshly collected data
    #df.replace(to_replace=['back1a', 'back1b', 'back1c', 'back2a', 'back2b', 'back2c'],
    #           value=['background', 'background', 'background', 'background', 'background', 'background'],
    #           inplace=True)
    list_.append(df)
full_df = pd.concat(list_)
full_df.head()
# +
# Get a list of treatments
treatment_list = full_df.Treatment.unique()
treatment_list = treatment_list.tolist()
# Mean background values and group by Treatment, Embryo, Fluor, ROI and Section.
# FIX: select the aggregation columns with a list ([['...']]); tuple-style
# selection gb['A', 'B', 'C'] is deprecated/removed in modern pandas.
mean_sections = (full_df.groupby(['Treatment', 'Embryo', 'Fluor', 'ROI', 'Section', 'ExptDate'])
                 [['Area', 'Mean', 'IntDen']].mean())
# Loop through treatments, performing each analysis and exporting a CSV file for each treatment
for i in treatment_list:
    # Slice dataframe to process only embryos with given treatment
    treatment = i
    df_treatment = pd.DataFrame(mean_sections.xs(treatment))
    # Determine CTCF values = ROI IntDen - (background mean * ROI area)
    # Calculate background (background mean * ROI area)
    background_corr_cntl = (df_treatment.xs('background', level='ROI')['Mean']
                            * df_treatment.xs('Cntl', level='ROI')['Area'])
    background_corr_expt = (df_treatment.xs('background', level='ROI')['Mean']
                            * df_treatment.xs('Expt', level='ROI')['Area'])
    # Slice out only Cntl or Expt values in IntDen
    intdens_cntl = df_treatment.xs('Cntl', level='ROI')['IntDen']
    intdens_expt = df_treatment.xs('Expt', level='ROI')['IntDen']
    # Subtract background from IntDens to determine CTCF and concatenate into single dataframe
    sub_cntl = pd.DataFrame(intdens_cntl - background_corr_cntl)
    sub_expt = pd.DataFrame(intdens_expt - background_corr_expt)
    full_ctcf = pd.concat([sub_cntl, sub_expt], keys = ['Cntl', 'Expt'])
    full_ctcf.columns = ['CTCF']
    # Pull out BREGFP and H2BRFP values
    ctcf_BRE = full_ctcf.xs('BREGFP', level='Fluor')['CTCF']
    ctcf_RFP = full_ctcf.xs('H2BRFP', level='Fluor')['CTCF']
    # Normalize for electroporation efficiency by determining TCFLef/RFP
    electroporation_norm = pd.DataFrame(ctcf_BRE / ctcf_RFP)
    electroporation_norm.columns = ['CTCF']
    electroporation_norm.index.names = ['Side', 'Embryo', 'Section', 'ExptDate']
    # Average sections grouped by embryos before generating Expt/Cntl ratio
    averaged_sections = electroporation_norm.groupby(['Side','Embryo', 'ExptDate']).mean()
    # Pull out Cntl and Expt CTCFs
    ctcf_cntl = averaged_sections.xs('Cntl', level='Side')['CTCF']
    ctcf_expt = averaged_sections.xs('Expt', level='Side')['CTCF']
    # Generate ratios as Expt/Cntl
    ratios_sections = pd.DataFrame(ctcf_expt / ctcf_cntl)
    ratios_sections.columns = ['Expt/Cntl CTCF']
    # Normalize individual values to mean of control group
    norm_cntl = ctcf_cntl/(float(ctcf_cntl.mean()))
    norm_cntl = pd.DataFrame(norm_cntl)
    norm_cntl.columns = ['Control MO']
    norm_expt = ctcf_expt/(float(ctcf_cntl.mean()))
    norm_expt = pd.DataFrame(norm_expt)
    norm_expt.columns = ['nSMase2 MO']
    # Combine processed values into single dataframe and output as csv file
    ctcf_cntl = pd.DataFrame(ctcf_cntl)
    ctcf_cntl.columns = ['Cntl CTCF']
    ctcf_expt = pd.DataFrame(ctcf_expt)
    ctcf_expt.columns = ['Expt CTCF']
    results = (pd.concat([ctcf_cntl, ctcf_expt, ratios_sections, norm_cntl, norm_expt], axis=1, sort=True)).reset_index()
    results['ID'] = results.ExptDate.str.cat(results.Embryo)
    results.to_csv(analysis_date + '_' + treatment + '_CTCFResults.csv')
results
# -
# **Plotting and Statistical Analysis using DABEST**
# Optionally reload previously exported results instead of recomputing:
# results = pd.read_csv('20190823_nSMase2MO_CTCFResults.csv')
results.head()
# Load the paired Control/nSMase2 columns into DABEST for estimation statistics
results = dabest.load(results, idx=('Control MO', 'nSMase2 MO')
                      ,id_col='ID', paired=True)
results.mean_diff.statistical_tests
# +
# Estimation plot: swarm of normalized intensities plus the paired mean
# difference with its bootstrap confidence interval (Gardner-Altman style).
fig1 = results.mean_diff.plot(
    # Set overall figure parameters
    dpi=200
    ,fig_size=(3,3)
    # Edit legend features, use matplotlib.Axes.legend kwargs in dictionary format
    # ,legend_kwargs={'loc':'upper left'
    #                 ,'frameon':True}
    # Edit 0 line features, use matplotlib.Axes.hlines kwargs in dictionary format
    ,reflines_kwargs= {'linestyle':'dashed'
                       ,'linewidth':.8
                       ,'color' : 'black'}
    # Set swarm plot parameters
    ,swarm_label='Norm. BREGFP Intensity'
    # ,swarm_ylim=(0,1.5)
    ,show_pairs=False # connect paired points? Yes (True), no (False)
    # ,color_col='ID' # color points based on defined column identifier
    ,custom_palette={'Control MO':'#747575'
                     ,'nSMase2 MO':'#139604'}
    ,swarm_desat=1
    ,group_summaries='mean_sd' # display mean+/-sd as bars next to swarm plots
    ,group_summaries_offset=0.15
    # Edit swarmplot features, use seaborn.swarmplot kwargs in dictionary format
    ,swarmplot_kwargs={'size':7}
    # Edit group summary line features, use matplotlib.lines.Line2D kwargs in dictionary format
    ,group_summary_kwargs={'lw':3
                           ,'alpha':.7}
    # Set effect size plot parameters
    ,float_contrast=True # displays mean difference next to graph (True) or below graph (False)
    ,contrast_label='mean difference'
    ,es_marker_size=9
    ,halfviolin_desat=1
    ,halfviolin_alpha=0.8
    # Edit violin features, use sns.violinplot kwargs in dictionary format
    ,violinplot_kwargs={'widths':0.5}
    # Edit legend features, use matplotlib.Axes.legend kwargs in dictionary format
    # ,legend_kwargs={'loc':'upper left'
    #                 ,'frameon':True}
    # Edit slopegraph features, use kwargs in dictionary format
    # ,slopegraph_kwargs={'color':'blue'}
    )
# -
# +
# Alternative presentation: boxplot of the normalized Control vs morphant values.
results = pd.read_csv('20190823_nSMase2MO_CTCFResults.csv')
# Build our plot of these data
# first, define figure size and style
plt.figure(figsize=(3,6))
sns.set(style='white'
        ,rc={'font.family':'sans-serif'
             ,'axes.edgecolor':'black'
             }
        )
# customize style and text on each axis
plt.xticks(size=16
           ,weight='bold'
           ,color='black'
           ,rotation='vertical')
# plt.ylim(0,1.6)
plt.yticks([0, 0.5, 1, 1.5]
           ,size=16
           ,weight='bold'
           ,color='black'
           )
plt.ylabel('Norm. BRE::GFP Intensity'
           ,size=18
           ,weight='bold'
           ,color='black'
           ,labelpad=8
           )
# make color palette to label each dataset
# add more colors if using more datasets
my_pal = ('#EDF8E9','#30A354')
# finally, make the boxplot
# NOTE(review): iloc[:,5:7] selects columns 5-6 by position -- presumably the
# 'Control MO' and 'nSMase2 MO' columns of the exported CSV; confirm.
myplot = sns.boxplot(data=results.iloc[:,5:7]
                     ,palette=my_pal
                     ,saturation=1
                     ,width=0.7
                     ,boxprops = {'edgecolor': 'black', 'linewidth': 2}
                     ,whiskerprops = {'color': 'black', 'linewidth':2}
                     ,medianprops = {'color': 'black', 'linewidth':2}
                     ,capprops = {'color': 'black', 'linewidth':2}
                     )
# use this segment if you want to add a jitterplot overtop of the box plot
# myplot = sns.swarmplot(data=results.iloc[:,5:7],
#                        color='#252525',
#                        size=8)
# adjust the axes around the box: despine to remove the top and right line, then for loop to update the x- and y-axis
sns.despine()
for axis in ['top','bottom','left','right']:
    myplot.spines[axis].set_linewidth(3)
# tighten the figure boundaries to the graph
plt.tight_layout()
# +
# Get descriptive statistics
# NOTE(review): `results_6ss` is not defined anywhere in this notebook as shown --
# presumably a somite-stage-filtered subset of `results`; confirm the cell
# that creates it was not deleted.
mean = results_6ss.mean()
sem = results_6ss.sem()
corr = results_6ss.corr()
print(mean)
print(sem)
print(corr)
# Perform two-tailed paired ttest from results
ttest = stats.ttest_rel(results_6ss['Control MO'], results_6ss['nSMase2 MO'])
print(ttest)
# -
results_6ss.head()
len(results_6ss['Embryo'])
| Figure3/BREGFPIntensity/.ipynb_checkpoints/20191007_BREGFPAnalysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="gEfZf48Wu5f0"
# 
#
# # Implicit Recommendation from ECommerce Data
#
# Some of the material for this work is based on [A Gentle Introduction to Recommender Systems with Implicit Feedback](https://jessesw.com/Rec-System/) by <NAME>. This tutorial includes an implementation of the Alternating Least Squares algorithm and some other useful functions (like the area under the curve calculation). Other parts of the tutorial are based on a previous version of the Implicit library and had to be reworked.
#
# The dataset used for this work is from Kaggle [E-Commerce Data, Actual transactions from UK retailer](https://www.kaggle.com/carrie1/ecommerce-data)
#
# + [markdown] id="OApEEC0_wB4C"
# # Global Imports
# + id="fsb9emt6nrPu"
import pandas as pd
import numpy as np
import random
from matplotlib import pyplot as plt
import implicit
import scipy
from sklearn import metrics
from pandas.api.types import CategoricalDtype
import wandb
# -
# %run Common-Functions.ipynb
# ## Hyperparameter Tuning with Weights & Biases
#
# +
# W&B sweep definition: Bayesian optimization over the ALS hyperparameters,
# maximizing the "prediction_auc" metric that each run logs.
sweep_config = {
    "method": "bayes", # search strategy; alternatives: "grid", "random"
    "metric": {"name": "prediction_auc", "goal": "maximize"},
    "parameters": {
        # fraction of interactions masked out for evaluation
        "percent_test": {"min":0.1, "max":0.3},
        # confidence scaling applied to purchase counts before fitting
        "alpha": {"min":1, "max":30 },
        # number of latent factors in the ALS model
        "factors" : {
            "values" : [64, 128]
        },
        "regularization": {"min":0.01, "max":.2},
        "iterations": {"min":20, "max":100}
    },
}

# Register the sweep with the "ecommerce" W&B project; returns the sweep id.
sweep_id = wandb.sweep(sweep_config, project="ecommerce")
def sweep():
    """Run one W&B sweep trial.

    Builds the product-by-invoice purchase matrix from the pickled invoice
    data, fits an implicit-feedback ALS model with the hyperparameters
    supplied via ``wandb.config``, and logs the held-out mean AUC as
    ``prediction_auc``. Relies on ``make_train`` and ``calc_mean_auc``
    defined in Common-Functions.ipynb (pulled in via %run above).
    """
    # Initialize a new wandb run; sweep parameters arrive via wandb.config
    with wandb.init() as run:
        selected_df = pd.read_pickle('../data/interim/ecommerce/selected_invoices.gz')
        invoices = list(np.sort(selected_df.InvoiceNo.unique())) # unique invoices (one per basket)
        products = list(selected_df.StockCode.unique()) # unique products that were purchased
        quantity = list(selected_df.Quantity) # purchase quantities (the matrix entries)
        # column indices: one column per invoice
        cols = selected_df.InvoiceNo.astype(CategoricalDtype(categories=invoices, ordered=True)).cat.codes
        # row indices: one row per product
        rows = selected_df.StockCode.astype(CategoricalDtype(categories=products, ordered=True)).cat.codes
        # products x invoices sparse matrix of purchase quantities
        purchases_sparse = scipy.sparse.csr_matrix((quantity, (rows, cols)), shape=(len(products), len(invoices)))
        # mask a fraction of the interactions for held-out evaluation
        product_train, product_test, products_altered, transactions_altered = make_train(purchases_sparse, pct_test = wandb.config['percent_test'])
        model = implicit.als.AlternatingLeastSquares(factors=wandb.config['factors'],
                                                     regularization=wandb.config['regularization'],
                                                     iterations=wandb.config['iterations'])
        # alpha scales the raw counts into confidence weights before fitting
        alpha = wandb.config['alpha']
        model.fit((product_train * alpha).astype('double'))
        user_vecs = model.user_factors
        item_vecs = model.item_factors
        # mean AUC on altered (held-out) entries; `popular` is the
        # popularity-baseline AUC returned alongside it (unused here)
        test, popular = calc_mean_auc(product_train, products_altered,
                                      [scipy.sparse.csr_matrix(item_vecs), scipy.sparse.csr_matrix(user_vecs.T)], product_test)
        wandb.log({
            'prediction_auc': test
        })


# Launch up to 100 runs of the sweep defined above.
wandb.agent(sweep_id, sweep, count=100)
# -
| notebooks/ECommerce-Tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro to pandas
#
# #### Review and Outline
#
# Great Work! We have made it this far...we know some basic calculations, built-in data types and structures (lists, tuples, strings, dictionaries), we also know some key operations if else conditional operations, for loops, etc.
#
# Where are we going now...we will get into the key data-manipulation package in python: **Pandas**.
#
# What is pandas??? "pa"nel "da"ta "s"tructures. A powerful, intuitive data analysis
# tool. This package convinced me to learn and start to use python as a research tool.
# Developed at [AQR](https://www.aqr.com/) (a quantitative hedge fund) by [<NAME>](http://wesmckinney.com). They made it open source, and it quickly developed further and became widely used.
#
# [This notebook largely follows the discussion in the Book.](https://nyudatabootcamp.gitbooks.io/data-bootcamp/content/py-fun2.html)
#
# #### Python
#
# First we need to import the pandas package...very similar to when we imported
# our functions, but this is a MUCH larger one. Furthermore, this is what makes pandas
# a higher-level add-on to python. That is, at a lower level the objects, methods, functions...
# are already created for us, so when we import pandas they are ready to go.
#
# Then we will learn the key data structures in **Pandas** and their attributes and methods. Moreover, we will learn how to select data in **`DataFrame`** and then do computations afterwards.
#
# **Buzzwords.** DataFrame, Series
#
# ---
# ## Basics
# This says import the package `pandas` then the "as pd" says call it `pd` (our alias)
# this just simplifies our life without having to always be typing `pandas`, we just
# type `pd`. IF you're lost on this, go back to our chapter on [importing packages](https://nyudatabootcamp.gitbooks.io/data-bootcamp/content/packages.html).
#
# Lets first get to know the two most important data structures in `Pandas`.
import pandas as pd
import numpy as np
# ### Series
# The Series is the primary building block of pandas. A Series represents a one-dimensional labeled indexed array based on the NumPy ndarray. It can be created and initialized by passing either a scalar value, a NumPy ndarray, a Python list, or a Python Dict.
# create a series from a dictionary
gdp = {"GDP": [5974.7, 10031.0, 14681.1]}
# what kind of data structure is this
print(type(gdp))

# +
# 'name' parameter specifies the column name of the series object.
# NOTE(review): the dict's single value is a *list*, so this Series has one
# element (labelled "GDP") whose value is the whole list -- not three numeric
# rows. If three rows were intended, pass the list itself, e.g.
# pd.Series(gdp["GDP"], name='GDP'). Verify which behavior the lesson wants.
gdp_s= pd.Series(gdp,name='GDP')
print(type(gdp_s))
gdp_s
# -
# It should tell us that it is a dictionary, with keys and values (which are lists). How do we get those?
# +
# Build a Series from a NumPy one-dimensional array.
cpi = np.array([127.5, 169.3, 217.488])
cpi_s = pd.Series(cpi, name="CPI")

# Build Series from plain Python lists; `name` becomes the column label
# if the Series is later placed into a DataFrame.
year = [1990, 2000, 2010]
country = ["US", "US", "US"]
year_s = pd.Series(year, name="Year")
country_s = pd.Series(country, name="Country")
# -

print(cpi_s)
# #### From Series to DataFrame
# A `DataFrame` is essentially just a table of data, or a dictionary of `Series` while a `Series` can be thought of as a one columned `DataFrame`.
#
# Let's create a DataFrame from the series previously created by pandas [concat](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.concat.html) methods. It concatenates pandas objects, e.g., Series, DataFrame along a particular axis with optional set logic along the other axes.
#
# I know it might seem a little bit overwhelming right now, e.g., new methods and not understanding what does **axis** mean here? But let's see the examples by setting different axis parameters at first. We will discuss this over and over later.
# axis=1 concatenates side by side: each Series becomes a column,
# aligned on the shared 0..2 index.
Series_Df = pd.concat([year_s,country_s],axis=1)
print(Series_Df)

# axis=0 stacks vertically: one long object with the original 0..2
# row labels repeated.
Series_Df = pd.concat([year_s,country_s],axis=0)
print(Series_Df)
# Besides, we can also convert `Dataframe` to `Series` as well via just selecting one column of a `DataFrame`. We will go back with this shortly.
# ---
# ## DataFrame
# Example data: three yearly US observations of nominal GDP and CPI.
data = {"GDP": [5974.7, 10031.0, 14681.1],
        "CPI": [127.5, 169.3, 217.488],
        "Year": [1990, 2000, 2010],
        "Country": ["US", "US", "US"]}

# Now we are going to convert the type of data to a "DataFrame"; this is the key object within pandas. (If familiar with R, this is similar to their data frame.)
#

df = pd.DataFrame(data)

# +
print("\n", type(df))

# Now lets see how cool this is: return to the original data and look at it
print(data)
# -

data

df

# In python remember the dataframe is an object, and with that object come methods and attributes (we have seen fewer attributes, but lots of methods)
#

print(df.shape)

# Note that this is an attribute, not a method, as a method
# takes in arguments through () whereas this just asks what the shape of df is
print(df.columns) # which returns an Index object...but we can get it to a list.
print(df.columns.tolist())
print(df.index) # which is like a range type, but within pandas...
print(df.index.tolist())
print(df.dtypes) # this is an attribute on the dataframe, similar to type

# So this is interesting: for the numerical values it says that they are floating
# point values, which is great. For the names (strings) it says that they are objects,
# NOT strings? Pandas does this: (i) if all the data in a column is a number, then
# it is recognized as a number; (ii) if not, then it is just going to be an object
# ---
# ### Time to practice
#
# **Exercises.** Consider the following data below:
# +
# A small Penn World Table-style sample: two countries, three years each.
pwt_data = {
    "countrycode": ["CHN", "CHN", "CHN", "FRA", "FRA", "FRA"],
    "pop": [1124.8, 1246.8, 1318.2, 58.2, 60.8, 64.7],
    "rgdpe": [2.611, 4.951, 11.106, 1.294, 1.753, 2.032],
    "year": [1990, 2000, 2010, 1990, 2000, 2010],
}

# One row per (country, year) observation; one column per variable.
pwt = pd.DataFrame(pwt_data)
# -
# a) What are the dimensions of pwt?
# b) What dtypes are the variables? What do they mean?
# c) What does pwt.columns.tolist() do? How does it compare to list(pwt)?
# d) Challenging. What is list(pwt)[0]? Why? What type is it?
# e) Challenging. What would you say is the natural index? How would you set it?
# ---
# ## Understanding DataFrame
df

# This lays out the data in a very intuitive way; columns will be labeled as they were in the excel file. Rows are labeled with unique identifiers as well, called the “index.” We have already learned how to retrieve the index of a `DataFrame` by `df.index`.
#
# Amazing. You may be thinking...
# so what? Well, there is a reason why excel is popular: it is natural for people
# to think about data in a table-like format, and a dataframe is always going to
# present this in this intuitive, natural way. This is also important because it
# helps us visualize and then implement calculations and operations on the table,
# whereas this could be very hard to do with the data variable above.
#
# Therefore, we call the `DataFrame` a symmetric and indexed version of a spreadsheet
#

# ### Play with Columns

# #### Grab one column

# Attribute-style access (works when the column name is a valid identifier)
df.CPI

# Bracket access: the general way to grab a single column (returns a Series)
df['CPI']

# Positional access with iloc: all rows of the first column
df.iloc[:,0]

# Since `DataFrame` is like an excel sheet, think about the first input in the above bracket as the index for rows while the second is for columns. Here, we want to select all the rows in the first column.
#
# Remember python index starts with **0**.
#
# Regarding `:`, it's similar to what we learned in the Python Fundamentals 2 **Slicing** section.
#

# #### Grab several columns

df[["CPI","Country"]]

# We can also do this with iloc
df.iloc[:,0:2]

# How about grabbing columns like CPI, GDP, can we do this with **`iloc`** at once?

# Now we might notice that we can almost use `iloc` and other methods to grab rows interchangeably. However, we might consider that by specifying the column name, it is easier for us to **debug** in the future. Think about it: once you change the sequence of columns, everything won't work.

# #### Reset the column name
#

# +
# NOTE(review): this positional assignment assumes the columns come out in
# alphabetical order (CPI, Country, GDP, Year), as in pre-0.23 pandas.
# Modern pandas preserves dict insertion order (GDP, CPI, Year, Country),
# so these labels would be attached to the wrong columns -- verify before
# running on a current pandas version.
df.columns = ["cpi", "country", "gdp", "year"]

# What if the elements here were fewer than the number of columns?

# Here we can use a list comprehension to change the names in columns in the
# way we want...
df.columns = [var.upper() for var in df.columns]

df
# -

# Another way to rename specific instances... note that if we did not have the df
# in front, nothing would fundamentally change; it would just copy and print out
# the new one, but the saved df would stay the same...

# +
df = df.rename(columns = {"GDP":"NGDP"})

df

# +
# Selecting with a list of names also works when the list is a variable.
namelist = ["NGDP","CPI"]

df[namelist]
# -
# ### Play with Rows
# Below is an example of setting the index. This is a feature that I'm slowly starting to embrace. The idea, essentially, is that by setting the index we can use the `.loc` or location-finding command to pull out only specific entries on a row. For example, if we only want year 2000, then we
# - set the index to be the year
# - then use `.loc` to pull out that particular year.
#
# How would you do this for the country? Same idea: note that we set the index to countrycode, then select the country code that we want.
#
# Two more points about this:
# - One is that we can multi-index, that is have layers of indexes...why would we want to do this. This relates to the question of the "natural index"...
#
# I would argue the natural index would be on the level of an observation. What does that mean? Think about the data set above, what is an observation look like and what are the variables associated with it. Here an observation is a country time pair. Note an observation is two dimensions a country at a particular time. Then the variables associated with each observation are population, gdp. Back to the natural index, given this argument above, I would actually say that it is a multi index with countries and years.
# - MTWN: This discussion relates to this concept of ["tidy data" which is discusses nicely here](http://vita.had.co.nz/papers/tidy-data.pdf).
#
pwt

# +
# Index by year, then use .loc to pull every observation for year 2000.
pwt.set_index(["year"]).loc[2000]
#pwt.set_index(["year"], inplace = True)
#pwt.set_index(["countrycode","year"]).xs(2000, level = "year")

# +
# pwt.set_index?
# -

# Why is the index back to the original? Well, it's just like string methods: the original data frame is not fundamentally changed. To change it you need to either (i) assign the modified data frame to itself or to a new name, or (ii) use the inplace = True option, in which case it does not create a new object but directly creates the new index on the old object.

# We can also achieve the above via just **`loc`** with a boolean mask.

pwt.loc[pwt['year']==2000]

# This will also work: plain bracket indexing accepts the same boolean mask.

pwt[pwt['year']==2000]

# Can we use `loc` to achieve something similar to `iloc`, e.g., selecting a column?

# Yes, but we have to use different inputs: labels for loc, positions for iloc.

pwt.loc[:,'year']

# For iloc: the same column selected by position ('year' is the 4th column).
pwt.iloc[:,3]
# ### Remove Stuff by Column or Row
# How do we remove stuff, well there is the `.drop` method. In addition, we come across the `axis` parameter again. Let's become familar with it.
# Reset the df DataFrame (back to the original raw columns)
df=pd.DataFrame(data)

df

# Can you guess what will happen, if...
# axis=1 means drop a *column* labelled "CPI"
df.drop("CPI", axis = 1)

df.drop(0, axis = 0) # the first 0 here means we want drop the first row which is indexed by 0

# Note: neither drop above used inplace/reassignment, so df is unchanged here.
df

# Now, we can conclude: if we want to perform some operations columnwise, we should set **axis** = 1 while for row-wise, **axis** = 0. We will see more examples in the `DataFrame` calculations part to help us grasp the idea.

# ---
# ### Time to practice
#
# **Exercise.** For the DataFrame df, create a variable diff equal to the difference of ngdp and rgdp. Verify that diff is now in df.

# **Exercise.** How would you extract the variables ngdp and year?
#
# **Exercise** How would you extract all variables for just one year, say 2000?
#
# **Exercise** How would you drop the variable diff? If you print your dataframe again, is it gone? If not, why do you think it is still there?
#
# **Exercise** How would you drop one year from the data set?

# Hint: the key thing to recognize is the axis; this is saying drop a column named "CPI".
# If you did this without the axis it would give an error. Why? The default is
# axis = 0, which refers to rows...and there is no row index named "CPI".

# ---
# ## Calculations on a Dataframe
#
# Below are a bunch of calculations. This is essentially the "excel" functionality of the data frame.

# +
print(type(df["GDP"]))

# then it is easy to do operations on a series...
print(df["GDP"] + df["GDP"])
print(df["GDP"] / df["CPI"]) # This would be real gdp
print(100*df["GDP"] / df["GDP"][0]) # what is this doing...if you remember from EGB
# This is a way to index GDP by the first entry...

# Then it is super easy to create a new column based on an operation on existing
# columns, almost excel-like...
df['RGDP'] = df['GDP']/df['CPI']
df['GDP_div_1000'] = df['GDP'] / 1000
print("\n",df) # so there is a new column called real gdp now...
# See the digression in the book....I don't mind doing this...
# What do you mean by this? I did not find it in the book Tinghao
# -

df

# ### Operations across rows/columns
#
# Here again, we need to set the **axis** parameter. Remember, for row-wise computations we need to set it to 0, and vice versa.

# Can you think of the execution results?
# axis=0: sum down each column (one total per column)
df.sum(axis=0)

# How about this one? Can it even be executed? Remember we have one column with string values.
# axis=1: sum across each row (one total per row)
df.sum(axis=1)

# Yes, it can. It just ignores the column with string values. Amazing!
# NOTE(review): recent pandas versions are stricter about aggregating
# mixed-dtype frames (numeric_only is no longer silently assumed) --
# verify on the pandas version in use.

# ---
# ### Time to practice
#
# **Exercise.** Can you compute the mean of each column of df?

# **Exercise.** Can you select the year 2010 and compute the row sum of df?

# **Exercise (Challenging).** Can you compute the mean of GDP in df where GDP is larger than 6000?

# ---
# ## Simple Statistics
#
# Here are some simple commands that can report basic summary statistics of the data:

# +
# Column means wrapped into a one-column DataFrame, indexed by column name.
test = pd.DataFrame(df.mean())

test.loc["CPI"]
# -

sumstate = df.describe() # This one creates a dataframe. Could grab what we want from there

type(sumstate)

print(sumstate)

df

# **Exercise.** Compute the summary statistics (for the pwt data frame). Write these summary stats to an excel sheet. Can you do this only for China?

# ---
# ## Output/Save Data
#
# We can output data in an easy way as well with these commands. Note that each creates the file within your working directory unless you specify otherwise...

# +
pwt.to_csv("pwt.csv")
# Note: to_excel needs an Excel writer engine (e.g. openpyxl) installed.
pwt.to_excel("pwt.xlsx")
# -
# ---
# ## Summary
# **Congratulations!** First, it's amazing that you have made it this far. Reflect on what you knew before working through this notebook, namely what we did in python fundamental notebooks. Now reflect on what you can do...AMAZING!!! Let us summarize some key things that we covered.
#
# * **Pandas Objects**: A `DataFrame` is essentially just a table of data while a `Series` can be thought of as a one columned `DataFrame`.
#
# * **Understanding `DataFrame`**:
# * Become familiar with basic attributes (`.columns`, `.shape`) and methods (`.sum()`, `.mean()`) in `DataFrame` data structure.
# * Know different methods to grab columns and rows, e.g., their pros and cons, especially for the differences between `iloc` and `loc`. They look familiar but the inputs for the two methods are very different. `loc` gets rows (or columns) with particular labels from the index, while `iloc` gets rows (or columns) at particular positions in the index (so it only takes integers).
#
#
# * **Axis Understanding**: when setting **axis**, always think about the operation first, whether it will be done column-wise or row-wise. If the former, setting axis = 1. For this course and the majority of dataframe, the **axis** will always be **0** or **1**.
#
#
| book_notebooks/.ipynb_checkpoints/intro_to_pandas-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Critique of a Data Product (Annual Report)
#
# > Communicating about data is a special art...
# - toc: true
# - branch: master
# - badges: false
# - comments: true
# - hide: false
# - search_exclude: true
# - metadata_key1: metadata_value1
# - metadata_key2: metadata_value2
# - image: images/dpc_fig1_replacement.png
# - categories: [Translation Industry, Data Product,Visualization]
# - show_tags: true
# ## Purpose
# This project involves the critique of the format and contents of the annual report of a non-profit. The annual report is about the operations of an initiative known as MissionTran. The need that the client has is to evaluate the current structure and contents of the annual report in the light of best-practice data communication standards, as well as obtain recommendations to improve the data communication aspects of it. A copy of the annual report may be found [here](/portfolio/AnnualReport2018.pdf).
# ## Description of the Data Product
# The MissionTran project uses a team of volunteers to proofread machine-translated sermons into a number of languages. Users proofread and correct machine-translated sentences one-by-one. Each proofreading effort to a sentence is called a *contribution* and comes in the form of an *edit*.
#
# There are two kinds of contributions: *Votes* and *Creates*. A vote contribution happens when the user decides to simply vote for the provided translation, i.e. the user deems the translation to be correct. A create contribution happens when the user edits the translation to correct it.
# There are a number of *bases* upon which a contribution is made. For *vote* contributions, the bases are:
# - By *accepting* the machine-translated sentence as is (‘a’)
# - By *creating* a new edit (‘c’)
# - By *topping*, i.e. by voting for the current edit with the most votes (‘t’)
# - By *picking* another edit to vote for (‘p’)
# For create contributions the bases are:
# - By *clearing* (‘k’)
# - By *modifying* (‘m’)
# A number of metrics are collected by the system:
# - time used to make a vote contribution
# - time used to make a create contribution
# - vote time spent to proofread a complete translation (assignment)
# - create time spent to proofread a complete translation (assignment)
# The components of the system may be summarized as follows:
# - a Translation has many
# - Sentences has many
# - Edits has many
# - Contributions
# - a Translation has many
# - Assignments has many
# - Contributions
# - A User has many
# - Assignments
# ## Finding Your Purpose and Message
# ### Actionable data
# The location of *actionable data* in the annual report is not straightforward. Most charts seem to simply dump data. Only some charts seem to suggest its use as a trigger for action. One such chart is the report's Figure 12 which shows the number of contributions by username by contribution kind:
# 
# This chart shows how productive users were by showing the total number of contributions each made. This number is further broken down into *vote* contribution and *create* contributions.
# ### Balance
# There should be a balance between the representations for the *data*, the *author*, and the *audience*. In the report, the balance is tilted strongly in the direction of the data. There is a strong impression that all available data has simply been dropped into the document. The consumer is then expected to pick out pieces for consumption.
#
# The document does not reveal much about the objectives of the *author*, except maybe to communicate the progress of the translation team.
#
# There is an implication that the *audience’s* need is simply to know the progress of the project.
# ### Audience factors
# The *role* of the audience seems to be stakeholders interested in the status of the project. There is hardly any structure that would make it easy and direct to answer high-priority questions.
#
# The *workflow* of the audience might be catered for as the information is presented in the form of charts that can be accessed easily in an office setting.
#
# There is no elaboration on the *data comfort and skills* of the audience. The author does point out that “The workings of the translation system will not be presented here as most stakeholders are well aware of the details.” This suggests that the audience possess the *industry and data expertise* and does not need embedded explanations.
# ## Information Discrimination
# There is no identifiable core problem/theme. The report is simply a series of charts that try to convey the performance of the team. There is not a good separation of *reporting* from *exploration*. Some charts, like Figure 10, are totally unnecessary. This chart should have at least been pushed to an appendix but there is no appendix. Here is Figure 10:
# 
# Figure 10 seems much too technical and does not belong with the remaining charts.
# There is a section (at the end) on “Day of Week.” The chart in this section (Figure 13), breaks down contribution effort (in seconds) by day-of-week by contribution kind:
# 
# It seems unnatural to do this kind of breakdown. Why would the distribution of effort per contribution vary with day of week? I cannot think of a fundamental reason why a user’s need for time to proofread a specific sentence would vary by day-of-week. This is not a meaningful chart. It should be thrown out.
#
# With a stretch of imagination one could think of the following scenario. Let us say on Mondays, there are always a series of celebrations in the large conference room next door. This might be distracting to the proofreaders. This kind of scenario, however, should be investigated in the 'backroom'. It should not be part of regular annual report.
# If the author happens to insist on keeping this chart, it should be changed considerably. Its current form hardly shows the required information. The presence of a few outliers compresses the real data so much that nothing is revealed. Here is a first suggestion. The weekdays are ordered, the axis labels are improved, and a measure of transparency is given to the markers. This helps to show how much overlap of points there are:
# 
# Even better, we can provide a small amount of sideways random jitter to help reveal the overlap:
# 
# If we remove the outliers, the chart becomes more effective:
# 
# We may also use a boxplot which is commonly used in a case like this:
# 
# A violin plot will also be effective:
# 
# The latest plots all reveal that there is no real variation in contribution effort per sentence over day-of-week. This was certainly not evident from the original chart.
# ## Defining Meaningful and Actionable Metrics
# The area of metrics is the strong point of the document. Metrics are:
# - not too *simplistic*
# - not overly *complex*
# - not too *many*
# - none are *vanity* metrics
# The metrics have a *common interpretation*, e.g.
# - time used to make a vote contribution
# - time used to make a create contribution
# - vote time spent to proofread a complete translation (assignment)
# - create time spent to proofread a complete translation (assignment)
# - total vote time spent during an assignment.
# Metrics are mostly *actionable*; e.g. how much time is spent per sentence.
# Metrics are *accessible* and also *transparent* with simple calculations.
# ## Creating Structure and Flow to your Data Products
# There is a simplistic logical structure and *no narrative* at all – very poor!
# The author could have chosen one of many narrative flows, e.g. to
# - show how machine-translated assisted proofreading is more efficient than humans translating from scratch
# - show how the productivity of users varies by language
# - show the work patterns of users, i.e. a steady amount each workday, or only on one day per week, etc.
# There is no meaningful flow however. There is no notion of the “guided safari” storytelling, or even of a traditional story. This is a glaring weakness in the data product.
# ## Designing Attractive, Easy-to-understand Data Products
# The presentation looks rough and unfinished (low aesthetic value). Given the author’s comment about the stakeholders’ understanding of the workings of the system, this might be overlooked somewhat.
#
# There is no need for *connectivity*, *data detail*, *interactivity*, and *mobility* as this is an annual report.
#
# The use of *color* in charts was mostly acceptable.
# The author often did not choose the most appropriate chart type, e.g. in Figures 1 and 2 a line chart should have been used instead of the heat maps:
# 
# If the author insists on having a heat map, it could have been made simpler and more effective:
# 
# The chart type for Figure 6 was chosen poorly and the presence of a few outliers hide most of the real data:
# 
# The following chart would have been more effective (after removing the outliers):
# 
# ## Creating Dialogue with Your Data Products
# In general, there is not a lot of *chart junk*. The author mostly used sufficient *contrast*. *Readability of labels* is generally poor. A serious omission is that no chart has a heading! For example, Figure 11:
# 
# The time units are always in seconds which is often inappropriate. Although the meaning of variables is explained, it is bothersome that they appear in their “software” form on axes, as in Figure 8:
# 
# Sentence types are not defined anywhere.
# *Sorting for comprehension* could have been done in Figure 5 and 12:
# 
# 
# In addition, monotone *color variants* could have been used in Figure 12.
# ## Summary
# The annual report showcases a lot of data. The disappointing part is that it has been done ineffectively and unprofessionally.
# The main weaknesses are:
# - No purpose or message
# - Use of data indiscriminately
# - Weak structure and no narrative flow
# - No mentionable design
# - Will not trigger conversation and dialogue in a straightforward way
# The one strength:
# - Meaningful metrics
| _notebooks/2018-11-07-DataProductCritique.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3
# ---
# # Objective:
#
# Using different classification models such as:
#
# - KNN
# - Decision Tree
# - SVM
# - Logistic Regression
#
# to train over historical customer data and predict whether they will pay their loan debt or not.
# + button=false new_sheet=false run_control={"read_only": false}
# Importing Libraries
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
from sklearn import preprocessing
# %matplotlib inline
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### About dataset
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# This dataset is about past loans. The __Loan_train.csv__ data set includes details of 346 customers whose loan are already paid off or defaulted. It includes following fields:
#
# | Field | Description |
# |----------------|---------------------------------------------------------------------------------------|
# | Loan_status | Whether a loan is paid off on in collection |
# | Principal | Basic principal loan amount at the |
# | Terms | Origination terms which can be weekly (7 days), biweekly, and monthly payoff schedule |
# | Effective_date | When the loan got originated and took effects |
# | Due_date | Since it’s one-time payoff schedule, each loan has one single due date |
# | Age | Age of applicant |
# | Education | Education of applicant |
# | Gender | The gender of applicant |
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Lets download the dataset
# + button=false new_sheet=false run_control={"read_only": false}
# !wget -O loan_train.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_train.csv
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Load Data From CSV File
# + button=false new_sheet=false run_control={"read_only": false}
# Load the historical loan data and peek at the first rows.
df = pd.read_csv('loan_train.csv')
df.head()
# -

# (rows, columns) of the loaded data
df.shape

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Convert to date time object

# + button=false new_sheet=false run_control={"read_only": false}
# Parse the date columns from strings into datetime64 so the .dt accessor
# (e.g. day-of-week extraction below) can be used.
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Data visualization and pre-processing
#
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let’s see how many of each class is in our data set
# + button=false new_sheet=false run_control={"read_only": false}
# Class balance of the target variable.
df['loan_status'].value_counts()

# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# 260 people have paid off the loan on time while 86 have gone into collection
#

# -

# Let's plot some columns to understand the data better:

# notice: installing seaborn might take a few minutes
# !conda install -c anaconda seaborn -y

# +
import seaborn as sns

# Histogram of loan principal, faceted by gender and colored by loan status.
bins = np.linspace(df.Principal.min(), df.Principal.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'Principal', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()

# + button=false new_sheet=false run_control={"read_only": false}
# Same faceted histogram for applicant age.
bins = np.linspace(df.age.min(), df.age.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'age', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Pre-processing: Feature selection/extraction
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Lets look at the day of the week people get the loan
# + button=false new_sheet=false run_control={"read_only": false}
df['dayofweek'] = df['effective_date'].dt.dayofweek
bins = np.linspace(df.dayofweek.min(), df.dayofweek.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'dayofweek', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# We see that people who get the loan at the end of the week don't pay it off, so let's use feature binarization with a threshold at day 4 (days before Friday are weekdays; the rest count as weekend)
# + button=false new_sheet=false run_control={"read_only": false}
df['weekend'] = df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
df.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Convert Categorical features to numerical values
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Lets look at gender:
# + button=false new_sheet=false run_control={"read_only": false}
df.groupby(['Gender'])['loan_status'].value_counts(normalize=True)
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# 86% of females pay off their loans, while only 73% of males pay off theirs
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Lets convert male to 0 and female to 1:
#
# + button=false new_sheet=false run_control={"read_only": false}
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
df.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## One Hot Encoding
# #### How about education?
# + button=false new_sheet=false run_control={"read_only": false}
df.groupby(['education'])['loan_status'].value_counts(normalize=True)
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# #### Feature before One Hot Encoding
# + button=false new_sheet=false run_control={"read_only": false}
df[['Principal','terms','age','Gender','education']].head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# #### Use the one-hot encoding technique to convert categorical variables to binary variables and append them to the feature DataFrame
# + button=false new_sheet=false run_control={"read_only": false}
# Build the model feature matrix: selected numeric columns plus
# one-hot-encoded education levels.
Feature = pd.concat(
    [
        df[['Principal', 'terms', 'age', 'Gender', 'weekend']],
        pd.get_dummies(df['education']),
    ],
    axis=1,
)
# Drop the 'Master or Above' dummy column.
Feature.drop(['Master or Above'], axis=1, inplace=True)
Feature.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Feature selection
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Let's define the feature set, X:
# + button=false new_sheet=false run_control={"read_only": false}
X = Feature
X[0:5]
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# What are our labels?
# + button=false new_sheet=false run_control={"read_only": false}
y = df['loan_status'].values
y[0:5]
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Normalize Data
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Data Standardization give data zero mean and unit variance (technically should be done after train test split )
# + button=false new_sheet=false run_control={"read_only": false}
X= preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Classification
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Now, it is time to use the training set to build an accurate model. Then we use the test set to report the accuracy of the model
# We should use the following algorithm:
# - K Nearest Neighbor(KNN)
# - Decision Tree
# - Support Vector Machine
# - Logistic Regression
#
#
# -
# ### Train Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=4)
print ('Train set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
# # K Nearest Neighbor(KNN)
#
# ### Finding best KNN Classification using sklearn
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from sklearn import metrics
import matplotlib.pyplot as plt
# Dicts keyed by K, filled in with each test-set metric per fitted model.
# NOTE(review): "jarrord" is a misspelling of "jaccard" kept throughout the notebook.
f1_score_dict = {}
jarrord_score_dict = {}
accuracy = {}
# Fit one KNN model per K in [1, 14] and record test-set metrics.
for i in range(1,15):
    neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train, y_train)
    yhat = neigh.predict(X_test)
    # Weighted F1 accounts for class imbalance (260 PAIDOFF vs 86 COLLECTION).
    f1_score_dict[i] = f1_score(y_test, yhat, average='weighted')
    # Jaccard index computed for the positive ("PAIDOFF") class.
    jarrord_score_dict[i] = jaccard_score(y_test,yhat, pos_label="PAIDOFF")
    accuracy[i] = metrics.accuracy_score(y_test, yhat)
print("f1_score_dict\n", f1_score_dict)
print("\n\njarrord_score_dict\n", jarrord_score_dict)
print("\n\naccuracy\n", accuracy)
# +
import matplotlib.pyplot as plt
# Plot F-1 score Versus K
plot_x = list(f1_score_dict.keys())
plot_y = list(f1_score_dict.values())
plt.plot(plot_x, plot_y)
plt.show()
# +
import matplotlib.pyplot as plt
# Plot Jaccard Score Versus K
plot_x = list(jarrord_score_dict.keys())
plot_y = list(jarrord_score_dict.values())
plt.plot(plot_x, plot_y)
plt.show()
# +
import matplotlib.pyplot as plt
# Plot Accuracy Versus K
plot_x = list(accuracy.keys())
plot_y = list(accuracy.values())
plt.plot(plot_x, plot_y)
plt.show()
# +
# Choosing best k for the highest accuracy
max_f1_k = max(f1_score_dict, key=f1_score_dict.get)
max_jaccard_k = max(jarrord_score_dict, key=jarrord_score_dict.get)
max_accuracy_k = max(accuracy, key=accuracy.get)
print("max_f1_k: ", max_f1_k)
print("\n\nmax_jaccard_k: ", max_jaccard_k)
print("\n\nmax_accuracy_k: ", max_accuracy_k)
# -
# ### Final KNN model with high accuracy
# We will choose 5 for the k.
# +
knn_model = KNeighborsClassifier(n_neighbors = 5).fit(X_train, y_train)
print("Train set F1-Score: ", metrics.f1_score(y_train, knn_model.predict(X_train), average='weighted'))
print("Test set F1-Score: ", metrics.f1_score(y_test, knn_model.predict(X_test), average='weighted'))
print("Train set Jaccard Index: ", metrics.jaccard_score(y_train, knn_model.predict(X_train), pos_label="PAIDOFF"))
print("Test set Jaccard Index: ", metrics.jaccard_score(y_test, knn_model.predict(X_test), pos_label="PAIDOFF"))
print("Train set Accuracy: ", metrics.accuracy_score(y_train, knn_model.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, knn_model.predict(X_test)))
# -
# # Decision Tree
# ### Finding best Decision Tree Classification using sklearn
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from sklearn import metrics
import matplotlib.pyplot as plt
# Dicts keyed by max_depth, filled in with each test-set metric per model.
f1_score_dict = {}
jarrord_score_dict = {}
accuracy = {}
# Fit one entropy-criterion decision tree per max_depth in [1, 14]
# and record test-set metrics.
for i in range(1,15):
    dTree = DecisionTreeClassifier(criterion="entropy", max_depth=i).fit(X_train, y_train)
    yhat = dTree.predict(X_test)
    f1_score_dict[i] = f1_score(y_test, yhat, average='weighted')
    jarrord_score_dict[i] = jaccard_score(y_test,yhat, pos_label="PAIDOFF")
    accuracy[i] = metrics.accuracy_score(y_test, yhat)
print("f1_score_dict\n", f1_score_dict)
print("\n\njarrord_score_dict\n", jarrord_score_dict)
print("\n\naccuracy\n", accuracy)
# +
import matplotlib.pyplot as plt
# Plot F-1 score Versus n
plot_x = list(f1_score_dict.keys())
plot_y = list(f1_score_dict.values())
plt.plot(plot_x, plot_y)
plt.show()
# +
import matplotlib.pyplot as plt
# Plot Jaccard Score Versus n
plot_x = list(jarrord_score_dict.keys())
plot_y = list(jarrord_score_dict.values())
plt.plot(plot_x, plot_y)
plt.show()
# +
import matplotlib.pyplot as plt
# Plot Accuracy Versus n
plot_x = list(accuracy.keys())
plot_y = list(accuracy.values())
plt.plot(plot_x, plot_y)
plt.show()
# +
# Choosing best n for the highest accuracy
max_f1_n = max(f1_score_dict, key=f1_score_dict.get)
max_jaccard_n = max(jarrord_score_dict, key=jarrord_score_dict.get)
max_accuracy_n = max(accuracy, key=accuracy.get)
print("max_f1_n: ", max_f1_n)
print("\n\nmax_jaccard_n: ", max_jaccard_n)
print("\n\nmax_accuracy_n: ", max_accuracy_n)
# -
# ### Final Decision Tree model with high accuracy
# We will choose 6 as the best n.
# +
dTree_model = DecisionTreeClassifier(criterion="entropy", max_depth=6).fit(X_train, y_train)
print("Train set F1-Score: ", metrics.f1_score(y_train, dTree_model.predict(X_train), average='weighted'))
print("Test set F1-Score: ", metrics.f1_score(y_test, dTree_model.predict(X_test), average='weighted'))
print("Train set Jaccard Index: ", metrics.jaccard_score(y_train, dTree_model.predict(X_train), pos_label="PAIDOFF"))
print("Test set Jaccard Index: ", metrics.jaccard_score(y_test, dTree_model.predict(X_test), pos_label="PAIDOFF"))
print("Train set Accuracy: ", metrics.accuracy_score(y_train, dTree_model.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, dTree_model.predict(X_test)))
# -
# ## Visualization
# !pip install graphviz
# !pip install pydotplus
# +
from io import StringIO
import matplotlib.image as mpimg
import pydotplus
from sklearn import tree
import graphviz
# %matplotlib inline
dot_data = StringIO()
filename = "classification-tree.png"
# Bug fix: X is the scaled NumPy array, so X[0, :] is a row of feature
# *values*, not names. Use the column labels of the pre-scaling DataFrame.
featureNames = Feature.columns
# Export the fitted tree to DOT format, render it to a PNG via pydotplus,
# and display the image inline.
out=tree.export_graphviz(dTree_model,feature_names=featureNames, out_file=dot_data, class_names= np.unique(y_train), filled=True, special_characters=True,rotate=False)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png(filename)
img = mpimg.imread(filename)
plt.figure(figsize=(100, 200))
plt.imshow(img,interpolation='nearest')
# -
# # Support Vector Machine
# ### Finding best SVM Classification using sklearn
# +
from sklearn import svm
from sklearn import metrics
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
kernels = ['linear', 'poly', 'rbf', 'sigmoid'] # different kernels to be used
# Dicts keyed by kernel name, filled in with each test-set metric per model.
f1_score_dict = {}
jarrord_score_dict = {}
accuracy = {}
# Fit one SVC per kernel and record test-set metrics.
for i in range(len(kernels)):
    clf = svm.SVC(kernel=kernels[i]).fit(X_train, y_train)
    yhat = clf.predict(X_test)
    f1_score_dict[kernels[i]] = f1_score(y_test, yhat, average='weighted')
    jarrord_score_dict[kernels[i]] = jaccard_score(y_test,yhat, pos_label="PAIDOFF")
    accuracy[kernels[i]] = metrics.accuracy_score(y_test, yhat)
print("f1_score_dict\n", f1_score_dict)
print("\n\njarrord_score_dict\n", jarrord_score_dict)
print("\n\naccuracy\n", accuracy)
# -
# ### Best SVM Kernel
# +
# Choose the best kernel per metric.
# Bug fix: each max() must use its *own* dict's .get as the key function;
# the original passed accuracy.get for all three, so the reported "best"
# kernel by F1 and Jaccard was actually the best by accuracy.
max_f1_kernel = max(f1_score_dict, key=f1_score_dict.get)
max_jac_kernel = max(jarrord_score_dict, key=jarrord_score_dict.get)
max_acc_kernel = max(accuracy, key=accuracy.get)
print("max_f1_kernel: ", max_f1_kernel)
print("\n\nmax_jac_kernel: ", max_jac_kernel)
print("\n\nmax_acc_kernel: ", max_acc_kernel)
# -
# ### Final SVM model with the highest accuracy
# +
svm_model = svm.SVC(kernel='poly').fit(X_train, y_train)
print("Train set Accuracy: ", metrics.accuracy_score(y_train, svm_model.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, svm_model.predict(X_test)))
print("Train set F1-Score: ", metrics.f1_score(y_train, svm_model.predict(X_train), average='weighted'))
print("Test set F1-Score: ", metrics.f1_score(y_test, svm_model.predict(X_test), average='weighted'))
print("Train set Jaccard Index: ", metrics.jaccard_score(y_train, svm_model.predict(X_train), pos_label="PAIDOFF"))
print("Test set Jaccard Index: ", metrics.jaccard_score(y_test, svm_model.predict(X_test), pos_label="PAIDOFF"))
# -
# # Logistic Regression
# ### Finding best Logistic Regression Classification using sklearn
# +
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
solver = ['liblinear', 'lbfgs', 'sag', 'saga', 'newton-cg'] # different solvers that could be used
# Dicts keyed by solver name, filled in with each test-set metric per model.
f1_score_dict = {}
jarrord_score_dict = {}
log_loss_dict = {}
accuracy = {}
# Fit one logistic-regression model per solver and record test-set metrics.
for i in range(len(solver)):
    LR = LogisticRegression(random_state=0, solver=solver[i]).fit(X_train, y_train)
    yhat = LR.predict(X_test)
    # Class-probability estimates are needed for log loss.
    yprob = LR.predict_proba(X_test)
    f1_score_dict[solver[i]] = f1_score(y_test, yhat, average='weighted')
    jarrord_score_dict[solver[i]] = jaccard_score(y_test, yhat, pos_label="PAIDOFF")
    # NOTE: lower log loss is better, unlike the other metrics.
    log_loss_dict[solver[i]] = log_loss(y_test, yprob)
    accuracy[solver[i]] = metrics.accuracy_score(y_test, yhat)
print("f1_score_dict\n", f1_score_dict)
print("\n\njarrord_score_dict\n", jarrord_score_dict)
print("\n\nlog_loss_dict\n", log_loss_dict)
print("\n\naccuracy\n", accuracy)
# +
# Choose the best solver per metric.
# Bug fixes: each selection must use its *own* dict's .get as the key
# (the original passed accuracy.get for all four), and log loss is a
# loss — the best solver is the one that *minimizes* it.
max_f1_solver = max(f1_score_dict, key=f1_score_dict.get)
max_jac_solver = max(jarrord_score_dict, key=jarrord_score_dict.get)
max_log_solver = min(log_loss_dict, key=log_loss_dict.get)
max_acc_solver = max(accuracy, key=accuracy.get)
print("max_f1_solver: ", max_f1_solver)
print("\n\nmax_jac_solver: ", max_jac_solver)
print("\n\nmax_log_solver: ", max_log_solver)
print("\n\nmax_acc_solver: ", max_acc_solver)
# -
# ### Final Logistic Regression model with highest accuracy
# +
LR = LogisticRegression(random_state=0, solver='liblinear').fit(X_train, y_train)
print("Train set F1-Score: ", metrics.f1_score(y_train, LR.predict(X_train), average='weighted'))
print("Test set F1-Score: ", metrics.f1_score(y_test, LR.predict(X_test), average='weighted'))
print("Train set Jaccard Index: ", metrics.jaccard_score(y_train, LR.predict(X_train), pos_label="PAIDOFF"))
print("Test set Jaccard Index: ", metrics.jaccard_score(y_test, LR.predict(X_test), pos_label="PAIDOFF"))
print("Train set LogLoss: ", metrics.log_loss(y_train, LR.predict_proba(X_train)))
print("Test set LogLoss: ", metrics.log_loss(y_test, LR.predict_proba(X_test)))
print("Train set Accuracy: ", metrics.accuracy_score(y_train, LR.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, LR.predict(X_test)))
# -
# # Model Evaluation using Test set
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
# First, download and load the test set:
# !wget -O loan_test.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Load Test set for evaluation
# + button=false new_sheet=false run_control={"read_only": false}
test_df = pd.read_csv('loan_test.csv')
test_df.head()
# -
# ### Pre-processing Test set data
# +
# Apply the same pre-processing pipeline used on the training data.
test_df['due_date'] = pd.to_datetime(test_df['due_date'])
test_df['effective_date'] = pd.to_datetime(test_df['effective_date'])
test_df['dayofweek'] = test_df['effective_date'].dt.dayofweek
# Encode gender: male -> 0, female -> 1 (same mapping as the training set).
test_df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
# Binarize day of week: dayofweek > 3 (Fri-Sun in pandas) counts as weekend.
test_df['weekend'] = test_df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
test_feature = test_df[['Principal','terms','age','Gender','weekend']]
# One-hot encode education; drop 'Master or Above' as in the training features.
test_feature = pd.concat([test_feature,pd.get_dummies(test_df['education'])], axis=1)
test_feature.drop(['Master or Above'], axis = 1,inplace=True)
test_feature.head()
# -
test_X = test_feature
test_X = preprocessing.StandardScaler().fit(test_X).transform(test_X)
test_X[0:5]
test_y = test_df['loan_status']
test_y[0:5]
# ### Test set accuracy collection with different models
# Dictionary to set the accuracies
test_accuracy = {}
# +
# KNN Model Accuracy
test_accuracy['knn_model_jaccard_score'] = metrics.jaccard_score(test_y, knn_model.predict(test_X), pos_label='PAIDOFF')
test_accuracy['knn_model_f1_score'] = metrics.f1_score(test_y, knn_model.predict(test_X), average='weighted')
test_accuracy
# +
# Decision Tree Model Accuracy
test_accuracy['decision_tree_model_jaccard_score'] = metrics.jaccard_score(test_y, dTree_model.predict(test_X), pos_label='PAIDOFF')
test_accuracy['decision_tree_model_f1_score'] = metrics.f1_score(test_y, dTree_model.predict(test_X), average='weighted')
test_accuracy
# +
# SVM Model Accuracy
test_accuracy['svm_model_jaccard_score'] = metrics.jaccard_score(test_y, svm_model.predict(test_X), pos_label='PAIDOFF')
test_accuracy['svm_model_f1_score'] = metrics.f1_score(test_y, svm_model.predict(test_X), average='weighted')
test_accuracy
# +
# Logistic Model Accuracy
test_accuracy['LR_model_jaccard_score'] = metrics.jaccard_score(test_y, LR.predict(test_X), pos_label='PAIDOFF')
test_accuracy['LR_model_f1_score'] = metrics.f1_score(test_y, LR.predict(test_X), average='weighted')
test_accuracy['LR_model_log_score'] = metrics.log_loss(test_y, LR.predict_proba(test_X))
test_accuracy
# -
# # Report
# | Algorithm | Jaccard | F1-score | LogLoss |
# |--------------------|---------|----------|---------|
# | KNN | 0.68 | 0.686 | NA |
# | Decision Tree | 0.756 | 0.799 | NA |
# | SVM | 0.75 | 0.696 | NA |
# | LogisticRegression | 0.755 | 0.672 | 0.47 |
# + button=false new_sheet=false run_control={"read_only": false}
| Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Call Detectron2 over API (Segmentation Example)
# * Setup the API endpoint to call
#
# Please configure hostname and path_to_image below
host='hostname'
port=5000
url = f'http://{host}:{port}/api/score-image'
IMAGE_PATH='path_to_image'
# +
import os
import numpy as np
import cv2
from PIL import Image
import io
import base64
from pathlib import Path
from datetime import datetime
import json
import blosc
import requests
import sys
sys.path.append('../')
# -
# * Load the image and encode in Base64
with open(IMAGE_PATH, "rb") as image_file:
data = base64.b64encode(image_file.read())
# * Call the API
print(f'Starting API call at {datetime.now()}')
res = requests.post(url, data=data)
response_json = json.loads(res.text)
print(f'Ending API call at {datetime.now()}')
print(response_json)
# * Get segmentation masks
pred_masks = response_json['pred_masks']
# Bug fix: `num_seg_classes` was never defined in this notebook, so the
# reshape raised NameError. Let NumPy infer the leading (class) dimension
# from the decoded array's size instead; 1080x1920 is the frame resolution.
masks = blosc.unpack_array(bytes.fromhex(pred_masks)).reshape((-1, 1080, 1920))
| calling-detectron2-flask-api-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp core
# this automatically reads in the new modules as soon as the python file changes
# %load_ext autoreload
# %autoreload 2
# -
# # Core
#
# > API details, core functions.
#hide
from nbdev.showdoc import *
#hide
from nbdev.export import notebook2script; notebook2script()
| nbs/00_core.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:python3]
# language: python
# name: conda-env-python3-py
# ---
# # Make finger-tapping ROIs for experiment 3 power analysis
# Use Neurosynth meta-analysis and AAL atlas to create left and right finger-tapping ROIs in MNI space for power analysis.
# %matplotlib inline
import matplotlib.pyplot as plt
from nilearn.datasets import fetch_atlas_aal
from nilearn import plotting
import nibabel as nib
import numpy as np
from scipy import ndimage
import seaborn as sns
def get_cluster(mat, thresh=0.):
    """
    Return a binarized mask of the largest connected cluster in a statistical map.

    Parameters
    ----------
    mat : numpy.ndarray
        3D statistical map matrix.
    thresh : float, optional
        Voxels with values strictly greater than this threshold are
        considered. Default is 0.

    Returns
    -------
    numpy.ndarray
        Integer (0/1) array of the same shape as ``mat`` marking the largest
        connected component above threshold.

    Raises
    ------
    Exception
        If no voxels survive the threshold.
    """
    # Full 3x3x3 structuring element: all 26 neighbors are connected.
    # NOTE(review): the original comment claimed 6-connectivity ("faces"),
    # but np.ones((3, 3, 3)) actually yields 26-connectivity; behavior kept.
    conn_mat = np.ones((3, 3, 3), int)
    binarized = (mat > thresh).astype(int)

    # If the stat threshold is too high there is nothing to label.
    if np.sum(binarized) == 0:
        raise Exception('Attention: No clusters.')

    # Bug fix: ndimage.measurements is a deprecated namespace (removed in
    # modern SciPy); the labeling function lives at scipy.ndimage.label.
    label_map = ndimage.label(binarized, conn_mat)[0]
    clust_ids = sorted(list(np.unique(label_map)[1:]))
    clust_sizes = [np.sum(label_map == cid) for cid in clust_ids]
    # Keep only the largest connected component.
    max_clust_id = clust_ids[int(np.argmax(clust_sizes))]
    return (label_map == max_clust_id).astype(int)
# ## First, we will use hand-traced regions of interest to estimate the typical hand knob size in mm3
# The hand-traced regions of interest have been provided by Dr. <NAME> from [Handwerker et al. (2020)](https://doi.org/10.1162/netn_a_00145).
lh_file = '/Users/tsalo/Downloads/Hand Motor Knob ROIs/SeedMasks.AllSbj.lhK_HandDrawn+tlrc.BRIK.gz'
rh_file = '/Users/tsalo/Downloads/Hand Motor Knob ROIs/SeedMasks.AllSbj.rhK_HandDrawnSeed+tlrc.BRIK.gz'
lh_img = nib.load(lh_file)
rh_img = nib.load(rh_file)
print('There are {} participants.'.format(lh_img.shape[-1]))
# +
# Now to collect the sizes
voxel_size = np.prod(lh_img.header.get_zooms()[:3])
print(voxel_size)
lh_voxel_counts = np.sum(np.sum(np.sum(lh_img.get_fdata(), axis=0), axis=0), axis=0)
lh_mm3 = lh_voxel_counts * voxel_size
rh_voxel_counts = np.sum(np.sum(np.sum(rh_img.get_fdata(), axis=0), axis=0), axis=0)
rh_mm3 = rh_voxel_counts * voxel_size
# -
fig, ax = plt.subplots(figsize=(16, 6))
sns.distplot(lh_mm3, label='Left Hand Knob', ax=ax,
bins=8, norm_hist=False, kde=False)
sns.distplot(rh_mm3, label='Right Hand Knob', ax=ax,
bins=8, norm_hist=False, kde=False)
ax.set_xlabel('Size (mm3)')
ax.set_ylabel('Count')
ax.set_yticks([0, 6])
ax.set_xlim((0, 4000))
ax.legend()
fig.show()
mean_roi_size = np.mean(np.hstack((lh_mm3, rh_mm3)))
print('Average hand knob size: {0:.02f} mm3'.format(mean_roi_size))
target_voxel_size = 8 # 2 x 2 x 2 mm
target_roi_size = int(np.ceil(mean_roi_size / target_voxel_size))
print('Target ROI size: {} voxels'.format(target_roi_size))
# ## Now we can build our power analysis ROIs
# We grab structural masks corresponding to bilateral precentral gyri from the AAL and meta-analytic statistical maps for the "finger" label from Neurosynth. Then, we make the Neurosynth map with each structural ROI, and gradually increase our thresholds until there is roughly the target number of voxels in each hemisphere's ROI.
aal = fetch_atlas_aal()
aal_img = nib.load(aal['maps'])
aal_map = aal_img.get_fdata()
l_precentral_idx = int(aal['indices'][aal['labels'].index('Precentral_L')])
r_precentral_idx = int(aal['indices'][aal['labels'].index('Precentral_R')])
l_precentral = aal_map == l_precentral_idx
r_precentral = aal_map == r_precentral_idx
# +
# Neurosynth "finger" meta-analytic map (z-values, FDR-corrected at 0.01).
finger_meta = 'data/finger_association-test_z_FDR_0.01.nii.gz'
finger_img = nib.load(finger_meta)
finger_map = finger_img.get_fdata()
# Use conjunction of hemisphere-specific precentral gyrus masks
# and thresholded finger tapping map. Only keep largest cluster,
# when more than one survives.
# Start from an all-ones mask so the while loop runs at least once, then
# raise the threshold in 0.01 steps until the surviving cluster is no
# larger than the target ROI size.
l_finger_clust = np.ones(finger_img.shape)
thresh = 9.
while np.sum(l_finger_clust) > target_roi_size:
    thresh_finger_map = finger_map >= thresh
    l_finger = (l_precentral * thresh_finger_map).astype(int)
    l_finger_clust = get_cluster(l_finger)
    thresh += 0.01
# NOTE(review): thresh is incremented once more after the last iteration,
# so the printed threshold is 0.01 higher than the one actually applied.
print('Left finger image thresholded at {0:.02f} for {1} '
      'voxels'.format(thresh, np.sum(l_finger_clust)))
l_finger_img = nib.Nifti1Image(l_finger_clust, finger_img.affine,
                               header=finger_img.header)
# Repeat for the right hemisphere, starting from a lower initial threshold.
r_finger_clust = np.ones(finger_img.shape)
thresh = 7.
while np.sum(r_finger_clust) > target_roi_size:
    thresh_finger_map = finger_map >= thresh
    r_finger = (r_precentral * thresh_finger_map).astype(int)
    r_finger_clust = get_cluster(r_finger)
    thresh += 0.01
print('Right finger image thresholded at {0:.02f} for {1} '
      'voxels'.format(thresh, np.sum(r_finger_clust)))
r_finger_img = nib.Nifti1Image(r_finger_clust, finger_img.affine,
                               header=finger_img.header)
# Combine both hemisphere masks into one bilateral ROI image.
b_finger_img = nib.Nifti1Image(
    l_finger_img.get_fdata()+r_finger_img.get_fdata(),
    finger_img.affine)
# -
plotting.plot_glass_brain(finger_img, threshold=thresh)
plotting.plot_glass_brain(l_finger_img)
l_finger_img.to_filename('data/left_finger_ROI.nii.gz')
plotting.plot_glass_brain(r_finger_img)
r_finger_img.to_filename('data/right_finger_ROI.nii.gz')
# Just to see them together
plotting.plot_glass_brain(b_finger_img)
# and save a figure for the manuscript
plotting.plot_glass_brain(b_finger_img, output_file='data/finger_rois.png')
| experiment-3/make_fingertapping_ROIs_for_power_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# Docker image:
# `sipecam/hsi-kale:0.5.1`
# + [markdown] tags=[]
# # String1
# + tags=["block:string1"]
string1 = "Hola"
# + [markdown] tags=[]
# # String2
# + tags=["block:string2"]
string2 = " Mundo!"
# + [markdown] tags=[]
# # Print
# + tags=["block:print", "prev:string1", "prev:string2"]
print("".join([string1, string2]))
# + [markdown] tags=[]
# # Write
# + tags=["block:write", "prev:string1", "prev:string2"]
f = open("/shared_volume/test.txt", 'w')
f.write("".join([string1, string2]))
f.close()
print("listo")
| simple_pipeline/notebooks/simple_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Comparing machine learning models in scikit-learn
# *From the video series: [Introduction to machine learning with scikit-learn](https://github.com/justmarkham/scikit-learn-videos)*
# ## Agenda
#
# - How do I choose **which model to use** for my supervised learning task?
# - How do I choose the **best tuning parameters** for that model?
# - How do I estimate the **likely performance of my model** on out-of-sample data?
# ## Review
#
# - Classification task: Predicting the species of an unknown iris
# - Used three classification models: KNN (K=1), KNN (K=5), logistic regression
# - Need a way to choose between the models
#
# **Solution:** Model evaluation procedures
# ## Evaluation procedure #1: Train and test on the entire dataset
# 1. Train the model on the **entire dataset**.
# 2. Test the model on the **same dataset**, and evaluate how well we did by comparing the **predicted** response values with the **true** response values.
# +
# read in the iris data
from sklearn.datasets import load_iris
iris = load_iris()
# create X (features) and y (response)
X = iris.data
y = iris.target
# -
# ### Logistic regression
# +
# import the class
from sklearn.linear_model import LogisticRegression
# instantiate the model (using the default parameters)
logreg = LogisticRegression()
# fit the model with data
logreg.fit(X, y)
# predict the response values for the observations in X
logreg.predict(X)
# +
# store the predicted response values
y_pred = logreg.predict(X)
# check how many predictions were generated
len(y_pred)
# -
# Classification accuracy:
#
# - **Proportion** of correct predictions
# - Common **evaluation metric** for classification problems
# compute classification accuracy for the logistic regression model
from sklearn import metrics
print(metrics.accuracy_score(y, y_pred))
# - Known as **training accuracy** when you train and test the model on the same data
# ### KNN (K=5)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X, y)
y_pred = knn.predict(X)
print(metrics.accuracy_score(y, y_pred))
# ### KNN (K=1)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X, y)
y_pred = knn.predict(X)
print(metrics.accuracy_score(y, y_pred))
# ### Problems with training and testing on the same data
#
# - Goal is to estimate likely performance of a model on **out-of-sample data**
# - But, maximizing training accuracy rewards **overly complex models** that won't necessarily generalize
# - Unnecessarily complex models **overfit** the training data
# 
# *Image Credit: [Overfitting](http://commons.wikimedia.org/wiki/File:Overfitting.svg#/media/File:Overfitting.svg) by Chabacano. Licensed under GFDL via Wikimedia Commons.*
# ## Evaluation procedure #2: Train/test split
# 1. Split the dataset into two pieces: a **training set** and a **testing set**.
# 2. Train the model on the **training set**.
# 3. Test the model on the **testing set**, and evaluate how well we did.
# print the shapes of X and y
print(X.shape)
print(y.shape)
# STEP 1: split X and y into training and testing sets.
# Bug fix: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=4)
# 
# What did this accomplish?
#
# - Model can be trained and tested on **different data**
# - Response values are known for the testing set, and thus **predictions can be evaluated**
# - **Testing accuracy** is a better estimate than training accuracy of out-of-sample performance
# print the shapes of the new X objects
print(X_train.shape)
print(X_test.shape)
# print the shapes of the new y objects
print(y_train.shape)
print(y_test.shape)
# STEP 2: train the model on the training set
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# +
# STEP 3: make predictions on the testing set
y_pred = logreg.predict(X_test)
# compare actual response values (y_test) with predicted response values (y_pred)
print(metrics.accuracy_score(y_test, y_pred))
# -
# Repeat for KNN with K=5:
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
# Repeat for KNN with K=1:
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
# Can we locate an even better value for K?
# Sweep K from 1 through 25 and record the testing accuracy for each model.
k_range = list(range(1, 26))
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k).fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
# +
# import Matplotlib (scientific plotting library)
import matplotlib.pyplot as plt
# allow plots to appear within the notebook
# %matplotlib inline
# plot the relationship between K and testing accuracy
plt.plot(k_range, scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Testing Accuracy')
# -
# - **Training accuracy** rises as model complexity increases
# - **Testing accuracy** penalizes models that are too complex or not complex enough
# - For KNN models, complexity is determined by the **value of K** (lower value = more complex)
# ## Making predictions on out-of-sample data
# +
# instantiate the model with the best known parameters
knn = KNeighborsClassifier(n_neighbors=11)
# train the model with X and y (not X_train and y_train)
knn.fit(X, y)
# make a prediction for an out-of-sample observation
knn.predict([[3, 5, 4, 2]])
# -
# ## Downsides of train/test split?
# - Provides a **high-variance estimate** of out-of-sample accuracy
# - **K-fold cross-validation** overcomes this limitation
# - But, train/test split is still useful because of its **flexibility and speed**
# ## Resources
#
# - Quora: [What is an intuitive explanation of overfitting?](http://www.quora.com/What-is-an-intuitive-explanation-of-overfitting/answer/Jessica-Su)
# - Video: [Estimating prediction error](https://www.youtube.com/watch?v=_2ij6eaaSl0&t=2m34s) (12 minutes, starting at 2:34) by <NAME> Tibshirani
# - [Understanding the Bias-Variance Tradeoff](http://scott.fortmann-roe.com/docs/BiasVariance.html)
# - [Guiding questions](https://github.com/justmarkham/DAT8/blob/master/homework/09_bias_variance.md) when reading this article
# - Video: [Visualizing bias and variance](http://work.caltech.edu/library/081.html) (15 minutes) by Abu-Mostafa
# ## Comments or Questions?
#
# - Email: <<EMAIL>>
# - Website: http://dataschool.io
# - Twitter: [@justmarkham](https://twitter.com/justmarkham)
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS theme and return it as renderable HTML."""
    # use a context manager so the file handle is closed promptly
    # (the original left the handle open)
    with open("styles/custom.css", "r") as f:
        return HTML(f.read())
css_styling()
| scikit-learn-videos/05_model_evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
# # Parsing PSS/e `*.RAW` Files
# **Originally Contributed by**: <NAME>
# ## Introduction
# An example of how to parse PSS/e files and create a `System` using [PowerSystems.jl](github.com/NREL-SIIP/PowerSystems.jl)
# ### Dependencies
using PowerSystems
using TimeSeries
# ### Fetch Data
# PowerSystems.jl links to some test data that is suitable for this example.
# Let's download the test data
PowerSystems.download(PowerSystems.TestData; branch = "master")
# locate the installed package root so we can build paths to its bundled data
base_dir = dirname(dirname(pathof(PowerSystems)));
# ### Create a `System`
# +
# parse the PSS/e `*.RAW` network description into a PowerSystems `System`
sys = System(joinpath(base_dir, "data", "psse_raw", "RTS-GMLC.RAW"));
sys
# -
# ---
#
# *This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
| docs/notebook/2_PowerSystems_examples/03_parse_psse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
os.chdir('models')
# !pwd
# +
from modules_sceneflow import WarpingLayer_SF, WarpingLayer_Flow
from skimage import io
import numpy as np
from ..utils_misc import (
flow_to_png_middlebury,
read_png_flow,
read_png_depth,
numpy2torch,
pixel2pts_ms,
get_pixelgrid,
)
# -
warping_layer_sf = WarpingLayer_SF()
# +
# disp /= 256
def load_sf_png(filename):
    """Read a scene-flow PNG from ``filename`` and return it as an array."""
    return io.imread(filename)
# -
sf_exp = '/mnt/lustre/yslan/Repo/NVS/Projects/self-mono-sf/eval/monosf_selfsup_kitti_test/flow/000000_10.png'
sf_array = load_sf_png(sf_exp)
sf_array
# +
import torch
from torch.nn import functional as F
# 2x3 affine matrix: identity rotation/scale with a horizontal translation
# of -0.25 expressed in normalized [-1, 1] coordinates
theta = torch.tensor([
    [1,0,-0.25],
    [0,1,0]
], dtype=torch.float)
# build a [1, 4, 4, 2] normalized sampling grid for a 4x4, 5-channel image
grid = F.affine_grid(theta.unsqueeze(0), [1, 5, 4,4])
# output = F.grid_sample(img_torch.unsqueeze(0), grid)
# inspect the coordinates of the first grid row
grid[0][0]
# grid.shape
# -
def warping_demo(img_idx, image_dir, results_dir, tt=None):
    """Load one KITTI frame plus predicted flow/disparities and warp the
    first image with the predicted forward optical flow.

    img_idx: integer frame index (zero-padded to six digits on disk).
    image_dir: directory containing the KITTI ``image_2`` folder.
    results_dir: directory containing predicted ``flow``/``disp_0``/``disp_1``.
    tt: unused; kept for interface compatibility.
    Returns the warped image tensor of shape [1, 3, H, W].
    """
    idx_curr = "%06d" % (img_idx)
    # the top 110 rows (mostly sky) are cropped from every input
    im1_np0 = (
        io.imread(os.path.join(image_dir, "image_2/" + idx_curr + "_10.png"))
        / np.float32(255.0)
    )[110:, :, :]
    # BUG FIX: read predictions from the `results_dir` argument instead of
    # the undefined global `result_dir`
    flo_f_np0 = read_png_flow(os.path.join(results_dir, "flow/" + idx_curr + "_10.png"))[
        110:, :, :
    ]
    disp1_np0 = read_png_depth(
        os.path.join(results_dir, "disp_0/" + idx_curr + "_10.png")
    )[110:, :, :]
    disp2_np0 = read_png_depth(
        os.path.join(results_dir, "disp_1/" + idx_curr + "_10.png")
    )[110:, :, :]
    im1 = numpy2torch(im1_np0).unsqueeze(0)
    disp1 = numpy2torch(disp1_np0).unsqueeze(0)
    disp_diff = numpy2torch(disp2_np0).unsqueeze(0)
    flo_f = numpy2torch(flo_f_np0).unsqueeze(0)
    _, _, hh, ww = im1.size()
    ## Intrinsic -- looked up by image width (assumes `width_to_focal` and
    ## `cam_center_dict` are defined elsewhere in the session -- TODO confirm)
    focal_length = width_to_focal[ww]
    cx = cam_center_dict[ww][0]
    cy = cam_center_dict[ww][1]
    k1_np = np.array([[focal_length, 0, cx], [0, focal_length, cy], [0, 0, 1]])
    k1 = numpy2torch(k1_np)
    # pixel grid shifted by the forward flow: target sampling coordinates
    b, _, h, w = disp_diff.shape
    pixel_grid = get_pixelgrid(b, h, w, flo_f)
    # BUG FIX: `tf.grid_sampel(im1_np0, ...)` was a typo on an undefined name
    # and passed a numpy HWC image; grid_sample needs the [N, C, H, W] tensor
    # and a normalized [N, H, W, 2] grid in [-1, 1]
    grid = pixel_grid[:, :2].permute(0, 2, 3, 1)
    grid = 2.0 * grid / torch.tensor([w - 1, h - 1], dtype=grid.dtype) - 1.0
    x_warp = F.grid_sample(im1, grid)
    # BUG FIX: return the warped image (the original implicitly returned None,
    # yet the caller assigns the result)
    return x_warp
def get_pixelgrid(b, h, w, flow=None, direction="forward"):
    """Build a homogeneous (x, y, 1) pixel-coordinate grid of shape (b, 3, h, w).

    When ``flow`` is given, the x/y coordinates are shifted by the flow
    vectors (negated for ``direction="backward"``), giving target sampling
    coordinates for warping.
    """
    assert direction in ["forward", "backward"]
    # x runs along the width axis, y along the height axis
    xs = torch.arange(w, dtype=torch.float32).view(1, 1, 1, w).expand(b, 1, h, w)
    ys = torch.arange(h, dtype=torch.float32).view(1, 1, h, 1).expand(b, 1, h, w)
    ones = torch.ones_like(xs)
    if flow is not None:
        offset = -flow if direction == "backward" else flow
        xs = xs + offset[:, 0:1, :, :]
        ys = ys + offset[:, 1:2, :, :]
    grid = torch.cat((xs, ys, ones), dim=1).float()
    # the grid is a constant: exclude it from autograd
    return grid.requires_grad_(False)
# +
demo_data_dir = "../demo/demo_generator/kitti_img"
demo_res_dir = "../demo/demo_generator/results"
# result_dir = './eval/monosf_selfsup_kitti_test/'
img_idx = 139
# -
# BUG FIX: pass the defined `demo_res_dir` -- the old call referenced the
# commented-out (undefined) name `result_dir`
warp_x = warping_demo(img_idx, demo_data_dir, demo_res_dir)
| models/.ipynb_checkpoints/vis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="1C-PE8R83XdV"
# # SQuAD_Dataset
# # Learning Phrase Representations using RNN Encoder-Decoder
#
# + colab={"base_uri": "https://localhost:8080/"} id="j_cnrcYwdX-A" outputId="6cee8050-7425-410a-f0f3-c891e487b3f6"
from google.colab import drive
drive.mount('/content/drive')
# + id="ysfsMhnWfQWs"
import numpy as np
import pandas as pd
import json
import warnings
warnings.filterwarnings("ignore")
# + id="LNhEtnaZfTAk"
def squad_json_to_dataframe_train(input_file_path, record_path = ['data','paragraphs','qas','answers'],
                                  verbose = 1):
    """
    Flatten a SQuAD v2 train json file into a pandas DataFrame with one
    row per answer (columns include question, context, text, c_id).

    input_file_path: path to the squad json file.
    record_path: path to deepest level in json file default value is
    ['data','paragraphs','qas','answers']
    verbose: 0 to suppress it default is 1
    """
    if verbose:
        print("Reading the json file")
    # context-managed open so the file handle is released promptly
    with open(input_file_path) as f:
        file = json.load(f)
    if verbose:
        print("processing...")
    # flatten the nested json at three depths (answers / questions / paragraphs);
    # pd.json_normalize replaces pd.io.json.json_normalize, removed in pandas 1.0
    js = pd.json_normalize(file, record_path)
    m = pd.json_normalize(file, record_path[:-1])
    r = pd.json_normalize(file, record_path[:-2])
    # repeat each paragraph context once per question, and each question id
    # once per answer, so the levels line up row-wise
    idx = np.repeat(r['context'].values, r.qas.str.len())
    ndx = np.repeat(m['id'].values, m['answers'].str.len())
    m['context'] = idx
    js['q_idx'] = ndx
    # combining it into single dataframe (axis must be a keyword in pandas 2.x)
    main = pd.concat([m[['id','question','context']].set_index('id'),
                      js.set_index('q_idx')], axis=1, sort=False).reset_index()
    main['c_id'] = main['context'].factorize()[0]
    if verbose:
        print("shape of the dataframe is {}".format(main.shape))
        print("Done")
    return main
# + id="foz1uKoPcbX2"
def squad_json_to_dataframe_dev(input_file_path, record_path = ['data','paragraphs','qas','answers'],
                                verbose = 1):
    """
    Flatten a SQuAD v2 dev json file into a pandas DataFrame with one row
    per question; the answers are kept as a list column.

    input_file_path: path to the squad json file.
    record_path: path to deepest level in json file default value is
    ['data','paragraphs','qas','answers']
    verbose: 0 to suppress it default is 1
    """
    if verbose:
        print("Reading the json file")
    # context-managed open so the file handle is released promptly
    with open(input_file_path) as f:
        file = json.load(f)
    if verbose:
        print("processing...")
    # flatten to the question and paragraph levels;
    # pd.json_normalize replaces pd.io.json.json_normalize, removed in pandas 1.0
    # (the answers-level flatten computed by the train helper is unused here)
    m = pd.json_normalize(file, record_path[:-1])
    r = pd.json_normalize(file, record_path[:-2])
    # repeat each paragraph context once per question so the rows align
    idx = np.repeat(r['context'].values, r.qas.str.len())
    m['context'] = idx
    main = m[['id','question','context','answers']].set_index('id').reset_index()
    main['c_id'] = main['context'].factorize()[0]
    if verbose:
        print("shape of the dataframe is {}".format(main.shape))
        print("Done")
    return main
# + colab={"base_uri": "https://localhost:8080/"} id="WjDvtaksZ_sy" outputId="00736867-0030-4137-e14f-364bc25f9ec0"
!7z e /content/train-v2.0.json.7z
# + [markdown] id="c125yPZO8uJK"
# First, preprocess and prepare training data
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="EGE6as9s23FW" outputId="17f6423b-89b2-45f3-a080-85838cf52635"
# training data
input_file_path = '/content/train-v2.0.json'
record_path = ['data','paragraphs','qas','answers']
train_df = squad_json_to_dataframe_train(input_file_path=input_file_path,record_path=record_path)
train_df = train_df[~train_df.text.isna()]
train_df.head()
# + [markdown] id="kQWdsDm8co_p"
# Next, preprocess and prepare validation data
# + id="uMz8avsr3-EB" colab={"base_uri": "https://localhost:8080/", "height": 374} outputId="4dc6f5d5-5dc8-41b8-ce8c-4d0a57f67be7"
# validation data
input_file_path = '/content/dev-v2.0.json'
record_path = ['data','paragraphs','qas','answers']
dev_df = squad_json_to_dataframe_dev(input_file_path=input_file_path,record_path=record_path)
dev_df.dropna(subset=['question','answers'], inplace=True)
dev_df = dev_df[~dev_df.answers.str.len().eq(0)]
dev_df.reset_index(drop=True)
dev_df['text'] = [d.get('text') for d in dev_df.answers.str[0]]
dev_df.head()
# + id="xc_xZXxJfvoS"
# Import Library
import random
import torch, torchtext
from torchtext import data
import torch.nn as nn
import torch.optim as optim
from torchtext.data import Field, BucketIterator
import spacy
import numpy as np
import math
import time
# NOTE(review): `torchtext.data.Field` / `BucketIterator` belong to the legacy
# torchtext API (removed in torchtext 0.9) -- this notebook needs torchtext <= 0.8.
#Then set a random seed for deterministic results/reproducibility.
SEED = 1234
# seed every RNG the pipeline touches: Python, NumPy, CPU torch and CUDA torch
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True  # trade speed for reproducible cuDNN kernels
# + [markdown] id="FIqiCwLi9ziA"
# Create our fields to process our data. This will append the "start of sentence" and "end of sentence" tokens as well as converting all words to lowercase.
# + id="mPLXQgHHf1jN"
SRC= data.Field(sequential = True, tokenize = 'spacy',init_token='<sos>',
eos_token='<eos>',
lower=True)
TRG = data.Field(sequential = True, tokenize = 'spacy',init_token='<sos>',
eos_token='<eos>',
lower=True)
# + [markdown] id="2oFGBpLD-SWN"
# Having defined those fields, we now need to produce a list that maps them onto the list of rows that are in the CSV:
# + id="oRu2G06Uu9NT"
fields = [('question', SRC),('text',TRG)]
# + [markdown] id="456Hn8XE-f0Y"
# Armed with our declared fields, lets convert from pandas to list to torchtext.
# + id="D-TlfW5lvDMP"
train_example = [data.Example.fromlist([train_df.question.iloc[i],train_df.text.iloc[i]], fields) for i in range(train_df.shape[0])]
dev_example = [data.Example.fromlist([dev_df.question.iloc[i],dev_df.text.iloc[i]], fields) for i in range(dev_df.shape[0])]
# + [markdown] id="sEBSbtg--kLP"
# Creating dataset
# + id="OgRNKj2FvNq0"
QnADataset_train = data.Dataset(train_example, fields)
QnADataset_dev = data.Dataset(dev_example, fields)
# + [markdown] id="00Dba1Gi-woo"
# Finally, we can split into training, testing, and validation sets by using the split() method:
# + id="Fu7F8QEr0hPI"
(train_data,valid_data) = QnADataset_train.split(split_ratio=[0.80,0.20], random_state=random.seed(SEED))
test_data = QnADataset_dev
# + colab={"base_uri": "https://localhost:8080/"} id="lPnEo75S0tt4" outputId="0c6d5bae-61e7-40c1-d0e6-53ef3de58b37"
(len(train_data), len(valid_data),len(test_data))
# + colab={"base_uri": "https://localhost:8080/"} id="HmFSDM4S07xn" outputId="d062c247-309b-4041-b0b6-135d48baaaab"
print(vars(train_data.examples[0]))
# + [markdown] id="rxX5mgk2-8VB"
# Then create our vocabulary, converting all tokens appearing less than twice into <unk> tokens.
# + id="rpDtuN2p0-K-"
SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)
# + [markdown] id="K8EUkcBO--z-"
# Finally, define the device and create our iterators.
# + id="_-UvpKXg1AkT"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# + id="A19rx2ur1DZ9"
BATCH_SIZE = 64
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
sort_key=lambda x : len(x.question),
sort_within_batch=False,
device = device)
# + id="pMS-S2L61IHE"
class Encoder(nn.Module):
    """Single-layer GRU encoder.

    Embeds a source token sequence and returns the GRU's final hidden
    state, which the seq2seq model uses as the context vector.
    """

    def __init__(self, input_dim, emb_dim, hid_dim, dropout):
        super().__init__()
        self.hid_dim = hid_dim
        # a single recurrent layer, so dropout is applied to embeddings only
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, hid_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src):
        # src: [src len, batch size]
        emb = self.dropout(self.embedding(src))
        # emb: [src len, batch size, emb dim]; a GRU carries no cell state
        _, hidden = self.rnn(emb)
        # hidden: [1, batch size, hid dim] -- the top (and only) layer's state
        return hidden
# + id="Spy0krTg1NUo"
class Decoder(nn.Module):
    """GRU decoder that conditions every step on the encoder context.

    The context vector is concatenated with the token embedding at the GRU
    input, and again (together with the new hidden state) before the final
    linear output layer.
    """

    def __init__(self, output_dim, emb_dim, hid_dim, dropout):
        super().__init__()
        self.hid_dim = hid_dim
        self.output_dim = output_dim
        self.embedding = nn.Embedding(output_dim, emb_dim)
        # GRU input is the embedded token concatenated with the context
        self.rnn = nn.GRU(emb_dim + hid_dim, hid_dim)
        # prediction layer sees embedding + hidden + context
        self.fc_out = nn.Linear(emb_dim + hid_dim * 2, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden, context):
        # input: [batch size]; hidden, context: [1, batch size, hid dim]
        embedded = self.dropout(self.embedding(input.unsqueeze(0)))
        # embedded: [1, batch size, emb dim]
        rnn_input = torch.cat((embedded, context), dim=2)
        output, hidden = self.rnn(rnn_input, hidden)
        # seq len, layers and directions are all 1, so squeeze them away
        combined = torch.cat(
            (embedded.squeeze(0), hidden.squeeze(0), context.squeeze(0)), dim=1
        )
        prediction = self.fc_out(combined)
        # prediction: [batch size, output dim]
        return prediction, hidden
# + id="hYeZdiaO1O5x"
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper in the style of Cho et al. (2014).

    The encoder's final hidden state serves both as the decoder's initial
    hidden state and as a fixed per-step context vector.
    """

    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
        assert encoder.hid_dim == decoder.hid_dim, \
            "Hidden dimensions of encoder and decoder must be equal!"

    def forward(self, src, trg, teacher_forcing_ratio = 0.5):
        # src: [src len, batch size]; trg: [trg len, batch size]
        trg_len, batch_size = trg.shape[0], trg.shape[1]
        vocab_size = self.decoder.output_dim
        # one [batch, vocab] prediction slot per target position
        outputs = torch.zeros(trg_len, batch_size, vocab_size).to(self.device)
        # the context doubles as the decoder's initial hidden state
        context = self.encoder(src)
        hidden = context
        token = trg[0, :]  # <sos> tokens start the decoding
        for step in range(1, trg_len):
            prediction, hidden = self.decoder(token, hidden, context)
            outputs[step] = prediction
            # teacher forcing: with probability `teacher_forcing_ratio`, feed
            # the ground-truth next token instead of the model's argmax
            use_teacher = random.random() < teacher_forcing_ratio
            token = trg[step] if use_teacher else prediction.argmax(1)
        return outputs
# + [markdown] id="xMHSG524_WCr"
# # Training the Seq2Seq Model
#
# The rest of this session is very similar to the previous one.
#
# We initialise our encoder, decoder and seq2seq model (placing it on the GPU if we have one). As before, the embedding dimensions and the amount of dropout used can be different between the encoder and the decoder, but the hidden dimensions must remain the same.
# + id="ftEpd_4a1QmF"
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
HID_DIM = 512
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, DEC_DROPOUT)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Seq2Seq(enc, dec, device).to(device)
# + [markdown] id="x8nTb43V_Zyo"
# Next, we initialize our parameters. The paper states the parameters are initialized from a normal distribution with a mean of 0 and a standard deviation of 0.01, i.e. $\mathcal{N}(0, 0.01)$.
#
# It also states we should initialize the recurrent parameters to a special initialization, however to keep things simple we'll also initialize them to $\mathcal{N}(0, 0.01)$.
# + colab={"base_uri": "https://localhost:8080/"} id="bMp_2W9S1SVR" outputId="0b3d265a-f443-4cff-e5cd-359725ddedb6"
def init_weights(m):
    """Initialise every parameter of ``m`` (weights and biases alike) from
    a N(0, 0.01) distribution, as prescribed by the paper."""
    for param in m.parameters():
        nn.init.normal_(param.data, mean=0, std=0.01)
model.apply(init_weights)
# + [markdown] id="x5_2i7Dk_fdz"
# We print out the number of parameters.
#
# Even though we only have a single layer RNN for our encoder and decoder we actually have **more** parameters than the last model. This is due to the increased size of the inputs to the GRU and the linear layer. However, it is not a significant amount of parameters and causes a minimal amount of increase in training time (~3 seconds per epoch extra).
# + colab={"base_uri": "https://localhost:8080/"} id="Enkgnath1Tt3" outputId="7f4e036e-3a9e-4349-ce89-2e65a35b471d"
def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
print(f'The model has {count_parameters(model):,} trainable parameters')
# + [markdown] id="VH3WH5Lh_mPO"
# We initialize our optimizer.
# + id="CUoyH79C1W7L"
optimizer = optim.Adam(model.parameters())
# + [markdown] id="Khbcq7R4_p-w"
# We also initialize the loss function, making sure to ignore the loss on `<pad>` tokens.
# + id="XFiHkSJC1YWm"
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)
# + [markdown] id="6tuZ-o3l_xKO"
# We then create the training loop...
# + id="4GeuBp3t1ZiB"
def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch over ``iterator`` and return the mean
    per-batch loss.

    Each batch exposes `.question` (source) and `.text` (target); gradients
    are clipped to ``clip`` before every optimiser step.
    """
    model.train()
    running_loss = 0.0
    for batch in iterator:
        src, trg = batch.question, batch.text
        optimizer.zero_grad()
        output = model(src, trg)
        # drop the <sos> position and flatten to token level:
        # output -> [(trg len - 1) * batch, vocab], trg -> [(trg len - 1) * batch]
        vocab = output.shape[-1]
        loss = criterion(output[1:].view(-1, vocab), trg[1:].view(-1))
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        running_loss += loss.item()
    return running_loss / len(iterator)
# + [markdown] id="3OMTod0P_2I9"
# ...and the evaluation loop, remembering to set the model to eval mode and turn off teaching forcing.
# + id="u3z3OEyq1byl"
def evaluate(model, iterator, criterion):
    """Return the mean per-batch loss over ``iterator`` without updating
    the model: eval mode, no gradients, and teacher forcing disabled."""
    model.eval()
    total = 0.0
    with torch.no_grad():
        for batch in iterator:
            src, trg = batch.question, batch.text
            output = model(src, trg, 0)  # turn off teacher forcing
            # drop the <sos> position and flatten to token level
            vocab = output.shape[-1]
            loss = criterion(output[1:].view(-1, vocab), trg[1:].view(-1))
            total += loss.item()
    return total / len(iterator)
# + [markdown] id="UjycjEXz_6h4"
# We'll also define the function that calculates how long an epoch takes.
# + id="GkH2vb6z1drm"
def epoch_time(start_time, end_time):
    """Split the elapsed wall-clock interval into whole (minutes, seconds)."""
    elapsed = end_time - start_time
    mins = int(elapsed / 60)
    secs = int(elapsed - mins * 60)
    return mins, secs
# + [markdown] id="DuTL4wYQ_8-W"
# Then, we train our model, saving the parameters that give us the best validation loss.
# + colab={"base_uri": "https://localhost:8080/"} id="fGldAX9N1e_8" outputId="cf605a02-ee5e-4146-f7f6-2cb055d84db0"
N_EPOCHS = 10
CLIP = 1  # max gradient norm handed to clip_grad_norm_ inside train()
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # checkpoint only when the validation loss improves ("best so far")
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut2-model.pt')
    # PPL = exp(cross-entropy), a more interpretable language-model metric
    print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
# + [markdown] id="yTCrvQ3-ADUe"
# Finally, we test the model on the test set using these "best" parameters.
# + colab={"base_uri": "https://localhost:8080/"} id="0wODowUY1hHo" outputId="6f132f60-b31e-44ff-a548-da09ff876b7f"
model.load_state_dict(torch.load('tut2-model.pt'))
test_loss = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
# + [markdown] id="2kZXSc5SAGiG"
# Just looking at the test loss, we get better performance. This is a pretty good sign that this model architecture is doing something right! Relieving the information compression seems like the way forward
| LanguageModelling-EncoderDecoderAttention/src/SQuAD-dataset/END_NLP_Class_9_Assgn_SQuAD_Dataset - RNN_Encoder_Decoder_for_ML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing event horizon and ergosphere of Kerr black hole
#
# ### Importing required modules
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
from einsteinpy.utils import kerr_utils, schwarzschild_radius
# ### Defining the black hole characteristics
M = 4e30  # black-hole mass in kilograms (about two solar masses)
scr = schwarzschild_radius(M * u.kg).value
# for nearly maximally rotating black hole
# (spin parameter just below scr/2, the extremal value used here)
a1 = 0.499999*scr
# for ordinary black hole
a2 = 0.3*scr
# ### Calculating the ergosphere and event horizon for spherical coordinates
ergo1, ergo2, hori1, hori2 = list(), list(), list(), list()
thetas = np.linspace(0, np.pi, 720)  # sample the polar angle over [0, pi]
for t in thetas:
    ergo1.append(kerr_utils.radius_ergosphere(M, a1, t, "Spherical"))
    ergo2.append(kerr_utils.radius_ergosphere(M, a2, t, "Spherical"))
    hori1.append(kerr_utils.event_horizon(M, a1, t, "Spherical"))
    hori2.append(kerr_utils.event_horizon(M, a2, t, "Spherical"))
ergo1, ergo2, hori1, hori2 = np.array(ergo1), np.array(ergo2), np.array(hori1), np.array(hori2)
# ### Calculating the X, Y coordinates for plotting
# project (r, theta) onto the plotting plane: X = r*sin(theta), Y = r*cos(theta)
Xe1, Ye1 = ergo1[:,0] * np.sin(ergo1[:,1]), ergo1[:,0] * np.cos(ergo1[:,1])
Xh1, Yh1 = hori1[:,0] * np.sin(hori1[:,1]), hori1[:,0] * np.cos(hori1[:,1])
Xe2, Ye2 = ergo2[:,0] * np.sin(ergo2[:,1]), ergo2[:,0] * np.cos(ergo2[:,1])
Xh2, Yh2 = hori2[:,0] * np.sin(hori2[:,1]), hori2[:,0] * np.cos(hori2[:,1])
# ### Plot for maximally rotating black hole
# %matplotlib inline
fig, ax = plt.subplots()
# for maximally rotating black hole
# the second fill mirrors the surfaces across the rotation axis (negated X)
ax.fill(Xh1, Yh1, 'b', Xe1, Ye1, 'r', alpha=0.3)
ax.fill(-1*Xh1, Yh1, 'b', -1*Xe1, Ye1, 'r', alpha=0.3)
# ### Plot for rotating(normally) black hole
# %matplotlib inline
fig, ax = plt.subplots()
ax.fill(Xh2, Yh2, 'b', Xe2, Ye2, 'r', alpha=0.3)
ax.fill(-1*Xh2, Yh2, 'b', -1*Xe2, Ye2, 'r', alpha=0.3)
# - The inner body represents event horizon and outer one represents ergosphere. It can be concluded that with decrease in angular momentum, radius of event horizon increases, and that of ergosphere decreases.
| docs/source/examples/Visualizing_event_horizon_and_ergosphere_of_Kerr_black_hole.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../images/QISKit-c.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="250 px" align="left">
# ## _*Quantum Superdense Coding*_
#
# The latest version of this notebook is available on https://github.com/QISKit/qiskit-tutorial.
#
# ***
# ### Contributor
# <NAME>
# ## Introduction
#
# Superdense coding is a protocol to transmit two classical bits of information using only one qubit. It was devised by <NAME> (IBM) and <NAME> in [1992](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.69.2881).
# The protocol starts out with a shared entangled state between the sender (Alice) and the receiver (Bob):
# $$|\psi\rangle = \frac{1}{\sqrt{2}}(|0_A 0_B\rangle + |1_A 1_B\rangle)$$
# The first qubit, denoted by subscript $A$, belongs to Alice and the second qubit, $B$, belongs to Bob.
# Alice wants to send a two bit message to Bob, 00, 01, 10, or 11. She performs a single qubit operation on her qubit which transforms the entangled state according to which message she wants to send:
# - For a message of **00**: Alice applies $I = \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}$. The resultant state would be $|\psi_{00}\rangle = \frac{1}{\sqrt{2}}(|0_A 0_B\rangle + |1_A 1_B\rangle)$
# - For a message of **01**: Alice applies $X = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}$. The resultant state would be $|\psi_{01}\rangle = \frac{1}{\sqrt{2}}(|1_A 0_B\rangle + |0_A 1_B\rangle)$
# - For a message of **10**: Alice applies $Z = \begin{bmatrix} 1 & 0 \\ 0 & -1 \end{bmatrix}$. The resultant state would be $|\psi_{10}\rangle = \frac{1}{\sqrt{2}}(|0_A 0_B\rangle - |1_A 1_B\rangle)$
# - For a message of **11**: Alice applies $XZ = \begin{bmatrix} 0 & -1 \\ 1 & 0 \end{bmatrix}$. The resultant state would be $|\psi_{11}\rangle = \frac{1}{\sqrt{2}}(- |1_A 0_B\rangle + |0_A 1_B\rangle)$
# The key to superdense coding is that these four states, $|\psi_{00}\rangle, |\psi_{01}\rangle, |\psi_{10}\rangle, |\psi_{11}\rangle$ (otherwise known as the [Bell states](https://en.wikipedia.org/wiki/Bell_state)), are orthonormal and are hence distinguishable by a quantum measurement.
# ## Implementation
#
# The first step is to import the required packages to run the Quantum Experience and set up our quantum program.
# +
# Checking the version of PYTHON; we only support > 3.5
import sys
if sys.version_info < (3,5):
raise Exception('Please use Python version 3.5 or greater.')
# Importing QISKit
from qiskit import QuantumCircuit, QuantumProgram
import Qconfig
# Import basic plotting tools
from qiskit.tools.visualization import plot_histogram
# Quantum program setup
Q_program = QuantumProgram()
Q_program.set_api(Qconfig.APItoken, Qconfig.config["url"]) # set the APIToken and API url
# -
# Recall from [superposition and entanglement](superposition_and_entanglement.ipynb) and [entanglement revisited](entanglement_revisited.ipynb), the steps to make the shared entangled state $|\psi\rangle = \frac{1}{\sqrt{2}}(|0_A 0_B\rangle + |1_A 1_B\rangle)$ are:
# 1. Start with an initial state $|0_A 0_B\rangle$
# 2. Apply $H = \frac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}$ on $q_A$
# 3. Then a $CNOT = \begin{bmatrix} 1 & 0 & 0 & 0\\ 0 & 0 & 0 & 1\\0& 0& 1 & 0\\0 & 1 & 0 & 0 \end{bmatrix}$ from $q_A$ to $q_B$
#
# With $q_A = q_0$ and $q_B = q_1$, this looks like:
# +
# Creating registers
q = Q_program.create_quantum_register("q", 2)
c = Q_program.create_classical_register("c", 2)
# Quantum circuit to make the shared entangled state:
# H on q[0] then CNOT(q[0] -> q[1]) prepares (|00> + |11>)/sqrt(2)
superdense = Q_program.create_circuit("superdense", [q], [c])
superdense.h(q[0])
superdense.cx(q[0], q[1])
# -
# Alice now needs to decide which two bit message she wants to transmit to Bob, ($00$, $01$, $10$, or $11$), and perform the corresponding transformation ($I$, $X$, $Z$ or $XZ$ respectively) to her qubit $q_A$ ($q_0$). In this case, she encodes $11$:
# +
# For 00, do nothing
# For 01, apply $X$
#superdense.x(q[0])
# For 10, apply $Z$
#superdense.z(q[0])
# For 11, apply $XZ$
superdense.z(q[0])
superdense.x(q[0])
# barrier separates Alice's encoding from Bob's decoding in the drawn circuit
superdense.barrier()
# -
# -
# Bob now needs to 'decode' the message that Alice sent him. Since measurement in the Quantum Experience is only possible in the standard computational basis, he does this by:
# 1. Applying a $CNOT$ from $q_A$ to $q_B$
# 2. Then a $H$ on $q_A$
# 3. And measuring $q_A$ and $q_B$
#
# Recalling that $q_A = q_0$ and $q_B = q_1$, this looks like:
# CNOT then H rotate the four Bell states onto the computational basis,
# so a standard measurement recovers Alice's two classical bits
superdense.cx(q[0], q[1])
superdense.h(q[0])
superdense.measure(q[0], c[0])
superdense.measure(q[1], c[1])
# Let's now create, execute the quantum circuits, and plot the results:
circuits = ["superdense"]
print(Q_program.get_qasms(circuits)[0])
# +
# NOTE(review): QuantumProgram / ibmqx2 belong to the early QISKit API;
# modern qiskit uses QuantumCircuit + a backend provider instead
backend = 'ibmqx2' # the device to run on
shots = 1024 # the number of shots in the experiment
result = Q_program.execute(circuits, backend=backend, shots=shots, max_credits=3, wait=10, timeout=240)
# -
plot_histogram(result.get_counts("superdense"))
# The state with the highest probability should match the message that Alice encoded earlier ($11$). Mathematically:
# - For a message of **00**: Bob received $|\psi_{00}\rangle = \frac{1}{\sqrt{2}}(|0_A 0_B\rangle + |1_A 1_B\rangle)$. Applying $CNOT$ gives: $\frac{1}{\sqrt{2}}(|0_A 0_B\rangle + |1_A 0_B\rangle)$. Applying $H$ on $q_A$ results in: $\frac{1}{\sqrt{2}}\frac{1}{\sqrt{2}}[(|0_A\rangle + |1_A\rangle)|0_B\rangle + (|0_A\rangle - |1_A\rangle)|0_B\rangle] = |0_A 0_B\rangle$
# - For a message of **01**: Bob received $|\psi_{01}\rangle = \frac{1}{\sqrt{2}}(|1_A 0_B\rangle + |0_A 1_B\rangle)$. Applying $CNOT$ gives: $\frac{1}{\sqrt{2}}(|1_A 1_B\rangle + |0_A 1_B\rangle)$. Applying $H$ on $q_A$ results in: $\frac{1}{\sqrt{2}}\frac{1}{\sqrt{2}}[(|0_A\rangle - |1_A\rangle)|1_B\rangle + (|0_A\rangle + |1_A\rangle)|1_B\rangle] = |0_A 1_B\rangle$
# - For a message of **10**: Bob received $|\psi_{10}\rangle = \frac{1}{\sqrt{2}}(|0_A 0_B\rangle - |1_A 1_B\rangle)$. Applying $CNOT$ gives: $\frac{1}{\sqrt{2}}(|0_A 0_B\rangle - |1_A 0_B\rangle)$. Applying $H$ on $q_A$ results in: $\frac{1}{\sqrt{2}}\frac{1}{\sqrt{2}}[(|0_A\rangle + |1_A\rangle)|0_B\rangle - (|0_A\rangle - |1_A\rangle)|0_B\rangle] = |1_A 0_B\rangle$
# - For a message of **11**: Bob received $|\psi_{11}\rangle = \frac{1}{\sqrt{2}}(- |1_A 0_B\rangle + |0_A 1_B\rangle)$. Applying $CNOT$ gives: $\frac{1}{\sqrt{2}}(|0_A 1_B\rangle - |1_A 1_B\rangle)$. Applying $H$ on $q_A$ results in: $\frac{1}{\sqrt{2}}\frac{1}{\sqrt{2}}[(|0_A\rangle + |1_A\rangle)|1_B\rangle - (|0_A\rangle - |1_A\rangle)|1_B\rangle] = |1_A 1_B\rangle$
# %run "../version.ipynb"
| 2_quantum_information/superdense_coding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import datasets
from IPython.display import display,HTML
from pprint import pprint
# load the classic 3-class iris dataset bundled with scikit-learn
iris = datasets.load_iris()
# +
# DESCR is a plain-text summary of the dataset
pprint(iris.DESCR)
# -
display(iris.feature_names)
display(iris.target_names)
# NOTE(review): `load_boston` was deprecated in scikit-learn 1.0 and removed
# in 1.2; these cells require an older scikit-learn (or a replacement dataset
# such as `fetch_california_housing`).
print(datasets.load_boston())
boston = datasets.load_boston()
boston.feature_names
| pycampExer/iris_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="AuxeC8xuDlPY" colab_type="text"
# * What happens when characters in Series?
# * Loc and colon
# * iLoc and colon
# * Describe in string elements
# * inplace error
# + [markdown] id="UZv9SgPaGnVO" colab_type="text"
# # PANDAS
#
# **INTRODUCTION
# Pandas is an open-source, Python library which provides easy-to-use data structures for the data analysis.**
# **Pandas is great for data manipulation, data analysis, and data visualization.**
#
# #### WHY PANDAS?
#
# 1. We can easily read and write from and to CSV files, or even databases
# 2. Easy handling of missing data (represented as NaN) in floating point as well as non-floating point data
# 3. We can manipulate the data by columns. Columns can be inserted and deleted from DataFrame and higher dimensional objects
# 4. Intuitive merging and joining data sets
# 5. Intelligent label-based slicing, fancy indexing, and subsetting of large data sets
# 6. Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving/loading data.
#
#
# + id="61GtmTpbGnVU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
import pandas as pd
import numpy as np
# + [markdown] id="Tp6NgDszGnVi" colab_type="text"
# # Series
# A series is a 1-D data structure. It is basically a labelled array that can hold different data types:
# * int
# * float
# * String
# * Python object
# * many more
#
# The data is aligned in a row-wise fashion.
# + [markdown] id="hj-n1FcfGnVk" colab_type="text"
# ## Creating the series from the random dataset_for_series
# + id="Dg8VHDtDGnVm" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} outputId="35cc33a5-4161-4519-975a-02a81978c2cb" executionInfo={"status": "ok", "timestamp": 1526067440509, "user_tz": -345, "elapsed": 804, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
# Fix the RNG seed so the tutorial output is reproducible.
np.random.seed(10)
# Ten random integers drawn from [0, 100).
data_for_series = np.random.randint(0, 100, size=(10))
print(data_for_series)
# + id="qdsGD6ZmGnV4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
s = pd.Series(data_for_series)
# + id="LQe1ljrQGnV-" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 208} outputId="38f8bdb1-c90a-4471-9779-b4408a1f587a" executionInfo={"status": "ok", "timestamp": 1526067443090, "user_tz": -345, "elapsed": 1058, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
print(s)
# + [markdown] id="7EkpvBmnGnWG" colab_type="text"
# We can get the information
# + [markdown] id="9GEtxPuCDlQs" colab_type="text"
# ## Using Label (String Index)
# + id="JI1qOAu9GnXm" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Labels "A" through "J", used below as the string index of the series.
index_for_series = list("ABCDEFGHIJ")
# + id="FilBhf6MGnXq" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} outputId="bf094d65-36f1-4719-a0ba-e25fe4ad1016" executionInfo={"status": "ok", "timestamp": 1526067445098, "user_tz": -345, "elapsed": 808, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
print(index_for_series)
# + id="CghIXzNaGnX2" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
char_series = pd.Series(data_for_series, index=index_for_series)
# + id="vuCnd5vhGnYE" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 208} outputId="1ec1ea32-5892-40d8-ae9f-02c27e22ef21" executionInfo={"status": "ok", "timestamp": 1526067447815, "user_tz": -345, "elapsed": 844, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
print(char_series)
# + [markdown] id="YnVuQfdDGnaI" colab_type="text"
# ## DATAFRAME
#
# A Data frame is a two-dimensional data structure, i.e., data is aligned in a tabular fashion in rows and columns. A pandas DataFrame can be created using the following constructor −
#
# ```python
# pandas.DataFrame(data, index, columns)
# ```
# + [markdown] id="LkdU2xqkGnaK" colab_type="text"
# ## Creating random data
# + id="159rCdo3GnaO" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
np.random.seed(3)
data = np.random.randint(0, 10, (5, 4)) # Ranging from 0-10 with 5*4 matrix
# + id="IlAICJYUGnaU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 104} outputId="2ab3c318-233d-4c51-b998-73652a14e4fe" executionInfo={"status": "ok", "timestamp": 1526067449984, "user_tz": -345, "elapsed": 910, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
print(data)
# + id="AMCdzM4jDlSQ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 202} outputId="2c7588e0-9d2f-485c-d54a-e26b546ac1c1" executionInfo={"status": "ok", "timestamp": 1526067451070, "user_tz": -345, "elapsed": 920, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
pd.DataFrame(data)
# + id="ibVihYjLGnag" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 202} outputId="9f1bff56-e4aa-4feb-b7aa-ef63e06c1111" executionInfo={"status": "ok", "timestamp": 1526067452184, "user_tz": -345, "elapsed": 847, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
# Row labels P-T and column labels A-D for the 5x4 random matrix above.
my_index = 'P Q R S T'.split()
my_columns = 'A B C D'.split()
# Wrap the raw NumPy array in a labelled DataFrame.
df = pd.DataFrame(data, index=my_index, columns=my_columns)
df
# + [markdown] id="5bQD27dIGnak" colab_type="text"
# ## Checking the information
# + id="QLOtsWhdGnao" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 314} outputId="f3ef2b0f-a20c-4b32-9f8f-3b9889c67e7a" executionInfo={"status": "ok", "timestamp": 1526067453678, "user_tz": -345, "elapsed": 850, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
df.describe()
# + [markdown] id="kZjInil6DlTS" colab_type="text"
# ## Selecting elements using Label
# Selecting a single row or column returns a Series while selecting multiple gives a DataFrame.
# The first parameter is the row and the second parameter is the column, which is optional.
# + id="ZckScqYTDlTU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 222} outputId="f8adf3c7-f798-4d66-f16c-4f7f4b7be2b5" executionInfo={"status": "ok", "timestamp": 1526067454646, "user_tz": -345, "elapsed": 622, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
df
# + [markdown] id="FE0Tfed7DlTm" colab_type="text"
# The column names become the labels for the output.
# + id="3fI5Q-flDlTo" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 130} outputId="8bb5ef9f-0e1a-4ad5-c735-8919e78bf902" executionInfo={"status": "ok", "timestamp": 1526067455778, "user_tz": -345, "elapsed": 997, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
df.loc[['Q', 'S']]
# + id="VRmLCXUIGna2" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 130} outputId="a7f36951-0f6f-4275-ab1f-7eab0d6b00cc" executionInfo={"status": "ok", "timestamp": 1526067456911, "user_tz": -345, "elapsed": 988, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
df.iloc[[0, 3]]
# + [markdown] id="pS4dW65qDlUU" colab_type="text"
# We can take column using the second parameter.
# *The colon **:** implies all of the elements for that dimension.*
# + id="wRHlv2PcGna-" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 222} outputId="12d2989a-0b05-41ce-dbc4-f05c3cd0c84c" executionInfo={"status": "ok", "timestamp": 1526067457970, "user_tz": -345, "elapsed": 913, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
df.iloc[:, 1:3]
# + id="7-8PFrvGDlUs" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 130} outputId="07189ca5-b1ec-43fa-eb9c-13d4a43afa4e" executionInfo={"status": "ok", "timestamp": 1526067458991, "user_tz": -345, "elapsed": 829, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
df.loc[["P", "T"], ["A", "C", "D"]]
# + [markdown] id="HDo1ztweDlU0" colab_type="text"
# # Titanic
# + [markdown] id="79ci1yx4DlU2" colab_type="text"
# ## Load CSV File
# + id="ENRvpINVDlU4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 885} outputId="eeb9be29-2a81-49c9-d03e-cc291587da2a" executionInfo={"status": "error", "timestamp": 1526067460009, "user_tz": -345, "elapsed": 847, "user": {"displayName": "AIDevNepal", "photoUrl": "//lh3.googleusercontent.com/-MNU86A08wKg/AAAAAAAAAAI/AAAAAAAAARg/nUuKU3ik7XU/s50-c-k-no/photo.jpg", "userId": "103910155252873736634"}}
train_df = pd.read_csv("Titanic_train.csv")
# + id="341fMIEoDlU-" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="45a6f526-f4a6-45d7-c5b5-5ca759d18298"
train_df.head()
# + id="tAUocnGIDlVM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="611cad40-7bb0-4da3-b644-4b16090ece63"
train_df.info()
# + [markdown] id="nzdxdRMpDlVa" colab_type="text"
# ## Drop
# + [markdown] id="Vm6CUlh-DlVc" colab_type="text"
# * Cabin data is sparse so remove it.
# * Name is complex to use, so remove that column.
# * Ticket is complex to use, so remove that column.
# + id="e2jt7YKZDlVe" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="fcf43374-7771-4d3b-d275-d1e0291daae3"
train_df.drop("Cabin", axis=1)
# + id="1yYizFmvDlVw" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="8f259a30-8612-4016-bb5f-61f01264345b"
train_df.head()
# + [markdown] id="GIG3M-v0DlV8" colab_type="text"
# <br>
# <br>
# To drop in the Original dataframe, use **inplace**.
# + id="GL3uKOJMDlV8" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
train_df.drop("Name", axis=1, inplace=True)
# + id="QP4fey7yDlWA" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="9aac4419-885a-4213-d86f-94832c06c70d"
train_df.head()
# + [markdown] id="RVDaNOOsDlWI" colab_type="text"
# <br>
# <br>
# To remove multiple columns, use **list** parameter.
# + id="pAICx3BgDlWI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
train_df.drop(["Ticket", "Cabin"], axis=1, inplace=True)
# + id="z0rvSpvlDlWM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="843ccc87-74c8-41d1-adbc-2302df075d22"
train_df.head()
# + id="9N7_PX-WDlWS" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="bfa04830-329c-49b8-83b4-8f2b8171e8d8"
train_df.info()
# + [markdown] id="SrRuU7XlDlWa" colab_type="text"
# * Age has null data
# * Embarked has null data
# + [markdown] id="9_q_FuJXDlWa" colab_type="text"
# ## Empty data
# + id="U-b1XcrmDlWc" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
null_embarked = train_df["Embarked"].isnull()
# + id="vAUfm97NDlWu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="4f13429c-e59b-41cc-e049-d770a083e799"
null_embarked.head()
# + id="Ri4Dqx3oDlW4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="8bd1837c-809a-44ff-840a-2d3165e8a254"
train_df[null_embarked]
# + id="DJLcY4OPDlXC" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="bf8dd1f2-510b-45d6-ed12-c9b7748388f1"
train_df["Embarked"].describe()
# + [markdown] id="2NGA5nmEDlXM" colab_type="text"
# ### Filling empty Embarked with Mode
# + id="5V3SKj1-DlXO" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Impute missing embarkation values with the mode of the column ("S").
train_df["Embarked"].fillna("S", inplace=True)
# + [markdown] id="NWf1NF7MDlXQ" colab_type="text"
# No Empty Embarked column
# + id="XSOCrPUCDlXS" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="1163f659-ffdc-41b5-9fe2-0bf596ff839b"
new_null_embarked = train_df["Embarked"].isnull()
train_df[new_null_embarked]
# + [markdown] id="zOnPi6HIDlXW" colab_type="text"
# ### Filling empty Age with mean of that column
# + id="5gGSrrRVDlXY" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="53fe8f8d-455b-41e9-b9b3-edd46a9c868a"
age_null = train_df["Age"].isnull()
train_df[age_null]
# + id="EpHJzacPDlXc" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
age_mean = train_df["Age"].mean()
# + id="Zgh73KVgDlXe" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="609e71fa-043a-47e7-ec6a-1c433ba55bbc"
age_mean
# + id="nW7HuesdDlXi" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
train_df["Age"].fillna(age_mean, inplace=True)
# + [markdown] id="OJM3cMNCDlXk" colab_type="text"
# No empty Age value afterwards
# + id="7bYquHAvDlXk" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="a7c3179c-3659-4538-93f1-8be2afa41b8a"
new_age_null = train_df["Age"].isnull()
train_df[new_age_null]
# + [markdown] id="Tx41uov6DlXo" colab_type="text"
# ## Data Mapping
# Mapping Characters to numeric values because Machine learning models only understand numbers.
# + id="0NdClxs1DlXq" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Encode each embarkation code as a small integer so that ML models,
# which only understand numbers, can consume the column.
embark_maps = dict(C=1, S=2, Q=3)
# + id="vshS3_wmDlXu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
train_df["Embarked"] = train_df["Embarked"].map(embark_maps)
# + id="sspwq19mDlX4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="00aa766d-f47e-4906-c8df-caa23a66390c"
train_df.head()
# + id="l_5BvTqPDlYA" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Numeric encoding for the Sex column: male -> 1, female -> 2.
gender_maps = {sex: code for code, sex in enumerate(("male", "female"), start=1)}
# + id="jlvQtvLrDlYI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
train_df["Sex"] = train_df["Sex"].map(gender_maps)
# + id="F1-_oWq7DlYO" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="8e554193-372d-4f20-80e0-212363bad016"
train_df.head()
# + id="KMCgcm-xDlYU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="917614e0-570f-4822-d778-a7c9fcce454b"
train_df.info()
# + [markdown] id="gUwr8gvwDlYa" colab_type="text"
# # Save to CSV File
# + id="sWbZBYYcDlYa" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
train_df.to_csv("Titanic_train_numeric.csv")
# + [markdown] id="7VSqA9VeDlYg" colab_type="text"
# # Correlation Plot
# + id="Le0weR3fDlYg" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
import seaborn as sns
import matplotlib.pyplot as plt
# + [markdown] id="zgVU9m61DlYk" colab_type="text"
# No need to explain this code. Just for visualization.
# + id="4z2M-bu6DlYm" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}} outputId="cac8b372-c647-4f85-d4da-cd7f6c1869ec"
# Setting the size of the correlation figure map
_, ax = plt.subplots(figsize=(10,8))
# Colormap : Colors to denote correlation values
cmap = sns.diverging_palette(250 , 10, s=68, l=40, as_cmap = True )
# Plotting heatmap using seaborn
sns.heatmap(train_df.corr(), cmap=cmap, vmin=-1, vmax=1, annot=True)
# + id="NhBLST6nDlYy" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# + id="DcBAkrr1tS5f" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# + id="Z1m1neBOtSBs" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# + id="wIVbZvPLtSof" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
| AIDevNepal/Pandas-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Additional Usage with psycopg2
#
# ### Imports
# ```
# import psycopg2
# import age
# ```
# ### Connect to PostgreSQL(with AGE extention)
# +
import psycopg2
import age
# Connect to PostgresDB with AGE
conn = psycopg2.connect(host="172.17.0.3", port="5432", dbname="postgres", user="postgres", password="<PASSWORD>")
cursor = conn.cursor()
# load AGE extention
cursor.execute("LOAD 'age';")
cursor.execute("SET search_path = ag_catalog, '$user', public;")
# Check if age_graph named 'testGraph' exists
cursor.execute("SELECT count(*) FROM ag_graph WHERE name=%s", ('testGraph',))
if cursor.fetchone()[0] == 0 :
# if age_graph named 'testGraph', create that
cursor.execute("SELECT create_graph(%s);", ('testGraph',))
# -
# ### Create & Query Vertices
# +
# Create Vertices
age.execCypher(cursor, "testGraph", "CREATE (n:Person {name: 'Joe'})")
age.execCypher(cursor, "testGraph", "CREATE (n:Person {name: 'Smith'})")
age.execCypher(cursor, "testGraph", "CREATE (n:Person {name: %s})", ('Jack',))
age.execCypher(cursor, "testGraph", "CREATE (n:Person {name: 'Andy', title: 'Developer'})")
age.execCypher(cursor, "testGraph", "CREATE (n:Person {name: %s, title: %s})", ('Tom','Developer',))
conn.commit()
# Query Vertices
age.queryCypher(cursor, "testGraph", "MATCH (n:Person) RETURN n")
# parse each row in cursor
print("---[Parse & print vertices row by row]---")
for row in age.getRows(cursor):
print(row.id, row.label, row.properties)
# Query Vertices with parameters
age.queryCypher(cursor, "testGraph", "MATCH (n:Person {name:%s}) RETURN n", ("Andy",))
# Parse full data in cursor
print("---[Parse all vertices in result set]---")
graph = age.buildGraph(cursor)
print("Count of queried vertices:", graph.size())
print("---[Print vertices in Graph object]---")
for row in graph:
print(row.id, row.label, row.properties)
# -
# ### Create & Query Edges
# +
# Create Edges
age.execCypher(cursor, "testGraph", "MATCH (a:Person), (b:Person) WHERE a.name = 'Joe' AND b.name = 'Smith' CREATE (a)-[r:workWith {weight: 3}]->(b)")
age.execCypher(cursor, "testGraph", "MATCH (a:Person), (b:Person) WHERE a.name = 'Andy' AND b.name = 'Tom' CREATE (a)-[r:workWith {weight: 1}]->(b)")
age.execCypher(cursor, "testGraph", "MATCH (a:Person {name: 'Jack'}), (b:Person {name: 'Andy'}) CREATE (a)-[r:workWith {weight: 5}]->(b)")
age.execCypher(cursor, "testGraph", "MATCH (a:Person {name: 'Joe'}), (b:Person {name: 'Jack'}) CREATE (a)-[r:workWith {weight: 5}]->(b)")
conn.commit()
print("-- [Query paths] --------")
age.queryCypher(cursor, "testGraph", "MATCH p=()-[:workWith]-() RETURN p")
print("---[Parse & print paths row by row]---")
for path in age.getRows(cursor):
print(path.start["name"], '-[',path.rel.label, path.rel.properties, ']-', path.end["name"])
# -
# ## Query Scalar or properties value
# +
# Query scalar value
print("-- Query scalar value --------------------")
age.queryCypher(cursor, "testGraph", "MATCH (n:Person) RETURN id(n)")
value = age.getSingle(cursor)
print(value)
# Query properties
print("-- Query properties --------------------")
age.queryCypher(cursor, "testGraph", "MATCH (n:Person) RETURN properties(n)")
value = age.getSingle(cursor)
print(value)
# Query properties value
print("-- Query properties value --------------------")
age.queryCypher(cursor, "testGraph", "MATCH (n:Person {name: 'Andy'}) RETURN n.title")
value = age.getSingle(cursor)
print(value)
print("-- Query path count --------")
age.queryCypher(cursor, "testGraph", "MATCH p=(:Person {name: 'Andy'})-[:workWith]-() RETURN count(p)")
graph = age.buildGraph(cursor)
print(graph[0])
# -
# ## Close connection
# +
# Clear test data
age.queryCypher(cursor, "testGraph", "MATCH (n:Person) DETACH DELETE n RETURN *")
conn.commit()
conn.close()
# -
| samples/apache-age-additional.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Solutions to Agave OAuth Exercises
#
# +
# import the requests library
import requests
# import getpass to prompt for a password
from getpass import getpass
# -
# the base URL for interacting with the Agave API
base_url = 'https://api.tacc.utexas.edu'
# Set up your TACC credentials. Modify the username appropriately
username = 'jstubbs'
password = getpass(prompt='Hello {}. Please enter your TACC password: '.format(username))
rsp = requests.get(url='{}/clients/v2'.format(base_url), auth=(username, password))
rsp.status_code
# the clients service, like all Agave services, returns us JSON:
rsp.json()
# To create a simple OAuth client that has access to all basic Agave APIs, we need to make a POST request to the clients service. The only required field we need to pass in is `clientName` to give a name to our client. Each client we create must have a unique name.
# +
# Pick a name for your client; this name will have to be different every time you run this cell. Otherwise, you
# will try to recreate a client with the same name and you will get an error.
client_name = 'cic_institute'
# make a POST request to the client's service, passing only that field.
# Note that the parameter name uses camel case
data = {'clientName': client_name}
rsp = requests.post(url='{}/clients/v2'.format(base_url), data=data, auth=(username, password))
rsp.status_code
# -
rsp.json()
# Extract the OAuth client credentials from the client-creation response.
key = rsp.json()['result']['consumerKey']
secret = rsp.json()['result']['consumerSecret']
# POST payload for generating a token using the password grant:
# scope will always be PRODUCTION.
data = {'username': username,
        'password': password,
        'grant_type': 'password',
        'scope': 'PRODUCTION'}
# note that authentication is technically HTTPBasicAuth with the OAuth client key and secret
rsp = requests.post('{}/token'.format(base_url), data=data, auth=(key, secret))
rsp.status_code
# check the response message:
rsp.json()
# pull out the access and refresh tokens
access_token = rsp.json()['access_token']
refresh_token = rsp.json()['refresh_token']
# +
# build the Authorization header in a headers dictionary
headers = {'Authorization': 'Bearer {}'.format(access_token)}
# make a request to the profiles service; the "me" endpoint is a special reserved word in Agave to indicate
# we want information about the associated token.
rsp = requests.get(url='{}/profiles/v2/me'.format(base_url), headers=headers)
rsp.status_code
# -
# check the json response
rsp.json()
# Indeed, the profile belongs to me. We are now ready to interact with Agave's cloud storage.
| docs/day2/notebooks/Solutions_Agave_OAuth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Computing mass functions, halo biases and concentrations
# This notebook illustrates how to compute mass functions, halo biases and concentration-mass relations with CCL, as well as how to translate between different mass definitions.
import numpy as np
import pylab as plt
import pyccl as ccl
# %matplotlib inline
# ## Preliminaries
# Generate a cosmology object and a few mass/redshift arrays
# +
# Cosmology
cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045,
h=0.67, A_s=2.1e-9, n_s=0.96)
# Array of masses
m_arr = np.geomspace(1E10,1E15,128)
# Array of redshifts
z_arr = np.linspace(0.,1.,16)
# -
# ## Mass definitions
# CCL admits 3 different classes of definitions:
# - Spherical overdensity (SO). The mass is defined as that enclosed by a radius within which the mean density is a factor $\Delta$ larger than the matter or critical density ($x$ is either $M$ or ${\rm crit}$:
#
# \begin{equation}
# M_{\Delta,x} = \frac{4\pi}{3}\Delta\rho_x R_{\Delta,x}^3
# \end{equation},
# - Virial spherical overdensity. The same as SO for the specific choice $\Delta=\Delta_{\rm vir}(z)$ and $x={\rm critical}$, where $\Delta_{\rm vir}$ is the virial overdensity, which CCL computes from Bryan & Norman 1998.
# - Friends-of-friends masses (fof).
#
# If you can attach a concentration-mass relation to a given SO mass definition, CCL is then able to translate masses according to that definition into any other SO definition assuming an NFW profile. This is only an approximation, and it's actually better to make sure you use consistent mass definitions throughout, but this functionality is provided for convenience.
#
# These mass definition objects can then be passed around to all halo-model functions to make sure masses are treated consistently.
#
#
# +
# Delta=200 (matter).
# This one has an associated concentration-mass relation,
# so we can convert to other SO mass definitions
hmd_200m = ccl.halos.MassDef200m()
# Delta=200 (critical).
# This one has an associated concentration-mass relation,
# so we can convert to other SO mass definitions
hmd_200c = ccl.halos.MassDef200c()
# You can also change the c(M) relation as follows:
hmd_200c_b = ccl.halos.MassDef200c(c_m='Bhattacharya13')
# Delta=500 (matter).
# This one does not have a c(M) relation.
hmd_500m = ccl.halos.MassDef(500, 'matter')
# Virial overdensity
hmd_vir = ccl.halos.MassDef('vir', 'critical')
# FoF mass definition
hmd_fof = ccl.halos.MassDef('fof', 'matter')
# -
# Note that associating concentration-mass relations with mass definitions is only necessary if you'll want to translate between different mass definitions. Otherwise, you can use any concentration-mass relation you want for a given mass definition as we show further down (even if that c(M) relation is not the one you used to initialize the corresponding mass definition object).
# ## Mass functions
# Mass functions are computed through classes that inherit from the `MassFunc` class. CCL supports a wide variety of mass function parametrizations, but more can be created following the instructions in the documentation.
#
# All mass functions have a mass definition attached to them. Some mass functions support a range of mass definitions, and you can select which one you want when instantiating the class. All mass functions have default mass definitions, which are used if `None` is passed (which is the case below).
# +
hmfs = []
# Press & Schechter mass function
hmfs.append(ccl.halos.MassFuncPress74(cosmo))
# Sheth & Tormen mass function
hmfs.append(ccl.halos.MassFuncSheth99(cosmo))
# Tinker 2008 mass function
hmfs.append(ccl.halos.MassFuncTinker08(cosmo))
# Tinker 2010 mass function
hmfs.append(ccl.halos.MassFuncTinker10(cosmo))
# Bocquet 2016 mass function
hmfs.append(ccl.halos.MassFuncBocquet16(cosmo))
# Let's plot all of them at z=0
plt.figure()
for mf in hmfs:
nm = mf.get_mass_function(cosmo, m_arr, 1.)
plt.plot(m_arr,
m_arr * nm, label=mf.name)
plt.xscale('log')
plt.ylim([1E9,8.5E9])
plt.legend()
plt.xlabel(r'$M/M_\odot$', fontsize=14)
plt.ylabel(r'$M\,\frac{dn}{d\log_{10}M}\,[M_\odot\,{\rm Mpc}^{-3}]$',
fontsize=14);
# -
# Let's explore the time evolution of the mass function
# +
# Look at time evolution
from matplotlib.pyplot import cm
hmf_200m = ccl.halos.MassFuncTinker08(cosmo, mass_def=hmd_200m)
plt.figure()
plt.title(r'$0<z<1$',fontsize=14)
for z in z_arr:
nm = hmf_200m.get_mass_function(cosmo, m_arr, 1./(1+z))
plt.plot(m_arr,
m_arr * nm, c=cm.autumn(z))
plt.xscale('log')
plt.ylim([5E8,7E9])
plt.xlabel(r'$M/M_\odot$',fontsize=14)
plt.ylabel(r'$M\,\frac{dn}{d\log_{10}M}\,[M_\odot\,{\rm Mpc}^{-3}]$',
fontsize=14);
# -
# ## Halo bias
# Similar comments apply to the different halo bias parametrizations supported by CCL.
# +
hbfs = []
# Sheth & Tormen 1999
hbfs.append(ccl.halos.HaloBiasSheth99(cosmo))
# Sheth & Tormen 2001
hbfs.append(ccl.halos.HaloBiasSheth01(cosmo))
# Bhattacharya 2011
hbfs.append(ccl.halos.HaloBiasBhattacharya11(cosmo))
# Tinker 2010
hbfs.append(ccl.halos.HaloBiasTinker10(cosmo))
# Let's plot all of them at z=0
plt.figure()
for bf in hbfs:
bm = bf.get_halo_bias(cosmo, m_arr, 1.)
plt.plot(m_arr, bm, label=bf.name)
plt.xscale('log')
plt.legend()
plt.xlabel(r'$M/M_\odot$', fontsize=14)
plt.ylabel(r'$b_h(M)$', fontsize=14);
# -
# ## Concentration-mass relation
# Concentration-mass relations work in a similar way
# +
cmrs = []
# Diemer 2015
cmrs.append(ccl.halos.ConcentrationDiemer15())
# Bhattacharya 2013
cmrs.append(ccl.halos.ConcentrationBhattacharya13())
# Prada 2012
cmrs.append(ccl.halos.ConcentrationPrada12())
# Klypin 2011
cmrs.append(ccl.halos.ConcentrationKlypin11())
# Duffy 2008
cmrs.append(ccl.halos.ConcentrationDuffy08())
# Let's plot all of them at z=0
plt.figure()
for cmr in cmrs:
cm = cmr.get_concentration(cosmo, m_arr, 1.)
plt.plot(m_arr, cm, label=cmr.name)
plt.xscale('log')
plt.legend()
plt.xlabel(r'$M/M_\odot$', fontsize=14)
plt.ylabel(r'$c(M)$', fontsize=14);
# -
# ## Convenience functions
# It is possible to select mass functions, halo biases and concentration-mass relation from their name as follows
nm = ccl.halos.mass_function_from_name('Tinker08')
bm = ccl.halos.halo_bias_from_name('Tinker10')
cm = ccl.halos.concentration_from_name('Duffy08')
print(nm)
print(bm)
print(cm)
# ## Mass conversion
# The lines below show how to convert between different mass definitions (and the consequences of doing so). First, we generate mass function objects for $\Delta=200$ and $500$. Then, we compute the mass function using both parametrizations, but for masses defined using $\Delta=200$ (the $\Delta=500$ mass function will use the concentration-mass relation to translate masses from $\Delta=200$ to $\Delta=500$ automatically). As you can see, doing so incurrs a systematic error of 5-20%.
# +
# Let's define a mass function object for Delta = 500 (matter)
hmf_500m = ccl.halos.MassFuncTinker08(cosmo, mass_def=hmd_500m)
# Now let's compare the mass function parametrized for 200 (matter)
# with the mass function parametrized for 500 (matter) but
# translated to 200 (matter)
nm = hmf_200m.get_mass_function(cosmo, m_arr, 1.,
mdef_other = hmd_200m)
nm_trans = hmf_500m.get_mass_function(cosmo, m_arr, 1.,
mdef_other = hmd_200m)
# Plot the fractional difference between the translated and the native
# Delta=200m mass functions.
plt.figure()
plt.plot(m_arr, nm_trans / nm - 1)
plt.xscale('log')
plt.xlabel(r'$M/M_\odot$', fontsize=14)
# Fix: the original y-label ended in a stray '$' with no opening LaTeX
# delimiter, which breaks mathtext rendering.
plt.ylabel('Error from mass translation',
           fontsize=14);
# -
| Halo-mass-function-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:deeplearning]
# language: python
# name: conda-env-deeplearning-py
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd
# ## Load and plot the data
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
df.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class']
df.head()
# +
y = df.iloc[0:100, 4].values
# If it is not Setosa then it is Versicolor
y = np.where(y == 'Iris-setosa', 0, 1)
# Features (sepal length and petal length)
x = df.iloc[0:100, [0,2]].values
# -
plt.scatter(x[:,0], x[:,1], c=y)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
nr_classes = len(set(y))
input_size = x.shape[1]
print('Input size: {}, nr_classes:{}'.format(input_size, nr_classes))
# ## Define Model
# `Input` expects a shape tuple, not a bare int.
inputs = keras.Input(shape=(input_size,))
#hidden_layer = keras.layers.Dense(3, activation="softmax")(inputs)
# Fix: a softmax over a single unit always outputs 1.0, so the original model
# could never discriminate the two classes.  A single-unit sigmoid is the
# correct output layer for binary classification.
output_layer = keras.layers.Dense(1, activation="sigmoid")(inputs)#(hidden_layer)
model = keras.Model(inputs=inputs, outputs=output_layer)
model.summary()
# ## Train model
model.compile(optimizer=keras.optimizers.SGD(learning_rate=0.0001), loss=keras.losses.MeanSquaredError())
history = model.fit(x, y, batch_size=100, epochs=10)
model.weights
model(x)
| notebooks/1.NeuralNetwork-Keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (herschelhelp_internal)
# language: python
# name: helpint
# ---
# # ELAIS-N1 master catalogue
# ## Preparation of Isaac Newton Telescope / Wide Field Camera (INT/WFC) data
#
# Isaac Newton Telescope / Wide Field Camera (INT/WFC) catalogue: the catalogue comes from `dmu0_INTWFC`.
#
# In the catalogue, we keep:
#
# - The identifier (it's unique in the catalogue);
# - The position;
# - The stellarity;
# - The magnitude for each band in aperture 4 ($1.2 * \sqrt{2}$ arcsec = 1.7 arcsec).
# - The kron magnitude to be used as total magnitude (no “auto” magnitude is provided).
#
# We don't know when the maps have been observed. We will use the year of the reference paper.
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
# +
# %matplotlib inline
# #%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
from collections import OrderedDict
import os
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from herschelhelp_internal.flagging import gaia_flag_column
from herschelhelp_internal.masterlist import nb_astcor_diag_plot, remove_duplicates
from herschelhelp_internal.utils import astrometric_correction, mag_to_flux
# +
# Output directory for temporary products; overridable via TMP_DIR.
OUT_DIR = os.environ.get('TMP_DIR', "./data_tmp")
try:
    os.makedirs(OUT_DIR)
except FileExistsError:
    pass
# Names of the (to-be-corrected) position columns after renaming below.
RA_COL = "wfc_ra"
DEC_COL = "wfc_dec"
# -
# ## I - Column selection
# +
# Mapping from original INT/WFC column names to the HELP naming scheme
# (m_ap_* = aperture-4 magnitudes, m_* = kron/total magnitudes).
imported_columns = OrderedDict({
        'id': "wfc_id",
        'ra': "wfc_ra",
        'decl': "wfc_dec",
        'pstar': "wfc_stellarity",
        'umag4': "m_ap_wfc_u",
        'uemag4': "merr_ap_wfc_u",
        'ukronmag': "m_wfc_u",
        'uekronmag': "merr_wfc_u",
        'gmag4': "m_ap_wfc_g",
        'gemag4': "merr_ap_wfc_g",
        'gkronmag': "m_wfc_g",
        'gekronmag': "merr_wfc_g",
        'rmag4': "m_ap_wfc_r",
        'remag4': "merr_ap_wfc_r",
        'rkronmag': "m_wfc_r",
        'rekronmag': "merr_wfc_r",
        'imag4': "m_ap_wfc_i",
        'iemag4': "merr_ap_wfc_i",
        'ikronmag': "m_wfc_i",
        'iekronmag': "merr_wfc_i",
        'zmag4': "m_ap_wfc_z",
        'zemag4': "merr_ap_wfc_z",
        'zkronmag': "m_wfc_z",
        'zekronmag': "merr_wfc_z"
    })
catalogue = Table.read("../../dmu0/dmu0_INTWFC/data/en1_intwfc_v2.1_HELP_coverage.fits")[list(imported_columns)]
for column in imported_columns:
    catalogue[column].name = imported_columns[column]
# Observation date unknown; use the year of the reference paper (see intro).
epoch = 2011
# Clean table metadata
catalogue.meta = None
# +
# Adding flux and band-flag columns
for col in catalogue.colnames:
    if col.startswith('m_'):
        errcol = "merr{}".format(col[1:])
        # Some objects have a magnitude of 0; we treat this as a missing value
        catalogue[col][catalogue[col] <= 0] = np.nan
        catalogue[errcol][catalogue[errcol] <= 0] = np.nan
        flux, error = mag_to_flux(np.array(catalogue[col]), np.array(catalogue[errcol]))
        # Fluxes are added in µJy
        catalogue.add_column(Column(flux * 1.e6, name="f{}".format(col[1:])))
        catalogue.add_column(Column(error * 1.e6, name="f{}".format(errcol[1:])))
        # Band-flag column (only for total/kron magnitudes, not apertures)
        if "ap" not in col:
            catalogue.add_column(Column(np.zeros(len(catalogue), dtype=bool), name="flag{}".format(col[1:])))
# TODO: Set to True the flag columns for fluxes that should not be used for SED fitting.
# -
catalogue[:10].show_in_notebook()
# ## II - Removal of duplicated sources
# We remove duplicated objects from the input catalogues.
# +
# Duplicates are resolved keeping the entry with the smallest error, in this
# column-priority order.
SORT_COLS = ['merr_ap_wfc_r', 'merr_ap_wfc_u', 'merr_ap_wfc_g', 'merr_ap_wfc_z']
FLAG_NAME = 'wfc_flag_cleaned'
nb_orig_sources = len(catalogue)
catalogue = remove_duplicates(catalogue, RA_COL, DEC_COL, sort_col=SORT_COLS,flag_name=FLAG_NAME)
nb_sources = len(catalogue)
print("The initial catalogue had {} sources.".format(nb_orig_sources))
print("The cleaned catalogue has {} sources ({} removed).".format(nb_sources, nb_orig_sources - nb_sources))
print("The cleaned catalogue has {} sources flagged as having been cleaned".format(np.sum(catalogue[FLAG_NAME])))
# -
# ## III - Astrometry correction
#
# We match the astrometry to the Gaia one. We limit the Gaia catalogue to sources with a g band flux between the 30th and the 70th percentile. Some quick tests show that this gives the lowest dispersion in the results.
gaia = Table.read("../../dmu0/dmu0_GAIA/data/GAIA_ELAIS-N1.fits")
gaia_coords = SkyCoord(gaia['ra'], gaia['dec'])
# Diagnostic plot of the pre-correction astrometric offsets.
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
                    gaia_coords.ra, gaia_coords.dec)
# +
delta_ra, delta_dec = astrometric_correction(
    SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]),
    gaia_coords
)
print("RA correction: {}".format(delta_ra))
print("Dec correction: {}".format(delta_dec))
# -
# Apply the computed offsets, then re-plot to check the residuals.
catalogue[RA_COL] += delta_ra.to(u.deg)
catalogue[DEC_COL] += delta_dec.to(u.deg)
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
                    gaia_coords.ra, gaia_coords.dec)
# ## IV - Flagging Gaia objects
catalogue.add_column(
    gaia_flag_column(SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]), epoch, gaia)
)
# +
GAIA_FLAG_NAME = "wfc_flag_gaia"
catalogue['flag_gaia'].name = GAIA_FLAG_NAME
print("{} sources flagged.".format(np.sum(catalogue[GAIA_FLAG_NAME] > 0)))
# -
# # V - Saving to disk
catalogue.write("{}/INT-WFC.fits".format(OUT_DIR), overwrite=True)
| dmu1/dmu1_ml_ELAIS-N1/1.1_INT-WFC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
np.random.seed(42)  # reproducible train/test split below
# NOTE(review): load_boston was removed in scikit-learn 1.2; this notebook
# requires scikit-learn < 1.2 to run as-is.
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
dataset = load_boston()
# #### Einfache Lineare Regression
#
# ## Bedeutung
#
# <font size="4">
#
# - $R^2$: Wie viel Streuung kann von dem Regressionsmodell erklärt werden
# - coef: Steigung der Geraden
# - intercept: y-Achsenabschnitt
#
# </font>
#
# ## Symbole
#
# <font size="4">
#
# - $\bar{x}$: Mittelwert von $x$
# - $\bar{y}$: Mittelwert von $y$
# - $\hat{y}$: Prediktion vom Modell
#
# </font>
#
# ## Datensatz
#
# <font size="4">
#
# - $m$: Anzahl an Samples
# - $n$: Anzahl an Features
# - $x$: Input-Daten (Features)
# - $y$: Output Daten (Targets)
#
# ## Formeln
#
# <font size="4">
#
# - $\beta = (X^TX)^{-1}X^Ty$
# - coef = $\beta$\[1:\]
# - intercept = $\beta$\[0\]
# - $\hat{y} = X\beta$
# - $R^2 = 1 - \frac{\sum_{i=1}^n(y_i-\hat{y})^2}{\sum_{i=1}^n(y_i-\bar{y})^2}$
#
# </font>
class LinearRegression:
    """Ordinary least squares linear regression via the normal equation.

    Solves beta = (X^T X)^{-1} X^T y; when use_intercept is True a column of
    ones is prepended so that beta[0] is the intercept and beta[1:] the slopes.
    Accepts 1-D (single feature) or 2-D (n_samples, n_features) inputs.
    """
    def __init__(self, use_intercept: bool = True) -> None:
        self.coef_: np.ndarray = None       # slope(s); set by fit()
        self.intercept_: np.ndarray = None  # y-axis offset; set by fit()
        self.use_intercept = use_intercept

    @staticmethod
    def _as_2d(x: np.ndarray) -> np.ndarray:
        """Return x as an (n_samples, n_features) array."""
        x = np.asarray(x)
        return x.reshape(-1, 1) if x.ndim == 1 else x

    def _add_intercept(self, x: np.ndarray) -> np.ndarray:
        """Prepend a column of ones for the intercept term."""
        intercepts = np.ones(shape=(x.shape[0]))
        x = np.column_stack((intercepts, x))
        return x

    def fit(self, x: np.ndarray, y: np.ndarray) -> None:
        """Estimate coef_ and intercept_ from training data via the normal equation."""
        x = self._as_2d(x)
        if self.use_intercept:
            x = self._add_intercept(x)
        inner = np.dot(x.T, x)
        inv = np.linalg.inv(inner)
        beta = np.dot(np.dot(inv, x.T), y)
        if self.use_intercept:
            self.intercept_ = beta[0]
            self.coef_ = beta[1:]
        else:
            # Bug fix: without an intercept column, *every* entry of beta is a
            # coefficient; the original wrongly split off beta[0] as intercept.
            self.intercept_ = 0.0
            self.coef_ = beta

    def predict(self, x: np.ndarray) -> np.ndarray:
        """Return predictions X @ coef_ + intercept_."""
        # Bug fix: use a dot product so multi-feature inputs work; the original
        # element-wise multiply was only correct for a single feature.
        # (Debug print statements removed.)
        return np.dot(self._as_2d(x), self.coef_) + self.intercept_

    def score(self, x: np.ndarray, y: np.ndarray):
        """Return the coefficient of determination R^2 on (x, y)."""
        y_pred = self.predict(x)
        y_mean = np.mean(y, axis=0)
        numerator = np.sum((y - y_pred)**2)      # residual sum of squares
        denominator = np.sum((y - y_mean)**2)    # total sum of squares
        r2_score = 1.0 - (numerator / denominator)
        return r2_score
# +
# Single-feature regression: column 5 is the average number of rooms.
x = dataset.data[:, 5]
y = dataset.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# +
# Fit our implementation and compare its R^2 against sklearn's r2_score.
regr = LinearRegression()
regr.fit(x_train, y_train)
y_pred = regr.predict(x_test)
r2_own = regr.score(x_test, y_test)
r2_sklearn = r2_score(y_test, y_pred)
print(f"Coef: {regr.coef_}")
print(f"Intercept: {regr.intercept_}")
print(f"R2: {r2_own}")
print(f"R2: {r2_sklearn}")
# +
# Plot the fitted line over the training scatter.
x_arange = np.arange(start=3.0, stop=10.0, step=0.05)
y_arange = regr.predict(x_arange)
plt.scatter(x_train, y_train)
plt.plot(x_arange, y_arange, color="red")
plt.show()
| Chapter5_Regression/SimpleLinearRegressionImplementation/LinearRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
##################################################################
#《Python机器学习及实践:从零开始通往Kaggle竞赛之路(2023年度版)》开源代码
#-----------------------------------------------------------------
# @章节号:6.4.1(残差神经网络的PyTorch实践)
# @作者:范淼
# @电子邮箱:<EMAIL>
# @微博:https://weibo.com/fanmiaothu
# @官方交流QQ群号:561500762
##################################################################
# +
from torch import nn, optim
# Set the hyperparameters.
KERNEL_SIZE = 3          # convolution kernel size
FILTERS = (32, 64)       # output channels of the two conv layers
HIDDEN_SIZE = 256        # width of the hidden fully-connected layer
NUM_CLASSES = 10         # fashion_mnist has 10 classes
EPOCHS = 5               # training epochs
BATCH_SIZE = 64          # mini-batch size
LEARNING_RATE = 1e-3     # Adam learning rate
class ResNet(nn.Module):
    '''
    Residual neural network class, inheriting from nn.Module.

    Two 'same'-padded convolutions with a skip connection from the input,
    followed by 2x2 max-pooling and a two-layer classifier head.
    Expects inputs of shape (N, 1, 28, 28).
    '''
    def __init__(self, filters, kernel_size, hidden_size, num_classes):
        super(ResNet, self).__init__()
        self.conv_1 = nn.Conv2d(in_channels=1, out_channels=filters[0], kernel_size=kernel_size, padding='same')
        self.relu = nn.ReLU()
        self.conv_2 = nn.Conv2d(in_channels=filters[0], out_channels=filters[1], kernel_size=kernel_size, padding='same')
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
        # Bug fix: honor the hidden_size / num_classes arguments instead of the
        # hard-coded 256 and 10 (backward compatible: the notebook passes
        # HIDDEN_SIZE=256 and NUM_CLASSES=10).
        self.l1 = nn.Linear(int((28/2) ** 2 * filters[1]), hidden_size)
        self.l2 = nn.Linear(hidden_size, num_classes)
    def forward(self, x):
        out = self.conv_1(x)
        out = self.relu(out)
        out = self.conv_2(out)
        # Residual connection: the single input channel broadcasts across
        # the filters[1] output channels.
        out += x
        out = self.relu(out)
        out = self.pool(out)      # -> (N, filters[1], 14, 14)
        out = self.flatten(out)
        out = self.l1(out)
        out = self.relu(out)
        out = self.l2(out)        # logits of shape (N, num_classes)
        return out
# Initialize the residual neural network model.
model = ResNet(FILTERS, KERNEL_SIZE, HIDDEN_SIZE, NUM_CLASSES)
# Set the network's loss function (cross-entropy over class logits).
criterion = nn.CrossEntropyLoss()
# Set the network's optimization method.
optimizer = optim.Adam(model.parameters(), lr = LEARNING_RATE)
# +
import pandas as pd
# Use pandas to read the fashion_mnist training and test data files.
train_data = pd.read_csv('../datasets/fashion_mnist/fashion_mnist_train.csv')
test_data = pd.read_csv('../datasets/fashion_mnist/fashion_mnist_test.csv')
# Split the training data into features and class labels.
X_train = train_data[train_data.columns[1:]]
y_train = train_data['label']
# Split the test data into features and class labels.
X_test = test_data[train_data.columns[1:]]
y_test = test_data['label']
# +
from sklearn.preprocessing import StandardScaler
# Initialize the data standardization scaler.
ss = StandardScaler()
# Standardize the training features (fit on the training set only).
X_train = ss.fit_transform(X_train)
# Standardize the test features using the training statistics.
X_test = ss.transform(X_test)
# +
import torch
from torch.utils.data import TensorDataset, DataLoader
# Wrap the training data in a structure suitable for PyTorch training.
train_tensor = TensorDataset(torch.tensor(X_train.astype('float32')), torch.tensor(y_train.values))
# Build the training data loader (shuffled mini-batches).
train_loader = DataLoader(dataset = train_tensor, batch_size = BATCH_SIZE, shuffle = True)
n_total_steps = len(train_loader)
# Switch to training mode and run the optimization loop.
model.train()
for epoch in range(EPOCHS):
    for i, (features, labels) in enumerate(train_loader):
        # Reshape flat 784-pixel rows back into (N, 1, 28, 28) images.
        images = features.reshape([-1, 1, 28, 28])
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 300 == 0:
            print (f'Epoch [{epoch+1}/{EPOCHS}], Step[{i+1}/{n_total_steps}], Loss: {loss.item():.4f}')
# +
# Wrap the test data in a structure suitable for PyTorch evaluation.
test_tensor = TensorDataset(torch.tensor(X_test.astype('float32')), torch.tensor(y_test.values))
# Build the test data loader (no shuffling needed).
test_loader = DataLoader(dataset = test_tensor, batch_size = BATCH_SIZE, shuffle = False)
# Switch to evaluation mode and measure test accuracy.
model.eval()
n_correct = 0
n_samples = 0
for features, labels in test_loader:
    images = features.reshape([-1, 1, 28, 28])
    outputs = model(images)
    # The predicted class is the index of the largest logit.
    _, predictions = torch.max(outputs.data, 1)
    n_samples += labels.size(0)
    n_correct += (predictions == labels).sum().item()
acc = 100.0 * n_correct / n_samples
print('残差神经网络(PyTorch版本)在fashion_mnist测试集上的准确率为: %.2f%%。' %acc)
# -
| Chapter_6/.ipynb_checkpoints/Section_6.4.1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project - Power of Encodings and Operators in NAS Building Blocks
# This work will create an evolutionary algorith to evolve the inner structures of larger building blocks of a neural network. The encoding will be manipulated as well as the evolutionary operators. The evovled structures will be placed into the outer building block structure and tested on the MNIST task of identifying digits.
# +
# imports
import numpy as np
import copy
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import scikits.bootstrap as bootstrap
import warnings
warnings.filterwarnings('ignore') # Danger, <NAME>! (not a scalable hack, and may surpress other helpful warning other than for ill-conditioned bootstrapped CI distributions)
import scipy.stats # for finding statistical significance
import time
# -
# This cell preprocesses MNIST by deskewing (standard preprocessing step to straighten tilted images) and downscaling the images from `28x28` to `14x14` to try and keep the genome size down (at the cost of losing some resolution/information in the images), split out the labels (turning them into one-hot encodings), and separating the train and test sets.
#
# *Note:* This dataset contains 60,000 training examples, and 10,000 testing examples. Feel free to use only a small portion of the training examples/labels provided.
# Load the preprocessed (deskewed, downscaled 14x14) MNIST splits from CSV;
# rows are flattened images, labels are one-hot encoded.
train_x = np.loadtxt("train_x.csv", delimiter=',')
test_x = np.loadtxt("test_x.csv", delimiter=',')
train_y = np.loadtxt("train_y.csv", delimiter=',')
test_y = np.loadtxt("test_y.csv", delimiter=',')
# Let's take a look at the images!
# This is what the image looks like
num_images = 6
fig, axs = plt.subplots(1, num_images, figsize=(3*num_images, 3), sharey=True)
# Show the first few training images with their decoded labels.
for i in range(num_images):
    axs[i].imshow(train_x[i].reshape(14,14)) # we will keep the images flat to easily feed them into the neural network, but we need them back in a square shape to visualize
    axs[i].grid(False)
    axs[i].axis('off')
    axs[i].set_title("Label:"+str(np.argmax(train_y[i]))) # the argmax takes out one-hot encoding and turns it into a readable label
# ### Q1: Implementation
# Our individual solutions this week will be (again keeping things overly simplistic) single-layer neural networks. These networks are defined by a single weight matrix with input dimenion of the size of the flattened image (`14*14=196`) and output dimension of the size of the number of possible classes (`10`). Feel free to implement the genome as the weight matrix, or simply as a flattened float vector of size `1960`.
class Individual:
    """A candidate solution: a flat genome of floats uniform in [-1, 1].

    parameters:
    fitness_function: (callable) maps a genome (np.ndarray) to a scalar fitness
    genome_length: (int) number of genes in the genome
    """
    def __init__(self, fitness_function, genome_length):
        # Bug fix: use the genome_length argument instead of the hard-coded
        # 14*14*10, so Individual works for any genome size.
        self.genome = np.random.rand(genome_length)*2-1
        self.fitness_function = fitness_function
        self.fitness = 0  # not evaluated yet
    def eval_fitness(self):
        """Evaluate and cache this individual's fitness."""
        self.fitness = self.fitness_function(self.genome)
# There are two main ways to measure the performance of a neural network, loss and accuracy. For the sake of intuition, let's use accuracy here, but I'm providing the implementation of loss just in case you want to play around with it as well (though returning the negative of the loss, as the smaller magnitudes are better so this allows us to continue going "uphill" if we do ever choose to optimize for loss).
#
# As we haven't covered neural networks, I'm also providing the implementation of a single layer neural network (despite its apparent simplicity compared to multi-layer networks) in the fitness function below.
# +
def accuracy(output, y):
    """Fraction of rows where the arg-max prediction matches the arg-max label."""
    predicted = np.argmax(output, axis=1)
    actual = np.argmax(y, axis=1)
    return np.count_nonzero(np.isclose(predicted, actual)) / y.shape[0]
def loss (output, y):
    """Negative per-sample sum of squared errors (larger is better)."""
    residuals = output - y
    return -np.square(residuals).sum() / y.shape[0]
def neural_network_fitness(weights,x=train_x,y=train_y):
    """Fitness of a flat weight vector: accuracy of the single-layer net on (x, y)."""
    layer = np.reshape(weights, (14*14, 10))
    predictions = np.dot(x, layer)
    return accuracy(predictions, y)
# -
# ### Q1b: Real-valued mutation
#
# In class, we've only alluded indirectly to mutating vectors of floats as genomes (like neural network weights). Let's play around with the implementations of these. For simplicity, we'll ignore crossover for now. Rather than flipping a given number of bits, let's try adding a small random value to each gene's value by adding `(np.random.rand(genome_length)*2-1)*mutation_size` to the genome. This takes a uniform distribution, normalizes it to be between -1 and 1, then scales it by some `mutation_size` scaling factor that you can pass into your `evolutionary_algorithm` function.
# ### Q1c: Diversity Tracking
#
# In addition to keeping track of the best genome, and fitness at each generation, let's also record the diversity of the population at each generation. The metric we talked about most in class was measuring genotypic diversity with the average standard deviation of the distribution across the population of the values for each gene.
# +
def evolutionary_algorithm(fitness_function=None, total_generations=100, num_parents=10, num_children=10, genome_length=10, num_elements_to_mutate=1, mutation_size=0.1, crossover=True, tournament_size=4, num_tournament_winners=2):
    """ Evolutionary Algorithm: mu+lambda with tournament selection (adapted from the basic hillclimber in our last assignment)
    parameters:
    fitness_function: (callable function) that returns the fitness of a genome
                      given the genome as an input parameter (e.g. as defined in Landscape)
    total_generations: (int) number of total iterations for stopping condition
    num_parents: (int) the number of parents we downselect to at each generation (mu)
    num_children: (int) the number of children (note: parents not included in this count) that we balloon to each generation (lambda)
    genome_length: (int) length of the genome to be evolved
    num_elements_to_mutate: (int) number of alleles to modify during mutation (0 = no mutation)
                            NOTE(review): currently unused -- every gene is perturbed on each mutation
    mutation_size: (float) scaling parameter of the magnitude of mutations for floating point vectors
    crossover: (bool) whether to perform crossover when generating children
               NOTE(review): currently unused -- no crossover is implemented below
    tournament_size: (int) number of individuals competing in each tournament
    num_tournament_winners: (int) number of individuals selected as future parents from each tournament (must be less than tournament_size)
    returns:
    fitness_over_time: (numpy array) track record of the top fitness value at each generation
    solutions_over_time: (numpy array) track record of the top genome value at each generation
    diversity_over_time: (numpy array) track record of the population genetic diversity at each generation
    """
    # initialize record keeping
    solution = None # best genome so far
    solution_fitness = -99999 # fitness of best genome so far
    best_accuracy = -99999 # NOTE(review): never updated; only referenced by the commented-out debug print below
    fitness_over_time = np.zeros(total_generations)
    solutions_over_time = np.zeros((total_generations,genome_length))
    diversity_over_time = np.zeros(total_generations)
    # the initialization procedure
    population = [] # keep population of individuals in a list
    for i in range(num_parents): # only create parents for initialization (the mu in mu+lambda)
        population.append(Individual(fitness_function,genome_length)) # generate new random individuals as parents
    # get population fitness
    for i in range(len(population)):
        population[i].eval_fitness() # evaluate the fitness of each parent
    for generation_num in range(total_generations): # repeat
        # the modification procedure
        new_children = [] # keep children separate for now (lambda in mu+lambda)
        while len(new_children) < num_children:
            # inheritance
            [parent1, parent2] = np.random.choice(population, size=2) # pick 2 random parents
            child1 = copy.deepcopy(parent1) # initialize children as perfect copies of their parents
            child2 = copy.deepcopy(parent2)
            # crossover
            # N/A
            # mutation: perturb every gene by uniform noise in [-mutation_size, mutation_size]
            for this_child in [child1,child2]:
                this_child.genome += (np.random.rand(genome_length)*2-1)*mutation_size
                # this_child.genome += np.random.normal(loc = 0, scale = mutation_size, size = genome_length)
                this_child.genome = np.minimum(np.maximum(this_child.genome,-1),1) # clamp genes back into [-1, 1]
            new_children.extend((child1,child2)) # add children to the new_children list
        # the assessment procedure
        for i in range(len(new_children)):
            new_children[i].eval_fitness() # assign fitness to each child
        # selection procedure
        population += new_children # combine parents with new children (the + in mu+lambda)
        population = sorted(population, key=lambda individual: individual.fitness, reverse = True) # sort the full population by each individual's fitness (from highest to lowest)
        # tournament selection (elitism: the overall best individual always survives)
        new_population = []
        new_population.append(population[0])
        # NOTE(review): the population can overshoot num_parents by up to
        # num_tournament_winners - 1 on the final extend.
        while len(new_population) < num_parents:
            tournament = np.random.choice(population, size = tournament_size)
            tournament = sorted(tournament, key=lambda individual: individual.fitness, reverse = True)
            new_population.extend(tournament[:num_tournament_winners])
        population = new_population
        # record keeping
        if population[0].fitness > solution_fitness: # if the new parent is the best found so far
            solution = population[0].genome # update best solution records
            solution_fitness = population[0].fitness
            solution_generation = generation_num
        fitness_over_time[generation_num] = solution_fitness # record the fitness of the current best over evolutionary time
        solutions_over_time[generation_num,:] = solution
        # genotypic diversity: mean per-gene standard deviation across the population
        genome_list = np.array([individual.genome for individual in population])
        diversity = np.mean(genome_list.std(axis=0))
        diversity_over_time[generation_num] = diversity
        # print(generation_num, solution_fitness, best_accuracy, diversity)
    return fitness_over_time, solutions_over_time, diversity_over_time
# -
# ### Q2: Experimentation
#
# Due to the high dimensionality of this problem, the runs are a bit slower than before, so let's keep the scale small on this with just `50` generations and `5` repetitions. Hopefully this keeps things manageable from a runtime perspective (runs in a little over 30 seconds for each repetition, or a little under 3 minutes for all 5, on my machine). Let's use a mutation size of `1.0`, the same `50` parents and `50` children settings from last week, and a tournament size of `20`, choosing `10` winners.
#
# *Hint:* If this still takes too long to run on your machine (especially while debugging/exploring code), feel free to run smaller test runs first by reducing the number of generations for the runs, plotting without bootstrapping, etc.
# Per-treatment record dictionaries, keyed by run name.
experiment_results = {}
solutions_results = {}
diversity_results = {}
# +
num_runs = 5
total_generations = 50
genome_length = 14*14*10  # one weight per (pixel, class) pair
num_elements_to_mutate = genome_length
mutation_size = 1.0
num_parents = 50
num_children = 50
tournament_size = 20
num_tournament_winners = 10
for run_name in ["mutate_uniform_1.0_tournamen_20_choose_10"]:
    experiment_results[run_name] = np.zeros((num_runs, total_generations))
    solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length))
    diversity_results[run_name] = np.zeros((num_runs, total_generations))
    for run_num in range(num_runs):
        start_time = time.time()
        fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=neural_network_fitness, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, mutation_size=mutation_size, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners)
        experiment_results[run_name][run_num] = fitness_over_time
        solutions_results[run_name][run_num] = solutions_over_time
        diversity_results[run_name][run_num] = diversity_over_time
        print(run_name, run_num, time.time()-start_time, fitness_over_time[-1])
# -
# ### Q2b: Modifying Selection Pressure
# To create conditions which vary the amount of selection pressure, please also run the above runs varying the number of tournament winners to be `1` and `5` (in addition to the `10` you alredy ran).
# +
# Same setup, but with weaker selection pressure: 5 tournament winners.
num_runs = 5
total_generations = 50
genome_length = 14*14*10
num_elements_to_mutate = genome_length
mutation_size = 1.0
num_parents = 50
num_children = 50
tournament_size = 20
num_tournament_winners = 5
for run_name in ["mutate_uniform_1.0_tournamen_20_choose_5"]:
    experiment_results[run_name] = np.zeros((num_runs, total_generations))
    solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length))
    diversity_results[run_name] = np.zeros((num_runs, total_generations))
    for run_num in range(num_runs):
        start_time = time.time()
        fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=neural_network_fitness, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, mutation_size=mutation_size, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners)
        experiment_results[run_name][run_num] = fitness_over_time
        solutions_results[run_name][run_num] = solutions_over_time
        diversity_results[run_name][run_num] = diversity_over_time
        print(run_name, run_num, time.time()-start_time, fitness_over_time[-1])
# +
# Strongest selection pressure: only the single tournament winner survives.
num_runs = 5
total_generations = 50
genome_length = 14*14*10
num_elements_to_mutate = genome_length
mutation_size = 1.0
num_parents = 50
num_children = 50
tournament_size = 20
num_tournament_winners = 1
for run_name in ["mutate_uniform_1.0_tournamen_20_choose_1"]:
    experiment_results[run_name] = np.zeros((num_runs, total_generations))
    solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length))
    diversity_results[run_name] = np.zeros((num_runs, total_generations))
    for run_num in range(num_runs):
        start_time = time.time()
        fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=neural_network_fitness, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, mutation_size=mutation_size, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners)
        experiment_results[run_name][run_num] = fitness_over_time
        solutions_results[run_name][run_num] = solutions_over_time
        diversity_results[run_name][run_num] = diversity_over_time
        print(run_name, run_num, time.time()-start_time, fitness_over_time[-1])
# -
# ### Q2c: Visualization
# Like last time, please plot the bootstrapped fitness values over time for all 3 treatments.
def plot_mean_and_bootstrapped_ci_over_time(input_data = None, name = "change me", x_label = "change me", y_label="change me", y_limit = None, plot_bootstrap = True):
    """
    Plot the mean (and optionally a bootstrapped 95% CI) of each named run over generations.

    parameters:
    input_data: (dict mapping run name -> numpy array of shape (num_repetitions, total_generations)) solution metric to plot
    name: (string or list of strings) key(s) into input_data, also used for the legend
    x_label: (string) x axis label
    y_label: (string) y axis label
    y_limit: (optional tuple) (y_min, y_max) axis limits
    plot_bootstrap: (bool) whether to compute and shade the bootstrapped confidence interval
    returns:
    None
    """
    fig, ax = plt.subplots() # generate figure and axes
    # Normalize the single-run case: wrap the lone name in a list and key the
    # lone data array by that name.
    # Bug fix: the original wrapped input_data in a *list* ([input_data]) and
    # then indexed it with the string name, which raised a TypeError.
    if isinstance(name, str):
        input_data = {name: input_data}
        name = [name]
    for this_name in name:
        print("plotting", this_name)
        this_input_data = input_data[this_name]
        total_generations = this_input_data.shape[1]
        if plot_bootstrap:
            bootstrap_ci_generation_found = np.zeros((2, total_generations))
            for this_gen in range(total_generations):
                if this_gen % 10 == 0: print(this_gen)  # progress indicator (bootstrapping is slow)
                bootstrap_ci_generation_found[:, this_gen] = bootstrap.ci(this_input_data[:, this_gen], np.mean, alpha=0.05)
        ax.plot(np.arange(total_generations), np.mean(this_input_data, axis=0), label = this_name) # plot the mean over repetitions
        if plot_bootstrap:
            ax.fill_between(np.arange(total_generations), bootstrap_ci_generation_found[0, :], bootstrap_ci_generation_found[1, :], alpha=0.3) # shade the confidence interval
    ax.set_xlabel(x_label) # add axes labels
    ax.set_ylabel(y_label)
    if y_limit: ax.set_ylim(y_limit[0], y_limit[1])
    plt.legend(loc='best'); # add legend
# plot fitness over time
# Bug fix: the third run name was "choose_11", which was never run (the
# experiments above use num_tournament_winners of 10, 5 and 1), so this
# raised a KeyError; it should be "choose_1".
plot_mean_and_bootstrapped_ci_over_time(input_data = experiment_results, name = ["mutate_uniform_1.0_tournamen_20_choose_10","mutate_uniform_1.0_tournamen_20_choose_5","mutate_uniform_1.0_tournamen_20_choose_1"], x_label = "Generation", y_label = "Fitness", plot_bootstrap = True)
# ### Q3: Visualizing Diversity
# Please also plot the diversity of our population over evolutionary time.
# Bug fix: "choose_11" -> "choose_1" (the treatment actually run above).
plot_mean_and_bootstrapped_ci_over_time(input_data = diversity_results, name = ["mutate_uniform_1.0_tournamen_20_choose_10","mutate_uniform_1.0_tournamen_20_choose_5","mutate_uniform_1.0_tournamen_20_choose_1"], x_label = "Generation", y_label = "Diversity", plot_bootstrap = True)
# ### Q3b: Analysis
# What do you notice about the diveristy over time? Is this what you expected to tradeoff exploration and exploitation -- and how it related to fitness?
# **insert text here**
# ### Q4: Generalization to Test Datasets
# Whenever doing classification, it's good to make sure that your algorithm isn't overfitting to the training data. Based on your intuition about diversity and overfitting, what do you expect this relationship to look like?
# **insert text here**
# ### Q5: Evaluating Test Accuracy
# Since we already have test data loaded in above, let's evaluate your already trained algorithms (using your saved best-solution-so-far genomes at each generation) to see how test fitness tracks with the training fitness.
#
# Please implement a script which calculates the test accuracy of the solutions over time below.
#
# *Hint:* Look for where the training set is used during fitness evaluation during training for ideas of what functions/syntax to use.
# +
# Holds, per run name, test accuracies of shape (num_runs, total_generations).
test_accuracy_results = {}
def calc_test_accuracy_over_time(name = None):
    """Evaluate each saved best-so-far genome on the held-out test set.

    parameters:
    name: (list of strings) run names; keys into the global solutions_results

    Fills the global test_accuracy_results dict in place.
    NOTE(review): relies on the module-level num_runs / total_generations and
    on the globals test_x / test_y / neural_network_fitness being in scope.
    """
    for run_name in name:
        test_accuracy_results[run_name] = np.zeros((num_runs, total_generations))
        for run_num in range(len(solutions_results[run_name])):
            for gen_num in range(total_generations):
                # Re-evaluate the stored genome on the *test* images/labels.
                test_accuracy_results[run_name][run_num, gen_num] = neural_network_fitness(solutions_results[run_name][run_num, gen_num,:],x=test_x,y=test_y)
# -
# score every saved genome against the test set for the three tournament settings
calc_test_accuracy_over_time(name = ["mutate_uniform_1.0_tournamen_20_choose_10","mutate_uniform_1.0_tournamen_20_choose_5","mutate_uniform_1.0_tournamen_20_choose_11"])
# plot test accuracy over time (same conditions as the training-fitness plot above)
plot_mean_and_bootstrapped_ci_over_time(input_data = test_accuracy_results, name = ["mutate_uniform_1.0_tournamen_20_choose_10","mutate_uniform_1.0_tournamen_20_choose_5","mutate_uniform_1.0_tournamen_20_choose_11"], x_label = "Generation", y_label = "Test Accuracy", plot_bootstrap = True)
# ### Q5b: Analysis
# What did you find for a relationship between genetic diversity and overfitting to the training set? Was this what you expected?
# **insert text here**
# ### Q6: Modifying Mutation Rates
# Next we'll modify the mutation rate for our algorithm. Based on the results you see above, and how you expect the mutation rate to modify the genetic diversity of a population, how do you think that increasing or decreasing the mutation rate might affect the different tournament size runs above?
# **insert text here**
# ### Q7: Experimentation
# Let's find out! Let's do a mini grid search on the `mutation_size` and `num_tournament_winners`. To keep the number of runs down, let's just look at the extreme values of `num_tournament_winners` we had above (`1` and `10`), and run these for a `mutation_size` of `0.5` and `2.0` (in addition to the value of `1.0` we had before).
#
# *Hint:* This is a good time to double check that your `mutation_size` parameter you implemented above is working correctly (i.e. your results for how it should effect diversity below make sense)
#
# *Note:* This may take some time to run (if each condition is a couple minutes). Please try debugging code with smaller runs and make sure that if there are errors in along the way, what you've run already is saved and logged (so you don't have to rerun all 10 or 15 mins if you find a bug at the end of your script). And just use this time to go grab a coffee (or do some reading in your lovely evolutionary computation textbooks)!
# Mini grid search over (mutation_size, num_tournament_winners). Each cell runs one
# grid point for `num_runs` independent repetitions and stores per-generation fitness,
# best genomes, and diversity in the global `experiment_results`, `solutions_results`,
# and `diversity_results` dicts under a descriptive run-name key.
# +
# grid point: mutation_size=2.0, tournament 20 choose 10
num_runs = 5
total_generations = 50
genome_length = 14*14*10  # one weight per (14x14 input pixel, 10 output classes)
num_elements_to_mutate = genome_length  # mutate every allele
mutation_size = 2.0
num_parents = 50
num_children = 50
tournament_size = 20
num_tournament_winners = 10
for run_name in ["mutate_uniform_2.0_tournamen_20_choose_10"]:
    experiment_results[run_name] = np.zeros((num_runs, total_generations))
    solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length))
    diversity_results[run_name] = np.zeros((num_runs, total_generations))
    for run_num in range(num_runs):
        start_time = time.time()
        fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=neural_network_fitness, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, mutation_size=mutation_size, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners)
        experiment_results[run_name][run_num] = fitness_over_time
        solutions_results[run_name][run_num] = solutions_over_time
        diversity_results[run_name][run_num] = diversity_over_time
        # progress log: run name, run index, wall-clock seconds, final best fitness
        print(run_name, run_num, time.time()-start_time, fitness_over_time[-1])
# +
# grid point: mutation_size=2.0, tournament 20 choose 1
num_runs = 5
total_generations = 50
genome_length = 14*14*10
num_elements_to_mutate = genome_length
mutation_size = 2.0
num_parents = 50
num_children = 50
tournament_size = 20
num_tournament_winners = 1
for run_name in ["mutate_uniform_2.0_tournamen_20_choose_1"]:
    experiment_results[run_name] = np.zeros((num_runs, total_generations))
    solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length))
    diversity_results[run_name] = np.zeros((num_runs, total_generations))
    for run_num in range(num_runs):
        start_time = time.time()
        fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=neural_network_fitness, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, mutation_size=mutation_size, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners)
        experiment_results[run_name][run_num] = fitness_over_time
        solutions_results[run_name][run_num] = solutions_over_time
        diversity_results[run_name][run_num] = diversity_over_time
        print(run_name, run_num, time.time()-start_time, fitness_over_time[-1])
# +
# grid point: mutation_size=0.5, tournament 20 choose 1
num_runs = 5
total_generations = 50
genome_length = 14*14*10
num_elements_to_mutate = genome_length
mutation_size = 0.5
num_parents = 50
num_children = 50
tournament_size = 20
num_tournament_winners = 1
for run_name in ["mutate_uniform_0.5_tournamen_20_choose_1"]:
    experiment_results[run_name] = np.zeros((num_runs, total_generations))
    solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length))
    diversity_results[run_name] = np.zeros((num_runs, total_generations))
    for run_num in range(num_runs):
        start_time = time.time()
        fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=neural_network_fitness, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, mutation_size=mutation_size, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners)
        experiment_results[run_name][run_num] = fitness_over_time
        solutions_results[run_name][run_num] = solutions_over_time
        diversity_results[run_name][run_num] = diversity_over_time
        print(run_name, run_num, time.time()-start_time, fitness_over_time[-1])
# +
# grid point: mutation_size=0.5, tournament 20 choose 10
num_runs = 5
total_generations = 50
genome_length = 14*14*10
num_elements_to_mutate = genome_length
mutation_size = 0.5
num_parents = 50
num_children = 50
tournament_size = 20
num_tournament_winners = 10
for run_name in ["mutate_uniform_0.5_tournamen_20_choose_10"]:
    experiment_results[run_name] = np.zeros((num_runs, total_generations))
    solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length))
    diversity_results[run_name] = np.zeros((num_runs, total_generations))
    for run_num in range(num_runs):
        start_time = time.time()
        fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=neural_network_fitness, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, mutation_size=mutation_size, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners)
        experiment_results[run_name][run_num] = fitness_over_time
        solutions_results[run_name][run_num] = solutions_over_time
        diversity_results[run_name][run_num] = diversity_over_time
        print(run_name, run_num, time.time()-start_time, fitness_over_time[-1])
# -
# one-off cleanup helper, kept for reference: drop recorded runs matching a key pattern
# key_list = copy.deepcopy(experiment_results.keys())
# for key in key_list:
#     if "tournamen_4" in key:
#         experiment_results.pop(key)
#         diversity_results.pop(key)
#         solutions_results.pop(key)

# sanity check: list the experiment keys recorded so far
for key in diversity_results.keys():
    print(key)
# ### Q8: Visualize
# Please plot the results of these experiments (both fitness over time, and diversity)
# +
# grid-search conditions to compare. (Removed a dead `run_names =
# experiment_results.keys()` assignment that was immediately overwritten here.)
run_names = ["mutate_uniform_1.0_tournamen_20_choose_10", "mutate_uniform_1.0_tournamen_20_choose_11",
             "mutate_uniform_0.5_tournamen_20_choose_1", "mutate_uniform_0.5_tournamen_20_choose_10",
             "mutate_uniform_2.0_tournamen_20_choose_10", "mutate_uniform_2.0_tournamen_20_choose_1"]
plot_mean_and_bootstrapped_ci_over_time(input_data = experiment_results, name = run_names, x_label = "Generation", y_label = "Fitness", plot_bootstrap = False)
plot_mean_and_bootstrapped_ci_over_time(input_data = diversity_results, name = run_names, x_label = "Generation", y_label = "Diversity", plot_bootstrap = False)
# -
# ### Q8b: Analysis
# What patterns do you see? Did you expect this given the implications of each independently? Does the level of diversity match your intuition about how well search should perform? Does this tell you anything about the role/interaction of variation (e.g. mutation rate) and selection (e.g. tournament parameters)?
# **insert text here**
# ### Q9: Dynamic Mutation Rate
# We talked in class about many ways to have dynamic or adaptive mutation rates. Let's experiment with the simplest form of this: a mutation rate that changes linearly over generational time, from some provided starting value to some provided ending value. Please modify your evolutionary algorithm code below to enable this.
# +
def evolutionary_algorithm(fitness_function=None, total_generations=100, num_parents=10, num_children=10, genome_length=10, num_elements_to_mutate=1, mutation_size_start=1.0, mutation_size_end = 0.1, crossover=True, tournament_size=4, num_tournament_winners=2, proportion_elements_to_mutate=1.0):
    """ Evolutionary Algorithm (mu + lambda) whose mutation magnitude ramps linearly
    from `mutation_size_start` to `mutation_size_end` over the run.

    parameters:
        fitness_function: (callable) returns the fitness of a genome given the genome
            as an input parameter (e.g. as defined in Landscape)
        total_generations: (int) number of total iterations for stopping condition
        num_parents: (int) the number of parents we downselect to at each generation (mu)
        num_children: (int) the number of children (parents not included in this count)
            that we balloon to each generation (lambda)
        genome_length: (int) length of the genome to be evolved
        num_elements_to_mutate: (int) unused by this implementation; kept for signature
            compatibility with earlier versions (see proportion_elements_to_mutate)
        mutation_size_start: (float) scaling parameter of the magnitude of mutations at
            the beginning of search
        mutation_size_end: (float) scaling parameter of the magnitude of mutations at
            the end of search (if equal to mutation_size_start the rate is static,
            otherwise it is linearly interpolated between the two)
        crossover: (bool) unused -- no crossover is implemented; kept for signature
            compatibility
        tournament_size: (int) number of individuals competing in each tournament
        num_tournament_winners: (int) number of individuals selected as future parents
            from each tournament (must be less than tournament_size)
        proportion_elements_to_mutate: (float in [0, 1]) probability each allele is
            perturbed during mutation. Fix: this keyword was previously not accepted,
            so the later experiment cell that passes it crashed with a TypeError. At
            the default of 1.0 the masking step is skipped entirely, so behavior
            (including the random-number stream) is identical to before.
    returns:
        fitness_over_time: (numpy array) track record of the top fitness value at each generation
        solutions_over_time: (numpy array) track record of the top genome value at each generation
        diversity_over_time: (numpy array) track record of the population genetic diversity at each generation
    """
    # initialize record keeping
    solution = None  # best genome so far
    solution_fitness = -99999  # fitness of best genome so far
    best_accuracy = -99999  # retained for the (commented-out) per-generation debug print
    fitness_over_time = np.zeros(total_generations)
    solutions_over_time = np.zeros((total_generations, genome_length))
    diversity_over_time = np.zeros(total_generations)

    # the initialization procedure
    population = []  # keep population of individuals in a list
    for i in range(num_parents):  # only create parents for initialization (the mu in mu+lambda)
        population.append(Individual(fitness_function, genome_length))  # generate new random individuals as parents

    # get population fitness
    for i in range(len(population)):
        population[i].eval_fitness()  # evaluate the fitness of each parent

    for generation_num in range(total_generations):  # repeat

        # the modification procedure
        new_children = []  # keep children separate for now (lambda in mu+lambda)
        # note: extending by 2 per pass can overshoot num_children by one when
        # num_children is odd (matches the original behavior)
        while len(new_children) < num_children:

            # inheritance
            [parent1, parent2] = np.random.choice(population, size=2)  # pick 2 random parents
            child1 = copy.deepcopy(parent1)  # initialize children as perfect copies of their parents
            child2 = copy.deepcopy(parent2)

            # crossover
            # N/A (not implemented; the `crossover` flag is ignored)

            # mutation: uniform perturbation whose magnitude ramps linearly over generations
            mutation_size = mutation_size_start - (mutation_size_start-mutation_size_end) * generation_num/total_generations
            for this_child in [child1, child2]:
                perturbation = (np.random.rand(genome_length)*2-1) * mutation_size
                if proportion_elements_to_mutate < 1.0:
                    # mask so only ~this proportion of alleles is perturbed; skipped at
                    # the default of 1.0 so the random stream (and results) are unchanged
                    perturbation = perturbation * (np.random.rand(genome_length) < proportion_elements_to_mutate)
                this_child.genome += perturbation
                this_child.genome = np.minimum(np.maximum(this_child.genome, -1), 1)  # clip genome to [-1, 1]

            new_children.extend((child1, child2))  # add children to the new_children list

        # the assessment procedure
        for i in range(len(new_children)):
            new_children[i].eval_fitness()  # assign fitness to each child

        # selection procedure
        population += new_children  # combine parents with new children (the + in mu+lambda)
        population = sorted(population, key=lambda individual: individual.fitness, reverse=True)  # sort the full population by fitness (highest to lowest)

        # tournament selection, with elitism: population[0] (the current best) always survives
        new_population = []
        new_population.append(population[0])
        while len(new_population) < num_parents:
            tournament = np.random.choice(population, size=tournament_size)
            tournament = sorted(tournament, key=lambda individual: individual.fitness, reverse=True)
            new_population.extend(tournament[:num_tournament_winners])
        population = new_population

        # record keeping
        if population[0].fitness > solution_fitness:  # if the new parent is the best found so far
            solution = population[0].genome  # update best solution records
            solution_fitness = population[0].fitness
            solution_generation = generation_num
        fitness_over_time[generation_num] = solution_fitness  # fitness of the current best over evolutionary time
        solutions_over_time[generation_num, :] = solution
        # diversity = mean per-allele standard deviation across the population
        genome_list = np.array([individual.genome for individual in population])
        diversity = np.mean(genome_list.std(axis=0))
        diversity_over_time[generation_num] = diversity
        # print(generation_num, solution_fitness, best_accuracy, diversity)

    return fitness_over_time, solutions_over_time, diversity_over_time
# -
# ### Q9b: Experimentation
# Please perform a set of runs which decrease the mutation rate from `1.0` to `0.1` linearly over the 50 generations of search, for a tournament of size `20` with `1` winner selected.
# +
num_runs = 5
total_generations = 50
genome_length = 14*14*10
proportion_elements_to_mutate = 1.0
mutation_size_start = 1.0
mutation_size_end = 0.1
num_parents = 50
num_children = 50
tournament_size = 20
num_tournament_winners = 1
for run_name in ["mutate_uniform_ramp_1.0_to_0.1_tournament_20_choose_1"]:
experiment_results[run_name] = np.zeros((num_runs, total_generations))
solutions_results[run_name] = np.zeros((num_runs, total_generations, genome_length))
diversity_results[run_name] = np.zeros((num_runs, total_generations))
for run_num in range(num_runs):
start_time = time.time()
fitness_over_time, solutions_over_time, diversity_over_time = evolutionary_algorithm(fitness_function=neural_network_fitness, total_generations=total_generations, num_parents=num_parents, num_children=num_children, genome_length=genome_length, proportion_elements_to_mutate=proportion_elements_to_mutate, mutation_size_start=mutation_size_start, mutation_size_end=mutation_size_end, tournament_size=tournament_size, num_tournament_winners=num_tournament_winners)
experiment_results[run_name][run_num] = fitness_over_time
solutions_results[run_name][run_num] = solutions_over_time
diversity_results[run_name][run_num] = diversity_over_time
print(run_name, run_num, time.time()-start_time, fitness_over_time[-1])
# -
# ### Q10: Visualize
# Please plot (fitness and diversity of) the dynamic mutation rate against fixed mutation rates of `1.0` and `0.5` for the same tournament parameters.
# list recorded experiment keys as a sanity check before plotting
for i in experiment_results.keys():
    print(i)
# compare the ramped mutation rate against fixed rates 0.5 and 1.0 for the same
# tournament parameters.
# NOTE(review): the 1.0 entry uses key "..._choose_11" rather than "..._choose_1"
# like the other two -- confirm this is the intended condition
run_names = ["mutate_uniform_0.5_tournamen_20_choose_1",
             "mutate_uniform_1.0_tournamen_20_choose_11",
             "mutate_uniform_ramp_1.0_to_0.1_tournament_20_choose_1"]
plot_mean_and_bootstrapped_ci_over_time(input_data = experiment_results, name = run_names, x_label = "Generation", y_label = "Fitness", plot_bootstrap = True)
plot_mean_and_bootstrapped_ci_over_time(input_data = diversity_results, name = run_names, x_label = "Generation", y_label = "Diversity", plot_bootstrap = True)
# ### Q10b: Analysis
# What do you see? Does the progress of the dynamic mutation rate track with what you expect given the fixed mutation rates? Why or why not? Talk especially about what happens near the end of search, relative to what you might expect from that same time period in the case with a fixed mutation rate of `0.1` (feel free to run that experiment if you want, or just speculate based on those that you have run).
# **insert text here**
# ### Q11: Future Work
# We've just begun to scratch the surface here. What other experiments would be intersting to run? What combinations of parameter interactions would be interesting? What other approaches to dynamic/adaptive learning rates would be fun to implement? Could you incorporate information about diversity in informing a dynamic learning rate -- what would that look like?
# **insert text here**
# ### Congratulations, you made it to the end!
# Nice work -- and hopefully you're starting to get the hang of these!
#
# Please save this file as a .ipynb, and also download it as a .pdf, uploading **both** to blackboard to complete this assignment.
#
# For your submission, please make sure that you have renamed this file (and that the resulting pdf follows suit) to replace `[netid]` with your UVM netid. This will greatly simplify our grading pipeline, and make sure that you receive credit for your work.
# #### Academic Integrity Attribution
# During this assignment I collaborated with:
# **insert text here**
| assignment_6/Assignment_6_Neuroevolution_[solutions]-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torch
# language: python
# name: torch
# ---
# +
import numpy as np
import pandas as pd
# viz
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(18.7,6.27)})  # wide default figure size
# notebook settings
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"  # echo every expression in a cell, not just the last
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('retina')  # hi-dpi inline figures
# -
# ## 11 Classes
# load model metadata from the three 11-class feature-selection sweeps and pool them
exp1 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.23_11_19594-20/model_meta_data.pkl"))
exp2 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.23_22:56_11_4500-50/model_meta_data.pkl"))
exp3 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.24_10:57_11_300-60/model_meta_data.pkl"))
full = pd.concat([exp1, exp2, exp3])
# ANMI distribution per feature count (the `_ =` assignments suppress the cell echo
# enabled by ast_node_interactivity = "all")
full.boxplot(by='n_features', column='ANMI', rot=45)
_ = plt.ylim((0., 1.))
_ = plt.yticks(np.arange(0., 1.01, 0.05))
_ = plt.xticks(fontsize=6)
# same distribution, zoomed to <= 2000 features
full[full['n_features']<=2000].boxplot(by='n_features', column='ANMI', rot=45)
_ = plt.ylim((0., 1.))
_ = plt.yticks(np.arange(0., 1.01, 0.05))
_ = plt.xticks(fontsize=6)
# +
#sns.regplot(x='n_features', y='ANMI', data=full, lowess=True)
# -
# ## 26 Classes
# load the 26-class sweep and plot its ANMI distribution per feature count
exp26 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.24_14:59_26_2000-100/model_meta_data.pkl"))
exp26.boxplot(by='n_features', column='ANMI', rot=45)
_ = plt.ylim((0., 1.))
_ = plt.yticks(np.arange(0., 1.01, 0.05))
_ = plt.xticks(fontsize=6)
# smooth mean ANMI vs n_features (rolling window of 5) and overlay both settings
fd = full[full['n_features']<=2000].groupby(['n_features']).mean().rolling(5).mean()
dexp26 = exp26.groupby(['n_features']).mean().rolling(5).mean()
plt.plot(fd.index, fd['ANMI'], label='11 Clusters')
plt.plot(dexp26.index, dexp26['ANMI'], label='26 Clusters')
def test_func(x, a):
return x / (x + a)
# Toy model of how ANMI should scale with feature count: a saturating "true"
# signal, a constant random-clustering baseline, and linearly growing noise.
n_feat = np.arange(0, 2000, 5)
true_anmi = [test_func(n, 20) for n in n_feat]      # saturating signal
rand_anmi = np.repeat(0.1, len(n_feat))             # flat chance-level baseline
diff_anmi = true_anmi - rand_anmi                   # signal above chance
noise = [0.0001*n for n in n_feat]                  # noise grows with feature count
diff_noise = np.array(true_anmi) - np.array(noise)  # signal minus noise
plt.plot(n_feat, true_anmi)  # fixed: was `plt.plot(n_feat, anmi)` -- `anmi` is undefined (NameError)
plt.plot(n_feat, rand_anmi)
plt.plot(n_feat, diff_anmi)
plt.plot(n_feat, noise)
plt.plot(n_feat, diff_noise)
| notebook/2020.03.25_analyze_feat_sel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# plots of SOG model run
# - 1100 x 10 x 101 domain
# - 1 day
# - river on
# +
import os
import re
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# open biogeochemical (ptrc_T) and physics (grid_T) output for the current run (01)
# and the reference run (00) for comparison
f=nc.Dataset('/data/eolson/MEOPAR/SS36runs/run_SOG_01/SOG2D_1h_20141019_20141020_ptrc_T.nc')
f2=nc.Dataset('/data/eolson/MEOPAR/SS36runs/run_SOG_01/SOG2D_1h_20141019_20141020_grid_T.nc')
fold=nc.Dataset('/data/eolson/MEOPAR/SS36runs/run_SOG_00/SOG2D_1h_20141019_20141020_ptrc_T.nc')
f2old=nc.Dataset('/data/eolson/MEOPAR/SS36runs/run_SOG_00/SOG2D_1h_20141019_20141020_grid_T.nc')
fkeys=f.variables.keys()
# 1-D coordinate slices through the 2-D lon/lat fields
lons=f.variables['nav_lon'][1,:]
lats=f.variables['nav_lat'][:,1]
# find the depth coordinate variable by name; note r'depth.' matches 'depth' plus any
# one character, and if several variables match, the LAST match wins
for ik in fkeys:
    match = re.search(r'depth.',ik)
    if match:
        zkey=match.group(0)
z=f.variables[zkey][:]
t=f.variables['time_counter'][:]
# coordinate meshes for section (x-z), time-depth (t-z), and map (x-y) plots;
# depth is negated so plots show depth below the surface
xx,zz=np.meshgrid(lons,-z[:])
tt,tz=np.meshgrid(t,-z[:])
xt,yt=np.meshgrid(lons,lats)
# +
# salinity section along y-index 5 at the first and last output hour
fig, axs = plt.subplots(2,1,figsize=(10,6))
mesh0=axs[0].pcolormesh(xx,zz,f2.variables['vosaline'][0,:,5,:])
cb0=fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)', fontsize=18)
axs[0].set_xlabel('x (km)', fontsize=18)
axs[0].set_title('Salinity, t=0', fontsize=18)
#axs[0].autoscale(tight='True')
axs[0].axis([0, 550, -400, 0])
plt.setp(axs[0].get_xticklabels(),fontsize=16)
plt.setp(axs[0].get_yticklabels(),fontsize=16)
cb0.ax.tick_params(labelsize=16)
mesh1=axs[1].pcolormesh(xx,zz,f2.variables['vosaline'][-1,:,5,:])
cb1=fig.colorbar(mesh1,ax=axs[1])
axs[1].set_ylabel('z (m)', fontsize=18)
axs[1].set_xlabel('x (km)', fontsize=18)
axs[1].set_title('Salinity, t=end', fontsize=18)
#axs[1].autoscale(tight='True')
axs[1].axis([0, 550, -400, 0])
plt.setp(axs[1].get_xticklabels(),fontsize=16)
plt.setp(axs[1].get_yticklabels(),fontsize=16)
cb1.ax.tick_params(labelsize=16)
plt.tight_layout()
# +
# temperature section at the first and last output hour
fig, axs = plt.subplots(2,1,figsize=(10,6))
mesh0=axs[0].pcolormesh(xx,zz,f2.variables['votemper'][0,:,5,:])
cb0=fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)', fontsize=18)
axs[0].set_xlabel('x (km)', fontsize=18)
axs[0].set_title('Temperature ($^\circ$C), t=0', fontsize=18)
#axs[0].autoscale(tight='True')
axs[0].axis([0, 550, -400, 0])
plt.setp(axs[0].get_xticklabels(),fontsize=16)
plt.setp(axs[0].get_yticklabels(),fontsize=16)
cb0.ax.tick_params(labelsize=16)
mesh1=axs[1].pcolormesh(xx,zz,f2.variables['votemper'][-1,:,5,:])
cb1=fig.colorbar(mesh1,ax=axs[1])
axs[1].set_ylabel('z (m)', fontsize=18)
axs[1].set_xlabel('x (km)', fontsize=18)
axs[1].set_title('Temperature ($^\circ$C), t=end', fontsize=18)
#axs[1].autoscale(tight='True')
axs[1].axis([0, 550, -400, 0])
plt.setp(axs[1].get_xticklabels(),fontsize=16)
plt.setp(axs[1].get_yticklabels(),fontsize=16)
cb1.ax.tick_params(labelsize=16)
plt.tight_layout()
# +
# temperature difference between run 01 and reference run 00 (code-change check)
fig, axs = plt.subplots(2,1,figsize=(10,6))
mesh0=axs[0].pcolormesh(xx,zz,f2.variables['votemper'][0,:,5,:]-f2old.variables['votemper'][0,:,5,:])
cb0=fig.colorbar(mesh0,ax=axs[0])
axs[0].set_ylabel('z (m)', fontsize=18)
axs[0].set_xlabel('x (km)', fontsize=18)
axs[0].set_title('Temperature Difference (01-00) ($^\circ$C), t=0', fontsize=18)
#axs[0].autoscale(tight='True')
axs[0].axis([0, 550, -400, 0])
plt.setp(axs[0].get_xticklabels(),fontsize=16)
plt.setp(axs[0].get_yticklabels(),fontsize=16)
cb0.ax.tick_params(labelsize=16)
mesh1=axs[1].pcolormesh(xx,zz,f2.variables['votemper'][-1,:,5,:]-f2old.variables['votemper'][-1,:,5,:])
cb1=fig.colorbar(mesh1,ax=axs[1])
axs[1].set_ylabel('z (m)', fontsize=18)
axs[1].set_xlabel('x (km)', fontsize=18)
axs[1].set_title('Temperature Difference (01-00) ($^\circ$C), t=end', fontsize=18)
#axs[1].autoscale(tight='True')
axs[1].axis([0, 550, -400, 0])
plt.setp(axs[1].get_xticklabels(),fontsize=16)
plt.setp(axs[1].get_yticklabels(),fontsize=16)
cb1.ax.tick_params(labelsize=16)
plt.tight_layout()
# -
# for every 4-D (t, z, y, x) variable in the tracer file: plot the initial and final
# sections, and print the max |run 01 - run 00| over all output as a regression check
for ik in fkeys:
    if np.size(f.variables[ik].shape) == 4:
        fig, axs = plt.subplots(2,1,figsize=(16,6))
        print (ik)
        mesh0=axs[0].pcolormesh(xx,zz,f.variables[ik][0,:,5,:])
        cb0=fig.colorbar(mesh0,ax=axs[0])
        axs[0].set_ylabel('z (m)', fontsize=18)
        axs[0].set_xlabel('x (km)', fontsize=18)
        axs[0].set_title(ik + ', t=0', fontsize=18)
        #axs[0].autoscale(tight='True')
        axs[0].axis([0, 550, -400, 0])
        plt.setp(axs[0].get_xticklabels(),fontsize=16)
        plt.setp(axs[0].get_yticklabels(),fontsize=16)
        cb0.ax.tick_params(labelsize=16)
        mesh1=axs[1].pcolormesh(xx,zz,f.variables[ik][-1,:,5,:])
        cb1=fig.colorbar(mesh1,ax=axs[1])
        axs[1].set_ylabel('z (m)', fontsize=18)
        axs[1].set_xlabel('x (km)', fontsize=18)
        axs[1].set_title(ik + ', t=end', fontsize=18)
        #axs[1].autoscale(tight='True')
        axs[1].axis([0, 550, -400, 0])
        plt.setp(axs[1].get_xticklabels(),fontsize=16)
        plt.setp(axs[1].get_yticklabels(),fontsize=16)
        cb1.ax.tick_params(labelsize=16)
        plt.tight_layout()
        # loads the full variable into memory to compare the two runs
        print(np.max(np.absolute(f.variables[ik][:,:,:,:]-fold.variables[ik][:,:,:,:])))
# + active=""
# for ik in fkeys:
# if np.size(f.variables[ik].shape) == 4:
# fig=plt.figure(figsize=(10,3))
# axs = plt.axes()
# print ik
# mesh0=axs.pcolormesh(tt/3600/24,tz,f.variables[ik][:,:,5,105].conj().transpose())
# cb=fig.colorbar(mesh0,ax=axs)
# axs.set_ylabel('z (m)', fontsize=18)
# axs.set_xlabel('time (days)', fontsize=18)
# axs.set_title(ik, fontsize=18)
# axs.autoscale(tight='True')
# plt.setp(axs.get_xticklabels(),fontsize=16)
# plt.setp(axs.get_yticklabels(),fontsize=16)
# cb.ax.tick_params(labelsize=16)
# plt.tight_layout()
# -
| Elise/plotResults/plot-run_SOG_00-codeTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from SimPEG import *
from SimPEG import EM
from pymatsolver import MumpsSolver
from scipy.constants import mu_0
# %pylab inline
# cylindrical mesh: 5 m core cells (ncx radial, ncz vertical) with npad padding
# cells expanding by a factor of 1.3
cs, ncx, ncz, npad = 5, 25, 24, 20.
hx = [(cs,ncx), (cs,npad,1.3)]
hz = [(cs,npad,-1.3), (cs,ncz), (cs,npad,1.3)]
mesh = Mesh.CylMesh([hx,1,hz], '00C')
# background model: near-zero conductivity (air) everywhere, 1e-3 S/m halfspace below z=0
sighalf = 1e-3
sigma = np.ones(mesh.nC)*1e-8
sigmahomo = sigma.copy()
mu = np.ones(mesh.nC)*mu_0
sigma[mesh.gridCC[:,-1]<0.] = sighalf
# conductive (0.1 S/m), magnetically permeable (1.1 mu_0) block: r < 30 m, -150 < z < -50 m
# (the z < 0 clause is redundant given z < -50; kept byte-identical)
blkind = np.logical_and(mesh.gridCC[:,0]<30., (mesh.gridCC[:,2]<0)&(mesh.gridCC[:,2]>-150)&(mesh.gridCC[:,2]<-50))
sigma[blkind] = 1e-1
mu[blkind] = mu_0*1.1
offset = 0.  # NOTE(review): unused in the visible cells of this notebook
frequency = np.logspace(1, 5, 21)  # 21 frequencies, 10 Hz to 100 kHz
# receivers: real and imaginary secondary Bz at (8, 0, 30) m
rx0 = EM.FDEM.Rx(np.array([[8., 0., 30.]]), 'bzr_sec')
rx1 = EM.FDEM.Rx(np.array([[8., 0., 30.]]), 'bzi_sec')
# one circular-loop source (radius 5 m, at z = 30 m) per frequency
srcLists = []
nfreq = frequency.size
for ifreq in range(nfreq):
    src = EM.FDEM.Src.CircularLoop([rx0, rx1], frequency[ifreq], np.array([[0., 0., 30.]]), radius=5.)
    srcLists.append(src)
survey = EM.FDEM.Survey(srcLists)
# identity mappings so the model vector is [sigma; mu] stacked directly
iMap = Maps.IdentityMap(nP=int(mesh.nC))
maps = [('sigma', iMap), ('mu', iMap)]
prob = EM.FDEM.Problem_b(mesh, mapping=maps)
# prob.setPropMap(mapsdict)
prob.Solver = MumpsSolver
survey.pair(prob)
m = np.r_[sigma, mu]
# quick look at the conductivity model with the block outlined in white
fig, ax = plt.subplots(1,1, figsize = (5,5))
mesh.plotImage(np.log10(sigma), grid=True, gridOpts={'alpha':0.4, 'color':'k'}, clim=(-3, -1),ax=ax)
ax.plot(np.r_[0, 29.75], np.r_[-50, -50], 'w', lw=3)
ax.plot(np.r_[29.5, 29.5], np.r_[-50, -142.5], 'w', lw=3)
ax.plot(np.r_[0, 29.5], np.r_[-142.5, -142.5], 'w', lw=3)
ax.set_xlim(0, 150.)
ax.set_ylim(-200, 0.)
# second survey/problem pair set up identically to the first
survey0 = EM.FDEM.Survey(srcLists)
prob0 = EM.FDEM.Problem_b(mesh, mapping=maps)
prob0.Solver = MumpsSolver
survey0.pair(prob0)
m = np.r_[sigma, mu]  # re-assigned with the same value as above
m0 = np.r_[sigma, np.ones(mesh.nC)*mu_0]  # same sigma, no susceptibility
m00 = np.r_[np.ones(mesh.nC)*1e-8, np.ones(mesh.nC)*mu_0]  # uniform near-zero sigma (used below as the primary/free-space case)
# %%time
F = prob.fields(m)    # fields with the susceptible block
F0 = prob.fields(m0)  # fields without susceptibility
F00 = prob.fields(m00)  # free-space fields
import matplotlib
matplotlib.rcParams['font.size'] = 16
# predicted data, reshaped to (frequency, [real, imag]) columns
data = survey.dpred(m, u=F)
data0 = survey.dpred(m0, u=F0)
DATA = data.reshape((nfreq, 2))
DATA0 = data0.reshape((nfreq, 2))
DATAmu = DATA-DATA0  # effect of susceptibility on the data
def vizdata(ifreq):
    """Log-log plot of the Bz sounding curves versus frequency, with a vertical
    marker at frequency[ifreq].

    Plots sign-split real parts (solid = negative, dashed = positive), imaginary
    parts, and the susceptibility effect DATAmu. Relies on module-level globals
    set above: frequency, DATA, DATA0, DATAmu.
    """
    figsize(7,5)  # pylab helper from %pylab inline
    plt.loglog(frequency, -(DATA[:,0]), 'k-')
    plt.loglog(frequency, (DATA[:,0]), 'k--')
    plt.loglog(frequency, -(DATA0[:,0]), 'k.')
    plt.loglog(frequency, -(DATA[:,1]), 'b-')
    plt.loglog(frequency, -(DATA0[:,1]), 'b.')
    plt.loglog(frequency, DATAmu[:,0], 'r--')
    plt.loglog(frequency, -DATAmu[:,0], 'r-')
    # NOTE(review): the x-label misspells "Frequency"; left unchanged here because it
    # is a runtime string
    plt.xlabel("Freqency (Hz)")
    plt.ylabel("Bz field (T)")
    plt.legend(("Real F[$\sigma$, $\mu$] (-)", "Real F[$\sigma$, $\mu$] (+)",
                "Real F[$\sigma$, $\mu_0$] (-)",
                "Imag F[$\sigma$, $\mu$] (-)", "Imag F[$\sigma$, $\mu_0$] (-)",
                "Real F[$\sigma$, $\mu$]-F[$\sigma$, $\mu_0$] (+)",
                "Real F[$\sigma$, $\mu$]-F[$\sigma$, $\mu_0$] (-)"
                ), bbox_to_anchor=(1.6,1.), fontsize = 14)
    # vertical marker at the selected frequency
    plt.plot(np.ones(2)*frequency[ifreq], np.r_[1e-17, 1e-11], 'k:')
from ipywidgets import interactive, ToggleButtons
ifreq=3  # leftover manual index; the widget below supplies ifreq interactively
def vizfields(ifreq=0, primsec="primary",realimag="real"):
    """Three-panel section plot of the cell-centered magnetic flux density at
    frequency[ifreq]: with susceptibility (F), without (F0), and their difference.

    `primsec` chooses whether the free-space field (F00) is subtracted first;
    `realimag` selects the component plotted. Color is log10 |B|; arrows are unit
    vectors of the selected component. Relies on module-level globals mesh, F, F0,
    F00, frequency -- and `scale`.
    NOTE(review): `scale` is not defined in any visible cell of this notebook
    (presumably set interactively) -- verify, otherwise this raises a NameError.
    """
    titles = ["F[$\sigma$, $\mu$]", "F[$\sigma$, $\mu_0$]", "F[$\sigma$, $\mu$]-F[$\sigma$, $\mu_0$]"]
    # plotting window: r < 200 m, -400 < z < 200 m
    actind = np.logical_and(mesh.gridCC[:,0]<200., (mesh.gridCC[:,2]>-400)&(mesh.gridCC[:,2]<200))
    if primsec=="secondary":
        # subtract the free-space field so only the secondary response remains
        bCCprim = (mesh.aveF2CCV*F00[:,'b'][:,ifreq]).reshape(mesh.nC, 2, order='F')
        bCC = (mesh.aveF2CCV*F[:,'b'][:,ifreq]).reshape(mesh.nC, 2, order='F')-bCCprim
        bCC0 = (mesh.aveF2CCV*F0[:,'b'][:,ifreq]).reshape(mesh.nC, 2, order='F')-bCCprim
    elif primsec=="primary":
        bCC = (mesh.aveF2CCV*F[:,'b'][:,ifreq]).reshape(mesh.nC, 2, order='F')
        bCC0 = (mesh.aveF2CCV*F0[:,'b'][:,ifreq]).reshape(mesh.nC, 2, order='F')
    # reshape the selected cells onto a structured grid for contouring.
    # NOTE(review): (31, 43) is hard-wired to the actind window on this mesh -- it
    # must be updated if the mesh dimensions change
    XYZ = mesh.gridCC[actind,:]
    X = XYZ[:,0].reshape((31,43), order='F')
    Z = XYZ[:,2].reshape((31,43), order='F')
    bx = bCC[actind,0].reshape((31,43), order='F')
    bz = bCC[actind,1].reshape((31,43), order='F')
    bx0 = bCC0[actind,0].reshape((31,43), order='F')
    bz0 = bCC0[actind,1].reshape((31,43), order='F')
    bxsec = (bCC[actind,0]-bCC0[actind,0]).reshape((31,43), order='F')
    bzsec = (bCC[actind,1]-bCC0[actind,1]).reshape((31,43), order='F')
    # field amplitudes for each case and component
    absbreal = np.sqrt(bx.real**2+bz.real**2)
    absbimag = np.sqrt(bx.imag**2+bz.imag**2)
    absb0real = np.sqrt(bx0.real**2+bz0.real**2)
    absb0imag = np.sqrt(bx0.imag**2+bz0.imag**2)
    absbrealsec = np.sqrt(bxsec.real**2+bzsec.real**2)
    absbimagsec = np.sqrt(bxsec.imag**2+bzsec.imag**2)
    # NOTE(review): figsize(15,5) here is presumably the pylab helper (returns None),
    # so this is plt.figure(None); a figsize=(15,5) kwarg may have been intended -- verify
    fig = plt.figure(figsize(15,5))
    ax1 = plt.subplot(131)
    ax2 = plt.subplot(132)
    ax3 = plt.subplot(133)
    typefield="real"  # NOTE(review): unused; `realimag` is the switch actually used below
    if realimag=="real":
        ax1.contourf(X, Z,np.log10(absbreal), 100)
        ax1.quiver(X, Z,scale*bx.real/absbreal,scale*bz.real/absbreal,scale=0.2,width=0.005, alpha = 0.5)
        ax2.contourf(X, Z,np.log10(absb0real), 100)
        ax2.quiver(X, Z,scale*bx0.real/absb0real,scale*bz0.real/absb0real,scale=0.2,width=0.005, alpha = 0.5)
        ax3.contourf(X, Z,np.log10(absbrealsec), 100)
        ax3.quiver(X, Z,scale*bxsec.real/absbrealsec,scale*bzsec.real/absbrealsec,scale=0.2,width=0.005, alpha = 0.5)
    elif realimag=="imag":
        ax1.contourf(X, Z,np.log10(absbimag), 100)
        ax1.quiver(X, Z,scale*bx.imag/absbimag,scale*bz.imag/absbimag,scale=0.2,width=0.005, alpha = 0.5)
        ax2.contourf(X, Z,np.log10(absb0imag), 100)
        ax2.quiver(X, Z,scale*bx0.imag/absb0imag,scale*bz0.imag/absb0imag,scale=0.2,width=0.005, alpha = 0.5)
        ax3.contourf(X, Z,np.log10(absbimagsec), 100)
        ax3.quiver(X, Z,scale*bxsec.imag/absbimagsec,scale*bzsec.imag/absbimagsec,scale=0.2,width=0.005, alpha = 0.5)
    ax = [ax1, ax2, ax3]
    ax3.text(30, 140, ("Frequency=%5.2f Hz")%(frequency[ifreq]))
    # outline the block and the surface on all three panels
    for i, axtemp in enumerate(ax):
        axtemp.plot(np.r_[0, 29.75], np.r_[-50, -50], 'w', lw=3)
        axtemp.plot(np.r_[29.5, 29.5], np.r_[-50, -142.5], 'w', lw=3)
        axtemp.plot(np.r_[0, 29.5], np.r_[-142.5, -142.5], 'w', lw=3)
        axtemp.plot(np.r_[0, 100.], np.r_[0, 0], 'w', lw=3)
        axtemp.set_ylim(-200, 100.)
        axtemp.set_xlim(10, 100.)
        axtemp.set_title(titles[i])
# interactive browser over frequency index / primary-vs-secondary / real-vs-imag
Q = interactive(vizfields, ifreq=(0, frequency.size-1,1)
                ,primsec = ToggleButtons(options=["primary","secondary"])
                ,realimag = ToggleButtons(options=["real","imag"]))
Q
# replot the sounding curves with a marker at the widget's current frequency
vizdata(Q.kwargs['ifreq'])
| notebook/SusEffects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Practical Deep Learning for Coders Course - Lesson 1
#
# > "This blog-post series captures my weekly notes while I attend the [fastaiv5 course conducted by University of Queensland with fast.ai](https://itee.uq.edu.au/event/2022/practical-deep-learning-coders-uq-fastai). So off to week1."
#
# - toc: true
# - branch: master
# - badges: true
# - hide_binder_badge: true
# - hide_deepnote_badge: true
# - comments: true
# - author: <NAME>
# - categories: [fastaicourse, fastbook]
# - hide: false
# - search_exclude: false
# First there was a set of introductions by university officials at UQ like VC. One curious thing was everyone of UQ staff were honouring something traditionaly of that land to live in reconciliation.
#
# Then lecture of Jeremy starts, seeing his face the chatbox is in delight.
#
# Jeremy mentions there are two categories of students who attend the course:
#
# 1. Students who have enrolled via University of Queensland(with almost 350 people attending in-person and about 100 people remotely as well).
# 2. fastai fellows who have acknowledged for their contribution to community.
#
# Jeremy recommends having study buddies when we are learning the course is important. So he asks to create Study groups wherever possible. This course is now happening after a gap of 2 years, so there is a lot of new things which has to be covered as Deep learning moves so fast.
#
#
# Using Dalle-2 technique we can generative creative images from generate twitter bios. For a creative person, this can be very helpful to create good artwork. Then one of another popular techniques was using Pathways language model which is able to answers question with explanations and even explains why some jokes are funny.
#hide
# !pip install fastai timm -Uqq
# Jeremy talks about his interest in education.He is a homeschooler and learned from books by <NAME> & <NAME> which were inspiration for fast.ai. fastai teaches stuff in top-down manner. You will go into as much technical stuff as you, yet you will learn and implement cool stuff steadily.
#
# **About fast.ai course**
#
# He wrote an awesome book and this course. His book is one of the best sellers in Deep Learning and used to teach folks in companies like Tesla, OpenAI etc. Almost 6 million people watched his videos so far.Jeremy has won multiple competitions in Kaggle, was the CEO of kaggle. He build Enlitic, a medical company which was build for medical purpose with two other succesful startups.
#
# > Jeremy mentioned for this course, we are not using any material directly from Deep Learning For Coders with Fastai & Pytorch book. Yet he recommends to read portions of book after each chapter.
#
# Usually multiple people learn better if the same idea is exposed in different way
# from multiple sources. That's the why behind this approach.
# Jeremy started coding hands-own a bird or park classifier, which was considered as a very difficult problem in 2015. Even a comic depicted this. Yet things have changed so drastically in past few years, that it's very easy to do that now.
#
# > Yet let's look, why we couldn't build a bird classifer in 2015:
#
# - For classifying histopathological images, they used classical computer vision techniques.
# - They got big team of datascientist, mathematicans with lot of features who build relevant feature for machine learning hand by hand.
# - These project took years
# - Also deep learning was not in radar for researchers then.
#
# > What has now changed?
# - Using neural network they build these features on their own.
# - <NAME> & <NAME>(and actual weights) showed with visualization how neural networks work
# - Combine all features to learn and go slowly in past, neural networks learned on it's own these techniques.
# If it's a bird or not? notebook can be [found here](https://www.kaggle.com/code/jhoward/is-it-a-bird-creating-a-model-from-your-own-data). I am slightly tweaking this model to leverage pytorch image-models released by timm.
# +
# hide
from fastcore.all import *
import time
def search_images(term, max_images=200):
    """Scrape up to `max_images` image URLs for `term` from DuckDuckGo.

    Reads the search page once to obtain the per-query `vqd` session token,
    then pages through the JSON image endpoint until enough URLs have been
    collected or no 'next' page is offered.  Returns a fastcore `L` of URLs.
    """
    base = 'https://duckduckgo.com/'
    page = urlread(base, data={'q': term})
    # The image API refuses requests without the `vqd` token embedded in the HTML.
    token = re.search(r'vqd=([\d-]+)\&', page)
    endpoint = base + 'i.js'
    query = dict(l='us-en', o='json', q=term, vqd=token.group(1),
                 f=',,,', p='1', v7exp='a')
    found = set()
    payload = {'next': 1}
    while len(found) < max_images and 'next' in payload:
        payload = urljson(endpoint, data=query)
        found.update(L(payload['results']).itemgot('image'))
        endpoint = base + payload['next']
        time.sleep(0.2)  # throttle successive requests to be polite
    return L(found)[:max_images]
# -
# Sanity-check the scraper: fetch a single bird-photo URL.
urls = search_images('bird photos', max_images=1)
urls[0]
# +
from fastdownload import download_url
# Download the first hit locally and render a small thumbnail inline.
dest = 'bird.jpg'
download_url(urls[0], dest, show_progress=False)
from fastai.vision.all import *
# `Image` here is PIL's Image class, re-exported by fastai.
im = Image.open(dest)
im.to_thumb(256,256)
# -
# **Note:**
#
# - Image based algorithms, are not for images. Image for music classification by Deolho, Ethan sutin sounds from image recognizer. You can do music classification, with some creativity using cnns.
#
# - Also needing lots of data is a myth created by companies who sell data processng units. There are lot of free resources like Kaggle, Colab etc.
# >Observation by Jeremy: Tensorflow is slowly dying. Check this [article which he cited](https://www.assemblyai.com/blog/pytorch-vs-tensorflow-in-2022/). Yet pytorch has lot of hairy code, which can be solved using good abstractions in fastai.
#
# - fastai library tries to provide good and the best fine-tuned models, which work well compared to other libraries. He showed code required for implementing AdamW in pytorch and in fastai.
#
# [<NAME>](https://tmabraham.github.io/) pointed me to implemtation of AdamW to [chapter 16 in fastbook](https://github.com/fastai/fastbook/blob/master/16_accel_sgd.ipynb).
# +
# Grab one forest example to eyeball the second class.
download_url(search_images('forest photos', max_images=1)[0], 'forest.jpg', show_progress=False)
Image.open('forest.jpg').to_thumb(256,256)
# Build the dataset: one sub-folder per class under bird_or_not/.
searches = 'forest','bird'
path = Path('bird_or_not')
for o in searches:
    dest = (path/o)
    dest.mkdir(exist_ok=True, parents=True)
    download_images(dest, urls=search_images(f'{o} photo'))
    # Shrink images on disk so decoding at training time stays fast.
    resize_images(path/o, max_size=400, dest=path/o)
# -
# Remove any files that failed to download or are not valid images.
failed = verify_images(get_image_files(path))
failed.map(Path.unlink)
len(failed)
# As the code showed, data cleaning is a big part of machine learning. When we are taking this course as practitioners, we will spend a lot of time building and loading models. As in a compilers course, a lot of the time is spent not on techniques, but on getting the environment up and ready.
# +
# Declare how raw files become (image, label) batches:
dls = DataBlock(
    blocks=(ImageBlock, CategoryBlock),               # inputs are images, targets are categories
    get_items=get_image_files,                        # scan `path` recursively for image files
    splitter=RandomSplitter(valid_pct=0.2, seed=42),  # hold out 20% for validation, reproducibly
    get_y=parent_label,                               # label = parent folder name ('bird' / 'forest')
    item_tfms=[Resize(224, method='squish')]          # squish every image to 224x224 before batching
).dataloaders(path)
dls.show_batch()
# -
# After examining, 100s of project and datascience requirments. fastai came up with this approach of DataBlock, which consists of five things:
#
#
# 1. blocks
# 2. get_items
# 3. splitter
# 4. Batch_tfms(optional)
# 5. get_y
# 6. item_tfms
#
# Without validation data, it won't allow to train. parent_label, return parent folder. we saved as
# forests or birds. We need same size. Idea to do quickly, why not publish vision_learners
# with pets dataset.
#
#
# **Now it's time to train our model**
# +
# Fine-tune a small pretrained Vision Transformer (a timm model name) on our data.
learn = vision_learner(dls, 'vit_tiny_patch16_224', metrics=error_rate)
# fine_tune runs fastai's default transfer-learning schedule for 10 epochs.
learn.fine_tune(10)
# -
# One thing which is cool is that the whole presentation is also made with Jupyter Notebooks using [RiseJS](https://rise.readthedocs.io/en/stable/). Also jupyter notebooks can be used for writing books like [Deep Learning for Coders](https://www.amazon.in/Deep-Learning-Coders-fastai-PyTorch/dp/9385889206/ref=asc_df_9385889206/?tag=googleshopdes-21&linkCode=df0&hvadid=397083287744&hvpos=&hvnetw=g&hvrand=16600915651709325915&hvpone=&hvptwo=&hvqmt=&hvdev=c&hvdvcmdl=&hvlocint=&hvlocphy=1007777&hvtargid=pla-992154494864&psc=1&ext_vrnc=hi), for blogging using [fastpages](https://fastpages.fast.ai/), for CI/CD pipeline to run in parallel execution in [fastai repo](https://github.com/fastai/fastai).
#
# <NAME> has summarized on what can be done in this twitter threads.
#
# > twitter: https://twitter.com/iScienceLuvr/status/1519242326517829632
#
# After this Jeremy, showed all the examples in Chapter 1 in Deep Learning for coders. My notes then:
#
# We are still scratching the surface. Lot of marketing out there, some of first open source models available. The deep learning when it broke X, y, z in domain. In NLP it breaks lot of stuff
#
# What's really go in on : in arthur samuel with graph. The graphs are build with gv2 in jupyter
# notebook. Deploying models in ML is a bit tricky. But it's just predict and shows results.
# **Conclusion by Jeremy**
#
# So after first lesson:
#
# a) If you know python, then it's kind of easy for you.
# b) If don't know python, it's very difficult
#
# Regardless of what level you are. Experiment yourself and do something more complex.
# Go ahead and push yourself a little bit, but not much. Then present your work. Do stuff
# on things where you are interested.
#
| _notebooks/2022-04-26-fastai-51.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Functions used:
#
# 1. pd.read_csv
#
# 2. .head() and .tail()
#
# 3. .loc()
#
# 4. .drop()
#
# 5. pd.merge()
#
# 6. pd.to_numeric(dataframe["Col_name"],errors = "coerce")
import pandas as pd
import numpy as np
import os
# # For BankNifty:
# Load the BankNifty price history from its data directory.
link = "C:\\Users\\MAHE\\Desktop\\Data Science\\Projects\\data\\banknifty"
os.chdir(link)
bn = pd.read_csv("all_here.csv", sep = ",")
# Preview the last rows to confirm the load.
# (The statement was duplicated; the second identical tail() call was removed.)
bn.tail()
# # For Data:
# Switch to the directory holding the second dataset and load it.
os.chdir("C:\\Users\\MAHE\\Desktop\\Data Science\\Projects\\data")
df = pd.read_csv("data.csv")
# Preview the last rows to confirm the load.
df.tail()
# # Merging:
# ## The goal is to merge both the dataframes on "Date":
# Inner-join the two datasets on their common trading-date column.
merge_df = pd.merge(df,bn,on = ["Date"])
merge_df
# Volume columns are not needed for the O-C / H-L spread analysis below.
merge_df = merge_df.drop(["Shares Traded","Turnover (Rs. Cr)"],axis = 1)
merge_df
merge_df = merge_df.set_index("Date")
merge_df
# Spot-check a single date (suffix _x = BankNifty columns, _y = data.csv columns).
merge_df["Close_x"].loc["10-Jan-2019"]
# ## The bn["Open"] and bn["Close"] were of the type strings, as they had both numeric and non-numeric values; to change them: USE to_numeric
# ### [ pd.to_numeric(df["Col_name"], errors = "coerce") ]
merge_df["BN:O-C"] = pd.to_numeric(merge_df["Open_x"],errors="coerce")-pd.to_numeric(merge_df["Close_x"], errors = "coerce")
merge_df["BN:H-L"] = pd.to_numeric(merge_df["High_x"], errors = "coerce")-pd.to_numeric(merge_df["Low_x"], errors = "coerce")
# The _y columns are already numeric, so plain subtraction is fine.
merge_df["Data:O-C"] = merge_df["Open_y"]-merge_df["Close_y"]
merge_df["Data:H-L"] = merge_df["High_y"]-merge_df["Low_y"]
merge_df.head()
# # Use Multi-Indexing, and refer banknift.ipynb for instructions ( Before day 3 )
| Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/Merge Day1 and Day2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
import numpy as np
from pandas import DataFrame
# +
# generate a 2-class classification problem with 20 data points,
# where each data point is a 2D feature vector
(X, y) = make_blobs(n_samples=20, n_features=2, centers=2,
    cluster_std=5.0, random_state=95)
# Relabel the classes from {0, 1} to {-1, +1}, the convention used in SVM theory.
y = np.where(y == 1, 1, -1)
# -
# Inspect the raw feature matrix.
X
# +
from sklearn import svm
import matplotlib.pyplot as plt
import numpy as np
# fit the model, don't regularize for illustration purposes
# (a very small C gives a soft, wide-margin fit)
Cs=0.0080
clf1 = svm.SVC(kernel='linear', C=Cs)
X_train=X
y_train=y
clf1.fit(X_train, y_train)
fig,ax=plt.subplots()
xmin,xmax=-15.0,15.0
ax.set_xlim([xmin,xmax])
ax.set_ylim([-15,15])
# plot the decision function
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Margin width 2/||w||; only referenced by the commented-out title below.
margin = 2 / np.sqrt(np.sum(clf1.coef_ ** 2))
w = clf1.coef_[0]
# Slope of the separating hyperplane (currently unused).
a = -w[0] / w[1]
# Move left y-axis and bottom x-axis to centre, passing through (0,0)
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
# Eliminate upper and right axes
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# Scatter the training points: red = class -1, blue = class +1.
ax.scatter(X_train[:, 0], X_train[:, 1], marker="o", s=30,color=["r" if y_train == -1 else "b" for y_train in y_train])
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = clf1.decision_function(xy).reshape(XX.shape)
# plot decision boundary (solid, level 0) and the two margins (dashed, levels -1/+1)
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,linestyles=['--', '-', '--'])
# Circle the support vectors.
ax.scatter(clf1.support_vectors_[:, 0], clf1.support_vectors_[:, 1], s=100,
    linewidth=1, facecolors='none', edgecolors='black')
k=w.tolist()
#ax.scatter(k[0],k[1],s=50,c='green')
#plt.plot([0,k[0]],[0,k[1]],c='green')
ax.set_aspect('equal')
#plt.title('$C_4$=%.1f, Margin Width=%.4f'%(Cs,margin))
plt.show()
# -
| SVM/Notebooks/2.4_a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import pygal
from pygal.style import DarkStyle
# Crime-category CSV basenames; each one becomes a single SVG line chart.
categories = [
    'Offences Relating to Documents & Property Marks',
    'Miscellaneous IPC Crimes',
    'Offences Affecting the Human Body',
    'Offences Affecting the Human Body-2',
    'Offences against Property',
    'Offences against Public Tranquillity',
    'Offences against the State',
]
for category in categories:
    # One dark-styled chart per category, x-axis spanning 2017-2019.
    chart = pygal.Line(style=DarkStyle)
    chart.x_labels = '2017', '2018', '2019'
    chart.title = '{}-(2017-2019)'.format(category)
    data = pd.read_csv("Raw Data/csv-files/{}.csv".format(category))
    print("Writing {}.svg".format(category))
    heads = list(data.get('Crime Head'))
    # Per-year counts, coerced to int, in chronological order.
    yearly = [[int(v) for v in data.get(year)] for year in ('2017', '2018', '2019')]
    # Each crime head contributes one line with its 2017/2018/2019 values.
    for head, counts in zip(heads, zip(*yearly)):
        chart.add(head, list(counts))
    chart.render_to_file('svg-files/Line-charts/{}.svg'.format(category))
| Line-Chart-SVG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # **Gaussian Processes**
#
# Notebook version: 1.0 (Oct 06, 2015)
#
# Author: <NAME> (<EMAIL>)
#
# Changes: v.1.0 - First version
# v.1.1 - Figures changed to png (tiff not readable in Firefox)
# +
# Import some libraries that will be necessary for working with data and displaying plots
# To visualize plots in the notebook
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io # To read matlab files
import pylab
# -
# # 4. Gaussian Processes
#
# ## 4.1 The multivariate Gaussian distribution
#
# Let us assume that ${\bf x}$ and ${\bf y}$ are two jointly Gaussian random vectors, whose joint pdf can be expressed as
#
# \begin{equation}
# \left[ \begin{array}{c} {\bf x} \\ {\bf y} \end{array} \right] \sim {\cal{N}} \left( \left[ \begin{array}{c} {\bf m}_{\bf x} \\ {\bf m}_{\bf y} \end{array} \right] , \left[ \begin{array}{cc} {\pmb \Sigma}_{{\bf x}{\bf x}} & {\pmb \Sigma}_{{\bf x}{\bf y}} \\ {\pmb \Sigma}_{{\bf x}{\bf y}}^T & {\pmb \Sigma}_{{\bf y}{\bf y}}\end{array}\right] \right) = {\cal{N}} \left( \left[ \begin{array}{c} {\bf m}_{\bf x} \\ {\bf m}_{\bf y} \end{array} \right] , \left[ \begin{array}{cc} \widetilde{{\pmb \Sigma}}_{{\bf x}{\bf x}} & \widetilde{{\pmb \Sigma}}_{{\bf x}{\bf y}} \\ \widetilde{{\pmb \Sigma}}_{{\bf x}{\bf y}}^T & \widetilde{{\pmb \Sigma}}_{{\bf y}{\bf y}}\end{array}\right]^{-1} \right)
# \end{equation}
#
# where
#
# - ${\bf m}_{\bf x}$: Mean of random vector ${\bf x}$
# - ${\bf m}_{\bf y}$: Mean of random vector ${\bf y}$
# - ${\pmb \Sigma}_{{\bf x}{\bf x}}$: Covariance matrix of random vector ${\bf x}$
# - ${\pmb \Sigma}_{{\bf x}{\bf y}}$: Cross-covariance matrix of ${\bf x}$ and ${\bf y}$
#
# Note that all blocks of the covariance matrix and its inverse are of appropriate sizes, as indicated by the subindexes. We are using tildes for the blocks of the inverse of the covariance matrix. Please, be aware that $\widetilde{\pmb\Sigma}_{{\bf x}{\bf x}} \neq {\pmb\Sigma}_{{\bf x}{\bf x}}^{-1}$.
#
# <img src="figs/Gaussian2D.png", style="width: 80%">
#
# Since random vectors are jointly Gaussian, this means that marginal and conditional distributions are also Gaussian, with distributions that can be expresses as
#
# $${\bf x} \sim {\cal{N}} ({\bf m}_{\bf x}, {\pmb \Sigma}_{{\bf x}{\bf x}}) \qquad \qquad {\bf y} \sim {\cal{N}} ({\bf m}_{\bf y}, {\pmb \Sigma}_{{\bf y}{\bf y}})$$
#
#
# $${\bf x}|{\bf y} \sim {\cal{N}} ( {\bf m}_{\bf x} - \widetilde{{\pmb \Sigma}}_{{\bf x}{\bf x}}^{-1} \widetilde{{\pmb \Sigma}}_{{\bf x}{\bf y}} ({\bf y} - {\bf m}_{\bf y}), \widetilde{{\pmb \Sigma}}_{{\bf x}{\bf x}}^{-1})$$
#
# $${\bf x}|{\bf y} \sim {\cal{N}} ( {\bf m}_{\bf x} + {{\pmb \Sigma}}_{{\bf x}{\bf y}} {{\pmb \Sigma}}_{{\bf y}{\bf y}}^{-1} ({\bf y} - {\bf m}_{\bf y}), {{\pmb \Sigma}}_{{\bf x}{\bf x}} - {\pmb \Sigma}_{{\bf x}{\bf y}} {{\pmb \Sigma}}_{{\bf y}{\bf y}}^{-1} {\pmb \Sigma}_{{\bf x}{\bf y}}^\top)$$
#
# The demonstration of these expressions is left as an exercise
# ### Generation of Samples from a Multivariate Gaussian Distribution
#
# Although there are python routines that can help to generate points from a multivariate Gaussian distribution, a standard procedure is based upon the generation of samples from the normal distribution with zero mean and unitary standard deviation, which are then transformed in an appropriate manner.
#
# The Cholesky decomposition of a symmetric positive-definite matrix ${\bf A}$ is (the definition can be slightly modified for complex matrices, but here we restrict our selves to matrices with real values):
#
# $${\bf A} = {\bf L} {\bf L}^\top$$
#
# where ${\bf L}$ is a lower triangular matrix with positive diagonal entries. It can be shown that such a decomposition exists and is unique, and can be computed in a very stable and efficient way.
#
# Now, imaging the problem of drawing points from the Gaussian distribution
#
# $$p({\bf x}) = {\cal N}\left({\bf m},{\pmb \Sigma} \right)$$
#
# This distribution can be sampled by drawing points ${\bf x}'$ from ${\cal N}\left({\bf 0},{\bf I} \right)$, and then applying the transformation
#
# $${\bf x} = {\bf L} {\bf x}'~+~{\bf m}$$
#
# where ${\bf L}$ is obtained from the Cholesky decomposition of $\pmb \Sigma$.
#
# The demonstration of the validity of the process goes along these lines:
#
# - Since ${\bf x}$ is a linear transformation of a Gaussian variable, it should also be normally distributed
#
# - ${\mathbb E}\{{\bf x}\} = {\bf L} \mathbb{E}\{{\bf x}'\} + {\bf m} = {\bf m}$
#
# - $\text{Cov}\{{\bf x}\} = \text{Cov}\{{\bf L} {\bf x}'\} = {\mathbb E} \{{\bf L} {\bf x}' {{\bf x}'}^\top {\bf L}^\top\} = {\bf L} {\bf L}^\top = {\pmb \Sigma}$
# +
# Illustrate drawing correlated 2-D Gaussian samples via the Cholesky factor.
mean_vec = np.array([3,3])
cov_mat = np.array([[1,-0.95],[-0.95,1]])
chol = np.linalg.cholesky(cov_mat)
# Standard-normal seeds: one column per sample point.
n_samples, n_dims = 300, 2
z = np.random.randn(n_dims, n_samples)
# The affine map z -> chol.z + mean turns N(0, I) samples into N(mean, cov) samples.
samples = chol.dot(z) + np.matrix(mean_vec).T
# Blue: raw standard-normal cloud; red: transformed (correlated, shifted) cloud.
plt.plot(samples[0,], samples[1,], 'r.');
plt.plot(z[0,], z[1,], 'b.');
# -
# ## 4.2 Non-parametric Inference with Gaussian Processes
#
# In the previous lesson we assumed the following generative model for the patterns in the training set
#
# $${\bf s} = {\bf f} + {\pmb \varepsilon}$$
#
# where
#
# - ${\bf s}$ is a vector containing all target values in the training set
#
# - ${\bf f}$ is a vector containing the true values of the function we would like to approximate at all points ${\bf x}^{(k)}$ in the training set
#
# - ${\pmb \varepsilon}$ is a vector of i.i.d. noise samples, whose distribution is assumed ${\cal N}({\bf 0},\sigma_\varepsilon^2{\bf I})$
#
# This model essentially tells us that the observations in the training set are the values of a hidden function $f({\bf x})$ contaminated by additive noise.
#
# - In the previous lesson, we assumed a parametric model ${\bf f} = {\bf Z}{\bf w}$, and use it to make Bayesian Inference on the weight vector characterizing the model
#
# - In this lesson we will proceed non-parametrically:
#
# * We do not assume any particular analytic shape for $f({\bf x})$
#
# * Inference is done directly on the values of the function, i.e., on ${\bf f}$
#
#
#
# How can we made inference directly on the function values? The answer is provided by the concept of Gaussian Process (GP)
# ### 4.2.1 Gaussian Processes
#
# - A Gaussian process is a collection of random variables, any finite number of which have (consistent) Gaussian distributions
#
# - You can think of a Gaussian Process as a collection of infinite random variables with joint Gaussian distribution
#
# - The standard way we will use them is to characterize the joint distribution of the values of $f({\bf x})$
#
# <img src="./figs/GP_concept.png" style="width: 50%">
#
# - In this way, we can generate the values of $f({\bf x})$ according to the joint distribution of the points, if the values of $({\bf x})$ are close enough, this will produce a regression curve
#
# In other words, rather than having a parametric model, and sampling ${\bf w}$ from a given distribution, GPs allow us to directly sample the values of the function
#
#
# #### Characterization of a Gaussian Process
#
# - A Gaussian distribution is characterized by a mean vector and and a covariance matrix
#
# $${\bf f}~\sim~{\cal N}\left({\bf m},{\pmb \Sigma}\right)$$
#
#
# - A Gaussian Process is characterized by a mean function and a covariance function
#
# $$f({\bf x})~\sim~{\cal GP} \left( m{\bf x}, k({\bf x},{\bf x'}) \right)$$
#
#
# - A typical selection for the covariance function is the Squared Exponential (SE) kernel
#
# $$k({\bf x},{\bf x'}) = \exp\left( -\frac{1}{2l} (\|{\bf x}-{\bf x'}\|^2)\right)$$
#
#
# Intuitively, this kernel specifies that the values of $f({\bf x})$ and $f({\bf x}')$ should be more correlated the closer the two points ${\bf x}$ and ${\bf x}'$. What we consider to be close or far, is modulated by the selection of the length-parameter $l$.
#
#
# #### Example: Using a Gaussian Process to generate functions
#
# The concept of Gaussian Process is probably better illustrated with the following code fragment, where we generate functions from a GP with mean $m({\bf x}) = 0$ and SE covariance function
# +
from scipy import spatial
pylab.subplots_adjust(hspace=0.7)
n_points = 300
n_curves = 5
l_values = [.001, .01, 0.1]
X_grid = np.linspace(-2,2,n_points)
X_grid = np.matrix(X_grid).T
color = 'rgbcm'
# The pairwise distances depend only on the grid, so compute them once
# instead of once per length-scale (they were loop-invariant).
distances = spatial.distance.cdist(X_grid,X_grid,'euclidean')
for i,l in enumerate(l_values):
    plt.subplot(len(l_values),1,i+1)
    # The GP gives the joint Gaussian distribution of f at the grid points;
    # its covariance matrix is the SE kernel on the pairwise distances.
    K = np.exp(-np.power(distances,2)/(2*l))
    # Jitter the diagonal so K is numerically positive-definite for Cholesky.
    L = np.linalg.cholesky(K+1e-10*np.eye(n_points))
    # Draw n_curves functions from N(0, K) via f = L z, z ~ N(0, I).
    # (loop variable renamed: `iter` shadowed the Python builtin)
    for curve in range(n_curves):
        f_prime = np.random.randn(n_points,1)
        f = L.dot(f_prime)
        plt.plot(X_grid,f,color[curve%n_curves])
    plt.title('Length-scale parameter: '+str(l))
# -
# ### 4.2.2 Modeling the posterior distribution of the targets
#
# Let us assume that we wish to make predictions of the target variable at a collection of points $\{{{\bf x}^*}^{(1)}, {{\bf x}^*}^{(2)}, \dots, {{\bf x}^*}^{(P)}\}$. Let us put together the corresponding values of $f(\cdot)$ into a column vector ${\bf f}^*$. For the particular case in which we need to make predictions at a single point, such vector would become a scalar
#
# #### Joint distribution of ${\bf f}$ and ${\bf f}^*$
#
# Using the generative model:
#
# $$s^{(k)} = f({\bf x}^{(k)}) + \varepsilon^{(k)}$$
#
# and assuming further that $f(\bf x)$ is a GP with mean 0 and covariance function $k({\bf x},{\bf x}')$, we can model the joint distribution of ${\bf f}$ and ${\bf f}^*$ as
#
# $$\left[\begin{array}{c}{\bf f}\\{\bf f}^*\end{array}\right]~\sim~{\cal N}\left({\bf 0},\left[\begin{array}{cc}{\bf K} & {\bf K}_*^\top \\ {\bf K}_* & {\bf K}_{**} \end{array}\right]\right)$$
#
# where:
#
# - ${\bf K}$ contains the covariance among the components of ${\bf f}$
# - ${\bf K}_{**}$ contains the covariance among the components of ${\bf f}^*$
# - ${\bf K}_{*}$ contains the covariance between components of ${\bf f}^*$ and ${\bf f}$
#
#
# #### Joint distribution of ${\bf s}$ and ${\bf f}^*$
#
# Since ${\bf s} = {\bf f} + {\pmb \varepsilon}$, and since the noise is independent of the values of ${\bf f}$, we have that
#
# $$\text{Cov}\{{\bf s}\} = \text{Cov}\{{\bf f}\} + \text{Cov}\{{\pmb \varepsilon}\} = {\bf K} + \sigma_\varepsilon^2 {\bf I}$$
#
# Next, we can also see that
#
# $$\text{Cov}\{{\bf f}^*,{\bf s}\} = \text{Cov}\{{\bf f}^*,({\bf f}+{\pmb \varepsilon})\} = \text{Cov}\{{\bf f}^*,{\bf f}\} = {\bf K}_*$$
#
# Therefore, the joint distribution of ${\bf s}$ and ${\bf f}^*$ is
#
# $$\left[\begin{array}{c}{\bf s}\\{\bf f}^*\end{array}\right]~\sim~{\cal N}\left({\bf 0},\left[\begin{array}{cc}{\bf K} + \sigma_\varepsilon^2 {\bf I}& {\bf K}_*^\top \\ {\bf K}_* & {\bf K}_{**} \end{array}\right]\right)$$
#
#
# #### Predictive distribution of ${\bf f}^*$ given ${\bf s}$
#
# Using the results of Section 4.1.1, it is now straightforward to obtain the distribution of ${\bf f}^*$ given ${\bf s}$:
#
# $${\bf f}^* | {\bf s}~\sim~{\cal N}\left({\bf K}_*[{\bf K+\sigma_\varepsilon^2 {\bf I}}]^{-1}{\bf s}, {\bf K}_{**} - {\bf K}_* [{\bf K+\sigma_\varepsilon^2 {\bf I}}]^{-1} {\bf K}_*^\top\right)$$
# ### 4.2.3 Gaussian Processes in practice
#
# #### Predictive distribution computation
# +
n_points = 15
n_grid = 200
frec = 3
std_n = 0.2
# Generating the training points: noisy samples of -cos(frec * x)
X_tr = 3 * np.random.random((n_points,1)) - 0.5
S_tr = - np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
# These will be the points where the model will be evaluated
X_grid = np.linspace(-1,3,n_grid)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(X_tr,S_tr,'b.',markersize=10)
ax.set_xlim(-.5,2.5)
# GP hyperparameters: observation-noise std and SE length-scale
sigma_eps = 0.1
l = .1
# Covariance matrix computation (K: train/train, K_ss: grid/grid, K_s: grid/train)
X_tr = np.matrix(X_tr)
X_grid = np.matrix(X_grid).T
dist = spatial.distance.cdist(X_tr,X_tr,'euclidean')
dist_ss = spatial.distance.cdist(X_grid,X_grid,'euclidean')
dist_s = spatial.distance.cdist(X_grid,X_tr,'euclidean')
K = np.exp(-np.power(dist,2)/(2*l))
K_ss = np.exp(-np.power(dist_ss,2)/(2*l))
K_s = np.exp(-np.power(dist_s,2)/(2*l))
# Posterior distribution of f_star.
# (K + sigma^2 I)^-1 was being inverted twice -- once for the mean and once
# for the covariance; invert a single time and reuse the result.
K_noisy_inv = np.linalg.inv(K + sigma_eps**2 * np.eye(n_points))
m = K_s.dot(K_noisy_inv).dot(S_tr)
m = np.array(m).flatten()
Cov = K_ss - K_s.dot(K_noisy_inv).dot(K_s.T)
# Variance of each prediction (diagonal of the posterior covariance)
v = np.diagonal(Cov)
plt.plot(X_grid,m,'r',label='Predictive mean')
plt.fill_between(np.array(X_grid).flatten(), m-np.sqrt(v), m+np.sqrt(v),
    alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
    linewidth=4, linestyle='dashdot', antialiased=True)
plt.show()
# -
# You can play modify the values of $\mbox{sigma_eps}$ and $\mbox{l}$, to see how the predictive distribution changes
# ## 4.3 Maximum likelihood selection of hyperparameters
#
# As we did in the previous unit, we can find select the value of the hyperparameters to maximize the likelihood of the model. To this end, let us note that the pdf of ${\bf s}$ for given hyperparameters $\sigma_\varepsilon$ and $l$ is given by
#
# $$p({\bf s}|\sigma_\varepsilon,l) = {\cal N}({\bf 0},{\bf K} + \sigma_\varepsilon^2 {\bf I}) = \frac{1}{(2 \pi)^{K/2}\mid{\bf K} + \sigma_\varepsilon^2 {\bf I}\mid^{1/2}} \exp\left(-\frac{1}{2}{\bf s}^\top ({\bf K} + \sigma_\varepsilon^2 {\bf I})^{-1} {\bf s}\right)$$
#
# Instead of directly maximizing this expression, it is convenient to take the log, arriving at
#
# $$\log p({\bf s}|\sigma_\varepsilon,l) = - \frac{K}{2}\log(2\pi) - \frac{1}{2} \log \mid{\bf K} + \sigma_\varepsilon^2 {\bf I}\mid -\frac{1}{2}{\bf s}^\top ({\bf K} + \sigma_\varepsilon^2 {\bf I})^{-1} {\bf s}$$
#
# (be aware of the difference between $K$, the number of points in the training dataset, and ${\bf K}$, the covariance matrix of ${\bf f}$)
#
# Selection of hyperparameters can be done with the objective of maximize this marginal likelihood, as illustrated next
# +
from math import pi
n_points = 15
frec = 3
std_n = 0.2
# Generating the training points: noisy samples of -cos(frec * x)
X_tr = 3 * np.random.random((n_points,1)) - 0.5
S_tr = - np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
# GP hyperparameters: fixed noise std; sweep the length-scale over a log grid
sigma_eps = 0.3
l = np.logspace(-5,3,100)
# Covariance matrix computation.
# (A dead re-assignment of X_grid was removed here: X_grid is rebuilt from
# scratch in the next cell before it is read again.)
X_tr = np.matrix(X_tr)
dist = spatial.distance.cdist(X_tr,X_tr,'euclidean')
log_E = []
for l_iter in l:
    K = np.exp(-np.power(dist,2)/(2*l_iter))
    C = K + sigma_eps**2 * np.eye(n_points)
    # log p(s | sigma_eps, l) = -(K/2) log(2 pi) - (1/2) log|C| - (1/2) s^T C^-1 s
    # Fixes: the log-determinant term was missing its 1/2 factor (which skewed
    # the argmax over l), and n_points/2 would truncate under Python 2 integer
    # division.  slogdet avoids under/overflow of the raw determinant.
    sign, logdet = np.linalg.slogdet(C)
    log_E_iter = -(n_points/2.0)*np.log(2*pi) \
        - 0.5 * logdet \
        - 0.5 * S_tr.T.dot(np.linalg.inv(C)).dot(S_tr)
    log_E.append(log_E_iter[0,0])
plt.semilogx(l,log_E)
plt.xlabel('length-scale hyperparameter')
plt.ylabel('Log likelihood')
# Maximum-likelihood length-scale, reused by the next cell.
selected_l = l[np.argmax(log_E)]
# +
n_grid = 200
# These will be the points where the model will be evaluated
X_grid = np.linspace(-1,3,n_grid)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(X_tr,S_tr,'b.',markersize=10)
ax.set_xlim(-.5,2.5)
# GP hyperparameters: same noise std as the sweep; ML-selected length-scale
sigma_eps = 0.3
l = selected_l
# Covariance matrix computation (K: train/train, K_ss: grid/grid, K_s: grid/train)
X_tr = np.matrix(X_tr)
X_grid = np.matrix(X_grid).T
dist = spatial.distance.cdist(X_tr,X_tr,'euclidean')
dist_ss = spatial.distance.cdist(X_grid,X_grid,'euclidean')
dist_s = spatial.distance.cdist(X_grid,X_tr,'euclidean')
K = np.exp(-np.power(dist,2)/(2*l))
K_ss = np.exp(-np.power(dist_ss,2)/(2*l))
K_s = np.exp(-np.power(dist_s,2)/(2*l))
# Posterior distribution of f_star.
# (K + sigma^2 I)^-1 was being inverted twice -- once for the mean and once
# for the covariance; invert a single time and reuse the result.
K_noisy_inv = np.linalg.inv(K + sigma_eps**2 * np.eye(n_points))
m = K_s.dot(K_noisy_inv).dot(S_tr)
m = np.array(m).flatten()
Cov = K_ss - K_s.dot(K_noisy_inv).dot(K_s.T)
# Variance of each prediction (diagonal of the posterior covariance)
v = np.diagonal(Cov)
plt.plot(X_grid,m,'r',label='Predictive mean')
plt.fill_between(np.array(X_grid).flatten(), m-np.sqrt(v), m+np.sqrt(v),
    alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
    linewidth=4, linestyle='dashdot', antialiased=True)
plt.show()
# -
| R6.Gaussian_Processes/.ipynb_checkpoints/GPs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to create STAC Catalogs
# ## STAC Community Sprint, Arlington, November 7th 2019
# This notebook runs through some of the basics of using PySTAC to create a static STAC. It was part of a 30 minute presentation at the [community STAC sprint](https://github.com/radiantearth/community-sprints/tree/master/11052019-arlignton-va) in Arlington, VA in November 2019.
# This tutorial will require the `boto3`, `rasterio`, and `shapely` libraries:
# !pip install boto3
# !pip install rasterio
# !pip install shapely
# We can import pystac and access most of the functionality we need with the single import:
import pystac
# ## Creating a catalog from a local file
# To give us some material to work with, lets download a single image from the [Spacenet 5 challenge](https://www.topcoder.com/challenges/30099956). We'll use a temporary directory to save off our single-item STAC.
# +
import os
import urllib.request
from tempfile import TemporaryDirectory
# Keep a reference to the TemporaryDirectory object: the directory (and the
# image we download into it) is deleted as soon as this object is collected.
tmp_dir = TemporaryDirectory()
img_path = os.path.join(tmp_dir.name, 'image.tif')
# -
# Public S3 location of a single SpaceNet 5 multispectral GeoTIFF chip.
url = ('https://spacenet-dataset.s3.amazonaws.com/'
    'spacenet/SN5_roads/train/AOI_7_Moscow/MS/'
    'SN5_roads_train_AOI_7_Moscow_MS_chip996.tif')
# Download it into the temporary directory at the path chosen earlier.
urllib.request.urlretrieve(url, img_path)
# We want to create a Catalog. Let's check the pydocs for `Catalog` to see what information we'll need. (We use `__doc__` instead of `help()` here to avoid printing out all the docs for the class.)
print(pystac.Catalog.__doc__)
# Let's just give an ID and a description. We don't have to worry about the HREF right now; that will be set later.
catalog = pystac.Catalog(id='test-catalog', description='Tutorial catalog.')
# There are no children or items in the catalog, since we haven't added anything yet.
print(list(catalog.get_children()))
print(list(catalog.get_items()))
# We'll now create an Item to represent the image. Check the pydocs to see what you need to supply:
print(pystac.Item.__doc__)
# Using [rasterio](https://rasterio.readthedocs.io/en/stable/), we can pull out the bounding box of the image to use for the image metadata. If the image contained a NoData border, we would ideally pull out the footprint and save it as the geometry; in this case, we're working with a small chip that most likely has no NoData values.
# +
import rasterio
from shapely.geometry import Polygon, mapping
def get_bbox_and_footprint(raster_uri):
    """Return (bbox, footprint) for a raster.

    bbox is [xmin, ymin, xmax, ymax]; footprint is the bounding rectangle
    as a GeoJSON-style mapping. Only the raster metadata is read, so this
    is cheap even for remote (e.g. S3) URIs.
    """
    with rasterio.open(raster_uri) as dataset:
        left, bottom, right, top = dataset.bounds
    bbox = [left, bottom, right, top]
    # Corner order matches a counter-clockwise ring starting bottom-left.
    corners = [(left, bottom), (left, top), (right, top), (right, bottom)]
    return (bbox, mapping(Polygon(corners)))
# -
bbox, footprint = get_bbox_and_footprint(img_path)
print(bbox)
print(footprint)
# We're also using `datetime.utcnow()` to supply the required datetime property for our Item. Since this is a required property, you might often find yourself making up a time to fill in if you don't know the exact capture time.
# +
from datetime import datetime
item = pystac.Item(id='local-image',
geometry=footprint,
bbox=bbox,
datetime=datetime.utcnow(),
properties={})
# -
# We haven't added it to a catalog yet, so its parent isn't set. Once we add it to the catalog, we can see it correctly links to its parent.
item.get_parent() is None
catalog.add_item(item)
item.get_parent()
# `describe()` is a useful method on `Catalog` - but be careful when using it on large catalogs, as it will walk the entire tree of the STAC.
catalog.describe()
# ### Adding Assets
#
# We've created an Item, but there aren't any assets associated with it. Let's create one:
print(pystac.Asset.__doc__)
item.add_asset(
key='image',
asset=pystac.Asset(
href=img_path,
media_type=pystac.MediaType.GEOTIFF
)
)
# At any time we can call `to_dict()` on STAC objects to see how the STAC JSON is shaping up. Notice the asset is now set:
import json
print(json.dumps(item.to_dict(), indent=4))
# Note that the link `href` properties are `null`. This is OK, as we're working with the STAC in memory. Next, we'll talk about writing the catalog out, and how to set those HREFs.
# ### Saving the catalog
# As the JSON above indicates, there's no HREFs set on these in-memory items. PySTAC uses the `self` link on STAC objects to track where the file lives. Because we haven't set them, they evaluate to `None`:
print(catalog.get_self_href() is None)
print(item.get_self_href() is None)
# In order to set them, we can use `normalize_hrefs`. This method will create a normalized set of HREFs for each STAC object in the catalog, according to the [best practices document](https://github.com/radiantearth/stac-spec/blob/v0.8.1/best-practices.md#catalog-layout)'s recommendations on how to lay out a catalog.
catalog.normalize_hrefs(os.path.join(tmp_dir.name, 'stac'))
# Now that we've normalized to a root directory (the temporary directory), we see that the `self` links are set:
print(catalog.get_self_href())
print(item.get_self_href())
# We can now call `save` on the catalog, which will recursively save all the STAC objects to their respective self HREFs.
#
# Save requires a `CatalogType` to be set. You can review the [API docs](https://pystac.readthedocs.io/en/stable/api.html#catalogtype) on `CatalogType` to see what each type means (unfortunately `help` doesn't show docstrings for attributes).
catalog.save(catalog_type=pystac.CatalogType.SELF_CONTAINED)
# !ls {tmp_dir.name}/stac/*
with open(catalog.get_self_href()) as f:
print(f.read())
with open(item.get_self_href()) as f:
print(f.read())
# As you can see, all links are saved with relative paths. That's because we used `catalog_type=CatalogType.SELF_CONTAINED`. If we save an Absolute Published catalog, we'll see absolute paths:
catalog.save(catalog_type=pystac.CatalogType.ABSOLUTE_PUBLISHED)
# Now the links included in the STAC item are all absolute:
with open(item.get_self_href()) as f:
print(f.read())
# Notice that the Asset HREF is absolute in both cases. We can make the Asset HREF relative to the STAC Item by using `.make_all_asset_hrefs_relative()`:
catalog.make_all_asset_hrefs_relative()
catalog.save(catalog_type=pystac.CatalogType.SELF_CONTAINED)
with open(item.get_self_href()) as f:
print(f.read())
# ### Creating an Item that implements the EO extension
#
# In the code above our item only implemented the core STAC Item specification. With [extensions](https://github.com/radiantearth/stac-spec/tree/v0.9.0/extensions) we can record more information and add additional functionality to the Item. Given that we know this is a World View 3 image that has earth observation data, we can enable the [eo extension](https://github.com/radiantearth/stac-spec/tree/v0.8.1/extensions/eo) to add band information.
# To add eo information to an item we'll need to specify some more data. First, let's define the bands of World View 3:
# +
from pystac.extensions.eo import Band
# From: https://www.spaceimagingme.com/downloads/sensors/datasheets/DG_WorldView3_DS_2014.pdf
wv3_bands = [Band.create(name='Coastal', description='Coastal: 400 - 450 nm', common_name='coastal'),
Band.create(name='Blue', description='Blue: 450 - 510 nm', common_name='blue'),
Band.create(name='Green', description='Green: 510 - 580 nm', common_name='green'),
Band.create(name='Yellow', description='Yellow: 585 - 625 nm', common_name='yellow'),
Band.create(name='Red', description='Red: 630 - 690 nm', common_name='red'),
Band.create(name='Red Edge', description='Red Edge: 705 - 745 nm', common_name='rededge'),
Band.create(name='Near-IR1', description='Near-IR1: 770 - 895 nm', common_name='nir08'),
Band.create(name='Near-IR2', description='Near-IR2: 860 - 1040 nm', common_name='nir09')]
# -
# Notice that we used the `.create` method create new band information.
# We can now create an Item, enable the eo extension, add the band information and add it to our catalog:
# +
eo_item = pystac.Item(id='local-image-eo',
geometry=footprint,
bbox=bbox,
datetime=datetime.utcnow(),
properties={})
eo_item.ext.enable(pystac.Extensions.EO)
eo_item.ext.eo.apply(bands=wv3_bands)
# -
# There are also [common metadata](https://github.com/radiantearth/stac-spec/blob/v0.9.0/item-spec/common-metadata.md) fields that we can use to capture additional information about the WorldView 3 imagery:
eo_item.common_metadata.platform = "Maxar"
eo_item.common_metadata.instrument="WorldView3"
eo_item.common_metadata.gsd = 0.3
eo_item
# We can use the eo extension to add bands to the assets we add to the item:
# +
eo_ext = eo_item.ext.eo
help(eo_ext.set_bands)
#eo_item.add_asset(key='image', asset=)
# -
asset = pystac.Asset(href=img_path,
media_type=pystac.MediaType.GEOTIFF)
eo_ext.set_bands(wv3_bands, asset)
eo_item.add_asset("image", asset)
# If we look at the asset's JSON representation, we can see the appropriate band indexes are set:
asset.to_dict()
# Let's clear the in-memory catalog, add the EO item, and save to a new STAC:
catalog.clear_items()
list(catalog.get_items())
catalog.add_item(eo_item)
list(catalog.get_items())
catalog.normalize_and_save(root_href=os.path.join(tmp_dir.name, 'stac-eo'),
catalog_type=pystac.CatalogType.SELF_CONTAINED)
# Now, if we read the catalog from the filesystem, PySTAC recognizes that the item implements eo and so uses its functionality, e.g. getting the bands off the asset:
catalog2 = pystac.read_file(os.path.join(tmp_dir.name, 'stac-eo', 'catalog.json'))
list(catalog2.get_items())
item = next(catalog2.get_all_items())
item.ext.implements('eo')
item.ext.eo.get_bands(item.assets['image'])
# ### Collections
#
# Collections are a subtype of Catalog that have some additional properties to make them more searchable. They also can define common properties so that items in the collection don't have to duplicate common data for each item. Let's create a collection to hold common properties between two images from the Spacenet 5 challenge.
#
# First we'll get another image, and its bbox and footprint:
url2 = ('https://spacenet-dataset.s3.amazonaws.com/'
        'spacenet/SN5_roads/train/AOI_7_Moscow/MS/'
        'SN5_roads_train_AOI_7_Moscow_MS_chip997.tif')

# BUG FIX: use a distinct filename. The original wrote to 'image.tif',
# silently overwriting the first chip downloaded earlier in the tutorial.
img_path2 = os.path.join(tmp_dir.name, 'image2.tif')

urllib.request.urlretrieve(url2, img_path2)
bbox2, footprint2 = get_bbox_and_footprint(img_path2)
# We can take a look at the pydocs for Collection to see what information we need to supply in order to satisfy the spec.
print(pystac.Collection.__doc__)
# Beyond what a Catalog requires, a Collection requires a license, and an `Extent` that describes the range of space and time that the items it holds occupy.
print(pystac.Extent.__doc__)
# An Extent is comprised of a SpatialExtent and a TemporalExtent. These hold one or more bounding boxes and time intervals, respectively, that completely cover the items contained in the collections.
#
# Let's start with creating two new items - these will be core Items. We can set these items to implement the `eo` extension by specifying them in the `stac_extensions`.
# +
# First Item: full 8-band asset for chip 996.
collection_item = pystac.Item(id='local-image-col-1',
                              geometry=footprint,
                              bbox=bbox,
                              datetime=datetime.utcnow(),
                              properties={},
                              stac_extensions=[pystac.Extensions.EO])

collection_item.common_metadata.gsd = 0.3
collection_item.common_metadata.platform = 'Maxar'
collection_item.common_metadata.instruments = ['WorldView3']

asset = pystac.Asset(href=img_path,
                    media_type=pystac.MediaType.GEOTIFF)
collection_item.ext.eo.set_bands(wv3_bands, asset)
collection_item.add_asset('image', asset)

# Second Item: RGB-only band subset for chip 997.
collection_item2 = pystac.Item(id='local-image-col-2',
                               geometry=footprint2,
                               bbox=bbox2,
                               datetime=datetime.utcnow(),
                               properties={},
                               stac_extensions=[pystac.Extensions.EO])

collection_item2.common_metadata.gsd = 0.3
collection_item2.common_metadata.platform = 'Maxar'
collection_item2.common_metadata.instruments = ['WorldView3']

# BUG FIX: this asset must reference the second image (img_path2). The
# original used img_path, attaching chip 996's file to the chip-997 item.
asset2 = pystac.Asset(href=img_path2,
                      media_type=pystac.MediaType.GEOTIFF)
collection_item2.ext.eo.set_bands([
    band for band in wv3_bands if band.name in ["Red", "Green", "Blue"]
], asset2)
collection_item2.add_asset('image', asset2)
# -
# We can use our two items' metadata to find out what the proper bounds are:
# +
from shapely.geometry import shape
unioned_footprint = shape(footprint).union(shape(footprint2))
collection_bbox = list(unioned_footprint.bounds)
spatial_extent = pystac.SpatialExtent(bboxes=[collection_bbox])
# -
collection_interval = sorted([collection_item.datetime, collection_item2.datetime])
temporal_extent = pystac.TemporalExtent(intervals=[collection_interval])
collection_extent = pystac.Extent(spatial=spatial_extent, temporal=temporal_extent)
collection = pystac.Collection(id='wv3-images',
description='Spacenet 5 images over Moscow',
extent=collection_extent,
license='CC-BY-SA-4.0')
# Now if we add our items to our Collection, and our Collection to our Catalog, we get the following STAC that can be saved:
collection.add_items([collection_item, collection_item2])
catalog.clear_items()
catalog.clear_children()
catalog.add_child(collection)
catalog.describe()
catalog.normalize_and_save(root_href=os.path.join(tmp_dir.name, 'stac-collection'),
catalog_type=pystac.CatalogType.SELF_CONTAINED)
# ### Cleanup
#
# Don't forget to clean up the temporary directory!
tmp_dir.cleanup()
# ## Creating a STAC of imagery from Spacenet 5 data
# Now, let's take what we've learned and create a Catalog with more data in it.
#
# ### Allowing PySTAC to read from AWS S3
#
# PySTAC aims to be virtually zero-dependency (notwithstanding the why-isn't-this-in-stdlib datetime-util), so it doesn't have the ability to read from or write to anything but the local file system. However, we can hook into PySTAC's IO in the following way. Learn more about how to use STAC_IO in the [documentation on the topic](https://pystac.readthedocs.io/en/latest/concepts.html#using-stac-io):
# +
from urllib.parse import urlparse
import boto3
from pystac import STAC_IO
def my_read_method(uri):
    """Read text from an s3:// URI via boto3, or fall back to PySTAC's default."""
    parts = urlparse(uri)
    if parts.scheme != 's3':
        return STAC_IO.default_read_text_method(uri)
    # parts.path starts with '/'; drop it to form the S3 object key.
    obj = boto3.resource('s3').Object(parts.netloc, parts.path[1:])
    return obj.get()['Body'].read().decode('utf-8')
def my_write_method(uri, txt):
    """Write text to an s3:// URI via boto3, or fall back to PySTAC's default."""
    parts = urlparse(uri)
    if parts.scheme == 's3':
        # parts.path starts with '/'; drop it to form the S3 object key.
        boto3.resource("s3").Object(parts.netloc, parts.path[1:]).put(Body=txt)
    else:
        STAC_IO.default_write_text_method(uri, txt)
STAC_IO.read_text_method = my_read_method
STAC_IO.write_text_method = my_write_method
# -
# We'll need a utility to list keys for reading the lists of files from S3:
# +
# From https://alexwlchan.net/2017/07/listing-s3-keys/
def get_s3_keys(bucket, prefix):
    """Yield every object key in `bucket` whose key starts with `prefix`.

    The S3 listing API returns results in pages, so this follows the
    continuation token until the listing is exhausted.
    """
    s3 = boto3.client('s3')
    kwargs = {'Bucket': bucket, 'Prefix': prefix}
    while True:
        resp = s3.list_objects_v2(**kwargs)
        for obj in resp['Contents']:
            yield obj['Key']
        try:
            # The continuation token is only present while more pages remain;
            # its absence (KeyError) ends the generator.
            kwargs['ContinuationToken'] = resp['NextContinuationToken']
        except KeyError:
            break
# -
# Let's make a STAC of imagery over Moscow as part of the Spacenet 5 challenge. As a first step, we can list out the imagery and extract IDs from each of the chips.
moscow_training_chip_uris = list(get_s3_keys(bucket='spacenet-dataset',
prefix='spacenet/SN5_roads/train/AOI_7_Moscow/PS-MS'))
# +
import re
chip_id_to_data = {}
def get_chip_id(uri):
    """Extract the numeric chip id from a SpaceNet file name (…_chipNNN.ext)."""
    match = re.search(r'.*_chip(\d+)\.', uri)
    return match.group(1)
for uri in moscow_training_chip_uris:
chip_id = get_chip_id(uri)
chip_id_to_data[chip_id] = { 'img': 's3://spacenet-dataset/{}'.format(uri) }
# -
# For this tutorial, we'll only take a subset of the data.
chip_id_to_data = dict(list(chip_id_to_data.items())[:10])
chip_id_to_data
# Let's turn each of those chips into a STAC Item that represents the image.
chip_id_to_items = {}
# We'll create core `Item`s for our imagery, but mark them with the `eo` extension as we did above, and store the `eo` data in a `Collection`.
#
# Note that the image CRS is in WGS:84 (Lat/Lng). If it wasn't, we'd have to reproject the footprint to WGS:84 in order to be compliant with the spec (which can easily be done with [pyproj](https://github.com/pyproj4/pyproj)).
#
# Here we're taking advantage of `rasterio`'s ability to read S3 URIs, which only grabs the GeoTIFF metadata and does not pull the whole file down.
for chip_id in chip_id_to_data:
img_uri = chip_id_to_data[chip_id]['img']
print('Processing {}'.format(img_uri))
bbox, footprint = get_bbox_and_footprint(img_uri)
item = pystac.Item(id='img_{}'.format(chip_id),
geometry=footprint,
bbox=bbox,
datetime=datetime.utcnow(),
properties={},
stac_extensions=[pystac.Extensions.EO])
item.common_metadata.gsd = 0.3
item.common_metadata.platform = 'Maxar'
item.common_metadata.instruments = ['WorldView3']
item.ext.eo.bands = wv3_bands
asset = pystac.Asset(href=img_uri,
media_type=pystac.MediaType.COG)
item.ext.eo.set_bands(wv3_bands, asset)
item.add_asset(key='ps-ms', asset=asset)
chip_id_to_items[chip_id] = item
# ### Creating the Collection
#
# All of these images are over Moscow. In Spacenet 5, several cities have imagery, and a Collection is a good way to separate these groups of imagery. We can store all of the common `eo` metadata in the collection.
# +
from shapely.geometry import (shape, MultiPolygon)
footprints = list(map(lambda i: shape(i.geometry).envelope,
chip_id_to_items.values()))
collection_bbox = MultiPolygon(footprints).bounds
spatial_extent = pystac.SpatialExtent(bboxes=[collection_bbox])
# -
datetimes = sorted(list(map(lambda i: i.datetime,
chip_id_to_items.values())))
temporal_extent = pystac.TemporalExtent(intervals=[[datetimes[0], datetimes[-1]]])
collection_extent = pystac.Extent(spatial=spatial_extent, temporal=temporal_extent)
collection = pystac.Collection(id='wv3-images',
description='Spacenet 5 images over Moscow',
extent=collection_extent,
license='CC-BY-SA-4.0')
collection.add_items(chip_id_to_items.values())
collection.describe()
# Now, we can create a Catalog and add the collection.
catalog = pystac.Catalog(id='spacenet5', description='Spacenet 5 Data (Test)')
catalog.add_child(collection)
catalog.describe()
# ## Adding items with the label extension to the Spacenet 5 catalog
#
# We can use the [label extension](https://github.com/radiantearth/stac-spec/tree/v0.8.1/extensions/label) of the STAC spec to represent the training data in our STAC. For this, we need to grab the URIs of the GeoJSON of roads:
moscow_training_geojson_uris = list(get_s3_keys(bucket='spacenet-dataset',
prefix='spacenet/SN5_roads/train/AOI_7_Moscow/geojson_roads_speed/'))
for uri in moscow_training_geojson_uris:
chip_id = get_chip_id(uri)
if chip_id in chip_id_to_data:
chip_id_to_data[chip_id]['label'] = 's3://spacenet-dataset/{}'.format(uri)
# We'll add the items to their own subcatalog; since they don't inherit the Collection's `eo` properties, they shouldn't go in the Collection.
label_catalog = pystac.Catalog(id='spacenet-data-labels', description='Labels for Spacenet 5')
catalog.add_child(label_catalog)
# To see the required fields for the label extension we can check the pydocs on the `apply` method of the extension:
from pystac.extensions import label
print(label.LabelItemExt.apply.__doc__)
# This loop creates our label items and associates each to the appropriate source image Item.
for chip_id in chip_id_to_data:
img_item = collection.get_item('img_{}'.format(chip_id))
label_uri = chip_id_to_data[chip_id]['label']
label_item = pystac.Item(id='label_{}'.format(chip_id),
geometry=img_item.geometry,
bbox=img_item.bbox,
datetime=datetime.utcnow(),
properties={},
stac_extensions=[pystac.Extensions.LABEL])
label_item.ext.label.apply(label_description="SpaceNet 5 Road labels",
label_type=label.LabelType.VECTOR,
label_tasks=['segmentation', 'regression'])
label_item.ext.label.add_source(img_item)
label_item.ext.label.add_geojson_labels(label_uri)
label_catalog.add_item(label_item)
# Now we have a STAC of training data!
catalog.describe()
label_item = catalog.get_child('spacenet-data-labels').get_item('label_1')
label_item.to_dict()
| docs/tutorials/how-to-create-stac-catalogs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="DnLV1HUefFtW"
# # Text Features In CatBoost
# + [markdown] colab_type="text" id="0UAHpnD8fFtZ"
# [](https://colab.research.google.com/github/catboost/tutorials/blob/master/events/2020_06_04_catboost_tutorial/text_features.ipynb)
#
# **Set GPU as hardware accelerator**
#
# First of all, you need to select GPU as hardware accelerator. There are two simple steps to do so:
# Step 1. Navigate to **Runtime** menu and select **Change runtime type**
# Step 2. Choose **GPU** as hardware accelerator.
# That's all!
# + [markdown] colab_type="text" id="9FM0IRyi8NOw"
# Let's install CatBoost.
# + colab={"base_uri": "https://localhost:8080/", "height": 361} colab_type="code" id="TpJdgt63fSOv" outputId="d62a776e-f741-4192-b919-91903ea0441b"
# !pip install catboost
# + [markdown] colab_type="text" id="viF18QJqfFtd"
# In this tutorial we will use dataset **Rotten Tomatoes Movie Reviews** from [Kaggle](https://www.kaggle.com) competition for our experiments. Data can be downloaded [here](https://www.kaggle.com/rpnuser8182/rotten-tomatoes/data).
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="MNC1tP0UfFtd" outputId="2c0abe55-df9c-4a0f-daa4-dc8c8d858f63"
import os
import pandas as pd
import numpy as np
np.set_printoptions(precision=4)
import catboost
print(catboost.__version__)
# + [markdown] colab_type="text" id="OkexL1k7fFti"
# ## Reading the data
# + colab={"base_uri": "https://localhost:8080/", "height": 284} colab_type="code" id="m11CtnPEfFtj" outputId="715d43f8-ab44-44e0-ebd5-5b4327be07b7"
from catboost.datasets import rotten_tomatoes
train_df, test_df = rotten_tomatoes()
train_df.head(2)
# + [markdown] colab_type="text" id="8IeOEa1gfFtm"
# ### Features description
#
# |Id | Feature name | Description |
# |---|-------------------|----------------------------------------------------------------------------------------------|
# | 1 | ``id`` | unique movie id |
# | 2 | ``synopsis`` | brief summary of the major points of a movie |
# | 3 | ``rating_MPAA`` | film rating by MPAA rating system |
# | 4 | ``genre`` | list of genres that are suitable for this film (e.g. Action, Adventure, Comedy,... |
# | 5 | ``director`` | list of persons who direct the making of a film |
# | 6 | ``writer`` | list of persons who write a screenplay |
# | 7 | ``theater_date`` | the date when film was first shown to the public in cinema (string) |
# | 8 | ``dvd_date`` | the date when film was released on DVD (string) |
# | 9 | ``box_office`` | the amount of money raised by ticket sales (revenue) |
# | 10 | ``runtime`` | film duration in minutes |
# | 11 | ``studio`` | is a major entertainment company or motion picture company (20th Century Fox, Sony Pictures)|
# | 12 | ``dvd_date_int`` | the date when film was released on DVD (converted to integer) |
# | 13 | ``theater_date_int`` | the date when film was first shown to the public in cinema (converted to integer) |
# | 14 | ``review`` | review of a movie, that was written by a critic |
# | 15 | ``rating`` | float rating from 0 to 1 of the film according to the Rotten tomatoes web site |
# | 16 | ``fresh`` | freshness of review - fresh or rotten |
# | 17 | ``critic`` | name of reviewer |
# | 18 | ``top_critic`` | binary feature, is reviewer a top critic or not |
# | 19 | ``publisher`` | journal or website where the review was published |
# | 20 | ``date`` | the date when critic publish review (string) |
# | 21 | ``date_int`` | the date when critic publish review (converted to integer) |
# | 22 | ``rating_10`` | integer rating from 0 to 10 of the film according to the critic |
#
# We mark as **auxiliary** columnns 'id' and 'rating', because they can be the reason of overfitting, 'theater_date','dvd_date','date' because we convert them into integers.
#
# We mark as **text** features 'synopsis' because it is a short *text* description of a film, and 'genre' because it is a combination of categories (we know that the strings have structure where words define categories), for example 'Action | Comedy | Adventure'; the 'director' and 'writer' features are included in the text features for the same reason, and 'review' because it is a *text* summary of a critic's opinion.
#
# We mark as **categorical** features 'rating_MPAA', 'studio', 'fresh', 'critic', 'top_critic' and 'publisher' because they cannot be split into groups of categories and their feature values cannot be compared.
#
# The other columns considered as **numeric**.
# + [markdown] colab_type="text" id="wJRY9YyVfFtl"
# ## Preparing the data
# + colab={} colab_type="code" id="Qy_gcs7qfFtn"
# Columns that could cause leakage/overfitting or were already converted.
auxiliary_columns = ['id', 'theater_date', 'dvd_date', 'rating', 'date']

cat_features = ['rating_MPAA', 'studio', 'fresh', 'critic', 'top_critic', 'publisher']
text_features = ['synopsis', 'genre', 'director', 'writer', 'review']

# + colab={} colab_type="code" id="WkV114UDfFtp"
def fill_na(df, features):
    """Replace NaN with '' in the given columns, in place on `df`.

    CatBoost rejects NaN in categorical/text features. Assigning the filled
    column back (instead of `fillna(..., inplace=True)` on a column
    selection) avoids the pattern deprecated by pandas, which is unreliable
    under copy-on-write.
    """
    for feature in features:
        df[feature] = df[feature].fillna('')


def preprocess_data_part(data_part):
    """Drop auxiliary columns, fill NaNs in cat/text columns, split into (X, y)."""
    data_part = data_part.drop(auxiliary_columns, axis=1)

    fill_na(data_part, cat_features)
    fill_na(data_part, text_features)

    X = data_part.drop(['rating_10'], axis=1)
    y = data_part['rating_10']
    return X, y
X_train, y_train = preprocess_data_part(train_df)
X_test, y_test = preprocess_data_part(test_df)
X_train_no_text = X_train.drop(text_features, axis=1)
X_test_no_text = X_test.drop(text_features, axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 111} colab_type="code" id="OfkxzEZXfFtr" outputId="294c112f-e382-4f0c-8b53-28c3158aa721"
X_train_no_text.head(2)
# + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="CTq7w0U9fFtt" outputId="c0b10680-d537-49c2-ef0c-4d5579b672f0"
from catboost import Pool
train_pool_no_text = Pool(
X_train_no_text, y_train,
cat_features=cat_features,
)
validation_pool_no_text = Pool(
X_test_no_text, y_test,
cat_features=cat_features,
)
print('Train dataset shape: {}\n'.format(train_pool_no_text.shape))
# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="VTi3eN58fFt6" outputId="e694fed2-1341-45a3-c799-334b32fbc01e"
from catboost import CatBoostClassifier
def fit_model(train_pool, validation_pool, **kwargs):
    """Train a GPU CatBoostClassifier (1000 iterations, lr 0.05) on `train_pool`.

    Accuracy is tracked on `validation_pool` and logged every 100 iterations.
    Extra keyword arguments are forwarded to the CatBoostClassifier
    constructor. Returns the fitted model.
    """
    base_params = {
        'iterations': 1000,
        'learning_rate': 0.05,
        'eval_metric': 'Accuracy',
        'task_type': 'GPU',
    }
    classifier = CatBoostClassifier(**base_params, **kwargs)
    return classifier.fit(
        train_pool,
        eval_set=validation_pool,
        verbose=100,
    )
model_no_text = fit_model(train_pool_no_text, validation_pool_no_text)
# + [markdown] colab_type="text" id="QhF2RAAhfFuJ"
# # Text Features
# + colab={"base_uri": "https://localhost:8080/", "height": 305} colab_type="code" id="Aw0M5trY8Dmg" outputId="bde6afe0-cf94-46a4-ae36-19bb5b6361e3"
train_pool = Pool(
X_train, y_train,
cat_features=cat_features,
text_features=text_features,
)
validation_pool = Pool(
X_test, y_test,
cat_features=cat_features,
text_features=text_features,
)
print('Train dataset shape: {}\n'.format(train_pool.shape))
model = fit_model(train_pool, validation_pool)
# + colab={} colab_type="code" id="HsuS5qKnfFuQ"
def print_score_diff(first_model, second_model):
    """Print both models' best validation Accuracy and the relative gap in %."""
    baseline = first_model.best_score_['validation']['Accuracy']
    challenger = second_model.best_score_['validation']['Accuracy']
    relative_gain = (challenger - baseline) / baseline * 100
    print('{} vs {} ({:+.2f}%)'.format(baseline, challenger, relative_gain))
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="O-3uDpJafFuS" outputId="9827b6fb-4408-4725-f267-f38a6ee642ba"
print_score_diff(model_no_text, model)
# + [markdown] colab_type="text" id="Ym-fEV-mfFuU"
# <span style="color:red">Note!</span>
#
# 1. Text features also cannot contain NaN values, so we converted them into strings manually.
# 2. The training may be performed only with classification losses and targets.
# + [markdown] colab_type="text" id="IiHpTGfbfFuV"
# ## How it works?
#
# 1. **Text Tokenization**
# 2. **Dictionary Creation**
# 3. **Feature Calculation**
# + [markdown] colab_type="text" id="MszSnbqH8NR3"
# ## Text Tokenization
# + [markdown] colab_type="text" id="mOBGuexjb8tr"
# Usually we get our text as a sequence of Unicode symbols. So, unless the task is something like DNA classification, we don't need such granularity; instead, we need to extract more complicated entities, e.g. words. The process of extracting tokens -- words, numbers, punctuation symbols, or special symbols which define emoji -- from a sequence is called **tokenization**.<br>
#
# Tokenization is the first part of text preprocessing in CatBoost and performed as a simple splitting a sequence on a string pattern (e.g. space).
# + colab={} colab_type="code" id="NAeELULufFuV"
text_small = [
"Cats are so cute :)",
"Mouse scare...",
"The cat defeated the mouse",
"Cute: Mice gather an army!",
"Army of mice defeated the cat :(",
"Cat offers peace",
"Cat is scared :(",
"Cat and mouse live in peace :)"
]
target_small = [1, 0, 1, 1, 0, 1, 0, 1]
# + colab={"base_uri": "https://localhost:8080/", "height": 161} colab_type="code" id="E21CQ8ocfFuX" outputId="f78b995b-29fc-41c9-b28c-b3adee167ba7"
from catboost.text_processing import Tokenizer
# A Tokenizer with default settings, shared by tokenize_texts.
simple_tokenizer = Tokenizer()


def tokenize_texts(texts):
    """Tokenize each text with CatBoost's default Tokenizer settings."""
    return list(map(simple_tokenizer.tokenize, texts))
simple_tokenized_text = tokenize_texts(text_small)
simple_tokenized_text
# + [markdown] colab_type="text" id="ChZQ5cpJfFuZ"
# ### More preprocessing!
#
# Lets take a closer look on the tokenization result of small text example -- the tokens contains a lot of mistakes:
#
# 1. They are glued with punctuation 'Cute:', 'army!', 'scare...'.
# 2. The words 'Cat' and 'cat', 'Mice' and 'mice' seems to have same meaning, perhaps they should be the same tokens.
# 3. The same problem with tokens 'are'/'is' -- they are inflected forms of same token 'be'.
#
# **Punctuation handling**, **lowercasing**, and **lemmatization** processes help to overcome these problems.
# + [markdown] colab_type="text" id="qaoTjEmR8NSM"
# ### Punctuation handling and lowercasing
# + colab={"base_uri": "https://localhost:8080/", "height": 161} colab_type="code" id="6cPpYpmtfFuZ" outputId="2bc7abef-5828-43af-d588-48edb490eed9"
tokenizer = Tokenizer(
lowercasing=True,
separator_type='BySense',
token_types=['Word', 'Number']
)
tokenized_text = [tokenizer.tokenize(text) for text in text_small]
tokenized_text
# + [markdown] colab_type="text" id="JDhBkZzJfFua"
# ### Removing stop words
#
# **Stop words** - the words that are considered to be uninformative in this task, e.g. function words such as *the, is, at, which, on*.
# Usually stop words are removed during text preprocessing to reduce the amount of information that is considered for further algorithms.
# Stop words are collected manually (in dictionary form) or automatically, for example taking the most frequent words.
# + colab={"base_uri": "https://localhost:8080/", "height": 161} colab_type="code" id="d1MYzKgTfFub" outputId="865f655e-0cb9-4626-9d40-e459b9487b0f"
# Hand-picked function words considered uninformative for this task.
stop_words = set(('be', 'is', 'are', 'the', 'an', 'of', 'and', 'in'))


def filter_stop_words(tokens):
    """Return `tokens` with stop words removed, preserving order."""
    return [token for token in tokens if token not in stop_words]
tokenized_text_no_stop = [filter_stop_words(tokens) for tokens in tokenized_text]
tokenized_text_no_stop
# + [markdown] colab_type="text" id="vxofPVc1fFuc"
# ### Lemmatization
#
# Lemma (Wikipedia) -- is the canonical form, dictionary form, or citation form of a set of words.<br>
# For example, the lemma "go" represents the inflected forms "go", "goes", "going", "went", and "gone".<br>
# The process of converting a word to its lemma is called **lemmatization**.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 89} colab_type="code" id="HWrijpMGfFud" outputId="1b6b8015-8cf9-47c5-89cf-5d5fc8b5f794"
import nltk
nltk_data_path = os.path.join(os.path.dirname(nltk.__file__), 'nltk_data')
nltk.data.path.append(nltk_data_path)
nltk.download('wordnet', nltk_data_path)
# Shared WordNet lemmatizer instance (nltk 'wordnet' corpus downloaded above).
lemmatizer = nltk.stem.WordNetLemmatizer()


def lemmatize_tokens_nltk(tokens):
    """Replace each token with its WordNet lemma."""
    return [lemmatizer.lemmatize(token) for token in tokens]
# + colab={"base_uri": "https://localhost:8080/", "height": 161} colab_type="code" id="XfyhV9ONfFuf" outputId="4b0568c9-3bb8-483a-8f86-dd358c6fd2c5"
text_small_lemmatized_nltk = [lemmatize_tokens_nltk(tokens) for tokens in tokenized_text_no_stop]
text_small_lemmatized_nltk
# + [markdown] colab_type="text" id="y63KVna4fFui"
# Now words with same meaning represented by the same token, tokens are not glued with punctuation.
#
# <span style="color:red">Be careful.</span> You should verify for your own task:<br>
# Is it really necessary to remove punctuation, lowercase sentences, or perform lemmatization and/or word-level tokenization?<br>
# + [markdown] colab_type="text" id="qFWoSX-kfFui"
# ### Let's check up accuracy with new text preprocessing
#
# Since CatBoost doesn't perform spacing punctuation, lowercasing letters and lemmatization, we need to preprocess text manually and then pass it to learning algorithm.
#
# Since the natural text features is only synopsis and review, we will preprocess only them.
# + colab={"base_uri": "https://localhost:8080/", "height": 53} colab_type="code" id="ZHL3x7NwfFuj" outputId="85135452-02ea-4644-882d-726fcc568605"
# %%time
def preprocess_data(X):
    """Return a copy of X with 'synopsis' and 'review' tokenized, lemmatized,
    and re-joined into space-separated strings."""
    def _normalize(text):
        # One pass: tokenize, lemmatize each token, rebuild the string.
        return ' '.join(lemmatize_tokens_nltk(tokenizer.tokenize(text)))

    X_preprocessed = X.copy()
    for feature in ('synopsis', 'review'):
        X_preprocessed[feature] = X[feature].apply(_normalize)
    return X_preprocessed
X_preprocessed_train = preprocess_data(X_train)
X_preprocessed_test = preprocess_data(X_test)
train_processed_pool = Pool(
X_preprocessed_train, y_train,
cat_features=cat_features,
text_features=text_features,
)
validation_processed_pool = Pool(
X_preprocessed_test, y_test,
cat_features=cat_features,
text_features=text_features,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="0jJJSrFJfFuk" outputId="6baeef42-d430-4793-fc33-556095416a9b"
model_on_processed_data = fit_model(train_processed_pool, validation_processed_pool)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="AXDdPAgyfFum" outputId="61e26e81-b858-4675-ab58-aaf3384428ae"
print_score_diff(model, model_on_processed_data)
# + [markdown] colab_type="text" id="CJr7fXN7fFun"
# ## Dictionary Creation
#
# After the first stage, preprocessing of text and tokenization, the second stage starts. The second stage uses the prepared text to select a set of units, which will be used for building new numerical features.
#
# A set of selected units is called dictionary. It might contain words, word bigramms, or character n-gramms.
# + colab={} colab_type="code" id="D6H1MXf9fFuo"
from catboost.text_processing import Dictionary
# + colab={} colab_type="code" id="Rn402k78fFuq"
dictionary = Dictionary(occurence_lower_bound=0, max_dictionary_size=10)
dictionary.fit(text_small_lemmatized_nltk);
#dictionary.fit(text_small, tokenizer)
# + colab={"base_uri": "https://localhost:8080/", "height": 253} colab_type="code" id="KJr0UBzOfFur" outputId="4ab23b42-0fb7-4ac4-c878-63da839c8635"
dictionary.save('dictionary.tsv')
# !cat dictionary.tsv
# + [markdown] colab_type="text" id="U1wLb5MX8NTY"
# ## Feature Calculation
# + [markdown] colab_type="text" id="KYzNqXgcfFut"
# ### Conversion into fixed size vectors
#
# The majority of classic ML algorithms are computing and performing predictions on a fixed number of features $F$.<br>
# That means that learning set $X = \{x_i\}$ contains vectors $x_i = (a_0, a_1, ..., a_F)$ where $F$ is constant.
#
# Since text object $x$ is not a fixed length vector, we need to perform preprocessing of the origin set $D$.<br>
# One of the simplest text to vector encoding technique is **Bag of words (BoW)**.
#
# ### Bag of words algorithm
#
# The algorithm takes in a dictionary and a text.<br>
# During the algorithm text $x = (a_0, a_1, ..., a_k)$ converted into vector $\tilde x = (b_0, b_1, ..., b_F)$,<br> where $b_i$ is 0/1 (depending on whether there is a word with id=$i$ from dictionary into text $x$).
# + colab={"base_uri": "https://localhost:8080/", "height": 161} colab_type="code" id="7Ea944JbfFuu" outputId="5f788c52-345c-4703-957a-4f57dd29c418"
text_small_lemmatized_nltk
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="bRm5Cf5qkzlJ" outputId="6226eea1-ab2b-4924-df6c-a006e71965f5"
dictionary.apply([text_small_lemmatized_nltk[0]])
# + colab={"base_uri": "https://localhost:8080/", "height": 305} colab_type="code" id="ga0AfpT8fFuv" outputId="6b6e9abb-3e2a-4a8e-eac9-dacbac3c33fd"
def bag_of_words(tokenized_text, dictionary):
    """Encode tokenized sentences as 0/1 bag-of-words vectors.

    tokenized_text: list of token lists, one per sentence.
    dictionary: fitted CatBoost Dictionary (uses .size and .apply()).
    Returns an ndarray of shape (len(tokenized_text), dictionary.size) where
    entry [i, t] is 1 iff token id t occurs in sentence i.
    """
    features = np.zeros((len(tokenized_text), dictionary.size))
    for i, tokenized_sentence in enumerate(tokenized_text):
        # BUG FIX: force an integer dtype -- np.array([]) defaults to
        # float64, and indexing with a float array raises IndexError for
        # sentences with no in-dictionary tokens.
        indices = np.asarray(dictionary.apply([tokenized_sentence])[0], dtype=np.intp)
        features[i, indices] = 1
    return features
bow_features = bag_of_words(text_small_lemmatized_nltk, dictionary)
bow_features
# + colab={} colab_type="code" id="vhr-EyPyfFuy"
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from scipy.sparse import csr_matrix
from sklearn.metrics import log_loss
def fit_linear_model(X, c):
    """Train a logistic-regression classifier on features X and labels c."""
    classifier = LogisticRegression()
    # sklearn estimators return self from fit(), so this hands back the
    # fitted model directly.
    return classifier.fit(X, c)
def fit_naive_bayes(X, c):
    """Train a multinomial naive Bayes classifier on features X and labels c."""
    nb_model = MultinomialNB()
    if isinstance(X, csr_matrix):
        # NOTE(review): drops explicitly-stored zeros from the sparse input
        # before fitting -- presumably to keep the sparsity structure clean;
        # confirm it is required by MultinomialNB.
        X.eliminate_zeros()
    nb_model.fit(X, c)
    return nb_model
def evaluate_model_logloss(model, X, y):
    """Print the log-loss of `model`'s positive-class probabilities on (X, y)."""
    positive_proba = model.predict_proba(X)[:, 1]
    print('Logloss: ' + str(log_loss(y, positive_proba)))
# + colab={"base_uri": "https://localhost:8080/", "height": 125} colab_type="code" id="GekNCx5ofFuz" outputId="5b218b73-c7fd-4628-f218-29d0d30686eb"
def evaluate_models(X, y):
    """Fit and compare logistic regression and multinomial naive Bayes on
    the given features.

    X: feature matrix (dense ndarray or scipy CSR), y: binary labels.
    Prints the log-loss of both models and of a constant 0.5 baseline.
    """
    # BUG FIX: the original fit the models on the notebook globals
    # `bow_features` / `target_small` and sized the constant baseline with
    # the global `text_small`, silently ignoring the X/y arguments.
    # Use the parameters throughout so callers get what they asked for.
    linear_model = fit_linear_model(X, y)
    naive_bayes = fit_naive_bayes(X, y)

    print('Linear model')
    evaluate_model_logloss(linear_model, X, y)
    print('Naive bayes')
    evaluate_model_logloss(naive_bayes, X, y)

    print('Comparing to constant prediction')
    # Log-loss of always predicting 50/50 for both classes.
    logloss_constant_prediction = log_loss(y, np.ones(shape=(len(y), 2)) * 0.5)
    print('Logloss: ' + str(logloss_constant_prediction))
evaluate_models(bow_features, target_small)
# + colab={"base_uri": "https://localhost:8080/", "height": 125} colab_type="code" id="uFsAWNE9fFu2" outputId="7197acdf-71ac-4c81-b507-4f06cafdbea8"
dictionary = Dictionary(occurence_lower_bound=0)
dictionary.fit(text_small_lemmatized_nltk)
bow_features = bag_of_words(text_small_lemmatized_nltk, dictionary)
evaluate_models(bow_features, target_small)
# + [markdown] colab_type="text" id="yvjUACB_fFu6"
# ### Looking at sequences of letters / words
#
# Let's look at the example: texts 'The cat defeated the mouse' and 'Army of mice defeated the cat :('<br>
# Simplifying it we have three tokens in each sentence 'cat defeat mouse' and 'mouse defeat cat'.<br>
# After applying BoW we get two equal vectors with the opposite meaning:
#
# | cat | mouse | defeat |
# |-----|-------|--------|
# | 1 | 1 | 1 |
# | 1 | 1 | 1 |
#
# How to distinguish them?
# Lets add sequences of words as a single tokens into our dictionary:
#
# | cat | mouse | defeat | cat_defeat | mouse_defeat | defeat_cat | defeat_mouse |
# |-----|-------|--------|------------|--------------|------------|--------------|
# | 1 | 1 | 1 | 1 | 0 | 0 | 1 |
# | 1 | 1 | 1 | 0 | 1 | 1 | 0 |
#
# **N-gram** is a contiguous sequence of $n$ items from a given sample of text or speech (Wikipedia).<br>
# In example above Bi-gram (Bigram) = 2-gram of words.
#
# N-grams help to add more information about text structure into the vectors; moreover, there are n-grams that have no meaning in separation, for example, 'Mickey Mouse company'.
# + colab={"base_uri": "https://localhost:8080/", "height": 379} colab_type="code" id="WU6iWFPZClrf" outputId="b666b9a2-0782-472a-a729-0fa1b15bd9f2"
dictionary = Dictionary(occurence_lower_bound=0, gram_order=2)
dictionary.fit(text_small_lemmatized_nltk)
dictionary.save('dictionary.tsv')
# !cat dictionary.tsv
# + colab={"base_uri": "https://localhost:8080/", "height": 125} colab_type="code" id="ypPTi_XXfFu7" outputId="59136696-c457-4f99-b884-cf1e2e68fb80"
bow_features = bag_of_words(text_small_lemmatized_nltk, dictionary)
evaluate_models(bow_features, target_small)
# + [markdown] colab_type="text" id="1uLlIfJHodEL"
# ### Unigram + Bigram
# + colab={"base_uri": "https://localhost:8080/", "height": 125} colab_type="code" id="XaRC74kNfFu8" outputId="f67a5ea4-0795-4b16-db80-2bff733109e9"
dictionary1 = Dictionary(occurence_lower_bound=0)
dictionary1.fit(text_small_lemmatized_nltk)
bow_features1 = bag_of_words(text_small_lemmatized_nltk, dictionary1)
dictionary2 = Dictionary(occurence_lower_bound=0, gram_order=2)
dictionary2.fit(text_small_lemmatized_nltk)
bow_features2 = bag_of_words(text_small_lemmatized_nltk, dictionary2)
bow_features = np.concatenate((bow_features1, bow_features2), axis=1)
evaluate_models(bow_features, target_small)
# + [markdown] colab_type="text" id="oFR_rMfH8NT_"
# ## CatBoost Configuration
# + [markdown] colab_type="text" id="8xoFAOiz8NT_"
# Parameter names:
#
# 1. **Text Tokenization** - `tokenizers`
# 2. **Dictionary Creation** - `dictionaries`
# 3. **Feature Calculation** - `feature_calcers`
#
# \* More complex configuration with `text_processing` parameter
# + [markdown] colab_type="text" id="Wntt3XrYgkhf"
# ### `tokenizers`
#
# Tokenizers used to preprocess Text type feature columns before creating the dictionary.
#
# [Documentation](https://catboost.ai/docs/references/tokenizer_options.html).
#
# ```
# tokenizers = [{
# 'tokenizer_id': 'Space',
# 'delimiter': ' ',
# 'separator_type': 'ByDelimiter',
# },{
# 'tokenizer_id': 'Sense',
# 'separator_type': 'BySense',
# }]
# ```
# + [markdown] colab_type="text" id="aKqHyav7fFu-"
# ### `dictionaries`
#
# Dictionaries used to preprocess Text type feature columns.
#
# [Documentation](https://catboost.ai/docs/references/dictionaries_options.html).
#
# ```
# dictionaries = [{
# 'dictionary_id': 'Unigram',
# 'max_dictionary_size': '50000',
# 'gram_count': '1',
# },{
# 'dictionary_id': 'Bigram',
# 'max_dictionary_size': '50000',
# 'gram_count': '2',
# },{
# 'dictionary_id': 'Trigram',
# 'token_level_type': 'Letter',
# 'max_dictionary_size': '50000',
# 'gram_count': '3',
# }]
# ```
# + [markdown] colab_type="text" id="JT6I_LN98NUC"
# ### `feature_calcers`
#
# Feature calcers used to calculate new features based on preprocessed Text type feature columns.
#
# 1. **`BoW`**<br>
# Bag of words: 0/1 features (text sample has or not token_id).<br>
# Number of produced numeric features = dictionary size.<br>
# Parameters: `top_tokens_count` - maximum number of tokens that will be used for vectorization in bag of words, the most frequent $n$ tokens are taken (**highly affects both CPU and GPU RAM usage**).
#
# 2. **`NaiveBayes`**<br>
# NaiveBayes: [Multinomial naive bayes](https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes) model. As many new features as classes are added. This feature is calculated by analogy with counters in CatBoost by permutation ([estimation of CTRs](https://catboost.ai/docs/concepts/algorithm-main-stages_cat-to-numberic.html)). In other words, a random permutation is made and then we go from top to bottom on the dataset and calculate the probability of its belonging to this class for each object.
#
# 3. **`BM25`**<br>
# [BM25](https://en.wikipedia.org/wiki/Okapi_BM25). As many new features as classes are added. The idea is the same as in Naive Bayes, but for each class we calculate not the conditional probability, but a certain relevance, which is similar to tf-idf, where the tokens instead of the words and the classes instead of the documents (or rather, the unification of all texts of this class). Only the tf multiplier in BM25 is replaced with another multiplier, which gives an advantage to classes that contain rare tokens.
#
# ```
# feature_calcers = [
# 'BoW:top_tokens_count=1000',
# 'NaiveBayes',
# 'BM25',
# ]
# ```
# + [markdown] colab_type="text" id="02lH5f1PgpYM"
# ### `text_processing`
#
# ```
# text_processing = {
# "tokenizers" : [{
# "tokenizer_id" : "Space",
# "separator_type" : "ByDelimiter",
# "delimiter" : " "
# }],
#
# "dictionaries" : [{
# "dictionary_id" : "BiGram",
# "max_dictionary_size" : "50000",
# "occurrence_lower_bound" : "3",
# "gram_order" : "2"
# }, {
# "dictionary_id" : "Word",
# "max_dictionary_size" : "50000",
# "occurrence_lower_bound" : "3",
# "gram_order" : "1"
# }],
#
# "feature_processing" : {
# "default" : [{
# "dictionaries_names" : ["BiGram", "Word"],
# "feature_calcers" : ["BoW"],
# "tokenizers_names" : ["Space"]
# }, {
# "dictionaries_names" : ["Word"],
# "feature_calcers" : ["NaiveBayes"],
# "tokenizers_names" : ["Space"]
# }],
# }
# }
# ```
# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="-HOhMr-ffFu_" outputId="d28394e4-fb6e-4a63-9090-8860a3a27333"
model_on_processed_data_2 = fit_model(
train_processed_pool,
validation_processed_pool,
text_processing = {
"tokenizers" : [{
"tokenizer_id" : "Space",
"separator_type" : "ByDelimiter",
"delimiter" : " "
}],
"dictionaries" : [{
"dictionary_id" : "BiGram",
"max_dictionary_size" : "50000",
"occurrence_lower_bound" : "3",
"gram_order" : "2"
}, {
"dictionary_id" : "Word",
"max_dictionary_size" : "50000",
"occurrence_lower_bound" : "3",
"gram_order" : "1"
}],
"feature_processing" : {
"default" : [{
"dictionaries_names" : ["BiGram", "Word"],
"feature_calcers" : ["BoW"],
"tokenizers_names" : ["Space"]
}, {
"dictionaries_names" : ["Word"],
"feature_calcers" : ["NaiveBayes"],
"tokenizers_names" : ["Space"]
}],
}
}
)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="HFJRD9RofFvC" outputId="08f27541-75fe-4c0e-dd88-3b6e9a716035"
print_score_diff(model_no_text, model_on_processed_data_2)
# + [markdown] colab_type="text" id="xlo77dzufFvE"
# # Summary: Text features in CatBoost
#
# ### The algorithm:
# 1. Input text is loaded as a usual column. ``text_column: [string]``.
# 2. Each text sample is tokenized via splitting by space. ``tokenized_column: [[string]]``.
# 3. Dictionary estimation.
# 4. Each string in tokenized column is converted into token_id from dictionary. ``text: [[token_id]]``.
# 5. Depending on the parameters, CatBoost produces features based on the resulting text column: Bag of words, Multinomial naive Bayes or BM25.
# 6. Computed float features are passed into the usual CatBoost learning algorithm.
# + colab={} colab_type="code" id="_A87DhGF8SIa"
| catboost/tutorials/events/2020_06_04_catboost_tutorial/text_features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Objective" data-toc-modified-id="Objective-1"><span class="toc-item-num">1 </span>Objective</a></span></li><li><span><a href="#Used-Python-Libraries" data-toc-modified-id="Used-Python-Libraries-2"><span class="toc-item-num">2 </span>Used Python Libraries</a></span></li><li><span><a href="#Load-Data" data-toc-modified-id="Load-Data-3"><span class="toc-item-num">3 </span>Load Data</a></span><ul class="toc-item"><li><span><a href="#Example-Conversation--Data" data-toc-modified-id="Example-Conversation--Data-3.1"><span class="toc-item-num">3.1 </span>Example Conversation Data</a></span></li><li><span><a href="#Extracting-Conversation-Data" data-toc-modified-id="Extracting-Conversation-Data-3.2"><span class="toc-item-num">3.2 </span>Extracting Conversation Data</a></span></li><li><span><a href="#Talking-Points-Data" data-toc-modified-id="Talking-Points-Data-3.3"><span class="toc-item-num">3.3 </span>Talking Points Data</a></span></li><li><span><a href="#Mapping-of-Conversations-to-Talking-Points---NOT-USED-YET" data-toc-modified-id="Mapping-of-Conversations-to-Talking-Points---NOT-USED-YET-3.4"><span class="toc-item-num">3.4 </span>Mapping of Conversations to Talking Points - NOT USED YET</a></span></li></ul></li><li><span><a href="#Pre-Trained-Sentence-Encoder" data-toc-modified-id="Pre-Trained-Sentence-Encoder-4"><span class="toc-item-num">4 </span>Pre-Trained Sentence Encoder</a></span><ul class="toc-item"><li><span><a href="#Example-of-Using-Encoder" data-toc-modified-id="Example-of-Using-Encoder-4.1"><span class="toc-item-num">4.1 </span>Example of Using Encoder</a></span></li><li><span><a href="#Using-Encoder-for-Chats" data-toc-modified-id="Using-Encoder-for-Chats-4.2"><span class="toc-item-num">4.2 </span>Using Encoder for Chats</a></span></li></ul></li><li><span><a href="#Data-Processing" data-toc-modified-id="Data-Processing-5"><span class="toc-item-num">5 </span>Data Processing</a></span><ul 
class="toc-item"><li><span><a href="#Processing-Chats" data-toc-modified-id="Processing-Chats-5.1"><span class="toc-item-num">5.1 </span>Processing Chats</a></span><ul class="toc-item"><li><span><a href="#Example-of-Processed-Chat-Data" data-toc-modified-id="Example-of-Processed-Chat-Data-5.1.1"><span class="toc-item-num">5.1.1 </span>Example of Processed Chat Data</a></span></li></ul></li></ul></li><li><span><a href="#Analyzing-Processed-Data" data-toc-modified-id="Analyzing-Processed-Data-6"><span class="toc-item-num">6 </span>Analyzing Processed Data</a></span><ul class="toc-item"><li><span><a href="#Analyzing-Chats" data-toc-modified-id="Analyzing-Chats-6.1"><span class="toc-item-num">6.1 </span>Analyzing Chats</a></span></li></ul></li><li><span><a href="#Interactive-Visualization-of-Results" data-toc-modified-id="Interactive-Visualization-of-Results-7"><span class="toc-item-num">7 </span>Interactive Visualization of Results</a></span></li></ul></div>
# -
# ## Objective
#
# This project will prototype a tool to:
# 1. identify utterances where a speaker uses "talking points", i.e. talks about a topic that is popped up to them during a conversation;
# 2.
#
# We will also understand what typical "similarity" distance values between topics and utterances where these topics are used can be found in natural conversations (see Using Encoder for Chats section).
#
# We use data provided by Amazon Science ([Topical Chats](https://www.amazon.science/blog/amazon-releases-data-set-of-annotated-conversations-to-aid-development-of-socialbots) project).
# [Back to Contents](#Table-of-Contents)
# ## Used Python Libraries
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text #needed to avoid crashes
import os
import os
import glob
import pandas as pd
import numpy as np
import json
from datetime import timedelta
import re
from sentence_transformers import SentenceTransformer, util
import psutil
import ray
import sys
from random import sample
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
# [Back to Contents](#Table-of-Contents)
# ## Load Data
# + code_folding=[7]
#https://registry.opendata.aws/topical-chat-enriched/
# data can be also download from the github repo
import boto3
import os
from botocore import UNSIGNED
from botocore.client import Config
def download_all_files():
    """Download every object from the public 'enriched-topical-chat' S3
    bucket into the current directory.

    Uses anonymous (unsigned) access, so no AWS credentials are needed.
    """
    s3 = boto3.resource('s3', config=Config(signature_version=UNSIGNED))
    bucket = s3.Bucket('enriched-topical-chat')
    for obj in bucket.objects.all():
        # The object key doubles as the local filename.
        bucket.download_file(obj.key, obj.key)
download_all_files()
# -
# !mkdir alexa
# !mv *.json alexa/
# ### Example Conversation Data
train_df = pd.read_json('alexa/conversations/train.json').T
train_df.head()
cont_tmp = train_df.iloc[2]['content']
cont_tmp
# [Back to Contents](#Table-of-Contents)
# ### Extracting Conversation Data
chats_datafiles = glob.glob("./alexa/conversations/*.json")
chats_df = pd.concat([pd.read_json(fp).T.drop(['config','conversation_rating'], axis=1)
for fp in chats_datafiles], ignore_index=False)
print(chats_df.shape)
chats_df.head()
# +
#[ic['message'] for ic in chats_df.iloc[0]['content']]
# -
[' '.join(ic['message']) for ic in chats_df.iloc[0]['content']]
# [Back to Contents](#Table-of-Contents)
# ### Talking Points Data
#https://github.com/alexa/Topical-Chat/blob/master/src/wiki/wiki.json
with open("alexa/wiki/wiki.json", "r") as f:
wiki_data = json.load(f)
shortened_wiki_lead_section = wiki_data['shortened_wiki_lead_section']
summarized_wiki_lead_section = wiki_data['summarized_wiki_lead_section']
talking_points = list(shortened_wiki_lead_section.keys())
talking_points_idxs = list(shortened_wiki_lead_section.values())
talking_points[:5]
# + code_folding=[]
#[re.split(r"(?<!^)\s*[.\n]+\s*(?!$)", i) for i in talking_points[:5]]
# -
# [Back to Contents](#Table-of-Contents)
# ### Mapping of Conversations to Talking Points - NOT USED YET
#
# For each conversation, topics labeled as FS1, FS2, FS3 are shown to the partners of the conversation.
#
# https://github.com/alexa/Topical-Chat/tree/master/reading_sets/pre-build
# !wget https://raw.githubusercontent.com/alexa/Topical-Chat/master/reading_sets/pre-build/train.json && mv train.json alexa/reading_sets/
# !wget https://raw.githubusercontent.com/alexa/Topical-Chat/master/reading_sets/pre-build/test_freq.json && mv test_freq.json alexa/reading_sets/
# !wget https://raw.githubusercontent.com/alexa/Topical-Chat/master/reading_sets/pre-build/test_rare.json && mv test_rare.json alexa/reading_sets/
# !wget https://raw.githubusercontent.com/alexa/Topical-Chat/master/reading_sets/pre-build/valid_freq.json && mv valid_freq.json alexa/reading_sets/
# !wget https://raw.githubusercontent.com/alexa/Topical-Chat/master/reading_sets/pre-build/valid_rare.json && mv valid_rare.json alexa/reading_sets/
reading_datafiles = glob.glob("./alexa/reading_sets//*.json")
reading_df = pd.concat([pd.read_json(fp).T.drop(['config','article_url'], axis=1)
for fp in reading_datafiles], ignore_index=False)
print(reading_df.shape)
reading_df.head()
talking_points[talking_points_idxs.index(reading_df.iloc[0]['agent_1']['FS1']['shortened_wiki_lead_section'])]
{'FS1': [reading_df.iloc[i]['agent_1']['FS1']['shortened_wiki_lead_section'] for i in range(len(reading_df))]}
reading_df.iloc[0]['agent_1']
# [Back to Contents](#Table-of-Contents)
# ## Pre-Trained Sentence Encoder
# +
#https://www.sbert.net/docs/installation.html
#these lines below are example for downloading pre-trained models
#embed_bert = SentenceTransformer('paraphrase-xlm-r-multilingual-v1')
#embed_bert = SentenceTransformer('stsb-roberta-large') #textual similarity
#embed_bert = SentenceTransformer('distiluse-base-multilingual-cased-v2')
#the models are downloaded to here
#/Users/atambu/.cache/torch/sentence_transformers/sbert.net_models_paraphrase-xlm-r-multilingual-v1_part
# #copy to desired folder, ex: ./models
# -
# ### Example of Using Encoder
# +
# this is equivalent to Google Universal Sentence Encoder
embed_bert = SentenceTransformer('../models/sbert.net_models_distiluse-base-multilingual-cased-v2')
# Two lists of sentences
sentences1 = ['The cat sits outside',
'A man is playing guitar',
'The new movie is awesome']
sentences2 = ['The dog plays in the garden',
'A woman watches TV',
'The new movie is so great']
#Compute embedding for both lists
embeddings1 = embed_bert.encode(sentences1, convert_to_tensor=True)
embeddings2 = embed_bert.encode(sentences2, convert_to_tensor=True)
#Compute cosine-similarity
cosine_scores = util.pytorch_cos_sim(embeddings1, embeddings2)
#Output the pairs with their score
for i in range(len(sentences1)):
print("{} \t\t {} \t\t Score: {:.4f}".format(sentences1[i], sentences2[i], cosine_scores[i][i]))
# +
#lower-casing or space removal are not needed with this model. Apparently, some preproc
#is performed internally to the model
util.pytorch_cos_sim(embed_bert.encode(["A man is playing guitar."], convert_to_tensor=True),
embed_bert.encode(["A man's playing guitar "], convert_to_tensor=True)).detach().cpu().numpy()
# -
# ### Using Encoder for Chats
talking_points_emb = embed_bert.encode(talking_points, convert_to_tensor=True)
cT_example = [' '.join(ic['message']) for ic in chats_df.iloc[0]['content']]
agent_1_tp = talking_points[talking_points_idxs.index(reading_df.iloc[0]['agent_1']['FS1']['shortened_wiki_lead_section'])]
agent_2_tp = talking_points[talking_points_idxs.index(reading_df.iloc[0]['agent_2']['FS1']['shortened_wiki_lead_section'])]
agent_1_tp
agent_2_tp
cT_example
cT_example_emb = embed_bert.encode(cT_example, convert_to_tensor=True)
# +
scores = []
is_talking_points = {}
sim_mat = util.pytorch_cos_sim(cT_example_emb,
talking_points_emb).detach().cpu().numpy()
# for each sentence find matching phrases above threshold.
# In a chat mutiple talking points can be mentioned
for j in range(sim_mat.shape[0]):
sim_idx = np.where(sim_mat[j,]>0.5)[0]
sim_values = [(k, sim_mat[j,k]) for k in sim_idx]
if len(sim_idx)>0:
print(cT_example[j])
print('***', sim_values[0][1], talking_points[sim_values[0][0]])
# -
# [Back to Contents](#Table-of-Contents)
# ## Data Processing
# + code_folding=[3, 18, 33]
@ray.remote
class processText:
    """Ray actor that scans chat transcripts for utterances matching any
    talking point via sentence-embedding cosine similarity.

    Reads the module-level object-store handles `texts_ray` and
    `talking_points_ray` (created with ray.put before the actors are
    instantiated by the driver cell below).
    """
    def __init__(self):
        # Imported inside the actor so each worker process initializes its
        # own TF runtime; tensorflow_text must be imported to avoid crashes.
        import tensorflow as tf
        import tensorflow_hub as hub
        import tensorflow_text #needed to avoid crashes
        # Pull the shared chat texts out of the Ray object store.
        self.texts = ray.get(texts_ray)
        print('loading model')
        # Local copy of the multilingual DistilUSE sentence encoder.
        self.embed = SentenceTransformer('../models/sbert.net_models_distiluse-base-multilingual-cased-v2')
        print('loading model...done')
        # Pre-encode all talking points once per actor.
        self.talking_points_emb = self.embed.encode(ray.get(talking_points_ray), convert_to_tensor=True)

    def clean_sentences(self, cT):
        """Normalize a chat (list of utterance strings) and filter short chats.

        Returns [] for chats with fewer than 3 utterances; otherwise the
        list with whitespace collapsed.
        """
        #create list of sentences from text
        #cT = [s.lower() for s in cT] #lower case
        # NOTE(review): the inner condition tests the whole utterance's word
        # count, so utterances with <=5 words become empty strings rather
        # than being dropped from the list -- confirm this is intended.
        cT = [" ".join([w for w in s.split() if len(s.split())>5]) for s in cT] #extra spaces, at least 5 words
        if len(cT)==0:
            return []
        if len(cT)<3: # at least 3 sentences
            return []
        else:
            return cT

    def get_points(self, text_idxs, phrase_thr=0.60):
        """Find utterance / talking-point matches for a chunk of chats.

        text_idxs: (start, end) range of chat indices to process.
        phrase_thr: cosine-similarity threshold for reporting a match.
        Returns {(chat_index, utterance): [(talking_point_idx, sim), ...]}.
        """
        is_talking_points = {}
        print('start', text_idxs)
        for i in range(text_idxs[0], text_idxs[1]):
            cT = self.texts[i]
            cT = self.clean_sentences(cT)
            if len(cT)==0:
                continue
            emb_cT_i = self.embed.encode(cT, convert_to_tensor=True)
            # similarity: rows = utterances, columns = talking points
            sim_mat = util.pytorch_cos_sim(emb_cT_i,
                                           self.talking_points_emb).detach().cpu().numpy()
            # loop over individual sentences and check similarity to each of the phrases
            #best_match = np.unravel_index(np.argmax(sim_mat, axis=None), sim_mat.shape)
            #best_match_value = sim_mat[best_match]
            #if best_match_value>phrase_thr:
            #    is_talking_points.update({(i,cT[best_match[0]]): [(best_match[1], best_match_value)]})
            # for each sentence find matching phrases above threshold.
            # In a chat multiple talking points can be mentioned
            for j in range(sim_mat.shape[0]):
                sim_idx = np.where(sim_mat[j,]>phrase_thr)[0]
                sim_values = [(k, sim_mat[j,k]) for k in sim_idx] # this is for debugging purposes, later just select best matching phrase
                if len(sim_idx)>0:
                    is_talking_points.update({(i,cT[j]): sim_values})
        print('end', text_idxs)
        return is_talking_points
# -
ray.shutdown()
# [Back to Contents](#Table-of-Contents)
# ### Processing Chats
all_chats = []
for i in chats_df['content'].values :
all_chats_tmp = []
for j in i:
all_chats_tmp.append(' '.join(j['message']))
all_chats.append(all_chats_tmp)
len(all_chats)
all_chats[1]
# +
# %%time
# Fan the chat corpus out over one processText actor per physical core.
num_cpus = psutil.cpu_count(logical=False)
max_chats = len(all_chats)
n_cells = 100 #chats per chunk, i.e. per single remote call
print('Processing', max_chats, 'chats')
# split task into contiguous (start, end) index chunks
run_list_chnks = np.linspace(0, max_chats, int(max_chats/n_cells), dtype=int)
run_list_chnks = [(run_list_chnks[i],run_list_chnks[i+1]) for i in range(0,len(run_list_chnks)-1)]
# init ray
ray.init(num_cpus=num_cpus)
phrase_thr = 0.50
# Place the shared inputs in the object store once; the actors read the
# globals texts_ray / talking_points_ray in their constructors.
texts_ray = ray.put(all_chats[:max_chats])
talking_points_ray = ray.put(talking_points)
pT = [processText.remote() for _ in range(num_cpus)]
# every 'num_cpus' jobs, start from worker 0 again (round-robin dispatch)
result_ids = [pT[i % num_cpus].get_points.remote(run_list_chnks[i],
                                                 phrase_thr=phrase_thr) for i in range(len(run_list_chnks))]
# Fetch the results (blocks until all chunks finish).
#results = ray.get(result_ids)
results_c = ray.get(result_ids)
ray.shutdown()
# -
ray.shutdown()
# #### Example of Processed Chat Data
results_c[0] # (chat index, utterance text above threshold): [(talking point 1, similarity to utterance),(talking point 2, similarity to utterance)]
talking_points[35]
# [Back to Contents](#Table-of-Contents)
# ## Analyzing Processed Data
#
# For each talking point in the table below, we randomly sample few transcript extracts to check reliability of extract retrieval approach used in this notebook
# + code_folding=[0]
def analyze_results(results, n_texts, phrase_thr, verbose=True):
    """Summarize the actor results into a per-talking-point usage table.

    results: list of dicts as returned by processText.get_points, keyed by
        (chat_index, utterance) with values [(talking_point_idx, sim), ...].
    n_texts: total number of chats processed (denominator for 'Perc. Used').
    phrase_thr: similarity threshold; only best matches above it are counted.
    Returns a DataFrame with usage percentages, sampled example extracts,
    and a 'Human Corrected' column to be filled in manually.
    """
    talking_points_cnts = {k: 0 for k in talking_points}
    # loop over processed chunks and extract statistics
    for rr in range(len(results)):
        for chat_no_extract,tp_no_sim in results[rr].items():
            chat_no, chat_extract = chat_no_extract[0], chat_no_extract[1]
            tp_no_sim_sorted = sorted(tp_no_sim, key=lambda x: x[1], reverse=True) # sort by similarity values
            for kk in tp_no_sim_sorted[:1]: # take best
                tp_no = kk[0]
                tp_sim = kk[1]
                if tp_sim>phrase_thr:
                    if verbose:
                        # NOTE(review): `df` is not defined anywhere in this
                        # notebook, so verbose=True raises NameError here --
                        # looks carried over from another project; confirm.
                        print('------> Chat no. ', chat_no, 'Type: ', df.chatQueueLob[chat_no])
                        print('---> Extract:', chat_extract)
                        print('---> Phrase ', talking_points[tp_no])
                        print('---> Similarity index', tp_sim)
                    talking_points_cnts[talking_points[tp_no]] += 1
    #extract some examples of match
    sample_extracts = {tp: [] for tp in talking_points}
    for tp in talking_points:
        # NOTE(review): re-scanning all results once per talking point is
        # O(#talking_points * #matches); acceptable at this data scale.
        for rr in range(len(results)):
            for chat_no_extract, tp_no_sim in results[rr].items():
                chat_no, chat_extract = chat_no_extract[0], chat_no_extract[1]
                tp_no_sim_sorted = sorted(tp_no_sim, key=lambda x: x[1], reverse=True)
                for kk in tp_no_sim_sorted[:1]: # take best
                    tp_no = kk[0]
                    tp_sim = kk[1]
                    if tp_sim>phrase_thr and tp_no==talking_points.index(tp):
                        sample_extracts[tp].append('**' + chat_extract)
                        #print('----->', tp)
                        #print('---> Extract:', chat_extract)
        if verbose:
            # NOTE(review): sample() is called again on the next line, so the
            # printed count and the kept subset are different random draws.
            print(tp, len(sample_extracts[tp]), len(sample(sample_extracts[tp],min(20, len(sample_extracts[tp])))))
        sample_extracts[tp] = '\n'.join(sample(sample_extracts[tp],min(20, len(sample_extracts[tp])))) # sample up to 20 examples
        #sample_extracts[tp] = '\n'.join(sample_extracts[tp]) # keep all examples
    sample_extracts = [v for k,v in sample_extracts.items()]
    ## put in excel
    results_df = pd.DataFrame({'Talking Points': [k for k,v in talking_points_cnts.items()],
                               'Perc. Used': [100*v/n_texts for k,v in talking_points_cnts.items()],
                               'Sample Extracts': sample_extracts,
                               'Human Corrected': [0 for k in talking_points_cnts]})# randomly sample 20 extracts for each talking point and visually inspect how many are right
    #results_df.to_excel('extracts.xlsx')
    return results_df
# -
# [Back to Contents](#Table-of-Contents)
# ### Analyzing Chats
results_c_df = analyze_results(results_c, max_chats, phrase_thr=0.5, verbose=False)
results_c_df
# +
#results_c_df['Perc. Used'].sum()
# -
i = 0
results_c_df.iloc[i,0], results_c_df.iloc[i,2].split('\n')
# +
i = 2
results_c_df.iloc[i,0], results_c_df.iloc[i,2].split('\n')
# -
# [Back to Contents](#Table-of-Contents)
# ## Interactive Visualization of Results
# + code_folding=[]
embed = SentenceTransformer('../models/sbert.net_models_distiluse-base-multilingual-cased-v2')
# + code_folding=[]
talking_points_emb = embed.encode(talking_points, convert_to_tensor=True)
# + code_folding=[1]
# input text gets extra slash in front of \n, so we added \\n for splitting sentences
def get_points(cT, phrase_thr=0.60):
    """Print each utterance in `cT` whose best-matching talking point
    exceeds the similarity threshold.

    cT: one string with utterances separated by literal '\\n' (the widget
        escapes the newline, hence the two-character separator).
    Uses the module-level `embed`, `talking_points_emb` and `talking_points`.
    """
    #print(len(cT.split('\\n')))
    utterances = cT.split('\\n')
    if not utterances:
        return ''
    utter_emb = embed.encode(utterances, convert_to_tensor=True)
    # similarity: rows = utterances, columns = talking points
    sim_mat = util.pytorch_cos_sim(utter_emb,
                                   talking_points_emb).detach().cpu().numpy()
    # for each utterance, report the best talking point above threshold
    for row_idx, utterance in enumerate(utterances):
        row = sim_mat[row_idx, ]
        candidates = [(col, row[col]) for col in np.where(row > phrase_thr)[0]]
        candidates.sort(key=lambda pair: pair[1], reverse=True)
        if candidates:
            best_col, best_sim = candidates[0]
            print('**utterance:', utterance)
            print('Talking Point:', talking_points[best_col])
            print('Similarity: ', best_sim)
# -
print('**Available talking points**')
print('')
for i in talking_points:
print(i)
#Chats
i=1
cT = all_chats[i]
print("\\n".join(cT) )
# Interactive widget: paste a chat (utterances separated by \n) and tune
# the similarity threshold; matches are printed by get_points.
@interact_manual(in_text='', phrase_thr=widgets.FloatSlider(min=0.5, max=0.8, step=0.1, value=0.5))
def g(in_text, phrase_thr):
    """Widget callback: run talking-point matching on the pasted text."""
    return get_points(in_text, phrase_thr)
# [Back to Contents](#Table-of-Contents)
| topical_chats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 文字识别
# + active=""
# 第一步、导入库
# -
# -*- coding: UTF-8 -*-
from aip import AipOcr
from xugu import Pin
import time
# + active=""
# 第二步、初始化LED
# -
led = Pin(13, Pin.OUT)  # LED wired to pin 13, configured as an output
led.write_digital(0) # start with the LED on pin 13 switched off
time.sleep(1)
# + active=""
# 第三步、设置认证信息
#
# 注:这里用的是测试账号,有访问次数的限制,请使用自己的账号信息。
# -
""" 你的 APPID AK SK """
APP_ID = '17592596'
API_KEY = 'WVIgzetjM6CZzN6kBcWf5MqS'
SECRET_KEY = '<KEY>'
# + active=""
# 第四步、初始化AipFace对象
# -
aipOcr = AipOcr(APP_ID, API_KEY, SECRET_KEY)
# + active=""
# 第五步、读取图片,图片不能过大,最好100KB左右
# +
filePath = "num.png"
def get_file_content(filePath):
    """Return the raw bytes of the file at `filePath` (the image to OCR)."""
    with open(filePath, mode='rb') as image_file:
        data = image_file.read()
    return data
# + active=""
# 第六步、定义参数变量
# -
# Recognition options for the handwriting OCR request.
# NOTE(review): per Baidu OCR docs, 'big' granularity skips per-character
# positions and 'number' restricts recognition to digits -- confirm against
# the current API reference.
options = {
    'recognize_granularity': 'big',
    'words_type': 'number',
}
# + active=""
# 第六步、调用手写文字识别接口
# -
# Send the image bytes to the handwriting-recognition endpoint and print
# every recognized line; `number` ends up holding the last line's text.
result = aipOcr.handwriting(get_file_content(filePath), options)
print(result)
words_result=result['words_result']
for i in range(len(words_result)):
    number = words_result[i]['words']
    print(number)
# + active=""
# 第七步、根据识别的数字,去闪烁相应的次数
# -
# Blink the LED as many times as the recognized digit.
num = int(number)
while num > 0:
    # Toggle the LED on and off repeatedly to produce the blinking effect.
    led.write_digital(1) # light the LED connected to pin 13
    time.sleep(1) # keep it lit for 1 second
    led.write_digital(0) # switch the LED off
    time.sleep(0.5) # stay off for 0.5 seconds
    num -= 1
| 课程汇集/虚谷号内置课程目录/9.人工智能综合应用/03.手写数字识别/.ipynb_checkpoints/文字识别-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tfod
# language: python
# name: tfod
# ---
import os
# Name of the fine-tuned model directory and of the TF2 model-zoo checkpoint it starts from.
CUSTOM_MODEL_NAME = 'my_ssd_mobnet_tuned3'
PRETRAINED_MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'
PRETRAINED_MODEL_URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz'
TF_RECORD_SCRIPT_NAME = 'generate_tfrecord.py'
LABEL_MAP_NAME = 'label_map.pbtxt'
# Workspace layout: every directory used by the training pipeline, rooted at ./Tensorflow.
paths = {
    'WORKSPACE_PATH': os.path.join('Tensorflow', 'workspace'),
    'SCRIPTS_PATH': os.path.join('Tensorflow','scripts'),
    'APIMODEL_PATH': os.path.join('Tensorflow','models'),
    'ANNOTATION_PATH': os.path.join('Tensorflow', 'workspace','annotations'),
    'IMAGE_PATH': os.path.join('Tensorflow', 'workspace','images'),
    'MODEL_PATH': os.path.join('Tensorflow', 'workspace','models'),
    'PRETRAINED_MODEL_PATH': os.path.join('Tensorflow', 'workspace','pre-trained-models'),
    'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME),
    'OUTPUT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'export'),
    'TFJS_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfjsexport'),
    'TFLITE_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfliteexport'),
    'PROTOC_PATH':os.path.join('Tensorflow','protoc')
 }
# Key individual files derived from the paths above.
files = {
    'PIPELINE_CONFIG':os.path.join('Tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config'),
    'TF_RECORD_SCRIPT': os.path.join(paths['SCRIPTS_PATH'], TF_RECORD_SCRIPT_NAME),
    'LABELMAP': os.path.join(paths['ANNOTATION_PATH'], LABEL_MAP_NAME)
}
# Requirement for installing necessary dependencies from GitHub
if os.name=='nt':
# !pip install wget
import wget
# Cloning object detection modelpath
if not os.path.exists(os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection')):
# !git clone https://github.com/tensorflow/models {paths['APIMODEL_PATH']}
# +
# Install Tensorflow Object Detection
# Also installing and setting up Protocol Buffers
if os.name=='posix':
# !apt-get install protobuf-compiler
# !cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && cp object_detection/packages/tf2/setup.py . && python -m pip install .
if os.name=='nt':
url="https://github.com/protocolbuffers/protobuf/releases/download/v3.15.6/protoc-3.15.6-win64.zip"
wget.download(url)
# !move protoc-3.15.6-win64.zip {paths['PROTOC_PATH']}
# !cd {paths['PROTOC_PATH']} && tar -xf protoc-3.15.6-win64.zip
os.environ['PATH'] += os.pathsep + os.path.abspath(os.path.join(paths['PROTOC_PATH'], 'bin'))
# !cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && copy object_detection\\packages\\tf2\\setup.py setup.py && python setup.py build && python setup.py install
# !cd Tensorflow/models/research/slim && pip install -e .
# -
# !pip uninstall protobuf matplotlib -y
# !pip install protobuf matplotlib==3.2
# !pip install pillow
# !pip install pyyaml
# !pip install tensorflow --upgrade
# !pip install opencv
# Install any modules the verification script reports as missing.
# Keep running the script until you get an OK message at the end.
# Path to the TF Object Detection API's self-test script.
VERIFICATION_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'builders', 'model_builder_tf2_test.py')
# Verify Installation (the '# !' line below is a jupytext-escaped notebook shell magic)
# !python {VERIFICATION_SCRIPT}
# Restart the kernel if the script errors on modules that you've already installed
# Retriving model from tensorflow model zoo
if os.name =='posix':
# !wget {PRETRAINED_MODEL_URL}
# !mv {PRETRAINED_MODEL_NAME+'.tar.gz'} {paths['PRETRAINED_MODEL_PATH']}
# !cd {paths['PRETRAINED_MODEL_PATH']} && tar -zxvf {PRETRAINED_MODEL_NAME+'.tar.gz'}
if os.name == 'nt':
wget.download(PRETRAINED_MODEL_URL)
# !move {PRETRAINED_MODEL_NAME+'.tar.gz'} {paths['PRETRAINED_MODEL_PATH']}
# !cd {paths['PRETRAINED_MODEL_PATH']} && tar -zxvf {PRETRAINED_MODEL_NAME+'.tar.gz'}
| 1. Installation Master Script.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import PyPDF2
import glob
import os
def merge_pdf_in_dir(dir_path, dst_path):
    """Merge every non-encrypted PDF in *dir_path* (sorted by filename) into *dst_path*."""
    pdf_paths = sorted(glob.glob(os.path.join(dir_path, '*.pdf')))
    merger = PyPDF2.PdfFileMerger()
    for pdf_path in pdf_paths:
        # Encrypted PDFs cannot be appended without a password; skip them.
        if PyPDF2.PdfFileReader(pdf_path).isEncrypted:
            continue
        merger.append(pdf_path)
    merger.write(dst_path)
    merger.close()
merge_pdf_in_dir('data/src/pdf', 'data/temp/sample_dir.pdf')
| notebook/pypdf2_merge_dir.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import relevant packages
import pandas as pd
import requests
from pandas.io.json import json_normalize
import matplotlib.pyplot as plt
import json
# GET HTTP requests for questions tagged as R and Pandas across the Stack Overflow subcommunity, sorted by popularity
stackR = requests.get("https://api.stackexchange.com/2.2/tags/r/info?order=desc&sort=popular&site=stackoverflow")
stackP = requests.get("https://api.stackexchange.com/2.2/tags/pandas/info?order=desc&sort=popular&site=stackoverflow")
# GET HTTP requests for questions tagged as R and Pandas across the Cross Validated subcommunity, sorted by popularity
crossR = requests.get("https://api.stackexchange.com/2.2/tags/r/info?order=desc&sort=popular&site=stats")
crossP = requests.get("https://api.stackexchange.com/2.2/tags/pandas/info?order=desc&sort=popular&site=stats")
# GET HTTP requests for questions tagged as R and Pandas across the Data Science subcommunity, sorted by popularity
dataR = requests.get("https://api.stackexchange.com/2.2/tags/r/info?order=desc&sort=popular&site=datascience")
dataP = requests.get("https://api.stackexchange.com/2.2/tags/pandas/info?order=desc&sort=popular&site=datascience")
# Convert each HTTP response body into a JSON object (Python dict)
stackRjson = stackR.json()
stackPjson = stackP.json()
crossRjson = crossR.json()
crossPjson = crossP.json()
dataRjson = dataR.json()
dataPjson = dataP.json()
# Display one raw JSON payload as a sanity check (notebook cell output)
stackRjson
# Normalize the JSON objects into individual dataframes.
# NOTE(review): json_normalize was imported from pandas.io.json, which is
# deprecated since pandas 1.0 — prefer pd.json_normalize on newer pandas.
stackRdf = json_normalize(stackRjson["items"])
stackRcount = stackRdf["count"][0]  # number of Stack Overflow questions tagged 'r'
stackPdf = json_normalize(stackPjson["items"])
stackPcount = stackPdf["count"][0]
crossRdf = json_normalize(crossRjson["items"])
crossRcount = crossRdf["count"][0]
crossPdf = json_normalize(crossPjson["items"])
crossPcount = crossPdf["count"][0]
dataRdf = json_normalize(dataRjson["items"])
dataRcount = dataRdf["count"][0]
dataPdf = json_normalize(dataPjson["items"])
dataPcount = dataPdf["count"][0]
# Creating a summary dataframe that combines the number of tags for both data science tools across each of the three stack exchange subcommunities
summarydf = pd.DataFrame({'Subcommunity': ["Stack Overflow", "Cross Validated", "Data Science", "All"], 'Number of Questions Tagged as R': [stackRcount, crossRcount, dataRcount, stackRcount+crossRcount+dataRcount], 'Number of Questions Tagged as Pandas': [stackPcount, crossPcount, dataPcount, stackPcount+crossPcount+dataPcount]})
summarydf
# +
# FIX: np.arange is used below but numpy was never imported in this
# notebook, which raises NameError when the cell runs.
import numpy as np

n_groups = 4
vals1 = summarydf["Number of Questions Tagged as R"]
vals2 = summarydf["Number of Questions Tagged as Pandas"]
# Create grouped bar plot: R vs Pandas tag counts per subcommunity
fig, ax = plt.subplots()
index = np.arange(n_groups)  # x locations of the bar groups
bar_width = 0.35
opacity = 0.8
rect1 = plt.bar(index, vals1, bar_width, alpha=opacity, color='b', label='R')
rect2 = plt.bar(index + bar_width, vals2, bar_width, alpha=opacity, color='g', label='Pandas')
plt.xlabel('Stack Exchange Subcommunities')
plt.ylabel('Number of Questions Tagged')
plt.title('Number of Questions Tagged by Data Science Tool based on Subcommunity')
plt.xticks(index + bar_width, ('Stack Overflow', 'Cross Validated', 'Data Science', 'All'))
plt.legend(loc='upper right', bbox_to_anchor=(1.25,1), title="Data Science Tool")
plt.rcParams["figure.figsize"] = [10, 6]
plt.tight_layout()
plt.show()
# -
# <b>Analysis</b>
#
# Based on the graph above and the dataframe's data, it is clear that R is the more popular data science tool over Pandas across all subcommunities (Stack Overflow, Cross Validated, and Data Science). The Stack Overflow community had the greatest number of tags regarding the data science tools; however, this was expected since it is the general community for all questions on Stack Exchange. Additionally, R's popularity in the Cross Validated community, based on its number of tags, also suggests that it is a tool generally used for statistics.
# <b>Conclusion</b>
#
# In conclusion, it can be observed that R is the more popular data science tool across all communities and, as such, it is the most widely used tool in the data science community. Hence, if a person were to enter this industry professionally, they would be required to possess skills and knowledge in R as a necessity over Pandas.
#
# <b>Limitations</b>
#
# Based on research performed by third parties, Python is consistently more popular than R as a data science tool however our conclusions specifically focused on a single package of Python and its comparison to R's overall popularity. As such, a limitation exists in the fact that we are comparing Pandas, a specific package for the Python language, against R which is an entire language in-and-of itself. Hence, R would have been expected to have more tags over Pandas. Therefore, our conclusions are not entirely comparable to the outside research performed however it is interesting to note that as a whole, the most popular data manipulation tool used in Python is less popular than R among data science communities despite the overwhelming dominance of Python in such communities. Based on our data, it was clear to see the divide between R and Pandas tags was the least in the data science community which could be due to this reason as well.
#
# Furthermore, a better method of comparing the two tools would consist of comparing the tags of both languages against each other instead of analysing the mentions of a package against a language. Additionally, another limitation of this study is that we purely looked into the number of tags used for questions to determine the popularity of the data science tool in each community and did not pay attention to the number of answers provided to the questions. Therefore, even if the tool is mentioned more and has a greater level of popularity, this does not necessarily mean that the community has greater knowledge of the tool. As such, focusing on the number of answers instead of the number of tags for each language would be a better method of evaluating the popularity and knowledge of a data science tool within multiple communities.
# <b>Data Source Citation and Explanation</b>
#
# For this project, I accessed all my data through an HTTP GET request that extracted information regarding tagged questions directly from Stack Exchange through their API. The data collected included information regarding the specific tags I was examining and the subcommunity I was attempting to collect information regarding. The data also included information regarding whether the subcommunity was moderated however, the most important information collected from the API consisted of the count of questions within a subcommunity that used the tag being examined.
#
# <b>Citations</b>
#
# - Data-Driven Science. “Python vs R for Data Science: And the Winner Is..” Medium, Medium, 31 Jan. 2018, https://medium.com/@data_driven/python-vs-r-for-data-science-and-the-winner-is-3ebb1a968197.Theuwissen,
# - Martijn. “R Vs Python for Data Science: The Winner Is ...” KDnuggets, https://www.kdnuggets.com/2015/05/r-vs-python-data-science.html.
# - “Stack Exchange API.” Stack Exchange API, https://api.stackexchange.com/.
| Analyzing the popularity of data science tools using Stack Exchange API.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Comparing, Reconciling, and Combining COVID-19 Data Sources
# ### Summary
#
# In this note we use `gs-quant` to compare COVID-19 data sources. To do this, we first retrieve COVID-19-related time
# series data and preprocess it, joining different sources together to analyze confirmed cases.
#
# The contents of this notebook are as follows:
#
# - [1 - Getting Started](#1---Getting-Started)
# - [2 - COVID-19 Data](#2---COVID-19-Data)
# - [3 - Comparing Global Sources](#3---Comparing-Global-Sources)
# - [4 - Comparing US Sources](#4---Comparing-US-Sources)
# - [5 - Comparing subregions, combining with mobility data](#5---Comparing-subregions,-combining-with-mobility-data)
# ### 1 - Getting Started
# Start every session with authenticating with your unique client id and secret. For information on
# how to get setup on GS Quant, see [Getting Started](/covid/guides/getting-started). Below produced
# using gs-quant version 0.8.126
# + pycharm={"is_executing": false}
from gs_quant.session import GsSession, Environment
GsSession.use(client_id=None, client_secret=None, scopes=('read_product_data',))
# -
# ### 2 - COVID-19 Data
# We'll start by defining a general function to load various datasets, which includes regional data, for the past week:
# + pycharm={"is_executing": false}
from gs_quant.data import Dataset
import datetime
# Note: There is data going back to 2019-12-31, you will need to write your own code to batch data fetching
def get_datasets(datasets):
    """Fetch one week of data for each named dataset.

    For every dataset name, pulls rows for the week ending 2020-07-09 and
    augments them with cumulative 'total*' columns derived from the daily
    'new*' columns. Datasets that fail to download are skipped with a message.

    Returns a dict mapping dataset name -> DataFrame indexed by 'date'.
    """
    ds_dict = {}
    # Fixed one-week window (older history must be fetched in batches).
    end = datetime.date(2020, 7, 9)
    start = end - datetime.timedelta(weeks=1)
    for dataset in datasets:
        try:
            df = Dataset(dataset).get_data(start, end)
            # Group keys: whichever region identifiers the dataset has, plus date.
            keys = [x for x in ['countryId', 'subdivisionId'] if x in df.columns] + ['date']
            val_map = {'newConfirmed': 'totalConfirmed', 'newFatalities': 'totalFatalities'}
            vals = [x for x in list(val_map.keys()) if x in df.columns]
            # Sum duplicates per key, then cumulative-sum through time within each
            # top-level group to turn daily 'new*' counts into running totals.
            df_t = df.groupby(keys).sum().groupby(level=0).cumsum().reset_index()[keys + vals].rename(columns=val_map)
            # Join the running totals back onto the raw rows.
            ds_dict[dataset] = df.reset_index().merge(df_t, on=keys, suffixes=('', '_y')).set_index('date')
        except Exception as err:
            # Best-effort: report the failure and continue with the rest.
            print(f'Failed to obtain {dataset} with {getattr(err, "message", repr(err))}')
    return ds_dict
# -
# We create a list of some of the available datasets, and fetch all of them, so that we can compare them.
# + pycharm={"is_executing": false}
country_datasets = [
'COVID19_COUNTRY_DAILY_ECDC',
'COVID19_COUNTRY_DAILY_WHO',
'COVID19_COUNTRY_DAILY_WIKI',
'COVID19_US_DAILY_CDC'
]
df = get_datasets(country_datasets)
# -
# Next we look at the date ranges of each dataset to determine how much history they have, and ensure they are
# up-to-date:
# + pycharm={"is_executing": false, "name": "#%%\n"}
for name, ds in df.items():
print('{:<30} {} {}'.format(name, ds.index[0].date(), ds.index[-1].date()))
# -
# ### 3 - Comparing Global Sources
#
# Below is a general function to compare the time series of certain columns across datasets:
# + pycharm={"is_executing": false}
import re
from typing import Union
def compare_time_series(df, datasets, columns: Union[str, list], grouping: str = 'countryId', suffix_identifier: float = 1):
    """Join one or more columns across datasets for side-by-side comparison.

    Parameters
    ----------
    df : dict[str, DataFrame]
        Mapping of dataset name to its DataFrame (indexed by 'date').
    datasets : list[str]
        Dataset names to join; each must be a key of `df`.
    columns : str or list of str
        Column(s) to keep from every dataset, suffixed per source.
    grouping : str
        Second join key besides 'date' (e.g. 'countryId', 'subdivisionId').
    suffix_identifier : int
        Which '_'-separated ALL-CAPS token of the dataset name, counted from
        the end, identifies the source (1 -> 'WHO' in 'COVID19_COUNTRY_DAILY_WHO').

    Returns
    -------
    DataFrame with `grouping` plus one suffixed column per dataset/column pair.
    """
    columns = [columns] if isinstance(columns, str) else columns

    # Raw string fixes the original '\_' pattern, which was an invalid string
    # escape (DeprecationWarning); '_' needs no escaping in a regex anyway.
    def _suffix(ds_name):
        return '_' + re.findall(r'_([A-Z]+)', ds_name)[-suffix_identifier]

    suffixes = [_suffix(ds_name) for ds_name in datasets]
    df_combo = None
    for ds_name in datasets:
        ds = df[ds_name]
        # First iteration merges the dataset with itself, which duplicates its
        # value columns once with the source suffix (as the original code did).
        df_combo = ds if df_combo is None else df_combo
        df_combo = df_combo.merge(ds, on=['date', grouping], suffixes=('', _suffix(ds_name)))
    return df_combo[[grouping] + [column + suffix for suffix in suffixes for column in columns]]
# -
# For example, if we want to compare the time series for total confirmed cases across the WHO, ECDC, and Wikipedia
# datasets globally, we can do the following:
# + pycharm={"is_executing": false, "name": "#%%\n"}
datasets = ['COVID19_COUNTRY_DAILY_ECDC', 'COVID19_COUNTRY_DAILY_WHO', 'COVID19_COUNTRY_DAILY_WIKI']
df_to_compare = compare_time_series(df, datasets, columns='totalConfirmed')
df_to_compare.describe().style.background_gradient(cmap='Blues', axis=1).format('{:,.2f}')
# + [markdown] pycharm={"name": "#%% md\n"}
# This shows statistical properties for each dataset for all common countries and dates. As we can see, there's some
# variation in the data sources. Let's dig in a little further and plot the relationship between the WHO and ECDC for
# a number of countries:
# +
import seaborn as sns
select_countries = ['GB', 'DE', 'IT', 'ES', 'FR', 'RU']
to_plot = df_to_compare[df_to_compare.countryId.isin(select_countries)]
sns.lmplot(x="totalConfirmed_ECDC", y="totalConfirmed_WHO", col="countryId", data=to_plot, col_wrap=3, height=3, fit_reg=False);
# + [markdown] pycharm={"name": "#%% md\n"}
# As we can see, there is some dispersion between sources for certain countries. For information on the various ISO
# country codes, see [this guide](https://developer.gs.com/docs/covid/guides/standards/iso-countries/).
#
# ### 4 - Comparing US Sources
#
# Now let's take a closer look at the US data, adding in the CDC dataset:
# + pycharm={"is_executing": false, "name": "#%%\n"}
datasets = ['COVID19_US_DAILY_CDC', 'COVID19_COUNTRY_DAILY_ECDC', 'COVID19_COUNTRY_DAILY_WHO', 'COVID19_COUNTRY_DAILY_WIKI']
df_to_compare = compare_time_series(df, datasets, columns='totalConfirmed')
df_to_compare.describe().style.background_gradient(cmap='Blues',axis=1).format('{:,.2f}')
# -
# As of 21 of May 2020, CDC had the most confirmed cases, followed by Wikipedia, and then ECDC and WHO. This is not
# overly surprising given the information collection and validation flows. Now let's examine the last few points:
# Now let's compare all the series side by side:
# + pycharm={"is_executing": false, "name": "#%%\n"}
df_to_compare.plot(figsize=(10, 6), title='US')
# +
import matplotlib.pyplot as plt
(df_to_compare['totalConfirmed_WHO']-df_to_compare['totalConfirmed_ECDC']).plot(figsize=(10, 6), title='Differences vs WHO',
label='ECDC')
(df_to_compare['totalConfirmed_WHO']-df_to_compare['totalConfirmed_CDC']).plot(label='CDC')
(df_to_compare['totalConfirmed_WHO']-df_to_compare['totalConfirmed_WIKI']).plot(label='WIKI')
plt.legend()
# -
# This chart illustrates how the ECDC and CDC map cases versus the WHO. At the start of the epidemic these sources were
# much closer, and diverged over time, with CDC leading in reporting for the US versus the ECDC and WHO.
# ### 5 - Comparing subregions, combining with mobility data
# Finally, we illustrate how to compare datasets for specific countries (in this case, Italy) at different levels of granularity (region, province, etc.) and how to combine epidemic data with mobility data from Google.
#
# As before, we fetch data for Italy, at three levels of granularity.
datasets = ['COVID19_ITALY_DAILY_DPC', 'COVID19_REGION_DAILY_DPC', 'COVID19_PROVINCE_DAILY_DPC']
df = get_datasets(datasets)
# +
df_to_compare = compare_time_series(df, datasets, columns='totalConfirmed', suffix_identifier=3)
df_to_compare.describe().style.background_gradient(cmap='Blues',axis=1).format('{:,.2f}')
# -
# We write a function to compare the data across different geographic subdivisions.
# +
from functools import reduce
import pandas as pd
def compare_totals_across_breakdowns(df, data1, data2, column_to_check):
    """Difference of summed `column_to_check` between two geographic breakdowns.

    `data1` and `data2` are (dataset_name, grouping_column) pairs; `df` maps
    dataset name -> DataFrame indexed by date. Returns a one-column DataFrame
    of (sum over data1 groups) - (sum over data2 groups), restricted to the
    dates that every group of both datasets has in common.
    """
    # pick the common indices between the data being compared:
    # intersect the date index across every group of each dataset
    common_idx_province = reduce(lambda x, y: x & y,
                              df[data1[0]].groupby(data1[1]).apply(lambda x: x.index).tolist())
    common_idx_region = reduce(lambda x, y: x & y,
                              df[data2[0]].groupby(data2[1]).apply(lambda x: x.index).tolist())
    idx = common_idx_province & common_idx_region
    # calculate the per-date difference of the group sums, and rename the column
    diff = df[data1[0]].groupby(data1[1]).apply(lambda x : x.loc[idx][column_to_check]).T.apply(sum,axis=1) -\
        df[data2[0]].groupby(data2[1]).apply(lambda x : x.loc[idx][column_to_check]).T.apply(sum,axis=1)
    diff = pd.DataFrame(diff).rename(columns={0: f'{data1[0]}-{data2[0]}'})
    return diff
# Country vs region totals, and region vs province totals, for Italy (DPC data).
diff1 = compare_totals_across_breakdowns(df, ('COVID19_ITALY_DAILY_DPC','countryId'),
                              ('COVID19_REGION_DAILY_DPC','subdivisionId'),'totalConfirmed')
diff2 = compare_totals_across_breakdowns(df, ('COVID19_REGION_DAILY_DPC','subdivisionId'),
                              ('COVID19_PROVINCE_DAILY_DPC','administrativeRegion'),'totalConfirmed')
# -
# We plot the discrepancies below...
# +
to_plot = diff1.join(diff2)
sns.lmplot(x="COVID19_ITALY_DAILY_DPC-COVID19_REGION_DAILY_DPC", y="COVID19_REGION_DAILY_DPC-COVID19_PROVINCE_DAILY_DPC",
data=to_plot)
# -
# ... and interestingly, this indicates there is no discrepancy at all when we compare country-level aggregate data with region-level aggregate data, but we do see discrepancies when we compare province-level with region-level data.
# Finally, we illustrate how to join region-level data from Italy with mobility data from Google, which allows us to check, for example, how the increase in cases of COVID-19 affected mobility patterns in the population.
# +
from datetime import datetime
df_mob = Dataset('COVID19_SUBDIVISION_DAILY_GOOGLE').get_data(start_date=datetime(2020,2,1).date(), countryId='IT')
# -
df_mob.head(2)
# We now join mobility data with region-level data in Italy, for the subdivision of Liguria.
# +
def join_dfs(subdivision, mobility, column_to_compare):
    """Join region-level epidemic data with Google mobility data on date.

    NOTE(review): reads the notebook globals `df` (region-level DPC data)
    and `df_mob` (Google mobility data) instead of taking them as
    parameters — both must be defined before calling.
    """
    df_red = df['COVID19_REGION_DAILY_DPC'][df['COVID19_REGION_DAILY_DPC']['subdivisionId'] == subdivision]
    subdivision_name = df_red['subdivisionName'].unique()[0]
    df1 = df_red[[column_to_compare]]
    # Mobility rows for this subdivision and the requested mobility category.
    df2 = df_mob[df_mob.subdivisionId.isin([subdivision]) & df_mob.group.isin([mobility])]
    df_joint = df1.merge(df2, on='date')
    return df_joint, mobility, subdivision_name
df_joint, mobility, subdivision_name = join_dfs('IT-42', 'retailRecreation', 'newConfirmed')
# -
# Finally, we plot a chart comparing mobility data with data on the growth of the epidemic.
df_joint['Change in Mobility'] = df_joint['value'].diff()
df_joint[['newConfirmed','value']].rename(columns={'newConfirmed':'New Confirmed Cases',
'value':f'Change in {mobility} mobility'}).plot(figsize=(10, 7,), grid=True,
title=f'Comparison of new confirmed cases and mobility in {subdivision_name}')
# We can observe a dramatic drop in mobility as the rate of new cases began to increase, a pattern that persisted during the peak of the epidemic in the region of Liguria.
#
# Please reach out to `<EMAIL>` with any questions.
# ### Disclaimer
# This website may contain links to websites and the content of third parties ("Third Party Content"). We do not monitor,
# review or update, and do not have any control over, any Third Party Content or third party websites. We make no
# representation, warranty or guarantee as to the accuracy, completeness, timeliness or reliability of any
# Third Party Content and are not responsible for any loss or damage of any sort resulting from the use of, or for any
# failure of, products or services provided at or from a third party resource. If you use these links and the
# Third Party Content, you acknowledge that you are doing so entirely at your own risk.
| gs_quant/documentation/00_data/covid/Comparing, Reconciling, and Combining COVID-19 Data Sources.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
#
# Euler deconvolution with a moving window
# ----------------------------------------
#
# Euler deconvolution attempts to estimate the coordinates of simple (idealized)
# sources from the input potential field data. There is a strong assumption that
# the sources have simple geometries, like spheres, vertical pipes, vertical
# planes, etc. So it wouldn't be much of a surprise if the solutions aren't great
# when sources are complex.
#
# Let's test the Euler deconvolution using a moving window scheme, a very common
# approach used in all industry software. This is implemented in
# :class:`fatiando.gravmag.euler.EulerDeconvMW`.
#
#
#
#
# +
from __future__ import print_function
from fatiando.gravmag import sphere, transform, euler
from fatiando import gridder, utils, mesher
import matplotlib.pyplot as plt
# Make some synthetic magnetic data to test our Euler deconvolution.
# The regional field (inclination and declination, in degrees)
inc, dec = -45, 0
# Make a model of two spheres magnetized by induction only
model = [
    mesher.Sphere(x=-1000, y=-1000, z=1500, radius=1000,
                  props={'magnetization': utils.ang2vec(2, inc, dec)}),
    mesher.Sphere(x=1000, y=1500, z=1000, radius=1000,
                  props={'magnetization': utils.ang2vec(1, inc, dec)})]
print("Centers of the model spheres:")
print(model[0].center)
print(model[1].center)
# Generate some magnetic data from the model on a regular grid
# (z=-150 means 150 m above the ground in this coordinate convention)
shape = (100, 100)
area = [-5000, 5000, -5000, 5000]
x, y, z = gridder.regular(area, shape, z=-150)
data = sphere.tf(x, y, z, model, inc, dec)
# We also need the derivatives of our data (required inputs for Euler deconvolution)
xderiv = transform.derivx(x, y, data, shape)
yderiv = transform.derivy(x, y, data, shape)
zderiv = transform.derivz(x, y, data, shape)
# Now we can run our Euler deconv solver on a moving window over the data.
# Each window will produce an estimated point for the source.
# We use a structural index of 3 to indicate that we think the sources are
# spheres.
# Run the Euler deconvolution on moving windows to produce a set of solutions
# by running the solver on 10 x 10 windows of size 1000 x 1000 m
solver = euler.EulerDeconvMW(x, y, z, data, xderiv, yderiv, zderiv,
                             structural_index=3, windows=(10, 10),
                             size=(1000, 1000))
# Use the fit() method to obtain the estimates
solver.fit()
# The estimated positions are stored as a list of [x, y, z] coordinates
# (actually a 2D numpy array)
print('Kept Euler solutions after the moving window scheme:')
print(solver.estimate_)
# Plot the solutions on top of the magnetic data. Remember that the true depths
# of the center of these sources is 1500 m and 1000 m.
plt.figure(figsize=(6, 5))
plt.title('Euler deconvolution with a moving window')
plt.contourf(y.reshape(shape), x.reshape(shape), data.reshape(shape), 30,
             cmap="RdBu_r")
plt.scatter(solver.estimate_[:, 1], solver.estimate_[:, 0],
            s=50, c=solver.estimate_[:, 2], cmap='cubehelix')
plt.colorbar(pad=0).set_label('Depth (m)')
plt.xlim(area[2:])
plt.ylim(area[:2])
plt.tight_layout()
plt.show()
| _downloads/euler_moving_window.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Scraping LQ45 IDX Stock Price using DataReader
# import packages
from pandas_datareader.data import DataReader
import pandas as pd
from datetime import date # Date & time functionality
import numpy as np
# import stock code
lq45 = pd.read_csv('lq45.csv', header=None, usecols=[0], names=['nama'])
lq45.head()
# add stock code + .JK
lq45["IDX"] = lq45["nama"].map(str) + [".JK"]
lq45.head()
# start and end date of the price history to download
start = date(2015, 1, 1) # Default: Jan 1, 2010
end = date(2019, 3, 5) # Default: today
# Scrape daily price data from Yahoo Finance for each LQ45 ticker
ticker = lq45['IDX']
data_source = 'yahoo'
price = []
for i in ticker:
    data = DataReader(i, data_source, start, end)
    data['stock'] = i  # tag each frame with its ticker before concatenation
    price.append(data)
# save stock data to csv with tab ('\t') separator and 'utf-8' encoding
# (original comment wrongly said '\' separator)
stock_price = pd.concat(price)
stock_price.to_csv('stock_lq45.csv', sep='\t', encoding='utf-8')
# read data head
print(stock_price.head())
| scrap_stock_price.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Finding the Data
#
# We need to install [newsapi-python](https://github.com/mattlisiv/newsapi-python) package. We can do this by entering ! in the beginning of a cell to directly access to the system terminal. Using exclamation mark is an easy way to access system terminal and install required packages as well undertake other work such as finding paths for working directory or other files.
# ```
# $ pip install newsapi-python
# ```
# After installing the package, we can start to send queries to retrive data. First we need to import NewsApiClient from _newsapi_ module.
from newsapi import NewsApiClient
# We need to use the key from the News API application that we created earlier. In order not to expose your secret and key, it is good practice to save them in a separate Python file. Then we can import that file here and attach the values to variables to prevent exposure. I save mine in a file called __'nws_token.py'__. Using the code below, I __import__ the key and secret string objects __from__ the nws_token module that I created.
#
# In Python there are various ways to __import__ a module, here are some examples.
#
# ```Python
# import module #method 1
# from module import something #method 2
# from module import * #method 3 imports all
# ```
# If you use the first method, later you need to use the syntax below by first calling the module name then the function/variable name later on:
# ```Python
# x = module.function_name() #if you use the first method
# ```
# Otherwise, you can just call the method/variable from that module by its name. Here, we use the second method to import a variable from a module since there will not be any other variables with the same name that might cause bugs.
#
# After importing the key, we will create an instance of the NewsApiClient object by passing our individual key as a parameter.
#
from nws_token import key
api = NewsApiClient(api_key=key)
# Since we created an instance of _NewsApiClient_ object, we are now ready find the data we are looking for. It is always a good practice to refer to the official documentation to find out what parametres we can pass, and what kind of data we can retrive. You can reach the official documentation of News API [here!](https://newsapi.org/docs) After reading through the documentation, we have a better understanding of the parameters we want to use.
#
# Now, let's try to retrive all 100 most recent news articles mentioning 2020 Taiwan Presidential Elections and save all into a __dictionary__ object called _articles_.
#
# Fetch the 100 most recent matching articles: 5 pages of 20 results each,
# keyed as 'page1' ... 'page5'.
articles = {}
for i in range(1,6):
    articles.update({'page'+str(i): (api.get_everything(q='Taiwan AND Elections',
                                               language= 'en', page = i))})
# All the information of the articles are now saved in our dictionary object called _articles_. It has a nested data structure that the iteration above saved every 20 articles for each page. As it stands, _articles_ does not have much use for us. It is complex, hard to read data object with numerous information for each article(i.e. date posted, author, source, abstract, full content,). If you want to take a look just run this code in an empty cell:
# ```Python
# print(articles)
# ```
# Looks complex and hard to read! As an example, let's take a look at the data on one article.
print(articles['page1']['articles'][0])
# __It is still complicated but gives a better view on the available data. Given that we have 100 of such a data, we need to manipulate and filter these information into a more useful form.__
# News API does not provide the full content of the articles. We need to use web scraping to retrieve the full content of each article. For now, we can use a function to parse the results and save only the fields we need: Title, Source, Publication Date, Description, and the URL.
#
# #### Functions in Python
#
# Functions are the fundamental programming tools that enables to wrap several statements and procudes the values that we desire. They make it easy for the code reusability and recyclability. For this workshop, it is sufficient just to grasp the basics of the functions in Python. A Function code usually basically looks like this:
# ```Python
# def func_name(args):
# statement
# return result
# ```
# You can also use 'yield' instead of return if your function is a generator. But it is a more advanced technique that we will not use in this workshop. After you define the function, you need to call it by its name, and if required you can bind the returned object to variable.
# ```Python
# func_name() ## calls the function
# x = func_name() ## binds the returned object to a variable called x
# ```
# Functions are a rich and powerful way in Python, and I recommend you to read more about them.
#
# We will now use a function to grap the information we need from the articles.
# Let's __```import datetime```__ and __```dateutil.parser```__ modules for formatting existing publication date into a more readable format.
from dateutil.parser import parse
from datetime import datetime
# Let's first create a helper function to make the publication date more readable.
def reformat_date(date):
    """Parse *date* (any string dateutil's parser understands, e.g. the
    ISO timestamps returned by News API) and return it reformatted as
    'DD-MonthName-YYYY', e.g. '05-January-2020'.
    """
    return parse(date).strftime("%d-%B-%Y")
# Now, we will create another helper function to prevent duplicate articles appearing in our dataset.
# +
def check_duplicate(dataset, title):
    """Return True if an article with the given title is already in *dataset*.

    Parameters
    ----------
    dataset : list of dict
        Articles collected so far; each dict carries a 'title' key.
    title : str
        Title of the candidate article.

    Returns
    -------
    bool
        True when a duplicate exists, False otherwise.  (The original
        fell off the end of the loop and implicitly returned None on the
        no-duplicate path; callers using ``if not ...`` worked only by
        accident of None being falsy.)
    """
    return any(item['title'] == title for item in dataset)
# -
# Since News API does not provide the full text of the articles, we need a web-scraping function to retrieve the full text of each article. We need to import __```requests```__ and __```BeautifulSoup```__ packages.
import requests
from bs4 import BeautifulSoup
# Now we can write another helper function to retrieve the full text of the articles. Since we might face errors and exceptions while retrieving the full text from a website, it is important to catch the possible exceptions and handle them to prevent our application from breaking. We can do this using this syntax:
# ```Python
# try:
# some_code()
# except:
# some_exception_handling()
# ```
# Below, we use __"```Exception as e```"__ expression so that we can print the properties of the error to be able to handle it better next time.
# +
def get_fulltext(url):
    """Download *url* and return the concatenated text of all <p> tags.

    Returns None (after printing the error) when the request or parsing
    fails, so a single bad article does not abort the whole scrape.
    """
    # Some sites block the default python-requests client, so present a
    # browser-like User-Agent.  (Fixed typo: 'Cafari' -> 'Safari'.)
    HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5)'
               ' AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36'}
    try:
        page = requests.get(url, headers=HEADERS)
        soup = BeautifulSoup(page.content, 'html.parser')
        # str.join is linear; the original built the article with repeated
        # string += concatenation (quadratic).  get_text() already returns
        # str, so the extra str() wrapper was redundant.
        return ''.join(p.get_text() for p in soup.find_all('p'))
    except Exception as e:  # deliberately broad: best-effort scraping
        print(e)
        return None
# -
# We can now create a function to extract the information we want in a readable way.
# +
def article_extract(articles):
    """Flatten the paged News API response into a list of article dicts.

    Parameters
    ----------
    articles : dict
        Mapping of page keys ('page1', ...) to raw News API responses,
        each holding an 'articles' list.

    Returns
    -------
    list of dict
        One dict per unique article with keys: title, source, URL,
        description, date (reformatted) and fulltext (scraped).
        Articles whose title was already collected are skipped.
    """
    news_data = []
    # Iterate values directly instead of the original keys() + manual
    # range(len(...)) indexing; bind each article once to avoid repeated
    # deep dictionary lookups.
    for page in articles.values():
        for art in page['articles']:
            if check_duplicate(news_data, art['title']):
                continue  # duplicate title -> skip
            news_data.append({'title': art['title'],
                              'source': art['source']['name'],
                              'URL': art['url'],
                              'description': art['description'],
                              'date': reformat_date(art['publishedAt']),
                              'fulltext': get_fulltext(art['url'])})
    return news_data
# -
# __Now our function is ready for operation. Let's call it and see the first item of the dataset created by our function. It must be more readible with only required fields.__
# Build the cleaned dataset.  NOTE: this scrapes every article URL, so it
# needs network access and can take a while.
data_set = article_extract(articles)
print(data_set[0])
# ***
# It seems from the results that we managed to create our data set. Now we can save it in a comma-separated values (CSV) file to start our analysis. For this, we need to __```import csv```__ module.
import csv
# Write the dataset to CSV.  newline='' is required by the csv module:
# without it, csv.writer on Windows emits a blank row between records
# (see the csv module docs).  utf-8 keeps non-ASCII article text intact.
with open("tw_dataset.csv", 'w', newline='', encoding='utf-8') as file:
    tw_dt = csv.DictWriter(file, data_set[0].keys())
    tw_dt.writeheader()
    tw_dt.writerows(data_set)
# __Our Data Set is saved in our working directory and now ready for exploration and analysis!__
#
# <img src="images/dataset.gif" style="width: 650px" align="middle" />
#
# - __[Previous: Setting the Scene](0 - Setting the Scene.ipynb)__
# - __[Next: Data Exploration](2 - Data Exploration.ipynb)__
| 1 - Finding the Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cd_env] *
# language: python
# name: conda-env-cd_env-py
# ---
# +
import sys
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import random
import math
# -
seed = 123
num_samples = 500
rgen = np.random.RandomState(seed)
# Draw through the seeded generator.  The original called
# np.random.multivariate_normal and left `rgen` unused, so despite the
# seed the samples were not reproducible.
# train: both features with unit variance around mean 5.
train = rgen.multivariate_normal(mean=[5, 5], cov=[[1, 0], [0, 1]], size=num_samples)
train[:5, :]
np.var(train[:, 1])
# test: same mean but inflated variance (3) -- a variance shift.
test = rgen.multivariate_normal(mean=[5, 5], cov=[[3, 0], [0, 3]], size=num_samples)
np.var(test[:, 1])
# +
sns.set(style="darkgrid")
# Marginal density of x1: train (unit variance) vs test (variance 3).
fig = sns.kdeplot(train[:,0], shade=True, color="r", label="x1 - train")
fig = sns.kdeplot(test[:,0], shade=True, color="b", label="x1 - test")
plt.xlim(-3, 12)
plt.show()
# +
# Mean shift: the test mean moves from 5 to 7 with equal variances.
# NOTE(review): these cells draw via np.random.* rather than the seeded
# `rgen` created earlier, so the figures are not reproducible -- confirm intent.
train = np.random.multivariate_normal(mean=[5,5], cov=[[1,0],[0,1]], size=num_samples)
test = np.random.multivariate_normal(mean=[7,5], cov=[[1,0],[0,1]], size=num_samples)
sns.set(style="darkgrid")
fig = sns.kdeplot(train[:,0], shade=True, color="r", label="x1 - train")
fig = sns.kdeplot(test[:,0], shade=True, color="b", label="x1 - test")
plt.xlim(-3, 12)
plt.show()
# +
# Combined shift: both the mean (5 -> 7) and the x1 variance (1 -> 3) change.
train = np.random.multivariate_normal(mean=[5,5], cov=[[1,0],[0,1]], size=num_samples)
test = np.random.multivariate_normal(mean=[7,5], cov=[[3,0],[0,1]], size=num_samples)
sns.set(style="darkgrid")
fig = sns.kdeplot(train[:,0], shade=True, color="r", label="x1 - train")
fig = sns.kdeplot(test[:,0], shade=True, color="b", label="x1 - test")
plt.xlim(-3, 12)
plt.show()
# -
| notebooks/archive/changing-distributions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
## what is pandas loc method based on label
## what is pandas iloc method based on ( integer location-based indexing
# ## Pandas loc
# ### Access a group of rows and columns by labels or a boolean array
#
# +
# DataFrame.loc[]
# -
import pandas as pd
# Load the sample dataset used throughout this loc/iloc walkthrough.
df=pd.read_csv('csv_files/Fortune_10.csv')
df
# .loc selects by *label* (row index labels / column names).
df.loc[0]
df.loc[9]
df.loc[[0,1]]
df.loc[[2,4]]
# Scalar lookup: row label 4, column 'Name'.
df.loc[4,'Name']
# NOTE: .loc slices are label-based and INCLUSIVE of the end label.
df.loc[0:3,'Name']
df.loc[0:2,'Growth']
# Boolean-mask selection.  NOTE(review): modern pandas requires the mask
# length to match the index length; with 10 rows a 3-element mask raises
# IndexError -- confirm this cell runs on the pinned pandas version.
df.loc[[False,False,True]]
# Filter rows by a condition, keeping only the 'Name' column.
df.loc[df['Profit']>1233332,['Name']]
# .iloc selects by integer position (0-based; slices are end-exclusive).
df.iloc[0]
df.iloc[:,0]
# NOTE(review): this repeats the .loc example above -- presumably .iloc
# was intended here; verify against the lesson text.
df.loc[[0,1]]
# NOTE(review): boolean lists with .iloc also require full-length masks.
df.iloc[[True,False,True]]
| 15_Pandas loc and iloc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from examplecode.example import DirectoryTree
# +
# Flat example: map each file name to a trailing comment string
# (empty string = no comment).
directory_list = {
    "sub-01_part-mag_T1w.nii.gz" : "", # leave this value empty for files
    "sub-01_part-mag_T1w.json" : "",
    "sub-01_part-phase_T1w.nii.gz" : " comments can be added here",
    "sub-01_part-phase_T1w.json" : " but padding them could be optimised",
}
tree = DirectoryTree(directory_list)
text = tree.generate()
print(text)
# +
# One level of nesting: each top-level key with a dict value is rendered
# as a directory containing its entries.
directory_dict = {
    "sub-01": { # use nested dictionaries to represent directories
        "sub-01_part-mag_T1w.nii.gz" : "",
        "sub-01_part-mag_T1w.json" : "",
        "sub-01_part-phase_T1w.nii.gz" : "",
        "sub-01_part-phase_T1w.json" : "",
    },
    "sub-02": {
        "sub-02_part-mag_T1w.nii.gz" : "",
    }
}
tree = DirectoryTree(directory_dict)
text = tree.generate()
print(text)
# +
# Arbitrary depth: dictionaries nest to any level of subdirectory.
nested_directory_dict = {
    "sub-01" : {
        "anat": { # you can represent subdirectories by nesting directories
            "sub-01_part-mag_T1w.nii.gz" : "",
            "sub-01_part-mag_T1w.json" : "",
            "sub-01_part-phase_T1w.nii.gz" : "",
            "sub-01_part-phase_T1w.json" : "",
        }
    }
}
tree = DirectoryTree(nested_directory_dict)
text = tree.generate()
print(text)
# +
# Empty dicts stand for directories whose contents are elided; "..." keys
# render literal ellipsis placeholders.
directory = {
    "my_processed_data": {
        "code":{
            "processing_pipeline-1.0.0.img" : "",
            "hpc_submitter.sh" : "",
            "..." : "",
        },
        "sourcedata": {
            "sub-01" : {}, # use empty dictionaries to represent directories without specifying their content
            "sub-02" : {},
            "..." : "",
        },
        "sub-01" : {},
        "sub-02" : {},
        "..." : ""
    }
}
tree = DirectoryTree(directory)
text = tree.generate()
print(text)
# +
# you can also represent files and directories on the same level
# NOTE(review): "ses-02" sits at the top level here, as a sibling of
# "sub-01" rather than inside it -- in BIDS layouts sessions normally
# live under a subject; confirm this placement is intentional.
directory = {
    "dataset_description.json" : "",
    "sub-01" : {
        "sessions.tsv" : "",
        "ses-01" : {
            "anat" : {
                "sub-01_part-mag_T1w.nii.gz" : "",
                "sub-01_part-mag_T1w.json" : "",
                "sub-01_part-phase_T1w.nii.gz" : "",
                "sub-01_part-phase_T1w.json" : "",
            }
        },
        "scans.tsv": "",
    },
    "ses-02": {
        "func": {
            "sub-01_bold.nii.gz" : "",
        }
    }
}
tree = DirectoryTree(directory)
text = tree.generate()
print(text)
| tools/filetree_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# tgb - 11/13/2019 - Testing if rescaling the inputs changes the ability of the network to generalize.
# Architecture = From [], predict []
# Test 1 = Rephrase all water concentrations as RH
# Test 2 = Rephrase all temperature as deviations from moist adiabat
#
# tgb - 11/15/2019 - This notebook is mostly used for pre-processing
# # 0) Imports
# +
from cbrain.imports import *
from cbrain.data_generator import *
from cbrain.cam_constants import *
from cbrain.losses import *
from cbrain.utils import limit_mem
from cbrain.layers import *
import tensorflow as tf
from tensorflow import math as tfm
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
# tf.config.experimental.set_memory_growth(physical_devices[1], True)
# tf.config.experimental.set_memory_growth(physical_devices[2], True)
import xarray as xr
import numpy as np
from cbrain.model_diagnostics import ModelDiagnostics
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as imag
import scipy.integrate as sin
# Machine-specific scratch / raw-data locations and the file prefix used
# by the preprocessing configs below.
TRAINDIR = '/local/Tom.Beucler/SPCAM_PHYS/'
DATADIR = '/project/meteo/w2w/A6/S.Rasp/SP-CAM/fluxbypass_aqua/'
PREFIX = '8col009_01_'
# %cd /filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM
# -
# # 1) Preprocessing
# %cd /export/nfs0home/tbeucler/CBRAIN-CAM
# !python preprocessing-11132019.py -c pp_config/2021_11_30_Norm_For_Nando_Cl_Inv.yml
# !python preprocessing-11132019.py -c pp_config/2021_09_02_Train_Valid_For_Nando.yml
# !python preprocessing-11132019.py -c pp_config/2021_09_02_Test_for_Nando.yml
# !python preprocessing-11132019.py -c pp_config/2021_09_03_Valid_for_Nando_Cl_Inv.yml
# !python preprocessing-11132019.py -c pp_config/2021_09_03_Test_for_Nando_Cl_Inv.yml
# !python preprocessing-11132019.py -c pp_config/2021_09_13_Nando_ClInv_small_data_test.yml
# !python preprocessing-11132019.py -c pp_config/2021_06_16_T_BMSE_input_small_data_test.yml
# !python preprocessing-11132019.py -c pp_config/2021_06_06_RG_Test-0K.yml
# !python preprocessing-11132019.py -c pp_config/2021_06_03_RG_Train_Valid-0K.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_24_RG_PERC_Train_Valid-M4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_24_RG_PERC_Train_Valid-P4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_24_RG_PERC_Test-P4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_24_RG_PERC_Test-M4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_09_PERC_Test-P4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_09_PERC_Test-M4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_09_PERC_Train_Valid-P4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_09_PERC_Train_Valid.yml
# !python preprocessing-11132019.py -c pp_config/2021_04_09_PERC_Test.yml
# !python preprocessing-11132019.py -c pp_config/2021_03_31_O3_T_NSto220_input_small_data_test.yml
# !python preprocessing-11132019.py -c pp_config/2021_03_18_O3_Train_Valid-P4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_03_18_O3_Test-P4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_03_18_O3_Train_Valid-M4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_03_18_O3_Test-M4K.yml
# !python preprocessing-11132019.py -c pp_config/2021_02_01_O3_BCONS_input_small_data_test.yml
# !python preprocessing-11132019.py -c pp_config/2021_03_01_O3_LHFnsDELQ_input_small_data_test.yml
# !python preprocessing-11132019.py -c pp_config/2021_03_01_O3_LHFnsQ_input_small_data_test.yml
# !python preprocessing-11132019.py -c /export/nfs0home/tbeucler/CBRAIN-CAM/pp_config/2021_03_01_O3_LHFnsDELQ_input_small_data_test.yml
# !python preprocessing-11132019.py -c /export/nfs0home/tbeucler/CBRAIN-CAM/pp_config/2021_03_01_O3_LHFnsQ_input_small_data_test.yml
# !python preprocessing-11132019.py -c /export/nfs0home/tbeucler/CBRAIN-CAM/pp_config/2021_02_01_O3_QSATdeficit_input_small_data_test.yml
# !python preprocessing-11132019.py -c /export/nfs0home/tbeucler/CBRAIN-CAM/pp_config/2021_01_24_O3_input_small_data_test
# !python preprocessing-11132019.py -c /export/nfs0home/tbeucler/CBRAIN-CAM/pp_config/2021_01_24_O3_Train_Valid.yml
# !python preprocessing-11132019.py -c /export/nfs0home/tbeucler/CBRAIN-CAM/pp_config/2021_01_24_O3_Test.yml
# !python preprocessing-11132019.py -c /home/t/Tom.Beucler/SPCAM/CBRAIN-CAM/pp_config/147_POG_RHinput_FLUXoutput_test.yml
# !python preprocessing-11132019.py -c /home/t/Tom.Beucler/SPCAM/CBRAIN-CAM/pp_config/118_POG_TfromTSinput_test.yml
# !ls -la -h /local/Tom.Beucler/SPCAM_PHYS/1*
# # 2) Check the preprocessed data
# Paths to the preprocessed normalization files (netCDF) on the cluster.
path_data = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/SPCAM_PHYS/'
path_RGsmall = path_data + '2021_04_18_NORM_RG_small.nc'
path_QSATdeficit = path_data+'2021_02_01_NORM_O3_QSATdeficit_small.nc'
path_TfromNS = path_data+'2021_02_01_NORM_O3_TfromNS_small.nc'
path_Bcons = path_data+'2021_03_17_NORM_O3_BCONS_small.nc'
path_BMSE = path_data + '2021_06_16_NORM_BMSE_small.nc'
data_RGsmall = xr.open_dataset(path_RGsmall)
# data_QSATdeficit = xr.open_dataset(path_QSATdeficit)
# data_TfromNS = xr.open_dataset(path_TfromNS)
# data_Bcons = xr.open_dataset(path_Bcons)
data_Bmse = xr.open_dataset(path_BMSE)
# NOTE(review): data_QSATdeficit, data_TfromNS and data_Bcons are used
# below, but their open_dataset calls above are commented out -- these
# cells raise NameError unless the variables survive from an earlier
# session; confirm before rerunning top-to-bottom.
data_QSATdeficit
data_TfromNS
data_Bmse['var_names']
data_TfromNS['mean'].shape
data_Bmse['mean'][:30].values
data_Bmse['mean'][30:60].values
plt.hist(data_Bmse['mean'])
data_Bcons['mean'][30:60]
data_Bcons['std'][30:60]
# # 3) Preprocessing for PRL paper
# !python preprocessing-11132019.py -c /home/t/Tom.Beucler/SPCAM/CBRAIN-CAM/pp_config/8col_rad_tbeucler_local-RH-test.yml
# !python preprocessing-11132019.py -c /home/t/Tom.Beucler/SPCAM/CBRAIN-CAM/pp_config/8col_rad_tbeucler_local-test.yml
| notebooks/tbeucler_devlog/028_Physical_inputs_experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Binning
#
# This notebook shows the code to implement binning.
# The details of how this works are provided by <NAME>'s slides.
# Here, we take a look at the main problem the technique addresses and we see how the procedure works, graphically.
import pickle # saving Python objects in binary form
import numpy as np
from scipy import sparse as sp
from scipy.sparse import linalg as spla
from matplotlib import pyplot as plt
# In short, what is binning?
# Let's take the Aiyagari model as a framework.
# Every agent is subject to labor endowment shocks and they can accumulate capital.
# Suppose we solve an individual's problem and we obtain a policy function $k'(k, l)$.
# In solving this, we have used a grid $\mathcal{K} \in \{k_0, k_1, \ldots, k_{n-1} \}$.
# Let $k^* \equiv k'(k_i, l_j)$ denote the optimal action for some value of $k_i$ and $l_j$ on the discretized state space.
# Assume that $k^* \notin \mathcal{K}$.
# For simplicity, assume that there exists an index $q$ such that $k_q \leq k^* \leq k_{q+1}$.
# In laymen terms, the policy function is commanding that the agent accumulates capital for a value that is not on the grid, but whose closest points on the grid are $k_q$ and $k_{q+1}$.
#
# Assume that a positive mass $\nu$ of agents happens to require $k^*$.
# Then we can divide $\nu$ in fractions $\omega$ and $1 - \omega$ and assign those fractions to grid points $k_q$ and $k_{q+1}$.
# However, we have to be careful in doing so.
# Remember that the policy function, together with the exogenous processes and the endogenous unconditional distribution over the state space induce an endogenous law of motion for the conditional distribution over the state space.
# When we pin down the fractions $\omega$, we must be careful not to affect this law of motion.
# This is because we are in a rational expectations setting: endogenous law of motions known to the modelist are also known to the agents in the model.
# Those expectations do affect their policy functions.
#
# [Young (2010)](https://doi.org/10.1016/j.jedc.2008.11.010) suggests a simple way of forming $\omega$.
# The intution is simple: how big $\omega$ is depends on how far apart $k^*$ is from $k_q$.
# The farther away, the higher $\omega$.
# In particular, $\omega$ will be proportional (actually, equal) to the distance between $k^*$ and $k_q$ relative to the distance between $k_q$ and $k_{q+1}$.
# This works if the grid over which the problem arises is _linearly spaced_.
# ## The algorithm, in code
#
# The algorithm is relatively simple.
# Based on the explanation above (based on the Aiyagari model), we need three ingredients: the grid $\mathcal{K}$, the value (scalar) $k^*$ and the exogenous transition matrix.
# As we arbitrarily picked $k_i$ and $l_j$, we need to compute weights for every point on the state space grid.
#
# As we can imagine, we are going to build quite big sparse matrices.
# Hence, we can leverage the module `scipy.sparse`.
# Note one (big) limitation of `numpy`: **functions in Numpy are generally unaware of sparse matrices** and hence cannot operate efficiently ([Scipy even warns users](https://docs.scipy.org/doc/scipy/reference/sparse.html#usage-information) that using Numpy on sparse matrices may lead to entirely unpredictable results!).
# Hence, we should try to find functions in the `scipy.sparse` library, which are assured to make efficient use of sparse arrays.
# In the function `sparse_ergodic_distribution` we use the function [`scipy.sparse.linalg.spsolve`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.spsolve.html) to compute the stationary distribution implied by $\Pi$ by solving the system of equations $( I - \Pi' + A ) \pi = b$, where $A$ is a matrix of zeros except for the first row (containing all ones) and $b$ is a vector of zeros except for the first element (that is a one).
def sparse_ergodic_distribution(PiSparse, tol=1e-8):
    """Return the ergodic (stationary) distribution of a Markov chain.

    Solves (I - Pi' + A) pi = b, where A has ones in its first row and b
    is the first unit vector: this replaces one redundant equation of
    pi = Pi' pi with the normalization sum(pi) = 1.

    Parameters
    ----------
    PiSparse : scipy.sparse matrix, shape (n, n)
        Right-stochastic transition matrix.
    tol : float, optional
        Unused; kept for interface compatibility.  spsolve is a direct
        (exact) solver, so no tolerance is needed.

    Returns
    -------
    numpy.ndarray, shape (n,)
        The stationary distribution pi.
    """
    n = PiSparse.shape[0]
    # First row of ones imposes the normalization sum(pi) = 1.
    A = sp.lil_matrix((n, n), dtype=float)
    A[0, :] = np.ones((1, n))
    I = sp.eye(n)
    b = np.zeros(n, dtype=float)
    b[0] = 1.0
    # Convert to CSC before solving: spsolve is efficient on CSC/CSR,
    # while feeding it the result of arithmetic with a LIL operand (as
    # the original did) triggers a SparseEfficiencyWarning and a hidden
    # conversion.
    system = sp.csc_matrix(I - PiSparse.T + A)
    return spla.spsolve(system, b)
# Next, we need to write a function that tries to find the index $q$ such that $k_q \leq k^* \leq k_{q+1}$.
# In doing this, we must be careful not to hit the boundaries of the grid $\mathcal{K}$.
# Additionally, given that we will find such index, we can also compute the weight $\omega$.
def locate(grid, value):
    """Bracket *value* on *grid* and return (q, omega).

    q is the left index of the bracketing pair (grid[q], grid[q+1]) and
    omega is the lottery weight assigned to grid[q]; 1 - omega goes to
    grid[q+1].  Values outside the grid are clamped to the nearest edge.
    """
    points = grid.squeeze()
    # Right edge (or beyond): all mass on the last grid point.
    if value >= points.max():
        return points.size - 2, 0.0
    # Left edge (or beyond): all mass on the first grid point.
    if value <= points.min():
        return 0, 1.0
    # Interior: q is the largest index with points[q] <= value.
    q = int(np.searchsorted(points, value, side='right') - 1)
    # Weight is the relative distance to the *right* neighbour.
    omega = (points[q + 1] - value) / (points[q + 1] - points[q])
    return q, omega
# Finally, we can move on to construct the (enormous) sparse matrix that regulates the transition probabilities taking into account that some agents are going to points not-on-grid.
# Here we follow the notation in Maffezzoli's slides, where `g` corresponds to a representation of the block-diagonal matrix $G$ that governs the transition between one unconditional distribution $\lambda_t$ to the next one $\lambda_{t+1}$.
def qMatrix(k, kOpt, Pi):
    """Assemble the sparse transition matrix over the full state space.

    For each exogenous state and each capital grid point, the optimal
    choice kOpt[i, j] is split between its two bracketing grid points
    using locate() (Young-style binning); the Kronecker product with the
    exogenous transition matrix Pi then couples the endogenous blocks.
    """
    n, m = kOpt.shape
    blocks = []
    for col in range(m):
        # Row i of `weights` is the lottery over next-period capital
        # implied by the policy at state (k[i], l[col]).
        weights = sp.lil_matrix((n, n), dtype=float)
        for row in range(n):
            idx, share = locate(k, kOpt[row, col])
            weights[row, idx] = share
            weights[row, idx + 1] = 1 - share
        blocks.append(weights.tocsr())
    # Mix the per-state endogenous blocks with the exogenous transitions.
    return sp.kron(Pi, sp.eye(n)).dot(sp.block_diag(blocks))
# In the last TA session we solved the Aiyagari model.
# This gave back a policy function and a transition matrix for the exogenous process, among other things.
# To test the code above, we will borrow that policy function, without recomputing everything.
# We can do so with the `pickle` module, which essentially allows us to save files like `.mat`, `.RData` and `.dta` that contain Python objects.
# Load the objects saved by the Aiyagari notebook.
# NOTE: pickle executes arbitrary code on load -- only open trusted files.
with open('./aiyagari_polfun.pkl', mode='rb') as rick:
    stuffFromAiyagari = pickle.load(rick)
l = stuffFromAiyagari['labor'].squeeze()    # labor endowment grid, squeezed to 1-D
k = stuffFromAiyagari['capital'].squeeze()  # capital grid, squeezed to 1-D
Pi = stuffFromAiyagari['exoTrans']          # exogenous transition matrix
k1 = stuffFromAiyagari['polFun'].T          # policy function, transposed to (capital, labor)
# What we do now is simply test our binning algorithm.
# Given the policy function and the transition matrix for the exogeonous process, we have a sense of what the endogeonous distribution evolves over time.
# Adding an initial condition (hidden in `sparse_ergodic_distribution`) is sufficient to generate an ergodic distribution.
# The code we wrote above should yield the endogenous distribution of agents across states.
# Build the full transition matrix and recover the implied ergodic
# distribution, reshaped back onto the (capital x labor) state space.
Q = qMatrix(k, k1, Pi)
ergoDist = sparse_ergodic_distribution(Q).reshape(k1.shape)
# As we can see, the matrix `Q` is quite sparse.
# spy() shows the sparsity pattern.  toarray() densifies Q first, which
# is fine here but can exhaust memory for large state spaces.
fig0, ax0 = plt.subplots(figsize=(12,12))
ax0.spy(Q.toarray())
# The dimensions of the matrix correspond to $\text{vec}(\lambda(k, l))$.
# In this case, the state space has two dimensions, one for the grid of capital and one for the grid of labor endowments.
# The matrix `Q` is right-stochastic (its rows sum to one) and the ergodic distribution associated to the Markov Chain with transition matrix `Q` is the ergodic distribution over the state space.
# Distribution of capital for the first labor state.
fig1, ax1 = plt.subplots(figsize=(12, 5))
ax1.plot(k, ergoDist[:, 0])
| code_examples/binning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tarea 1: Semana 1
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://www.python.org/static/community_logos/python-logo.png" width="200px" height="200px" />
#
# Cada clase que veamos tendrá una tarea asignada, la cual contendrá problemas varios que se pueden resolver con lo visto en clase, de manera que puedas practicar lo que acabas de aprender.
#
# En esta ocasión, dados los problemas que tuvimos, la primer tarea tendrá conceptos de las dos primeras clases.
#
# Para resolver la tarea, por favor cambiar el nombre del archivo a "Tarea1_ApellidoNombre.ipynb", sin acentos ni letras ñ (ejemplo: en mi caso, el archivo se llamaría "Tarea1_JimenezEsteban.ipynb"). Luego de haber cambiado el nombre, resolver cada uno de los puntos en los espacios provistos.
#
# Referencia:
# - https://www.kaggle.com/learn/python
# ___
# ## 1.
# Complete el código
# +
pi = 3.14159 # approximately
diametro = 3
# Create a variable called "radio" (without the quotes), equal to half the diameter
# Print the radius
# Create a variable called "area" (without the quotes), using the formula for the area of a circle: pi times the radius squared
# Print the area
# -
# ## 2.
# Añada código para intercambiar los valores de las variables `a` y `b`, de manera que `a` tome el valor actual de `b` y viceversa.
#
# Mucho cuidado, no se vale escribir directamente los valores. Es decir
#
# `>>> a = [3, 2, 1]`
#
# `>>> b = [1, 2, 3]`
#
# no es una solución válida.
# +
########### Starter code. Do not touch this part ######################
# The variables below are lists.  We will study this type of variable
# next class.  For now, it is enough to know that they are another kind
# of Python variable, like int or float.
a = [1, 2, 3]
b = [3, 2, 1]
######################################################################
# Your code goes here.
# -
# ## 3.
#
# ### a)
# Añadir paréntesis a la siguiente expresión de modo que el resultado sea 1
# Parentheses added so the subtraction happens before the floor division:
# (5 - 3) // 2 == 2 // 2 == 1, as the exercise requires.
(5 - 3) // 2
# ### b)
# Añadir paréntesis a la siguiente expresión de modo que el resultado sea 0
# Parentheses added so the expression evaluates to 0, as required:
# (8 - 3) * (2 - (1 + 1)) == 5 * 0 == 0.
(8 - 3) * (2 - (1 + 1))
# ## 4.
#
# Juan, María y Alberto acordaron juntar todos los dulces que obtuvieran en Halloween y dividirlo en partes iguales para todos. Para que no haya ningún problema y su amistad se conserve, los dulces que sobren despues de dividir en 3 partes iguales serán tirados. Por ejemplo, si entre todos juntan 91 dulces, tomarán 30 cada uno y tirarán 1.
#
# Escribe una expresión aritmética para calcular cuántos dulces deben ser tirados dados los dulces que recojan cada uno.
# +
# Variables representing the number of candies collected by Juan, Maria and Alberto
dulces_juan = 121
dulces_maria = 77
dulces_alberto = 109
# Candies to be thrown away: the remainder after splitting the total
# evenly three ways.  (The original cell left the right-hand side empty,
# which is a SyntaxError.)
dulces_sobrantes = (dulces_juan + dulces_maria + dulces_alberto) % 3
# -
# ## 5.
# Escriba el código para la siguiente función de acuerdo a la descripción en la documentación.
#
# **Asegúrate de probar tu función**
def redondear_a_dos_decimales(num):
    """
    Return the given number rounded to two decimal places.
    >>> redondear_a_dos_decimales(3.14159)
    3.14
    """
    # round() with ndigits=2 implements exactly the documented behaviour.
    # The original stub ('pass') returned None, contradicting the docstring.
    return round(num, 2)
# ## 6.
# Aplicando la función `help()` sobre la función `round()`, dice que el parámetro de entrada `ndigits` puede ser negativo. Elija bien el valor del parámetro `ndigits` para que la siguiente celda imprima el número 2300.
# ndigits=-2 rounds to the nearest hundred: round(2345.32, -2) -> 2300.0.
# (The original cell left ndigits= empty, which is a SyntaxError.)
print(round(2345.32, ndigits=-2))
# ## 7.
#
# En el ejercicio 4, presentamos tres amigos que compartían dulces. Como resultado, ellos tenían un total de dulces que debían repartir en partes iguales entre ellos. Los dulces restantes debían ser tirados. Por ejemplo, si entre todos juntaban 91 dulces, tomarían 30 cada uno y tirarían 1.
#
# Allí, escribiste un código para calcular cuántos dulces debían tirar, suponiendo que cada uno había juntado un número particular.
#
# Abajo presentamos una simple función que calcula el número de dulces a tirar para cualquier cantidad de dulces totales. Modificarla de manera que reciba opcionalmente un segundo argumento que represente el número de amigos entre los cuales se van a repartir los dulces. Si no se recibe un segundo argumento, por defecto se deben asumir 3 amigos, como antes.
#
# **Asegúrate de probar tu función**
def dulces_sobrantes(dulces_totales, amigos=3):
    """
    Return the number of leftover candies that must be thrown away after
    splitting dulces_totales evenly among `amigos` friends.

    The friend count defaults to 3, preserving the original
    single-argument behaviour, as the exercise requests.
    >>> dulces_sobrantes(91)
    1
    >>> dulces_sobrantes(91, 4)
    3
    """
    return dulces_totales % amigos
# ## 8.
#
# Puede no ser divertido, pero leer y entender los mensajes de error que nos brinda Python es muy, MUY importante.
#
# Cada celda abajo contiene codigos con errores.
#
# Para cada celda:
# 1. Leer el código y pensar qué crees que pasa cuando corra.
# 2. Correr la celda y ver qué pasa.
# 3. Arregla el código, de modo que no arroje ningún error.
# Fixed: the comma made this a two-argument call (TypeError with the
# intended one-argument function); a decimal point was intended.
redondear_a_dos_decimales(9.9999)
x = -10
y = 5
# Which of the two variables has the smallest absolute value?
# Fixed: min() arguments must be separated by a comma (the original
# `min(abs(x) abs(y))` is a SyntaxError).
abs_mas_pequeño = min(abs(x), abs(y))
abs_mas_pequeño
# +
def f(x):
    """Return the absolute value of x."""
    y = abs(x)
    # Fixed: `return` must be indented inside the function body; at
    # module level it is a SyntaxError ('return' outside function).
    return y

print(f(5))
# -
# ## 9.
#
# Muchos lenguajes de programación tienen la función [sign](https://en.wikipedia.org/wiki/Sign_function) disponible como parte de la librería estándar. Python no la tiene, ¡pero podemos definir nuestra propia función!
#
# En la celda siguiente, defina una función llamada `sign`, que tome un argumento numérico y devuelva -1 si el número es negativo, 1 si es positivo y 0 si es 0.
#
# **Asegúrate de probar tu función**
# Definir una función llamada sign con las especificaciones dadas
# ## 10.
#
# La función `es_negativo()` en la celda de abajo es correcta (devuelve `True` si el número dado es negativo y `False` de otro modo).
#
# Sin embargo, se usan muchas líneas de código para una tarea tan simple. Podemos reducir el número de líneas de código en esta función en un 75`%`, con exactamente el mismo resultado.
#
# Veamos si puedes obtener una función equivalente que use solo una línea de código. Escribir tu solución en la función `es_negativo_conciso()`.
#
# **Asegúrate de probar tu función**
# +
def es_negativo(num):
    """Return True if num is negative, False otherwise (verbose version)."""
    if num < 0:
        return True
    else:
        return False

def es_negativo_conciso(number):
    """
    One-line equivalent of es_negativo: the comparison itself already
    evaluates to a bool, so no if/else is needed.  (The original stub
    returned None.)
    """
    return number < 0
# -
# ## 11.
#
# Las variables booleanas `ketchup`, `mostaza` y `cebolla` representan si un cliente quiere un ingrediente en particular en su hot dog. Queremos implementar varias funciones booleanas que correspondan a ciertas preguntas de si o no acerca del pedido del criente.
#
# Por ejemplo:
def sin_cebolla(ketchup, mostaza, cebolla):
    """
    Return True if the customer does NOT want onion, and False otherwise.
    """
    return not cebolla
sin_cebolla(True, False, True), sin_cebolla(False, True, False)
# Para las siguientes funciones escriba el código para que la función haga lo que se dice en su documentación.
#
# **Asegúrate de probar tus funciones**
def todos_los_ingredientes(ketchup, mostaza, cebolla):
    """
    Return True if the customer wants all the toppings, and False otherwise.
    """
    # With boolean inputs, chained `and` yields the expected bool.
    return ketchup and mostaza and cebolla
def sin_ingrediente(ketchup, mostaza, cebolla):
    """
    Return True if the customer wants no topping at all, and False
    otherwise.  (The original stub returned None.)
    """
    return not (ketchup or mostaza or cebolla)
def solo_un_aderezo(ketchup, mostaza, cebolla):
    """
    Return True if the customer wants ketchup or mustard, but not both.
    This is the exclusive-or of the two booleans; onion is irrelevant
    here.  (The original stub returned None.)
    """
    return ketchup != mostaza
# ## 12.
# Hemos visto que llamar la función `bool()` sobre un número entero devuelve `False` si el número es igual a 0 y `True` en otro caso. Qué pasa si llamamos `int()` sobre un booleano? Inténtalo en la siguiente celda.
# ¿Puedes usar esto para escribir una función que devuelva `True` si el cliente quiere uno y sólo un ingrediente y `False` de lo contrario?
#
# **Asegúrate de probar tu función**
def exactamente_un_ingrediente(ketchup, mostaza, cebolla):
    """
    Return True if the customer wants exactly one topping, and False
    otherwise.  Relies on int(True) == 1 and int(False) == 0, as the
    exercise hints.  (The original stub returned None.)
    """
    return int(ketchup) + int(mostaza) + int(cebolla) == 1
# ## 13.
#
# Escriba el cuerpo de la siguiente función de acuerdo a su documentación.
def segundo_elemento(L):
    """
    Return the second element of the given list, or None when the list
    has fewer than two elements.

    >>> segundo_elemento([10, 4, 9, 10])
    4
    >>> segundo_elemento([5])
    """
    if len(L) >= 2:
        return L[1]
    return None
def elemento_n(L, n):
    """Return the n-th (1-based) element of L, or None when L has fewer than n elements."""
    if len(L) < n:
        return None
    return L[n - 1]
# Prueba tu función:
# ## 14.
#
# Implemente el cuerpo de la siguiente función de acuerdo a su domuentación **utilizando únicamente listas y sus métodos: NO SE ACEPTA EL USO DE CICLOS!**
def cuantos_negativos(lista):
    """
    Return how many numbers in the given list are negative.
    >>> cuantos_negativos([5, -1, -2, 0, 3])
    2

    Implemented with list methods only (no loops), as the exercise
    requires: in a sorted copy, the index of the first non-negative
    value equals the count of negatives; appending a 0 guarantees that
    such a value exists.  (The original body was empty and returned None.)
    """
    ordenados = sorted(lista)
    ordenados.append(0)
    return ordenados.index(0)
# **Ayuda: `help(list.sort)`, `help(list.index)`, `help(list.append)`**
#
# Prueba tu función:
# ## 15.
#
# La siguiente función no funciona correctamente de acuerdo a lo descrito en su documentación. A esto en programación se le llama un bug. Al correr la función no se marca ningún error, pero la función no hace lo que debería hacer.
#
# Identifica el bug de la siguiente función y arréglalo:
def es_chida(nums):
    """
    Return True if the given list is "cool": a cool list contains at
    least one number divisible by 7.
    >>> es_chida([1, 14, 3, 8])
    True
    """
    for num in nums:
        if num % 7 == 0:
            return True
    # Bug fix: the original had `return False` in the loop's else branch,
    # so it decided after inspecting only the first element.  Failure may
    # only be declared once every element has been checked.
    return False
# Muestra que tu función corregida funciona correctamente:
# ## 16.
#
# Algunos dicen que "los científicos de datos usan el 80% de su tiempo limpiando datos, y el 20% restante quejándose acerca de la limpieza de datos".
#
# Veamos si puedes escribir una función que nos ayude a limpiar datos correspondientes a códigos postales.
#
# Dado un string, la función debe devolver True si dicho string representa un código postal válido, de lo contrario devuelve False. Para nuestros propósitos, un código postal es válido si es un string que contiene exactamente **5 dígitos**.
#
# Ayuda: **help(str.isdigit)**
def cp_valido(cp):
    """Return True if the given string is a valid postal code, i.e. a
    string consisting of exactly 5 digits.

    >>> cp_valido('45017')
    True
    >>> cp_valido('450015')
    False
    >>> cp_valido('45a15')
    False
    """
    # str.isdigit() is False for empty strings, signs and decimal points,
    # so the length check plus isdigit covers all the invalid cases.
    return len(cp) == 5 and cp.isdigit()
# Prueba que tu función trabaje correctamente:
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
| Semana1/Tarea1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## 3.5 The Lexical-Analyzer Generator Lex
# ### 3.5.1
#
# > Describe how to make the following modifications to the `Lex` program of Fig. 3.23:
# >
# > a\) Add the keyword `while`.
# ```
# %%
# while {return(WHILE);}
# %%
# ```
# > b\) Change the comparison operators to be the C operators of that kind.
# ```
# %%
# "==" {yylval = EQ; return(RELOP);}
# "!=" {yylval = NE; return(RELOP);}
# %%
# ```
# > c\) Allow the underscore \(\_\) as an additional letter.
# ```
# id ({letter}|_)({letter}|{digit}|_)*
# %%
# %%
# ```
# > d\) Add a new pattern with token `STRING`.
# ```
# string "([^\\"]|\\\\|\\")*"
# %%
# {string} {yylval = (int) installString(); return(STRING);}
# %%
# int installString() {
# }
# ```
# ### 3.5.2
#
# > Write a `Lex` program that copies a file, replacing each non-empty sequence of white space by a single blank.
# ```
# %%
# [ \t\n]+ { putchar(' '); }
# . { ECHO; }
# %%
#
# int yywrap() {
# return 1;
# }
#
# int main() {
# yylex();
# return 0;
# }
# ```
# ### 3.5.3
#
# > Write a `Lex` program that copies a C program, replacing each instance of the keyword `float` to `double`.
# ```
# %%
# float { printf("double"); }
# . { ECHO; }
# %%
#
# int yywrap() {
# return 1;
# }
#
# int main() {
# yylex();
# return 0;
# }
# ```
# ### 3.5.4
#
# > Write a `Lex` program that converts a file to "Pig latin."
# ```
# letter [a-zA-Z]
# vowel [aeiouAEIOU]
# consonant [^{vowel}]
# vword {vowel}{letter}*
# cword {letter}+
#
# %%
#
# {vword} { printf("%s", yytext); printf("ay"); }
# {cword} { printf("%s", yytext + 1); putchar(yytext[0]); printf("ay"); }
# . { ECHO; }
#
# %%
#
# int yywrap() {
# return 1;
# }
#
# int main() {
# yylex();
# return 0;
# }
# ```
# ### 3.5.5
#
# > In SQL, keywords and identifiers are case-insensitive. Write a `Lex` program that recognizes the keywords `SELECT`, `FROM` and `WHERE`, and token `ID`.
# ```
# delim [ \t\n]
# ws {delim}+
# select [sS][eE][lL][eE][cC][tT]
# from [fF][rR][oO][mM]
# where [wW][hH][eE][rR][eE]
#
# letter [a-zA-Z]
# digit [0-9]
# id {letter}({letter}|{digit})*
#
# %%
#
# {ws} { }
# {select} { return(SELECT); }
# {from} { return(FROM); }
# {where} { return(WHERE); }
# {id} { str2lower(yytext); yylval = (int) installID(); return(ID); }
#
# %%
#
# void str2lower(char* text) {
# while (*text) {
# *text = tolower(*text);
# ++text;
# }
# }
#
# int installID() {
# }
# ```
| 03/3.5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: DESI development
# language: python
# name: desi-development
# ---
# # Applying depth cuts to CCD files for DR8
# I (<NAME>) have not tested these instructions, I'm merely relating something <NAME> wrote in an email so that it's recorded for posterity.
# ## Making depth cuts for DR8
# The script for making depth cuts is `legacyanalysis/depth-cut.py`, and the `legacyanalysis/depthcut.sh` script can be used to set environment variables and give it args. That script will need to be edited with appropriate input and output paths. This gets run on each brick, producing a `ccds-BRICK.fits` file for each brick. Running it on each brick can be done using qdo. For more information on using qdo to process bricks see, e.g., [<NAME>'s cookbook](https://github.com/legacysurvey/legacypipe/blob/master/doc/cookbook.md) or [Adam Myers' tutorial](https://github.com/legacysurvey/legacypipe/blob/master/doc/nb/running-dr8-test-bricks-with-docker.ipynb).
#
# After that finishes for each brick (this takes some time....), run the `legacyanalysis/depth-cut-dr8.py` script (again, modifying paths appropriately). The other "secret" step is converting the `survey-ccds.fits` files into the KD-tree-ified files using the `legacypipe/create_kdtrees.py` script.
#
# Further details, taken from the header of [the depth cut script](https://github.com/legacysurvey/legacypipe/blob/master/py/legacyanalysis/depth-cut-dr8.py#L10):
#
# ```
# Start with CCDs tables / zeropoints files.
# Create survey-ccd-*.kd.fits files via
# python legacypipe/create-kdtrees.py
#
# Create $CSCRATCH/dr8new containing:
# calib
# images
# survey-bricks.fits.gz
# survey-ccds-decam-g.kd.fits
# survey-ccds-decam-r.kd.fits
# survey-ccds-decam-z.kd.fits
#
# Create "depthcut" qdo queue:
# LEGACY_SURVEY_DIR=$CSCRATCH/dr8new python -u legacypipe/queue-calibs.py --region dr8-decam > bricks-decam.txt
#
# (hand-edit off the first few chatter lines)
#
# qdo load depthcut bricks-decam.txt
#
# Run "depth-cut.py" on each brick:
# QDO_BATCH_PROFILE=cori-shifter qdo launch -v depthcut 32 --cores_per_worker 1 --walltime=30:00 --batchqueue=debug --keep_env --batchopts "--image=docker:dstndstn/legacypipe:intel" --script "/src/legacypipe/py/legacyanalysis/depthcut.sh"
# ```
# Then run [the depth cut script](https://github.com/legacysurvey/legacypipe/blob/master/py/legacyanalysis/depth-cut-dr8.py#L10) itself.
#
#
#
#
#
# ## Making depth cuts in the context of a full processing run
# To summarize all of the steps needed to create calibration files and to use those files to process bricks:
#
# - `legacyzpts/legacy_zeropoints.py` takes filename to `*-legacypipe.fits` files.
# - `legacyzpts/legacy_zeropoints_merge.py` takes a set of `*-legacypipe.fits` files to a `survey-ccds.fits` file.
# - `legacypipe/create_kdtrees.py` takes `survey-ccds.fits` to `survey-ccds-init.kd.fits`.
# - build an initial `LEGACY_SURVEY_DIR` containing that `survey-ccds-init.kd.fits`.
# - `legacypipe/queue-calibs.py` creates a list of bricks (eg to feed qdo).
# - `legacyanalysis/depthcut.sh` takes bricks to `ccds-BRICK.fits` files.
# - `legacyanalysis/depth-cut-dr8.py` takes all the `ccds-BRICK.fits` files to a `survey-ccds-depthcut.fits` file.
# - `legacypipe/create_kdtrees.py` takes that `survey-ccds-depthcut.fits` file to `survey-ccds-depthcut.kd.fits`.
# - create a final `LEGACY_SURVEY_DIR` containing that new `survey-ccds-depthcut.kd.fits`.
# - `legacypipe/runbrick.py` on all bricks (see [this notebook](https://github.com/legacysurvey/legacypipe/blob/master/doc/nb/running-dr8-test-bricks-with-docker.ipynb)).
| doc/nb/making-dr8-depth-cuts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Accumulate a running sum over the list, printing the partial total
# after each element is added.
total = 0
for valor in [3, 41, 12, 9, 74, 15]:
    total += valor
    print("total:", total)

# Inspect the attributes/methods available on a string instance.
var = 'bannana'
dir(var)
# +
def pyramid_volume(base_length, base_width, pyramid_height):
    """Return the volume of a rectangular-base pyramid: base area * height / 3.

    Bug fix: the parameter was misspelled ``base_wdith`` while the body
    used ``base_width``, so every call raised NameError.
    """
    base_area = base_length * base_width
    volume = base_area * pyramid_height * 1 / 3
    return volume

print('Volume for 4.5, 2.1, 3.0 is:', pyramid_volume(4.5, 2.1, 3.0))
# -
new_word = upper (var)
# +
air_temperature = 36.4158102
# Bug fix: ``print ("%.1f"%)`` was a SyntaxError — the % formatting
# operator needs a right-hand operand (the value to format).
print("%.1f" % air_temperature)
# -
'\n \n'.isspace()  # True: the string contains only whitespace characters

# Read a name, count how many times the letter "o" appears, then echo it.
name = input().strip(" ")
# Bug fix: ``count "o"`` was a SyntaxError; str.count does the counting
# without needing a manual loop.
count = name.count("o")
print(name)
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.9 64-bit (''elections-analysis-DVAw9u_J-py3.9'': venv)'
# language: python
# name: python3
# ---
# # NLP model tests
# ---
#
# Here I test a few NLP models and their relevance for the project.
# ## Setup
# ### Import libraries
import os
import sys
from ipywidgets import interact
import plotly.io as pio
from transformers import pipeline
os.chdir("..")  # run from the repo root so relative data paths resolve
sys.path.append("utils/")  # make the local helper modules importable
from data_utils import DATA_DIR, load_yaml_file, load_markdown_file
from nlp_utils import get_sentences, get_sentiment, get_hate_speech
from viz_utils import plot_sentiment, plot_hate_speech
# ### Parameters
# Set the plotly style:
pio.templates.default = "plotly_white"  # white background for all plotly figures
# Get the party names from the data:
# Module-level state mutated by the interactive widget callbacks below.
data_name = None       # name of the selected dataset directory
party_data = None      # parsed parties_data.yml for the selected dataset
party_names = None     # party identifiers (keys of party_data)
selected_party = None  # party chosen in the second dropdown
data_names = os.listdir(DATA_DIR)
# Drop macOS Finder metadata entries that are not datasets.
data_names = [name for name in data_names if name != ".DS_Store"]
@interact
def set_data(data=data_names):
    """Dropdown callback: load the chosen dataset's party metadata into
    the module-level state used by the rest of the notebook."""
    global data_name, party_data, party_names
    data_name = data
    yml_path = os.path.join(DATA_DIR, data_name, "parties_data.yml")
    party_data = load_yaml_file(yml_path)
    party_names = list(party_data.keys())
# Select a party:
@interact
def select_party(party=party_names):
    """Dropdown callback: remember which party the user picked."""
    global selected_party
    selected_party = party
# ### Load data
program_txt = load_markdown_file(os.path.join(DATA_DIR, data_name, "programs", f"{selected_party}.md"))
# sample of the text
program_txt[:100]
sentences = get_sentences(program_txt)
sentences = [s.replace("*", "").replace("#", "") for s in sentences]
sentences[:10]
# ## Test models
# ### Sentiment analysis
# #### Apply to all sentences
sentiment_df = get_sentiment(sentences)  # per-sentence sentiment labels/scores
sentiment_df
# #### Plot it
# Spot-check 10 random sentences from each predicted sentiment class.
sentiment_df[sentiment_df.label == "negativo"].sentence.sample(10).values
sentiment_df[sentiment_df.label == "positivo"].sentence.sample(10).values
plot_sentiment(sentiment_df)
# ### Hate speech
# #### Apply to all sentences
hate_df = get_hate_speech(sentences)  # per-sentence hate-speech classification
hate_df
# #### Plot it
# Spot-check 10 random sentences from each predicted hate-speech class.
hate_df[hate_df.label == "ódio"].sentence.sample(10).values
hate_df[hate_df.label == "neutro"].sentence.sample(10).values
hate_df.label.value_counts()  # class balance of the predictions
plot_hate_speech(hate_df)
# ### Summarization
# #### Load the model
# mT5 model fine-tuned for multilingual abstractive summarisation.
sum_model_path = "csebuetnlp/mT5_multilingual_XLSum"
sum_task = pipeline("text2text-generation", model=sum_model_path, tokenizer=sum_model_path)
# #### Apply to the whole program
# NOTE(review): a full program likely exceeds the model's input length and
# will be truncated by the pipeline — verify before trusting the summary.
program_summary = sum_task(program_txt.replace("*", "").replace("#", ""))
program_summary
# #### Apply to chapters
| notebooks/02_nlp_model_tests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Transformer, Self-Attention и моделирование языка
# +
# Если Вы запускаете ноутбук на colab,
# выполните следующие строчки, чтобы подгрузить библиотеку dlnlputils:
# # !git clone https://github.com/Samsung-IT-Academy/stepik-dl-nlp.git
# import sys; sys.path.append('/content/stepik-dl-nlp')
# +
# %load_ext autoreload
# %autoreload 2
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import youtokentome as yttm
import dlnlputils
from dlnlputils.data import tokenize_corpus, build_vocabulary, \
save_texts_to_file, LanguageModelDataset, load_war_and_piece_chunks, \
GreedyGenerator, BeamGenerator
from dlnlputils.pipeline import train_eval_loop, init_random_seed
from dlnlputils.base import get_params_number
init_random_seed()
# -
# ## Загрузка текстов и разбиение на обучающую и тестовую подвыборки
# Если Вы запускаете ноутбук на colab, добавьте в начало пути /content/stepik-dl-nlp
# Load "War and Peace" pre-split into text chunks.
all_chunks = load_war_and_piece_chunks('./datasets/war_and_peace.txt')
len(all_chunks)
print(all_chunks[10])
# +
# Shuffle, then hold out 30% of the chunks for validation.
np.random.shuffle(all_chunks)
TRAIN_SPLIT = int(len(all_chunks) * 0.7)
train_texts = all_chunks[:TRAIN_SPLIT]
test_texts = all_chunks[TRAIN_SPLIT:]
print('Размер обучающей выборки', len(train_texts))
print('Размер валидационной выборки', len(test_texts))
# -
# ## Токенизация корпуса с помощью BPE
#
# BPE - Byte Pair Encoding
#
# YouTokenToMe - быстрая реализация BPE
# Если Вы запускаете ноутбук на colab, добавьте в начало пути /content/stepik-dl-nlp
BPE_MODEL_FILENAME = './models/war_and_peace_bpe.yttm'
# If you run this notebook on Colab, prepend /content/stepik-dl-nlp to the path
TRAIN_TEXTS_FILENAME = './datasets/war_and_peace_bpe_train.txt'
# Train a 1000-token BPE vocabulary on the training split only,
# so the validation split can contain unseen character n-grams.
save_texts_to_file(train_texts, TRAIN_TEXTS_FILENAME)
yttm.BPE.train(data=TRAIN_TEXTS_FILENAME, vocab_size=1000, model=BPE_MODEL_FILENAME);
tokenizer = yttm.BPE(BPE_MODEL_FILENAME)
print(' '.join(tokenizer.vocab()))
print(tokenizer.encode(train_texts[:1]))
# Encode both splits with begin/end-of-sequence markers.
train_token_ids = tokenizer.encode(train_texts, bos=True, eos=True)
test_token_ids = tokenizer.encode(test_texts, bos=True, eos=True)
plt.hist([len(sent) for sent in train_token_ids], bins=30)
plt.title('Распределение длин фрагментов в токенах')
plt.yscale('log');
token_counts = np.bincount([token_id for text in train_token_ids for token_id in text])
plt.hist(token_counts, bins=100)
plt.title('Распределение количества упоминаний токенов')
plt.yscale('log');
# Token id 1 is the <UNK> marker — count how often it appears in validation.
unknown_subwords_in_test = sum(1 for text in test_token_ids for token_id in text if token_id == 1)
print('Количество случаев с неизвестными n-граммами символов в валидационной выборке',
      unknown_subwords_in_test)
# ## Подготовка датасетов для PyTorch
# +
CHUNK_LENGTH = 80  # maximum sequence length (in BPE tokens) fed to the model

train_dataset = LanguageModelDataset(train_token_ids,
                                     chunk_length=CHUNK_LENGTH)
test_dataset = LanguageModelDataset(test_token_ids,
                                    chunk_length=CHUNK_LENGTH)
# -
# Sanity check: inspect one encoded sample and its decoded text.
train_dataset[0]
tokenizer.decode(list(train_dataset[0]))
# ## Общие классы и функции
# ### Маска зависимостей
# +
def make_target_dependency_mask(length):
    """Build a causal (autoregressive) attention mask of shape (length, length).

    Positions a token may attend to (j <= i) hold 0; future positions
    (j > i) hold -inf so softmax assigns them zero weight.
    """
    ones = torch.ones(length, length)
    allowed = torch.tril(ones) >= 1  # lower triangle incl. the diagonal
    mask = torch.ones(length, length)
    mask.masked_fill_(~allowed, float('-inf'))
    mask.masked_fill_(allowed, 0)
    return mask

make_target_dependency_mask(10)
# -
# ### Кодирование позиции
def make_positional_encoding(max_length, embedding_size):
    """Sinusoidal position codes as a (max_length, embedding_size) tensor.

    Even channels get sin, odd channels get cos, with the k-th channel
    pair oscillating at angular frequency pi / (k + 1).
    """
    positions = torch.arange(0, max_length).float() * np.pi
    dividers = torch.arange(1, embedding_size // 2 + 1).float()
    phase = positions.unsqueeze(1) / dividers.unsqueeze(0)
    codes = torch.zeros(max_length, embedding_size)
    codes[:, 0::2] = torch.sin(phase)
    codes[:, 1::2] = torch.cos(phase)
    return codes
# Visualise every third channel of a 30x30 positional-encoding table.
sample_pos_codes = make_positional_encoding(30, 30)
plt.plot(sample_pos_codes[:, ::3].numpy());
plt.gcf().set_size_inches((15, 5))
# ### Основной класс - языковая модель
class LanguageModel(nn.Module):
    """Autoregressive language model: token embeddings + positional codes,
    an interchangeable encoder backbone, and a linear vocabulary head.

    The backbone must accept ``(embs, mask=..., src_key_padding_mask=...)``
    and return batch-first features (see BatchFirstTransformerEncoder /
    MyTransformerEncoder in this file).
    """

    def __init__(self, vocab_size, embedding_size, backbone, emb_dropout=0.0):
        super().__init__()
        self.embedding_size = embedding_size
        # padding_idx=0: token id 0 is padding and keeps a zero embedding
        self.embeddings = nn.Embedding(vocab_size, embedding_size, padding_idx=0)
        self.emb_dropout = nn.Dropout(emb_dropout)
        self.backbone = backbone
        self.out = nn.Linear(embedding_size, vocab_size)

    def forward(self, seed_token_ids):
        """
        seed_token_ids - BatchSize x MaxInLen

        Returns logits of shape BatchSize x MaxInLen x VocabSize.
        """
        batch_size, max_in_length = seed_token_ids.shape

        # padding positions (token id 0) are excluded from attention
        seed_padding_mask = seed_token_ids == 0
        # causal mask: each position may only attend to itself and the past
        dependency_mask = make_target_dependency_mask(max_in_length) \
            .to(seed_token_ids.device)

        seed_embs = self.embeddings(seed_token_ids)  # BatchSize x MaxInLen x EmbSize
        pos_codes = make_positional_encoding(max_in_length,
                                             self.embedding_size).unsqueeze(0).to(seed_embs.device)
        seed_embs = seed_embs + pos_codes
        seed_embs = self.emb_dropout(seed_embs)

        # BatchSize x TargetLen x EmbSize
        target_features = seed_embs
        target_features = self.backbone(seed_embs,
                                        mask=dependency_mask,
                                        src_key_padding_mask=seed_padding_mask)
        logits = self.out(target_features)  # BatchSize x TargetLen x VocabSize
        return logits
# ### Утилиты для обучения - функция потерь и расписание изменения длины градиентного шага
# +
def lm_cross_entropy(pred, target):
    """Token-level cross-entropy loss for language modelling.

    pred - BatchSize x TargetLen x VocabSize logits
    target - BatchSize x TargetLen token ids (id 0 = padding, ignored)
    """
    vocab_size = pred.shape[-1]
    flat_logits = pred.reshape(-1, vocab_size)   # BatchSize*TargetLen x VocabSize
    flat_targets = target.reshape(-1)            # BatchSize*TargetLen
    return F.cross_entropy(flat_logits, flat_targets, ignore_index=0)
def lr_scheduler(optimizer):
    """Halve the learning rate after 20 epochs without improvement."""
    return torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, patience=20, factor=0.5, verbose=True)
# -
# ## Реализация Transformer из PyTorch 1.2
class BatchFirstTransformerEncoder(nn.Module):
    """Adapter around nn.TransformerEncoder that accepts batch-first input.

    PyTorch's TransformerEncoder (pre-``batch_first``) expects tensors of
    shape (SeqLen, Batch, Emb); this wrapper transposes on the way in and
    out so callers can work with (Batch, SeqLen, Emb) throughout.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.impl = nn.TransformerEncoder(*args, **kwargs)
        self.initialize_weights()

    def forward(self, src, *args, **kwargs):
        # (Batch, SeqLen, Emb) -> (SeqLen, Batch, Emb) for the wrapped encoder
        seq_first = src.transpose(0, 1).contiguous()
        encoded = self.impl(seq_first, *args, **kwargs)
        # back to batch-first for the caller
        return encoded.transpose(0, 1).contiguous()

    def initialize_weights(self):
        # Xavier init for every weight matrix; 1-d params (biases, norms) untouched
        for param in self.impl.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)
# Language model with PyTorch's built-in TransformerEncoder as backbone:
# 3 layers, 16 heads, model size 256.
torch_transf_model = LanguageModel(tokenizer.vocab_size(),
                                   256,
                                   BatchFirstTransformerEncoder(
                                       nn.TransformerEncoderLayer(
                                           d_model=256,
                                           nhead=16,
                                           dim_feedforward=512,
                                           dropout=0.1),
                                       num_layers=3),
                                   emb_dropout=0.1)
print('Количество параметров', get_params_number(torch_transf_model))
# Train with early stopping; keeps the best model by validation loss.
(best_val_loss,
 best_torch_transf_model) = train_eval_loop(torch_transf_model,
                                            train_dataset,
                                            test_dataset,
                                            lm_cross_entropy,
                                            lr=2e-3,
                                            epoch_n=2000,
                                            batch_size=8,
                                            device='cuda',
                                            early_stopping_patience=50,
                                            max_batches_per_epoch_train=1000,
                                            max_batches_per_epoch_val=1000,
                                            lr_scheduler_ctor=lr_scheduler)
# If you run this notebook on Colab, prepend /content/stepik-dl-nlp to the path
torch.save(best_torch_transf_model.state_dict(), './models/war_and_peace_torch_transf_best.pth')
# If you run this notebook on Colab, prepend /content/stepik-dl-nlp to the path
torch_transf_model.load_state_dict(torch.load('./models/war_and_peace_torch_transf_best.pth'))
# ## Генерация текста с помощью языковой модели
# ### Жадная генерация
# Greedy decoding: always pick the most probable next token.
greedy_generator = GreedyGenerator(torch_transf_model, tokenizer)
# +
# %%time
print(greedy_generator('сказала княжна, оглядывая Бона'))
# -
print(greedy_generator('смеялась княжна, оглядывая Наполе'))
print(greedy_generator('сказала княжна, оглядывая Кутуз'))
print(greedy_generator('сказал Кутузов, оглядывая Наполеона'))
# ### Generation via beam search - Beam Search
# Beam search keeps the top-k partial hypotheses at every step.
beam_generator = BeamGenerator(torch_transf_model, tokenizer)
# +
# %%time
beam_gen_variants = beam_generator('сказала княжна, оглядывая Наполе',
                                   beamsize=5,
                                   return_hypotheses_n=5)
for score, pred_txt in beam_gen_variants:
    print('****')
    print(score)
    print(pred_txt)
    print()
# +
# %%time
# Wider beam: slower, but explores more alternatives.
beam_gen_variants = beam_generator('сказала княжна, оглядывая Наполе',
                                   beamsize=20,
                                   return_hypotheses_n=20)
for score, pred_txt in beam_gen_variants:
    print('****')
    print(score)
    print(pred_txt)
    print()
# +
# %%time
beam_gen_variants = beam_generator('сказала княжна, оглядывая Наполе',
                                   beamsize=100,
                                   return_hypotheses_n=20)
for score, pred_txt in beam_gen_variants:
    print('****')
    print(score)
    print(pred_txt)
    print()
# -
# ## Собственная реализация MultiHeadAttention
def my_multihead_attention(queries, keys, values,
                           keys_padding_mask, dependency_mask,
                           is_training,
                           weights_dropout):
    """Multi-head attention core: weighted sum of values by query/key relevance.

    NOTE(review): scores are NOT scaled by sqrt(key_size) as in Vaswani et
    al. — presumably an intentional course simplification; confirm.

    queries - BatchSize x ValuesLen x HeadN x KeySize
    keys - BatchSize x KeysLen x HeadN x KeySize
    values - BatchSize x KeysLen x HeadN x ValueSize
    keys_padding_mask - BatchSize x KeysLen (True = padding position)
    dependency_mask - ValuesLen x KeysLen, additive (0 keeps, -inf removes)
    is_training - bool
    weights_dropout - float

    result - tuple of two:
        - BatchSize x ValuesLen x HeadN x ValueSize - resulting features
        - BatchSize x ValuesLen x KeysLen x HeadN - attention map
    """
    # Unnormalised scores: per-head dot product of every query with every key.
    # BatchSize x ValuesLen x KeysLen x HeadN
    relevances = torch.einsum('bvhs,bkhs->bvkh', (queries, keys))
    # Mask out key positions beyond each sequence's real length (in-place).
    padding_mask_expanded = keys_padding_mask[:, None, :, None].expand_as(relevances)
    relevances.masked_fill_(padding_mask_expanded, float('-inf'))
    # Mask out forbidden <output position, input position> pairs (e.g. causal).
    relevances = relevances + dependency_mask[None, :, :, None].expand_as(relevances)
    # Normalise over the key axis (dim=2) and apply dropout to the weights.
    normed_rels = F.softmax(relevances, dim=2)
    normed_rels = F.dropout(normed_rels, weights_dropout, is_training)
    # BatchSize x ValuesLen x KeysLen x HeadN x 1
    normed_rels_expanded = normed_rels.unsqueeze(-1)
    # BatchSize x 1 x KeysLen x HeadN x ValueSize
    values_expanded = values.unsqueeze(1)
    # Broadcasted product, then sum over keys = attention-weighted average.
    # BatchSize x ValuesLen x KeysLen x HeadN x ValueSize
    weighted_values = normed_rels_expanded * values_expanded
    result = weighted_values.sum(2)  # BatchSize x ValuesLen x HeadN x ValueSize
    return result, normed_rels
# ## Self-Attention - это Attention, в котором ключи, значения и запросы вычисляются из элементов одной и той же последовательности
class MyMultiheadSelfAttention(nn.Module):
    """Multi-head self-attention: queries, keys and values are all computed
    from the same input sequence via three learned linear projections."""

    def __init__(self, model_size, n_heads, dropout=0):
        super().__init__()
        assert model_size % n_heads == 0, 'Размерность модели должна делиться нацело на количество голов'
        self.n_heads = n_heads
        self.queries_proj = nn.Linear(model_size, model_size)
        self.keys_proj = nn.Linear(model_size, model_size)
        self.values_proj = nn.Linear(model_size, model_size)
        self.dropout = dropout
        # Saved on every forward pass for later visualisation.
        self.last_attention_map = None

    def forward(self, sequence, padding_mask, dependency_mask):
        """
        sequence - BatchSize x Len x ModelSize
        padding_mask - BatchSize x Len
        dependency_mask - Len x Len

        result - BatchSize x Len x ModelSize
        """
        batch, length, model_size = sequence.shape

        def project_to_heads(proj):
            # (B, L, M) -> (B, L, HeadsN, M // HeadsN)
            return proj(sequence).view(batch, length, self.n_heads, -1)

        q = project_to_heads(self.queries_proj)
        k = project_to_heads(self.keys_proj)
        v = project_to_heads(self.values_proj)

        # (B, L, HeadsN, ValueSize) plus the normalised attention weights
        attended, att_map = my_multihead_attention(
            q, k, v, padding_mask, dependency_mask,
            self.training, self.dropout)

        self.last_attention_map = att_map.detach()
        return attended.view(batch, length, model_size)
# ## Один слой трансформера - Self-Attention, Feed-Forward, skip-connections, LayerNorm
class MyTransformerEncoderLayer(nn.Module):
    """One Transformer encoder block: self-attention followed by a
    position-wise feed-forward network, each wrapped with dropout, a
    residual connection and LayerNorm (post-norm variant)."""

    def __init__(self, model_size, n_heads, dim_feedforward, dropout):
        super().__init__()
        self.self_attention = MyMultiheadSelfAttention(model_size,
                                                       n_heads,
                                                       dropout=dropout)
        self.first_dropout = nn.Dropout(dropout)
        self.first_norm = nn.LayerNorm(model_size)
        self.feedforward = nn.Sequential(
            nn.Linear(model_size, dim_feedforward),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(dim_feedforward, model_size),
            nn.Dropout(dropout)
        )
        self.second_norm = nn.LayerNorm(model_size)

    def forward(self, sequence, padding_mask, dependency_mask):
        # Attention sub-layer: residual connection then normalisation.
        attended = self.self_attention(sequence, padding_mask, dependency_mask)
        normed = self.first_norm(sequence + self.first_dropout(attended))
        # Feed-forward sub-layer: residual connection then normalisation.
        return self.second_norm(normed + self.feedforward(normed))
# ## Энкодер Трансформера - стопка из нескольких слоёв
class MyTransformerEncoder(nn.Module):
    """A stack of identical MyTransformerEncoderLayer blocks applied in order."""

    def __init__(self, n_layers, **layer_kwargs):
        super().__init__()
        blocks = [MyTransformerEncoderLayer(**layer_kwargs) for _ in range(n_layers)]
        self.layers = nn.ModuleList(blocks)
        self.initialize_weights()

    def forward(self, sequence, mask, src_key_padding_mask):
        out = sequence
        for block in self.layers:
            out = block(out, src_key_padding_mask, mask)
        return out

    def initialize_weights(self):
        # Xavier init for all weight matrices across the whole stack.
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)
# ## Попробуем обучить языковую модель с нашим Трансформером
# Same LanguageModel, now with our hand-written Transformer encoder backbone
# (same hyperparameters as the built-in version above, for comparison).
my_transf_model = LanguageModel(tokenizer.vocab_size(),
                                256,
                                MyTransformerEncoder(
                                    n_layers=3,
                                    model_size=256,
                                    n_heads=16,
                                    dim_feedforward=512,
                                    dropout=0.1),
                                emb_dropout=0.1)
print('Количество параметров', get_params_number(my_transf_model))
# Train with early stopping; keeps the best model by validation loss.
(best_val_loss,
 best_my_transf_model) = train_eval_loop(my_transf_model,
                                         train_dataset,
                                         test_dataset,
                                         lm_cross_entropy,
                                         lr=2e-3,
                                         epoch_n=2000,
                                         batch_size=8,
                                         device='cuda',
                                         early_stopping_patience=50,
                                         max_batches_per_epoch_train=1000,
                                         max_batches_per_epoch_val=1000,
                                         lr_scheduler_ctor=lr_scheduler)
# If you run this notebook on Colab, prepend /content/stepik-dl-nlp to the path
torch.save(best_my_transf_model.state_dict(), './models/war_and_peace_my_transf_best.pth')
# If you run this notebook on Colab, prepend /content/stepik-dl-nlp to the path
my_transf_model.load_state_dict(torch.load('./models/war_and_peace_my_transf_best.pth'))
# ## Our implementation - greedy generation
my_greedy_generator = GreedyGenerator(my_transf_model, tokenizer)
my_greedy_generator('сказала княжна, оглядывая Андре')
# ## Визуализация карт внимания
def plot_attention_maps(model, input_string, tokenizer, device='cuda', max_heads=2, figsize=(16, 10)):
    """Run the model on one string and show each attention layer's maps.

    Works by reading the ``last_attention_map`` attribute that every
    MyMultiheadSelfAttention module saves during its forward pass; at most
    ``max_heads`` heads per layer are plotted.

    NOTE(review): assumes n_heads_to_vis >= 2 — with a single head,
    plt.subplots returns a bare Axes and ``axes[head_i]`` would fail.
    """
    device = torch.device(device)
    token_ids = tokenizer.encode([input_string])[0]
    token_strs = [tokenizer.id_to_subword(i) for i in token_ids]
    in_len = len(token_ids)
    ticks = np.arange(0, in_len)

    model.to(device)
    model.eval()
    in_batch = torch.tensor(token_ids).unsqueeze(0).to(device)
    # Forward pass only for its side effect: populating last_attention_map.
    model(in_batch)

    for module in model.modules():
        if isinstance(module, MyMultiheadSelfAttention):
            # (Len, Len, HeadsN) attention weights for the single batch item
            cur_last_attention_map = module.last_attention_map[0].cpu().numpy()

            n_heads = cur_last_attention_map.shape[-1]
            n_heads_to_vis = min(n_heads, max_heads)

            fig, axes = plt.subplots(1, n_heads_to_vis)
            fig.set_size_inches(figsize)

            for head_i in range(n_heads_to_vis):
                ax = axes[head_i]
                ax.imshow(cur_last_attention_map[..., head_i])

                ax.set_yticks(ticks)
                ax.set_ylim(bottom=in_len - 0.5, top=-0.5)
                ax.set_yticklabels(token_strs)

                ax.set_xticks(ticks)
                ax.set_xticklabels(token_strs)

plot_attention_maps(my_transf_model, 'сказал Кутузов, оглядывая Бонапарта', tokenizer)
| task5_text_transformer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Awq8sMaIFHJ1"
# Install Model maker
# !pip install -q tflite-model-maker &> /dev/null
# + id="2fEAqoLLF6O9"
# Imports and check that we are using TF2.x
import numpy as np
import os
from tflite_model_maker import configs
from tflite_model_maker import ExportFormat
from tflite_model_maker import model_spec
from tflite_model_maker import text_classifier
from tflite_model_maker import TextClassifierDataLoader
import tensorflow as tf
assert tf.__version__.startswith('2')  # Model Maker requires TensorFlow 2.x
tf.get_logger().setLevel('ERROR')  # silence TF info/warning spam in the notebook
# + id="lsuDZvjgREsS"
# Download the dataset as a CSV and store as data_file
data_file = tf.keras.utils.get_file(fname='comment-spam.csv', origin='https://storage.googleapis.com/laurencemoroney-blog.appspot.com/lmblog_comments.csv', extract=False)
# + id="pbew43TbG9HQ"
# Use a model spec from model maker. Options are 'mobilebert_classifier', 'bert_classifier' and 'average_word_vec'
# The first 2 use the BERT model, which is accurate, but larger and slower to train
# Average Word Vec is kinda like transfer learning where there are pre-trained word weights
# and dictionaries
spec = model_spec.get('average_word_vec')
spec.num_words = 2000   # vocabulary size
spec.seq_len = 20       # input sequences padded/truncated to 20 tokens
spec.wordvec_dim = 7    # embedding dimensionality per word
# + id="-WdQmzTKHFVn"
# Load the CSV using DataLoader.from_csv to make the training_data
data = TextClassifierDataLoader.from_csv(
      filename=data_file,
      text_column='commenttext',
      label_column='spam',
      model_spec=spec,
      delimiter=',',
      shuffle=True,
      is_training=True)

# 90% train / 10% test split.
train_data, test_data = data.split(0.9)
# + id="qThBoIIyG_Du"
# Build the model
model = text_classifier.create(train_data, model_spec=spec, epochs=50, validation_data=test_data)
# + id="5QXEwKYxdPzs"
# Evaluate on the training split (NOTE(review): test_data would give a less
# optimistic estimate — verify which split was intended here).
loss, accuracy = model.evaluate(train_data)
# + id="In-1-rzW-_9b"
# This will export to TFLite format with the model only.
# if you see a .json file in this directory, it is NOT the JSON model for TFJS
# See below for how to generate that.
# Please note that if you run this cell to create the tflite model then the
# export to TFJS will fail. You'll need to rerun the model training first
model.export(export_dir='/mm_spam')

# If you want the labels and the vocab, for example for iOS, you can use this
model.export(export_dir='/mm_spam/', export_format=[ExportFormat.LABEL, ExportFormat.VOCAB])

# You can find your files in colab by clicking the 'folder' tab to the left of
# this code window, and then navigating 'up' a directory to find the root
# directory listing -- and from there you should see /mm_spam/
# + id="sW5vfrLHTDM8"
# Use this section for export to TFJS
# Please note that if you run the above cell to create the tflite model then the
# export to TFJS will fail. You'll need to rerun the model training first
model.export(export_dir="/mm_js/", export_format=[ExportFormat.TFJS, ExportFormat.LABEL, ExportFormat.VOCAB])
# + id="3UNNrCWbKbxh"
# Optional extra
# You can use this cell to export details for projector.tensorflow.org
# Where you can explore the embeddings that were learned for this dataset
embeddings = model.model.layers[0]          # first layer = the embedding layer
weights = embeddings.get_weights()[0]       # (vocab_size, wordvec_dim) matrix
# presumably vocab maps word -> integer index; verify against the model spec
tokenizer = model.model_spec.vocab
import io
# vecs.tsv holds one embedding vector per line; meta.tsv the matching words.
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word in tokenizer:
  #word = tokenizer.decode([word_num])
  value = tokenizer[word]
  embeddings = weights[value]   # reuses the name; the layer is no longer needed
  out_m.write(word + "\n")
  out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()

# Trigger browser downloads when running in Colab; no-op elsewhere.
try:
  from google.colab import files
except ImportError:
  pass
else:
  files.download('vecs.tsv')
  files.download('meta.tsv')
| TextClassificationOnMobile/colabs/SpamCommentsModelMaker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2D ring resonator
#
# See on [github](https://github.com/flexcompute/tidy3d-notebooks/blob/main/RingResonator.ipynb), run on [colab](https://colab.research.google.com/github/flexcompute/tidy3d-notebooks/blob/main/RingResonator.ipynb), or just follow along with the output below.
#
# This is a simple example of using Tidy3D to simulate a common photonics application.
#
# We perform a 2D simulation of a ring resonator side coupled to a dielectric waveguide.
#
# <img src="img/ring.png" alt="diagram" width="300"/>
#
# With a center wavelength of 500 nm and 10 nm resolution, this is a challenging FDTD problem because of the large simulation size. The simulation contains 1 million grid points to model the entire domain and 290,000 time steps to capture the resonance of the ring.
#
# With Tidy3D, we perform each simulation in just a few minutes.
# +
# get the most recent version of tidy3d
# !pip install -q --upgrade tidy3d
# make sure notebook plots inline
# %matplotlib inline
# standard python imports
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
# tidy3D import
import tidy3d as td
from tidy3d import web
# -
# ## Initial setup
#
# Our ring resonator will include a ring centered at (0,0) with a waveguide just above the ring spanning the x direction.
#
# ```
# (waveguide)
# in -> ========== -> out
# 0
# (resonator)
# ```
# +
# Geometry / simulation parameters. Lengths are presumably in microns,
# Tidy3D's default unit — TODO confirm against the tidy3d documentation.
# resolution
dl = 0.012
# define geometry
wg_width = 0.25
couple_width = 0.05
ring_radius = 3.5
ring_wg_width = 0.25
wg_spacing = 2.0
buffer = 2.0
# compute quantities based on geometry parameters
x_span = 2*wg_spacing + 2*ring_radius + 2*buffer
y_span = 2*ring_radius + 2*ring_wg_width + wg_width + couple_width + 2*buffer
wg_insert_x = ring_radius + wg_spacing
# waveguide sits just above the ring, separated by `couple_width`
wg_center_y = ring_radius + ring_wg_width/2. + couple_width + wg_width/2.
# set number of PMLs on each side
npml = 15
# -
# define pulse parameters
pol = 'TE' # 'TE' or 'TM'. TE means waveguide excited Ey, TM means waveguide excited Ez
lambda_central = 0.5
f_center = td.C_0 /lambda_central
f_width = f_center / 6
grids_per_wavelength = lambda_central / dl
run_time_norm = 1e-13 # run time for normalization run without ring
run_time = 5e-12 # run time for simulation with ring
# Define materials. [(docs)](https://simulation.cloud/docs/html/api.html#material)
# permittivity = refractive index squared (non-dispersive media)
n_bg = 1.0
n_solid = 1.5
background = td.Medium(epsilon=n_bg**2)
solid = td.Medium(epsilon=n_solid**2)
# Define structures. [(docs)](https://simulation.cloud/docs/html/api.html#structure)
#
# +
# background of entire domain (set explicitly as a box)
background_box = td.Box(
    center=[0, 0, 0],
    size=[td.inf, td.inf, td.inf],
    material=background,
    name='background')
# waveguide
waveguide = td.Box(
    center=[0, wg_center_y, 0],
    size=[td.inf, wg_width, td.inf],
    material=solid,
    name='waveguide')
# outside ring
# The ring is built as a solid cylinder with a background-material cylinder
# placed inside it; presumably later structures in the list override earlier
# ones — TODO confirm against the tidy3d structure-ordering docs.
outer_ring = td.Cylinder(
    center=[0,0,0],
    axis='z',
    radius=ring_radius+ring_wg_width/2.0,
    height=td.inf,
    material=solid,
    name='outer_ring')
# inside ring fill
inner_ring = td.Cylinder(
    center=[0,0,0],
    axis='z',
    radius=ring_radius-ring_wg_width/2.0,
    height=td.inf,
    material=background,
    name='inner_ring')
# -
# Define modal source. [(docs)](https://simulation.cloud/docs/html/api.html#source)
# Gaussian pulse injected as a waveguide mode at the input end of the guide.
source = td.ModeSource(
    td.GaussianPulse(f_center, f_width, phase=0),
    center=[-wg_insert_x, wg_center_y, 0],
    size=[0, 2, td.inf],
    direction='forward',
    amplitude=1.0,
    name='modal_source')
# Define monitors. [(docs)](https://simulation.cloud/docs/html/api.html#monitor)
# +
# monitor steady state fields at central frequency over whole domain
field_monitor = td.FreqMonitor(
    center=[0, 0, 0],
    size=[x_span, y_span, 0],
    freqs=f_center,
    name='full_domain_fields')
# monitor the time series at a point in the center of the output waveguide
time_monitor = td.TimeMonitor(
    center=[wg_insert_x, wg_center_y, 0],
    size=[0, 0, 0],
    name='time_series')
# -
# -
# Define simulation. [(docs)](https://simulation.cloud/docs/html/api.html#simulation)
#
# Here, we turn off the automatic shutoff factor by setting it to 0. This is because we will compute the spectrum by a fourier transform of the time response, and we would like to run the normalization simulation and the simulation with ring to exactly the same `run_time`.
# +
# Both simulations are one mesh cell thick in z, making them effectively 2D.
# create normalization simulation (no ring)
sim0 = td.Simulation(
    size=[x_span, y_span, 1*dl],
    mesh_step=[dl, dl, dl],
    structures=[background_box, waveguide],
    sources=[source],
    monitors=[field_monitor, time_monitor],
    run_time = run_time_norm,
    pml_layers=[npml, npml, 0])
# create simulation (with ring)
# identical except for the two ring cylinders and the much longer run_time,
# which is needed to capture the slow resonant ring-down
sim = td.Simulation(
    size=[x_span, y_span, 1*dl],
    mesh_step=[dl, dl, dl],
    structures=[background_box, waveguide, outer_ring, inner_ring],
    sources=[source],
    monitors=[field_monitor, time_monitor],
    run_time = run_time,
    pml_layers=[npml, npml, 0])
# -
# -
# ----
# Visualize structure, source, and modes. [(docs)](https://simulation.cloud/docs/html/api.html#plotting-tools)
# plot the two simulations
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
sim0.viz_eps_2D(normal='z', position=0.0, ax=ax1)
sim.viz_eps_2D(normal='z', position=0.0, ax=ax2)
plt.show()
# visualize time series of source
ax1, ax2 = sim.viz_source(source)
ax1.set_xlim((0, 0.1e-12)) # note the pulse extends far beyond this time, adjust lims to inspect
plt.show()
# Visualize the first two modes of the mode source
sim.compute_modes(source, Nmodes=2)
sim.viz_modes(source);
# From the above plots, we see that
#
# `mode_index=0` corresponds to exciting 0-th order TM mode (E=Ez) and
#
# `mode_index=1` corresponds to exciting 0-th order TE mode (E=Ey).
#
# We can therefore switch the mode index accordingly based on our polarization.
# +
# map the chosen polarization onto the solver's mode index (see note above)
if pol == 'TM':
    mode_index = 0
elif pol == 'TE':
    mode_index = 1
else:
    raise ValueError('pol must be TM or TE')
# set the mode index of modal source in each sim
sim0.set_mode(source, mode_ind=mode_index)
sim.set_mode(source, mode_ind=mode_index)
# -
# -
# ----
# ## Run Simulation
# Run simulations on our server. [(docs)](https://simulation.cloud/docs/html/api.html#web-api)
def run_tidy3d(sim, task_name=''):
    """Submit *sim* to the Tidy3D web API, wait for it, and load its results.

    The simulation object is modified in place: after this call its monitor
    data is populated from the downloaded HDF5 file under ``output/``.
    """
    # upload the exported simulation and block until the remote job finishes
    task_id = web.new_project(sim.export(), task_name=task_name)['taskId']
    web.monitor_project(task_id)
    # fetch the finished results and attach them to the simulation object
    print('downloading results...')
    web.download_results(task_id, target_folder='output')
    print('done\n')
    sim.load_results('output/monitor_data.hdf5')
    print('\n')
# use function above to run simulation with and without ring
run_tidy3d(sim0, task_name='normalization')
run_tidy3d(sim, task_name='with_ring')
# which components of the electric field to plot, depending on polarization
if pol == 'TM':
    comps = ('z',)
elif pol == 'TE':
    comps = ('x', 'y')
else:
    raise ValueError('pol must be TM or TE')
# visualize normalization run
# top row: real part, bottom row: magnitude; one column per field component
fig, axes = plt.subplots(2, len(comps), figsize=(14, 10))
for axe, val in zip(axes.reshape(2, len(comps)), ('re', 'abs')):
    for ax, comp in zip(axe, comps):
        im = sim0.viz_field_2D(field_monitor, sample_ind=0, eps_alpha=0.15, comp=comp, val=val, cbar=True, ax=ax)
# visualize run with ring
fig, axes = plt.subplots(2, len(comps), figsize=(14, 10))
for axe, val in zip(axes.reshape(2, len(comps)), ('re', 'abs')):
    for ax, comp in zip(axe, comps):
        im = sim.viz_field_2D(field_monitor, sample_ind=0, eps_alpha=0.15, comp=comp, val=val, cbar=True, ax=ax)
# ## Analyze Spectrum
# Analyze transmission spectrum using fourier transform. [(docs)](https://simulation.cloud/docs/html/api.html#miscellaneous)
# +
# Get data from the TimeMonitor
tdata0 = sim0.data(time_monitor)
tdata = sim.data(time_monitor)
tmesh0 = tdata0["tmesh"]
tmesh = tdata["tmesh"]
Nt = tmesh.size
dt = tmesh[1] - tmesh[0]
# select output fields as Ez if TM or Ey if TE
# NOTE(review): assumes tdata['E'] is indexed (component, x, y, z, time)
# with components ordered x, y, z — confirm against the tidy3d data format.
field_index = 2 if pol == 'TM' else 1
E_out0 = tdata0['E'][field_index, 0, 0, 0, :]
E_out = tdata['E'][field_index, 0, 0, 0, :]
# frequencies to measure output spectrum at
# (wavelengths 0.4-0.6 um around the 0.5 um center wavelength)
freqs = td.C_0 / np.linspace(0.4, 0.6, 1000)
# fourier transform output time series to get spectrum at freqs
E_out0_f = td.dft_spectrum(E_out0, dt, freqs)
E_out_f = td.dft_spectrum(E_out, dt, freqs)
# -
# -
# Plot results.
# +
# plot time series with ring
# log scale makes the slow exponential ring-down of the resonator visible
fig, ax = plt.subplots(1, figsize=(8, 4), tight_layout=True)
ax.plot(tmesh, np.abs(E_out))
ax.set_xlabel("Time [s]")
ax.set_ylabel("Electric field [a.u.]");
ax.set_title("STRUCTURE $E_{out}(t)$")
ax.set_ylim((1e-4, 1e3))
ax.set_yscale('log')
ax.set_xlim((tmesh[0], tmesh[-1]));
# +
# 2x2 grid: raw spectra on top, transmission ratio and its square below
fig, ax = plt.subplots(2, 2, figsize=(12, 8), tight_layout=True)
# plot spectrum of normalization (input)
ax[0, 0].plot(freqs, np.abs(E_out0_f))
ax[0, 0].set_xlabel("Frequency [Hz]")
ax[0, 0].set_ylabel("Electric field [a.u.]");
ax[0, 0].set_title("NORMALIZE Spectrum");
# plot transmission spectrum with ring
ax[0, 1].plot(freqs, np.abs(E_out_f))
ax[0, 1].set_xlabel("Frequency [Hz]")
ax[0, 1].set_ylabel("Electric field [a.u.]");
ax[0, 1].set_title("STRUCTURE Spectrum");
# plot spectrum ratio |E_ring(f)| / |E_norm(f)|
ax[1, 0].plot(freqs, np.abs(E_out_f) / np.abs(E_out0_f))
ax[1, 0].set_xlabel("Frequency [Hz]")
ax[1, 0].set_ylabel("enhancement");
ax[1, 0].set_title("Structure / Normalize Spectrum");
# plot square of spectrum ratio ( |E_ring(f)| / |E_norm(f)| )^2
ax[1, 1].plot(freqs, (np.abs(E_out_f) / np.abs(E_out0_f))**2)
ax[1, 1].set_xlabel("Frequency [Hz]")
ax[1, 1].set_ylabel("enhancement^2");
ax[1, 1].set_title("|Structure / Normalize Spectrum|^2");
# +
# add your analysis here!
| RingResonator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Our Mission ##
#
# You recently used Naive Bayes to classify spam in this [dataset](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection). In this notebook, we will expand on the previous analysis by using a few of the new techniques you saw throughout this lesson.
#
#
# > In order to get caught back up to speed with what was done in the previous notebook, run the cell below
# +
# Import our libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Read in our dataset
df = pd.read_table('smsspamcollection/SMSSpamCollection',
                   sep='\t',
                   header=None,
                   names=['label', 'sms_message'])
# Fix our response value
# encode labels as integers: ham -> 0, spam -> 1 (spam is the positive class)
df['label'] = df.label.map({'ham':0, 'spam':1})
# Split our dataset into training and testing data
# fixed random_state keeps the split reproducible across runs
X_train, X_test, y_train, y_test = train_test_split(df['sms_message'],
                                                    df['label'],
                                                    random_state=1)
# Instantiate the CountVectorizer method
count_vector = CountVectorizer()
# Fit the training data and then return the matrix
training_data = count_vector.fit_transform(X_train)
# Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer()
testing_data = count_vector.transform(X_test)
# Instantiate our model
naive_bayes = MultinomialNB()
# Fit our model to the training data
naive_bayes.fit(training_data, y_train)
# Predict on the test data
predictions = naive_bayes.predict(testing_data)
# Score our model
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions)))
print('Recall score: ', format(recall_score(y_test, predictions)))
print('F1 score: ', format(f1_score(y_test, predictions)))
# -
# ### Turns Out...
#
# It turns out that our naive bayes model actually does a pretty good job. However, let's take a look at a few additional models to see if we can't improve anyway.
#
# Specifically in this notebook, we will take a look at the following techniques:
#
# * [BaggingClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html#sklearn.ensemble.BaggingClassifier)
# * [RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier)
# * [AdaBoostClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html#sklearn.ensemble.AdaBoostClassifier)
#
# Another really useful guide for ensemble methods can be found [in the documentation here](http://scikit-learn.org/stable/modules/ensemble.html).
#
# These ensemble methods use a combination of techniques you have seen throughout this lesson:
#
# * **Bootstrap the data** passed through a learner (bagging).
# * **Subset the features** used for a learner (combined with bagging signifies the two random components of random forests).
# * **Ensemble learners** together in a way that allows those that perform best in certain areas to create the largest impact (boosting).
#
#
# In this notebook, let's get some practice with these methods, which will also help you get comfortable with the process used for performing supervised machine learning in python in general.
#
# Since you cleaned and vectorized the text in the previous notebook, this notebook can be focused on the fun part - the machine learning part.
#
# ### This Process Looks Familiar...
#
# In general, there is a five step process that can be used each type you want to use a supervised learning method (which you actually used above):
#
# 1. **Import** the model.
# 2. **Instantiate** the model with the hyperparameters of interest.
# 3. **Fit** the model to the training data.
# 4. **Predict** on the test data.
# 5. **Score** the model by comparing the predictions to the actual values.
#
# Follow the steps through this notebook to perform these steps using each of the ensemble methods: **BaggingClassifier**, **RandomForestClassifier**, and **AdaBoostClassifier**.
#
# > **Step 1**: First use the documentation to `import` all three of the models.
# Import the Bagging, RandomForest, and AdaBoost Classifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
# > **Step 2:** Now that you have imported each of the classifiers, `instantiate` each with the hyperparameters specified in each comment. In the upcoming lessons, you will see how we can automate the process to finding the best hyperparameters. For now, let's get comfortable with the process and our new algorithms.
# +
# Instantiate a BaggingClassifier with:
# 200 weak learners (n_estimators) and everything else as default values
bc = BaggingClassifier(n_estimators=200)
# Instantiate a RandomForestClassifier with:
# 200 weak learners (n_estimators) and everything else as default values
rf = RandomForestClassifier(n_estimators=200)
# Instantiate an AdaBoostClassifier with:
# 300 weak learners (n_estimators) and a learning_rate of 0.2
# BUG FIX: the learning_rate promised above was never actually passed,
# so AdaBoost silently ran with its default learning rate instead of 0.2.
ada = AdaBoostClassifier(n_estimators=300, learning_rate=0.2)
# -
# > **Step 3:** Now that you have instantiated each of your models, `fit` them using the **training_data** and **y_train**. This may take a bit of time, you are fitting 700 weak learners after all!
# +
# Fit your BaggingClassifier to the training data
# (fitting 200 + 200 + 300 estimators on the sparse bag-of-words matrix,
#  so these three cells can take a little while)
bc.fit(training_data, y_train)
# Fit your RandomForestClassifier to the training data
rf.fit(training_data, y_train)
# Fit your AdaBoostClassifier to the training data
ada.fit(training_data, y_train)
# -
# > **Step 4:** Now that you have fit each of your models, you will use each to `predict` on the **testing_data**.
# +
# Predict using BaggingClassifier on the test data
bc_pred = bc.predict(testing_data)
# Predict using RandomForestClassifier on the test data
rf_pred = rf.predict(testing_data)
# Predict using AdaBoostClassifier on the test data
ada_pred = ada.predict(testing_data)
# -
# > **Step 5:** Now that you have made your predictions, compare your predictions to the actual values using the function below for each of your models - this will give you the `score` for how well each of your models is performing. It might also be useful to show the naive bayes model again here.
def print_metrics(y_true, preds, model_name=None):
    '''
    Print accuracy, precision, recall, and F1 score for a set of predictions.

    INPUT:
    y_true - the y values that are actually true in the dataset (numpy array or pandas series)
    preds - the predictions for those values from some model (numpy array or pandas series)
    model_name - (str - optional) a name associated with the model if you would like to add it to the print statements

    OUTPUT:
    None - prints the accuracy, precision, recall, and F1 score
    '''
    # Fixes: `model_name == None` replaced with the idiomatic `is None`, and
    # the two near-duplicate print branches (which also labelled the metrics
    # inconsistently) collapsed into one via a shared label suffix.
    label = '' if model_name is None else ' for ' + model_name
    print('Accuracy score' + label + ': ', format(accuracy_score(y_true, preds)))
    print('Precision score' + label + ': ', format(precision_score(y_true, preds)))
    print('Recall score' + label + ': ', format(recall_score(y_true, preds)))
    print('F1 score' + label + ': ', format(f1_score(y_true, preds)))
    print('\n\n')
# +
# Compare all four models on the same held-out test set.
# Print Bagging scores
print_metrics(y_test, bc_pred, model_name='bagging')
# Print Random Forest scores
print_metrics(y_test, rf_pred, model_name='random forest')
# Print AdaBoost scores
print_metrics(y_test, ada_pred, model_name='adaboost')
# Naive Bayes Classifier scores
print_metrics(y_test, predictions, model_name='naive bayes')
# -
# ### Recap
#
# Now you have seen the whole process for a few ensemble models!
#
# 1. **Import** the model.
# 2. **Instantiate** the model with the hyperparameters of interest.
# 3. **Fit** the model to the training data.
# 4. **Predict** on the test data.
# 5. **Score** the model by comparing the predictions to the actual values.
#
# And that's it. This is a very common process for performing machine learning.
#
#
# ### But, Wait...
#
# You might be asking -
#
# * What do these metrics mean?
#
# * How do I optimize to get the best model?
#
# * There are so many hyperparameters to each of these models, how do I figure out what the best values are for each?
#
# **This is exactly what the last two lessons of this course on supervised learning are all about.**
#
# **Notice, you can obtain a solution to this notebook by clicking the orange icon in the top left!**
#
| Supervised_Learning/Spam_&_Ensembles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import bibtexparser
from collections import defaultdict
from lib.constants import *
from lib.util import *
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from collections import Counter
def print_dict(dictionary):
    """Print every key/value pair of *dictionary*, one ``key : value`` per line."""
    for key in dictionary:
        print(f"{key} : {dictionary[key]}")
with open('../map.bib') as bibtex_file:
    bib_db = bibtexparser.load(bibtex_file)
# keep only entries that have a (non-empty) 'problem' field
bib_db.entries = [i for i in bib_db.entries if i.get('problem', None)]
# distribution of publication years among the remaining entries
Counter([i['year'] for i in bib_db.entries])
# +
# BUG FIX: `vals` was commented out (### vals = []) while still being
# appended to inside the loop, which raised a NameError on the first match.
vals = []
# walk the entries from most- to least-cited
for entry in reversed(sorted(bib_db.entries, key=lambda x: int(x['num_citations']))):
    # entries whose comma-separated 'information' field mentions 'textual'
    if 'textual' in entry['information'].split(','):
        print('------------------------')
        print_dict(entry)
        # NOTE(review): this records the *string length* of the field, not the
        # number of comma-separated items — presumably intentional; confirm.
        vals.append(len(entry['information']))
np.median(vals)
| scripts/.ipynb_checkpoints/data_exploration-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pykat import finesse
from pykat.commands import *
import numpy as np
import matplotlib.pyplot as plt
import scipy
from IPython import display
import matplotlib.colors as colors
# NOTE(review): _colors_full_map is a private matplotlib API and may change
# between releases; `colors_list` is also unused below — candidate for removal.
colors_list = list(colors._colors_full_map.values())
# %matplotlib inline
pykat.init_pykat_plotting(dpi=200)  # configure pykat's matplotlib defaults at 200 dpi
# -
# FINESSE "kat" script for a two-mirror Fabry-Perot cavity: a laser (L0)
# exciting HG modes 0-10, with amplitude detectors on transmission.
# The xaxis sweeps the ETM radius of curvature (both axes kept equal via
# `put`), and `func g` records the cavity g-factor 1 - L/R at each step.
# The string content is consumed verbatim by FINESSE — do not reformat it.
basecode = """
#input laser
l L0 1 0 n0
const l 9 #cavity length
const fin 30 #change finesse from here
const pi 3.14159265359 #pi
const tr 0.1 #T of ETM, ITM
const L 0 #L of ETM, ITM
#HG modes
tem L0 0 0 1 0
tem L0 1 0 1 0
tem L0 2 0 1 0
tem L0 3 0 1 0
tem L0 4 0 1 0
tem L0 5 0 1 0
tem L0 6 0 1 0
tem L0 7 0 1 0
tem L0 8 0 1 0
tem L0 9 0 1 0
tem L0 10 0 1 0
#cavity
s s0 1 n0 nITM1
m1 ITM $tr $L 0 nITM1 nITM2
s scav $l nITM2 nETM1
m1 ETM $tr $L 0 nETM1 nETM2
#mirror Roc
attr ITM Rc 0
attr ETM Rcx 10
attr ETM Rcy 10
cav FP ITM nITM2 ETM nETM1
#plot g vs order
xaxis ETM Rcx lin 10 700 8000
func g = 1-($l/$x1)
put ETM Rcy $x1
#ad detectors
ad order0 0 0 0 nETM2
ad order1 1 0 0 nETM2
ad order2 2 0 0 nETM2
ad order3 3 0 0 nETM2
ad order4 4 0 0 nETM2
ad order5 5 0 0 nETM2
ad order6 6 0 0 nETM2
ad order7 7 0 0 nETM2
ad order8 8 0 0 nETM2
ad order9 9 0 0 nETM2
ad order10 10 0 0 nETM2
cp FP x finesse
maxtem 10
phase 2
#noplot Rc2
"""
# +
# parse the kat script and run the sweep; `out` holds one array per detector
basekat = finesse.kat()
basekat.verbose = 1
basekat.parse(basecode)
out = basekat.run()
out.info()
#out.plot()
# +
y=[]
x= out['g']
colors = ['b','g','r','c','m','y','k','teal','violet','pink','olive']
plt.figure(figsize=(8,6))
#append all output detectors in an array
# y[k] holds order k+1 normalized by the fundamental (order0)
for i in range(0,10,1):
    y.append(out['order'+str(i+1)]/out['order0'])
#plot all outputs
# NOTE(review): label 'order'+str(k) is off by one relative to y[k]
# (which is order k+1); harmless here because the legend call below
# supplies its own correctly numbered labels.
for k in range(0,10,1):
    plt.semilogy(x,y[k],antialiased=False,label='order'+str(k),c=colors[k])
#label and other stuff
plt.grid(linewidth=1)
plt.legend(["order1","order2","order3","order4","order5","order6","order7","order8","order9","order10"],loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("g (1-L/R) \n Finesse = "+str(out['FP_x_finesse'][1]))
plt.ylabel("HG modes intensity(rel to fund. mode)",verticalalignment='center')
# -
| Fabry Perot Cavity/HG_resonance_v1.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
class NeuronCell(object):
    """A single spiking-neuron cell with weighted input/output connections.

    Data and weights are kept in per-port ('in'/'out') dictionaries keyed by
    the ID of the connected neuron. A cell fires when its aggregated input
    exceeds ``self.threshold``; the output is distributed to connected
    targets scaled by the per-connection output weights.
    """

    def __init__(self, ID):
        # own ID
        self.id = ID
        # maximum magnitude of a single datum (documented intent; not enforced here)
        self.dataMax = 100
        # per-port data buffers: {neuron_id: value}
        self.data = {
            'in': {},
            'out': {}
        }
        # per-port connection weights: {neuron_id: weight, capped at 1}
        self.weights = {
            'in': {},
            'out': {}
        }
        # aggregated input awaiting a fire decision
        self.totalData = 0
        # firing threshold
        self.threshold = 15
        # maximum number of input/output connections
        self.portMax = {
            'in': 1000,
            'out': 1000
        }
        # current number of connections per port
        self.connected = {
            'in': 0,
            'out': 0
        }
        # cached set of target-neuron IDs (refreshed on every (dis)connect)
        self.targetNeurons = None

    def setMaxConnection(self, maxIn=1000, maxOut=1000):
        """Set the maximum number of 'in' and 'out' connections."""
        self.portMax['in'], self.portMax['out'] = maxIn, maxOut

    def conn(self, conn, port, ID):
        """Connect (conn=True) or disconnect (conn=False) neuron *ID* on *port*.

        Returns True on success, False when a new connection is refused
        because the port is full.
        """
        if conn:
            # BUG FIX: the capacity check used to apply to disconnects too,
            # making it impossible to disconnect from a full port.
            if self.connected[port] >= self.portMax[port]:
                # connection refused: port full
                return False
            self.data[port][ID] = 0
            self.weights[port][ID] = 0.5  # new connections start at half weight
            self.connected[port] += 1
        else:
            del self.data[port][ID]
            del self.weights[port][ID]
            self.connected[port] -= 1
        # keep dicts ordered by neuron ID so data/weight arrays stay aligned
        # (BUG FIX: original called the non-existent self.sortByID)
        self.sortById(port)
        self.targetNeurons = set(self.data['out'].keys())
        # connection succeeded
        return True

    def inputData(self, data, sourceID):
        """Deposit *data* arriving from neuron *sourceID* into the input buffer."""
        self.data['in'][sourceID] = data

    def prepareForFire(self):
        """Aggregate buffered inputs into totalData and clear the input buffer."""
        if not self.data['in']:
            return  # no input connections; avoid division by zero
        # both dicts are sorted by ID (see conn), so the arrays line up
        inputs = np.array(list(self.data['in'].values()), dtype=np.float16)
        weightsIn = np.array(list(self.weights['in'].values()), dtype=np.float16)
        # weighted mean of the inputs
        self.totalData = np.sum(inputs * weightsIn) / len(self.data['in'])
        for Id in self.data['in']:
            self.data['in'][Id] = 0

    def setTotalData(self, data):
        """Directly set the aggregated input (used for sensory injection)."""
        self.totalData = data

    def fire(self):
        """Emit the aggregated data to all output connections if over threshold.

        Returns True when the neuron fired, False otherwise.
        """
        if self.totalData > self.threshold:
            # BUG FIX: np.array over the dict itself does not yield its values
            weightsOut = np.array(list(self.weights['out'].values()), dtype=np.float16)
            dataOut = self.leakyRelu(weightsOut * self.totalData).tolist()
            IDs = list(self.data['out'].keys())
            self.data['out'] = dict(zip(IDs, dataOut))
            self.totalData = 0
            # fired successfully
            return True
        # did not fire
        return False

    def transToTarget(self, neurons):
        """Deliver buffered output data to the target neurons in *neurons*.

        Returns the set of target-neuron IDs.
        """
        # BUG FIX: original iterated the dict directly while unpacking pairs
        for Id, data in self.data['out'].items():
            neurons[Id].inputData(data, self.id)
            self.data['out'][Id] = 0
        return self.targetNeurons

    def sortById(self, port):
        """Sort data/weight dicts by neuron ID for *port* ('all' = both ports)."""
        ports = ('in', 'out') if port == 'all' else (port,)
        for p in ports:
            self.data[p] = dict(sorted(self.data[p].items()))
            self.weights[p] = dict(sorted(self.weights[p].items()))

    def leakyRelu(self, x, p=0.01):
        """Leaky ReLU activation: elementwise max(x, p*x)."""
        return np.maximum(x, x*p)

    def updateWeight(self, port, neuronID, times):
        """Multiply one connection weight by *times*, capped at 1."""
        self.weights[port][neuronID] = min(self.weights[port][neuronID] * times, 1)

    def stdp(self, fire, totalNeurons):
        """STDP-style learning: if this neuron fired, strengthen all of its
        connections (and the matching 'out' weights of its presynaptic
        neurons); otherwise weaken them.
        """
        # (1.7**0.5 ~= 1.3, 0.8**0.5 ~= 0.89) — split factors per port
        weightTimes = {
            'in': (1.25, 0.95),
            'out': (1.4, 0.85)
        }
        index = 0 if fire else 1
        for port, weights in self.weights.items():
            for ID in weights:
                # strengthen if this neuron fired, weaken otherwise
                self.updateWeight(port, ID, weightTimes[port][index])
                if port == 'in':
                    totalNeurons[ID].updateWeight('out', self.id, weightTimes['out'][index])

    def getParams(self):
        """Return a snapshot of serializable state (weights/threshold/counts)."""
        return {
            'weights': self.weights,
            'threshold': self.threshold,
            'connected': self.connected
        }
class Brain(object):
    """A network of NeuronCell objects organised into input/think/output sections."""

    def __init__(self):
        # total number of neurons created so far (also the next free ID)
        self.neuronNum = 0
        # adjacency matrices:
        #   Adjacency['from'][i][j] == 1 means neuron i receives from neuron j
        #   Adjacency['to'][i][j]   == 1 means neuron i sends to neuron j
        self.Adjacency = {
            'from': None,
            'to': None
        }
        # all neurons, id: neuron
        self.totalNeurons = {}
        # neuron groups per functional section
        self.sections = {
            'input': dict(),
            'think': dict(),
            'output': dict()
        }
        # two-stage queue of neuron IDs ready to fire (0 = current, 1 = next)
        self.readyNeurons = {
            0: set(),
            1: set()
        }

    def createNeuron(self, number):
        """Create *number* new neurons and grow the adjacency matrices."""
        newNum = self.neuronNum + number
        for ID in range(self.neuronNum, newNum):
            self.totalNeurons[ID] = NeuronCell(ID)
        # BUG FIX: original read/wrote the misspelled attribute self.Adjaceny
        old = self.Adjacency
        self.Adjacency = {
            'from': np.zeros((newNum, newNum)),
            'to': np.zeros((newNum, newNum))
        }
        if old['from'] is not None:
            # BUG FIX: chained [0:n][0:n] indexing selects rows twice;
            # a single 2-D slice copies the old matrix into the new one.
            self.Adjacency['from'][:self.neuronNum, :self.neuronNum] = old['from']
            self.Adjacency['to'][:self.neuronNum, :self.neuronNum] = old['to']
        self.neuronNum = newNum

    def deleteNeuron(self, IDs):
        """Remove the given neuron IDs; unknown IDs are silently ignored."""
        for ID in IDs:
            self.totalNeurons.pop(ID, None)

    def connectNeuron(self, sourceID, targetID):
        """Create a one-way connection sourceID -> targetID."""
        # BUG FIX: original referenced undefined locals `source`/`target`
        # and the misspelled `sourceId`/`targetId`.
        self.totalNeurons[sourceID].conn(True, 'out', targetID)
        self.totalNeurons[targetID].conn(True, 'in', sourceID)
        self.Adjacency['from'][targetID][sourceID] = 1
        self.Adjacency['to'][sourceID][targetID] = 1

    def disconnect(self, sourceID, targetID):
        """Remove the one-way connection sourceID -> targetID."""
        self.totalNeurons[sourceID].conn(False, 'out', targetID)
        self.totalNeurons[targetID].conn(False, 'in', sourceID)
        self.Adjacency['from'][targetID][sourceID] = 0
        self.Adjacency['to'][sourceID][targetID] = 0

    def assignNeuron(self, section, index, neuronIds):
        """Add neuron IDs to the existing group self.sections[section][index]."""
        for ID in neuronIds:
            self.sections[section][index].add(ID)

    def fullConn(self, neuronIDs):
        """Fully connect the group: every ordered pair, no self-connections."""
        # BUG FIX: original iterated an undefined name `neurons` and
        # connected each unordered pair twice.
        for ID1 in neuronIDs:
            for ID2 in neuronIDs:
                if ID1 != ID2:
                    self.connectNeuron(ID1, ID2)

    def oneWayFullConn(self, source, target):
        """Connect every neuron in *source* to every neuron in *target*."""
        for ID_From in source:
            for ID_To in target:
                self.connectNeuron(ID_From, ID_To)

    def addGroup(self, section, ID, neuronIDs):
        """Register a named group of neuron IDs under a section."""
        self.sections[section][ID] = set(neuronIDs)

    def initialConn(self, **kwargs):
        """Build the initial wiring: inputs -> think core -> outputs.

        kwargs: inputVision, inputHearing, outputVision, outputHearing,
        think0 (each a sequence of neuron IDs).
        """
        # MAP 0: create sections and assign neurons
        # input 0: vision, 1: hearing
        self.addGroup('input', 0, kwargs['inputVision'])
        self.addGroup('input', 1, kwargs['inputHearing'])
        # output 0: draw (vision), 1: sound (hearing)
        # BUG FIX: these were registered under 'input', clobbering the two
        # input groups and leaving 'output' empty (KeyError further down).
        self.addGroup('output', 0, kwargs['outputVision'])
        self.addGroup('output', 1, kwargs['outputHearing'])
        # think 0: Brain0 core, plus interface slices:
        #   receivingN = part of Brain0 receiving input N
        #   sendN      = part of Brain0 feeding output N
        self.addGroup('think', 0, kwargs['think0'])
        self.addGroup('think', 'receiving0', kwargs['think0'][0:len(kwargs['inputVision'])])
        self.addGroup('think', 'receiving1', kwargs['think0'][0:len(kwargs['inputHearing'])])
        self.addGroup('think', 'send0', kwargs['think0'][0:len(kwargs['outputVision'])])
        self.addGroup('think', 'send1', kwargs['think0'][0:len(kwargs['outputHearing'])])
        # CONNECT
        # Input ==> Brain0, one way
        self.oneWayFullConn(self.sections['input'][0], self.sections['think']['receiving0'])
        self.oneWayFullConn(self.sections['input'][1], self.sections['think']['receiving1'])
        # fully connect brain0
        self.fullConn(self.sections['think'][0])
        # Brain0 ==> Output, one way
        self.oneWayFullConn(self.sections['think']['send0'], self.sections['output'][0])
        self.oneWayFullConn(self.sections['think']['send1'], self.sections['output'][1])

    def think(self, steps=1000):
        """Run the fire/propagate loop for *steps* iterations."""
        for _ in range(steps):
            for id_ready in self.readyNeurons[0]:
                neuron = self.totalNeurons[id_ready]
                neuron.prepareForFire()
                if neuron.fire():
                    # BUG FIX: original misspelled self.totalNeurons
                    self.readyNeurons[1] |= neuron.transToTarget(self.totalNeurons)
            # promote the next-step queue and start a fresh one
            # (BUG FIX: original aliased both slots to the same set and never
            # cleared it, so the queue only ever grew and the next iteration
            # would mutate the set it was iterating over)
            self.readyNeurons[0] = self.readyNeurons[1]
            self.readyNeurons[1] = set()

    def inputs(self, data, sectID):
        """Feed pre-scaled data (raw * dataMax / raw_max) into input section *sectID*.

        Returns True on success, False when there are more data points than
        input neurons.
        """
        neuronIDs = self.sections['input'][sectID]
        if len(data) > len(neuronIDs):
            print('data length more than number of input neurons')
            return False
        # BUG FIX: original enumerated an undefined name `neurons`
        for index, ID in enumerate(neuronIDs):
            self.totalNeurons[ID].setTotalData(data[index])
            self.readyNeurons[0].add(ID)
        return True

    def outputs(self, sectID, maxData, minData):
        """Read totalData from output section *sectID*, clipped to [minData, maxData]."""
        neuronIDs = self.sections['output'][sectID]
        outputs = np.empty(len(neuronIDs))
        for i, ID in enumerate(neuronIDs):
            outputs[i] = self.totalNeurons[ID].totalData
        outputs[outputs > maxData] = maxData
        outputs[outputs < minData] = minData
        return outputs

    def updateWeights(self):
        """Placeholder for a global learning rule."""
        pass

    def save(self):
        """Collect all serializable network parameters and return them."""
        params = {
            'neuronNum': self.neuronNum,
            'Adjacency': self.Adjacency,
            'sections': self.sections,
            'neuronParams': {}
        }
        # BUG FIX: original iterated the dict directly while unpacking pairs,
        # and never returned the collected parameters.
        for ID, neuron in self.totalNeurons.items():
            params['neuronParams'][ID] = neuron.getParams()
        return params
| Brain01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt;
import numpy as np;
import pandas as pd;
# %matplotlib inline
# -
x = np.linspace(0,10,20)   # 20 evenly spaced points in [0, 10]
y = np.random.randint(0,50,20)   # 20 random integers in [0, 50)
plt.plot(x,y)
# NOTE: np.sort returns a sorted copy; this bare call discards its result
np.sort(y)
y = np.sort(y)
plt.plot(x,y)
# colour shorthand string
plt.plot(x,y,'r')
# explicit hex colours
plt.plot(x,y,color='#ffb546')
plt.plot(x,y,color='#262626')
plt.xlabel('X-axis')
plt.ylabel('Y-axis')
plt.title('Learning Matplotlib')
plt.show()
# BUG FIX: y2 was used two lines before it was assigned (NameError when the
# file is run top to bottom); define it before the first plot that needs it.
y2 = y*x
# single figure: both curves with marker/line style strings
plt.plot(x,y,'ro-',markersize=5)
plt.plot(x,y2,'b*--')
# same two curves again, side by side as subplots
plt.subplot(1,2,1)
plt.plot(x,y,'ro-',markersize=5)
plt.subplot(1,2,2)
plt.plot(x,y2,'b*--')
plt.show()
# ### Matlab vs Matplotlib
# object-oriented API: explicit figure + axes instead of the pyplot state machine
fig = plt.figure()
axes = fig.add_axes([0.1,0.1,1,1])
axes.plot(x,y,'r')
axes.set_xlabel('xLabel')
axes.set_ylabel('yLabel')
axes.set_title('Title')
# +
# inset axes: ax2 is drawn on top of ax1 at figure-relative coordinates
fig = plt.figure()
ax1 = fig.add_axes([0,0,1,1])
ax2 = fig.add_axes([0.1,0.6,0.4,0.3])
ax1.plot(x,y,'r')
ax1.set_xlabel('xLabel')
ax1.set_ylabel('yLabel')
ax1.set_title('Title')
ax2.plot(x,y2,'b')
ax2.set_xlabel('xLabel')
ax2.set_ylabel('yLabel')
ax2.set_title('Title')
# -
fig,ax = plt.subplots(1,2)
ax[0].plot(x,y,'b')
ax[1].plot(x,y2,'r')
fig,ax = plt.subplots(figsize=(8,4),dpi=100)
ax.plot(x,y,'r')
fig,ax = plt.subplots()
ax.plot(x,y,'bH--')
plt.scatter(x,y)
plt.hist(y)
# NOTE(review): the loop variable `std` is unused — each of the 3 samples is
# drawn with the same scale 3.3; presumably `std` was meant to vary the scale.
data = [np.random.normal(0,3.3,100) for std in range(0,3)]
data
plt.boxplot(data,vert=True, patch_artist=True)
import seaborn as sns;
sns.boxplot(data,orient='h')
fig,ax = plt.subplots(1,2, figsize=(10,4))
ax[0].plot(x,y,x,y2)
ax[1].plot(x,np.exp(x))
ax[1].set_yscale('log')   # log y-axis makes the exponential a straight line
fig.tight_layout()
| Matplotlib - Crash Course/Matplotlib Crash Course.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementing Adaline Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# # Loading Iris Data set
column = ['sepal length', 'sepal width', 'petal length', 'petal width', 'class']
df = pd.read_csv('iris.data', names = column, header = None)
df.info()
# For easiness of using data we will select 2 features, one is sepal length and one is petal length and 2 classes only
# first 100 rows of the iris file are setosa then versicolor (50 each)
X = df.iloc[:100,[0,2]].values
y = df.iloc[:100,-1].values
np.unique(y)
# binary labels: setosa -> +1, everything else (versicolor) -> -1
y = np.where(y == 'Iris-setosa',1,-1)
# # Model
# Adaline is an improvement on the perceptron: it updates its weights using a continuous activation function rather than the threshold (unit step) function. This makes the cost function differentiable and optimizable, because the cost function used is convex.
# +
class Adaline:
    """ADAptive LInear NEuron classifier.

    Trains by full-batch gradient descent on the sum-of-squared-errors cost
    using an identity activation; predictions threshold the net input at 0.
    The per-epoch cost is recorded in ``self.costs_``.
    """

    def __init__(self, epoch = 50, learning_rate = 0.001, random_state = 42):
        self.epoch = epoch
        self.learning_rate = learning_rate
        self.random_state = random_state

    def fit(self,X,y):
        """Learn weights from training data X (n_samples, n_features) and labels y."""
        # small random initial weights; w_[0] is the bias term
        rng = np.random.RandomState(self.random_state)
        self.w_ = rng.normal(loc = 0.0, scale = 0.001, size = 1 + X.shape[1])
        self.costs_ = []  # cost per epoch, for convergence plots
        for _ in range(self.epoch):
            # identity activation, so the "output" is just the net input
            deviations = y - self.activation(self.net_input(X))
            # one batch gradient-descent step on weights and bias
            self.w_[1:] += self.learning_rate * X.T.dot(deviations)
            self.w_[0] += self.learning_rate * deviations.sum()
            self.costs_.append((deviations ** 2).sum() / 2.0)
        return self

    def net_input(self,X):
        """Weighted sum of the inputs plus the bias."""
        return np.dot(X,self.w_[1:]) + self.w_[0]

    def predict(self,X):
        """Class label (+1/-1) from the sign of the activated net input."""
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)

    def activation(self,X):
        """Identity activation f(x) = x."""
        return X
# -
# # Training our model
# We will try two learning rates, 0.01 and 0.0001; finding a good learning rate is largely experimental.
# +
# Compare the same model under two learning rates: 0.01 overshoots the
# minimum (cost grows), 0.0001 converges slowly. Costs are shown on a
# log10 scale. Fix: the two panels previously used inconsistent axis
# labels ('Epochs' vs 'Epoch', 'Sum' vs 'sum'); they now match.
fig, ax = plt.subplots(nrows = 1, ncols = 2 , figsize = (10,4))
ada1 = Adaline(learning_rate=0.01, epoch = 10).fit(X,y)
ada2 = Adaline(learning_rate = 0.0001, epoch = 10).fit(X,y)
for axis, model, rate in zip(ax, (ada1, ada2), ('0.01', '0.0001')):
    axis.plot(range(1, len(model.costs_) + 1), np.log10(model.costs_), marker = 'o')
    axis.set_xlabel('Epochs')
    axis.set_ylabel('log(Sum of square Errors)')
    axis.set_title('Adaline - Learning rate ' + rate)
# -
# You can observe that with learning rate 0.01 the errors grow large: the updates overshoot the global minimum. This is why the learning rate must be chosen carefully.
# # Decision Plot
# +
def decision_plot(X, y, classifier, resolution = 0.02):
    """Plot the classifier's decision regions over the two feature columns of X.

    Evaluates ``classifier.predict`` on a mesh grid spanning the data (with a
    1-unit margin on each side) and overlays the training points, using one
    colour/marker per class.
    """
    # Fix: 'skyblue' replaces 'sky', which is not a valid matplotlib colour
    # name and would raise as soon as a fourth class appeared; an 'x' marker
    # is added so markers cover as many classes as the colour list does.
    color = ('red','green','blue','skyblue','violet','pink')
    markers = ('^','v','s','o','*','x')
    cmap = ListedColormap(color[:len(np.unique(y))])
    # Mesh bounds: data range padded by 1 unit on every side.
    x1_min, x1_max = X[:,0].min() - 1, X[:,0].max() + 1
    x2_min, x2_max = X[:,1].min() - 1, X[:,1].max() + 1
    xx1, xx2 = np.meshgrid(
        np.arange(x1_min, x1_max, resolution),
        np.arange(x2_min, x2_max, resolution)
    )
    # Predict on every grid point, then shape the labels back into the grid.
    Z = classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, cmap = cmap, alpha = 0.7)
    # Scatter the training samples of each class on top of the regions.
    for ix , cl in enumerate(np.unique(y)):
        plt.scatter(x = X[y==cl,0], y = X[y==cl,1], label = cl, edgecolor = 'black', marker = markers[ix], color = color[ix])
    plt.legend(loc = 'upper left')
# +
# Show both decision boundaries side by side.
# Fixes: the original called `plt.figure(ax, figsize=...)`, passing an Axes
# where a figure identifier is expected; and it labelled the y-axis
# 'Sepal width' although the second column of X is petal length
# (X = df.iloc[:100, [0, 2]]).
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
decision_plot(X, y, ada1)
plt.xlabel('Sepal length')
plt.ylabel('Petal length')
plt.title('Adaline - Learning rate 0.01')
plt.subplot(1, 2, 2)
decision_plot(X, y, ada2)
plt.xlabel('Sepal length')
plt.ylabel('Petal length')
plt.title('Adaline - Learning rate 0.0001')
# +
# plt.figure?
# -
| .ipynb_checkpoints/Adaline-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow-GPU
# language: python
# name: tf-gpu
# ---
# # Classifier for Inception Score
#
# by <NAME> and <NAME>
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
# # Data preprocessing
# +
# Download MNIST: a training set plus a held-out set of images and labels.
mnist = tf.keras.datasets.mnist.load_data()
(X_train, Y_train), (X_test_validation, Y_test_validation) = mnist
# One-hot encode the digit labels for the softmax classifier.
Y_train = tf.keras.utils.to_categorical(Y_train)
Y_test_validation = tf.keras.utils.to_categorical(Y_test_validation)
# Split the held-out set 50/50: first half -> test, second half -> validation.
X_validation = X_test_validation[len(X_test_validation)//2:]
X_test = X_test_validation[:len(X_test_validation)//2]
Y_validation = Y_test_validation[len(Y_test_validation)//2:]
Y_test = Y_test_validation[:len(Y_test_validation)//2]
# -
def normalize(images):
    """Map raw pixel intensities from [0, 255] onto [-1, 1]."""
    midpoint = 127.5
    return (images - midpoint) / midpoint
# Scale all pixel values into [-1, 1].
X_train = normalize(X_train)
X_validation = normalize(X_validation)
X_test = normalize(X_test)
# A sample image, used only to read the spatial dimensions.
image = X_train[0]
IMAGE_WIDTH = image.shape[0]
IMAGE_HEIGHT = image.shape[1]
BATCH_SIZE = 256
# Shuffle over the full training set, then batch for the training loop.
train = tf.data.Dataset.from_tensor_slices(tuple((X_train, Y_train))).shuffle(len(X_train)).batch(BATCH_SIZE)
# # Activation Functions
# The code is from tensorflow.contrib.layers.maxout
import tensorflow as tf
def maxout(inputs, num_units, axis=-1):
    '''
    Maxout activation: splits the `axis` dimension of `inputs` into
    `num_units` groups and keeps only the maximum within each group.

    inputs: Tensor input
    num_units: number of units kept after the maxout reduction; must evenly
        divide the size of `axis`
    axis: the dimension the max operation is performed over

    Note: slightly modified from tensorflow.contrib.layers.maxout; some
    unused API functions were replaced.
    '''
    shape = inputs.get_shape().as_list()
    num_channels = shape[axis]
    if num_channels % num_units:
        raise ValueError('number of features({}) is not '
                         'a multiple of num_units({})'.format(
                             num_channels, num_units))
    # Reshape target: `axis` becomes -1 (inferred) and a trailing group
    # dimension of size num_channels // num_units is appended.
    shape[axis] = -1
    shape += [num_channels // num_units]
    # The static shape may contain None (e.g. the batch dimension); substitute
    # the dynamic size so tf.reshape works for arbitrary batch sizes.
    for i in range(len(shape)):
        if shape[i] is None:
            shape[i] = tf.shape(inputs)[i]
    # Reduce over the trailing group dimension to implement the max.
    outputs = tf.reduce_max( tf.reshape(inputs, shape), -1)
    return outputs
# # Layer Functions
# +
def denseRelu(inputs, weights, bias, leaky_relu_alpha = 0.2):
    """Fully connected layer followed by a leaky-ReLU activation."""
    pre_activation = tf.nn.bias_add(tf.matmul(inputs, weights), bias)
    return tf.nn.leaky_relu(pre_activation, alpha=leaky_relu_alpha)
def denseMaxout(inputs, weights, bias, num_of_units=2, dropout_rate=0.5):
    """Fully connected layer with dropout, followed by a maxout activation."""
    pre_activation = tf.nn.bias_add(tf.matmul(inputs, weights), bias)
    dropped = tf.nn.dropout(pre_activation, rate=dropout_rate)
    return maxout(dropped, num_of_units)
# -
# # Parameter Initialization
# +
# Glorot-normal initialisation for weights, zeros for biases.
initializer = tf.initializers.glorot_normal()
bias_initializer = tf.initializers.zeros()

def get_biases(n_units, name):
    """Create a trainable float64 bias vector initialised to zeros."""
    values = bias_initializer(n_units, dtype=tf.float64)
    return tf.Variable(values, name = name, trainable = True, dtype=tf.float64)

def get_weights(shape, name):
    """Create a trainable float64 weight tensor with Glorot-normal init."""
    values = initializer(shape, dtype=tf.float64)
    return tf.Variable(values, name = name, trainable = True, dtype=tf.float64)
# +
# Bias vectors, one per dense layer: two pre-maxout layers of width 1200 and
# the 10-way output layer.
biases = [
    get_biases(1200, 'bias0'),
    get_biases(1200, 'bias1'),
    get_biases(10, 'bias2')
]
# Weight matrices: flattened image (H*W) -> 1200 (maxout keeps 240) ->
# 1200 (maxout keeps 240) -> 10 output classes (see `classifier` below).
weights = [
    get_weights([image.shape[0] * image.shape[1], 1200], 'weights0'),
    get_weights([240, 1200], 'weights1'),
    get_weights([240, 10], 'weights2'),
]
# All trainable variables, in the order the optimizer receives gradients.
parameters = weights + biases
# -
# # Model
# +
# Dropout rate applied inside each maxout layer's forward pass.
dropout_rate = 0.5

@tf.function
def classifier(x):
    """Forward pass: flatten the images, apply two dense-maxout layers
    (240 units kept each), then a 10-way softmax over digit classes."""
    x = tf.cast(x, dtype=tf.float64)
    # Flatten (batch, H, W) -> (batch, H*W). NOTE(review): relies on the
    # static x.shape inside tf.function — confirm it traces correctly for a
    # final partial batch.
    x = tf.reshape(x, shape=[x.shape[0], x.shape[1] * x.shape[2]])
    d1 = denseMaxout(x, weights[0], biases[0], num_of_units=240, dropout_rate=dropout_rate)
    d2 = denseMaxout(d1, weights[1], biases[1], num_of_units=240, dropout_rate=dropout_rate)
    return tf.nn.softmax(tf.nn.bias_add(tf.matmul(d2, weights[2]), biases[2]))
# -
# # Measurement Functions
# +
def calculate_F1(input, true_labels, message):
    """Compute and print macro-averaged precision, recall and F1 score.

    `input` is a batch of images fed through `classifier`; `true_labels` is
    the matching one-hot label matrix. Prints `message` followed by the
    three metrics.
    """
    # Argmax predictions as one-hot rows, then transpose so each row of the
    # *_T matrices corresponds to one class across all samples.
    predicted = tf.convert_to_tensor([tf.one_hot(tf.argmax(t), depth = 10) for t in classifier(input)])
    predicted_T = predicted.numpy().T
    true_labels_T = true_labels.T
    # Fix: derive the class and sample counts from the data instead of the
    # previously hard-coded ranges (10 classes, 5000 samples), so the function
    # works for any evaluation-set size.
    n_classes, n_samples = predicted_T.shape
    total_positive = np.sum(predicted_T, axis=1)
    # True positive: predicted 1 and labelled 1 for that class.
    true_positive = np.sum([[predicted_T[i][j] == true_labels_T[i][j] == 1 for j in range(n_samples)] for i in range(n_classes)], axis = 1)
    # False negative: predicted 0 but labelled 1 for that class.
    false_negative = np.sum([[abs(predicted_T[i][j]-1) == true_labels_T[i][j] == 1 for j in range(n_samples)] for i in range(n_classes)], axis = 1)
    precision = np.average(true_positive / total_positive)
    recall = np.average(true_positive / (true_positive + false_negative))
    F_1 = 2 * ((precision * recall) / (precision + recall))
    print(message)
    print("Precision:", precision)
    print("Recall:", recall)
    print("F1:", F_1)
def accuracy(input, true_labels, message):
    """Print `message` followed by the fraction of samples whose argmax
    prediction matches the one-hot label."""
    predicted = tf.convert_to_tensor([tf.one_hot(tf.argmax(t), depth = 10) for t in classifier(input)])
    matches = [(tf.argmax(p) == tf.argmax(t)).numpy() for p, t in zip(predicted, true_labels)]
    print(message, sum(matches) / len(matches))
# -
# # Hyperparameters
# Categorical cross-entropy on the softmax output of `classifier`.
cross_entropy = tf.keras.losses.CategoricalCrossentropy()
# Adam with a fixed 1e-4 learning rate.
optimizer = tf.keras.optimizers.Adam(1e-4)
EPOCHS = 300
# # Training
def train_step(images, true_labels):
    """Run one gradient-descent step on a batch and return the batch loss."""
    with tf.GradientTape() as tape:
        predictions = classifier(images)
        loss = cross_entropy(true_labels, predictions)
    grads = tape.gradient(loss, parameters)
    optimizer.apply_gradients(zip(grads, parameters))
    return loss
# +
from IPython.display import clear_output
# Training loop with a live per-batch progress readout.
for e in range(EPOCHS):
    # Fix: the original condition `e + 1 % 10 == 0` parses as
    # `e + (1 % 10) == 0`, which is never true for e >= 0, so the validation
    # metrics never ran. Parenthesise to report every 10th epoch.
    if (e + 1) % 10 == 0:
        calculate_F1(X_validation, Y_validation, "Validation set Measurements: ")
    loss_accumulator = 0
    for i, batch in enumerate(train):
        loss_accumulator += train_step(batch[0], batch[1])
        clear_output(wait=True)
        # Fix: use '/' as the epoch separator (the original printed a
        # literal backslash via "\\").
        print("Epochs: " + str(e+1) + "/" + str(EPOCHS))
        # NOTE(review): this divides a single batch's loss by the number of
        # batches and resets each iteration — confirm the intended readout.
        print("Loss: ", loss_accumulator.numpy()/len(train))
        loss_accumulator = 0
# -
# # Measurement for Test Set
# Final evaluation on the untouched test split.
accuracy(X_test, Y_test, "Accuracy:")
calculate_F1(X_test, Y_test, "Test set Measurements:")
# # Saving the model
# Persist the learned parameters as plain NumPy arrays.
# NOTE(review): np.save does not create directories — './Classifier_params/'
# must already exist; confirm before running.
np_weights = [w.numpy() for w in weights]
np_biases = [w.numpy() for w in biases]
np.save('./Classifier_params/weights.npy', np_weights)
np.save('./Classifier_params/biases.npy', np_biases)
| Inception_Classifier_Maxout.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hello World, `pyhf` style
# **Two bin counting experiment with a background uncertainty**
import pyhf
# **Returning the observed and expected** $\mathrm{CL}_{s}$
# Two-bin counting model: per-bin signal and background expectations with
# per-bin background uncertainties (the auxiliary data carry the constraints).
pdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0])
# Hypothesis test at signal strength mu = 1; the observed main data [51, 48]
# is extended with the model's auxiliary (constraint) data.
CLs_obs, CLs_exp = pyhf.infer.hypotest(1.0, [51, 48] + pdf.config.auxdata, pdf, return_expected=True)
print('Observed: {}, Expected: {}'.format(CLs_obs, CLs_exp))
# **Returning the observed** $\mathrm{CL}_{s}$, $\mathrm{CL}_{s+b}$, **and** $\mathrm{CL}_{b}$
CLs_obs, p_values = pyhf.infer.hypotest(1.0, [51, 48] + pdf.config.auxdata, pdf, return_tail_probs=True)
print('Observed CL_s: {}, CL_sb: {}, CL_b: {}'.format(CLs_obs, p_values[0], p_values[1]))
# A reminder that
# $$
# \mathrm{CL}_{s} = \frac{\mathrm{CL}_{s+b}}{\mathrm{CL}_{b}} = \frac{p_{s+b}}{1-p_{b}}
# $$
# NOTE(review): exact float equality relies on pyhf computing CL_s as this
# literal ratio internally; a tolerance-based comparison would be more robust.
assert CLs_obs == p_values[0]/p_values[1]
# **Returning the expected** $\mathrm{CL}_{s}$ **band values**
import numpy as np
CLs_obs, CLs_exp_band = pyhf.infer.hypotest(1.0, [51, 48] + pdf.config.auxdata, pdf, return_expected_set=True)
print('Observed CL_s: {}\n'.format(CLs_obs))
# `p_value` is just the index into the 5-entry expected band; `n_sigma` runs
# over -2..+2 standard deviations.
for p_value, n_sigma in enumerate(np.arange(-2,3)):
    print('Expected CL_s{}: {}'.format(' ' if n_sigma==0 else '({} σ)'.format(n_sigma),CLs_exp_band[p_value]))
| docs/examples/notebooks/hello-world.ipynb |