text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import manage_data
from text_network import TextNetwork
from tensorflow.contrib import learn
```
### Set Parameters
```
# ---------------------------------------------------------------------------
# Command-line flags for the text-CNN training script (TensorFlow 1.x API).
# ---------------------------------------------------------------------------
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
# FIX: typo in user-visible help text ("regularizaion" -> "regularization")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
# NOTE(review): _parse_flags() and the __flags dict are private TF 1.x APIs
# removed in later releases (use FLAGS.flag_values_dict() on TF >= 1.5) —
# confirm the installed TensorFlow version.
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")
```
### Data Preparation
```
# Load data
print("Loading data...")
# x_text: list of raw sentence strings; y: label matrix — produced by the
# project-local manage_data module (schema assumed one-hot rows; confirm there).
x_text, y = manage_data.load_data_and_labels()
# Build vocabulary: map each word to an integer id and pad every document
# to the length of the longest one (whitespace tokenization).
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
# Randomly shuffle data (fixed seed so the split is reproducible)
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Split train/test set: hold out the last 1000 shuffled examples as dev set
# TODO: This is very crude, should use cross-validation
x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:]
y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:]
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
```
### Training The Model
```
# Build the computation graph, create a session, and run the training loop.
with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # CNN model; architecture is defined in text_network.TextNetwork.
        cnn = TextNetwork(
            sequence_length=x_train.shape[1],
            num_classes=y_train.shape[1],
            vocab_size=len(vocab_processor.vocabulary_),
            embedding_size=FLAGS.embedding_dim,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters,
            l2_reg_lambda=FLAGS.l2_reg_lambda)
        # Define Training procedure
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(1e-3)  # NOTE: fixed learning rate, not a flag
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
        # Keep track of gradient values and sparsity (optional)
        # NOTE(review): histogram_summary/scalar_summary/merge_summary are the
        # pre-TF-1.0 summary API — confirm the installed TF version supports them.
        grad_summaries = []
        for g, v in grads_and_vars:
            if g is not None:
                grad_hist_summary = tf.histogram_summary("{}/grad/hist".format(v.name), g)
                sparsity_summary = tf.scalar_summary("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                grad_summaries.append(grad_hist_summary)
                grad_summaries.append(sparsity_summary)
        grad_summaries_merged = tf.merge_summary(grad_summaries)
        # Output directory for models and summaries (one run per timestamp)
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
        print("Writing to {}\n".format(out_dir))
        # Summaries for loss and accuracy
        loss_summary = tf.scalar_summary("loss", cnn.loss)
        acc_summary = tf.scalar_summary("accuracy", cnn.accuracy)
        # Train Summaries
        train_summary_op = tf.merge_summary([loss_summary, acc_summary, grad_summaries_merged])
        train_summary_dir = os.path.join(out_dir, "summaries", "train")
        train_summary_writer = tf.train.SummaryWriter(train_summary_dir, sess.graph)
        # Dev summaries
        dev_summary_op = tf.merge_summary([loss_summary, acc_summary])
        dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
        dev_summary_writer = tf.train.SummaryWriter(dev_summary_dir, sess.graph)
        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.all_variables())
        # Write vocabulary so the same word->id mapping can be reused at eval time
        vocab_processor.save(os.path.join(out_dir, "vocab"))
        # Initialize all variables
        sess.run(tf.initialize_all_variables())

        def train_step(x_batch, y_batch):
            """
            A single training step: forward + backward pass on one batch,
            logging loss/accuracy and writing train summaries.
            """
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
            }
            _, step, summaries, loss, accuracy = sess.run(
                [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            train_summary_writer.add_summary(summaries, step)

        def dev_step(x_batch, y_batch, writer=None):
            """
            Evaluates model on a dev set (dropout disabled: keep prob 1.0).
            """
            feed_dict = {
                cnn.input_x: x_batch,
                cnn.input_y: y_batch,
                cnn.dropout_keep_prob: 1.0
            }
            step, summaries, loss, accuracy = sess.run(
                [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
            if writer:
                writer.add_summary(summaries, step)

        # Generate batches
        batches = manage_data.batch_iter(
            list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
        # Training loop. For each batch: train, then periodically evaluate
        # on the dev set and checkpoint the model.
        for batch in batches:
            x_batch, y_batch = zip(*batch)
            train_step(x_batch, y_batch)
            current_step = tf.train.global_step(sess, global_step)
            if current_step % FLAGS.evaluate_every == 0:
                print("\nEvaluation:")
                dev_step(x_dev, y_dev, writer=dev_summary_writer)
                print("")
            if current_step % FLAGS.checkpoint_every == 0:
                path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                print("Saved model checkpoint to {}\n".format(path))
```
| github_jupyter |
# PLOT Notes
# Matplotlib - generating plots consciously
2019.07.12.
based on https://dev.to/skotaro/artist-in-matplotlib---something-i-wanted-to-know-before-spending-tremendous-hours-on-googling-how-tos--31oo
## Pyplot and object-oriented API
These are two different coding styles for making plots in matplotlib. The object-oriented (OO) API style is officially recommended - we utilize an instance of axes.Axes in order to render visualizations on an instance of figure.Figure. The second is based on MATLAB and uses a state-based interface, encapsulated in the pyplot module. Important things:
* The Figure is the final image that may contain 1 or more Axes.
* The Axes represent an individual plot (don't confuse this with the word "axis", which refers to the x/y axis of a plot).
For more info see:
* pyplot tutorial https://matplotlib.org/tutorials/introductory/pyplot.html
* OO API tutorial https://matplotlib.org/tutorials/introductory/lifecycle.html
## Pyplot interface
* MATLAB-user-friendly style in which everything is done with plt.***
* very fast, but has limited options
* Example 1: Pyplot example - simple plots
* called "stateful interface" - which figure and subplot you are currently in
```
"""
Example 1: Pyplot example - simple plots
"""
import numpy as np
import matplotlib.pyplot as plt
# https://matplotlib.org/tutorials/introductory/pyplot.html

def f(t):
    # Damped oscillation: exp(-t) * cos(2*pi*t)
    return np.exp(-t) * np.cos(2*np.pi*t)

# Two sampling grids over [0, 5): coarse (0.1) and fine (0.02).
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
plt.figure(1)
plt.subplot(211)  # 2 rows, 1 column, first panel (stateful pyplot interface)
plt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
plt.subplot(212)  # second panel
plt.plot(t2, np.cos(2*np.pi*t2), 'r--')
plt.show();
```
## OO API style
* fig, ax = plt.subplots(), followed by ax.plot, ax.imshow etc. fig and ax are, artists.
* fig.add_subplot, alternative starting statement
* fig = plt.gcf() and ax = plt.gca(). used when you switch from Pyplot interface to OO interface
### The hierarchy in matplotlib
* matplotlib has a hierarchical structure of special artist elements called "containers"
* figure - whole area to display
* axes - individual plots
* axis - x,y axis to plot the data
* 4th containers are ticks!
* see figure at:https://res.cloudinary.com/practicaldev/image/fetch/s--dNi3F76s--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://thepracticaldev.s3.amazonaws.com/i/rr39m52m6peef1drke7m.png
* starting a figure
> fig, ax = plt.subplots() # make Figure and Axes which belongs to 'fig'
* or
> fig = plt.figure() # make Figure
> ax = fig.add_subplot(1,1,1) # make Axes belonging to fig
* rules to remember:
* Figure can contain multiple Axes because fig.axes is a list of Axes.
* Axes can belong to only single Figure because ax.figure is not a list.
* Axes can have one XAxis and YAxis respectively for similar reason.
* XAxis and YAxis can belong to single Axes and, accordingly, single Figure.
> fig.axes
> ax.figure
> ax.xaxis
> ax.xaxis.axes
> ax.xaxis.figure
* Artists
* every single component in a figure is an Artist object
* names of all elements are here: https://res.cloudinary.com/practicaldev/image/fetch/s--1x1epD95--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://thepracticaldev.s3.amazonaws.com/i/b9psb0mtz7yk8qmfe26f.png
* two types of artists objects:
* CONTAINERS;
Figure, Axes, Axis and Tick
have many "boxes" (Python lists,) for each type of primitives.
eg:, an Axes obj (ax), has an empty list ax.lines.
a command ax.plot adds a Line2D obj to that list and does other accompanying settings silently.
* PRIMITIVES; placed inside our containers, eg: Line2D made by ax.plot, PathCollection by ax.scatter, or Text by ax.annotate
see Example 2: Containers and Primitives.
```
"""
Example 2: Containers and Primitives
"""
# NOTE(review): relies on `np` and `plt` already imported by an earlier cell.
# data
x = np.linspace(0, 2*np.pi, 100)  # 100 numbers, equally distributed
fig = plt.figure()
ax = fig.add_subplot(1,1,1)  # make a blank plotting area
print('ax.lines before plot:\n', ax.lines)  # empty
line1, = ax.plot(x, np.sin(x), label='1st plot')  # add Line2D in ax.lines
print('ax.lines after 1st plot:\n', ax.lines)
line2, = ax.plot(x, np.sin(x+np.pi/8), label='2nd plot')  # add another Line2D
print('ax.lines after 2nd plot:\n', ax.lines)
ax.legend()
print('line1:', line1)
print('line2:', line2)
```
## FIGURE CONTAINER
Important:
* Attributes with a plural name are lists and those with a singular name represent a single object.
* Fig attributes can be changed into axis or axes attributes with Transforms
Figure attributes & description:
* fig.axes // A list of Axes instances (includes Subplot)
* fig.patch // The Rectangle background
* fig.images // A list of FigureImages patches - useful for raw pixel display
* fig.legends // A list of Figure Legend instances (different from Axes.legends)
* fig.lines // A list of Figure Line2D instances (rarely used, see Axes.lines)
* fig.patches // A list of Figure patches (rarely used, see Axes.patches)
* fig.texts // A list Figure Text instances
Legend
* we have ax.legend and fig.legend
* ax.legend only collects labels from Artists belonging to ax
* fig.legend gathers labels from all Axes under fig, e.g. for a large number of plots with the same elements
```
"""
Example 3: Combining legends from different sources
"""
x = np.linspace(0, 2*np.pi, 100)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, np.sin(x), label='sin(x)')
ax1 = ax.twinx()  # Create a twin Axes sharing the xaxis, i.e. second y axis on the right side
ax1.plot(x, 2*np.cos(x), c='C1', label='2*cos(x)')
# cf. 'CN' notation
# https://matplotlib.org/tutorials/colors/colors.html#cn-color-selection
# Combined ax.legend: merge handlers/labels from both Axes into one legend
handler, label = ax.get_legend_handles_labels()
handler1, label1 = ax1.get_legend_handles_labels()
ax.legend(handler+handler1, label+label1, loc='upper center', title='ax.legend')
# Legend made by ax1.legend remains
# Easy way: fig.legend collects handlers from every Axes in the figure
fig.legend(loc='upper right', bbox_to_anchor=(1,1),
           bbox_transform=ax.transAxes, title='fig.legend\nax.transAxes')
plt.show();

"""
Example 3b: Using ax.twinx() to create second y axis with different scale
"""
import numpy as np
import matplotlib.pyplot as plt
# Create some mock data
t = np.arange(0.01, 10.0, 0.01)
data1 = np.exp(t)
data2 = np.sin(2 * np.pi * t)
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('time (s)')
ax1.set_ylabel('exp', color=color)
ax1.plot(t, data1, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('sin', color=color)  # we already handled the x-label with ax1
ax2.plot(t, data2, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout()  # otherwise the right y-label is slightly clipped
plt.show()
```
## AXES CONTAINER
The matplotlib.axes.Axes is the center of the matplotlib universe
Has the following objects:
* XAXIS
* YAXIS
* Ticks container
How it works?
* Frequently-used commands such as ax.plot and ax.scatter are called "helper methods"
* helper methods add corresponding Artists in appropriate containers and do other miscellaneous jobs.
* ie. ax.plot and ax.scatter add Line2D and PathCollection objects in corresponding lists.
Reusing a plotted object is not recommended
* helper methods do many things other than creating an Artist
ax.set_*** methods
* Used to modify attributes and values of Axis and Tick instances
* static - Changes made with them are not updated when something changed.
* i.e. if you change them for plot1, you will also get the same on another plot, unless you change them again
Ticker.
* automatically update ticks for each new plot; formatter and locator
* ax.xaxis.get_major_formatter()
* ax.xaxis.get_major_locator()
* Tick formatters: https://matplotlib.org/gallery/ticks_and_spines/tick-formatters.html
## TICK CONTAINER
for a short line for a tick itself and a text for a tick label.
```
"""
Example 4: Using Ticker for customized ticks & labels
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker  # FIX: this import was duplicated on two lines

x = np.linspace(0, 2*np.pi, 100)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
line1, = ax.plot(x, np.sin(x), label='')  # X range: 0 to 2pi
ax.set_xticks([0, 0.5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])
line2, = ax.plot(1.5*x, np.sin(x), label='')  # X range: 0 to 3pi
# locate ticks at every 0.5*pi
ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5*np.pi))

# custom tick labels
@ticker.FuncFormatter  # FuncFormatter can be used as a decorator
def major_formatter_radian(x, pos):
    # FIX: raw string avoids the invalid '\p' escape warning; same text.
    return r'{}$\pi$'.format(x/np.pi)  # probably not the best way to show radian tick labels

ax.xaxis.set_major_formatter(major_formatter_radian)
plt.show();
"""
Example 5: Tick formatters:
https://matplotlib.org/gallery/ticks_and_spines/tick-formatters.html
"""
# Setup a plot such that only the bottom spine is shown
def setup(ax):
    ax.spines['right'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.yaxis.set_major_locator(ticker.NullLocator())
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.tick_params(which='major', width=1.00, length=5)
    ax.tick_params(which='minor', width=0.75, length=2.5, labelsize=10)
    ax.set_xlim(0, 5)
    ax.set_ylim(0, 1)
    ax.patch.set_alpha(0.0)

# One subplot row per formatter demo.
fig = plt.figure(figsize=(8, 6))
n = 7

# Null formatter: no tick labels at all
ax = fig.add_subplot(n, 1, 1)
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.text(0.0, 0.1, "NullFormatter()", fontsize=16, transform=ax.transAxes)

# Fixed formatter: labels taken verbatim from a list
ax = fig.add_subplot(n, 1, 2)
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.0))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
majors = ["", "0", "1", "2", "3", "4", "5"]
ax.xaxis.set_major_formatter(ticker.FixedFormatter(majors))
minors = [""] + ["%.2f" % (x-int(x)) if (x-int(x))
                 else "" for x in np.arange(0, 5, 0.25)]
ax.xaxis.set_minor_formatter(ticker.FixedFormatter(minors))
ax.text(0.0, 0.1, "FixedFormatter(['', '0', '1', ...])",
        fontsize=15, transform=ax.transAxes)

# FuncFormatter can be used as a decorator
@ticker.FuncFormatter
def major_formatter(x, pos):
    return "[%.2f]" % x

ax = fig.add_subplot(n, 1, 3)
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_major_formatter(major_formatter)
ax.text(0.0, 0.1, 'FuncFormatter(lambda x, pos: "[%.2f]" % x)',
        fontsize=15, transform=ax.transAxes)

# FormatStr formatter: old-style % format string
ax = fig.add_subplot(n, 1, 4)
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(">%d<"))
ax.text(0.0, 0.1, "FormatStrFormatter('>%d<')",
        fontsize=15, transform=ax.transAxes)

# Scalar formatter: matplotlib's default numeric labels
ax = fig.add_subplot(n, 1, 5)
setup(ax)
ax.xaxis.set_major_locator(ticker.AutoLocator())
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
ax.text(0.0, 0.1, "ScalarFormatter()", fontsize=15, transform=ax.transAxes)

# StrMethod formatter: str.format-style template
ax = fig.add_subplot(n, 1, 6)
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x}"))
ax.text(0.0, 0.1, "StrMethodFormatter('{x}')",
        fontsize=15, transform=ax.transAxes)

# Percent formatter: values scaled relative to xmax
ax = fig.add_subplot(n, 1, 7)
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=5))
ax.text(0.0, 0.1, "PercentFormatter(xmax=5)",
        fontsize=15, transform=ax.transAxes)

# Push the top of the top axes outside the figure because we only show the
# bottom spine.
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=1.05)
plt.show()
"""
Example 6; Tick Locators
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

# Setup a plot such that only the bottom spine is shown
def setup(ax):
    ax.spines['right'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.yaxis.set_major_locator(ticker.NullLocator())
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.tick_params(which='major', width=1.00)
    ax.tick_params(which='major', length=5)
    ax.tick_params(which='minor', width=0.75)
    ax.tick_params(which='minor', length=2.5)
    ax.set_xlim(0, 5)
    ax.set_ylim(0, 1)
    ax.patch.set_alpha(0.0)

# One subplot row per locator demo.
plt.figure(figsize=(8, 6))
n = 8

# Null Locator: no ticks
ax = plt.subplot(n, 1, 1)
setup(ax)
ax.xaxis.set_major_locator(ticker.NullLocator())
ax.xaxis.set_minor_locator(ticker.NullLocator())
ax.text(0.0, 0.1, "NullLocator()", fontsize=14, transform=ax.transAxes)

# Multiple Locator: ticks at every multiple of a base
ax = plt.subplot(n, 1, 2)
setup(ax)
ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax.text(0.0, 0.1, "MultipleLocator(0.5)", fontsize=14,
        transform=ax.transAxes)

# Fixed Locator: ticks only at the listed positions
ax = plt.subplot(n, 1, 3)
setup(ax)
majors = [0, 1, 5]
ax.xaxis.set_major_locator(ticker.FixedLocator(majors))
minors = np.linspace(0, 1, 11)[1:-1]
ax.xaxis.set_minor_locator(ticker.FixedLocator(minors))
ax.text(0.0, 0.1, "FixedLocator([0, 1, 5])", fontsize=14,
        transform=ax.transAxes)

# Linear Locator: evenly spaced ticks between the axis limits
ax = plt.subplot(n, 1, 4)
setup(ax)
ax.xaxis.set_major_locator(ticker.LinearLocator(3))
ax.xaxis.set_minor_locator(ticker.LinearLocator(31))
ax.text(0.0, 0.1, "LinearLocator(numticks=3)",
        fontsize=14, transform=ax.transAxes)

# Index Locator: ticks on index multiples (requires plotted data)
ax = plt.subplot(n, 1, 5)
setup(ax)
ax.plot(range(0, 5), [0]*5, color='white')
ax.xaxis.set_major_locator(ticker.IndexLocator(base=.5, offset=.25))
ax.text(0.0, 0.1, "IndexLocator(base=0.5, offset=0.25)",
        fontsize=14, transform=ax.transAxes)

# Auto Locator: matplotlib's default choice
ax = plt.subplot(n, 1, 6)
setup(ax)
ax.xaxis.set_major_locator(ticker.AutoLocator())
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.text(0.0, 0.1, "AutoLocator()", fontsize=14, transform=ax.transAxes)

# MaxN Locator: at most N intervals at "nice" locations
ax = plt.subplot(n, 1, 7)
setup(ax)
ax.xaxis.set_major_locator(ticker.MaxNLocator(4))
ax.xaxis.set_minor_locator(ticker.MaxNLocator(40))
ax.text(0.0, 0.1, "MaxNLocator(n=4)", fontsize=14, transform=ax.transAxes)

# Log Locator: ticks at powers of the base
ax = plt.subplot(n, 1, 8)
setup(ax)
ax.set_xlim(10**3, 10**10)
ax.set_xscale('log')
ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=15))
ax.text(0.0, 0.1, "LogLocator(base=10, numticks=15)",
        fontsize=15, transform=ax.transAxes)

# Push the top of the top axes outside the figure because we only show the
# bottom spine.
plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=1.05)
plt.show()
```
| github_jupyter |
# Control Flow Graph
The code in this notebook helps with obtaining the control flow graph of python functions.
**Prerequisites**
* This notebook needs some understanding on advanced concepts in Python, notably
* classes
## Control Flow Graph
The class `PyCFG` allows one to obtain the control flow graph.
```Python
from ControlFlow import gen_cfg, to_graph
cfg = gen_cfg(inspect.getsource(my_function))
to_graph(cfg)
```
```
import bookutils
from bookutils import print_content
import ast
import re
from graphviz import Source, Digraph
```
### Registry
```
# Global node registry: every CFGNode created gets a unique, monotonically
# increasing id (rid) and is recorded here so the finished graph can be
# retrieved after construction.
REGISTRY_IDX = 0
REGISTRY = {}

def get_registry_idx():
    """Hand out the next free node id and advance the counter."""
    global REGISTRY_IDX
    idx = REGISTRY_IDX
    REGISTRY_IDX = idx + 1
    return idx

def reset_registry():
    """Forget all registered nodes and restart ids at 0."""
    global REGISTRY_IDX, REGISTRY
    REGISTRY_IDX = 0
    REGISTRY = {}

def register_node(node):
    """Assign *node* a fresh rid and record it in the registry."""
    node.rid = get_registry_idx()
    REGISTRY[node.rid] = node

def get_registry():
    """Return a shallow copy of the current registry mapping rid -> node."""
    return dict(REGISTRY)
```
### CFGNode
We start with the `CFGNode` representing each node in the control flow graph.
\todo{Augmented and annotated assignments (`a += 1`), (`a:int = 1`)}.
```
class CFGNode(dict):
    """A single node in the control flow graph.

    Wraps one AST statement, keeps parent/child links to neighbouring
    nodes, and records the names of functions called from this statement.
    """

    def __init__(self, parents=None, ast=None):
        # BUG FIX: the original default was a shared mutable list
        # (parents=[]); a None sentinel avoids cross-instance sharing.
        if parents is None:
            parents = []
        assert type(parents) is list
        register_node(self)  # assigns self.rid
        self.parents = parents
        self.ast_node = ast
        self.children = []
        self.calls = []
        self.update_children(parents)  # requires self.rid

    def i(self):
        """Node id as a string (handy for graph labels)."""
        return str(self.rid)

    def update_children(self, parents):
        # Register this node as a child of each of its parents.
        for p in parents:
            p.add_child(self)

    def add_child(self, c):
        if c not in self.children:
            self.children.append(c)

    def lineno(self):
        """Source line of the wrapped statement (0 for synthetic nodes)."""
        return self.ast_node.lineno if hasattr(self.ast_node, 'lineno') else 0

    def __str__(self):
        return "id:%d line[%d] parents: %s : %s" % (
            self.rid, self.lineno(), str([p.rid for p in self.parents]),
            self.source())

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return self.rid == other.rid

    # BUG FIX: the inequality dunder is __ne__, not __neq__ (the original
    # name was never invoked by `!=`). Keep the old name as an alias for
    # any explicit callers.
    def __ne__(self, other):
        return self.rid != other.rid

    __neq__ = __ne__

    def __hash__(self):
        # Defining __eq__ alone would make the class unhashable; hash by
        # the unique registry id, consistent with __eq__.
        return self.rid

    def set_parents(self, p):
        self.parents = p

    def add_parent(self, p):
        if p not in self.parents:
            self.parents.append(p)

    def add_parents(self, ps):
        for p in ps:
            self.add_parent(p)

    def add_calls(self, func):
        self.calls.append(func)

    def source(self):
        """Source text of the wrapped AST node."""
        return ast.unparse(self.ast_node).strip()

    def to_json(self):
        """Serializable summary of this node (ids only, no object refs)."""
        return {
            'id': self.rid,
            'parents': [p.rid for p in self.parents],
            'children': [c.rid for c in self.children],
            'calls': self.calls,
            'at': self.lineno(),
            'ast': self.source()
        }
# NOTE(review): this cell re-defines the registry helpers from the earlier
# "Registry" section (get_registry is not repeated here). The definitions
# below shadow the earlier ones but are otherwise identical — likely a
# duplicated notebook cell.
REGISTRY_IDX = 0
REGISTRY = {}
def get_registry_idx():
    # Hand out the next free node id and advance the counter.
    global REGISTRY_IDX
    v = REGISTRY_IDX
    REGISTRY_IDX += 1
    return v
def reset_registry():
    # Forget all registered nodes and restart ids at 0.
    global REGISTRY_IDX
    global REGISTRY
    REGISTRY_IDX = 0
    REGISTRY = {}
def register_node(node):
    # Assign a fresh rid and record the node.
    node.rid = get_registry_idx()
    REGISTRY[node.rid] = node
```
### PyCFG
Next, the `PyCFG` class which is responsible for parsing, and holding the graph.
```
class PyCFG:
    """Builds a control flow graph for Python source by walking its AST."""
    def __init__(self):
        # Synthetic 'start' node that roots the whole graph.
        self.founder = CFGNode(
            parents=[], ast=ast.parse('start').body[0])  # sentinel
        self.founder.ast_node.lineno = 0
        self.functions = {}       # function name -> [enter_node, exit_node]
        self.functions_node = {}  # line number -> owning function name
class PyCFG(PyCFG):
    def parse(self, src):
        """Parse source text into an AST module."""
        return ast.parse(src)

class PyCFG(PyCFG):
    def walk(self, node, myparents):
        """Dispatch *node* to its on_<nodetype> handler.

        Node types without a handler pass `myparents` through unchanged.
        Handlers return the list of nodes that the *next* statement should
        attach to as parents.
        """
        fname = "on_%s" % node.__class__.__name__.lower()
        if hasattr(self, fname):
            fn = getattr(self, fname)
            v = fn(node, myparents)
            return v
        else:
            return myparents

class PyCFG(PyCFG):
    def on_module(self, node, myparents):
        """
        Module(stmt* body)
        """
        # each time a statement is executed unconditionally, make a link from
        # the result to next statement
        p = myparents
        for n in node.body:
            p = self.walk(n, p)
        return p
class PyCFG(PyCFG):
    def on_augassign(self, node, myparents):
        """
        AugAssign(expr target, operator op, expr value)
        """
        p = [CFGNode(parents=myparents, ast=node)]
        p = self.walk(node.value, p)
        return p

class PyCFG(PyCFG):
    def on_annassign(self, node, myparents):
        """
        AnnAssign(expr target, expr annotation, expr? value, int simple)
        """
        # node.value may be None for a bare annotation; walk() has no
        # on_nonetype handler, so that case simply passes parents through.
        p = [CFGNode(parents=myparents, ast=node)]
        p = self.walk(node.value, p)
        return p

class PyCFG(PyCFG):
    def on_assign(self, node, myparents):
        """
        Assign(expr* targets, expr value)
        """
        if len(node.targets) > 1:
            # BUG FIX: the original `raise NotImplemented(...)` raised a
            # TypeError (NotImplemented is a constant, not an exception
            # class); NotImplementedError is the intended exception.
            raise NotImplementedError('Parallel assignments')
        p = [CFGNode(parents=myparents, ast=node)]
        p = self.walk(node.value, p)
        return p

class PyCFG(PyCFG):
    def on_pass(self, node, myparents):
        """A `pass` statement is a plain node with no extra edges."""
        return [CFGNode(parents=myparents, ast=node)]
class PyCFG(PyCFG):
    def on_break(self, node, myparents):
        # Walk up the (ordered) parent chain to the nearest enclosing loop
        # header — the node carrying the `exit_nodes` attribute.
        parent = myparents[0]
        while not hasattr(parent, 'exit_nodes'):
            # we have ordered parents
            parent = parent.parents[0]
        assert hasattr(parent, 'exit_nodes')
        p = CFGNode(parents=myparents, ast=node)
        # make the break one of the parents of label node.
        parent.exit_nodes.append(p)
        # break doesn't have immediate children
        return []

class PyCFG(PyCFG):
    def on_continue(self, node, myparents):
        # Same lookup as on_break: find the enclosing loop's header node.
        parent = myparents[0]
        while not hasattr(parent, 'exit_nodes'):
            # we have ordered parents
            parent = parent.parents[0]
        assert hasattr(parent, 'exit_nodes')
        p = CFGNode(parents=myparents, ast=node)
        # make continue one of the parents of the original test node.
        parent.add_parent(p)
        # return the parent because a continue is not the parent
        # for the just next node
        return []
class PyCFG(PyCFG):
    def on_for(self, node, myparents):
        # node.target in node.iter: node.body
        # The For loop in python (no else) can be translated
        # as follows:
        #
        # for a in iterator:
        #     mystatements
        #
        # __iv = iter(iterator)
        # while __iv.__length_hint() > 0:
        #     a = next(__iv)
        #     mystatements
        init_node = CFGNode(parents=myparents,
            ast=ast.parse('__iv = iter(%s)' % ast.unparse(node.iter).strip()).body[0])
        ast.copy_location(init_node.ast_node, node.iter)
        # Synthetic loop-condition node; the '_for:' text is only a label
        # (the annotated pseudo-statement is never executed).
        _test_node = CFGNode(
            parents=[init_node],
            ast=ast.parse('_for: __iv.__length__hint__() > 0').body[0])
        ast.copy_location(_test_node.ast_node, node)
        # we attach the label node here so that break can find it.
        _test_node.exit_nodes = []
        test_node = self.walk(node.iter, [_test_node])
        # Synthetic node that binds the loop variable from the iterator.
        extract_node = CFGNode(parents=test_node,
            ast=ast.parse('%s = next(__iv)' % ast.unparse(node.target).strip()).body[0])
        ast.copy_location(extract_node.ast_node, node.iter)
        # now we evaluate the body, one at a time.
        p1 = [extract_node]
        for n in node.body:
            p1 = self.walk(n, p1)
        # the test node is looped back at the end of processing.
        _test_node.add_parents(p1)
        # Successors of the loop: break targets plus the (false) test exit.
        return _test_node.exit_nodes + test_node
class PyCFG(PyCFG):
    def on_while(self, node, myparents):
        # For a while, the earliest parent is the node.test
        _test_node = CFGNode(
            parents=myparents,
            ast=ast.parse(
                '_while: %s' % ast.unparse(node.test).strip()).body[0])
        ast.copy_location(_test_node.ast_node, node.test)
        # we attach the label node here so that break can find it.
        _test_node.exit_nodes = []
        test_node = self.walk(node.test, [_test_node])
        # now we evaluate the body, one at a time.
        assert len(test_node) == 1
        p1 = test_node
        for n in node.body:
            p1 = self.walk(n, p1)
        # the test node is looped back at the end of processing.
        _test_node.add_parents(p1)
        # link label node back to the condition.
        return _test_node.exit_nodes + test_node

class PyCFG(PyCFG):
    def on_if(self, node, myparents):
        # Synthetic '_if:' pseudo-node for the condition; the true and
        # false branches are walked separately and their tails merged.
        _test_node = CFGNode(
            parents=myparents,
            ast=ast.parse(
                '_if: %s' % ast.unparse(node.test).strip()).body[0])
        ast.copy_location(_test_node.ast_node, node.test)
        test_node = self.walk(node.test, [_test_node])
        assert len(test_node) == 1
        g1 = test_node
        for n in node.body:
            g1 = self.walk(n, g1)
        g2 = test_node
        for n in node.orelse:
            g2 = self.walk(n, g2)
        # Both branch tails are possible predecessors of the next statement.
        return g1 + g2
class PyCFG(PyCFG):
    def on_binop(self, node, myparents):
        # Evaluation order: left operand, then right.
        left = self.walk(node.left, myparents)
        right = self.walk(node.right, left)
        return right

class PyCFG(PyCFG):
    def on_compare(self, node, myparents):
        # NOTE(review): only the first comparator is walked, so chained
        # comparisons (a < b < c) lose the later operands — confirm intended.
        left = self.walk(node.left, myparents)
        right = self.walk(node.comparators[0], left)
        return right

class PyCFG(PyCFG):
    def on_unaryop(self, node, myparents):
        return self.walk(node.operand, myparents)
class PyCFG(PyCFG):
    def on_call(self, node, myparents):
        def get_func(node):
            # Extract the called name from Name/Attribute/Call targets;
            # anything else is unsupported.
            if type(node.func) is ast.Name:
                mid = node.func.id
            elif type(node.func) is ast.Attribute:
                mid = node.func.attr
            elif type(node.func) is ast.Call:
                mid = get_func(node.func)
            else:
                raise Exception(str(type(node.func)))
            return mid
            # mid = node.func.value.id
        p = myparents
        for a in node.args:
            p = self.walk(a, p)
        mid = get_func(node)
        myparents[0].add_calls(mid)
        # these need to be unlinked later if our module actually defines these
        # functions. Otherwise we may leave them around.
        # during a call, the direct child is not the next
        # statement in text.
        for c in p:
            c.calllink = 0
        return p

class PyCFG(PyCFG):
    def on_expr(self, node, myparents):
        # An expression statement: one node, then walk the inner expression.
        p = [CFGNode(parents=myparents, ast=node)]
        return self.walk(node.value, p)
class PyCFG(PyCFG):
    def on_return(self, node, myparents):
        # myparents is normally a list; tuples can arrive from some call
        # sites, hence the unwrapping below.
        if type(myparents) is tuple:
            parent = myparents[0][0]
        else:
            parent = myparents[0]
        val_node = self.walk(node.value, myparents)
        # on return look back to the function definition.
        while not hasattr(parent, 'return_nodes'):
            parent = parent.parents[0]
        assert hasattr(parent, 'return_nodes')
        p = CFGNode(parents=val_node, ast=node)
        # make the return one of the parents of the function's exit label.
        parent.return_nodes.append(p)
        # return doesnt have immediate children
        return []

class PyCFG(PyCFG):
    def on_functiondef(self, node, myparents):
        # a function definition does not actually continue the thread of
        # control flow
        # name, args, body, decorator_list, returns
        fname = node.name
        args = node.args
        returns = node.returns
        # Synthetic 'enter:'/'exit:' pseudo-nodes bracket the function body.
        enter_node = CFGNode(
            parents=[],
            ast=ast.parse('enter: %s(%s)' % (node.name, ', '.join(
                [a.arg for a in node.args.args]))).body[0])  # sentinel
        enter_node.calleelink = True
        ast.copy_location(enter_node.ast_node, node)
        exit_node = CFGNode(
            parents=[],
            ast=ast.parse('exit: %s(%s)' % (node.name, ', '.join(
                [a.arg for a in node.args.args]))).body[0])  # sentinel
        exit_node.fn_exit_node = True
        ast.copy_location(exit_node.ast_node, node)
        enter_node.return_nodes = []  # sentinel
        p = [enter_node]
        for n in node.body:
            p = self.walk(n, p)
        # Any path that falls off the end of the body also reaches the exit.
        for n in p:
            if n not in enter_node.return_nodes:
                enter_node.return_nodes.append(n)
        for n in enter_node.return_nodes:
            exit_node.add_parent(n)
        self.functions[fname] = [enter_node, exit_node]
        self.functions_node[enter_node.lineno()] = fname
        return myparents
class PyCFG(PyCFG):
def get_defining_function(self, node):
if node.lineno() in self.functions_node:
return self.functions_node[node.lineno()]
if not node.parents:
self.functions_node[node.lineno()] = ''
return ''
val = self.get_defining_function(node.parents[0])
self.functions_node[node.lineno()] = val
return val
class PyCFG(PyCFG):
def link_functions(self):
for nid, node in REGISTRY.items():
if node.calls:
for calls in node.calls:
if calls in self.functions:
enter, exit = self.functions[calls]
enter.add_parent(node)
if node.children:
# # until we link the functions up, the node
# # should only have succeeding node in text as
# # children.
# assert(len(node.children) == 1)
# passn = node.children[0]
# # We require a single pass statement after every
# # call (which means no complex expressions)
# assert(type(passn.ast_node) == ast.Pass)
# # unlink the call statement
assert node.calllink > -1
node.calllink += 1
for i in node.children:
i.add_parent(exit)
# passn.set_parents([exit])
# ast.copy_location(exit.ast_node, passn.ast_node)
# #for c in passn.children: c.add_parent(exit)
# #passn.ast_node = exit.ast_node
class PyCFG(PyCFG):
    def update_functions(self):
        """Precompute the defining function of every registered node.

        ``get_defining_function`` memoizes its answer into
        ``self.functions_node`` as a side effect; the return value is
        deliberately discarded here (``_n``).
        """
        for nid, node in REGISTRY.items():
            _n = self.get_defining_function(node)
class PyCFG(PyCFG):
    def update_children(self):
        """Mirror every parent link in the registry as a child link.

        While the graph is being built each CFGNode records only its
        parents; this pass registers every node as a child of each of
        its parents so the graph can be traversed in both directions.
        """
        for node in REGISTRY.values():
            for parent in node.parents:
                parent.add_child(node)
class PyCFG(PyCFG):
    def gen_cfg(self, src):
        """Build the complete CFG for the source string *src*.

        Parses the source, walks it starting from the ``start`` founder
        node, appends a synthetic ``stop`` node, then runs the
        post-processing passes: child links, function ownership, and
        call-site linking.
        """
        node = self.parse(src)
        nodes = self.walk(node, [self.founder])
        self.last_node = CFGNode(parents=nodes, ast=ast.parse('stop').body[0])
        ast.copy_location(self.last_node.ast_node, self.founder.ast_node)
        self.update_children()
        self.update_functions()
        self.link_functions()
```
### Supporting Functions
```
def compute_dominator(cfg, start=0, key='parents'):
    """Compute dominator sets of a CFG by fixed-point iteration.

    cfg:   dict mapping node id -> {'parents': [...], 'children': [...]}
    start: id of the entry node (dominates only itself initially)
    key:   'parents' computes dominators; 'children' computes
           post-dominators when *start* is the exit node.

    Returns a dict mapping each node id to the set of ids that
    dominate it (every set contains the node itself).
    """
    all_nodes = set(cfg.keys())
    others = all_nodes - {start}
    # start with the most permissive solution and shrink it
    dom = {nid: all_nodes for nid in others}
    dom[start] = {start}
    changed = True
    while changed:
        changed = False
        for nid in others:
            preds = cfg[nid][key]
            pred_doms = [dom[p] for p in preds]
            merged = set.intersection(*pred_doms) if pred_doms else set()
            updated = merged | {nid}
            if dom[nid] != updated:
                dom[nid] = updated
                changed = True
    return dom
def compute_flow(pythonfile):
    """Return the CFG of *pythonfile* plus its dominator and
    post-dominator relations.

    Returns (cfg, dominators, postdominators) where dominators are
    computed from the entry node over parent edges and postdominators
    from the exit node over child edges.
    """
    cfg, entry, exit_ = get_cfg(pythonfile)
    dominators = compute_dominator(cfg, start=entry)
    postdominators = compute_dominator(cfg, start=exit_, key='children')
    return cfg, dominators, postdominators
def gen_cfg(fnsrc, remove_start_stop=True):
    """Build the CFG node registry for the Python source *fnsrc*.

    Resets the global REGISTRY, constructs the CFG, and returns a
    snapshot dict of node-id -> CFGNode.  Unless *remove_start_stop*
    is False, the synthetic 'start' and 'stop' sentinel nodes are
    filtered out.
    """
    reset_registry()
    builder = PyCFG()
    builder.gen_cfg(fnsrc)
    nodes = dict(REGISTRY)
    if not remove_start_stop:
        return nodes
    sentinels = {'start', 'stop'}
    return {nid: node for nid, node in nodes.items()
            if node.source() not in sentinels}
def get_cfg(src):
    """Return a line-number-keyed CFG for *src*.

    Builds the full node registry, then collapses it into a dict keyed
    by source line number ('at'); each entry holds the parent and child
    line numbers, the functions called from that line (if any), and the
    name of the function the line belongs to.

    Returns (graph, first_line, last_line), where first/last are the
    line numbers of the synthetic start/stop nodes.
    """
    reset_registry()
    cfg = PyCFG()
    cfg.gen_cfg(src)
    cache = dict(REGISTRY)
    g = {}
    for k, v in cache.items():
        j = v.to_json()
        at = j['at']
        parents_at = [cache[p].to_json()['at'] for p in j['parents']]
        children_at = [cache[c].to_json()['at'] for c in j['children']]
        if at not in g:
            g[at] = {'parents': set(), 'children': set()}
        # remove dummy nodes: self-edges on the same line are artifacts of
        # several registry nodes sharing one source line
        ps = set([p for p in parents_at if p != at])
        cs = set([c for c in children_at if c != at])
        g[at]['parents'] |= ps
        g[at]['children'] |= cs
        if v.calls:
            g[at]['calls'] = v.calls
        g[at]['function'] = cfg.functions_node[v.lineno()]
    return (g, cfg.founder.ast_node.lineno, cfg.last_node.ast_node.lineno)
def to_graph(cache, arcs=[]):
    """Render a CFG node cache as a graphviz Digraph.

    cache: node-id -> CFGNode mapping (e.g. from gen_cfg()).
    arcs:  optional list of (from_lineno, to_lineno) covered transitions;
           when given, edges are colored green (covered) or red
           (uncovered) instead of the T/F branch colors.
           (Read-only here, so the mutable default is harmless.)

    Fix: the "parent is an exit node" coverage test used
    ``set(...) | cov_lines`` (union), which is non-empty whenever any
    arc exists at all, so the branch was trivially true.  The comment
    says "one of its parents is covered", i.e. intersection ``&``.
    """
    graph = Digraph(comment='Control Flow Graph')
    colors = {0: 'blue', 1: 'red'}
    kind = {0: 'T', 1: 'F'}
    cov_lines = set(i for i, j in arcs)
    for nid, cnode in cache.items():
        lineno = cnode.lineno()
        shape, peripheries = 'oval', '1'
        if isinstance(cnode.ast_node, ast.AnnAssign):
            # annotated assignments encode the builder's control-flow
            # sentinels: _if/_for/_while decisions and enter/exit markers
            if cnode.ast_node.target.id in {'_if', '_for', '_while'}:
                shape = 'diamond'
            elif cnode.ast_node.target.id in {'enter', 'exit'}:
                shape, peripheries = 'oval', '2'
        else:
            shape = 'rectangle'
        graph.node(cnode.i(), "%d: %s" % (lineno, unhack(cnode.source())),
                   shape=shape, peripheries=peripheries)
        for pn in cnode.parents:
            plineno = pn.lineno()
            # call edges (parent has an active calllink, child is not the
            # callee's enter node) are drawn dotted
            if hasattr(pn, 'calllink') and pn.calllink > 0 and not hasattr(
                    cnode, 'calleelink'):
                graph.edge(pn.i(), cnode.i(), style='dotted', weight=100)
                continue
            if arcs:
                if (plineno, lineno) in arcs:
                    graph.edge(pn.i(), cnode.i(), color='green')
                elif plineno == lineno and lineno in cov_lines:
                    graph.edge(pn.i(), cnode.i(), color='green')
                # child is exit and parent is covered
                elif hasattr(cnode, 'fn_exit_node') and plineno in cov_lines:
                    graph.edge(pn.i(), cnode.i(), color='green')
                # parent is exit and one of its parents is covered.
                elif hasattr(pn, 'fn_exit_node') and len(
                        set(n.lineno() for n in pn.parents) & cov_lines) > 0:
                    graph.edge(pn.i(), cnode.i(), color='green')
                # child is a callee (has calleelink) and one of the parents is covered.
                elif plineno in cov_lines and hasattr(cnode, 'calleelink'):
                    graph.edge(pn.i(), cnode.i(), color='green')
                else:
                    graph.edge(pn.i(), cnode.i(), color='red')
            else:
                # no coverage info: label branch edges T/F by child order
                order = {c.i(): i for i, c in enumerate(pn.children)}
                if len(order) < 2:
                    graph.edge(pn.i(), cnode.i())
                else:
                    o = order[cnode.i()]
                    graph.edge(pn.i(), cnode.i(), color=colors[o], label=kind[o])
    return graph
def unhack(v):
    """Undo the builder's keyword hack in a source line.

    The CFG builder prefixes control-flow keywords with an underscore
    (e.g. ``_if:``) to encode them as annotated assignments; this strips
    that leading underscore again for display.
    """
    if re.match(r'^_(if|while|for|elif):', v):
        return v[1:]
    return v
```
### Examples
#### check_triangle
```
# Classify a triangle by pairwise equality of its side lengths.  The
# comparisons are deliberately nested (and partly redundant) so the
# resulting CFG has interesting branch structure for the demos below.
# A '#' comment is used instead of a docstring so the function's AST —
# and hence its CFG — is unchanged.
def check_triangle(a, b, c):
    if a == b:
        if a == c:
            if b == c:  # always true here for numeric inputs (a == b and a == c)
                return "Equilateral"
            else:
                return "Isosceles"  # unreachable branch, kept for coverage demos
        else:
            return "Isosceles"
    else:
        if b != c:
            if a == c:
                return "Isosceles"
            else:
                return "Scalene"
        else:
            return "Isosceles"
import inspect
to_graph(gen_cfg(inspect.getsource(check_triangle)))
```
#### cgi_decode
Note that we do not yet support _augmented assignments_, i.e., assignments such as `+=`
```
# Decode a CGI/URL-encoded string: '+' becomes a space, '%xx' becomes the
# character with hex code xx, and every other character passes through.
# A '#' comment is used instead of a docstring so the demo CFG is unchanged.
def cgi_decode(s):
    hex_values = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
        '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
        'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
        'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15,
    }
    t = ""
    i = 0
    while i < len(s):
        c = s[i]
        if c == '+':
            t += ' '
        elif c == '%':
            # NOTE(review): no bounds check — a '%' within the last two
            # characters of s raises IndexError here.
            digit_high, digit_low = s[i + 1], s[i + 2]
            i += 2
            if digit_high in hex_values and digit_low in hex_values:
                v = hex_values[digit_high] * 16 + hex_values[digit_low]
                t += chr(v)
            else:
                raise ValueError("Invalid encoding")
        else:
            t += c
        i += 1  # combined with the i += 2 above, this steps past '%xx'
    return t
to_graph(gen_cfg(inspect.getsource(cgi_decode)))
```
#### gcd
```
# Euclid's algorithm.  Annotated assignments (c: int = a) are used instead
# of plain/augmented assignments because the notebook's CFG builder does
# not support augmented assignments (see the note above cgi_decode).
def gcd(a, b):
    if a<b:
        # swap so that a >= b
        c: int = a
        a: int = b
        b: int = c
    while b != 0 :
        # (a, b) -> (b, a mod b)
        c: int = a
        a: int = b
        b: int = c % b
    return a
to_graph(gen_cfg(inspect.getsource(gcd)))
# Brute-force GCD: test every candidate divisor from 1 up to min(x, y),
# keeping the largest that divides both.
def compute_gcd(x, y):
    if x > y:
        small = y
    else:
        small = x
    for i in range(1, small+1):
        if((x % i == 0) and (y % i == 0)):
            gcd = i
    # NOTE(review): if min(x, y) < 1 the loop never runs and `gcd` is
    # unbound (NameError) — assumes positive integer inputs.
    return gcd
to_graph(gen_cfg(inspect.getsource(compute_gcd)))
```
#### fib
Note that the *for-loop* requires additional massaging. While we show the labels correctly, the *comparison node* needs to be extracted. Hence, the representation is not accurate.
```
# Return the first n Fibonacci numbers, seeded with [0, 1].
# NOTE(review): for n < 2 this still returns both seed values, i.e. more
# than n numbers — confirm callers expect that.
def fib(n,):
    ls = [0, 1]
    for i in range(n-2):
        ls.append(ls[-1] + ls[-2])
    return ls
to_graph(gen_cfg(inspect.getsource(fib)))
```
#### quad_solver
```
# Solve a*x**2 + b*x + c = 0.  Returns real roots as (r1, r2), or complex
# roots as ((re1, im1), (re2, im2)).
# Fix: the discriminant used `b^2`, which is bitwise XOR in Python; the
# intended operation is exponentiation, `b**2`.
def quad_solver(a, b, c):
    discriminant = b**2 - 4*a*c
    r1, r2 = 0, 0
    i1, i2 = 0, 0
    if discriminant >= 0:
        droot = math.sqrt(discriminant)
        r1 = (-b + droot) / (2*a)
        r2 = (-b - droot) / (2*a)
    else:
        droot = math.sqrt(-1 * discriminant)
        droot_ = droot/(2*a)
        r1, i1 = -b/(2*a), droot_
        r2, i2 = -b/(2*a), -droot_
    if i1 == 0 and i2 == 0:
        return (r1, r2)
    return ((r1,i1), (r2,i2))
to_graph(gen_cfg(inspect.getsource(quad_solver)))
```
## Call Graph
### Install: Pyan Static Call Graph Lifter
```
import os
import networkx as nx
```
### Call Graph Helpers
```
import shutil
# Prefer the 'pyan3' entry point; older installs expose only 'pyan'.
PYAN = 'pyan3' if shutil.which('pyan3') is not None else 'pyan'
if shutil.which(PYAN) is None:
    # If installed from pypi, pyan may still be missing
    os.system('pip install "git+https://github.com/uds-se/pyan#egg=pyan"')
    # re-resolve the entry point after installation
    PYAN = 'pyan3' if shutil.which('pyan3') is not None else 'pyan'
assert shutil.which(PYAN) is not None
def construct_callgraph(code, name="callgraph"):
    """Write *code* to <name>.py and run pyan on it to produce <name>.dot."""
    file_name = name + ".py"
    with open(file_name, 'w') as f:
        f.write(code)
    cg_file = name + '.dot'
    # NOTE(review): the command is interpolated into a shell string; *name*
    # is trusted here, but a name containing shell metacharacters would be
    # unsafe — consider subprocess.run with an argument list.
    os.system(f'{PYAN} {file_name} --uses --defines --colored --grouped --annotated --dot > {cg_file}')
def callgraph(code, name="callgraph"):
    """Render the call graph of *code* as graphviz Source.

    The <name>.dot file is used as a cache: it is only regenerated when
    it does not already exist.
    """
    if not os.path.isfile(name + '.dot'):
        construct_callgraph(code, name)
    return Source.from_file(name + '.dot')
def get_callgraph(code, name="callgraph"):
    """Return the call graph of *code* as a networkx graph.

    Uses the same <name>.dot cache as callgraph().
    """
    if not os.path.isfile(name + '.dot'):
        construct_callgraph(code, name)
    return nx.drawing.nx_pydot.read_dot(name + '.dot')
```
### Example: Maze
To provide a meaningful example where you can easily change the code complexity and target location, we generate the maze source code from the maze provided as string. This example is loosely based on an old [blog post](https://feliam.wordpress.com/2010/10/07/the-symbolic-maze/) on symbolic execution by Felipe Andres Manzano (Quick shout-out!).
You simply specify the maze as a string. Like so.
```
maze_string = """
+-+-----+
|X| |
| | --+ |
| | | |
| +-- | |
| |#|
+-----+-+
"""
```
Each character in `maze_string` represents a tile. For each tile, a tile-function is generated.
* If the current tile is "benign" (` `), the tile-function corresponding to the next input character (D, U, L, R) is called. Unexpected input characters are ignored. If no more input characters are left, it returns "VALID" and the current maze state.
* If the current tile is a "trap" (`+`,`|`,`-`), it returns "INVALID" and the current maze state.
* If the current tile is the "target" (`#`), it returns "SOLVED" and the current maze state.
The code is generated using the function `generate_maze_code`.
```
def generate_print_maze(maze_string):
    """Return source for print_maze(out, row, col): renders the maze with
    the player marked 'X' at (row, col) and *out* as a status line."""
    return """
def print_maze(out, row, col):
    output  = out +"\\n"
    c_row = 0
    c_col = 0
    for c in list(\"\"\"%s\"\"\"):
        if c == '\\n':
            c_row += 1
            c_col = 0
            output += "\\n"
        else:
            if c_row == row and c_col == col: output += "X"
            elif c == "X": output += " "
            else: output += c
            c_col += 1
    return output
""" % maze_string
def generate_trap_tile(row, col):
    """Return source for a trap tile: entering it ends the game as
    INVALID.  (The HTMLParser().feed call only adds call-graph noise.)"""
    return """
def tile_%d_%d(input, index):
    try: HTMLParser().feed(input)
    except: pass
    return print_maze("INVALID", %d, %d)
""" % (row, col, row, col)
def generate_good_tile(c, row, col):
    """Return source for a walkable tile that dispatches on the next
    input character (L/R/U/D move to the neighboring tile; any other
    character stays put).  The 'X' start tile additionally receives the
    maze() entry function."""
    code = """
def tile_%d_%d(input, index):
    if (index == len(input)): return print_maze("VALID", %d, %d)
    elif input[index] == 'L': return tile_%d_%d(input, index + 1)
    elif input[index] == 'R': return tile_%d_%d(input, index + 1)
    elif input[index] == 'U': return tile_%d_%d(input, index + 1)
    elif input[index] == 'D': return tile_%d_%d(input, index + 1)
    else : return tile_%d_%d(input, index + 1)
""" % (row, col, row, col,
       row, col - 1,
       row, col + 1,
       row - 1, col,
       row + 1, col,
       row, col)
    if c == "X":
        code += """
def maze(input):
    return tile_%d_%d(list(input), 0)
""" % (row, col)
    return code
def generate_target_tile(row, col):
    """Return source for the target tile ('#'): reaching it is SOLVED.
    Also emits target_tile(), which reports the target's function name."""
    return """
def tile_%d_%d(input, index):
    return print_maze("SOLVED", %d, %d)
def target_tile():
    return "tile_%d_%d"
""" % (row, col, row, col, row, col)
def generate_maze_code(maze, name="maze"):
    """Translate a maze drawing into Python source code.

    Walls ('-', '+', '|') become trap tiles, spaces and the start 'X'
    become walkable tiles, and '#' becomes the target tile.  A
    print_maze helper is emitted first.  Unknown characters produce a
    warning but still advance the column.  (*name* is currently unused.)
    """
    parts = [generate_print_maze(maze)]
    row, col = 0, 0
    for ch in maze:
        if ch == '\n':
            row, col = row + 1, 0
            continue
        if ch in "-+|":
            parts.append(generate_trap_tile(row, col))
        elif ch in " X":
            parts.append(generate_good_tile(ch, row, col))
        elif ch == "#":
            parts.append(generate_target_tile(row, col))
        else:
            print("Invalid maze! Try another one.")
        col += 1
    return "".join(parts)
```
Now you can generate the maze code for an arbitrary maze.
```
maze_code = generate_maze_code(maze_string)
print_content(maze_code, filename='.py')
exec(maze_code)
# Appending one more 'D', you have reached the target.
print(maze("DDDDRRRRUULLUURRRRDDD"))
```
This is the corresponding call graph.
```
callgraph(maze_code)
```
## Cleanup
We're done, so we clean up:
```
# Remove generated call-graph artifacts so reruns start from a clean slate.
if os.path.exists('callgraph.dot'):
    os.remove('callgraph.dot')
if os.path.exists('callgraph.py'):
    os.remove('callgraph.py')
```
| github_jupyter |
```
import numpy as np
from scipy import spatial
def evaluate_tour_len(x, d):
    '''
    Return the total length of the closed TSP tour *x*.

    x: permutation of city indices (visiting order)
    d: DxD matrix of pairwise Euclidean distances

    The tour is closed: after the last city in *x* we return to the
    first city in *x*.

    Fix: the closing leg used d[len(x)-1, 0] — raw positions instead of
    the cities stored at those positions; the commented-out debug print
    in the original showed the intended d[x[len(x)-1], x[0]].
    '''
    L = 0
    # sum the legs between consecutive cities in visiting order
    for i in range(len(x) - 1):
        L += d[x[i], x[i + 1]]
    # close the tour: last visited city back to the first visited city
    L += d[x[len(x) - 1], x[0]]
    return L
x = np.array([2,3,1,0])
y = np.matrix([[5,5,6,6],
[7,7,7,7],
[1,2,3,4],
[8,8,8,8]])
evaluate_tour_len(x,y)
def order_crossover(xa,xb):
    # Order crossover (OX) for permutation-encoded parents xa, xb:
    # the child keeps xb's segment [c1, c2] in place and fills the rest
    # with xa's genes in their original relative order.
    xa = np.copy(xa)
    xb = np.copy(xb)
    D = len(xa)
    # pick two distinct cut points and order them c1 < c2
    r = np.arange(D)
    np.random.shuffle(r)
    if r[0]<r[1]:
        c1 = r[0]
        c2 = r[1]
    else:
        c1 = r[1]
        c2 = r[0]
    u = xa  # child; aliases the local *copy* of xa, mutated in place below
    #print(c1,c2)
    for j in range(c1,c2+1):
        # locate xb[j] in the child, then shift elements left (with
        # wrap-around) to squeeze it out, freeing slot space in [c1, c2]
        h = np.where(u==xb[j])[0][0]
        l = h + 1
        while h!=c2:
            # print(h,l)
            # NOTE(review): h/l wrap from D back to 0; whether h == D can
            # actually occur (the loop stops at h == c2 < D) is unclear —
            # confirm before simplifying.
            if h == D :
                h = 0
            if l == D :
                l = 0
            u[h] = u [l]
            h += 1
            l += 1
        # print(u)
    # finally copy xb's segment into the freed slots
    for j in range(c1,c2+1):
        u[j] = xb[j]
    return u
```
D = 10
a = np.arange(D)
np.random.shuffle(a)
b = np.arange(D)
np.random.shuffle(b)
u = order_crossover(a,b)
print(a,b,u)
```
def inversion_mutation(vector, probability):
    '''
    Inversion (flip) mutation for permutation-encoded individuals.

    With the given mutation *probability*, pick two random cut points
    and reverse the segment between them (inclusive), in place;
    otherwise return the vector unchanged.

    Fix: the original tested ``np.random.rand() > probability``, which
    applied the mutation with probability ``1 - probability`` —
    inverted relative to the conventional meaning of a mutation rate
    (callers pass pm = 1/D).  Also fixes the docstring typo
    "machanism".
    '''
    if np.random.rand() < probability:
        D = len(vector)
        r = np.arange(D)
        np.random.shuffle(r)
        m1, m2 = sorted((r[0], r[1]))
        # print(m1,m2)
        vector[m1:(m2 + 1)] = np.flip(vector[m1:(m2 + 1)], 0)
    return vector
def random_init(mu, P, D, evaluate_func, d):
    '''
    Fill *P* with mu random permutation individuals and their fitness.

    mu: number of individuals to create
    P: list receiving (vector, fitness) tuples; extended in place
    D: dimension (number of cities)
    evaluate_func: fitness function taking (vector, d)
    d: distance matrix forwarded to evaluate_func

    Returns P (the same list object) for convenience.
    '''
    base = np.arange(D)
    for _ in range(mu):
        np.random.shuffle(base)
        individual = np.copy(base)
        P.append((individual, evaluate_func(individual, d)))
    return P
def get_distance_matrix(TSP_data):
    '''
    Return the DxD Euclidean distance matrix for a list of 2-D points.

    Fix: the module imports ``from scipy import spatial`` (the bare name
    ``scipy`` is never bound), so the original ``scipy.spatial.distance``
    reference raised NameError at call time; use ``spatial.distance``.
    '''
    # condensed pairwise distances, then expand to a square matrix
    condensed = spatial.distance.pdist(TSP_data, 'euclidean')
    return spatial.distance.squareform(condensed)
def genetic_algorithm(TSP_data):
    '''
    (mu + lambda) genetic algorithm for the TSP.

    TSP_data: list of (x, y) city coordinates.
    Returns (t, n, x_bsf): number of generations, number of fitness
    evaluations, and the best-so-far (tour, length) tuple.

    Convergence: stop after 200 consecutive generations without an
    improvement of the best-so-far solution.  (The original docstring
    said 20, but the loop condition uses 200.)
    '''
    D = len(TSP_data)
    pm = 1/D              # mutation rate
    n = 0                 # fitness evaluations performed
    mu = D                # parent population size
    t = 0                 # generation counter
    lambda_ = 2*mu        # offspring per generation
    d = get_distance_matrix(TSP_data)
    P = list()
    random_init(mu,P,D,evaluate_tour_len,d)
    x_bsf = sorted(P,key=lambda x:x[1])[0]
    count_no_change = 0
    while count_no_change<200:
        Q = list()
        updated = False
        for i in range(lambda_):
            # Step1 Mating Selection
            r = np.arange(len(P))
            np.random.shuffle(r)
            selected = r[:2]
            # Step2: Variation operator : Order Crossover
            u = order_crossover(P[selected[0]][0],P[selected[1]][0])
            # Step3: Variation operator2: inversion_mutation
            u = inversion_mutation(u,pm)
            # Step4: Evaluate
            new_value = evaluate_tour_len(u,d)
            n += 1
            Q.append((u,new_value))
            # Step5: Update bsf solution
            if new_value <x_bsf[1]:
                updated = True
                x_bsf=(u,new_value)
                print(x_bsf)
        # Step6: Environment Selection (truncation: best half of P + Q)
        R = P + Q
        sort_result = sorted(R,key=lambda x:x[1])
        P = sort_result[:int(len(R)/2)]
        t += 1
        if updated == True:
            count_no_change = 0
        else:
            count_no_change += 1
    return (t,n,x_bsf)
def main():
    """Load the wi29 TSP instance from disk and run the genetic algorithm."""
    data = list()
    # dj38.tsp
    with open("wi29.tsp") as tspdata:
        for line in tspdata:
            linedata = line.split(' ')
            # coordinate lines start with the node index; header lines do not
            if linedata[0].isdigit():
                data.append((float(linedata[1]),float(linedata[2])))
    #print(data[-1])
    #print(d)
    x = genetic_algorithm(data)
    print(x)
    return
main()
```
### Final solution
wi29
t = 883, n = 51214
```
[ 0, 1, 5, 4, 3, 2, 6, 8, 7, 9, 10, 11, 12, 13, 16, 17, 14, 18, 21, 22, 20, 28, 27, 25, 19, 15, 24, 26, 23]
31525.83488130699
```
dj38
t = 1552, n = 117952
```
[28, 29, 31, 34, 36, 37, 32, 33, 35, 30, 26, 27, 23, 21, 24, 25, 22, 19, 14, 12, 15, 16, 17, 18, 10, 11, 8, 7, 6, 5, 4, 2, 3, 1, 0, 9, 13, 20]
8021.0298369392722
```
| github_jupyter |
# Setup
```
import sys
import os
import re
import collections
import itertools
import bcolz
import pickle
sys.path.append('../../lib')
sys.path.append('../')
import numpy as np
import pandas as pd
import gc
import random
import smart_open
import h5py
import csv
import json
import functools
import time
import string
import datetime as dt
from tqdm import tqdm_notebook as tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import global_utils
random_state_number = 967898
import tensorflow as tf
from tensorflow.python.client import device_lib
def get_available_gpus():
    """Return the device names of all GPUs visible to TensorFlow."""
    gpu_names = []
    for device in device_lib.list_local_devices():
        if device.device_type == 'GPU':
            gpu_names.append(device.name)
    return gpu_names
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
get_available_gpus()
%pylab
%matplotlib inline
%load_ext line_profiler
%load_ext memory_profiler
%load_ext autoreload
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
color = sns.color_palette()
```
# Data
```
store = pd.HDFStore('../../data_prep/processed/stage1/data_frames.h5')
train_df = store['train_df']
test_df = store['test_df']
display(train_df.head())
display(test_df.head())
corpus_vocab_list, corpus_vocab_wordidx = None, None
with open('../../data_prep/processed/stage1/vocab_words_wordidx.pkl', 'rb') as f:
(corpus_vocab_list, corpus_wordidx) = pickle.load(f)
print(len(corpus_vocab_list), len(corpus_wordidx))
```
# Data Prep
To control the vocabulary pass in updated corpus_wordidx
```
from sklearn.model_selection import train_test_split
x_train_df, x_val_df = train_test_split(train_df,
test_size=0.10, random_state=random_state_number,
stratify=train_df.Class)
print(x_train_df.shape)
print(x_val_df.shape)
from tensorflow.contrib.keras.python.keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
vocab_size=len(corpus_vocab_list)
```
## T:sent_words
### generate data
```
custom_unit_dict = {
"gene_unit" : "words",
"variation_unit" : "words",
# text transformed to sentences attribute
"doc_unit" : "words",
"doc_form" : "sentences",
"divide_document": "multiple_unit"
}
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_21_T, x_train_21_G, x_train_21_V, x_train_21_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data")
print(np.array(x_train_21_T).shape, x_train_21_T[0])
print(np.array(x_train_21_G).shape, x_train_21_G[0])
print(np.array(x_train_21_V).shape, x_train_21_V[0])
print(np.array(x_train_21_C).shape, x_train_21_C[0])
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_21_T, x_val_21_G, x_val_21_V, x_val_21_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_21_T).shape)
print("gene",np.array(x_val_21_G).shape, x_val_21_G[0])
print("variation",np.array(x_val_21_V).shape, x_val_21_V[0])
print("classes",np.array(x_val_21_C).shape, x_val_21_C[0])
```
### format data
```
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_SENT_LEN = 60
x_train_21_T = pad_sequences(x_train_21_T, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
x_val_21_T = pad_sequences(x_val_21_T, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
print(x_train_21_T.shape, x_val_21_T.shape)
```
keras np_utils.to_categorical expects zero index categorical variables
https://github.com/fchollet/keras/issues/570
```
x_train_21_C = np.array(x_train_21_C) - 1
x_val_21_C = np.array(x_val_21_C) - 1
x_train_21_C = np_utils.to_categorical(np.array(x_train_21_C), 9)
x_val_21_C = np_utils.to_categorical(np.array(x_val_21_C), 9)
print(x_train_21_C.shape, x_val_21_C.shape)
```
## T:text_words
### generate data
```
custom_unit_dict = {
"gene_unit" : "words",
"variation_unit" : "words",
# text transformed to sentences attribute
"doc_unit" : "words",
"doc_form" : "text",
"divide_document": "single_unit"
}
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_22_T, x_train_22_G, x_train_22_V, x_train_22_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data")
print("text",np.array(x_train_22_T).shape)
print("gene",np.array(x_train_22_G).shape, x_train_22_G[0])
print("variation",np.array(x_train_22_V).shape, x_train_22_V[0])
print("classes",np.array(x_train_22_C).shape, x_train_22_C[0])
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_22_T, x_val_22_G, x_val_22_V, x_val_22_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_22_T).shape)
print("gene",np.array(x_val_22_G).shape, x_val_22_G[0])
print("variation",np.array(x_val_22_V).shape, x_val_22_V[0])
print("classes",np.array(x_val_22_C).shape, x_val_22_C[0])
```
### format data
```
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_TEXT_LEN = 5000
x_train_22_T = pad_sequences(x_train_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
x_val_22_T = pad_sequences(x_val_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
print(x_train_22_T.shape, x_val_22_T.shape)
MAX_GENE_LEN = 1
MAX_VAR_LEN = 4
x_train_22_G = pad_sequences(x_train_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_train_22_V = pad_sequences(x_train_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
x_val_22_G = pad_sequences(x_val_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_val_22_V = pad_sequences(x_val_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
print(x_train_22_G.shape, x_train_22_V.shape)
print(x_val_22_G.shape, x_val_22_V.shape)
```
keras np_utils.to_categorical expects zero index categorical variables
https://github.com/fchollet/keras/issues/570
```
x_train_22_C = np.array(x_train_22_C) - 1
x_val_22_C = np.array(x_val_22_C) - 1
x_train_22_C = np_utils.to_categorical(np.array(x_train_22_C), 9)
x_val_22_C = np_utils.to_categorical(np.array(x_val_22_C), 9)
print(x_train_22_C.shape, x_val_22_C.shape)
```
### test Data setup
```
gen_data = global_utils.GenerateDataset(test_df, corpus_wordidx)
x_test_22_T, x_test_22_G, x_test_22_V, _ = gen_data.generate_data(custom_unit_dict,
has_class=False,
add_start_end_tag=True)
del gen_data
print("Test data")
print("text",np.array(x_test_22_T).shape)
print("gene",np.array(x_test_22_G).shape, x_test_22_G[0])
print("variation",np.array(x_test_22_V).shape, x_test_22_V[0])
x_test_22_T = pad_sequences(x_test_22_T, maxlen=MAX_TEXT_LEN, value=word_unknown_tag_idx,
padding="post",truncating="post")
print(x_test_22_T.shape)
MAX_GENE_LEN = 1
MAX_VAR_LEN = 4
x_test_22_G = pad_sequences(x_test_22_G, maxlen=MAX_GENE_LEN, value=word_unknown_tag_idx)
x_test_22_V = pad_sequences(x_test_22_V, maxlen=MAX_VAR_LEN, value=word_unknown_tag_idx)
print(x_test_22_G.shape, x_test_22_V.shape)
```
## T:text_chars
### generate data
```
custom_unit_dict = {
"gene_unit" : "raw_chars",
"variation_unit" : "raw_chars",
# text transformed to sentences attribute
"doc_unit" : "raw_chars",
"doc_form" : "text",
"divide_document" : "multiple_unit"
}
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_33_T, x_train_33_G, x_train_33_V, x_train_33_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data")
print("text",np.array(x_train_33_T).shape, x_train_33_T[0])
print("gene",np.array(x_train_33_G).shape, x_train_33_G[0])
print("variation",np.array(x_train_33_V).shape, x_train_33_V[0])
print("classes",np.array(x_train_33_C).shape, x_train_33_C[0])
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_33_T, x_val_33_G, x_val_33_V, x_val_33_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_33_T).shape, x_val_33_T[98])
print("gene",np.array(x_val_33_G).shape, x_val_33_G[0])
print("variation",np.array(x_val_33_V).shape, x_val_33_V[0])
print("classes",np.array(x_val_33_C).shape, x_val_33_C[0])
```
### format data
```
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_CHAR_IN_SENT_LEN = 150
x_train_33_T = pad_sequences(x_train_33_T, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx,
padding="post",truncating="post")
x_val_33_T = pad_sequences(x_val_33_T, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx,
padding="post",truncating="post")
print(x_train_33_T.shape, x_val_33_T.shape)
x_train_33_G = pad_sequences(x_train_33_G, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_train_33_V = pad_sequences(x_train_33_V, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_val_33_G = pad_sequences(x_val_33_G, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
x_val_33_V = pad_sequences(x_val_33_V, maxlen=MAX_CHAR_IN_SENT_LEN, value=char_unknown_tag_idx)
print(x_train_33_G.shape, x_train_33_V.shape)
print(x_val_33_G.shape, x_val_33_V.shape)
```
keras np_utils.to_categorical expects zero index categorical variables
https://github.com/fchollet/keras/issues/570
```
x_train_33_C = np.array(x_train_33_C) - 1
x_val_33_C = np.array(x_val_33_C) - 1
x_train_33_C = np_utils.to_categorical(np.array(x_train_33_C), 9)
x_val_33_C = np_utils.to_categorical(np.array(x_val_33_C), 9)
print(x_train_33_C.shape, x_val_33_C.shape)
```
## T:text_sent_words
### generate data
```
custom_unit_dict = {
"gene_unit" : "words",
"variation_unit" : "words",
# text transformed to sentences attribute
"doc_unit" : "word_list",
"doc_form" : "text",
"divide_document" : "single_unit"
}
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_train_df, corpus_wordidx)
x_train_34_T, x_train_34_G, x_train_34_V, x_train_34_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Train data")
print("text",np.array(x_train_34_T).shape, x_train_34_T[0][:1])
print("gene",np.array(x_train_34_G).shape, x_train_34_G[0])
print("variation",np.array(x_train_34_V).shape, x_train_34_V[0])
print("classes",np.array(x_train_34_C).shape, x_train_34_C[0])
%autoreload
import global_utils
gen_data = global_utils.GenerateDataset(x_val_df, corpus_wordidx)
x_val_34_T, x_val_34_G, x_val_34_V, x_val_34_C = gen_data.generate_data(custom_unit_dict,
has_class=True,
add_start_end_tag=True)
del gen_data
print("Val data")
print("text",np.array(x_val_34_T).shape, x_val_34_T[98][:1])
print("gene",np.array(x_val_34_G).shape, x_val_34_G[0])
print("variation",np.array(x_val_34_V).shape, x_val_34_V[0])
print("classes",np.array(x_val_34_C).shape, x_val_34_C[0])
```
### format data
```
word_unknown_tag_idx = corpus_wordidx["<UNK>"]
char_unknown_tag_idx = global_utils.char_unknown_tag_idx
MAX_DOC_LEN = 500 # no of sentences in a document
MAX_SENT_LEN = 80 # no of words in a sentence
for doc_i, doc in enumerate(x_train_34_T):
x_train_34_T[doc_i] = x_train_34_T[doc_i][:MAX_DOC_LEN]
# padding sentences
if len(x_train_34_T[doc_i]) < MAX_DOC_LEN:
for not_used_i in range(0,MAX_DOC_LEN - len(x_train_34_T[doc_i])):
x_train_34_T[doc_i].append([word_unknown_tag_idx]*MAX_SENT_LEN)
# padding words
x_train_34_T[doc_i] = pad_sequences(x_train_34_T[doc_i], maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
for doc_i, doc in enumerate(x_val_34_T):
x_val_34_T[doc_i] = x_val_34_T[doc_i][:MAX_DOC_LEN]
# padding sentences
if len(x_val_34_T[doc_i]) < MAX_DOC_LEN:
for not_used_i in range(0,MAX_DOC_LEN - len(x_val_34_T[doc_i])):
x_val_34_T[doc_i].append([word_unknown_tag_idx]*MAX_SENT_LEN)
# padding words
x_val_34_T[doc_i] = pad_sequences(x_val_34_T[doc_i], maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_train_34_T = np.array(x_train_34_T)
x_val_34_T = np.array(x_val_34_T)
print(x_val_34_T.shape, x_train_34_T.shape)
x_train_34_G = pad_sequences(x_train_34_G, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_train_34_V = pad_sequences(x_train_34_V, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_val_34_G = pad_sequences(x_val_34_G, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
x_val_34_V = pad_sequences(x_val_34_V, maxlen=MAX_SENT_LEN, value=word_unknown_tag_idx)
print(x_train_34_G.shape, x_train_34_V.shape)
print(x_val_34_G.shape, x_val_34_V.shape)
```
keras np_utils.to_categorical expects zero index categorical variables
https://github.com/fchollet/keras/issues/570
```
x_train_34_C = np.array(x_train_34_C) - 1
x_val_34_C = np.array(x_val_34_C) - 1
x_train_34_C = np_utils.to_categorical(np.array(x_train_34_C), 9)
x_val_34_C = np_utils.to_categorical(np.array(x_val_34_C), 9)
print(x_train_34_C.shape, x_val_34_C.shape)
```
Need to form 3 dimensional target data for rationale model training
```
temp = (x_train_34_C.shape[0],1,x_train_34_C.shape[1])
x_train_34_C_sent = np.repeat(x_train_34_C.reshape(temp[0],temp[1],temp[2]), MAX_DOC_LEN, axis=1)
#sentence test targets
temp = (x_val_34_C.shape[0],1,x_val_34_C.shape[1])
x_val_34_C_sent = np.repeat(x_val_34_C.reshape(temp[0],temp[1],temp[2]), MAX_DOC_LEN, axis=1)
print(x_train_34_C_sent.shape, x_val_34_C_sent.shape)
```
## Embedding layer
### for words
```
WORD_EMB_SIZE1 = 300
WORD_EMB_SIZE2 = 200
WORD_EMB_SIZE3 = 100
%autoreload
import global_utils
ft_file_path = "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/processed/stage1/pretrained_word_vectors/ft_sg_300d_50e.vec"
trained_embeddings1 = global_utils.get_embeddings_from_ft(ft_file_path, WORD_EMB_SIZE1, corpus_vocab_list)
ft_file_path = "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/processed/stage1/pretrained_word_vectors/ft_sg_200d_50e.vec"
trained_embeddings2 = global_utils.get_embeddings_from_ft(ft_file_path, WORD_EMB_SIZE2, corpus_vocab_list)
ft_file_path = "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/processed/stage1/pretrained_word_vectors/ft_sg_100d_20e.vec"
trained_embeddings3 = global_utils.get_embeddings_from_ft(ft_file_path, WORD_EMB_SIZE3, corpus_vocab_list)
print (trained_embeddings1.shape)
print (trained_embeddings2.shape)
print (trained_embeddings3.shape)
```
### for characters
```
CHAR_EMB_SIZE = 64
char_embeddings = np.random.randn(global_utils.CHAR_ALPHABETS_LEN, CHAR_EMB_SIZE)
char_embeddings.shape
```
# Models
## prep
```
%autoreload
import tensorflow.contrib.keras as keras
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer, InputSpec, InputLayer
from keras.models import Model, Sequential
from keras.layers import Dropout, Embedding, concatenate
from keras.layers import Conv1D, MaxPool1D, Conv2D, MaxPool2D, ZeroPadding1D, GlobalMaxPool1D
from keras.layers import Dense, Input, Flatten, BatchNormalization
from keras.layers import Concatenate, Dot, Merge, Multiply, RepeatVector
from keras.layers import Bidirectional, TimeDistributed
from keras.layers import SimpleRNN, LSTM, GRU, Lambda, Permute
from keras.layers.core import Reshape, Activation
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint,EarlyStopping,TensorBoard
from keras.constraints import maxnorm
from keras.regularizers import l2
from paper_2_cnn_modelling_sentences.utils import KMaxPooling, Folding
```
## model_1: paper
refer https://github.com/bwallace/rationale-CNN
```
text_seq_input = Input(shape=(MAX_SENT_LEN,), dtype='int32')
text_embedding1 = Embedding(vocab_size, WORD_EMB_SIZE1, input_length=MAX_SENT_LEN,
weights=[trained_embeddings1], trainable=True)(text_seq_input)
text_embedding2 = Embedding(vocab_size, WORD_EMB_SIZE2, input_length=MAX_SENT_LEN,
weights=[trained_embeddings2], trainable=True)(text_seq_input)
text_embedding3 = Embedding(vocab_size, WORD_EMB_SIZE3, input_length=MAX_SENT_LEN,
weights=[trained_embeddings3], trainable=True)(text_seq_input)
k_top = 4
filter_sizes = [3,5]
conv_pools = []
for text_embedding in [text_embedding1, text_embedding2, text_embedding3]:
for filter_size in filter_sizes:
l_zero = ZeroPadding1D((filter_size-1,filter_size-1))(text_embedding)
l_conv = Conv1D(filters=16, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
l_pool = GlobalMaxPool1D()(l_conv)
conv_pools.append(l_pool)
l_merge = Concatenate(axis=1)(conv_pools)
l_dense = Dense(128, activation='relu', kernel_regularizer=l2(0.01))(l_merge)
l_out = Dense(9, activation='softmax')(l_dense)
model_1 = Model(inputs=[text_seq_input], outputs=l_out)
```
#### training
```
# Compile with categorical cross-entropy (labels are one-hot) and Adam.
model_1.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['categorical_accuracy'])
model_1.summary()
# Clear stale TensorBoard logs (IPython shell magic).
%rm -rf ./tb_graphs/*
tb_callback = keras.callbacks.TensorBoard(log_dir='./tb_graphs', histogram_freq=0, write_graph=True, write_images=True)
# Keep only the weights that achieve the best validation accuracy.
checkpointer = ModelCheckpoint(filepath="model_1_weights.hdf5",
                               verbose=1,
                               monitor="val_categorical_accuracy",
                               save_best_only=True,
                               mode="max")
with tf.Session() as sess:
    # model = keras.models.load_model('current_model.h5')
    sess.run(tf.global_variables_initializer())
    # Warm-start from a previous checkpoint when one exists; otherwise train
    # from the freshly initialized weights.
    try:
        model_1.load_weights("model_1_weights.hdf5")
    except IOError as ioe:
        print("no checkpoints available !")
    model_1.fit(x_train_21_T, x_train_21_C,
                validation_data=(x_val_21_T, x_val_21_C),
                epochs=5, batch_size=1024, shuffle=True,
                callbacks=[tb_callback,checkpointer])
    #model.save('current_sent_model.h5')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/W2D1-postcourse-bugfix/tutorials/W2D2_LinearSystems/W2D2_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy 2020, Week 2, Day 2, Tutorial 4
# Autoregressive models
**Content Creators**: Bing Wen Brunton, Biraj Pandey
**Content Reviewers**: Norma Kuhn, John Butler, Matthew Krause, Ella Batty, Richard Gao, Michael Waskom
---
# Tutorial Objectives
The goal of this tutorial is to use the modeling tools and intuitions developed in the previous few tutorials and use them to _fit data_. The concept is to flip the previous tutorial -- instead of generating synthetic data points from a known underlying process, what if we are given data points measured in time and have to learn the underlying process?
This tutorial is in two sections.
**Section 1** walks through using regression of data to solve for the coefficient of an OU process from Tutorial 3. Next, **Section 2** generalizes this auto-regression framework to high-order autoregressive models, and we will try to fit data from monkeys at typewriters.
---
# Setup
```
import numpy as np
import matplotlib.pyplot as plt

#@title Figure settings
import ipywidgets as widgets  # interactive display
# Render figures at retina resolution and apply the NMA style sheet.
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# drift-diffusion model, from Tutorial 3
def ddm(T, x0, xinfty, lam, sig):
    """Sample one trajectory of a drift-diffusion (OU) process.

    Args:
        T (int): number of time steps to simulate
        x0 (float): position at time 0
        xinfty (float): equilibrium (long-run mean) position
        lam (float): decay parameter of the process
        sig (float): standard deviation of the Gaussian noise

    Returns:
        tuple: (t, x) — time points 0..T-1 sampled every 1 unit, and the
        position of the process at each of those time points.
    """
    t = np.arange(0, T, 1.)
    x = np.zeros_like(t)
    x[0] = x0
    # Each step relaxes toward xinfty by factor lam and adds Gaussian noise.
    for step in range(1, len(t)):
        drift = xinfty + lam * (x[step - 1] - xinfty)
        x[step] = drift + sig * np.random.standard_normal(size=1)
    return t, x
def build_time_delay_matrices(x, r):
    """Construct the time-delayed design matrices for an order-r AR model.

    Args:
        x (numpy array of floats): time series to be auto-regressed
        r (int): order of the autoregression model

    Returns:
        tuple: (x1, x2) where x1 is the [r+1, n-r] predictor matrix whose
        first row is all ones (intercept) followed by r successively
        shifted copies of x, and x2 is the length n-r vector of targets.
    """
    n_obs = len(x) - r
    # First predictor row is the constant 1 (intercept term).
    rows = [np.ones(n_obs)]
    shifted = x
    for lag in range(r):
        if lag > 0:
            # Each extra row looks one step further ahead in the series.
            shifted = np.roll(shifted, -1)
        rows.append(shifted[:n_obs])
    x1 = np.vstack(rows)
    x2 = x[r:]
    return x1, x2
def AR_model(x, r):
    """Fit an order-r autoregressive model to x by least squares.

    Args:
        x (numpy array of floats): time series to be auto-regressed
        r (int): order of the autoregression model

    Returns:
        tuple: (x1, x2, p) — the [r+1, n-r] predictor matrix, the target
        vector, and the fitted coefficient vector p of length r+1
        (intercept first).
    """
    x1, x2 = build_time_delay_matrices(x, r)
    # Least-squares solve of x1.T @ p ~= x2 for the AR coefficients.
    p = np.linalg.lstsq(x1.T, x2, rcond=None)[0]
    return x1, x2, p
def AR_prediction(x_test, p):
    """Predict each step of x_test using fitted AR coefficients p.

    Args:
        x_test (numpy array of floats): test series to predict
        p (numpy array of floats): coefficients (length r+1) from a fitted
            order-r autoregression

    Returns:
        numpy array of floats: predictions thresholded to +1 when the raw
        regression output is positive and -1 when it is negative.
    """
    # The model order is one less than the coefficient count (intercept row).
    order = len(p) - 1
    x1, _ = build_time_delay_matrices(x_test, order)
    # The raw regression output is real-valued; its sign is the binary guess.
    return np.sign(x1.T.dot(p))
def error_rate(x_test, p):
    """Fraction of mismatched AR predictions on x_test.

    Args:
        x_test (numpy array of floats): test series to predict
        p (numpy array of floats): coefficients (length r+1) from a fitted
            order-r autoregression

    Returns:
        float: number of wrong predictions divided by the number of
        predicted points.
    """
    order = len(p) - 1
    _, targets = build_time_delay_matrices(x_test, order)
    # A nonzero difference between target and +/-1 prediction is a mismatch.
    mismatches = targets - AR_prediction(x_test, p)
    return np.count_nonzero(mismatches) / len(targets)
def plot_residual_histogram(res):
    """Helper function for Exercise 4A: histogram of model residuals."""
    fig = plt.figure()
    stdev = res.std()
    plt.hist(res)
    plt.xlabel('error in linear model')
    plt.title('stdev of errors = {std:.4f}'.format(std=stdev))
    plt.show()
def plot_training_fit(x1, x2, p):
    """Helper function for Exercise 4B: scatter observed vs. fitted values.

    Args:
        x1 (numpy array of floats): [r+1, n-r] predictor matrix
        x2 (numpy array of floats): observed values to predict
        p (numpy array of floats): fitted AR coefficients of length r+1
    """
    # Derive the model order from the coefficient vector instead of reading
    # a global `r` (the old title raised NameError when `r` was undefined,
    # and showed the wrong order when the global differed from p).
    order = len(p) - 1
    fig = plt.figure()
    # Small horizontal jitter so overlapping +1/-1 data points stay visible.
    plt.scatter(x2 + np.random.standard_normal(len(x2))*0.02,
                np.dot(x1.T, p), alpha=0.2)
    plt.title('Training fit, order {r:d} AR model'.format(r=order))
    plt.xlabel('x')
    plt.ylabel('estimated x')
    plt.show()
```
# Section 1: Fitting data to the OU process
```
#@title Video 1: Autoregressive models
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="VdiVSTPbJ7I", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# Last expression in the cell: renders the embedded player.
video
```
To see how this works, let's continue the previous example with the drift-diffusion (OU) process. Our process had the following form:
$x_{k+1} = x_{\infty} + \lambda(x_k - x_{\infty}) + \sigma \eta$
where $\eta$ is sampled from a standard normal distribution.
For simplicity, we set $x_\infty = 0$. Let's plot a trajectory for this process again below. Take note of the parameters of the process because they will be important later. **Run the code cell below.**
```
#@title Simulating the drift diffusion model
np.random.seed(2020) # set random seed

# parameters
T = 200       # number of time steps
x0 = 10       # initial position
xinfty = 0    # equilibrium position
lam = 0.9     # decay parameter
sig = 0.2     # noise standard deviation

# drift-diffusion model from tutorial 3
t, x = ddm(T, x0, xinfty, lam, sig)

fig = plt.figure()
plt.title('$x_0=%d, x_{\infty}=%d, \lambda=%0.1f, \sigma=%0.1f$' % (x0, xinfty, lam, sig))
plt.plot(t, x, 'k.')
plt.xlabel('time')
plt.ylabel('position x')
plt.show()
```
What if we were given these positions $x$ as they evolve in time as data, how would we get back out the dynamics of the system $\lambda$?
Since a little bird told us that this system takes on the form
$x_{k+1} = \lambda x_k + \eta$,
where $\eta$ is noise from a normal distribution, our approach is to solve for $\lambda$ as a **regression problem**.
As a check, let's plot every pair of points adjacent in time ($x_{k+1}$ vs. $x_k$) against each other to see if there is a linear relationship between them. **Run the code cell below.**
```
# @title X(k) vs. X(k+1)
# make a scatter plot of every data point in x
# at time k versus time k+1
fig = plt.figure()
# x[0:-2] and x[1:-1] are the same series shifted by one step; a linear
# cloud here indicates linear dynamics.
plt.scatter(x[0:-2], x[1:-1], color='k')
plt.plot([0, 10], [0, 10], 'k--', label='$x_{k+1} = x_k$ line')
plt.xlabel('$x_k$')
plt.ylabel('$x_{k+1}$')
plt.legend()
plt.show()
```
Hooray, it's a line! This is evidence that the _dynamics that generated the data_ is **linear**. We can now reformulate this task as a regression problem.
Let $\mathbf{x_1} = x_{0:T-1}$ and $\mathbf{x_2} = x_{1:T}$ be vectors of the data indexed so that they are shifted in time by one. Then, our regression problem is
$$\mathbf{x}_2 = \lambda \mathbf{x}_1$$
This model is **autoregressive**, where _auto_ means self. In other words, it's a regression of the time series on itself from the past. The equation as written above is only a function of itself from _one step_ in the past, so we can call it a _first order_ autoregressive model.
Now, let's set up the regression problem below and solve for $\lambda.$ We will plot our data with the regression line to see if they agree. **Run the code cell below.**
```
#@title Solving for lambda through autoregression

# build the two data vectors from x
x1 = x[0:-2]
# Raising each value to the powers [0, 1] yields an [n, 2] design matrix:
# a column of ones (intercept) next to the raw values.
x1 = x1[:, np.newaxis]**[0, 1]
x2 = x[1:-1]

# solve for an estimate of lambda as a linear regression problem
p, res, rnk, s = np.linalg.lstsq(x1, x2, rcond=None)

# here we've artificially added a vector of 1's to the x1 array,
# so that our linear regression problem has an intercept term to fit.
# we expect this coefficient to be close to 0.
# the second coefficient in the regression is the linear term:
# that's the one we're after!
lam_hat = p[1]

# plot the data points
fig = plt.figure()
plt.scatter(x[0:-2], x[1:-1], color='k')
plt.xlabel('$x_k$')
plt.ylabel('$x_{k+1}$')

# plot the 45 degree line
plt.plot([0, 10], [0, 10], 'k--', label='$x_{k+1} = x_k$ line')

# plot the regression line on top
xx = np.linspace(-sig*10, max(x), 100)
yy = p[0] + lam_hat * xx
plt.plot(xx, yy, 'r', linewidth=2, label='regression line')

mytitle = 'True $\lambda$ = {lam:.4f}, Estimate $\lambda$ = {lam_hat:.4f}'
plt.title(mytitle.format(lam=lam, lam_hat=lam_hat))
plt.legend()
plt.show()
```
Pretty cool! So now we have a way to predict $x_{k+1}$ if given any data point $x_k$. Let's take a look at how accurate this one-step prediction might be by plotting the residuals.
## Exercise 1 (4A): Residuals of the autoregressive model
Plot a histogram of residuals of our autoregressive model, by taking the difference between the _data_ $\mathbf{x_2}$ and the _model_ prediction. Do you notice anything about the standard deviation of these residuals and the equations that generated this synthetic dataset?
```
##############################################################################
## Insert your code here to compute the residual (error)
##############################################################################
# compute the predicted values using the autoregressive model (lam_hat), and
# the residual is the difference between x2 and the prediction
# res = ...

# Uncomment once you fill out above
#plot_residual_histogram(res)

# to_remove solution
# compute the predicted values using the autoregressive model (lam_hat), and
# the residual is the difference between x2 and the prediction
# residual = observed x2 minus the one-step prediction lam_hat * x_k
res = x2 - (lam_hat * x[0:-2])
with plt.xkcd():
    plot_residual_histogram(res)
```
---
# Section 2: Higher order autoregressive models
```
#@title Video 2: Monkey at a typewriter
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="f2z0eopWB8Y", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
# Last expression in the cell: renders the embedded player.
video
```
Now that we have established the autoregressive framework, generalizing for dependence on data points from the past is straightforward. **Higher order** autoregression models a future time point based on _more than one point in the past_.
In one dimension, we can write such an order-$r$ model as
$x_{k+1} = \alpha_0 + \alpha_1 x_k + \alpha_2 x_{k-1} + \alpha_3 x_{k-2} + \dots + \alpha_r x_{k-r+1}$,
where the $\alpha$'s are the $r+1$ coefficients to be fit to the data available.
These models are useful to account for some **history dependence** in the trajectory of timeseries. This next part of the tutorial will explore one such timeseries, and you can do an experiment on yourself!
In particular, we will explore a binary random sequence of 0's and 1's that would occur if you flipped a coin and jotted down the flips.
The difference is that, instead of actually flipping a coin (or using code to generate such a sequence), you -- yes you, human -- are going to generate such a random Bernoulli sequence as best as you can by typing in 0's and 1's. We will then build higher-order AR models to see if we can identify predictable patterns in the time-history of digits you generate.
**But first**, let's try this on a sequence with a simple pattern, just to make sure the framework is functional. Below, we generate an entirely predictable sequence and plot it.
```
# this sequence is entirely predictable, so an AR model should work
monkey_at_typewriter = '1010101010101010101010101010101010101010101010101'

# Bonus: this sequence is also predictable, but does an order-1 AR model work?
#monkey_at_typewriter = '100100100100100100100100100100100100100'

# function to turn chars to numpy array,
# coding it this way makes the math easier
# '0' -> -1
# '1' -> +1
def char2array(s):
    """Convert a string of '0'/'1' characters to a numpy array of -1/+1."""
    digits = np.array([int(ch) for ch in s])
    # Map {0, 1} -> {-1, +1} so the values are symmetric around zero.
    return 2 * digits - 1
x = char2array(monkey_at_typewriter)

# Plot the encoded +/-1 sequence as a step function.
fig = plt.figure()
plt.step(x, '.-')
plt.xlabel('time')
plt.ylabel('random variable')
plt.show()
```
Now, let's set up our regression problem (order 1 autoregression like above) by defining $\mathbf{x_1}$ and $\mathbf{x_2}$ and solve it.
```
# build the two data vectors from x
x1 = x[0:-2]
# powers [0, 1] add an intercept column of ones next to the raw values
x1 = x1[:, np.newaxis]**[0, 1]
x2 = x[1:-1]

# solve for an estimate of lambda as a linear regression problem
p, res, rnk, s = np.linalg.lstsq(x1, x2, rcond=None)

# take a look at the resulting regression coefficients
print('alpha_0 = {a0:.2f}, alpha_1 = {a1:.2f}'.format(a0=p[0], a1=p[1]))
```
## Think:
Do the values we got for $\alpha_0$ and $\alpha_1$ make sense? Write down the corresponding autoregressive model and convince yourself that it gives the alternating 0's and 1's we asked it to fit as data.
```
# to_remove explanation
# The bare triple-quoted string below is the worked explanation for the
# "Think" prompt; it is not executed as code.
"""
The corresponding autoregressive model is:
x_{k+1} = 0 - x_{k}
""";
```
Truly random sequences of numbers have no structure and should not be predictable by an AR or any other models.
However, humans are notoriously terrible at generating random sequences of numbers! (Other animals are no better...)
To test out an application of higher-order AR models, let's use them to **model a sequence of 0's and 1's that a human tried to produce at random**. In particular, I convinced my 9-yr-old monkey to sit at a typewriter (my laptop) and enter some digits as randomly as he is able. The digits he typed in are in the code, and we can plot them as a timeseries of digits here.
If the digits really have no structure, then we expect our model to do about as well as guessing, producing an error rate of 0.5. Let's see how well we can do!
```
# data generated by 9-yr-old JAB:
# we will be using this sequence to train the data
monkey_at_typewriter = '10010101001101000111001010110001100101000101101001010010101010001101101001101000011110100011011010010011001101000011101001110000011111011101000011110000111101001010101000111100000011111000001010100110101001011010010100101101000110010001100011100011100011100010110010111000101'

# we will be using this sequence to test the data
test_monkey = '00100101100001101001100111100101011100101011101001010101000010110101001010100011110'

x = char2array(monkey_at_typewriter)
test = char2array(test_monkey)

## testing: machine generated randint should be entirely unpredictable
## uncomment the lines below to try random numbers instead
# np.random.seed(2020) # set random seed
# x = char2array(np.random.randint(2, size=500))
# test = char2array(np.random.randint(2, size=500))

fig = plt.figure()
plt.step(x, '.-')
plt.show()
```
## Exercise 2 (4B): Fitting AR models
Fit a order-5 ($r=5$) AR model to the data vector $x$. To do this, we have included some helper functions, including ``AR_model``.
We will then plot the observations against the trained model. Note that this means we are using a sequence of the previous 5 digits to predict the next one.
Additionally, output from our regression model are continuous (real numbers) whereas our data are scalar (+1/-1). So, we will take the sign of our continuous outputs (+1 if positive and -1 if negative) as our predictions to make them comparable with data. Our error rate will simply be the number of mismatched predictions divided by the total number of predictions.
```
# Let's see what our function AR model entails
help(AR_model)

##############################################################################
## TODO: Insert your code here for fitting the AR model
##############################################################################
# define the model order, and use AR_model() to generate the model and prediction
# r = ...
# x1, x2, p = AR_model(...)

# Uncomment below once you've completed above
# Plot the Training data fit
# Note that this adds a small amount of jitter to the horizontal axis for visualization purposes
# plot_training_fit(x1, x2, p)

# to_remove solution
# define the model order, and use AR_model() to generate the model and prediction
r = 5 # remove later
x1, x2, p = AR_model(x, r)
# Uncomment below once you've completed above
# Plot the Training data fit
# Note that this adds a small amount of jitter to the horizontal axis for visualization purposes
with plt.xkcd():
    plot_training_fit(x1, x2, p)
```
Let's check out how the model does on the test data that it's never seen before!
```
# Build the order-r delay matrices for the held-out test sequence.
x1_test, x2_test = build_time_delay_matrices(test, r)

fig = plt.figure()
# Small horizontal jitter so overlapping +/-1 points stay visible.
plt.scatter(x2_test+np.random.standard_normal(len(x2_test))*0.02,
            np.dot(x1_test.T, p), alpha=0.5)

mytitle = 'Testing fit, order {r:d} AR model, err = {err:.3f}'
plt.title(mytitle.format(r=r, err=error_rate(test, p)))
plt.xlabel('test x')
plt.ylabel('estimated x')
```
Not bad! We're getting errors that are smaller than 0.5 (what we would have gotten by chance).
Let's now try **AR models of different orders** systematically, and plot the test error of each.
_Remember_: The model has never seen the test data before, and random guessing would produce an error of $0.5$.
```
# range of r's to try
r = np.arange(1, 21)
# start every test error at 1.0 (float array) before filling in
err = np.ones_like(r) * 1.0

for i, rr in enumerate(r):
    # fitting the model on training data
    x1, x2, p = AR_model(x, rr)

    # computing and storing the test error
    test_error = error_rate(test, p)
    err[i] = test_error

fig = plt.figure()
plt.plot(r, err, '.-')
plt.plot([1, r[-1]], [0.5, 0.5], c='r', label='random chance')
plt.xlabel('Order r of AR model')
plt.ylabel('Test error')
plt.xticks(np.arange(0,25,5))
plt.legend()
plt.show()
```
Notice that there's a sweet spot in the test error! The 6th order AR model does a really good job here, and for larger $r$'s, the model starts to overfit the training data and does not do well on the test data.
In summary:
"**I can't believe I'm so predictable!**" - JAB
---
# Summary
In this tutorial, we learned:
* How learning the parameters of a linear dynamical system can be formulated as a regression problem from data.
* Time-history dependence can be incorporated into the regression framework as a multiple regression problem.
* That humans are no good at generating random (not predictable) sequences. Try it on yourself!
| github_jupyter |
# Plagiarism Detection Model
Now that you've created training and test data, you are ready to define and train a model. Your goal in this notebook, will be to train a binary classification model that learns to label an answer file as either plagiarized or not, based on the features you provide the model.
This task will be broken down into a few discrete steps:
* Upload your data to S3.
* Define a binary classification model and a training script.
* Train your model and deploy it.
* Evaluate your deployed classifier and answer some questions about your approach.
To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook.
> All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**.
It will be up to you to explore different classification models and decide on a model that gives you the best performance for this dataset.
---
## Load Data to S3
In the last notebook, you should have created two files: a `training.csv` and `test.csv` file with the features and class labels for the given corpus of plagiarized/non-plagiarized text data.
>The below cells load in some AWS SageMaker libraries and creates a default bucket. After creating this bucket, you can upload your locally stored data to S3.
Save your train and test `.csv` feature files, locally. To do this you can run the second notebook "2_Plagiarism_Feature_Engineering" in SageMaker or you can manually upload your files to this notebook using the upload icon in Jupyter Lab. Then you can upload local files to S3 by using `sagemaker_session.upload_data` and pointing directly to where the training data is saved.
```
import pandas as pd
import boto3
import sagemaker

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""

# session and role
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()  # IAM role this notebook instance runs under

# create an S3 bucket
bucket = sagemaker_session.default_bucket()  # the account's default SageMaker bucket
```
## EXERCISE: Upload your training data to S3
Specify the `data_dir` where you've saved your `train.csv` file. Decide on a descriptive `prefix` that defines where your data will be uploaded in the default S3 bucket. Finally, create a pointer to your training data by calling `sagemaker_session.upload_data` and passing in the required parameters. It may help to look at the [Session documentation](https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.Session.upload_data) or previous SageMaker code examples.
You are expected to upload your entire directory. Later, the training script will only access the `train.csv` file.
```
import os

# should be the name of directory you created to save your features data
data_dir = 'plagiarism_data'

# set prefix, a descriptive name for a directory
prefix = 'udacity-plagiarism-detection'

# upload all data to S3
# NOTE(review): the two csv files are uploaded individually under the same
# key prefix; the training script later reads only train.csv from there
test_location = sagemaker_session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix)
train_location = sagemaker_session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)
```
### Test cell
Test that your data has been successfully uploaded. The below cell prints out the items in your S3 bucket and will throw an error if it is empty. You should see the contents of your `data_dir` and perhaps some checkpoints. If you see any other files listed, then you may have some old model files that you can delete via the S3 console (though, additional files shouldn't affect the performance of model developed in this notebook).
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# confirm that data is in S3 bucket
empty_check = []
for obj in boto3.resource('s3').Bucket(bucket).objects.all():
empty_check.append(obj.key)
print(obj.key)
assert len(empty_check) !=0, 'S3 bucket is empty.'
print('Test passed!')
```
---
# Modeling
Now that you've uploaded your training data, it's time to define and train a model!
The type of model you create is up to you. For a binary classification task, you can choose to go one of three routes:
* Use a built-in classification algorithm, like LinearLearner.
* Define a custom Scikit-learn classifier, a comparison of models can be found [here](https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html).
* Define a custom PyTorch neural network classifier.
It will be up to you to test out a variety of models and choose the best one. Your project will be graded on the accuracy of your final model.
---
## EXERCISE: Complete a training script
To implement a custom classifier, you'll need to complete a `train.py` script. You've been given the folders `source_sklearn` and `source_pytorch` which hold starting code for a custom Scikit-learn model and a PyTorch model, respectively. Each directory has a `train.py` training script. To complete this project **you only need to complete one of these scripts**; the script that is responsible for training your final model.
A typical training script:
* Loads training data from a specified directory
* Parses any training & model hyperparameters (ex. nodes in a neural network, training epochs, etc.)
* Instantiates a model of your design, with any specified hyperparams
* Trains that model
* Finally, saves the model so that it can be hosted/deployed, later
### Defining and training a model
Much of the training script code is provided for you. Almost all of your work will be done in the `if __name__ == '__main__':` section. To complete a `train.py` file, you will:
1. Import any extra libraries you need
2. Define any additional model training hyperparameters using `parser.add_argument`
2. Define a model in the `if __name__ == '__main__':` section
3. Train the model in that same section
Below, you can use `!pygmentize` to display an existing `train.py` file. Read through the code; all of your tasks are marked with `TODO` comments.
**Note: If you choose to create a custom PyTorch model, you will be responsible for defining the model in the `model.py` file,** and a `predict.py` file is provided. If you choose to use Scikit-learn, you only need a `train.py` file; you may import a classifier from the `sklearn` library.
```
# directory can be changed to: source_sklearn or source_pytorch
# (IPython shell escape) syntax-highlight the training script for review
!pygmentize source_pytorch/train.py
```
### Provided code
If you read the code above, you can see that the starter code includes a few things:
* Model loading (`model_fn`) and saving code
* Getting SageMaker's default hyperparameters
* Loading the training data by name, `train.csv` and extracting the features and labels, `train_x`, and `train_y`
If you'd like to read more about model saving with [joblib for sklearn](https://scikit-learn.org/stable/modules/model_persistence.html) or with [torch.save](https://pytorch.org/tutorials/beginner/saving_loading_models.html), click on the provided links.
---
# Create an Estimator
When a custom model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained; the `train.py` function you specified above. To run a custom training script in SageMaker, construct an estimator, and fill in the appropriate constructor arguments:
* **entry_point**: The path to the Python script SageMaker runs for training and prediction.
* **source_dir**: The path to the training script directory `source_sklearn` OR `source_pytorch`.
* **role**: Role ARN, which was specified, above.
* **train_instance_count**: The number of training instances (should be left at 1).
* **train_instance_type**: The type of SageMaker instance for training. Note: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
* **sagemaker_session**: The session used to train on Sagemaker.
* **hyperparameters** (optional): A dictionary `{'name':value, ..}` passed to the train function as hyperparameters.
Note: For a PyTorch model, there is another optional argument **framework_version**, which you can set to the latest version of PyTorch, `1.0`.
## EXERCISE: Define a Scikit-learn or PyTorch estimator
To import your desired estimator, use one of the following lines:
```
from sagemaker.sklearn.estimator import SKLearn
```
```
from sagemaker.pytorch import PyTorch
```
```
# your import and estimator code, here
from sagemaker.pytorch import PyTorch

# specify an output path
# prefix is specified above
output_path = 's3://{}/{}'.format(bucket, prefix)

# instantiate a pytorch estimator
# entry_point/source_dir point at the custom training script written earlier
estimator = PyTorch(entry_point='train.py',
                    source_dir='source_pytorch',
                    role=role,
                    framework_version='1.0',
                    train_instance_count=1,
                    train_instance_type='ml.m5.4xlarge',
                    output_path=output_path,
                    sagemaker_session=sagemaker_session,
                    hyperparameters={
                        'input_features': 3, # num of features
                        'hidden_dim': 40,
                        'output_dim': 1,     # single sigmoid-style output for binary classification
                        'epochs': 100 # could change to higher
                    })
```
## EXERCISE: Train the estimator
Train your estimator on the training data stored in S3. This should create a training job that you can monitor in your SageMaker console.
```
%%time

# Train your estimator on S3 training data
# (train_location is the S3 URI of the uploaded train.csv)
estimator.fit({'train': train_location})
```
## EXERCISE: Deploy the trained model
After training, deploy your model to create a `predictor`. If you're using a PyTorch model, you'll need to create a trained `PyTorchModel` that accepts the trained `<model>.model_data` as an input parameter and points to the provided `source_pytorch/predict.py` file as an entry point.
To deploy a trained model, you'll use `<model>.deploy`, which takes in two arguments:
* **initial_instance_count**: The number of deployed instances (1).
* **instance_type**: The type of SageMaker instance for deployment.
Note: If you run into an instance error, it may be because you chose the wrong training or deployment instance_type. It may help to refer to your previous exercise code to see which types of instances we used.
```
%%time

# uncomment, if needed
# from sagemaker.pytorch import PyTorchModel
from sagemaker.pytorch import PyTorchModel

# Create a model from the trained estimator data
# And point to the prediction script
model = PyTorchModel(model_data=estimator.model_data,
                     role = role,
                     framework_version='1.0',
                     entry_point='predict.py',
                     source_dir='source_pytorch')

# deploy your model to create a predictor
predictor = model.deploy(initial_instance_count=1, instance_type='ml.t2.medium')
```
---
# Evaluating Your Model
Once your model is deployed, you can see how it performs when applied to our test data.
The provided cell below, reads in the test data, assuming it is stored locally in `data_dir` and named `test.csv`. The labels and features are extracted from the `.csv` file.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import os
# read in test data, assuming it is stored locally
test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None)
# labels are in the first column
test_y = test_data.iloc[:,0]
test_x = test_data.iloc[:,1:]
```
## EXERCISE: Determine the accuracy of your model
Use your deployed `predictor` to generate predicted, class labels for the test data. Compare those to the *true* labels, `test_y`, and calculate the accuracy as a value between 0 and 1.0 that indicates the fraction of test data that your model classified correctly. You may use [sklearn.metrics](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) for this calculation.
**To pass this project, your model should get at least 90% test accuracy.**
```
# First: generate predicted, class labels
import numpy as np

# round the endpoint's continuous outputs to hard 0/1 class labels
test_y_preds = np.squeeze(np.round(predictor.predict(test_x)))

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""

# test that your model generates the correct number of labels
assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.'
print('Test passed!')

# Second: calculate the test accuracy
# confusion-matrix counts via elementwise logical ops on the 0/1 labels
tp = np.logical_and(test_y, test_y_preds).sum()
fp = np.logical_and(1-test_y, test_y_preds).sum()
tn = np.logical_and(1-test_y, 1-test_y_preds).sum()
fn = np.logical_and(test_y, 1-test_y_preds).sum()

# calculate binary classification metrics
recall = tp / (tp + fn)
precision = tp / (tp + fp)
accuracy = (tp + tn) / (tp + fp + tn + fn)
print(accuracy)

## print out the array of predicted and true labels, if you want
print('\nPredicted class labels: ')
print(test_y_preds)
print('\nTrue class labels: ')
print(test_y.values)
```
### Question 1: How many false positives and false negatives did your model produce, if any? And why do you think this is?
**Answer**:
No false negatives or positives. It looks to be working fine for the test set.
### Question 2: How did you decide on the type of model to use?
**Answer**:
I went with PyTorch, because it's relatively new to me compared to scikit-learn. I came here to learn something new, after all. Deep Learning solves a very wide variety of problems and from what I hear it's 99% of the cutting edge ML these days, so to try a NN was natural. Then the model turned out to work pretty well on the test set, so there was no need to explore any further.
----
## EXERCISE: Clean up Resources
After you're done evaluating your model, **delete your model endpoint**. You can do this with a call to `.delete_endpoint()`. You need to show, in this notebook, that the endpoint was deleted. Any other resources, you may delete from the AWS console, and you will find more instructions on cleaning up all your resources, below.
```
# uncomment and fill in the line below!
# Tear down the SageMaker endpoint so it stops accruing charges.
predictor.delete_endpoint()
```
### Deleting S3 bucket
When you are *completely* done with training and testing models, you can also delete your entire S3 bucket. If you do this before you are done training your model, you'll have to recreate your S3 bucket and upload your training data again.
```
# deleting bucket, uncomment lines below
# bucket_to_delete = boto3.resource('s3').Bucket(bucket)
# bucket_to_delete.objects.all().delete()
```
### Deleting all your models and instances
When you are _completely_ done with this project and do **not** ever want to revisit this notebook, you can choose to delete all of your SageMaker notebook instances and models by following [these instructions](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html). Before you delete this notebook instance, I recommend at least downloading a copy and saving it, locally.
---
## Further Directions
There are many ways to improve or add on to this project to expand your learning or make this more of a unique project for you. A few ideas are listed below:
* Train a classifier to predict the *category* (1-3) of plagiarism and not just plagiarized (1) or not (0).
* Utilize a different and larger dataset to see if this model can be extended to other types of plagiarism.
* Use language or character-level analysis to find different (and more) similarity features.
* Write a complete pipeline function that accepts a source text and submitted text file, and classifies the submitted text as plagiarized or not.
* Use API Gateway and a lambda function to deploy your model to a web application.
These are all just options for extending your work. If you've completed all the exercises in this notebook, you've completed a real-world application, and can proceed to submit your project. Great job!
| github_jupyter |
```
% pylab inline
from __future__ import print_function
import os.path
import pandas
import src
import sklearn
import os
import scipy
import scipy.stats
def fake(*args, **kwargs):
    """Guard stub: report the attempted call and abort the notebook.

    Installed over ``src.common.create_model`` below so that re-running
    cells can never accidentally regenerate model data.
    """
    # `sys` is not imported at the top of this notebook, so the original
    # sys.exit(1) raised NameError instead of exiting; import it locally.
    import sys
    print('Fake called with', str(args), str(kwargs))
    sys.exit(1)
# fake out the create_model so we don't accidentally attempt to create data
src.common.create_model = fake
# import seaborn
# seaborn.set_palette("colorblind")
# Make sure we run from the repository root, not notebooks/.
print(os.getcwd())
if os.getcwd().endswith('notebooks'):
    os.chdir('..')
print(os.getcwd())
# Build the experiment configuration and load the subject projects.
args = dict(level='file', force=False, model='lda', source=['release', 'changeset', 'temporal'], random_seed_value=1)
model_config, model_config_string = src.main.get_default_model_config(args)
args.update({'model_config': model_config, 'model_config_string': model_config_string})
changeset_config, changeset_config_string = src.main.get_default_changeset_config()
args.update({'changeset_config': changeset_config, 'changeset_config_string': changeset_config_string})
projects = src.common.load_projects(args)
projects
# Gather per-project corpus/dataset size statistics and cache the raw
# CSV frames for later analysis.
data = dict()
csvs = dict()
for project in projects:
    ownership = src.ownership.read_ownership(project)
    # Union of all developers appearing in any ownership map.
    devs = set()
    for v in ownership.values():
        devs.update(v.keys())
    goldsets = pandas.read_csv(os.path.join(project.full_path, 'goldset-info.csv'))
    changes = pandas.read_csv(os.path.join(project.full_path, 'changeset-info.csv'))
    release = pandas.read_csv(os.path.join(project.full_path, 'releasefile-info.csv'))
    queries = pandas.read_csv(os.path.join(project.full_path, 'queries-info.csv'))
    info = {"Developers": len(devs), "Changesets": len(changes), "Files": len(release), "Issues": len(queries)}
    data[project.printable_name] = info
    csvs[project.name] = {'g': goldsets, 'c': changes, 'r': release, 'q': queries, 'd': devs, 'o': ownership}
df = pandas.DataFrame(data)
df['Total'] = df.T.sum()
df.T
# Emit the subject-systems LaTeX table directly into the dissertation repo.
with open(os.path.expanduser('~/git/dissertation/tables/subjects.tex'), 'w') as f:
    header = ["\\begin{table}",
              "\\centering",
              "\\caption{Subject system corpora and dataset sizes}",
              "\\label{table:subjects}"]
    f.write('\n'.join(header) + '\n')
    latex = df.T.to_latex(columns=["Developers", "Files", "Changesets", "Issues"]).splitlines()
    # Insert a rule before the final (Total) row of the tabular body.
    latex.insert(-3, '\\midrule')
    f.write('\n'.join(latex))
    f.write("\n\\end{table}\n")
# Average total/unique word counts per document for each corpus
# (q = queries, c = changesets, r = release files).
for project in projects:
    print(project.name, 'q total ', csvs[project.name]['q'].total_words.sum() / len(csvs[project.name]['q']))
    print(project.name, 'q unique', csvs[project.name]['q'].unique_words.sum() / len(csvs[project.name]['q']))
    print()
    print(project.name, 'c total ', csvs[project.name]['c'].total_words.sum() / len(csvs[project.name]['c']))
    print(project.name, 'c unique', csvs[project.name]['c'].unique_words.sum() / len(csvs[project.name]['c']))
    print()
    print(project.name, 'r total ', csvs[project.name]['r'].total_words.sum() / len(csvs[project.name]['r']))
    print(project.name, 'r unique', csvs[project.name]['r'].unique_words.sum() / len(csvs[project.name]['r']))
    print('********************')
# Ownership frames for three projects; plot the per-entry developer counts.
# NOTE(review): DataFrame.sort() is pre-0.20 pandas API — this notebook
# appears pinned to an old pandas (it also uses pandas.Panel below).
pigo = pandas.DataFrame(csvs['pig']['o'])
jpao = pandas.DataFrame(csvs['openjpa']['o'])
booko = pandas.DataFrame(csvs['bookkeeper']['o'])
pigo.T.describe().T.sort("count")["count"].plot()
jpao.T.describe().T.sort("count")["count"].plot()
booko.T.describe().T.sort("count")["count"].plot()
def plot_ownership(data):
    """Group each item by its majority owner, plot the sorted per-owner
    counts, and return the counts as a Series.

    NOTE(review): relies on old pandas semantics — ``Series.argmax`` as
    index-of-max and in-place ``Series.sort`` — confirm pinned version.
    """
    m = dict()
    for each in data:
        z = data[each].argmax()  # owner with the largest share for this item
        if z not in m:
            m[z] = list()
        m[z].append(each)
    # One entry per owner: how many items they majority-own.
    zz = pandas.Series([len(v) for v in m.values()], index=[k for k in m])
    zz.sort()
    zz.plot()
    return zz
# Fraction of items per majority owner, for each project.
zz = plot_ownership(booko)
zz / zz.sum()
zz = plot_ownership(pigo)
zz / zz.sum()
zz = plot_ownership(jpao)
zz / zz.sum()
```
# Data read
```
# Box-plot column orderings for each research-question figure.
ALL_ORDER = ["Snapshot", "Changesets", "Historical"]
RQ1_ORDER = ["Snapshot", "Changesets"]
RQ2_ORDER = ["Changesets", "Historical"]
def get_panel(projects, fn):
    """Run experiment `fn` for every project and collect first-relevant
    effectiveness measures into a pandas Panel keyed by project name.

    NOTE(review): pandas.Panel was removed in pandas 0.25 — this needs the
    old pandas the rest of the notebook assumes.
    """
    datarank = dict()
    for project in projects:
        results = fn(project)
        # Align changeset ranks against release (snapshot) and temporal
        # (historical) ranks; ignore=True per src.common.merge_first_rels.
        x, y = src.common.merge_first_rels(results['changeset'], results['release'], ignore=True)
        _, z = src.common.merge_first_rels(results['changeset'], results['temporal'], ignore=True)
        print(len(x), len(y), len(z))
        datarank[project.printable_name] = {'Changesets': pandas.Series(x),
                                            'Snapshot': pandas.Series(y),
                                            'Historical': pandas.Series(z)}
    return pandas.Panel(datarank)
# Panels for developer identification (triage) and feature location.
tpanel = get_panel(projects, src.triage.run_experiment)
fpanel = get_panel(projects, src.feature_location.run_experiment)
FIG_TEX="""
\\begin{figure}
\\centering
\\begin{subfigure}{.4\\textwidth}
\\centering
\\includegraphics[height=0.4\\textheight]{%s}
\\caption{Including outliers}\\label{fig:%s_outlier}
\\end{subfigure}%%
\\begin{subfigure}{.4\\textwidth}
\\centering
\\includegraphics[height=0.4\\textheight]{%s_no_outlier}
\\caption{Excluding outliers}\\label{fig:%s_no_outlier}
\\end{subfigure}
\\caption{%s: %s effectiveness measures for %s}
\\label{fig:%s}
\\end{figure}
"""
def plot_panel(panel, order, name, kind):
    """Render and save box plots of effectiveness measures.

    For each subject system in `panel`: a box plot including outliers
    (PDF + companion .tex include) and one excluding outliers (PDF only).
    Then three plots over all systems combined: a tiny horizontal
    overview, a full-size overview, and a full-size overview without
    outliers.  Files go under ~/git/dissertation/figures/<kind>/.

    panel -- pandas Panel of measures, keyed by printable project name
    order -- column order for the box plots (e.g. RQ1_ORDER)
    name  -- research-question tag used in file names ("rq1", "rq2", "all")
    kind  -- task tag: "dit" (developer identification) or "flt"
             (feature location)
    """
    limitgrowth = 0.5
    size = (len(order) * 1.6, 4.5)
    fontsize = None
    widths = 0.3
    kinds = {"flt": "Feature Location", "dit": "Developer Identification"}
    rqs = {"flt": {"rq1": "\\fone", "rq2": "\\ftwo", "all": "Overview"},
           "dit": {"rq1": "\\done", "rq2": "\\dtwo", "all": "Overview"}}

    def _boxplot(frame, **extra):
        # Draw a standard box plot, grow the y-limits symmetrically
        # (never below zero), and tighten the layout.  This sequence was
        # previously duplicated four times.
        ax = frame.plot(kind='box', fontsize=fontsize, figsize=size,
                        widths=widths, y=order, **extra)
        lower, upper = ax.get_ylim()
        lower = lower - limitgrowth
        if lower < 0:
            lower = 0
        ax.set_ylim(lower, upper + limitgrowth)
        plt.tight_layout()

    def _fig_path(short_each, suffix=''):
        # Figure name relative to the dissertation repo, plus its full path.
        fig_name = 'figures/%s/%s_%s%s' % (kind, name, short_each, suffix)
        return fig_name, os.path.expanduser('~/git/dissertation/') + fig_name

    def _write_tex(path, fig_name, short_each, subject):
        # Companion .tex include built from the FIG_TEX template.
        figlabel = ":".join([x.lower() for x in [kind, name, short_each]])
        with open(path + ".tex", "wt") as f:
            f.write(FIG_TEX % (fig_name, figlabel,
                               fig_name, figlabel,
                               rqs[kind][name], kinds[kind], subject, figlabel))

    allt = pandas.DataFrame()
    for each in panel:
        allt = allt.append(panel[each], ignore_index=True)
        # Per-system plot including outliers (PDF + tex include).
        _boxplot(panel[each])
        short_each = each.lower().split(' ')[0]
        fig_name, path = _fig_path(short_each)
        plt.savefig(path + ".pdf", dpi=300)
        _write_tex(path, fig_name, short_each, each)
        # Per-system plot excluding outliers (PDF only; FIG_TEX already
        # references the _no_outlier variant).
        _boxplot(panel[each], showfliers=False)
        fig_name, path = _fig_path(short_each, '_no_outlier')
        plt.savefig(path + ".pdf", dpi=300)

    # Compact horizontal overview across all systems (no ylim adjustment).
    allt.plot(kind='box', figsize=(4, 1.5), grid=False, vert=False,
              y=list(reversed(order)))
    plt.tight_layout()
    fig_name, path = _fig_path("tiny")
    plt.savefig(path + ".pdf", dpi=300)
    _write_tex(path, fig_name, "tiny", "all subject systems")

    # Full-size overview including outliers.
    _boxplot(allt)
    fig_name, path = _fig_path("overview")
    plt.savefig(path + ".pdf", dpi=300)
    _write_tex(path, fig_name, "overview", "all subject systems")

    # Full-size overview excluding outliers (PDF only).
    _boxplot(allt, showfliers=False)
    fig_name, path = _fig_path("overview", '_no_outlier')
    plt.savefig(path + ".pdf", dpi=300)
```
# Triage
```
# Developer identification (triage) figures for each RQ.
plot_panel(tpanel, RQ1_ORDER, "rq1", "dit")
plot_panel(tpanel, RQ2_ORDER, "rq2", "dit")
plot_panel(tpanel, ALL_ORDER, "all", "dit")
```
# Feature loc
```
# Feature location figures for each RQ.
plot_panel(fpanel, RQ1_ORDER, "rq1", "flt")
plot_panel(fpanel, RQ2_ORDER, "rq2", "flt")
plot_panel(fpanel, ALL_ORDER, "all", "flt")
# Rows with at least one missing measure for BookKeeper.
b = tpanel['BookKeeper v4.3.0'].dropna(how='all')
c = b[pandas.isnull(b).any(axis=1)]
c
projects[4]
# Issues located by the changeset model but not by the temporal model.
set([y for x,y,z in src.triage.run_experiment(projects[4])['changeset']]) - set([y for x,y,z in src.triage.run_experiment(projects[4])['temporal']])
```
# failure analysis
## mahout
{'1554', '1565', '1616'}
### First commit(s) by author
1554, 1565, 1616
### Interesting
1565 author, gcapan, was first commit under that email (apache official), but author has many emails in this project:
u'Gokhan_<gkhncpn@gmail.com>',
u'Gokhan_Capan_<gcapan@apache.org>',
u'gcapan_<gcapan@anadolu.edu.tr>',
u'gcapan_<gcapan@unknown>',
1616 was same author, but gmail email.
```
# Developer set for mahout (failure analysis above).
csvs['mahout']['d']
```
## OpenJPA
{'2282'}
### First commit(s) by author
2282
```
# Developer set for OpenJPA (failure analysis above).
csvs['openjpa']['d']
```
## Bookkeeper
{'561'}
### First commit(s) by author
561
```
# Developer set for BookKeeper (failure analysis above).
csvs['bookkeeper']['d']
```
## Pig
{'4127'}
### First commit(s) by author
4127
```
# Developer set for Pig (failure analysis above).
csvs['pig']['d']
```
## Tika
{'1575'}
### First commit(s) by author
### Interesting
#### Weird committer name (duplicate author address linking problem?)
1575,
Has both a weird committer name AND is the first commit from this author
commit bea95ec81acdd04bada5651d37e0e605ed4f8222
Author: grossws <grossws@unknown>
Date: Sun Mar 29 14:33:46 2015 +0000
Update pdfbox to 1.8.9
Fixes TIKA-1575
git-svn-id: https://svn.apache.org/repos/asf/tika/trunk@1669912 13f79535-47bb-0310-9956-ffa450edef68
most recent commit
commit ab1158a238571382da0a7ea72aca6eeca8552535
Author: Konstantin Gribov <grossws@apache.org>
Date: Tue Jul 28 13:00:16 2015 +0000
Remove junit from OSGi bundle deps
Test dependencies removed from OSGi bundle `Import-Package` manifest header.
Extra integration test by Bob Pailin <bob@apache.org> added to avoid regressions
with junit packages included to inappropriate manifest entries.
Fixes TIKA-1524
git-svn-id: https://svn.apache.org/repos/asf/tika/trunk@1693089 13f79535-47bb-0310-9956-ffa450edef68
```
# Developer set for Tika (failure analysis above).
csvs['tika']['d']
```
## Zookeeper
{'1357', '1413', '1695', '1900', '1909'}
### First commit(s) by author
1413, 1695, 1900, 1909
### Interesting
#### Weird committer name (duplicate author address linking problem?)
Weird email? Would have been first & only commit, hence it was not located.
1357
commit 03218f40e396949fe007c276ba837ec78a3de2a1
Author: Michi Mutsuzaki <michim@apache.org>
Date: Wed Apr 16 06:15:28 2014 +0000
ZOOKEEPER-1887. C implementation of removeWatches (Raul Gutierrez Segales via michim)
git-svn-id: https://svn.apache.org/repos/asf/zookeeper/trunk@1587812 13f79535-47bb-0310-9956-ffa450edef68
commit 94880dd88002dc37deaedd72bb08fe9b705bcbe8
Author: Michi Mutsuzaki <michim@apache.org = michim = Michi Mutsuzaki michim@apache.org@apache.org>
Date: Mon Apr 14 21:51:55 2014 +0000
ZOOKEEPER-1357. Zab1_0Test uses hard-wired port numbers. Specifically, it uses the same port for leader in two different tests. The second test periodically fails complaining that the port is still in use. (Alexander Shraer via michim)
git-svn-id: https://svn.apache.org/repos/asf/zookeeper/trunk@1587335 13f79535-47bb-0310-9956-ffa450edef68
commit 644542390d75af0b752ab34fde0ccbf995bb05cf
Author: Michi Mutsuzaki <michim@apache.org>
Date: Thu Apr 10 03:20:17 2014 +0000
zkEnv.cmd: Set svn:eol-style property to 'native'.
git-svn-id: https://svn.apache.org/repos/asf/zookeeper/trunk@1586200 13f79535-47bb-0310-9956-ffa450edef68
1909
commit 68fe08a80e0896967d536f87fffb0b7f89846b73
Author: rakeshr <rakeshr@unknown>
Date: Thu Apr 17 06:47:18 2014 +0000
ZOOKEEPER-1909. removeWatches doesn't return NOWATCHER when there is
no watch set (Raul Gutierrez Segales via rakeshr)
git-svn-id: https://svn.apache.org/repos/asf/zookeeper/trunk@1588141 13f79535-47bb-0310-9956-ffa450edef68
```
# Developer set for ZooKeeper (failure analysis above).
csvs['zookeeper']['d']
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
%matplotlib inline
from torch.utils.data import Dataset, DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
# Prefer GPU when available.  NOTE(review): the .to(device) calls later
# in this notebook are commented out, so everything runs on CPU tensors.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
```
# Generate dataset
```
# Draw 2100 points across 7 classes (~300 each).  Classes 0-5 occupy thin
# horizontal strips with x in [1,2]; class 6 spans a tall strip x in [4,5]
# covering the full y range and acts as the background class below.
y = np.random.randint(0,7,2100)
idx= []
for i in range(7):
    print(i,sum(y==i))
    idx.append(y==i)  # boolean mask per class
x = np.zeros((2100,2))
x[idx[0],:] = np.random.uniform(low=[1,5],high=[2,6],size=(sum(idx[0]),2))
x[idx[1],:] = np.random.uniform(low=[1,3],high=[2,4],size=(sum(idx[1]),2))
x[idx[2],:] = np.random.uniform(low=[1,1],high=[2,2],size=(sum(idx[2]),2))
x[idx[3],:] = np.random.uniform(low=[1,-1],high=[2,0],size=(sum(idx[3]),2))
x[idx[4],:] = np.random.uniform(low=[1,-3],high=[2,-2],size=(sum(idx[4]),2))
x[idx[5],:] = np.random.uniform(low=[1,-5],high=[2,-4],size=(sum(idx[5]),2))
x[idx[6],:] = np.random.uniform(low=[4,-5],high=[5,6],size=(sum(idx[6]),2))
for i in range(7):
    plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig("type3_2_dist.png",bbox_inches="tight")
plt.savefig("type3_2_dist.pdf",bbox_inches="tight")
# NOTE(review): these two sets are not referenced again; the code below
# uses class 0-5 as foreground and class 6 as background directly.
foreground_classes = {'class_0','class_1'}
background_classes = {'class_2'}
# Build one demo mosaic: a foreground point (class 0-5) in a random slot,
# a background point (class 6) in the other.
fg_class = np.random.randint(0,6)
fg_idx = np.random.randint(0,2) #m=2
a = []
for i in range(2): #m=2
    if i == fg_idx:
        b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
        a.append(x[b])
        print("foreground "+str(fg_class)+" present at " + str(fg_idx))
    else:
        bg_class = np.random.randint(6,7)  # always 6
        b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
        a.append(x[b])
        print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
a.shape
np.reshape(a,(4,1))
# Generate 3000 training mosaics.  Each mosaic is the two 2-D points
# flattened to shape (4,), with the foreground class as label and the
# foreground's slot index recorded for the focus diagnostics.
desired_num = 3000
mosaic_list =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
    fg_class = np.random.randint(0,6)
    fg_idx = np.random.randint(0,2) #m=2
    a = []
    for i in range(2): #m=2
        if i == fg_idx:
            b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
            a.append(x[b])
            # print("foreground "+str(fg_class)+" present at " + str(fg_idx))
        else:
            bg_class = np.random.randint(6,7)
            b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
            a.append(x[b])
            # print("background "+str(bg_class)+" present at " + str(i))
    a = np.concatenate(a,axis=0)
    mosaic_list.append(np.reshape(a,(4,1)))
    mosaic_label.append(fg_class)
    fore_idx.append(fg_idx)
# Stack columns and transpose to (desired_num, 4).
mosaic_list = np.concatenate(mosaic_list,axis=1).T
# print(mosaic_list)
print(np.shape(mosaic_label))
print(np.shape(fore_idx))
class MosaicDataset(Dataset):
    """In-memory dataset of mosaic samples.

    Each item is the tuple ``(mosaic, label, foreground_index)``:
    the flattened pair of 2-D points, its foreground class, and the
    slot that holds the foreground point.
    """

    def __init__(self, mosaic_list, mosaic_label, fore_idx):
        self.mosaic = mosaic_list
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        # One label per mosaic.
        return len(self.label)

    def __getitem__(self, idx):
        sample = self.mosaic[idx]
        target = self.label[idx]
        slot = self.fore_idx[idx]
        return sample, target, slot
# Batch size must divide the dataset size: the diagnostic loops below
# iterate exactly `batch` items per step (3000 / 250 = 12 batches).
batch = 250
msd = MosaicDataset(mosaic_list, mosaic_label , fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
class Wherenet(nn.Module):
    """Attention ("where") network.

    Scores each of the two 2-D patches with a shared linear layer,
    softmaxes the scores into attention weights (alphas), and returns
    the alpha-weighted average patch together with the alphas.
    """

    def __init__(self):
        super(Wherenet, self).__init__()
        # One scalar score per 2-D patch, weights shared across patches.
        self.linear1 = nn.Linear(2, 1)

    def forward(self, z):
        """z: (N, 4) — two concatenated 2-D patches per sample.

        Returns (y, alphas): y is the (N, 2) weighted average patch,
        alphas is the (N, 2) softmax attention over the two patches.
        """
        # Derive the batch size from the input instead of the global
        # `batch`, so partial/last batches of any size also work.
        n = z.shape[0]
        x = torch.zeros([n, 2], dtype=torch.float64)  # per-patch scores
        y = torch.zeros([n, 2], dtype=torch.float64)  # weighted average
        for i in range(2):  # m=2 patches
            x[:, i] = self.helper(z[:, 2*i:2*i+2])[:, 0]
        x = F.softmax(x, dim=1)  # alphas
        for i in range(2):  # m=2
            x1 = x[:, i]
            y = y + torch.mul(x1[:, None], z[:, 2*i:2*i+2])
        return y, x

    def helper(self, x):
        # Linear score for one 2-D patch; no nonlinearity.
        return self.linear1(x)
# Pull one batch and sanity-check the attention network's output shapes.
trainiter = iter(train_loader)
# Use the next() builtin: the iterator .next() method is Python-2 style
# and is not available on modern DataLoader iterators.
input1, labels1, index1 = next(trainiter)
where = Wherenet().double()
out_where, alphas = where(input1)
out_where.shape, alphas.shape
class Whatnet(nn.Module):
    """Classifier ("what") network: a single linear map from the 2-D
    attention-averaged patch to logits over the six foreground classes."""

    def __init__(self):
        super(Whatnet, self).__init__()
        self.linear1 = nn.Linear(2, 6)

    def forward(self, x):
        # No hidden layers — a plain linear classifier.
        logits = self.linear1(x)
        return logits
what = Whatnet().double()
# what(out_where)
# Build 1000 test mosaics with exactly the same procedure as training.
test_data_required = 1000
mosaic_list_test =[]
mosaic_label_test = []
fore_idx_test=[]
for j in range(test_data_required):
    fg_class = np.random.randint(0,6)
    fg_idx = np.random.randint(0,2) #m=2
    a = []
    for i in range(2): #m=2
        if i == fg_idx:
            b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
            a.append(x[b])
            # print("foreground "+str(fg_class)+" present at " + str(fg_idx))
        else:
            bg_class = np.random.randint(6,7)
            b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
            a.append(x[b])
            # print("background "+str(bg_class)+" present at " + str(i))
    a = np.concatenate(a,axis=0)
    mosaic_list_test.append(np.reshape(a,(4,1)))
    mosaic_label_test.append(fg_class)
    fore_idx_test.append(fg_idx)
mosaic_list_test = np.concatenate(mosaic_list_test,axis=1).T
print(mosaic_list_test.shape)
test_data = MosaicDataset(mosaic_list_test,mosaic_label_test,fore_idx_test)
test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)
# Diagnostic counters: did the attention focus on the foreground slot,
# and was the classification correct (all four combinations).
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
# col1-col7 collect training diagnostics every 5 epochs;
# col8-col13 collect the matching test-side diagnostics.
col1=[]
col2=[]
col3=[]
col4=[]
col5=[]
col6=[]
col7=[]
col8=[]
col9=[]
col10=[]
col11=[]
col12=[]
col13=[]
criterion = nn.CrossEntropyLoss()
optimizer_where = optim.Adam(where.parameters(), lr=0.1)#,momentum=0.9)
optimizer_what = optim.Adam(what.parameters(), lr=0.1)#, momentum=0.9)
nos_epochs = 200
# NOTE(review): these four lists are declared but never filled below.
train_loss=[]
test_loss =[]
train_acc = []
test_acc = []
for epoch in range(nos_epochs): # loop over the dataset multiple times
    # Reset per-epoch focus/prediction diagnostics.
    focus_true_pred_true =0
    focus_false_pred_true =0
    focus_true_pred_false =0
    focus_false_pred_false =0
    argmax_more_than_half = 0
    argmax_less_than_half =0
    running_loss = 0.0
    cnt=0
    iteration = desired_num // batch
    #training data set
    for i, data in enumerate(train_loader):
        inputs , labels , fore_idx = data
        #inputs,labels,fore_idx = inputs.to(device),labels.to(device),fore_idx.to(device)
        # zero the parameter gradients
        optimizer_what.zero_grad()
        optimizer_where.zero_grad()
        # Attention network averages the patches; classifier labels them.
        # Both networks are trained jointly from the classification loss.
        avg_inp,alphas = where(inputs)
        outputs = what(avg_inp)
        _, predicted = torch.max(outputs.data, 1)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer_what.step()
        optimizer_where.step()
        running_loss += loss.item()
        if cnt % 6 == 5:    # print every 6 mini-batches
            print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / 6))
            running_loss = 0.0
        cnt=cnt+1
        # Every 5th epoch, tally focus-vs-prediction outcomes per sample.
        if epoch % 5 == 4:
            for j in range (batch):
                focus = torch.argmax(alphas[j])
                if(alphas[j][focus] >= 0.5):
                    argmax_more_than_half +=1
                else:
                    argmax_less_than_half +=1
                if(focus == fore_idx[j] and predicted[j] == labels[j]):
                    focus_true_pred_true += 1
                elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                    focus_false_pred_true +=1
                elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                    focus_true_pred_false +=1
                elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                    focus_false_pred_false +=1
    if epoch % 5 == 4:
        col1.append(epoch)
        col2.append(argmax_more_than_half)
        col3.append(argmax_less_than_half)
        col4.append(focus_true_pred_true)
        col5.append(focus_false_pred_true)
        col6.append(focus_true_pred_false)
        col7.append(focus_false_pred_false)
        #************************************************************************
        #testing data set
        with torch.no_grad():
            # Reuse the same counters for the test-set tallies.
            focus_true_pred_true =0
            focus_false_pred_true =0
            focus_true_pred_false =0
            focus_false_pred_false =0
            argmax_more_than_half = 0
            argmax_less_than_half =0
            for data in test_loader:
                inputs, labels , fore_idx = data
                #inputs,labels,fore_idx = inputs.to(device),labels.to(device),fore_idx.to(device)
                # print(inputs.shtorch.save(where.state_dict(),"model_epoch"+str(epoch)+".pt")ape,labels.shape)
                avg_inp,alphas = where(inputs)
                outputs = what(avg_inp)
                _, predicted = torch.max(outputs.data, 1)
                for j in range (batch):
                    focus = torch.argmax(alphas[j])
                    if(alphas[j][focus] >= 0.5):
                        argmax_more_than_half +=1
                    else:
                        argmax_less_than_half +=1
                    if(focus == fore_idx[j] and predicted[j] == labels[j]):
                        focus_true_pred_true += 1
                    elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                        focus_false_pred_true +=1
                    elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                        focus_true_pred_false +=1
                    elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                        focus_false_pred_false +=1
            col8.append(argmax_more_than_half)
            col9.append(argmax_less_than_half)
            col10.append(focus_true_pred_true)
            col11.append(focus_false_pred_true)
            col12.append(focus_true_pred_false)
            col13.append(focus_false_pred_false)
    #torch.save(where.state_dict(),"where_model_epoch"+str(epoch)+".pt")
    #torch.save(what.state_dict(),"what_model_epoch"+str(epoch)+".pt")
print('Finished Training')
#torch.save(where.state_dict(),"where_model_epoch"+str(nos_epochs)+".pt")
#torch.save(what.state_dict(),"what_model_epoch"+str(epoch)+".pt")
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
df_train[columns[0]] = col1
df_train[columns[1]] = col2
df_train[columns[2]] = col3
df_train[columns[3]] = col4
df_train[columns[4]] = col5
df_train[columns[5]] = col6
df_train[columns[6]] = col7
df_test[columns[0]] = col1
df_test[columns[1]] = col8
df_test[columns[2]] = col9
df_test[columns[3]] = col10
df_test[columns[4]] = col11
df_test[columns[5]] = col12
df_test[columns[6]] = col13
df_train
# Attention confidence over training epochs (training set).
plt.plot(col1,col2, label='argmax > 0.5')
plt.plot(col1,col3, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
plt.show()
# Focus-vs-prediction breakdown over epochs (training set).
plt.plot(col1,col4, label ="focus_true_pred_true ")
plt.plot(col1,col5, label ="focus_false_pred_true ")
plt.plot(col1,col6, label ="focus_true_pred_false ")
plt.plot(col1,col7, label ="focus_false_pred_false ")
plt.title("On Training set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.savefig("linear_type3_21.png",bbox_inches="tight")
plt.savefig("linear_type3_21.pdf",bbox_inches="tight")
plt.show()
df_test
# Same two plots for the test set.
plt.plot(col1,col8, label='argmax > 0.5')
plt.plot(col1,col9, label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.title("On Testing set")
plt.show()
plt.plot(col1,col10, label ="focus_true_pred_true ")
plt.plot(col1,col11, label ="focus_false_pred_true ")
plt.plot(col1,col12, label ="focus_true_pred_false ")
plt.plot(col1,col13, label ="focus_false_pred_false ")
plt.title("On Testing set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("Testing data")
plt.show()
# (Commented out: hand-set weights used during earlier debugging.)
# where.state_dict()["linear1.weight"][:] = torch.Tensor(np.array([[ 0, -1]]))
# where.state_dict()["linear1.bias"][:] = torch.Tensor(np.array([0]))
# Learned parameters of the attention network.
for param in where.named_parameters():
    print(param)
# what.state_dict()["linear1.weight"][:] = torch.Tensor(np.array([[ 5, 0],
#                                                                 [0,5],
#                                                                 [ 0, 0]]))
# what.state_dict()["linear1.bias"][:] = torch.Tensor(np.array([0, 0, 0]))
# Learned parameters of the classifier.
for param in what.named_parameters():
    print(param)
# Evaluate both networks over a dense grid covering the data region to
# visualise the attention score surface and the classifier's regions.
xx,yy= np.meshgrid(np.arange(0.9,6.5,0.05),np.arange(-5.1,6.5,0.05))
X = np.concatenate((xx.reshape(-1,1),yy.reshape(-1,1)),axis=1)
X = torch.Tensor(X).double()
Y = where.helper(X)   # raw (pre-softmax) attention scores
Y1 = what(X)          # classifier logits
X.shape,Y.shape
X = X.detach().numpy()
Y = Y[:,0].detach().numpy()
# Filled contour of the attention score with the data overlaid.
fig = plt.figure(figsize=(6,6))
cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Y.reshape(xx.shape))
plt.xlabel("X1")
plt.ylabel("X2")
fig.colorbar(cs)
for i in range(7):
    plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig("focus_contour.png")#,bbox_inches='tight')
# Softmax the logits and take the argmax class per grid point.
Y1 = Y1.detach().numpy()
Y1 = torch.softmax(torch.Tensor(Y1),dim=1)
_,Z4= torch.max(Y1,1)
Z1 = Y1[:,0]
Z2 = Y1[:,1]
#Z3 = Y1[:,2]
np.unique(Z4)
#fig = plt.figure(figsize=(6,6))
# plt.scatter(X[:,0],X[:,1],c=Z1)
# plt.scatter(X[:,0],X[:,1],c=Z2)
# plt.scatter(X[:,0],X[:,1],c=Z3)
#cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Z1.reshape(xx.shape))
# #plt.colorbar(cs)
# cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Z2.reshape(xx.shape))
# #plt.colorbar(cs)
# cs = plt.contourf(X[:,0].reshape(xx.shape),X[:,1].reshape(yy.shape),Z3.reshape(xx.shape))
#plt.colorbar(cs)
# plt.xlabel("X1")
# plt.ylabel("X2")
#ax.view_init(60,100)
#plt.savefig("non_interpretable_class_2d.pdf",bbox_inches='tight')
# Collect the attention-averaged inputs over the training set and overlay
# them (cyan) on the predicted-class grid.
avrg = []
with torch.no_grad():
    for i, data in enumerate(train_loader):
        inputs , labels , fore_idx = data
        avg_inp,alphas = where(inputs)
        avrg.append(avg_inp)
avrg= np.concatenate(avrg,axis=0)
plt.scatter(X[:,0],X[:,1],c=Z4)
for i in range(7):
    plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.scatter(avrg[:,0],avrg[:,1],c="c")
plt.savefig("decision_boundary.png",bbox_inches="tight")
# Training-set accuracy and confusion matrix of the joint model.
true = []
pred = []
acc= 0
for i, data in enumerate(train_loader):
    inputs , labels , fore_idx = data
    avg_inp,alphas = where(inputs)
    outputs = what(avg_inp)
    _, predicted = torch.max(outputs.data, 1)
    true.append(labels)
    pred.append(predicted)
    acc+=sum(predicted == labels)
true = np.concatenate(true,axis=0)
pred = np.concatenate(pred,axis=0)
from sklearn.metrics import confusion_matrix
confusion_matrix(true,pred)
sum(true==pred)
```
| github_jupyter |
```
import tensorflow as tf
import keras
from keras.applications import DenseNet201
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, NumpyArrayIterator
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import confusion_matrix
from keras import models, layers, optimizers
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Frozen DenseNet201 feature extractor (ImageNet weights, global max pool
# gives one feature vector per 300x300 RGB image).
conv_base = DenseNet201(weights='imagenet', include_top = False, pooling = 'max', input_shape=(300,300,3))
from google.colab import drive
drive.mount("/content/gdrive/")
X = np.load("gdrive/My Drive/pcb/xtrain.npy")
y = np.load("gdrive/My Drive/pcb/ytrain.npy")
# Training set has 472 samples
```
Training with k-fold Validation
```
# Shapes and class balance of the training set.
X.shape, (y == 0).sum(), (y == 1).sum()
nos = X.shape[0]
# Extract DenseNet features once; the SVM is trained on these vectors.
samples = conv_base.predict(X)
labels = y.copy()
samples.shape, labels.shape
# To save memory
del X
del y
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=8)
# accuracies[i] = [recall of class 0, recall of class 1] for fold i.
accuracies = []
f = 1
for train_index, test_index in skf.split(samples, labels):
    X_train = samples[train_index]
    X_test = samples[test_index]
    y_train = labels[train_index]
    y_test = labels[test_index]
    print('Fold {}'.format(f))
    clf = make_pipeline(StandardScaler(), SVC(gamma='auto'))
    clf.fit(X_train, y_train)
    test_predclass = clf.predict(X_test)
    # Compute the confusion matrix once per fold (it was previously
    # evaluated twice), then record per-class recall.
    cm = confusion_matrix(y_test, test_predclass)
    accuracies.append([cm[0][0] / (y_test == 0.0).sum(),
                       cm[1][1] / (y_test == 1.0).sum()])
    f += 1
# Mean per-class recall across the 8 folds.
zero_acc = 0
one_acc = 0
for i in accuracies:
    zero_acc += i[0]
    one_acc += i[1]
zero_acc /= 8
one_acc /= 8
print(zero_acc, one_acc)
```
Training on whole Training Dataset
```
# Evaluate on the held-out test set: extract DenseNet features, fit the
# SVM on the full training features, and report per-class recall.
xtest = np.load('gdrive/My Drive/pcb/xtest.npy')
ytest = np.load('gdrive/My Drive/pcb/ytest.npy')
testnos = ytest.shape[0]
test_samples = conv_base.predict(xtest)
test_labels = ytest.copy()
test_samples.shape, test_labels.shape
clftest = make_pipeline(StandardScaler(), SVC(gamma='auto'))
clftest.fit(samples, labels)
test_predclass = clftest.predict(test_samples)
# Compute the confusion matrix once (it was previously evaluated twice).
cm_test = confusion_matrix(test_labels, test_predclass)
cm_test[0][0] / (ytest == 0.0).sum(), cm_test[1][1] / (ytest == 1.0).sum()
# Saving model
# model_json = model.to_json()
# with open("gdrive/My Drive/models/VGG19_12.json", "w") as json_file:
#     json_file.write(model_json)
# model.save_weights("gdrive/My Drive/models/VGG19_12.h5")
# print("Saved model to disk")
# Saving and loading models
# https://machinelearningmastery.com/save-load-keras-deep-learning-models/
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Ramaseshanr/ANLP/blob/master/CosDistance.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# MIT License
# Copyright (c) 2019.
#
from numpy import *
from numpy import dot
from numpy.linalg import norm
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from matplotlib.patches import Arc
def find_end_points(point, angle, length):
    '''
    Return the x and y coordinate pairs of a line segment.

    #Source - https://stackoverflow.com/questions/28417604/plotting-a-line-from-a-coordinate-with-and-angle

    point  - Tuple (x, y): the start of the segment.
    angle  - Direction of the segment in degrees (counter-clockwise from +x).
    length - Length of the segment.

    Returns ([x, endx], [y, endy]), i.e. the argument format expected by
    matplotlib's ax.plot().
    '''
    # unpack the first point
    x, y = point
    # Bug fix: the end point must be offset from the *start* point.
    # The original ignored x/y here, which only worked for point == (0, 0).
    endx = x + length * math.cos(math.radians(angle))
    endy = y + length * math.sin(math.radians(angle))
    return ([x, endx], [y, endy])
# Document-term matrix: each column is a document vector (D0..D11; only the
# first 11 columns are compared below).
doc_term = array([
    [0.1, 0.1, 0.0, 0.1, 0.2, 0.0, 0.1, 0.9, 0.9, 0.3, 0.0, 0.8],
    [0.1, 0.1, 0.0, 0.1, 0.2, 0.0, 0.1, 0.9, 0.9, 0.3, 0.0, 0.8],
    [0.0, 0.0, 0.9, 0.2, 0.3, 0.1, 0.7, 0.0, 0.2, 0.7, 0.5, 0.5],
    [0.0, 0.0, 0.9, 0.9, 0.5, 0.1, 0.9, 0.3, 0.8, 0.4, 0.1, 0.4],
    [0.4, 0.0, 0.0, 0.2, 0.5, 0.9, 0.3, 0.7, 0.4, 0.6, 0.0, 0.3],
    [0.6, 0.6, 0.0, 0.7, 0.3, 0.3, 0.9, 0.1, 0.9, 0.0, 0.0, 0.3],
    [0.0, 0.0, 0.8, 0.6, 0.6, 0.6, 0.0, 0.1, 0.4, 0.9, 0.3, 0.1],
    [0.4, 0.4, 0.0, 0.5, 0.5, 0.1, 0.7, 0.1, 0.5, 0.3, 0.8, 0.1],
    [0.3, 0.3, 0.0, 0.9, 0.8, 0.7, 0.7, 0.8, 0.6, 0.6, 0.8, 0.0],
    [0.0, 0.0, 0.5, 0.0, 0.2, 0.0, 0.0, 0.1, 0.3, 0.4, 0.5, 0.3]
])

# Pairwise angles (in degrees) between the document column vectors.
cos_list = []
pd_cols = []
header = ['D0','D1','D2','D3','D4','D5','D6','D7','D8','D9','D10']
for i in range(0, 11):
    for j in range(0, 11):
        # Cosine similarity between columns i and j.  .item() extracts the
        # scalar from the 1x1 dot-product result (np.asscalar was removed in
        # NumPy 1.23, and passing a 1x1 array to math.acos is deprecated).
        cos_value = (dot(transpose(doc_term[:, [i]]), doc_term[:, [j]])
                     / (norm(doc_term[:, [j]]) * norm(doc_term[:, [i]]))).item()
        # Clamp to [-1, 1] before acos to guard against floating-point drift.
        cos_list.append(float(around(math.degrees(math.acos(min(max(cos_value, -1.0), 1.0))), decimals=1)))
    pd_cols.append(cos_list)
    cos_list = []

df = pd.DataFrame(pd_cols, columns=header)
print(df)

# Plot each document as a vector from the origin: direction = angle to the
# reference document, magnitude = the document vector's norm.
fig = plt.figure()
ax = plt.subplot(111)
ax.set_ylim([0, 1.75])  # set the bounds of the plot
ax.set_xlim([0, 1.75])
ref_doc = 0
for i in range(0, 11):
    X, Y = find_end_points([0, 0], df.iloc[ref_doc][i], norm(doc_term[:, [i]]))
    ax.plot(X, Y)
    ax.annotate("", xy=(X[1], Y[1]), xytext=(0, 0), arrowprops=dict(arrowstyle="->"))
    ax.text(X[1], Y[1], "D" + str(ref_doc) + "-" + "D" + str(i) + "-(" + str(df.iloc[ref_doc][i]) + u"\u00b0" + ")")
    #ax.add_patch(Arc((0,0), .25+i/12.0, .25+i/12.0, theta1=0.0, theta2=df.iloc[0][i], edgecolor='r', lw=1.5, label = str(df.iloc[0][i])+u"\u00b0"))
fig.show()
```
| github_jupyter |
```
from NewsContent import *
from UserContent import *
from preprocessing import *
from PEGenerator import *
import PEGenerator
from models import *
from utils import *
from Encoders import *
import os
import numpy as np
import json
import random
# Paths to data resources; the first three must be filled in before running.
data_root_path = None  # root of the news dataset (train/val/test .tsv) -- TODO set
embedding_path = None  # pretrained word-embedding file -- TODO set
KG_root_path = None  # knowledge-graph entity embeddings -- TODO set
popularity_path = '../popularity/'
# Experiment configuration: sequence lengths, encoder choices, and which
# news attributes are used as model input.
config = {'title_length':30,
          'body_length':100,
          'max_clicked_news':50,
          'npratio':1,  # negative:positive sampling ratio for training
          'news_encoder_name':"CNN",
          'user_encoder_name':"Att",
          'attrs':['title','vert','entity'],
          'word_filter':0,
          'data_root_path':data_root_path,
          'embedding_path':embedding_path,
          'KG_root_path':KG_root_path,
          'popularity_path':popularity_path,
          'max_entity_num':5}
# Load news content and user click behaviour for each split.
News = NewsContent(config)
TrainUsers = UserContent(News.news_index,config,'train.tsv',2)
ValidUsers = UserContent(News.news_index,config,'val.tsv',1)
TestUsers = UserContent(News.news_index,config,'test.tsv',2)
# Build model inputs: training sessions/labels and test/val impressions.
train_sess,train_buckets, train_user_id, train_label = get_train_input(TrainUsers.session,News.news_index,config)
test_impressions, test_userids = get_test_input(TestUsers.session,News.news_index)
val_impressions, val_userids = get_test_input(ValidUsers.session,News.news_index)
# Word-embedding matrix restricted to the corpus vocabulary.
title_word_embedding_matrix, have_word = load_matrix(embedding_path,News.word_dict)
# Batch generators (batch size 32) for training and inference.
train_generator = TrainGenerator(News,TrainUsers,train_sess,train_user_id,train_buckets,train_label,32)
test_user_generator = UserGenerator(News,TestUsers,32)
val_user_generator = UserGenerator(News,ValidUsers,32)
news_generator = NewsGenerator(News,32)
# Train and evaluate the popularity-aware recommendation model over 10
# independent runs (fresh model each iteration; results are not aggregated
# here -- only the last run's variables survive the loop).
for i in range(10):
    # Ablation switches for the model components (all enabled here).
    model_config = {
        'news_encoder':1,
        'popularity_user_modeling':True,
        'rel':True,
        'ctr':True,
        'content':True,
        'rece_emb':True,  # recency embedding in the popularity score
        'activity':True
    }
    model,user_encoder,news_encoder,bias_news_encoder,bias_content_scorer,scaler,time_embedding_layer,activity_gater = create_pe_model(config,model_config,News,title_word_embedding_matrix,News.entity_embedding)
    model.fit_generator(train_generator,epochs=2)
    # Pre-compute embeddings for all news and for test/validation users.
    news_scoring = news_encoder.predict_generator(news_generator,verbose=True)
    user_scoring = user_encoder.predict_generator(test_user_generator,verbose=True)
    val_user_scoring = user_encoder.predict_generator(val_user_generator,verbose=True)
    news_bias_vecs = bias_news_encoder.predict_generator(news_generator,verbose=True)
    # Content-based popularity score can be precomputed only when it does not
    # depend on recency; otherwise it is computed inside news_ranking.
    if model_config['content'] and not model_config['rece_emb']:
        bias_candidate_score = bias_content_scorer.predict(news_bias_vecs,batch_size=32,verbose=True)
        bias_candidate_score = bias_candidate_score[:,0]
    else:
        bias_candidate_score = 0
    ctr_weight = scaler.get_weights()[0][0,0]  # learned CTR scaling factor
    time_embedding_matrix = time_embedding_layer.get_weights()[0]
    # Per-user gate that balances personalization against popularity.
    predicted_activity_gates = activity_gater.predict(user_scoring,verbose=True)
    predicted_activity_gates = predicted_activity_gates[:,0]
    val_predicted_activity_gates = activity_gater.predict(val_user_scoring,verbose=True)
    val_predicted_activity_gates = val_predicted_activity_gates[:,0]
    # Rank candidate news for every test / validation impression.
    rankings = news_ranking(model_config,ctr_weight,predicted_activity_gates,user_scoring,news_scoring,
                            bias_candidate_score,news_bias_vecs,time_embedding_matrix,bias_content_scorer,
                            News,test_impressions)
    val_rankings = news_ranking(model_config,ctr_weight,val_predicted_activity_gates,val_user_scoring,news_scoring,
                                bias_candidate_score,news_bias_vecs,time_embedding_matrix,bias_content_scorer,
                                News,val_impressions)
    performance = evaluate_performance(rankings,test_impressions)
    val_performance = evaluate_performance(val_rankings,val_impressions)
    # Cold-start evaluation: users with at most TOP_COLD_NUM clicks.
    cold = []
    for TOP_COLD_NUM in [0,1,3,5,]:
        g = evaluate_cold_users(rankings,test_impressions,TestUsers.click,TOP_COLD_NUM)
        cold.append(g)
    # Diversity of the top-K recommendations for K = 1..10.
    diversity = []
    for TOP_DIVERSITY_NUM in range(1,11):
        div_top = evaluate_diversity_topic_all(TOP_DIVERSITY_NUM,rankings,test_impressions,News,TestUsers)
        div_ilxd = evaluate_density_ILxD(TOP_DIVERSITY_NUM,rankings,test_impressions,news_scoring)
        diversity.append([div_top,div_ilxd])
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import math
import sklearn
from sklearn.cross_validation import cross_val_score
from subprocess import check_output
from sklearn.metrics import make_scorer, mean_squared_error
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import normalize
from sklearn.linear_model import SGDRegressor
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder()
def rmsle_func(actual, predicted):
    """Root mean squared logarithmic error between two sequences."""
    mean_squared_log = msle(actual, predicted)
    return np.sqrt(mean_squared_log)
def msle(actual, predicted):
    """Mean squared logarithmic error: the mean of the per-element SLE."""
    squared_log_errors = sle(actual, predicted)
    return np.mean(squared_log_errors)
def sle(actual, predicted):
    """Element-wise squared logarithmic error between two sequences."""
    log_actual = np.log(np.array(actual) + 1)
    log_predicted = np.log(np.array(predicted) + 1)
    return (log_actual - log_predicted) ** 2
# Column dtypes for the Bimbo competition CSVs (keeps read_csv memory low).
dtypes = {'Semana' : 'int32',
'Agencia_ID' :'int32',
'Canal_ID' : 'int32',
'Ruta_SAK' : 'int32',
'Cliente-ID' : 'int32',  # NOTE(review): dataset column is 'Cliente_ID' (underscore); this hyphenated key likely never matches -- confirm
'Producto_ID':'int32',
'Venta_hoy':'float32',
'Venta_uni_hoy': 'int32',
'Dev_uni_proxima':'int32',
'Dev_proxima':'float32',
'Demanda_uni_equil':'int32'}
# Online linear regression trained with SGD on log-demand.
# NOTE(review): `n_iter` was renamed `max_iter` and 'squared_loss' renamed
# 'squared_error' in newer scikit-learn; this code targets an old version.
model = SGDRegressor(loss='squared_loss', penalty='l2', alpha=0.0001,
fit_intercept=True, n_iter=10, shuffle=True, verbose=0,
epsilon=0.1, learning_rate='invscaling',
eta0=0.01, power_t=0.25, warm_start=True, average=False)
from sklearn.feature_extraction import FeatureHasher
# Feature hashing keeps dimensionality fixed regardless of category counts.
h = FeatureHasher(n_features=8000, input_type = 'string')
# Cliente_ID: # of unique = 880604 - too many unique values, so it is dropped
# Read the training file lazily in 16k-row chunks (too big for memory).
df_train = pd.read_csv('train.csv', dtype = dtypes, usecols=["Semana", "Agencia_ID", "Canal_ID", 'Ruta_SAK',
'Producto_ID','Demanda_uni_equil'], chunksize=16000)
i = 1  # current chunk index
num = 15  # train on chunks 1..num-1, validate on chunk num
def loc(x):
    """Return log(x + 1), the transform applied to the demand target.

    Bug fix: the original called math.loc(x+1) -- `math` has no `loc`
    attribute, so every call raised AttributeError; math.log was intended.
    """
    return math.log(x + 1)
#pd.concat([train, pd.get_dummies(train['Semana'],sparse=True)], axis=1, join_axes=[train.index])
# Incrementally fit the SGD model on hashed features, one chunk at a time.
# (Python 2 code: note the bare `print` statements below.)
for chunk in df_train:
    if i < num :
        # Hash the categorical columns into a sparse 8000-dim feature vector.
        X_chunk = h.fit_transform(chunk[["Semana", "Agencia_ID", "Canal_ID", 'Ruta_SAK', 'Producto_ID']].astype('string').as_matrix())
        # Train in log(1 + demand) space so that RMSE ~ RMSLE.
        y_chunk = np.log(np.ravel(chunk[['Demanda_uni_equil']].as_matrix()) +1)
        model.partial_fit(X_chunk, y_chunk)
        i = i + 1
    elif i == num:
        # Chunk number `num` is held out as a validation chunk.
        X_chunk = h.fit_transform(chunk[["Semana", "Agencia_ID", "Canal_ID", 'Ruta_SAK','Producto_ID']].astype('string').values)
        y_chunk = np.log(np.ravel(chunk[['Demanda_uni_equil']].values) + 1)
        print 'rmsle: ', rmsle_func(y_chunk, model.predict(X_chunk))
        print 'RMSE ', math.sqrt(sklearn.metrics.mean_squared_error(y_chunk, model.predict(X_chunk)))
        i = i + 1
    else:
        break
print 'Finished the fitting'
# Now make predictions with trained model
X_test = pd.read_csv('test.csv',dtype = dtypes,usecols=['id', "Semana", "Agencia_ID", "Canal_ID", 'Ruta_SAK',
'Producto_ID'], nrows = 1501)
ids = X_test['id']
X_test.drop(['id'], axis =1, inplace = True)
# Invert the log-space training target: demand = exp(prediction) - 1.
y_predicted = np.exp(model.predict(h.fit_transform(X_test.astype('string').values)))-1
submission = pd.DataFrame({"id":ids, "Demanda_uni_equil": y_predicted})
def nonnegative(x):
    """Return x unchanged when non-negative, otherwise the fallback 3.9."""
    return x if x >= 0 else 3.9
# NOTE(review): this cell builds and writes the submission twice -- the
# second half is an exact duplicate, most likely from repeated notebook
# execution.  (Python 2: map() returns a list here.)
y_predicted = map(nonnegative, y_predicted)
submission = pd.DataFrame({"id":ids, "Demanda_uni_equil": y_predicted})
cols = ['id',"Demanda_uni_equil"]
submission = submission[cols]
submission.to_csv("submission.csv", index=False)
print('Completed!')
submission = pd.DataFrame({"id":ids, "Demanda_uni_equil": y_predicted})
y_predicted = map(nonnegative, y_predicted)
submission = pd.DataFrame({"id":ids, "Demanda_uni_equil": y_predicted})
cols = ['id',"Demanda_uni_equil"]
submission = submission[cols]
submission.to_csv("submission.csv", index=False)
print('Completed!')
k = submission.Demanda_uni_equil.values
```
| github_jupyter |
```
from nltk.book import *
text2.common_contexts(["monstrous", "very"])
```
1. Try using the Python interpreter as a calculator, and typing expressions like 12 / (4 + 1).
```
12 / (4 + 1)
```
2. Given an alphabet of 26 letters, there are 26 to the power 10, or 26 ** 10, ten-letter strings we can form. That works out to 141167095653376. How many hundred-letter strings are possible?
```
26 ** 10
```
3. The Python multiplication operation can be applied to lists. What happens when you type ['Monty', 'Python'] * 20, or 3 * sent1?
```
print(['Monty', 'Python'] * 20)
```
4. Review 1 on computing with language. How many words are there in text2? How many distinct words are there?
```
print(len(text2))
len(set(text2))
```
5. Compare the lexical diversity scores for humor and romance fiction in 1.1. Which genre is more lexically diverse?
Table 1.1:
Lexical Diversity of Various Genres in the Brown Corpus
Genre Tokens Types Lexical diversity
humor 21695 5017 0.231
romance 70022 8452 0.121
6. Produce a dispersion plot of the four main protagonists in Sense and Sensibility: Elinor, Marianne, Edward, and Willoughby. What can you observe about the different roles played by the males and females in this novel? Can you identify the couples?
```
text2.dispersion_plot(["Elinor", "Marianne", "Edward", "Willoughby"])
```
The female characters play the larger roles, while the male characters appear only from time to time. Elinor and Marianne may be close (perhaps sisters or friends), since their mentions cluster together throughout the text.
7. Find the collocations in text5.
```
text5.collocations()
```
8. Consider the following Python expression: len(set(text4)). State the purpose of this expression. Describe the two steps involved in performing this computation.
```
len(set(text4))
```
The size of the vocabulary (unique words):
1. Get the vocabulary items
2. Get the actual number of the vocabulary
9. Review 2 on lists and strings.
Define a string and assign it to a variable, e.g., my_string = 'My String' (but put something more interesting in the string). Print the contents of this variable in two ways, first by simply typing the variable name and pressing enter, then by using the print statement.
Try adding the string to itself using my_string + my_string, or multiplying it by a number, e.g., my_string * 3. Notice that the strings are joined together without any spaces. How could you fix this?
```
my_string = 'Try adding the string to itself using my_string'
my_string
print(my_string)
my_string + my_string
my_string + " " + my_string
my_string * 3
```
10. Define a variable my_sent to be a list of words, using the syntax my_sent = ["My", "sent"] (but with your own words, or a favorite saying).
Use ' '.join(my_sent) to convert this into a string.
Use split() to split the string back into the list form you had to start with.
```
my_sent = ['Try', 'adding', 'the', 'string', 'to', 'itself', 'using', 'my_string']
a = ' '.join(my_sent)
print(a)
print(a.split())
```
11. Define several variables containing lists of words, e.g., phrase1, phrase2, and so on. Join them together in various combinations (using the plus operator) to form whole sentences. What is the relationship between len(phrase1 + phrase2) and
len(phrase1) + len(phrase2)?
```
phrase1 = ['Try', 'adding', 'the']
phrase2 = ['string', 'to', 'itself', 'using', 'my_string']
print(phrase1 + phrase2)
print(len(phrase1 + phrase2))
print(len(phrase1) + len(phrase2))
```
12. Consider the following two expressions, which have the same value. Which one will typically be more relevant in NLP? Why?
a. "Monty Python"[6:12]
b. ["Monty", "Python"][1]
The second expression is more relevant, since in NLP we typically deal with lists of words (tokens) rather than raw character strings.
13. We have seen how to represent a sentence as a list of words, where each word is a sequence of characters. What does sent1[2][2] do? Why? Experiment with other index values.
```
sent1[2][2]
```
sent1[2][2] returns the third character of the third word in sent1 (both indices are zero-based, so index 2 selects the third element at each level).
14. The first sentence of text3 is provided to you in the variable sent3. The index of the in sent3 is 1, because sent3[1] gives us 'the'. What are the indexes of the two other occurrences of this word in sent3?
```
print(sent3)
[i for i,d in enumerate(sent3) if d=='the']
```
15. Review the discussion of conditionals in Section 1.4. Find all words in the Chat Corpus (text5) starting with the letter b. Show them in alphabetical order.
```
# Bug fix: the original rebound `text5` to a sorted list of its unique
# tokens, which silently broke later cells (e.g. the FreqDist in exercise 22
# would then see every word exactly once).  Use a separate name instead.
b_words = sorted(set(text5))
for w in b_words:
    if w.startswith("b"):
        print(w)
print(sorted(w for w in set(text5) if w.startswith('b')))
```
16. Type the expression range(10) at the interpreter prompt. Now try range(10, 20), range(10, 20, 2), and range(20, 10, -2). We will see a variety of uses for this built-in function in later chapters.
```
list(range(10))
list(range(10,20))
list(range(10,20, 2))
list(range(20, 10, -2))
```
17. Use text9.index() to find the index of the word sunset. You’ll need to insert this word as an argument between the parentheses. By a process of trial and error, find the slice for the complete sentence that contains this word.
```
text9.index('sunset')
print(text9[621:644])
```
18. Using list addition, and the set and sorted operations, compute the vocabulary of the sentences sent1 ... sent8.
```
len(sorted(set(sent1 + sent8 + sent2 + sent3 + sent4 + sent5 + sent6 + sent7)))
```
19. What is the difference between the following two lines? Which one will give a larger value? Will this be the case for other texts?
sorted(set([w.lower() for w in text1]))
sorted([w.lower() for w in set(text1)])
```
len(sorted(set([w.lower() for w in text1])))
len(sorted([w.lower() for w in set(text1)]))
```
In the second case the words are lowercased only *after* set(text1) has removed duplicates, so words differing only in case (e.g. "The" and "the") each survive as a separate entry — the first expression therefore gives the smaller value.
20. What is the difference between the following two tests: w.isupper() and not w.islower()?
```
w = 'What'
print(w.isupper())
print(not w.islower())
```
The isupper() methods returns “True” if all characters in the string are uppercase, Otherwise, It returns “False”.
The islower() methods returns “True” if all characters in the string are lowercase, Otherwise, It returns “False”.
not w.islower() requires at least one of the characters is upper case.
21. Write the slice expression that extracts the last two words of text2.
```
text2[-2:]
```
22. Find all the four-letter words in the Chat Corpus (text5). With the help of a frequency distribution (FreqDist), show these words in decreasing order of frequency.
```
fq = FreqDist(w for w in text5 if len(w) == 4)
```
23. Review the discussion of looping with conditions in Section 1.4. Use a combination of for and if statements to loop over the words of the movie script for Monty Python and the Holy Grail (text6) and print all the uppercase words, one per line.
```
[w for w in text6 if w.isupper()]
```
24. Write expressions for finding all words in text6 that meet the following conditions. The result should be in the form of a list of words: ['word1', 'word2', ...].
Ending in ise
Containing the letter z
Containing the sequence of letters pt
Having all lowercase letters except for an initial capital (i.e., titlecase)
```
[w for w in text6 if w.endswith('ise')]
[w for w in text6 if 'z' in w]
[w for w in text6 if 'pt' in w]
[w for w in text6 if w.istitle()]
```
25. Define sent to be the list of words ['she', 'sells', 'sea', 'shells', 'by', 'the', 'sea', 'shore']. Now write code to perform the following tasks:
a. Print all words beginning with sh.
b. Print all words longer than four characters
```
sent = ['she', 'sells', 'sea', 'shells', 'by', 'the', 'sea', 'shore']
print([w for w in sent if w.startswith('sh')])
print([w for w in sent if len(w) > 4])
```
26. What does the following Python code do? sum([len(w) for w in text1]) Can you use it to work out the average word length of a text?
```
sum([len(w) for w in text1])
sum([len(w) for w in text1])/len(text1)
```
27. Define a function called vocab_size(text) that has a single parameter for the text, and which returns the vocabulary size of the text.
```
def vocab_size(text):
    """Return the number of distinct tokens (vocabulary size) in *text*."""
    unique_tokens = set(text)
    return len(unique_tokens)
```
28. Define a function percent(word, text) that calculates how often a given word occurs in a text and expresses the result as a percentage.
```
def percent(word, text):
    """Return how often *word* occurs in *text*, as a percentage of all tokens.

    Bug fix: the original looped over every token in *text*, shadowing the
    *word* argument, so it always returned the frequency of the *last* token
    in the text instead of the requested word.  Also guards against an empty
    text, which previously fell through (the loop never ran, returning None).
    """
    if not text:
        return 0.0
    return text.count(word) * 100 / len(text)
percent('the', text6)
```
29. We have been using sets to store vocabularies. Try the following Python expression:
set(sent3) < set(text1). Experiment with this using different arguments to set(). What does it do? Can you think of a practical application for this?
```
set(sent3) < set(text1)
```
| github_jupyter |
# Bring your own data to create a music genre model for AWS DeepComposer
---
This notebook is for the <b>Bring your own data to create a music genre model for AWS DeepComposer</b> blog and is associated with the <b> AWS DeepComposer: Train it Again Maestro </b> web series on the <b>A Cloud Guru</b> platform.
This covers preparing your data to train a custom music genre model for AWS DeepComposer.
---
```
# Create the environment
!conda update --all --y
!pip install numpy==1.16.4
!pip install pretty_midi
!pip install pypianoroll
# IMPORTS
import os
import numpy as np
from numpy import save
import pypianoroll
from pypianoroll import Multitrack, Track
from utils import display_utils
import matplotlib.pyplot as plt
%matplotlib inline
root_dir = './2Experiments'
# Directory to save checkpoints
model_dir = os.path.join(root_dir,'2Reggae') # JSP: 229, Bach: 19199
# Directory to save pianorolls during training
train_dir = os.path.join(model_dir, 'train')
# Location of the original MIDI files used for training; place your MIDI files here
reggae_midi_location = './reggae_midi/'
# Directory to save eval data
dataset_eval_dir = './dataset/'
```
# Prepare Training Data (MIDI files -----> .npy)
---
This section of code demonstrates the process of converting MIDI files to the needed format for training, which is a .npy file. The final shape on the .npy file should be (x, 32, 128, 4), which represents (number of samples, number of time steps per sample, pitch range, instruments).
---
<img src="images/training-image.png" alt="multitrack object" width="600">
```
# Helper that routes a track into `collection`, keyed by instrument family.
def store_track(track, collection):
    """
    Pull out the 4 selected instrument types based on program number
    The program number represents the unique identifier for the instrument (ie. track.program)
    https://en.wikipedia.org/wiki/General_MIDI
    """
    # Program-number ranges for the four instrument families we keep.
    program_ranges = {
        'Piano': [1, 2, 3, 4, 5, 6, 7, 8],
        'Organ': [17, 18, 19, 20, 21, 22, 23, 24],
        'Bass': [33, 34, 35, 36, 37, 38, 39, 40],
        'Guitar': [25, 26, 27, 28, 29, 30, 31, 32],
    }

    matched_instrument = None
    for instrument, programs in program_ranges.items():
        if track.program in programs:
            matched_instrument = instrument
            break

    if matched_instrument is None:
        # Not one of the four families -- drop it, same message as before.
        print("Skipping this instrument------------------->", track.name)
    elif isinstance(collection, dict):
        # Dict collection: append under the instrument's key.
        collection[matched_instrument].append(track)
    else:
        # List collection: any matching track is simply appended.
        collection.append(track)
    return collection
#helper function that returns the pianorolls merged to 4 tracks for 4 chosen instruments
def get_merged(music_tracks, filename):
    """Merge a Multitrack's tracks into one pianoroll per chosen instrument.

    Returns an array reshaped to (samples, 32, 128, 4): 2-bar (32 time step)
    chunks x pitch range x the four instruments selected by store_track.
    NOTE(review): original indentation was lost in this dump; nesting below
    is reconstructed from the logic -- confirm against the source notebook.
    """
    chosen_tracks = []
    #choose the tracks from the Multitrack object
    for index, track in enumerate(music_tracks.tracks):
        chosen_tracks = store_track(track, chosen_tracks)
    #dictionary to hold reshaped pianorolls for 4 chosen instruments
    reshaped_piano_roll_dict = {'Piano': [], 'Organ': [], 'Bass': [], 'Guitar': []}
    #loop thru chosen tracks
    for index, track in enumerate(chosen_tracks):
        fig, ax = track.plot()  # visualize each track (plotting side effect)
        plt.show()
        try:
            #reshape pianoroll to 2 bar (i.e. 32 time step) chunks
            track.pianoroll = track.pianoroll.reshape( -1, 32, 128)
            #store reshaped pianoroll per instrument
            reshaped_piano_roll_dict = store_track(track, reshaped_piano_roll_dict)
        except Exception as e:
            # Typically a length not divisible by 32 despite padding.
            print("ERROR!!!!!----> Skipping track # ", index, " with error ", e)
    #will hold all merged instrument tracks
    merge_piano_roll_list = []
    for instrument in reshaped_piano_roll_dict:
        try:
            merged_pianorolls = np.empty(shape=(0,32,128))
            #concatenate/stack all tracks for a single instrument
            if len(reshaped_piano_roll_dict[instrument]) > 0:
                if reshaped_piano_roll_dict[instrument]:
                    merged_pianorolls = np.stack([track.pianoroll for track in reshaped_piano_roll_dict[instrument]], -1)
                    merged_pianorolls = merged_pianorolls[:, :, :, 0]
                    # Collapse chunks: a pitch is "on" if any chunk plays it.
                    merged_piano_rolls = np.any(merged_pianorolls, axis=0)
                    merge_piano_roll_list.append(merged_piano_rolls)
        except Exception as e:
            print("ERROR!!!!!----> Cannot concatenate/merge track for instrument", instrument, " with error ", e)
            continue;
    # Stack the (32, 128) per-instrument rolls along a new last axis.
    merge_piano_roll_list = np.stack([track for track in merge_piano_roll_list], -1)
    return merge_piano_roll_list.reshape(-1,32,128,4)
```
<img src="images/multi_track_object.png" alt="multitrack object" width="600">
<img src="images/track_object.png" alt="track object" width="600">
```
#holds final reshaped tracks that will be saved to training .npy file
track_list = np.empty(shape=(0,32,128,4))
#init with beat resolution of 4 (time steps per beat -> 32 steps = 2 bars in 4/4)
music_tracks = pypianoroll.Multitrack(beat_resolution=4)
#loop through all the .mid files
for filename in os.listdir(reggae_midi_location):
    print("Starting to process filename---->", reggae_midi_location + filename)
    if filename.endswith(".mid"):
        try:
            #Load MIDI file using parse_midi
            #returns Multi-Track object containing Track objects
            music_tracks.parse_midi(reggae_midi_location + filename)
            #add padding to avoid reshape errors
            #pad the pianorolls with zeros making the length a multiple of 32
            music_tracks.pad_to_multiple(32)
            music_tracks.pad_to_same()
            #merge pianoroll objects by instrument
            merged_tracks_to_add_to_training_file = get_merged(music_tracks, filename)
            #concatenate merged pianoroll objects to final training data track list
            track_list = np.concatenate((merged_tracks_to_add_to_training_file, track_list))
            print("Successfully processed filename---->", reggae_midi_location + filename)
        except Exception as e:
            print("**********ERROR**************It's possible that not all 4 instruments exist in this track; at least one is 0")
            print("Skipping file---->", filename, e)
            print(e)
# binarize data
# Order matters: zeros become -1 first, then every remaining (>= 0, i.e.
# originally positive) value becomes 1, yielding a {-1, 1} pianoroll.
track_list[track_list == 0] = -1
track_list[track_list >= 0] = 1
#split the data into training and evaluation datasets (50/50)
training_data, eval_data = np.split(track_list, 2)
#save training data
save(train_dir + '/reggae-train.npy', np.array(training_data))
#save evaluation data
save(dataset_eval_dir + '/eval.npy', np.array(eval_data))
```
# Review Training Data
```
# Sanity-check the saved training data before handing it to the model.
#double check the shape on training data, should be (x, 32, 128, 4), where x represents the amount of records
training_data = np.load(train_dir + '/reggae-train.npy')
print("Testing the training shape: ", training_data.shape)
#view sample of data that will be fed to model, four graphs == four tracks
display_utils.show_pianoroll(training_data)
```
| github_jupyter |
```
import os
import re
import sklearn
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.metrics import *
from sklearn.linear_model import *
from sklearn.model_selection import *
# Show every column when displaying DataFrames.
pd.set_option('display.max_columns', None)
# DATA_PATH = '../input/ncaam-march-mania-2021/'
DATA_PATH_W = r'C:\Users\FLUXNATURE\Desktop\New Kaggle world\NCAAW'
# List the available data files in the local NCAAW folder.
for filename in os.listdir(DATA_PATH_W):
    print(filename)
```
DATA PREPARATION AND PROCESSING
Data: WNCAATourneySeeds.csv
"This file identifies the seeds for all teams in each NCAA® tournament, for all seasons of historical data. Thus, there are exactly 64 rows for each year, since there are no play-in teams in the women's tournament. We will not know the seeds of the respective tournament teams, or even exactly which 64 teams it will be, until Selection Monday on March 16, 2020 (DayNum=133).
Season - the year that the tournament was played in
Seed - this is a 3-character identifier of the seed, where the first character is either W, X, Y, or Z (identifying the region the team was in) and the next two digits (either 01, 02, ..., 15, or 16) tell you the seed within the region. For example, the first record in the file is seed W01, which means we are looking at the #1 seed in the W region (which we can see from the "WSeasons.csv" file was the East region).
TeamID - this identifies the id number of the team, as specified in the WTeams.csv file"
```
# df_seeds = pd.read_csv(DATA_PATH + "MNCAATourneySeeds.csv")
# Tournament seeds: one row per (Season, Seed, TeamID).
df_seeds = pd.read_csv(r"C:\Users\FLUXNATURE\Desktop\New Kaggle world\NCAAW\WNCAATourneySeeds.csv")
df_seeds.head()
```
SEASON'S RESULTS
Data: WRegularSeasonCompactResults.csv
This file identifies the game-by-game results for many seasons of historical data, starting with the 1998 season. For each season, the file includes all games played from DayNum 0 through 132. It is important to realize that the "Regular Season" games are simply defined to be all games played on DayNum=132 or earlier (DayNum=133 is Selection Monday). Thus a game played before Selection Monday will show up here whether it was a pre-season tournament, a non-conference game, a regular conference game, a conference tournament game, or whatever.
Season - this is the year of the associated entry in WSeasons.csv (the year in which the final tournament occurs). For example, during the 2016 season, there were regular season games played between November 2015 and March 2016, and all of those games will show up with a Season of 2016.
DayNum - this integer always ranges from 0 to 132, and tells you what day the game was played on. It represents an offset from the "DayZero" date in the "WSeasons.csv" file. For example, the first game in the file was DayNum=18. Combined with the fact from the "WSeasons.csv" file that day zero was 10/27/1997 that year, this means the first game was played 18 days later, or 11/14/1997. There are no teams that ever played more than one game on a given date, so you can use this fact if you need a unique key (combining Season and DayNum and WTeamID).
WTeamID - this identifies the id number of the team that won the game, as listed in the "WTeams.csv" file. No matter whether the game was won by the home team or visiting team, or if it was a neutral-site game, the "WTeamID" always identifies the winning team.
WScore - this identifies the number of points scored by the winning team.
LTeamID - this identifies the id number of the team that lost the game.
LScore - this identifies the number of points scored by the losing team. Thus you can be confident that WScore will be greater than LScore for all games listed.
NumOT - this indicates the number of overtime periods in the game, an integer 0 or higher.
WLoc - this identifies the "location" of the winning team. If the winning team was the home team, this value will be "H". If the winning team was the visiting team, this value will be "A". If it was played on a neutral court, then this value will be "N".
```
#Dropping NumOt and Wloc
df_season_results = pd.read_csv(r"C:\Users\FLUXNATURE\Desktop\New Kaggle world\NCAAW\WRegularSeasonCompactResults.csv")
df_season_results.drop(['NumOT', 'WLoc'], axis=1, inplace=True)
# Margin of victory; always positive since WScore > LScore by construction.
df_season_results['ScoreGap'] = df_season_results['WScore'] - df_season_results['LScore']
df_season_results.head()
```
FEATURE ENGINEERING
For each team at each season, I compute :
Number of wins
Number of losses
Average score gap of wins
Average score gap of losses
And use the following features :
Win Ratio
Average score gap
```
# Per (Season, Team) aggregates: number of wins/losses and average score gap
# in each role.  'DayNum' just serves as an arbitrary column to count over.
num_win = df_season_results.groupby(['Season', 'WTeamID']).count()
num_win = num_win.reset_index()[['Season', 'WTeamID', 'DayNum']].rename(columns={"DayNum": "NumWins", "WTeamID": "TeamID"})
num_loss = df_season_results.groupby(['Season', 'LTeamID']).count()
num_loss = num_loss.reset_index()[['Season', 'LTeamID', 'DayNum']].rename(columns={"DayNum": "NumLosses", "LTeamID": "TeamID"})
# Mean ScoreGap of the games the team won / lost.
gap_win = df_season_results.groupby(['Season', 'WTeamID']).mean().reset_index()
gap_win = gap_win[['Season', 'WTeamID', 'ScoreGap']].rename(columns={"ScoreGap": "GapWins", "WTeamID": "TeamID"})
gap_loss = df_season_results.groupby(['Season', 'LTeamID']).mean().reset_index()
gap_loss = gap_loss[['Season', 'LTeamID', 'ScoreGap']].rename(columns={"ScoreGap": "GapLosses", "LTeamID": "TeamID"})
```
MERGE COMPUTATIONS
```
# One row per (Season, TeamID) that appears at least once as winner or loser.
df_features_season_w = df_season_results.groupby(['Season', 'WTeamID']).count().reset_index()[['Season', 'WTeamID']].rename(columns={"WTeamID": "TeamID"})
df_features_season_l = df_season_results.groupby(['Season', 'LTeamID']).count().reset_index()[['Season', 'LTeamID']].rename(columns={"LTeamID": "TeamID"})
# Fix: pass `axis` as a keyword -- the positional form pd.concat(objs, 0)
# was deprecated and is rejected by pandas 2.x (axis is keyword-only).
df_features_season = pd.concat([df_features_season_w, df_features_season_l], axis=0).drop_duplicates().sort_values(['Season', 'TeamID']).reset_index(drop=True)
# Attach win/loss counts and average gaps; teams missing a stat
# (e.g. an undefeated team has no losses) get 0 via fillna.
df_features_season = df_features_season.merge(num_win, on=['Season', 'TeamID'], how='left')
df_features_season = df_features_season.merge(num_loss, on=['Season', 'TeamID'], how='left')
df_features_season = df_features_season.merge(gap_win, on=['Season', 'TeamID'], how='left')
df_features_season = df_features_season.merge(gap_loss, on=['Season', 'TeamID'], how='left')
df_features_season.fillna(0, inplace=True)
```
COMPUTATIONAL FEATURES
```
# Win ratio over the whole regular season.
df_features_season['WinRatio'] = df_features_season['NumWins'] / (df_features_season['NumWins'] + df_features_season['NumLosses'])
# Average signed score gap per game: wins contribute +GapWins, losses
# contribute -GapLosses, weighted by how many of each the team had.
df_features_season['GapAvg'] = (
    (df_features_season['NumWins'] * df_features_season['GapWins'] -
     df_features_season['NumLosses'] * df_features_season['GapLosses'])
    / (df_features_season['NumWins'] + df_features_season['NumLosses'])
)
# Keep only the two derived features.
df_features_season.drop(['NumWins', 'NumLosses', 'GapWins', 'GapLosses'], axis=1, inplace=True)
```
TOURNEY
Data: WNCAATourneyCompactResults.csv
This file identifies the game-by-game NCAA® tournament results for all seasons of historical data. The data is formatted exactly like the WRegularSeasonCompactResults data. Each season you will see 63 games listed, since there are no women's play-in games.
Although the scheduling of the men's tournament rounds has been consistent for many years, there has been more variety in the scheduling of the women's rounds. There have been four different schedules over the course of the past 20+ years for the women's tournament, as follows:
```
# Load tournament results; NumOT and WLoc are dropped because they are not used as features.
df_tourney_results = pd.read_csv(R"C:\Users\FLUXNATURE\Desktop\New Kaggle world\NCAAW\WNCAATourneyCompactResults.csv")
df_tourney_results.drop(['NumOT', 'WLoc'], axis=1, inplace=True)
```
The DayNum feature can be improved by replacing it with the corresponding tournament round.
```
df_tourney_results.head(4)
def get_round(day):
    """Map a tournament DayNum to a round index (0 = first games ... 6 = final).

    Unknown day numbers are reported and mapped to round 0 rather than raising,
    so a single odd scheduling value cannot break the feature pipeline.
    """
    # NOTE(review): the original author flags this mapping as "probably wrong";
    # confirm against the actual tournament schedule before relying on Round.
    round_dic = {137: 0, 138: 0, 139: 1, 140: 1, 141: 2, 144: 3, 145: 3, 146: 4, 147: 4, 148: 4, 151: 5, 153: 5, 155: 6}
    try:
        return round_dic[day]
    except KeyError:  # only a missing day is expected; a bare except would hide real bugs
        print(f'Unknown day : {day}')
        return 0
# Derive the Round feature from DayNum for every tourney game.
df_tourney_results['Round'] = df_tourney_results['DayNum'].apply(get_round)
df_tourney_results.head()
```
Feature Engineering
Train data
```
df_tourney_results.tail(4)
# Work on a copy, keeping only seasons >= 2003 (no ratings are available before that).
df = df_tourney_results.copy()
df = df[df['Season'] >= 2003].reset_index(drop=True)
df.head()
df.tail()
```
Each row corresponds to a match between WTeamID and LTeamID, which was won by WTeamID.
I only keep matches after 2003 since I don't have the ratings for the older ones.
I start by aggregating features corresponding to each team.
Seeds
SeedW is the seed of the winning team
SeedL is the seed of the losing team
```
# Attach each team's tournament seed: SeedW for the winner, SeedL for the loser.
# TeamID is dropped after each join because Season+TeamID were only join keys.
df = pd.merge(
    df,
    df_seeds,
    how='left',
    left_on=['Season', 'WTeamID'],
    right_on=['Season', 'TeamID']
).drop('TeamID', axis=1).rename(columns={'Seed': 'SeedW'})
df = pd.merge(
    df,
    df_seeds,
    how='left',
    left_on=['Season', 'LTeamID'],
    right_on=['Season', 'TeamID']
).drop('TeamID', axis=1).rename(columns={'Seed': 'SeedL'})
def treat_seed(seed):
    """Strip the region letter(s) from a seed string such as 'W01' and return the numeric part as an int."""
    digits = "".join(ch for ch in seed if ch.isdigit())
    return int(digits)
# Convert seed strings (e.g. 'W01') into plain integers.
df['SeedW'] = df['SeedW'].apply(treat_seed)
df['SeedL'] = df['SeedL'].apply(treat_seed)
df.head()
df.tail()
```
Season Stats
WinRatioW is the win ratio of the winning team during the season
WinRatioL is the win ratio of the losing team during the season
```
# Attach season-level aggregates for the winner (suffix W) and the loser (suffix L).
# NOTE(review): NumWins/NumLosses/GapWins/GapLosses were dropped from
# df_features_season earlier, so those rename entries are no-ops kept for clarity.
df = pd.merge(
    df,
    df_features_season,
    how='left',
    left_on=['Season', 'WTeamID'],
    right_on=['Season', 'TeamID']
).rename(columns={
    'NumWins': 'NumWinsW',
    'NumLosses': 'NumLossesW',
    'GapWins': 'GapWinsW',
    'GapLosses': 'GapLossesW',
    'WinRatio': 'WinRatioW',
    'GapAvg': 'GapAvgW',
}).drop(columns='TeamID')  # axis is implied by columns=; passing axis=1 as well was redundant
df = pd.merge(
    df,
    df_features_season,
    how='left',
    left_on=['Season', 'LTeamID'],
    right_on=['Season', 'TeamID']
).rename(columns={
    'NumWins': 'NumWinsL',
    'NumLosses': 'NumLossesL',
    'GapWins': 'GapWinsL',
    'GapLosses': 'GapLossesL',
    'WinRatio': 'WinRatioL',
    'GapAvg': 'GapAvgL',
}).drop(columns='TeamID')
df.head()
df.tail(2)
```
Add symmetrical rows
Right now our data only consists of matches described from the winner's point of view
We duplicate our data and swap the winner/loser roles, so each match appears from both teams' perspectives
```
def add_loosing_matches(win_df):
    """Duplicate the matches frame so each game appears twice: once with the
    winner as team A, and once with the loser as team A.

    Downstream difference features then become symmetric in A/B, and the
    classification target (did team A win) is balanced by construction.
    """
    # Winner's perspective: W* columns become team A, L* columns become team B.
    win_rename = {
        "WTeamID": "TeamIdA",
        "WScore": "ScoreA",
        "LTeamID": "TeamIdB",
        "LScore": "ScoreB",
        "SeedW": "SeedA",
        "SeedL": "SeedB",
        'WinRatioW': 'WinRatioA',
        'WinRatioL': 'WinRatioB',
        'GapAvgW': 'GapAvgA',
        'GapAvgL': 'GapAvgB',
        # "OrdinalRankW": "OrdinalRankA",
        # "OrdinalRankL": "OrdinalRankB",
    }
    # Loser's perspective: same mapping with the A/B roles swapped.
    lose_rename = {
        "WTeamID": "TeamIdB",
        "WScore": "ScoreB",
        "LTeamID": "TeamIdA",
        "LScore": "ScoreA",
        "SeedW": "SeedB",
        "SeedL": "SeedA",
        'GapAvgW': 'GapAvgB',
        'GapAvgL': 'GapAvgA',
        'WinRatioW': 'WinRatioB',
        'WinRatioL': 'WinRatioA',
        # "OrdinalRankW": "OrdinalRankB",
        # "OrdinalRankL": "OrdinalRankA",
    }
    win_df = win_df.copy()
    lose_df = win_df.copy()
    win_df = win_df.rename(columns=win_rename)
    lose_df = lose_df.rename(columns=lose_rename)
    # axis must be passed by keyword: positional `axis` for pd.concat was
    # deprecated and removed in pandas 2.0.
    return pd.concat([win_df, lose_df], axis=0, sort=False)
# Duplicate each match so both orientations (team A wins / team A loses) are present.
df = add_loosing_matches(df)
```
Differences
We compute the difference between the team for each feature.
This helps further assessing how better (or worse) team A is from team B
```
# Per-match differences between team A's and team B's features.
df['SeedDiff'] = df['SeedA'] - df['SeedB']
df['WinRatioDiff'] = df['WinRatioA'] - df['WinRatioB']
df['GapAvgDiff'] = df['GapAvgA'] - df['GapAvgB']
# df['OrdinalRankDiff'] = df['OrdinalRankA'] - df['OrdinalRankB']
df.head()
```
Test Data
Preparing
```
# Sample submission: each ID encodes "Season_TeamIdA_TeamIdB"; split it into columns.
df_test = pd.read_csv(r"C:\Users\FLUXNATURE\Desktop\New Kaggle world\NCAAW\WSampleSubmissionStage1.csv")
df_test['Season'] = df_test['ID'].apply(lambda x: int(x.split('_')[0]))
df_test['TeamIdA'] = df_test['ID'].apply(lambda x: int(x.split('_')[1]))
df_test['TeamIdB'] = df_test['ID'].apply(lambda x: int(x.split('_')[2]))
df_test.head()
df_test.tail()
```
SEEDS
```
# Attach tournament seeds for both teams of each test match-up, then convert
# the seed strings to integers (same treatment as the training frame).
df_test = pd.merge(
    df_test,
    df_seeds,
    how='left',
    left_on=['Season', 'TeamIdA'],
    right_on=['Season', 'TeamID']
).drop('TeamID', axis=1).rename(columns={'Seed': 'SeedA'})
df_test = pd.merge(
    df_test,
    df_seeds,
    how='left',
    left_on=['Season', 'TeamIdB'],
    right_on=['Season', 'TeamID']
).drop('TeamID', axis=1).rename(columns={'Seed': 'SeedB'})
df_test['SeedA'] = df_test['SeedA'].apply(treat_seed)
df_test['SeedB'] = df_test['SeedB'].apply(treat_seed)
```
SEASON'S STATS
```
# Attach season-level aggregates for team A and team B of each test match-up.
# NOTE(review): the NumWins/GapWins rename entries are no-ops, since those
# columns were dropped from df_features_season earlier; kept for clarity.
df_test = pd.merge(
    df_test,
    df_features_season,
    how='left',
    left_on=['Season', 'TeamIdA'],
    right_on=['Season', 'TeamID']
).rename(columns={
    'NumWins': 'NumWinsA',
    'NumLosses': 'NumLossesA',
    'GapWins': 'GapWinsA',
    'GapLosses': 'GapLossesA',
    'WinRatio': 'WinRatioA',
    'GapAvg': 'GapAvgA',
}).drop(columns='TeamID')  # axis is implied by columns=; passing axis=1 as well was redundant
df_test = pd.merge(
    df_test,
    df_features_season,
    how='left',
    left_on=['Season', 'TeamIdB'],
    right_on=['Season', 'TeamID']
).rename(columns={
    'NumWins': 'NumWinsB',
    'NumLosses': 'NumLossesB',
    'GapWins': 'GapWinsB',
    'GapLosses': 'GapLossesB',
    'WinRatio': 'WinRatioB',
    'GapAvg': 'GapAvgB',
}).drop(columns='TeamID')
```
DIFFERENCES
```
# Same A-vs-B difference features as the training frame.
df_test['SeedDiff'] = df_test['SeedA'] - df_test['SeedB']
df_test['WinRatioDiff'] = df_test['WinRatioA'] - df_test['WinRatioB']
df_test['GapAvgDiff'] = df_test['GapAvgA'] - df_test['GapAvgB']
# df_test['OrdinalRankDiff'] = df_test['OrdinalRankA'] - df_test['OrdinalRankB']
df_test.head()
```
TARGET
```
# Targets: ScoreDiff for regression mode, binary WinA (did team A win) for classification mode.
df['ScoreDiff'] = df['ScoreA'] - df['ScoreB']
df['WinA'] = (df['ScoreDiff'] > 0).astype(int)
```
MODELLING
```
# Feature set shared by both models; the OrdinalRank features are kept
# commented out because the corresponding columns are not built above.
features = [
    'SeedA',
    'SeedB',
    'WinRatioA',
    'GapAvgA',
    'WinRatioB',
    'GapAvgB',
    # 'OrdinalRankA',
    # 'OrdinalRankB',
    'SeedDiff',
    'WinRatioDiff',
    'GapAvgDiff'
    # 'OrdinalRankDiff',
]
def rescale(features, df_train, df_val, df_test=None):
    """Min-max normalize `features` in place on each frame, using statistics
    fitted on the training frame only (no leakage from validation/test).

    Returns the three frames; df_test stays None when not provided.
    """
    lo = df_train[features].min()
    span = df_train[features].max() - lo
    frames = [df_train, df_val] if df_test is None else [df_train, df_val, df_test]
    for frame in frames:
        frame[features] = (frame[features] - lo) / span
    return df_train, df_val, df_test
```
Cross Validation
Validate on season n, for n in the 10 last seasons.
Train on earlier seasons
Pipeline support classification (predict the team that wins) and regression (predict the score gap)
```
def kfold_reg(df, df_test_=None, plot=False, verbose=0, mode="reg"):
    """Season-wise cross-validation over the training frame.

    Validates on each season past the first 10, training on all earlier
    seasons. mode="reg" fits ElasticNet on the score gap; any other mode fits
    LogisticRegression on the win indicator. Uses the module-level `features`
    list. Returns the per-fold predictions on the test frame.

    NOTE(review): df_test_.copy() would raise if df_test_ were left as None
    despite the default -- this function assumes a test frame is supplied.
    """
    seasons = df['Season'].unique()
    cvs = []
    pred_tests = []
    target = "ScoreDiff" if mode == "reg" else "WinA"
    for season in seasons[10:]:
        if verbose:
            print(f'\nValidating on season {season}')
        # Train strictly on earlier seasons to avoid temporal leakage.
        df_train = df[df['Season'] < season].reset_index(drop=True).copy()
        df_val = df[df['Season'] == season].reset_index(drop=True).copy()
        df_test = df_test_.copy()
        df_train, df_val, df_test = rescale(features, df_train, df_val, df_test)
        if mode == "reg":
            model = ElasticNet(alpha=1, l1_ratio=0.5)
        else:
            model = LogisticRegression(C=10)
        model.fit(df_train[features], df_train[target])
        if mode == "reg":
            pred = model.predict(df_val[features])
            # Map raw gap predictions into [0, 1] so they can be scored with log-loss.
            pred = (pred - pred.min()) / (pred.max() - pred.min())
        else:
            pred = model.predict_proba(df_val[features])[:, 1]
        if df_test is not None:
            if mode == "reg":
                pred_test = model.predict(df_test[features])
                pred_test = (pred_test - pred_test.min()) / (pred_test.max() - pred_test.min())
            else:
                pred_test = model.predict_proba(df_test[features])[:, 1]
            pred_tests.append(pred_test)
        if plot:
            plt.figure(figsize=(15, 6))
            plt.subplot(1, 2, 1)
            plt.scatter(pred, df_val['ScoreDiff'].values, s=5)
            plt.grid(True)
            plt.subplot(1, 2, 2)
            sns.histplot(pred)
            plt.show()
        loss = log_loss(df_val['WinA'].values, pred)
        cvs.append(loss)
        if verbose:
            print(f'\t -> Scored {loss:.3f}')
    print(f'\n Local CV is {np.mean(cvs):.3f}')
    return pred_tests
# Classification mode: predict P(team A wins) for each fold's test set.
pred_tests = kfold_reg(df, df_test, plot=False, verbose=1, mode="cls")
```
Submission
Note that this pipeline is leaky during the first stage of the competition: the LB score will be underestimated, since the models validated on the most recent seasons were trained on data that overlaps the test period.
```
# Average the per-fold test predictions and write the submission file.
pred_test = np.mean(pred_tests, 0)
sub = df_test[['ID', 'Pred']].copy()
sub['Pred'] = pred_test
sub.to_csv('submission_file_Ismail.csv', index=False)
sub.head()
sub.tail()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/dcshapiro/AI-Feynman/blob/master/AI_Feynman_cleared_output.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# AI Feynman 2.0: Learning Regression Equations From Data
### Clone repository and install dependencies
```
!git clone https://github.com/SJ001/AI-Feynman.git
```
Look at what we downloaded
```
!ls /content/AI-Feynman
# %pycat AI-Feynman/requirements.txt if you need to fix the dependencies
```
Fix broken requirements file (may not be needed if later versions fix this).
```
%%writefile AI-Feynman/requirements.txt
torch>=1.4.0
matplotlib
sympy==1.4
pandas
scipy
sortedcontainers
```
Install dependencies not already installed in Google Colab
```
!pip install -r AI-Feynman/requirements.txt
```
Check that fortran is installed
```
!gfortran --version
```
Check the OS version
```
!lsb_release -a
```
Install the csh shell
```
!sudo apt-get install csh
```
Set loose permissions to avoid some reported file permissions issues
```
!chmod +777 /content/AI-Feynman/Code/*
```
### Compile the fortran code
Look at the code directory
```
!ls -l /content/AI-Feynman/Code
```
Compile .f files into .x files
```
!cd /content/AI-Feynman/Code/ && ./compile.sh
```
### Run the first example from the AI-Feynman repository
Change working directory to the Code directory
```
import os
os.chdir("/content/AI-Feynman/Code/")
print(os.getcwd())
!pwd
%%writefile ai_feynman_magic.py
from S_run_aifeynman import run_aifeynman
# Run example 1 as the regression dataset
run_aifeynman("/content/AI-Feynman/example_data/","example1.txt",30,"14ops.txt", polyfit_deg=3, NN_epochs=400)
```
Look at the first line of the example 1 file
```
!head -n 1 /content/AI-Feynman/example_data/example1.txt
# Example 1 has data generated from an equation, where the last column is the regression target, and the rest of the columns are the input data
# The following example shows the relationship between the first line of the file example1.txt and the formula used to make the data
x=[1.6821347439986711,1.1786188905177983,4.749225735259924,1.3238356535004034,3.462199507094163]
x0,x1,x2,x3=x[0],x[1],x[2],x[3]
(x0**2 - 2*x0*x1 + x1**2 + x2**2 - 2*x2*x3 + x3**2)**0.5
```
Run the code. It takes a long time, so go get some coffee.
```
!cd /content/AI-Feynman/Code/ && python3 ai_feynman_magic.py
```
### Assess the results
```
!cat results.dat
```
We found a candidate with an excellent fit, let's see what we got
```
!ls -l /content/AI-Feynman/Code/results/
!ls -l /content/AI-Feynman/Code/results/NN_trained_models/models
!cat /content/AI-Feynman/Code/results/solution_example1.txt
```
Note in the cell above that the solution with the lowest loss is the formula this data was generated from
### Try our own dataset generation and equation learning
Until now we were not storing the results in Google Drive. We might want to keep the data in Drive so that the results don't disappear when this Colab instance gets recycled.
```
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
```
Make a directory in the mounted Google Drive where we will do our work
```
!mkdir -p /content/gdrive/My\ Drive/Lemay.ai_research/AI-Feynman
```
Copy over the stuff we did so far, and from now on we work out of Google Drive
```
!cp -r /content/AI-Feynman /content/gdrive/My\ Drive/Lemay.ai_research/
```
The code below generates our regression example dataset
We generate points for 4 columns, where x0 is from the same equation as x1, and x2 is from the same equation as x3
The last column is Y
```
import os
import random
os.chdir("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/example_data")
def getY(x01, x23):
    """Target plane: y = 3 + (x23 - x01) / 2."""
    return 0.5 * x23 - 0.5 * x01 + 3


def getRow():
    """Return one space-separated sample line 'x0 x1 x2 x3 y\\n' where x1 and
    x3 are exact duplicates of x0 and x2."""
    first = random.random()
    second = random.random()
    duplicate_first = first
    duplicate_second = second
    y = getY(duplicate_first, duplicate_second)
    return " ".join(str(v) for v in (first, duplicate_first, second, duplicate_second, y)) + "\n"
# Generate 10k sample rows. The with-block closes the file on exit, so the
# original trailing f.close() was redundant and has been removed.
with open("duplicateVarsExample.txt", "w") as f:
    for _ in range(10000):
        f.write(getRow())
# switch back to the code directory
os.chdir("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/Code")
```
Let's look at our data
```
!head -n 20 ../example_data/duplicateVarsExample.txt
```
Let's also plot the data for x01 and x23 against Y
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
# NOTE(review): the 'seaborn-whitegrid' style name was removed in matplotlib 3.6
# ('seaborn-v0_8-whitegrid' is the successor).
plt.style.use('seaborn-whitegrid')
import numpy as np
# Space-separated file, no header: columns 0-3 are x0..x3, column 4 is y.
df=pd.read_csv("../example_data/duplicateVarsExample.txt",sep=" ",header=None)
df.plot.scatter(x=0, y=4)
df.plot.scatter(x=2, y=4)
!pwd
```
Let's write out the runner file for this experiment
```
%%writefile ai_feynman_duplicate_variables.py
from S_run_aifeynman import run_aifeynman
run_aifeynman("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/example_data/","duplicateVarsExample.txt",30,"14ops.txt", polyfit_deg=3, NN_epochs=400)
```
Don't forget to lower the file permissions
```
!chmod 777 /content/gdrive/My\ Drive/Lemay.ai_research/AI-Feynman/Code/*
!chmod +x /content/gdrive/My\ Drive/Lemay.ai_research/AI-Feynman/Code/*.scr
```
Now we run the file, and go get more coffee, because this is not going to be fast...
```
!python3 ai_feynman_duplicate_variables.py
```
Initial models quickly mapped to x0 and x2 (the system realized x1 and x3 are duplicates and so not needed)
Later on the system found 3.000000000000+log(sqrt(exp((x2-x1)))) which is a bit crazy but looks like a plane
We can see on Wolfram alpha that an equivalent form of this equation is:
(x2 - x1)/2 + 3.000000000000
which is what we used to generate the dataset!
Link: https://www.wolframalpha.com/input/?i=3.000000000000%2Blog%28sqrt%28exp%28%28x2-x1%29%29%29%29
```
!ls -l /content/gdrive/My\ Drive/Lemay.ai_research/AI-Feynman/Code/results/
!cat /content/gdrive/My\ Drive/Lemay.ai_research/AI-Feynman/Code/results/solution_duplicateVarsExample.txt
```
The solver settled on *log(sqrt(exp(-x1 + x3))) + 3.0* which we know is correct
Now, that was a bit of a softball problem as it has an exact solution. Let's now add noise to the dataset and see how the library holds up
### Let's add a small amount of noise to every variable and see the fit quality
We do the same thing as before, but now we add or subtract noise to x0,x1,x2,x3 after generating y
```
import os
import random
os.chdir("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/example_data")
def getY(x01, x23):
    """Noise-free target: y = 3 + (x23 - x01) / 2."""
    return 0.5 * x23 - 0.5 * x01 + 3


def getRow():
    """Return one sample line 'x0 x1 x2 x3 y\\n' where x1/x3 duplicate x0/x2,
    y is computed from the clean values, and Gaussian noise (sigma = 0.05) is
    then added to every x (but not to y)."""
    clean = [random.random() for _ in range(4)]
    clean[1] = clean[0]
    clean[3] = clean[2]
    y = getY(clean[1], clean[3])
    noise = np.random.normal(0, 0.05, 4)
    noisy = clean + noise  # list + ndarray -> elementwise ndarray
    return " ".join(str(v) for v in (noisy[0], noisy[1], noisy[2], noisy[3], y)) + "\n"
# Write 100k noisy sample rows.
with open("duplicateVarsWithNoise100k.txt", "w") as f:
    for _ in range(100000):
        f.write(getRow())
f.close()  # NOTE(review): redundant -- the with-block has already closed the file
# switch back to the code directory
os.chdir("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/Code")
```
Let's have a look at the data
```
!head -n 20 ../example_data/duplicateVarsWithNoise100k.txt
```
Now let's plot the data
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('seaborn-whitegrid')
import numpy as np
df=pd.read_csv("../example_data/duplicateVarsWithNoise100k.txt",sep=" ",header=None)
df.plot.scatter(x=0, y=4)
df.plot.scatter(x=1, y=4)
df.plot.scatter(x=2, y=4)
df.plot.scatter(x=3, y=4)
%%writefile ai_feynman_duplicateVarsWithNoise.py
from S_run_aifeynman import run_aifeynman
run_aifeynman("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/example_data/","duplicateVarsWithNoise100k.txt",30,"14ops.txt", polyfit_deg=3, NN_epochs=600)
!chmod +777 /content/gdrive/My\ Drive/Lemay.ai_research/AI-Feynman/Code/*
!chmod +777 /content/gdrive/My\ Drive/Lemay.ai_research/AI-Feynman/*
# switch back to the code directory
os.chdir("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/Code/")
!pwd
!chmod +x /content/gdrive/My\ Drive/Lemay.ai_research/AI-Feynman/Code/*.scr
!ls -l *.scr
print(os.getcwd())
!sudo python3 ai_feynman_duplicateVarsWithNoise.py
%%writefile ai_feynman_duplicateVarsWithNoise3.py
from S_run_aifeynman import run_aifeynman
run_aifeynman("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/example_data/","duplicateVarsWithNoise.txt",30,"19ops.txt", polyfit_deg=3, NN_epochs=1000)
print(os.getcwd())
!sudo python3 ai_feynman_duplicateVarsWithNoise3.py
```
### No duplicate columns but same noise
```
import os
import random
import numpy as np
os.chdir("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/example_data")
def getY(x01, x23):
    """Noise-free target: y = 3 + (x23 - x01) / 2."""
    return 0.5 * x23 - 0.5 * x01 + 3


def getRow():
    """Return one sample line 'x1 x3 y\\n': two independent inputs, y computed
    from the clean values, then Gaussian noise (sigma = 0.05) added to the
    inputs only."""
    base = [0, random.random(), 0, random.random()]
    y = getY(base[1], base[3])
    noisy = base + np.random.normal(0, 0.05, 4)  # list + ndarray -> elementwise ndarray
    return " ".join(str(v) for v in (noisy[1], noisy[3], y)) + "\n"
# Write 100k noisy sample rows (only x1, x3 and y are stored).
with open("varsWithNoise.txt", "w") as f:
    for _ in range(100000):
        f.write(getRow())
f.close()  # NOTE(review): redundant -- the with-block has already closed the file
# switch back to the code directory
os.chdir("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/Code")
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('seaborn-whitegrid')
import numpy as np
df=pd.read_csv("../example_data/varsWithNoise.txt",sep=" ",header=None)
df.plot.scatter(x=0, y=2)
df.plot.scatter(x=1, y=2)
%%writefile ai_feynman_varsWithNoise.py
from S_run_aifeynman import run_aifeynman
run_aifeynman("/content/gdrive/My Drive/Lemay.ai_research/AI-Feynman/example_data/","varsWithNoise.txt",30,"14ops.txt", polyfit_deg=3, NN_epochs=1000)
!sudo python3 ai_feynman_varsWithNoise.py
```
| github_jupyter |
# oneDPL- Gamma Correction example
#### Sections
- [Gamma Correction](#Gamma-Correction)
- [Why use buffer iterators?](#Why-use-buffer-iterators?)
- _Lab Exercise:_ [Gamma Correction](#Lab-Exercise:-Gamma-Correction)
- [Image outputs](#Image-outputs)
## Learning Objectives
* Build a sample __DPC++ application__ to perform Image processing (gamma correction) using oneDPL.
## Gamma Correction
Gamma correction is an image processing algorithm where we enhance the image brightness and contrast levels to have a better view of the image.
The example below creates a bitmap image and applies gamma correction to it using the DPC++ library, offloading the work to a device. Once we run the program we can view the original image and the gamma corrected image in the corresponding cells below
In the below program we write a data parallel algorithm using the DPC++ library to leverage the computational power in __heterogenous computers__. The DPC++ platform model includes a host computer and a device. The host offloads computation to the device, which could be a __GPU, FPGA, or a multi-core CPU__.
As a first step in a regular DPC++ program we create a __queue__ here. We offload computation to a __device__ by submitting tasks to a queue. The programmer can choose CPU, GPU, FPGA, and other devices through the __selector__. This program uses the `default_selector{}`, which is passed as an argument when constructing the queue `q`; this means the DPC++ runtime selects the most capable device available at runtime.
We create a buffer, being responsible for moving data around and counting dependencies. DPC++ Library provides `dpstd::begin()` and `dpstd::end()` interfaces for getting buffer iterators and we implemented as below.
### Why use buffer iterators?
Using buffer iterators will ensure that memory is not copied back and forth in between each algorithm execution on device. The code example below shows how the same example above is implemented using buffer iterators which make sure the memory stays on device until the buffer is destructed.
We create the device policy using `make_device_policy` passing the queue as the argument. Finally we pass the execution policy as the first argument to the `std::for_each` function, and pass the __'begin'__ and __'end'__ buffer iterators as the second and third arguments. The Parallel STL API handles the data transfer and compute.
### Lab Exercise: Gamma Correction
* In this example the student will learn how to use oneDPL library to perform the gamma correction.
* Follow the __Steps 1 to 3__ in the below code to create a SYCL buffer, create buffer iterators, and then call the std::for each function with DPC++ support.
1. Select the code cell below, __follow the STEPS 1 to 3__ in the code comments, click run ▶ to save the code to file.
2. Next run ▶ the cell in the __Build and Run__ section below the code to compile and execute the code.
```
%%writefile gamma-correction/src/main.cpp
//==============================================================
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <iomanip>
#include <iostream>
#include <CL/sycl.hpp>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <oneapi/dpl/iterator>
#include "utils.hpp"
using namespace sycl;
using namespace std;
// Gamma-correction lab: fill a fractal image, gamma-correct one copy serially
// on the host, and (once the Step 1-3 lines are uncommented) the other copy on
// the device via oneDPL, then compare the two for correctness.
int main() {
  // Image size is width x height
  int width = 1440;
  int height = 960;

  Img<ImgFormat::BMP> image{width, height};
  ImgFractal fractal{width, height};

  // Lambda to process image with gamma = 2
  auto gamma_f = [](ImgPixel &pixel) {
    // Luminance-weighted grayscale value, normalized to [0, 1].
    auto v = (0.3f * pixel.r + 0.59f * pixel.g + 0.11f * pixel.b) / 255.0f;

    // gamma = 2: output intensity is 255 * v^2.
    auto gamma_pixel = static_cast<uint8_t>(255 * v * v);
    // NOTE(review): gamma_pixel is uint8_t, so this check can never fire;
    // a real clamp would need to happen before the cast.
    if (gamma_pixel > 255) gamma_pixel = 255;
    pixel.set(gamma_pixel, gamma_pixel, gamma_pixel, gamma_pixel);
  };

  // fill image with created fractal
  int index = 0;
  image.fill([&index, width, &fractal](ImgPixel &pixel) {
    int x = index % width;
    int y = index / width;

    // Clamp the fractal sample into the valid 8-bit range.
    auto fractal_pixel = fractal(x, y);
    if (fractal_pixel < 0) fractal_pixel = 0;
    if (fractal_pixel > 255) fractal_pixel = 255;
    pixel.set(fractal_pixel, fractal_pixel, fractal_pixel, fractal_pixel);

    ++index;
  });

  string original_image = "fractal_original.png";
  string processed_image = "fractal_gamma.png";

  // image2 is the copy to be processed on the device; image is the host reference.
  Img<ImgFormat::BMP> image2 = image;
  image.write(original_image);

  // call standard serial function for correctness check
  image.fill(gamma_f);

  // use default policy for algorithms execution
  auto policy = oneapi::dpl::execution::dpcpp_default;
  // We need to have the scope to have data in image2 after buffer's destruction
  {
    // ****Step 1: Uncomment the below line to create a buffer, being responsible for moving data around and counting dependencies
    //buffer<ImgPixel> b(image2.data(), image2.width() * image2.height());

    // create iterator to pass buffer to the algorithm
    // **********Step 2: Uncomment the below lines to create buffer iterators. These are passed to the algorithm
    //auto b_begin = oneapi::dpl::begin(b);
    //auto b_end = oneapi::dpl::end(b);

    //*****Step 3: Uncomment the below line to call std::for_each with DPC++ support
    //std::for_each(policy, b_begin, b_end, gamma_f);
  }
  image2.write(processed_image);

  // check correctness
  if (check(image.begin(), image.end(), image2.begin())) {
    cout << "success\n";
  } else {
    cout << "fail\n";
    return 1;
  }

  cout << "Run on "
       << policy.queue().get_device().template get_info<info::device::name>()
       << "\n";
  cout << "Original image is in " << original_image << "\n";
  cout << "Image after applying gamma correction on the device is in "
       << processed_image << "\n";

  return 0;
}
```
#### Build and Run
Select the cell below and click run ▶ to compile and execute the code:
```
! chmod 755 q; chmod 755 run_gamma_correction.sh; if [ -x "$(command -v qsub)" ]; then ./q run_gamma_correction.sh; else ./run_gamma_correction.sh; fi
```
_If the Jupyter cells are not responsive or if they error out when you compile the code samples, please restart the Jupyter Kernel:
"Kernel->Restart Kernel and Clear All Outputs" and compile the code samples again_
### Image outputs
Once you run the program successfully, it creates the gamma-corrected image and the original image. You can see the difference by running the two cells below and comparing them visually.
##### View the gamma corrected Image
Select the cell below and click run ▶ to view the generated image using gamma correction:
```
from IPython.display import display, Image
display(Image(filename='gamma-correction/build/src/fractal_gamma.png'))
```
##### View the original Image
Select the cell below and click run ▶ to view the original generated image (before gamma correction):
```
from IPython.display import display, Image
display(Image(filename='gamma-correction/build/src/fractal_original.png'))
```
# Summary
In this module you will have learned how to apply gamma correction to Images using Data Parallel C++ Library
| github_jupyter |
```
import healpy as hp
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import astropy.units as u
```
# White noise NET in Radio-astronomy and Cosmology
> Create a white noise map and compare with power spectrum expected from the NET
- categories: [cosmology, python, healpy]
Noise-Equivalent Temperature (NET) is a measure of the sensitivity of a detector. In cosmology it is often quoted
in $\mu K \sqrt{s}$, i.e. it is the sensitivity per unit time, and it can be divided by the square root of the integration time to
get the actual standard deviation of the white noise of the instrument.
For example, let's consider a white noise NET of $200 \mu K \sqrt{s}$:
it means that if you integrate for 100 seconds for each pixel, the standard deviation will be $20 \mu K$.
```
# NET in microkelvin * sqrt(seconds); dividing by sqrt(integration time)
# yields the per-pixel white-noise standard deviation.
net = 200 * u.Unit("uK * sqrt(s)")
net
integration_time_per_pixel = 100 * u.s
standard_deviation = net / np.sqrt(integration_time_per_pixel)
```
## Create a white noise map
Now that we have an estimate of the standard deviation per pixel, we can use `numpy` to create a map of gaussian white noise.
```
nside = 128
npix = hp.nside2npix(nside)
# Independent Gaussian noise per pixel with the NET-derived standard deviation.
m = np.random.normal(scale = standard_deviation.value, size=npix) * standard_deviation.unit
hp.mollview(m, unit=m.unit, title="White noise map")
```
## Power spectrum
Finally we can compute the angular power spectrum with `anafast`, i.e. the power as a function of the angular scales, from low $\ell$ values for large angular scales, to high $\ell$ values for small angular scales.
At low $\ell$ there is not much statistics and the power spectrum is biased, but if we exclude lower ells, we can have an estimate of the white noise $C_\ell$ coefficients. We can then compare with the theoretical power computed as:
$$ C_\ell = \Omega_{pix}\sigma^2 $$
Where: $\Omega_{pix}$ is the pixel area in square radians and $\sigma^2$ is the white noise variance.
```
cl = hp.anafast(m)
# Low multipoles carry few modes, so skip them when estimating the flat noise level.
cl[100:].mean()
pixel_area = hp.nside2pixarea(nside)
# Expected white-noise spectrum: C_ell = Omega_pix * sigma^2.
white_noise_cl = standard_deviation.value**2 * pixel_area
white_noise_cl
plt.figure(figsize=(6,4))
plt.loglog(cl, label="Map power spectrum", alpha=.7)
plt.hlines(white_noise_cl, 0, len(cl), label="White noise level")
plt.xlabel("$\ell$")
plt.ylabel("$C_\ell [\mu K ^ 2]$");
```
## Masking
In case we are removing some pixels from a map, for example to mask out a strong signal (e.g. the Milky Way), our estimate of the power spectrum on the partial sky is lower.
However we assume that the properties of the noise will be the same also in the masked region.
At first order, for simple masks, we can just correct for the amplitude by dividing the power spectrum by the sky fraction.
```
# Mask a band of pixels around the middle of the map (UNSEEN marks bad pixels).
m.value[len(m)//2-30000:len(m)//2+30000] = hp.UNSEEN
hp.mollview(m, unit=m.unit, title="White noise map")
cl_masked = hp.anafast(m)
# The masked spectrum sits below the full-sky one because fewer pixels contribute.
plt.figure(figsize=(6,4))
plt.loglog(cl, label="Map power spectrum", alpha=.7)
plt.loglog(cl_masked, label="Map power spectrum (Masked)", alpha=.7)
plt.hlines(white_noise_cl, 0, len(cl), label="White noise level")
plt.xlabel("$\ell$")
plt.ylabel("$C_\ell [\mu K ^ 2]$")
plt.legend()
# First-order correction: divide the masked spectrum by the observed sky fraction.
sky_fraction = hp.mask_good(m).sum() / len(m)
print(sky_fraction)
plt.figure(figsize=(6,4))
plt.loglog(cl, label="Map power spectrum", alpha=.7)
plt.loglog(cl_masked / sky_fraction, label="Map power spectrum (Masked) - corrected", alpha=.7)
plt.hlines(white_noise_cl, 0, len(cl), label="White noise level")
plt.xlabel("$\ell$")
plt.ylabel("$C_\ell [\mu K ^ 2]$")
plt.legend();
```
| github_jupyter |
**TASK-3 Exploratory Data Analysis - Retail**
**IMPORTING THE LIBRARIES**
```
import numpy as np # linear algebra
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
```
**LOADING THE DATASET**
```
# Load the Superstore sample dataset.
df= pd.read_csv("/content/sample_data/SampleSuperstore.csv")
df.head()
```
**PRINT RANDOM NINE ROW**
```
df.sample(9)
```
**PRINT LAST FIVE ROW**
```
df.tail()
```
**CHECK THE MISSING VALUE**
```
df.isnull().sum()
```
**FINDING TOTAL NUMBER OF NULL VALUES IN A DATASET**
```
print("total number of null values = ",df.isnull().sum().sum())
```
**FULL SUMMARY OF THE DATAFRAME**
```
print(df.info())
```
**STASTICAL DETAILS OF THE DATASET**
```
df.describe()
```
**SHAPE OF THE DATASET**
```
df.shape
```
**FIND THE dtypes IN THE DATASET**
```
df.dtypes
```
**FINDING ALL THE COLUMN NAMES INSIDE THE DATASET**
```
df.columns
```
**CHECK THE DATSET FOR DUPLICATE AND DROPPING ELEMENT**
```
df.duplicated().sum()
df.drop_duplicates()
```
**`nunique` returns a Series with the number of distinct observations over the requested axis**
```
df.nunique()
```
**FIND THE CORRELATION OF THE DATASET**
```
df.corr()
```
**FIND THE COVARIANCE OF THE DATASET**
```
df.cov()
```
**Find the Series containing counts of unique values**
```
df.value_counts()
```
**DELETING THE VARIABLE**
```
# Postal Code is an identifier, not a predictive feature; drop it into a new
# frame df1 (df itself is left intact).
# NOTE: axis=1 is redundant when columns= is given.
col=['Postal Code']
df1=df.drop(columns=col,axis=1)
```
**VISUALIZATION OF THE DATASET**
```
# Category per sub-category bar chart.
plt.figure(figsize=(16,8))
plt.bar('Sub-Category','Category', data=df)
plt.show()
# Row counts per state.
print(df1['State'].value_counts())
plt.figure(figsize=(15,8))
sns.countplot(x=df1['State'])
plt.xticks(rotation=90)
plt.show()
# Row counts per sub-category.
print(df['Sub-Category'].value_counts())
plt.figure(figsize=(12,6))
sns.countplot(x=df['Sub-Category'])
plt.xticks(rotation=90)
plt.show()
```
**HEATMAP OF DATASET**
```
# Correlation heatmap of the numeric columns.
fig,axes = plt.subplots(1,1,figsize=(9,6))
sns.heatmap(df.corr(), annot= True)
plt.show()
# Covariance heatmap of the numeric columns.
fig,axes = plt.subplots(1,1,figsize=(9,6))
sns.heatmap(df.cov(), annot= True)
plt.show()
```
**COUNTPLOT**
```
sns.countplot(x=df['Segment'])
sns.countplot(x=df['Region'])
```
**BAR PLOT**
```
# Profit per sub-category bar plot.
plt.figure(figsize=(40,25))
sns.barplot(x=df['Sub-Category'], y=df['Profit'])
```
**LINE PLOT**
```
# Profit as a function of discount.
plt.figure(figsize = (10,4))
# x and y must be passed by keyword: positional data arguments to seaborn
# plotting functions were deprecated in 0.12 and later removed.
sns.lineplot(x='Discount', y='Profit', data=df, color='r', label='Discount')
plt.legend()
```
**HISTOGRAM**
```
# Histograms for every numeric column of df1.
df1.hist(bins=50 ,figsize=(20,15))
plt.show()
```
**PAIR PLOT**
```
# NOTE(review): this bare assignment creates a variable only -- it is never
# passed to pairplot, so it does not affect the figure size.
figsize=(15,10)
# Pairwise scatter matrix colored by sub-category.
sns.pairplot(df1,hue='Sub-Category')
```
So Now we Grouped or sum the sales ,profit,discount,quantity according to every state of region and also according to sub-categories sales
```
# Sum Quantity/Discount/Sales/Profit per (Ship Mode, Segment, Category,
# Sub-Category, State, Region) combination.
# NOTE: selecting several columns from a groupby requires a list (double
# brackets) -- the old tuple form df.groupby(...)['a','b'] was deprecated and
# removed in pandas 2.0.
grouped=pd.DataFrame(df.groupby(['Ship Mode','Segment','Category','Sub-Category','State','Region'])[['Quantity','Discount','Sales','Profit']].sum().reset_index())
grouped
```
**sum, mean, min, max, count, median, standard deviation and variance of Profit for each state**
```
# Per-state summary statistics of Profit.
df.groupby("State").Profit.agg(["sum","mean","min","max","count","median","std","var"])
```
**APPLYING KMEANS**
```
# Elbow-method sweep: fit KMeans for k = 1..10 on numeric columns 9-12
# (assumed to be Sales/Quantity/Discount/Profit -- TODO confirm column order).
x = df.iloc[:, [9, 10, 11, 12]].values
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++',
                    max_iter = 300, n_init = 10, random_state = 0).fit(x)
    wcss.append(kmeans.inertia_)  # inertia = within-cluster sum of squares
# NOTE(review): wcss is collected but never plotted, so no elbow chart is shown.
sns.set_style("whitegrid")
sns.FacetGrid(df, hue ="Sub-Category",height = 6).map(plt.scatter,'Sales','Quantity')
# NOTE(review): `kmeans` here is whatever the loop fitted last (k = 10).
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1],
            s = 100, c = 'yellow', label = 'Centroids')
plt.legend()
sns.pairplot(df1)
fig, axes = plt.subplots(figsize = (10 , 10))
sns.boxplot(df['Sales'])
fig, axes = plt.subplots(figsize = (10 , 10))
sns.boxplot(df['Discount'])
fig, axes = plt.subplots(figsize = (10 , 10))
sns.boxplot(df['Profit'])
Q1 = df.quantile(q = 0.25, axis = 0, numeric_only = True, interpolation = 'linear')
Q3 = df.quantile(q = 0.75, axis = 0, numeric_only = True, interpolation = 'linear')
IQR = Q3 - Q1
print(IQR)
df.value_counts().nlargest().plot(kind = 'bar' , figsize = (10 , 5))
```
**SCATTER PLOT**
```
# Scatter plot of Profit against Sales using the object-oriented API.
fig, ax = plt.subplots(figsize=(10, 6))
ax.scatter(x=df["Sales"], y=df["Profit"])
ax.set(xlabel='Sales', ylabel='Profit')
plt.show()
```
**DISTRIBUTION PLOT**
```
# Summary statistics for the Sales column.
print(df['Sales'].describe())
plt.figure(figsize = (9 , 8))
# Histogram + KDE of Sales.
# NOTE(review): sns.distplot is deprecated (removed in seaborn 0.14);
# sns.histplot(df['Sales'], kde=True, bins=100) is the modern equivalent.
sns.distplot(df['Sales'], color = 'b', bins = 100, hist_kws = {'alpha': 0.4});
```
| github_jupyter |
# GGS416 Satellite Image Analysis
In this tutorial we are going to cover:
- Spatial referencing systems.
- Satellite image metadata.
## Working with a Coordinate Reference System (CRS)
We need to be able to map data points to precise locations across space. Indeed, this underpins our ability to process and analyze satellite images.
There are hundreds of different types of Coordinate Reference Systems, with many geographical regions specifying their own to enable local consistency and precision.
- A **Geographic Coordinate System** measures locations on Earth in latitude and longitude and is based on either a spherical or ellipsoidal coordinate system.
- Latitude is measured in degrees north or south of the equator.
- Longitude is measured in degrees east or west of a prime meridian (a meridian divides a spheroid into two hemispheres).
- See the World Geodetic System (WGS84):https://en.wikipedia.org/wiki/World_Geodetic_System
- WGS84 can be defined in `geopandas` by the code 'epsg:4326'.
- A **Projected Coordinate System** instead represents Earth locations via a specific map projection using cartesian coordinates (x,y) on a planar (2D) surface.
- This approach maps a curved Earth surface onto a flat 2D plane.
- Common units include metric meters and imperial feet.
- See the Universal Transverse Mercator (UTM): https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system
Today we will work with different coordinate reference systems, after exploring image metadata.
## Satellite imagery metadata
We often have information about our data which is not actually the data itself.
This is referred to as **Metadata**.
('Meta' meaning 'above' or 'beyond')
We will need to import `rasterio` so that we can load the Planet image data we downloaded in the previous tutorial.
```
# Load rasterio into our jupyter session
import rasterio
```
Let's get started using the 4-band Planet image we downloaded in the previous session.
We will need to specify the image name, and then use the `rasterio` open function to load the raster.
As we downloaded the images last week, in the 'week3' directory, we will need to navigate to their location.
The desired image filename is '20190321_174348_0f1a_3B_AnalyticMS.tif', which is in the 'week3' folder.
As we need to go up one folder, we can use a double period ('..').
Then we can change into the 'week3' folder.
We can now put that together into a single path string, as follows:
```
# This path instructs the function to go up one directory ('..') and then
# into the 'week3' folder:
image_filename = "../week3/20190321_174348_0f1a_3B_AnalyticMS.tif"
# Remember that the 4-band image is comprised of blue, green, red and near-infrared
# PlanetScope images should be in a UTM projection.
my_image = rasterio.open(image_filename)
# We can view the rasterio object as follows:
my_image
```
We can now begin to explore information about the loaded imagery.
For example, we can view the filename for the given image asset:
```
print(my_image.name)
```
We can also view the image tags associated which include:
- 'AREA_OR_POINT' - indication of whether this is an area or a point representation.
- 'TIFFTAG_DATETIME' - the specific date and time the image was taken in Coordinated Universal Time (UTC).
```
print(my_image.tags())
```
In case we need to check, we can obtain the number of bands and indexes which are present within this image:
```
# Present number of image bands
print(my_image.count)
# Present number of indexes
print(my_image.indexes)
```
By querying the image object with these basic functions, we can establish information prior to visualizing.
Finally, we can unpack these different layers as follows (remember we practiced unpacking in the intro to python lecture):
```
# Unpacking our image layers into separate variables for blue, green, red and infrared:
blue, green, red, nir = my_image.read()
# Let's inspect our blue variable
blue
```
Remember that these are `numpy` arrays:
e.g. `array([0, 0, ..., 0, 0])`
There are actually many ways we can unpack these bands, they might just take a few more lines of code. For example:
```
blue = my_image.read(1)
green = my_image.read(2)
red = my_image.read(3)
nir = my_image.read(4)
blue
```
Or it is possible to just read all the layers at once, creating a large multidimensional array:
```
data = my_image.read()
data
```
As this multidimensional array is essentially a list of lists, we can still index into the array like we have previously in the Python tutorial example:
```
# Extract the blue array which will be in position zero
blue = data[0]
blue
```
Finally, we can examine the dimensions of one of these layers:
```
# Print the data type of the blue layer (which will be a NumPy data type)
print(blue.dtype)
# Using the blue band as an example, examine the width & height of the image
# (in pixels). NumPy arrays are indexed (rows, columns): shape[0] is the image
# HEIGHT and shape[1] the WIDTH — the original cell had the two swapped.
h = blue.shape[0]
w = blue.shape[1]
# Let's print the dimensions of the blue layer
print("width: {w}, height: {h}".format(w=w, h=h))
```
We can get the bounds of the current image in the current projected coordinate reference system using the bounds command.
Remember, PlanetScope data should be in UTM, the Universal Transverse Mercator system: https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system
The measurement unit should be meters (as opposed to degrees when using lat-lon coordinates via WGS84).
```
# Find the bounding box of the image.
# The bounding box is the minimum possible box which envelopes the present data.
print(my_image.bounds)
```
We can then get the map unit dimensions in the original units of the coordinate reference system, by subtracting the different bounds of the image, as follows:
```
# Find the image bound in the original measurement units
width_in_projected_units = my_image.bounds.right - my_image.bounds.left
height_in_projected_units = my_image.bounds.top - my_image.bounds.bottom
print("Width: {}, Height: {}".format(width_in_projected_units, height_in_projected_units))
```
Remember that this raster image will be comprised of a grid.
We can therefore find the total number of rows and columns by using the height and width commands, as follows:
```
# Find the height and width of our image using the relevant functions:
print("Rows: {}, Columns: {}".format(my_image.height, my_image.width))
```
We may want to clarify the dimensions of a single pixel in our raster grid.
Thus, we can find the resolution of the x and y pixels as follows:
```
# Find the resolution of a single pixel
x_length = (my_image.bounds.right - my_image.bounds.left) / my_image.width
y_length = (my_image.bounds.top - my_image.bounds.bottom) / my_image.height
print("Length of x is: {}. Length of y is: {}".format(x_length, y_length))
print("Therefore, it is {} that the pixels are square, with dimensions {} x {} meters.".format(
x_length == y_length, x_length, y_length))
```
We can actually get the CRS of the data as follows (which is super handy to know):
```
# Print the current coordinate reference system of the image
my_image.crs
```
It is important for us to be able to change the pixel coordinates, which we can do via an affine transformation.
See here for more info: https://en.wikipedia.org/wiki/Affine_transformation
Think of an affine transformation as a geometry transformation. Let's cover a basic example:
```
# To convert from pixel coordinates to world coordinates, we need the min and max index values.
# Upper left pixel coordinates
row_min = 0
col_min = 0
# Lower right pixel coordinates.
# Remember our index starts at zero, hence we need to subtract 1.
row_max = my_image.height - 1
col_max = my_image.width - 1
print(
'The top left coordinate is {}.'.format((row_min,col_min)),
'The lower right coordinate is {}.'.format((row_max, col_max)),
)
```
Now we can transform these coordinates using the available `.transform` function.
This converts our given row coordinates into our present CRS coordinates.
```
# Transform pixel coordinates to world coordinates with the dataset's affine
# transformation. Affine transforms in rasterio map (column, row) -> (x, y),
# so the COLUMN index must come first; passing (row, col) — as the original
# cell did — silently yields wrong coordinates for non-square images.
topleft = my_image.transform * (col_min, row_min)
botright = my_image.transform * (col_max, row_max)
print("Top left corner coordinates: {}".format(topleft))
print("Bottom right corner coordinates: {}".format(botright))
```
Finally, we can access any of our image metadata by using the `.profile` function, as follows:
```
my_image.profile
```
As this is a dictionary, we can just index into it as we usually do, as follows:
```
print(my_image.profile['crs'], my_image.profile['dtype'])
```
| github_jupyter |
# Regression Week 3: Assessing Fit (polynomial regression)
In this notebook you will compare different regression models in order to assess which model fits best. We will be using polynomial regression as a means to examine this topic. In particular you will:
* Write a function to take an SArray and a degree and return an SFrame where each column is the SArray to a polynomial value up to the total degree e.g. degree = 3 then column 1 is the SArray column 2 is the SArray squared and column 3 is the SArray cubed
* Use matplotlib to visualize polynomial regressions
* Use matplotlib to visualize the same polynomial degree on different subsets of the data
* Use a validation set to select a polynomial degree
* Assess the final fit using test data
We will continue to use the House data from previous notebooks.
# Fire up graphlab create
```
import graphlab
```
Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree.
The easiest way to apply a power to an SArray is to use the .apply() and lambda x: functions.
For example to take the example array and compute the third power we can do as follows: (note running this cell the first time may take longer than expected since it loads graphlab)
```
tmp = graphlab.SArray([1., 2., 3.])
tmp_cubed = tmp.apply(lambda x: x**3)
print tmp
print tmp_cubed
```
We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself).
```
ex_sframe = graphlab.SFrame()
ex_sframe['power_1'] = tmp
print ex_sframe
```
# Polynomial_sframe function
Using the hints above complete the following function to create an SFrame consisting of the powers of an SArray up to a specific degree:
```
def polynomial_sframe(feature, degree):
    """Return an SFrame with columns power_1 .. power_degree.

    Column power_i holds the input SArray raised elementwise to the i-th
    power; power_1 is the feature itself.

    Parameters:
        feature: graphlab SArray of numeric values.
        degree:  maximal polynomial degree; assumed to be >= 1.
    """
    # initialize the SFrame:
    poly_sframe = graphlab.SFrame()
    # set poly_sframe['power_1'] equal to the passed feature.
    # (The original cell documented this step but never performed it, so the
    # function always returned an EMPTY SFrame.)
    poly_sframe['power_1'] = feature
    # first check if degree > 1
    if degree > 1:
        # loop over the remaining degrees: start at 2 and stop at degree
        for power in range(2, degree + 1):
            # first we'll give the column a name:
            name = 'power_' + str(power)
            # then assign poly_sframe[name] to the appropriate power of feature
            # (bind `power` as a default to avoid any late-binding surprises)
            poly_sframe[name] = feature.apply(lambda x, p=power: x ** p)
    return poly_sframe
```
To test your function consider the smaller tmp variable and what you would expect the outcome of the following call:
```
print polynomial_sframe(tmp, 3)
```
# Visualizing polynomial regression
Let's use matplotlib to visualize what a polynomial regression looks like on some real data.
```
sales = graphlab.SFrame('kc_house_data.gl/')
```
As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.
```
sales = sales.sort(['sqft_living', 'price'])
```
Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like.
```
poly1_data = polynomial_sframe(sales['sqft_living'], 1)
poly1_data['price'] = sales['price'] # add price to the data since it's the target
```
NOTE: for all the models in this notebook use validation_set = None to ensure that all results are consistent across users.
```
model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None)
#let's take a look at the weights before we plot
model1.get("coefficients")
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(poly1_data['power_1'],poly1_data['price'],'.',
poly1_data['power_1'], model1.predict(poly1_data),'-')
```
Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price we then ask it to print these as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask these to be plotted as a line '-'.
We can see, not surprisingly, that the predicted values all fall on a line, specifically the one with slope 280 and intercept -43579. What if we wanted to plot a second degree polynomial?
```
poly2_data = polynomial_sframe(sales['sqft_living'], 2)
my_features = poly2_data.column_names() # get the name of the features
poly2_data['price'] = sales['price'] # add price to the data since it's the target
model2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None)
model2.get("coefficients")
plt.plot(poly2_data['power_1'],poly2_data['price'],'.',
poly2_data['power_1'], model2.predict(poly2_data),'-')
```
The resulting model looks like half a parabola. Try on your own to see what the cubic looks like:
Now try a 15th degree polynomial:
What do you think of the 15th degree polynomial? Do you think this is appropriate? If we were to change the data do you think you'd get pretty much the same curve? Let's take a look.
# Changing the data and re-learning
We're going to split the sales data into four subsets of roughly equal size. Then you will estimate a 15th degree polynomial model on all four subsets of the data. Print the coefficients (you should use .print_rows(num_rows = 16) to view all of them) and plot the resulting fit (as we did above). The quiz will ask you some questions about these results.
To split the sales data into four subsets, we perform the following steps:
* First split sales into 2 subsets with `.random_split(0.5, seed=0)`.
* Next split the resulting subsets into 2 more subsets each. Use `.random_split(0.5, seed=0)`.
We set `seed=0` in these steps so that different users get consistent results.
You should end up with 4 subsets (`set_1`, `set_2`, `set_3`, `set_4`) of approximately equal size.
Fit a 15th degree polynomial on set_1, set_2, set_3, and set_4 using sqft_living to predict prices. Print the coefficients and make a plot of the resulting model.
Some questions you will be asked on your quiz:
**Quiz Question: Is the sign (positive or negative) for power_15 the same in all four models?**
**Quiz Question: (True/False) the plotted fitted lines look the same in all four plots**
# Selecting a Polynomial Degree
Whenever we have a "magic" parameter like the degree of the polynomial there is one well-known way to select these parameters: validation set. (We will explore another approach in week 4).
We split the sales dataset 3-way into training set, test set, and validation set as follows:
* Split our sales data into 2 sets: `training_and_validation` and `testing`. Use `random_split(0.9, seed=1)`.
* Further split our training data into two sets: `training` and `validation`. Use `random_split(0.5, seed=1)`.
Again, we set `seed=1` to obtain consistent results for different users.
Next you should write a loop that does the following:
* For degree in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] (to get this in python type range(1, 15+1))
* Build an SFrame of polynomial data of train_data['sqft_living'] at the current degree
* hint: my_features = poly_data.column_names() gives you a list e.g. ['power_1', 'power_2', 'power_3'] which you might find useful for graphlab.linear_regression.create( features = my_features)
* Add train_data['price'] to the polynomial SFrame
* Learn a polynomial regression model to sqft vs price with that degree on TRAIN data
* Compute the RSS on VALIDATION data (here you will want to use .predict()) for that degree and you will need to make a polynmial SFrame using validation data.
* Report which degree had the lowest RSS on validation data (remember python indexes from 0)
(Note you can turn off the print out of linear_regression.create() with verbose = False)
**Quiz Question: Which degree (1, 2, …, 15) had the lowest RSS on Validation data?**
Now that you have chosen the degree of your polynomial using validation data, compute the RSS of this model on TEST data. Report the RSS on your quiz.
**Quiz Question: what is the RSS on TEST data for the model with the degree selected from Validation data?**
| github_jupyter |
```
# Import all the necessary files!
import os
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
# Download the inception v3 weights
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \
-O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5
# Import the inception model
from tensorflow.keras.applications.inception_v3 import InceptionV3
# Create an instance of the inception model from the local pre-trained weights
local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
pre_trained_model = InceptionV3(input_shape = (150,150,3),
include_top = False,
weights = None)
pre_trained_model.load_weights(local_weights_file)
# Make all the layers in the pre-trained model non-trainable
for layer in pre_trained_model.layers:
layer.trainable = False
# Print the model summary
#pre_trained_model.summary()
# Expected Output is extremely large, but should end with:
#batch_normalization_v1_281 (Bat (None, 3, 3, 192) 576 conv2d_281[0][0]
#__________________________________________________________________________________________________
#activation_273 (Activation) (None, 3, 3, 320) 0 batch_normalization_v1_273[0][0]
#__________________________________________________________________________________________________
#mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_275[0][0]
# activation_276[0][0]
#__________________________________________________________________________________________________
#concatenate_5 (Concatenate) (None, 3, 3, 768) 0 activation_279[0][0]
# activation_280[0][0]
#__________________________________________________________________________________________________
#activation_281 (Activation) (None, 3, 3, 192) 0 batch_normalization_v1_281[0][0]
#__________________________________________________________________________________________________
#mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_273[0][0]
# mixed9_1[0][0]
# concatenate_5[0][0]
# activation_281[0][0]
#==================================================================================================
#Total params: 21,802,784
#Trainable params: 0
#Non-trainable params: 21,802,784
last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
# Expected Output:
# ('last layer output shape: ', (None, 7, 7, 768))
# Define a Callback class that stops training once accuracy reaches 99.9%
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        """Stop training when the epoch's training accuracy exceeds 99.9%.

        `logs` may be None or may lack the 'acc' key (newer Keras versions
        report it as 'accuracy'), so guard before comparing — the original
        `logs.get('acc') > 0.999` raises TypeError when the key is missing.
        A mutable default (`logs={}`) is also avoided.
        """
        logs = logs or {}
        acc = logs.get('acc', logs.get('accuracy'))
        if acc is not None and acc > 0.999:
            print("\nReached 99.9% accuracy so cancelling training!")
            self.model.stop_training = True
from tensorflow.keras.optimizers import RMSprop
# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = layers.Dense(1024, activation='relu')(x)
# Add a dropout rate of 0.2
x = layers.Dropout(0.2)(x)
# Add a final sigmoid layer for classification
x = layers.Dense(1, activation = 'sigmoid')(x)
model = Model(pre_trained_model.input, x)
model.compile(optimizer = RMSprop(lr=0.0001),
loss = 'binary_crossentropy',
metrics = ['acc'])
#model.summary()
# Expected output will be large. Last few lines should be:
# mixed7 (Concatenate) (None, 7, 7, 768) 0 activation_248[0][0]
# activation_251[0][0]
# activation_256[0][0]
# activation_257[0][0]
# __________________________________________________________________________________________________
# flatten_4 (Flatten) (None, 37632) 0 mixed7[0][0]
# __________________________________________________________________________________________________
# dense_8 (Dense) (None, 1024) 38536192 flatten_4[0][0]
# __________________________________________________________________________________________________
# dropout_4 (Dropout) (None, 1024) 0 dense_8[0][0]
# __________________________________________________________________________________________________
# dense_9 (Dense) (None, 1) 1025 dropout_4[0][0]
# ==================================================================================================
# Total params: 47,512,481
# Trainable params: 38,537,217
# Non-trainable params: 8,975,264
# Get the Horse or Human dataset
!wget --no-check-certificate https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip -O /tmp/horse-or-human.zip
# Get the Horse or Human Validation dataset
!wget --no-check-certificate https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip -O /tmp/validation-horse-or-human.zip
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import zipfile
local_zip = '//tmp/horse-or-human.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/training')
zip_ref.close()
local_zip = '//tmp/validation-horse-or-human.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/validation')
zip_ref.close()
train_horses_dir = "/tmp/training/horses"
train_humans_dir = "/tmp/training/humans"
validation_horses_dir ="/tmp/validation/horses"
validation_humans_dir = "/tmp/validation/humans"
train_horses_fnames = os.listdir(train_horses_dir)
train_humans_fnames = os.listdir(train_humans_dir)
validation_horses_fnames = os.listdir(validation_horses_dir)
validation_humans_fnames = os.listdir(validation_humans_dir)
print(len(train_horses_fnames))
print(len(train_humans_fnames))
print(len(validation_horses_fnames))
print(len(validation_humans_fnames))
# Expected Output:
# 500
# 527
# 128
# 128
# Define our example directories and files
train_dir = '/tmp/training'
validation_dir = '/tmp/validation'
# Add our data-augmentation parameters to ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale = 1.0/255.,
rotation_range = 40,
height_shift_range = 0.2,
width_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode='nearest'
)
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1.0/255. )
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150,150),
batch_size= 64,
class_mode ='binary'
)
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size = (150,150),
batch_size = 32,
class_mode='binary'
)
# Expected Output:
# Found 1027 images belonging to 2 classes.
# Found 256 images belonging to 2 classes.
# Run this and see how many epochs it should take before the callback
# fires, and stops training at 99.9% accuracy
# (It should take less than 100 epochs)
callbacks = myCallback()
history = model.fit_generator(
train_generator,
steps_per_epoch = 16,
epochs = 100,
validation_data = validation_generator,
validation_steps = 4,
verbose = 1,
callbacks= [callbacks]
)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
```
| github_jupyter |
## Import Libraries and Read Dataset
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
#machine learning libraries
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV #cross validation and split dataset
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis, LocalOutlierFactor
from sklearn.decomposition import pca
#warning library
import warnings
warnings.filterwarnings("ignore")
#read dataset into data variable
data = pd.read_csv("data.csv")
```
## Descriptive Statistic
```
# preview dataset
data.head()
# Dataset dimensions - (rows, columns)
data.shape
# Features data-type
data.info()
# Statistical summary
data.describe().T
# Count of null values
data.isnull().sum()
```
## Observations:
1. There are a total of 569 records and 33 features in the dataset.
2. Each feature can be integer, float or object dataype.
3. There are zero NaN values in the dataset.
4. In the outcome column, M represents malignant cancer and B represents benign cancer.
# Data Preprocessing
```
# drop unnecessary columns (axis=1 -> column drop)
data.drop(['Unnamed: 32','id'], inplace=True, axis=1)
# rename diagnosis as the target feature
data = data.rename(columns={"diagnosis":"target"})
# visualize target feature count; pass the series as a keyword argument —
# positional x/y for seaborn functions was removed in seaborn >= 0.12
sns.countplot(x=data["target"])
print(data.target.value_counts()) # B 357, M 212
# encode the string target as an integer: M (malignant) -> 1, B (benign) -> 0
data["target"] = [1 if i.strip() == 'M' else 0 for i in data.target]
```
# Exploratory Data Analysis
```
#Correlation Matrix
corr_matrix = data.corr()
sns.clustermap(corr_matrix, annot=True, fmt = ".2f")
plt.title("Correlation Matrix")
plt.show()
#Correlation Matrix with values bigger than 0.75
threshold = 0.75
filtre = np.abs(corr_matrix["target"]) > threshold
corr_features = corr_matrix.columns[filtre].tolist()
sns.clustermap(data[corr_features].corr(), annot=True, fmt = ".2f")
plt.title("Correlation Between features with correlation threshold 0.75")
plt.show()
""" there are correlated features """
#pair plot
sns.pairplot(data[corr_features], diag_kind="kde",markers="+",hue="target")
plt.show()
"""there are skewness"""
```
# Outlier Detection
```
# --- Outlier detection with the Local Outlier Factor (LOF) ---
# Separate the target from the feature matrix.
y = data["target"]
X = data.drop(["target"], axis=1)  # axis=1 -> column drop
columns = X.columns.tolist()
# LOF compares the local density of each sample with that of its neighbours;
# negative_outlier_factor_ near -1 indicates an inlier, and much smaller
# (more negative) values indicate outliers.
clf = LocalOutlierFactor()
y_pred = clf.fit_predict(X)  # 1 = inlier, -1 = outlier (not used below)
X_score = clf.negative_outlier_factor_
outlier_score = pd.DataFrame()
outlier_score["score"] = X_score
# Flag samples whose LOF score falls below the chosen threshold.
threshold_outlier = -2
filtre_outlier = outlier_score["score"] < threshold_outlier
outlier_index = outlier_score[filtre_outlier].index.tolist()
# Visualize the flagged outliers in the plane of the first two features.
plt.figure()
plt.scatter(X.iloc[outlier_index,0], X.iloc[outlier_index,1], color="b", s = 50, label = "Outlier Points")
plt.scatter(X.iloc[:,0], X.iloc[:,1], color="k", s = 3, label = "Data Points")
# Circle radius encodes how outlying each point is (min-max normalized score);
# circles are drawn around the points to show outliers.
radius = (X_score.max() - X_score)/(X_score.max() - X_score.min())
outlier_score["radius"] = radius
plt.scatter(X.iloc[:,0], X.iloc[:,1], s = 1000*radius, edgecolors="r", facecolors = "none" , label = "Outlier Scores")
plt.legend()
plt.show()
# Drop the flagged outliers from both the features and the target.
X = X.drop(outlier_index)
y = y.drop(outlier_index).tolist()
```
# Train Test Split
```
#train test split
test_size = 0.2 # 20% test, %80 train
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_size, random_state = 42 ) #random_state is set to 42 to ensure consistency
```
# Standardization
```
"""Since there is a big difference between the values in the dataset, we should standardize."""
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train_df = pd.DataFrame(X_train, columns = columns)
X_train_df["target"] = y_train
data_melted = pd.melt(X_train_df, id_vars="target", var_name="features", value_name="value")
plt.figure()
sns.boxplot(x = "features", y = "value", hue = "target", data = data_melted)
plt.xticks(rotation = 90)
plt.show()
```
# Basic KNN Model
```
#model creation
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
#test the resulting model with X_test
y_predict = knn.predict(X_test)
cm = confusion_matrix(y_test, y_predict) #confusion matrix
acc = accuracy_score(y_test, y_predict) #accuracy value
print("Confusion Matrix:", cm)
print("Basic Knn Accuracy Score:", acc)
def KNN_Best_Params(x_train, x_test, Y_train, Y_test):
    """Grid-search a KNN classifier and report train/test performance.

    Runs a 10-fold cross-validated grid search over the number of neighbours
    (1-30) and the weighting scheme, refits a KNeighborsClassifier with the
    best parameters, and prints accuracies and confusion matrices for both
    the train and test splits.

    Returns:
        The fitted GridSearchCV object.
    """
    # candidate hyperparameters: number of neighbours and weighting scheme
    k_range = list(range(1, 31))
    weight_options = ["uniform", "distance"]
    print(" ")
    param_grid = dict(n_neighbors = k_range, weights = weight_options)
    knn = KNeighborsClassifier()
    grid = GridSearchCV(knn, param_grid=param_grid, cv = 10, scoring = "accuracy")
    grid.fit(x_train, Y_train)
    print("Best training score: {} with parameters: {} ".format(grid.best_score_, grid.best_params_))
    # refit a fresh classifier with the winning parameters
    knn = KNeighborsClassifier(**grid.best_params_)
    knn.fit(x_train, Y_train)
    y_pred_test = knn.predict(x_test)
    y_pred_train = knn.predict(x_train)
    cm_test = confusion_matrix(Y_test, y_pred_test)
    cm_train = confusion_matrix(Y_train, y_pred_train)
    acc_test = accuracy_score(Y_test, y_pred_test)
    acc_train = accuracy_score(Y_train, y_pred_train)
    print("Test Score: {} , Train Score: {}".format(acc_test, acc_train))
    print()
    print("CM test:", cm_test)
    print("CM train:", cm_train)  # typo fixed: was "CM ttrain"
    return grid

grid = KNN_Best_Params(X_train, X_test, y_train, y_test)
```
| github_jupyter |
# LFD Homework 2
Second week homework for the "Learning from Data" course offered by [Caltech on edX](https://courses.edx.org/courses/course-v1:CaltechX+CS1156x+3T2017). This notebook only contains the simulation / exploration problems.
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib notebook
```
## P: Hoeffding Inequality
Task: Run a computer simulation that simultaneously and independently flips $1000$ virtual fair coins $10$ times each. Record the following fractions of heads in these $10$ flips as
* $\nu_1$ fraction of heads for the first coin $c_1$
* $\nu_{\mathrm{rand}}$ fraction of heads for a randomly chosen coin $c_{\mathrm{rand}}$
* $\nu_\min$ fraction of heads for the coin with the minimal frequency of heads $c_\min$
This can be implemented as:
```
def hfd_experiment(number_coins=1000, runs=10):
    '''Flip `number_coins` fair coins `runs` times each; record head fractions.

    Returns:
        (nu_1, nu_rand, nu_min): fraction of heads for the first coin, for a
        uniformly chosen coin, and for the coin with the fewest heads.
    '''
    # one row per coin, one column per flip; a draw > .5 counts as heads
    flips = (np.random.rand(number_coins, runs) > .5).astype(float)
    heads = flips.sum(axis=1, keepdims=True)
    first_fraction = heads[0, 0] / runs
    random_fraction = heads[np.random.randint(number_coins), 0] / runs
    min_fraction = heads[heads.argmin(), 0] / runs
    return first_fraction, random_fraction, min_fraction
```
Now the task is to repeat this experiment $100000$ times in order to get a simulated distribution of $\nu_1, \nu_{\mathrm{rand}}$ and $\nu_\min$ respectively.
```
full_distribution = np.array([hfd_experiment() for i in range(100000)])
```
The distributions look as follows:
```
# Histograms (with kernel-density overlays) of the three recorded fractions.
fig, ax = plt.subplots(1, 3, sharex=True, figsize=(9.75, 4.5))
fig.suptitle('Distributions for $\\nu_1, \\nu_{\mathrm{rand}}$ and $\\nu_\\min$')
sns.distplot(full_distribution[:,0], bins=15, kde_kws={'bw':.075}, ax=ax[0], axlabel='$\\nu_1$')
sns.distplot(full_distribution[:,1], bins=15, kde_kws={'bw':.075}, ax=ax[1], axlabel='$\\nu_{\mathrm{rand}}$')
# nu_min is concentrated near zero, hence the much coarser binning
sns.distplot(full_distribution[:,2], bins=3, kde_kws={'bw':.075}, ax=ax[2], axlabel='$\\nu_\\min$')
for x in ax: x.set_xlim(0., 1.)
```
The average value of the different $\nu$ is:
```
# Column-wise means: empirical estimates of E[nu_1], E[nu_rand], E[nu_min].
nu_bar = full_distribution.mean(axis=0)
print('nu_1_bar\t= {:.3f}\nnu_rand_bar\t= {:.3f}\nnu_min_bar\t= {:.3f}'.format(*nu_bar))
```
## P: Linear Regression
In this problem we use the same target function $f: \mathcal{X} \mapsto \mathcal{Y}$ as in the last homework (Perceptron). Therefore we can re-use its code (with a few cosmetic changes):
```
def generate_data(N = 10, f=None):
    '''Generate a random linear target f (if not given) plus N labeled points.

    Args:
        N: number of points drawn uniformly from [-1, 1]^2.
        f: optional target function; when None a random linear target is
           created, with a .db attribute giving its decision boundary.
    Returns:
        X: (N, 2) points, Y: (N, 1) labels from np.sign, f: the target used.
    '''
    if f is None:
        # two random points define the boundary; label by the sign of the
        # projection onto a unit vector orthogonal to their difference
        a, b = (np.random.rand(2, 2) - 0.5) * 2.
        diff = b - a
        normal = np.array([1, -diff[0] / diff[1]])
        normal /= np.linalg.norm(normal)
        f = lambda x: np.sign((x - a) @ normal).reshape(-1, 1)
        f.db = lambda x: (b[1] - a[1]) / (b[0] - a[0]) * (x - a[0]) + a[1]
    # uniformly distributed points, labeled by the target
    X = (np.random.rand(N, 2) - 0.5) * 2
    Y = f(X)
    return X, Y, f
def plot_data(X, Y, db = None):
    ''' Plots two dimensional, linearly separable data from the interval [-1, 1] and the optional decision boundary db.

    Args:
        X:  (N, 2) array of points from [-1, 1]^2.
        Y:  (N, 1) array of labels; +1/-1 expected, 0 is drawn in black.
        db: optional callable mapping x coordinates to the boundary's y values.
    '''
    plt.figure()
    # split the examples into the three possible label groups
    pos_examples = X[(Y == 1).reshape(-1)]
    neg_examples = X[(Y == -1).reshape(-1)]
    neu_examples = X[(Y == 0).reshape(-1)]
    # plot the three groups of examples
    plt.scatter(pos_examples[:,0], pos_examples[:,1], color='steelblue', marker='+')
    plt.scatter(neg_examples[:,0], neg_examples[:,1], color='red', marker='o')
    plt.scatter(neu_examples[:,0], neu_examples[:,1], color='black', marker='o')
    # plot the decision boundary if provided
    if db is not None:
        x = np.arange(-1., 1., 0.01)
        plt.plot(x, db(x), c='red', ls='dashed', lw=1.)
    plt.grid(alpha=.3)
    plt.gca().set_xlim(-1, 1)
    plt.gca().set_ylim(-1, 1)
```
Note that we provide the option to pass in the target function $f$. This will come in handy later. Now we are ready to generate some linearly separable test data for classification with linear regression or perceptrons. For instance, with $N = 100$ our functions generates:
```
X, Y, f = generate_data(100)
plot_data(X, Y, f.db)
```
### Linear Model
The next step is to learn a linear model in the generated data. As demonstrated in the lecture, the weights of the linear model can be computed using the normal equation of the least squares method for linear regression as
$$
\mathbf{w} = \left(\mathbf{X}^\intercal\mathbf{X}\right)^{-1} \mathbf{X}^\intercal \mathbf{y}
$$
then the elected hypothesis function $g: \mathcal{X} \mapsto \mathcal{Y}$ can perform binary classification on a single example $\mathbf{x} \in \mathcal{X}$ as $g(\mathbf{x}) = \mathrm{sign}{(\mathbf{w}^\intercal\mathbf{x})}$ which can be computed for all training examples in a single run (batch computation) as
$$
h(\mathbf{X}) = \mathrm{sign}\left(\mathbf{X}\mathbf{w}\right)
$$
```
class LRBClassifier:
    '''Linear-regression based binary classifier.

    Fits least-squares weights via the pseudo-inverse normal equation and
    classifies with sign(X @ w). E_in records the in-sample error rate.
    '''
    def __init__(self, X, Y, add_intercept=True):
        n_samples = X.shape[0]
        if add_intercept:
            # prepend the constant x0 = 1 feature
            X = np.hstack((np.ones((n_samples, 1)), X))
        # normal equation: w = (X^T X)^+ X^T y
        self.w = np.linalg.pinv(X.T @ X) @ X.T @ Y
        self.E_in = np.sum(self(X, add_intercept=False) != Y) / n_samples

    def __call__(self, X, add_intercept=True):
        '''Classify the rows of X as sign(X @ w), returned as an (N, 1) array.'''
        if add_intercept:
            X = np.hstack((np.ones((X.shape[0], 1)), X))
        return np.sign(X @ self.w).reshape(-1, 1)
```
Let's test this new linear classifier with some generated data and plot what it is doing. Thereby it is particularly interesting to plot decision boundaries for $f$ and $g$. The symbol for classes is based on the actual $f$, but we highlight the decision boundary of $g$ (in red) to quickly spot classification errors. The decision boundary of $f$ is also added for reference (in gray).
```
# Visual sanity check: fit on one dataset and compare both decision boundaries.
X, Y, f = generate_data(100)
g = LRBClassifier(X, Y)
# we compute the decision boundary through finding an orthogonal vector to w (10e-5 term avoids division by zero)
g.db = lambda x: (- g.w[1] * x - g.w[0])/ (g.w[2] + 10e-5)
plot_data(X, Y, g.db)
# also, we can plot the actual decision boundary of f
x = np.arange(-1., 1., 0.01)
plt.plot(x, f.db(x), c='gray', ls='dashed', lw=2., alpha=.3)
print('E_in = {:.3f}'.format(g.E_in))
```
Now we can prepare the experiment as required by problems 5 and 6:
```
def experiment_lrbc(N=100, N_test=1000, repeat=1000, f=None, gen=generate_data):
    '''Repeatedly fit a linear-regression classifier and record E_in / E_out.

    Args:
        N: training-set size per run.
        N_test: out-of-sample test-set size per run.
        repeat: number of independent runs.
        f: optional fixed target (otherwise a new one per run).
        gen: data-generating function with generate_data's signature.
    Returns:
        (repeat, 2) array of (E_in, E_out) per run.
    '''
    records = []
    for run in range(repeat):
        # fresh training data (and possibly a fresh target)
        X, Y, f = gen(N, f)
        clf = LRBClassifier(X, Y)
        # estimate out-of-sample error on data generated by the same target
        X_out, Y_out, _ = gen(N_test, f)
        err_out = np.sum(Y_out != clf(X_out)) / float(N_test)
        records.append((clf.E_in, err_out))
        if run % 100 == 0:
            print('experiment (run={}): E_in={:.3f} / E_out={:.3f}'.format(run, clf.E_in, err_out))
    results = np.array(records)
    print('\nAverage Errors\n--------------\nE_in\t= {:.3f}\nE_out\t= {:.3f}'.format(*np.mean(results, axis=0)))
    return results
```
And finally run the first experiments:
```
results = experiment_lrbc()
```
### Linear Model and Perceptron
Here we have to train a `LRBClassifier` and use its weights as initialization to the perceptron learning algorithm `pla`. We can recycle the perceptron learning algorithm developed in the last homework:
```
class PerceptronClassifier:
    '''Perceptron binary classifier trained with the PLA.

    Repeatedly picks a random misclassified point and updates the weights
    with w += y_i * x_i until no point is misclassified. Works only for
    linearly separable data; raises StopIteration after max_iter updates.

    Fixes over the original:
      * init_w is copied, so the caller's array (e.g. an LRBClassifier's
        weights used as a warm start) is no longer mutated in place by `+=`.
      * the update loop is seeded with the *misclassified* points only; the
        original seeded it with all points, so the very first update could
        use a correctly classified point.
      * the zero-initialized weight vector matches X's (possibly
        un-augmented) width instead of hard-coding d + 1.
    '''
    def __init__(self, X, Y, add_intercept=True, init_w=None, max_iter=10e5):
        N, d = X.shape
        if add_intercept:
            X = np.concatenate((np.ones((N, 1)), X), axis=1)
        if init_w is None:
            self.w = np.zeros((X.shape[1], 1))
        else:
            # defensive copy: the in-place update below must not clobber
            # the caller's array
            self.w = np.array(init_w, dtype=float).reshape(-1, 1)
        # perceptron learning algorithm: iterate on misclassified points
        idx = (self(X, add_intercept=False) != Y).reshape(-1)
        X_prime, Y_prime = X[idx], Y[idx]
        self.iterations = 0
        while X_prime.shape[0] > 0:
            # randomly select a misclassified point
            i = np.random.randint(X_prime.shape[0])
            x_i, y_i = X_prime[i], Y_prime[i]
            # update hypothesis
            self.w += y_i * x_i.reshape(-1, 1)
            # re-identify misclassified points
            idx = (self(X, add_intercept=False) != Y).reshape(-1)
            X_prime, Y_prime = X[idx], Y[idx]
            self.iterations += 1
            # divergence circuit breaker (non-separable data never converges)
            if self.iterations >= max_iter:
                raise StopIteration('maximum of {} iterations reached'.format(max_iter))

    def __call__(self, X, add_intercept=True):
        '''Classify the rows of X as sign(X @ w), returned as an (N, 1) array.'''
        N = X.shape[0]
        if add_intercept:
            X = np.concatenate((np.ones((N, 1)), X), axis=1)
        return np.sign(X @ self.w).reshape(-1, 1)
```
The experiment requires us to set $N = 10$ and find the weights using linear regression, then run the `pla` on these weights to find a $g$ without in-sample classification errors. Thereby we are interested in the number of iterations it takes the `pla` to converge:
```
def experiment_pbc_w_init(N=10, repeat=1000, f=None):
    '''Warm-start the PLA with linear-regression weights, count iterations.

    Args:
        N: training-set size per run.
        repeat: number of independent runs.
        f: optional fixed target (otherwise a new one per run).
    Returns:
        Array with the PLA iteration count of every run.
    '''
    iteration_counts = []
    for run in range(repeat):
        X, Y, f = generate_data(N, f)
        # linear regression supplies the initial weight vector for the PLA
        warm_start = LRBClassifier(X, Y)
        perceptron = PerceptronClassifier(X, Y, init_w=warm_start.w)
        iteration_counts.append(perceptron.iterations)
        if run % 100 == 0:
            print('experiment (run={}): iterations={}'.format(run, perceptron.iterations))
    results = np.array(iteration_counts)
    print('\nAverage Iterations\n------------------\nIterations\t= {}'.format(np.mean(results)))
    return results
```
Finally, we can run the experiment for problem 7:
```
results = experiment_pbc_w_init()
```
## P: Nonlinear Transformation
These problems again refer to the linear regression based binary classifier. The nonlinear target function is defined as
$$
f(\mathbf{x}) = \mathrm{sign}\left(x_1^2 + x_2^2 - 0.6\right)
$$
Our implementation to generate data from above has already been prepared for passing in a target function. So all we need to do now is to implement $f$ and provide a mechanism to add some random noise to the data:
```
f = lambda X: np.sign(np.sum(X**2, axis=1, keepdims=True) - .6)
def generate_noisy_data(N = 10, f=None, noise_ratio=.1):
    '''Generate labeled data from target f and flip noise_ratio of the labels.

    Fix: the original sampled flip positions with np.random.randint, which
    can draw duplicate indices, so fewer than round(noise_ratio * N) labels
    were actually flipped. np.random.choice with replace=False flips exactly
    that many *distinct* labels.

    Args:
        N: number of examples to generate.
        f: optional target function (see generate_data).
        noise_ratio: fraction of labels to invert.
    Returns:
        X, Y (with noise applied), and the target f.
    '''
    X, Y, f = generate_data(N, f)
    # add some random noise: flip exactly n_noise distinct labels
    n_noise = np.round(noise_ratio * N).astype(int)
    idx = np.random.choice(N, size=n_noise, replace=False)
    Y[idx] = -Y[idx]
    return X, Y, f
```
Let's plot this to get a feeling of what's going on:
```
X, Y, _ = generate_noisy_data(100, f)
plot_data(X, Y)
```
Now the first task in problem 8 is to apply linear regression without any nonlinear transformation of features on a training set of size $N=1000$ and determine its in-sample error $E_{\mathrm{in}}$. Here we can re-use the experiment from above:
```
results = experiment_lrbc(N=1000, f=f, gen=generate_noisy_data)
```
### Applying the Nonlinear Transformation
Next we transform $\mathbf{X}$ by applying the nonlinear transformation $\Phi: \mathcal{X} \mapsto \mathcal{Z}$ which adds nonlinear features as $\Phi(\mathbf{x}) = (1, x_1, x_2, x_1 x_2, x_1^2, x_2^2)$. In the implementation we will not add the intercept feature $x_0$ as this happens already in the linear regression classifier implementation.
```
def phi(X):
    """Nonlinear feature transform Φ(x) = (x1, x2, x1*x2, x1^2, x2^2).

    The intercept feature is NOT added here; LRBClassifier prepends it.

    Args:
        X: (N, 2) feature matrix.
    Returns:
        (N, 5) transformed feature matrix Z.
    """
    first, second = np.hsplit(X, 2)
    extras = [first * second, first ** 2, second ** 2]
    return np.concatenate([X] + extras, axis=1)
```
Armed with this nonlinear transformation, we can finally prepare the last experiments:
```
def experiment_lrbc_transform(N=100, N_test=1000, repeat=1000, f=None, gen=generate_data):
    '''Linear-regression classification on phi-transformed features.

    Same protocol as experiment_lrbc, but both training and test features
    are passed through the nonlinear transform phi. Also accumulates the
    learned weight vectors to report their average direction.

    Returns:
        (results, w_avg): per-run (E_in, E_out) array and the (6, 1)
        arithmetic mean of the learned weight vectors.
    '''
    records = []
    w_sum = np.zeros((6, 1))
    for run in range(repeat):
        X, Y, f = gen(N, f)
        # fit in the transformed feature space Z = phi(X)
        clf = LRBClassifier(phi(X), Y)
        w_sum += clf.w
        # out-of-sample error, also in the transformed space
        X_out, Y_out, _ = gen(N_test, f)
        err_out = np.sum(Y_out != clf(phi(X_out))) / float(N_test)
        records.append((clf.E_in, err_out))
        if run % 100 == 0:
            print('experiment (run={}): E_in={:.3f} / E_out={:.3f}'.format(run, clf.E_in, err_out))
    results = np.array(records)
    print('\nAverage Errors\n--------------\nE_in\t= {:.3f}\nE_out\t= {:.3f}'.format(*np.mean(results, axis=0)))
    return results, w_sum / repeat
```
Note that the arithmetic average over the weight vectors gives us a vector capturing the general direction of the weight vectors. This experiment yields:
```
results, w = experiment_lrbc_transform(N=1000, f=f, gen=generate_noisy_data)
print('\n--------------\n{:.3f} + {:.3f}x_1 + {:.3f}x_2 + {:.3f}x_1x_2 + {:.3f}x_1^2 + {:.3f}x_2^2'.format(*w.flat))
```
| github_jupyter |
# Creating the action server
In this section, we'll discuss **demo_action_server.py**. The action server receives a goal value that is a number. When the server gets this goal value, it'll start counting from zero to this number. If the counting is complete, it'll successfully finish the action, if it is preempted before finishing, the action server will look for another goal value.
To do this, first create a python file called **demo_action_server.py** in the scripts folder of action_tutorials package. Then, copy the following into it:
```
#! /usr/bin/env python
import actionlib
import rospy
from action_tutorials.msg import DemoFeedback, DemoResult, DemoAction
class DemoClass(object):
    '''Counting action server.

    On each goal it counts up by goal.count at 1 Hz, publishing feedback
    after every step. A preempt request aborts the count; otherwise the
    final count is returned as the result.
    '''
    _feedback = DemoFeedback()
    _result = DemoResult()
    _feedback.current_number = 0

    def __init__(self):
        # non-auto-start server; goals are dispatched to goal_callback
        self._as = actionlib.SimpleActionServer("demo_as", DemoAction, self.goal_callback, False)
        self._as.start()

    def goal_callback(self, goal):
        '''Count goal.count steps at 1 Hz unless preempted.'''
        rate = rospy.Rate(1)  # one count per second
        completed = True
        for _ in range(goal.count):
            if self._as.is_preempt_requested():
                rospy.loginfo("The goal has been cancelled")
                self._as.set_preempted()
                completed = False
                break
            self._feedback.current_number += 1
            self._as.publish_feedback(self._feedback)
            rate.sleep()
        if completed:
            self._result.final_count = self._feedback.current_number
            rospy.loginfo("Succeeded! Final goal, count = %s" % self._result.final_count)
            self._as.set_succeeded(self._result)
if __name__ == '__main__':
    # initialize the ROS node, construct the server, then hand control to ROS
    rospy.init_node("demo")
    DemoClass()
    rospy.spin()
```
Compile and run it.
```
roscore
rosrun action_tutorials demo_action_server.py
```
You won't see anything. Nothing happens right now. Let's check our nodes and topics list in the new tab:
```
rosnode list
```
#### /demo
#### /rosout
```
rostopic list
```
** /demo_as/cancel **
<t>
** /demo_as/feedback **
<t>
**/demo_as/goal**
<t>
**/demo_as/result**
<t>
**/demo_as/status**
<t>
**/rosout**
<t>
**/rosout_agg**
You'll see the **'demo_as'** action server topics.
Okay, in the new tab, let's echo the feedback topic.
```
rostopic echo /demo_as/feedback
```
Nothing happens right now, because we haven't given any goals to the server yet. So, let's try now by publishing:
```
rostopic pub /demo_as/goal action_tutorials/DemoActionGoal "header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
goal_id:
stamp:
secs: 0
nsecs: 0
id: ''
goal:
count: 10"
```
In the feedback tab, you will see the count messages increasing from 1 to the goal. For us, the goal is 10.
<t>
When the count reaches 10, you'll see the success message in the server tab.
#### [INFO] [1519666564.791267]: Succeeded! Final goal, count = 10
You can also cancel the process by publishing through the cancel topic to the server. Let's do it.
First, close the goal publisher terminal and publish the goal again with more counts.
```
rostopic pub /demo_as/goal action_tutorials/DemoActionGoal "header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
goal_id:
stamp:
secs: 0
nsecs: 0
id: ''
goal:
count: 50"
```
While running, open new tab and publish cancel request to the server
```
rostopic pub /demo_as/cancel actionlib_msgs/GoalID "stamp:
secs: 0
nsecs: 0
id: ''"
```
This time you'll see the cancel message in the server terminal:
#### [INFO] [1519666873.781875]: The goal has been cancelled
### Yes, that's all. You can now create your own action server.
<t>
### In the next section, we'll create our own action client, see you!
| github_jupyter |
<h1> Create TensorFlow model </h1>
This notebook illustrates:
<ol>
<li> Creating a model using the high-level Estimator API
</ol>
```
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
```
<h2> Create TensorFlow model using TensorFlow's Estimator API </h2>
<p>
First, write an input_fn to read the data.
<p>
## Lab Task 1
Verify that the headers match your CSV output
```
import shutil
import numpy as np
import tensorflow as tf
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
TRAIN_STEPS = 1000
```
## Lab Task 2
Fill out the details of the input function below
```
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(filename_pattern, mode, batch_size = 512):
    """Build an Estimator input_fn that streams CSV rows with the Dataset API.

    Lab skeleton: the numbered TODOs are intentionally left for the student;
    the function is not runnable until they are completed.

    Args:
        filename_pattern: glob pattern of CSV shards to read.
        mode: tf.estimator.ModeKeys value; TRAIN should enable shuffle/repeat.
        batch_size: number of examples per batch.
    Returns:
        _input_fn: zero-argument callable returning the tf.data dataset.
    """
    def _input_fn():
        def decode_csv(line_of_text):
            # TODO #1: Use tf.decode_csv to parse the provided line
            # TODO #2: Make a Python dict. The keys are the column names, the values are from the parsed data
            # TODO #3: Return a tuple of features, label where features is a Python dict and label a float
            return features, label

        # TODO #4: Use tf.gfile.Glob to create list of files that match pattern
        file_list = None

        # Create dataset from file list
        dataset = (tf.compat.v1.data.TextLineDataset(file_list)  # Read text file
                   .map(decode_csv))  # Transform each elem by applying decode_csv fn

        # TODO #5: In training mode, shuffle the dataset and repeat indefinitely
        # (Look at the API for tf.data.dataset shuffle)
        # The mode input variable will be tf.estimator.ModeKeys.TRAIN if in training mode
        # Tell the dataset to provide data in batches of batch_size
        # This will now return batches of features, label
        return dataset
    return _input_fn
```
## Lab Task 3
Use the TensorFlow feature column API to define appropriate feature columns for your raw features that come from the CSV.
<b> Bonus: </b> Separate your columns into wide columns (categorical, discrete, etc.) and deep columns (numeric, embedding, etc.)
```
# Define feature columns
```
## Lab Task 4
To predict with the TensorFlow model, we also need a serving input function (we'll use this in a later lab). We will want all the inputs from our user.
Verify and change the column names and types here as appropriate. These should match your CSV_COLUMNS
```
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
    """Serving input function: raw scalar placeholders per raw feature.

    Each placeholder is expanded with an extra trailing dimension so the
    served tensors match the training-time feature shapes.
    """
    placeholders = {
        'is_male': tf.compat.v1.placeholder(tf.string, [None]),
        'mother_age': tf.compat.v1.placeholder(tf.float32, [None]),
        'plurality': tf.compat.v1.placeholder(tf.string, [None]),
        'gestation_weeks': tf.compat.v1.placeholder(tf.float32, [None])
    }
    features = {}
    for name, tensor in placeholders.items():
        features[name] = tf.expand_dims(tensor, -1)
    return tf.compat.v1.estimator.export.ServingInputReceiver(features, placeholders)
```
## Lab Task 5
Complete the TODOs in this code:
```
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
    """Wire up the estimator, train spec and eval spec, then run training.

    Lab skeleton: the numbered TODOs are intentionally left for the student.

    Args:
        output_dir: directory for checkpoints and exported models.
    """
    EVAL_INTERVAL = 300  # seconds between checkpoints (and thus evaluations)
    run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
                                        keep_checkpoint_max = 3)
    # TODO #1: Create your estimator
    estimator = None
    train_spec = tf.estimator.TrainSpec(
        # TODO #2: Call read_dataset passing in the training CSV file and the appropriate mode
        input_fn = None,
        max_steps = TRAIN_STEPS)
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        # TODO #3: Call read_dataset passing in the evaluation CSV file and the appropriate mode
        input_fn = None,
        steps = None,
        start_delay_secs = 60, # start evaluating after N seconds
        throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
        exporters = exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
```
Finally, train!
```
# Run the model
shutil.rmtree('babyweight_trained', ignore_errors = True) # start fresh each time
tf.compat.v1.summary.FileWriterCache.clear()
train_and_evaluate('babyweight_trained')
```
The exporter directory contains the final model.
Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
# seq2seq构建写对联AI
### 代码参考:[seq2seq-couplet](https://github.com/wb14123/seq2seq-couplet)
### 问题背景介绍
对联又称对子,对仗工整,平仄协调,是一字一音的汉文语言独特的艺术形式,是中国传统文化瑰宝。对联的上下联有着非常工整的对应关系,我们可以尝试使用神经网络学习对应关系,进而完成对对联任务,而之前提到的seq2seq模型,是非常典型的序列映射学习模型,可以在本场景下使用。

### seq2seq对对联
##### \[稀牛学院 x 网易云课程\]《AI工程师(自然语言处理方向)》课程资料 by [@寒小阳](https://blog.csdn.net/han_xiaoyang)
这里构建的对对联AI应用也是seq2seq模型,使用的就是我们在上一门中讲解到的模型。



## 数据读取
```
from queue import Queue
from threading import Thread
import random
def padding_seq(seq):
    """Right-pad every id list in `seq` with zeros to the longest length.

    Args:
        seq: list of id lists.
    Returns:
        New list where every sequence has the length of the longest one.
    """
    longest = max((len(s) for s in seq), default=0)
    return [s + [0] * (longest - len(s)) for s in seq]
def encode_text(words, vocab_indices):
    """Map tokens to vocabulary ids, silently dropping out-of-vocabulary tokens.

    Args:
        words: list of tokens (e.g. the characters of one couplet line).
        vocab_indices: dict mapping token -> id.
    Returns:
        List of ids for the known tokens, in original order.
    """
    ids = []
    for token in words:
        if token in vocab_indices:
            ids.append(vocab_indices[token])
    return ids
def decode_text(labels, vocabs, end_token='</s>'):
    """Map an id sequence back to text, stopping at the first end token.

    Args:
        labels: list of predicted ids from the decoder.
        vocabs: list mapping id -> token.
        end_token: token that terminates the output.
    Returns:
        Space-joined token string (excluding the end token).
    """
    words = []
    for label in labels:
        token = vocabs[label]
        if token == end_token:
            break
        words.append(token)
    return ' '.join(words)
def read_vocab(vocab_file):
    """Read the vocabulary file into a list of tokens.

    Each line holds one token; the final character of every line (the
    newline) is stripped via [:-1].

    NOTE(review): this assumes every line -- including the last -- ends
    with a newline; a file without a trailing newline would lose the last
    character of its last token. Confirm against the data files.

    Args:
        vocab_file: path to a UTF-8 encoded, one-token-per-line file.
    Returns:
        List of tokens in file order.
    """
    # context manager guarantees the handle is closed even on error
    # (the original used a bare open()/close() pair)
    with open(vocab_file, 'rb') as fh:
        return [line.decode('utf-8')[:-1] for line in fh]
class SeqReader():
    """Batch reader for aligned input/target sequence files.

    Both files hold one space-separated example per line, aligned line by
    line. Tokens are mapped to vocabulary ids, the target sequence is
    wrapped in <s> ... end_token, and read() yields shuffled (optionally
    zero-padded) batches forever.
    """

    def __init__(self,
                 input_file,
                 target_file,
                 vocab_file,
                 batch_size,
                 queue_size=2048,
                 worker_size=2,
                 end_token='</s>',
                 padding=True,
                 max_len=50):
        self.input_file = input_file
        self.target_file = target_file
        self.vocabs = read_vocab(vocab_file)
        # token -> id lookup table
        self.vocab_indices = dict((c, i) for i, c in enumerate(self.vocabs))
        self.batch_size = batch_size
        self.padding = padding
        self.data_queue = Queue(queue_size)
        self.worker_size = worker_size
        self.end_token = end_token
        self.max_len = max_len
        # Count examples with an explicit counter. The original relied on
        # the for-loop variable leaking out of the loop (NameError on an
        # empty file) and redundantly called f.close() inside the with-block.
        line_count = 0
        with open(self.input_file, 'rb') as f:
            for _ in f:
                line_count += 1
        self.single_lines = line_count  # total number of examples
        self.data_size = int(self.single_lines / batch_size)  # batches per epoch
        self.data_pos = 0  # cursor into self.data
        self._init_reader()

    def start(self):
        """Spawn daemon worker threads that (re)load the data.

        Fixed: the original passed target=self._init_reader() -- calling
        the method synchronously and handing its None return value to
        Thread, so the started threads did nothing.
        """
        for i in range(self.worker_size):
            t = Thread(target=self._init_reader)
            t.daemon = True  # do not block interpreter shutdown
            t.start()
        return

    def read_single_data(self):
        """Return the next example dict, reshuffling when the epoch ends.

        Returns:
            {'in_seq': [...], 'in_seq_len': int,
             'target_seq': [...], 'target_seq_len': int}
        """
        if self.data_pos >= len(self.data):
            # epoch exhausted: reshuffle and restart from the beginning
            random.shuffle(self.data)
            self.data_pos = 0
        result = self.data[self.data_pos]
        self.data_pos += 1
        return result

    def read(self):
        """Infinite batch generator.

        Yields:
            dict with keys 'in_seq', 'in_seq_len', 'target_seq' and
            'target_seq_len', each a list of batch_size entries; sequences
            are zero-padded to the batch maximum when self.padding is True.
        """
        while True:
            batch = {
                'in_seq': [],
                'in_seq_len': [],
                'target_seq': [],
                'target_seq_len': []
            }
            for i in range(0, self.batch_size):
                item = self.read_single_data()
                batch['in_seq'].append(item['in_seq'])
                batch['in_seq_len'].append(item['in_seq_len'])
                batch['target_seq'].append(item['target_seq'])
                batch['target_seq_len'].append(item['target_seq_len'])
            if self.padding:
                batch['in_seq'] = padding_seq(batch['in_seq'])
                batch['target_seq'] = padding_seq(batch['target_seq'])
            yield batch

    def _init_reader(self):
        """Load and preprocess the whole corpus into self.data.

        Each entry maps the input line to ids terminated by end_token, and
        the target line to ids wrapped in <s> ... end_token; both are
        truncated to max_len. data_pos is left past the end so the first
        read_single_data() call triggers a shuffle.
        """
        self.data = []
        input_f = open(self.input_file, 'rb')
        target_f = open(self.target_file, 'rb')
        for input_line in input_f:
            input_line = input_line.decode('utf-8')[:-1]
            # the target file is consumed line-by-line, in lockstep
            target_line = target_f.readline().decode('utf-8')[:-1]
            # tokens are separated by single spaces
            input_words = [x for x in input_line.split(' ') if x != '']
            if len(input_words) >= self.max_len:
                input_words = input_words[:self.max_len - 1]
            input_words.append(self.end_token)
            target_words = [x for x in target_line.split(' ') if x != '']
            if len(target_words) >= self.max_len:
                target_words = target_words[:self.max_len - 1]
            target_words = ['<s>',] + target_words  # start-of-sequence marker
            target_words.append(self.end_token)     # end-of-sequence marker
            in_seq = encode_text(input_words, self.vocab_indices)
            target_seq = encode_text(target_words, self.vocab_indices)
            self.data.append({
                'in_seq': in_seq,
                'in_seq_len': len(in_seq),
                'target_seq': target_seq,
                'target_seq_len': len(target_seq) - 1  # <s> is not counted
            })
        input_f.close()
        target_f.close()
        self.data_pos = len(self.data)
```
## 评估函数
```
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
(all n-grams upto max_order),keys:n-gram,value:count
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus,
                 translation_corpus,
                 max_order=4,
                 smooth=False):
    """Computes BLEU score of translated segments against one or more references.

    Args:
      reference_corpus: list of lists of references for each translation. Each
          reference should be tokenized into a list of tokens.
      translation_corpus: list of translations to score. Each translation
          should be tokenized into a list of tokens.
      max_order: Maximum n-gram order to use when computing BLEU score.
      smooth: Whether or not to apply Lin et al. 2004 smoothing.

    Returns:
      6-tuple:
        bleu: float, BLEU score of the translation corpus,
        precisions: list of per-order n-gram precisions,
        bp: brevity penalty factor,
        ratio: translation_length / reference_length,
        translation_length: total length of all translations,
        reference_length: total length using the shortest reference per example.
    """
    matches_by_order = [0] * max_order
    possible_matches_by_order = [0] * max_order
    reference_length = 0
    translation_length = 0
    for (references, translation) in zip(reference_corpus, translation_corpus):
        # per BLEU convention, use the shortest reference length
        reference_length += min(len(r) for r in references)
        translation_length += len(translation)
        merged_ref_ngram_counts = collections.Counter()
        # merge all references for this translation
        for reference in references:
            merged_ref_ngram_counts |= _get_ngrams(reference, max_order)  # Counter |= keeps the max count per n-gram
        translation_ngram_counts = _get_ngrams(translation, max_order)
        overlap = translation_ngram_counts & merged_ref_ngram_counts  # Counter & takes the min count: clipped matches
        # matches_by_order: clipped match counts, indexed by n-gram order - 1
        for ngram in overlap:
            matches_by_order[len(ngram) - 1] += overlap[ngram]
        # possible_matches_by_order: total candidate n-grams per order,
        # i.e. len(translation) - order + 1 for each translation
        for order in range(1, max_order + 1):
            possible_matches = len(translation) - order + 1
            if possible_matches > 0:
                possible_matches_by_order[order - 1] += possible_matches
    precisions = [0] * max_order
    for i in range(0, max_order):
        if smooth:
            # add-one smoothing (Lin & Och 2004) avoids zero precisions
            precisions[i] = ((matches_by_order[i] + 1.) /
                             (possible_matches_by_order[i] + 1.))
        else:
            if possible_matches_by_order[i] > 0:
                precisions[i] = (
                    float(matches_by_order[i]) / possible_matches_by_order[i])
            else:
                precisions[i] = 0.0
    if min(precisions) > 0:
        # geometric mean of the n-gram precisions, computed in log space
        p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
        geo_mean = math.exp(p_log_sum)
    else:
        geo_mean = 0
    # brevity penalty: penalize translations shorter than the reference,
    # since short outputs would otherwise get inflated precision
    ratio = float(translation_length) / reference_length
    if ratio > 1.0:
        bp = 1.
    else:
        bp = math.exp(1 - 1. / ratio)
    bleu = geo_mean * bp
    return (bleu, precisions, bp, ratio, translation_length, reference_length)
```
## 定义seq2seq
```
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.python.layers import core as layers_core
def getLayeredCell(layer_size,
                   num_units,
                   input_keep_prob,
                   output_keep_prob=1.0):
    '''Build a stacked multi-layer LSTM cell with dropout.

    Args:
        layer_size: number of stacked LSTM layers.
        num_units: hidden units per layer.
        input_keep_prob: dropout keep probability on each cell's input.
        output_keep_prob: dropout keep probability on each cell's output.
    Returns:
        An rnn.MultiRNNCell wrapping `layer_size` DropoutWrapper'd LSTMCells.
    '''
    return rnn.MultiRNNCell([
        rnn.DropoutWrapper(
            tf.nn.rnn_cell.LSTMCell(
                name='basic_lstm_cell', num_units=num_units), input_keep_prob,
            output_keep_prob) for i in range(layer_size)
    ])
def bi_encoder(embed_input, in_seq_len, num_units, layer_size,
               input_keep_prob):
    '''Bidirectional RNN encoder.

    Args:
        embed_input: embedded input sequence; batch-major (time_major=False).
        in_seq_len: per-example sequence lengths.
        num_units: hidden units per direction.
        layer_size: total RNN layers, split evenly between fw and bw stacks.
        input_keep_prob: dropout keep probability on cell inputs.
    Returns:
        encoder_output: per-timestep outputs with the forward and backward
            results concatenated along the last axis.
        encoder_state: tuple of per-layer final states, interleaving the
            forward and backward directions layer by layer.
    '''
    # encode input into a vector
    bi_layer_size = int(layer_size / 2)
    encode_cell_fw = getLayeredCell(bi_layer_size, num_units, input_keep_prob)
    encode_cell_bw = getLayeredCell(bi_layer_size, num_units, input_keep_prob)
    bi_encoder_output, bi_encoder_state = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=encode_cell_fw,
        cell_bw=encode_cell_bw,
        inputs=embed_input,
        sequence_length=in_seq_len,
        dtype=embed_input.dtype,
        time_major=False)
    # concat encode output and state
    encoder_output = tf.concat(bi_encoder_output, -1)
    encoder_state = []
    for layer_id in range(bi_layer_size):
        encoder_state.append(bi_encoder_state[0][layer_id])  # forward state of this layer
        encoder_state.append(bi_encoder_state[1][layer_id])  # backward state of this layer
    encoder_state = tuple(encoder_state)
    return encoder_output, encoder_state
def attention_decoder_cell(encoder_output, in_seq_len, num_units, layer_size,
                           input_keep_prob):
    '''Build the decoder cell wrapped with Bahdanau attention over the encoder output.

    Args:
        encoder_output: encoder memory the attention attends over.
        in_seq_len: encoder sequence lengths (masks padded positions).
        num_units: attention depth and hidden units per layer.
        layer_size: number of stacked decoder layers.
        input_keep_prob: dropout keep probability on cell inputs.
    Returns:
        A multi-layer RNN cell wrapped in an AttentionWrapper.
    '''
    # normalize=True selects the weight-normalized Bahdanau variant
    attention_mechanim = tf.contrib.seq2seq.BahdanauAttention(
        num_units, encoder_output, in_seq_len, normalize=True)
    # alternative (kept for reference):
    # attention_mechanim = tf.contrib.seq2seq.LuongAttention(num_units,
    #         encoder_output, in_seq_len, scale = True)
    cell = getLayeredCell(layer_size, num_units, input_keep_prob)
    cell = tf.contrib.seq2seq.AttentionWrapper(
        cell, attention_mechanim, attention_layer_size=num_units)
    return cell
def decoder_projection(output, output_size):
    """Project decoder outputs to vocabulary-size logits (no bias, no activation)."""
    return tf.layers.dense(
        output,
        output_size,
        activation=None,
        use_bias=False,
        name='output_mlp')
def train_decoder(encoder_output, in_seq_len, target_seq, target_seq_len,
                  encoder_state, num_units, layers, embedding, output_size,
                  input_keep_prob, projection_layer):
    '''Training-time decoder (teacher forcing via TrainingHelper).

    Feeds the ground-truth target sequence into the decoder at each step
    and returns the projected logits.

    Returns:
        rnn_output logits from dynamic_decode (capped at 100 steps).
    '''
    decoder_cell = attention_decoder_cell(encoder_output, in_seq_len,
                                          num_units, layers, input_keep_prob)
    batch_size = tf.shape(in_seq_len)[0]
    # initialize the attention cell state from the encoder's final state
    init_state = decoder_cell.zero_state(
        batch_size, tf.float32).clone(cell_state=encoder_state)
    helper = tf.contrib.seq2seq.TrainingHelper(
        target_seq, target_seq_len, time_major=False)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        decoder_cell, helper, init_state, output_layer=projection_layer)
    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder, maximum_iterations=100)
    return outputs.rnn_output
def infer_decoder(encoder_output, in_seq_len, encoder_state, num_units, layers,
                  embedding, output_size, input_keep_prob, projection_layer):
    '''Build the inference-time decoder using beam search.

    NOTE(review): BeamSearchDecoder normally requires the attention memory
    (encoder_output, in_seq_len) and the initial state to be tiled with
    tf.contrib.seq2seq.tile_batch(..., multiplier=beam_width), and the cell
    state built with batch_size * beam_width — confirm before using this
    path; the untiled state here looks suspicious.
    '''
    decoder_cell = attention_decoder_cell(encoder_output, in_seq_len,
                                          num_units, layers, input_keep_prob)
    batch_size = tf.shape(in_seq_len)[0]
    init_state = decoder_cell.zero_state(
        batch_size, tf.float32).clone(cell_state=encoder_state)
    # TODO: start tokens and end tokens are hard code
    # (Greedy-decoding alternative kept for reference.)
    """
    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        embedding, tf.fill([batch_size], 0), 1)
    decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, helper,
        init_state, output_layer=projection_layer)
    """
    decoder = tf.contrib.seq2seq.BeamSearchDecoder(
        cell=decoder_cell,
        embedding=embedding,
        start_tokens=tf.fill([batch_size], 0),
        end_token=1,
        initial_state=init_state,
        beam_width=10,
        output_layer=projection_layer,
        length_penalty_weight=1.0)
    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder, maximum_iterations=100)
    return outputs.sample_id
def seq2seq(in_seq, in_seq_len, target_seq, target_seq_len, vocab_size,
            num_units, layers, dropout):
    """Build the seq2seq graph (shared by training and inference).

    Returns:
        training  (target_seq is not None) -- outputs.rnn_output: logits
        inference (target_seq is None)     -- outputs.sample_id: token ids
    """
    in_shape = tf.shape(in_seq)
    batch_size = in_shape[0]
    # Enable dropout only during training (PEP 8: compare to None with `is`).
    if target_seq is not None:
        input_keep_prob = 1 - dropout
    else:
        input_keep_prob = 1
    # Fully connected projection from decoder output to vocabulary logits.
    projection_layer = layers_core.Dense(vocab_size, use_bias=False)
    # Embed input (and, when training, target) sequences; keep the embedding
    # table on the CPU.
    with tf.device('/cpu:0'):
        embedding = tf.get_variable(
            name='embedding', shape=[vocab_size, num_units])
    embed_input = tf.nn.embedding_lookup(embedding, in_seq, name='embed_input')
    # Bidirectional encoder followed by an attention decoder cell.
    encoder_output, encoder_state = bi_encoder(
        embed_input, in_seq_len, num_units, layers, input_keep_prob)
    decoder_cell = attention_decoder_cell(encoder_output, in_seq_len,
                                          num_units, layers, input_keep_prob)
    batch_size = tf.shape(in_seq_len)[0]
    # Initialize the decoder state from the encoder's final state.
    init_state = decoder_cell.zero_state(
        batch_size, tf.float32).clone(cell_state=encoder_state)
    if target_seq is not None:
        embed_target = tf.nn.embedding_lookup(
            embedding, target_seq, name='embed_target')
        helper = tf.contrib.seq2seq.TrainingHelper(
            embed_target, target_seq_len, time_major=False)
    else:
        # TODO: start tokens and end tokens are hard coded;
        # 0 and 1 are assumed to be the start/end-of-sentence ids.
        helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
            embedding, tf.fill([batch_size], 0), 1)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        decoder_cell, helper, init_state, output_layer=projection_layer)
    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder, maximum_iterations=100)
    if target_seq is not None:
        return outputs.rnn_output
    else:
        return outputs.sample_id
def seq_loss(output, target, seq_len):
    '''Masked cross-entropy loss, averaged over the batch.

    `target` carries a leading start token, which is stripped before being
    compared against the decoder logits.
    '''
    labels = target[:, 1:]
    per_token = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=output, labels=labels)
    n = tf.shape(labels)[0]
    # Not every pair reaches max_timestep; zero out loss from padding
    # positions beyond each sequence's true length.
    mask = tf.sequence_mask(seq_len, tf.shape(output)[1])
    masked = per_token * tf.to_float(mask)
    return tf.reduce_sum(masked) / tf.to_float(n)
```
## Model definition
```
import tensorflow as tf
from os import path
import random
class Model():
    """Seq2seq couplet model.

    Owns three independent graphs/sessions (train, eval, infer) that share
    checkpoints on disk through `self.model_file`.
    """
    def __init__(self,
                 train_input_file,
                 train_target_file,
                 test_input_file,
                 test_target_file,
                 vocab_file,
                 num_units,
                 layers,
                 dropout,
                 batch_size,
                 learning_rate,
                 output_dir,
                 save_step=100,
                 eval_step=1000,
                 param_histogram=False,
                 restore_model=False,
                 init_train=True,
                 init_infer=False):
        self.num_units = num_units  # hidden units per RNN cell
        self.layers = layers
        self.dropout = dropout
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.save_step = save_step
        self.eval_step = eval_step
        self.param_histogram = param_histogram
        self.restore_model = restore_model  # boolean: reload checkpoint before training
        self.init_train = init_train
        self.init_infer = init_infer
        if init_train:
            self.train_reader = SeqReader(train_input_file, train_target_file,
                                          vocab_file, batch_size)
            self.train_reader.start()  # background thread feeding batches
            self.train_data = self.train_reader.read()  # generator yielding batches
            self.eval_reader = SeqReader(test_input_file, test_target_file,
                                         vocab_file, batch_size)
            self.eval_reader.start()
            self.eval_data = self.eval_reader.read()
        self.model_file = path.join(output_dir, 'model.ckpl')
        self.log_writter = tf.summary.FileWriter(output_dir)
        if init_train:
            self._init_train()
            self._init_eval()
        if init_infer:
            # NOTE(review): `reader` is not defined in this file's visible
            # scope (only SeqReader is used above) — confirm the import.
            self.infer_vocabs = reader.read_vocab(vocab_file)
            self.infer_vocab_indices = dict(
                (c, i) for i, c in enumerate(self.infer_vocabs))
            self._init_infer()
            self.reload_infer_model()

    def gpu_session_config(self):
        """Session config with allow_growth: start with a small GPU memory
        allocation and grow it on demand."""
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        return config

    def _init_train(self):
        '''Build the training graph, loss, optimizer and session.'''
        self.train_graph = tf.Graph()
        with self.train_graph.as_default():
            # Inputs: token-id sequences and their true lengths.
            self.train_in_seq = tf.placeholder(
                tf.int32, shape=[self.batch_size, None])
            self.train_in_seq_len = tf.placeholder(
                tf.int32, shape=[self.batch_size])
            self.train_target_seq = tf.placeholder(
                tf.int32, shape=[self.batch_size, None])
            self.train_target_seq_len = tf.placeholder(
                tf.int32, shape=[self.batch_size])
            # Output logits from the seq2seq model.
            output = seq2seq(self.train_in_seq, self.train_in_seq_len,
                             self.train_target_seq, self.train_target_seq_len,
                             len(self.train_reader.vocabs), self.num_units,
                             self.layers, self.dropout)
            self.train_output = tf.argmax(tf.nn.softmax(output), 2)
            # Masked cross-entropy loss.
            self.loss = seq_loss(output, self.train_target_seq,
                                 self.train_target_seq_len)
            # Gradient clipping.
            params = tf.trainable_variables()
            gradients = tf.gradients(self.loss, params)
            clipped_gradients, _ = tf.clip_by_global_norm(gradients, 0.5)
            self.train_op = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).apply_gradients(
                    list(zip(clipped_gradients, params)))
            # Optionally record parameter histograms over time.
            if self.param_histogram:
                for v in tf.trainable_variables():
                    tf.summary.histogram('train_' + v.name, v)
            tf.summary.scalar('loss', self.loss)
            self.train_summary = tf.summary.merge_all()
            self.train_init = tf.global_variables_initializer()
            self.train_saver = tf.train.Saver()
        self.train_session = tf.Session(
            graph=self.train_graph, config=self.gpu_session_config())

    def _init_eval(self):
        '''Build the evaluation (inference-mode) graph and session.'''
        self.eval_graph = tf.Graph()
        with self.eval_graph.as_default():
            self.eval_in_seq = tf.placeholder(
                tf.int32, shape=[self.batch_size, None])
            self.eval_in_seq_len = tf.placeholder(
                tf.int32, shape=[self.batch_size])
            # target_seq=None makes seq2seq build the greedy-decoding branch.
            self.eval_output = seq2seq(
                self.eval_in_seq, self.eval_in_seq_len, None, None,
                len(self.eval_reader.vocabs), self.num_units, self.layers,
                self.dropout)
            if self.param_histogram:
                for v in tf.trainable_variables():
                    tf.summary.histogram('eval_' + v.name, v)
            self.eval_summary = tf.summary.merge_all()
            self.eval_saver = tf.train.Saver()
        self.eval_session = tf.Session(
            graph=self.eval_graph, config=self.gpu_session_config())

    def _init_infer(self):
        '''Build the single-example inference graph and session.'''
        self.infer_graph = tf.Graph()
        with self.infer_graph.as_default():
            self.infer_in_seq = tf.placeholder(tf.int32, shape=[1, None])
            self.infer_in_seq_len = tf.placeholder(tf.int32, shape=[1])
            self.infer_output = seq2seq(self.infer_in_seq,
                                        self.infer_in_seq_len, None, None,
                                        len(self.infer_vocabs), self.num_units,
                                        self.layers, self.dropout)
            self.infer_saver = tf.train.Saver()
        self.infer_session = tf.Session(
            graph=self.infer_graph, config=self.gpu_session_config())

    def train(self, epochs, start=0):
        '''Run the training loop; each step consumes one batch.'''
        if not self.init_train:
            raise Exception('Train graph is not inited!')
        with self.train_graph.as_default():
            # Resume from checkpoint if requested, otherwise initialize.
            if path.isfile(self.model_file + '.meta') and self.restore_model:
                print("Reloading model file before training.")
                self.train_saver.restore(self.train_session, self.model_file)
            else:
                self.train_session.run(self.train_init)
            total_loss = 0
            for step in range(start, epochs):
                data = next(self.train_data)  # next batch from the generator
                in_seq = data['in_seq']
                in_seq_len = data['in_seq_len']
                target_seq = data['target_seq']
                target_seq_len = data['target_seq_len']
                output, loss, train, summary = self.train_session.run(
                    [
                        self.train_output,
                        self.loss,
                        self.train_op,
                        self.train_summary
                    ],
                    feed_dict={
                        self.train_in_seq: in_seq,
                        self.train_in_seq_len: in_seq_len,
                        self.train_target_seq: target_seq,
                        self.train_target_seq_len: target_seq_len
                    })
                total_loss += loss
                self.log_writter.add_summary(summary, step)
                if step % self.save_step == 0:
                    self.train_saver.save(self.train_session, self.model_file)
                    print(("Saving model. Step: %d, loss: %f" %
                           (step, total_loss / self.save_step)))
                    # print sample output
                    sid = random.randint(0, self.batch_size - 1)
                    input_text = decode_text(in_seq[sid],
                                             self.eval_reader.vocabs)
                    output_text = decode_text(output[sid],
                                              self.train_reader.vocabs)
                    # Drop the leading start token from the target text.
                    target_text = decode_text(
                        target_seq[sid],
                        self.train_reader.vocabs).split(' ')[1:]
                    target_text = ' '.join(target_text)
                    print('******************************')
                    print(('src: ' + input_text))
                    print(('output: ' + output_text))
                    print(('target: ' + target_text))
                if step % self.eval_step == 0:
                    bleu_score = self.eval(step)
                    print(("Evaluate model. Step: %d, score: %f, loss: %f" %
                           (step, bleu_score, total_loss / self.save_step)))
                    eval_summary = tf.Summary(value=[
                        tf.Summary.Value(tag='bleu', simple_value=bleu_score)
                    ])
                    self.log_writter.add_summary(eval_summary, step)
                if step % self.save_step == 0:
                    total_loss = 0

    def eval(self, train_step):
        '''Evaluate the latest checkpoint on the test set; return BLEU * 100.'''
        with self.eval_graph.as_default():
            self.eval_saver.restore(self.eval_session, self.model_file)
            bleu_score = 0
            target_results = []
            output_results = []
            for step in range(0, self.eval_reader.data_size):
                data = next(self.eval_data)
                in_seq = data['in_seq']
                in_seq_len = data['in_seq_len']
                target_seq = data['target_seq']
                target_seq_len = data['target_seq_len']
                outputs = self.eval_session.run(
                    self.eval_output,
                    feed_dict={
                        self.eval_in_seq: in_seq,
                        self.eval_in_seq_len: in_seq_len
                    })
                for i in range(len(outputs)):
                    output = outputs[i]
                    target = target_seq[i]
                    output_text = decode_text(
                        output, self.eval_reader.vocabs).split(' ')
                    # Skip the leading start token of the target.
                    target_text = decode_text(
                        target[1:], self.eval_reader.vocabs).split(' ')
                    prob = int(
                        self.eval_reader.data_size * self.batch_size / 10)
                    target_results.append([target_text])
                    output_results.append(output_text)
                    # Print roughly one random sample per `prob` examples.
                    if random.randint(1, prob) == 1:
                        print('====================')
                        input_text = decode_text(
                            in_seq[i], self.eval_reader.vocabs)
                        print(('src:' + input_text))
                        print(('output: ' + ' '.join(output_text)))
                        print(('target: ' + ' '.join(target_text)))
            return compute_bleu(target_results, output_results)[0] * 100

    def reload_infer_model(self):
        # Reload the latest checkpoint into the inference session.
        with self.infer_graph.as_default():
            self.infer_saver.restore(self.infer_session, self.model_file)

    def infer(self, text):
        '''Decode one space-separated input sentence into generated text.'''
        if not self.init_infer:
            raise Exception('Infer graph is not inited!')
        with self.infer_graph.as_default():
            # Append the end-of-sentence marker before encoding.
            in_seq = encode_text(
                text.split(' ') + [
                    '</s>',
                ], self.infer_vocab_indices)
            in_seq_len = len(in_seq)
            outputs = self.infer_session.run(
                self.infer_output,
                feed_dict={
                    self.infer_in_seq: [in_seq],
                    self.infer_in_seq_len: [in_seq_len]
                })
            output = outputs[0]
            output_text = decode_text(output, self.infer_vocabs)
            return output_text
```
## Model training
```
# Instantiate the couplet seq2seq model with its data files and
# hyperparameters, then start training.
m = Model(
    './couplet/train/in.txt',
    './couplet/train/out.txt',
    './couplet/test/in.txt',
    './couplet/test/out.txt',
    './couplet/vocabs',
    num_units=256,
    layers=4,
    dropout=0.2,
    batch_size=32,
    learning_rate=0.001,
    output_dir='./models/output_couplet',
    restore_model=False
)
# 5,000,000 training iterations (the parameter is named `epochs`, but each
# step consumes a single batch).
m.train(5000000)
```
| github_jupyter |
# Distributed DeepRacer RL training with SageMaker and RoboMaker
---
## Introduction
This notebook is an enhanced version of [AWS DeepRacer](https://console.aws.amazon.com/deepracer/home#welcome), for the AIDO-3 NeurIPS DeepRacer challenge. The notebook is an expansion of the original [Amazon SageMaker notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/reinforcement_learning/rl_deepracer_robomaker_coach_gazebo) and provides additional instructions to customize the deep reinforcement learning algorithms.
---
## How it works?

The reinforcement learning agent (i.e. our autonomous car) learns to drive by interacting with its environment, e.g., the track, by taking an action in a given state to maximize the expected reward. The agent learns the optimal plan of actions in training by trial-and-error through repeated episodes.
The figure above shows an example of distributed RL training across SageMaker and two RoboMaker simulation environments that perform the **rollouts** - execute a fixed number of episodes using the current model or policy. The rollouts collect agent experiences (state-transition tuples) and share this data with SageMaker for training. SageMaker updates the model policy which is then used to execute the next sequence of rollouts. This training loop continues until the model converges, i.e. the car learns to drive and stops going off-track. More formally, we can define the problem in terms of the following:
1. **Objective**: Learn to drive autonomously by staying close to the center of the track.
2. **Environment**: A 3D driving simulator hosted on AWS RoboMaker.
3. **State**: The driving POV image captured by the car's head camera, as shown in the illustration above.
4. **Action**: Six discrete steering wheel positions at different angles (configurable)
5. **Reward**: Positive reward for staying close to the center line; High penalty for going off-track. This is configurable and can be made more complex (for e.g. steering penalty can be added).
## Prerequisites
### Imports
To get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations.
You can run this notebook from your local machine or from a SageMaker notebook instance. In both of these scenarios, you can run the following to launch a training job on SageMaker and a simulation job on RoboMaker.
```
import boto3
import sagemaker
import sys
import os
import re
import numpy as np
import subprocess
sys.path.append("common")
from misc import get_execution_role, wait_for_s3_object
from docker_utils import build_and_push_docker_image
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
from time import gmtime, strftime
import time
from IPython.display import Markdown
from markdown_helper import *
```
### Initializing basic parameters
```
# Select the instance type
#instance_type = "ml.c4.2xlarge"
instance_type = "ml.p2.xlarge"
#instance_type = "ml.c5.4xlarge"

# Starting SageMaker session
sage_session = sagemaker.session.Session()

# Create unique job name.
job_name_prefix = 'sahika-neuripschallenge-2019'

# Duration of job in seconds (60 * 20 = 20 minutes)
job_duration_in_seconds = 60 * 20

# AWS Region
aws_region = sage_session.boto_region_name
if aws_region not in ["us-west-2", "us-east-1", "eu-west-1"]:
    raise Exception("This notebook uses RoboMaker which is available only in US East (N. Virginia),"
                    "US West (Oregon) and EU (Ireland). Please switch to one of these regions.")
```
### Setup S3 bucket
Set up the linkage and authentication to the S3 bucket that we want to use for checkpoint and metadata.
```
# S3 bucket
s3_bucket = sage_session.default_bucket()

# SDK appends the job name and output folder
s3_output_path = 's3://{}/'.format(s3_bucket)

# Ensure that the S3 prefix contains the keyword 'sagemaker'
s3_prefix = job_name_prefix + "-sagemaker-" + strftime("%y%m%d-%H%M%S", gmtime())

# Get the AWS account id of this account
sts = boto3.client("sts")
account_id = sts.get_caller_identity()['Account']

print("Using s3 bucket {}".format(s3_bucket))
print("Model checkpoints and other metadata will be stored at: \ns3://{}/{}".format(s3_bucket, s3_prefix))
```
### Create an IAM role
Either get the execution role when running from a SageMaker notebook `role = sagemaker.get_execution_role()` or, when running from local machine, use utils method `role = get_execution_role('role_name')` to create an execution role.
```
# Resolve the IAM execution role: use the notebook's attached role when
# running on SageMaker, otherwise fall back to the local helper.
# (A bare `except:` would also swallow KeyboardInterrupt/SystemExit.)
try:
    sagemaker_role = sagemaker.get_execution_role()
except Exception:
    sagemaker_role = get_execution_role('sagemaker')

print("Using Sagemaker IAM role arn: \n{}".format(sagemaker_role))
```
> Please note that this notebook cannot be run in `SageMaker local mode` as the simulator is based on AWS RoboMaker service.
### Permission setup for invoking AWS RoboMaker from this notebook
In order to enable this notebook to be able to execute AWS RoboMaker jobs, we need to add one trust relationship to the default execution role of this notebook.
```
# Render instructions for adding the RoboMaker trust relationship to the role.
display(Markdown(generate_help_for_robomaker_trust_relationship(sagemaker_role)))
```
### Permission setup for Sagemaker to S3 bucket
SageMaker writes the Redis IP address and models to the S3 bucket. This requires PutObject permission on the bucket. Make sure the SageMaker role you are using has these permissions.
```
# Render instructions for granting the role S3 write (PutObject) permission.
display(Markdown(generate_s3_write_permission_for_sagemaker_role(sagemaker_role)))
```
### Permission setup for Sagemaker to create KinesisVideoStreams
The SageMaker notebook has to create a Kinesis video stream. You can observe the car running episodes in the Kinesis video stream.
```
# Render instructions for granting the role Kinesis Video Streams permissions.
display(Markdown(generate_kinesis_create_permission_for_sagemaker_role(sagemaker_role)))
```
### Build and push docker image
The file ./Dockerfile contains all the packages that are installed into the Docker image. Instead of using the default SageMaker container, we will be using this Docker container.
```
%%time
# Build a GPU image for ml.p* instance types, otherwise a CPU image.
cpu_or_gpu = 'gpu' if instance_type.startswith('ml.p') else 'cpu'
repository_short_name = "sagemaker-docker-%s" % cpu_or_gpu
docker_build_args = {
    'CPU_OR_GPU': cpu_or_gpu,
    'AWS_REGION': boto3.Session().region_name,
}
# Build ./Dockerfile and push the resulting image to ECR.
custom_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args)
print("Using ECR image %s" % custom_image_name)
```
### Configure VPC
Since SageMaker and RoboMaker have to communicate with each other over the network, both of these services need to run in VPC mode. This can be done by supplying subnets and security groups to the job launching scripts.
We will check if the deepracer-vpc stack is created and use it if present (This is present if the AWS Deepracer console is used atleast once to create a model). Else we will use the default VPC stack.
```
ec2 = boto3.client('ec2')

#
# Check if the user has a DeepRacer VPC and use it if present (it has all
# required permissions). This VPC is created once the DeepRacer console has
# been used to create at least one model.
# If it is not present, use the default VPC connection.
#
deepracer_security_groups = [group["GroupId"] for group in ec2.describe_security_groups()['SecurityGroups']
                             if group['GroupName'].startswith("deepracer-vpc")]
if(deepracer_security_groups):
    print("Using the DeepRacer VPC stacks")
    deepracer_vpc = [vpc['VpcId'] for vpc in ec2.describe_vpcs()['Vpcs']
                     if "Tags" in vpc for val in vpc['Tags']
                     if val['Value'] == 'deepracer-vpc'][0]
    deepracer_subnets = [subnet["SubnetId"] for subnet in ec2.describe_subnets()["Subnets"]
                         if subnet["VpcId"] == deepracer_vpc]
else:
    print("Using the default VPC stacks")
    deepracer_vpc = [vpc['VpcId'] for vpc in ec2.describe_vpcs()['Vpcs'] if vpc["IsDefault"] == True][0]
    deepracer_security_groups = [group["GroupId"] for group in ec2.describe_security_groups()['SecurityGroups']
                                 if 'VpcId' in group and group["GroupName"] == "default" and group["VpcId"] == deepracer_vpc]
    deepracer_subnets = [subnet["SubnetId"] for subnet in ec2.describe_subnets()["Subnets"]
                         if subnet["VpcId"] == deepracer_vpc and subnet['DefaultForAz']==True]

print("Using VPC:", deepracer_vpc)
print("Using security group:", deepracer_security_groups)
print("Using subnets:", deepracer_subnets)
```
### Create Route Table
A SageMaker job running in VPC mode cannot access S3 resources. So, we need to create a VPC S3 endpoint to allow S3 access from the SageMaker container. To learn more about the VPC mode, please visit [this link.](https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)
```
#TODO: Explain to customer what CREATE_ROUTE_TABLE is doing
CREATE_ROUTE_TABLE = True

def create_vpc_endpoint_table():
    """Attach an S3 gateway VPC endpoint to every route table of the VPC.

    SageMaker jobs running in VPC mode cannot reach S3 without this
    endpoint. On permission errors, help text is displayed before
    re-raising.
    """
    print("Creating ")
    try:
        route_tables = [route_table["RouteTableId"] for route_table in ec2.describe_route_tables()['RouteTables']
                        if route_table['VpcId'] == deepracer_vpc]
    except Exception as e:
        if "UnauthorizedOperation" in str(e):
            display(Markdown(generate_help_for_s3_endpoint_permissions(sagemaker_role)))
        else:
            display(Markdown(create_s3_endpoint_manually(aws_region, deepracer_vpc)))
        raise e
    print("Trying to attach S3 endpoints to the following route tables:", route_tables)
    if not route_tables:
        raise Exception(("No route tables were found. Please follow the VPC S3 endpoint creation "
                         "guide by clicking the above link."))
    try:
        ec2.create_vpc_endpoint(DryRun=False,
                                VpcEndpointType="Gateway",
                                VpcId=deepracer_vpc,
                                ServiceName="com.amazonaws.{}.s3".format(aws_region),
                                RouteTableIds=route_tables)
        print("S3 endpoint created successfully!")
    except Exception as e:
        if "RouteAlreadyExists" in str(e):
            print("S3 endpoint already exists.")
        elif "UnauthorizedOperation" in str(e):
            # BUG FIX: previously referenced the undefined name `role`;
            # use `sagemaker_role`, which is defined earlier in the notebook.
            display(Markdown(generate_help_for_s3_endpoint_permissions(sagemaker_role)))
            raise e
        else:
            # BUG FIX: previously referenced the undefined name `default_vpc`;
            # use `deepracer_vpc`, which is defined earlier in the notebook.
            display(Markdown(create_s3_endpoint_manually(aws_region, deepracer_vpc)))
            raise e

if CREATE_ROUTE_TABLE:
    create_vpc_endpoint_table()
```
## Setup the environment
The environment is defined in a Python file called “deepracer_racetrack_env.py” and the file can be found at `src/markov/environments/`. This file implements the gym interface for our Gazebo based RoboMakersimulator. This is a common environment file used by both SageMaker and RoboMaker. The environment variable - `NODE_TYPE` defines which node the code is running on. So, the expressions that have `rospy` dependencies are executed on RoboMaker only.
We can experiment with different reward functions by modifying `reward_function` in `src/markov/rewards/`. Action space and steering angles can be changed by modifying `src/markov/actions/`.json file
### Configure the preset for RL algorithm
The parameters that configure the RL training job are defined in `src/markov/presets/`. Using the preset file, you can define agent parameters to select the specific agent algorithm. We suggest using Clipped PPO for this example.
You can edit this file to modify algorithm parameters like learning_rate, neural network structure, batch_size, discount factor etc.
```
# Uncomment the pygmentize code lines to see the code
# Environmental File
#!pygmentize src/markov/environments/deepracer_racetrack_env.py
# Reward function
#!pygmentize src/markov/rewards/default.py
# Action space
#!pygmentize src/markov/actions/model_metadata_10_state.json
# Preset File
#!pygmentize src/markov/presets/default.py
#!pygmentize src/markov/presets/preset_attention_layer.py
```
### Copy custom files to S3 bucket so that sagemaker & robomaker can pick it up
```
# Destination prefix for the custom environment/reward/preset files.
s3_location = "s3://%s/%s" % (s3_bucket, s3_prefix)
print(s3_location)

# Clean up the previously uploaded files
!aws s3 rm --recursive {s3_location}

# Make any changes to the environment and preset files below and upload these files
# Environment file
!aws s3 cp src/markov/environments/deepracer_racetrack_env.py {s3_location}/environments/deepracer_racetrack_env.py

# Reward function
!aws s3 cp src/markov/rewards/default.py {s3_location}/rewards/reward_function.py

# Action space
!aws s3 cp src/markov/actions/model_metadata_10_state.json {s3_location}/model_metadata.json

# Preset file
!aws s3 cp src/markov/presets/default.py {s3_location}/presets/preset.py
#!aws s3 cp src/markov/presets/preset_attention_layer.py {s3_location}/presets/preset.py
```
### Train the RL model using the Python SDK Script mode
Next, we define the following algorithm metrics that we want to capture from cloudwatch logs to monitor the training progress. These are algorithm specific parameters and might change for different algorithm. We use [Clipped PPO](https://coach.nervanasys.com/algorithms/policy_optimization/cppo/index.html) for this example.
```
# CloudWatch metric definitions: regexes that extract training/evaluation
# numbers from the Coach log lines shown in the comments below.
metric_definitions = [
    # Training> Name=main_level/agent, Worker=0, Episode=19, Total reward=-102.88, Steps=19019, Training iteration=1
    {'Name': 'reward-training',
     'Regex': '^Training>.*Total reward=(.*?),'},

    # Policy training> Surrogate loss=-0.32664725184440613, KL divergence=7.255815035023261e-06, Entropy=2.83156156539917, training epoch=0, learning_rate=0.00025
    {'Name': 'ppo-surrogate-loss',
     'Regex': '^Policy training>.*Surrogate loss=(.*?),'},
    {'Name': 'ppo-entropy',
     'Regex': '^Policy training>.*Entropy=(.*?),'},

    # Testing> Name=main_level/agent, Worker=0, Episode=19, Total reward=1359.12, Steps=20015, Training iteration=2
    {'Name': 'reward-testing',
     'Regex': '^Testing>.*Total reward=(.*?),'},
]
```
We use the RLEstimator for training RL jobs.
1. Specify the source directory which has the environment file, preset and training code.
2. Specify the entry point as the training code
3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container.
4. Define the training parameters such as the instance count, instance type, job name, s3_bucket and s3_prefix for storing model checkpoints and metadata. **Only 1 training instance is supported for now.**
4. Set the RLCOACH_PRESET as "deepracer" for this example.
5. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.
```
# Launch the SageMaker RL training job. The hyperparameters point the
# training worker at the preset, model metadata and environment files
# uploaded to S3 above.
estimator = RLEstimator(entry_point="training_worker.py",
                        source_dir='src',
                        image_name=custom_image_name,
                        dependencies=["common/"],
                        role=sagemaker_role,
                        train_instance_type=instance_type,
                        train_instance_count=1,  # only 1 training instance is supported
                        output_path=s3_output_path,
                        base_job_name=job_name_prefix,
                        metric_definitions=metric_definitions,
                        train_max_run=job_duration_in_seconds,
                        hyperparameters={
                            "s3_bucket": s3_bucket,
                            "s3_prefix": s3_prefix,
                            "aws_region": aws_region,
                            "preset_s3_key": "%s/presets/preset.py"% s3_prefix,
                            "model_metadata_s3_key": "%s/model_metadata.json" % s3_prefix,
                            "environment_s3_key": "%s/environments/deepracer_racetrack_env.py" % s3_prefix,
                        },
                        subnets=deepracer_subnets,
                        security_group_ids=deepracer_security_groups,
                        )

# wait=False returns immediately; the job runs asynchronously.
estimator.fit(wait=False)
job_name = estimator.latest_training_job.job_name
print("Training job: %s" % job_name)
```
### Create the Kinesis video stream
```
# One Kinesis video stream per training job, retained for 24 hours.
kvs_stream_name = "dr-kvs-{}".format(job_name)

!aws --region {aws_region} kinesisvideo create-stream --stream-name {kvs_stream_name} --media-type video/h264 --data-retention-in-hours 24
print ("Created kinesis video stream {}".format(kvs_stream_name))
```
### Start the Robomaker job
```
# Client used to create and manage RoboMaker simulation jobs below.
robomaker = boto3.client("robomaker")
```
### Create Simulation Application
```
# S3 location of the simulation application bundle and the software suites
# (Gazebo 7 / ROS Kinetic / OGRE 1.x) it was built against.
robomaker_s3_key = 'robomaker/simulation_ws.tar.gz'
robomaker_source = {'s3Bucket': s3_bucket,
                    's3Key': robomaker_s3_key,
                    'architecture': "X86_64"}
simulation_software_suite={'name': 'Gazebo',
                           'version': '7'}
robot_software_suite={'name': 'ROS',
                      'version': 'Kinetic'}
rendering_engine={'name': 'OGRE',
                  'version': '1.x'}
```
Download the DeepRacer bundle provided by RoboMaker service and upload it in our S3 bucket to create a RoboMaker Simulation Application
```
# Download Robomaker simApp for the deepracer public s3 bucket
simulation_application_bundle_location = "s3://deepracer-managed-resources-us-east-1/deepracer-simapp.tar.gz"
#simulation_application_bundle_location = "s3://sahika-neuripschallenge-2019/deepracer-simapp.tar.gz"
!aws s3 cp {simulation_application_bundle_location} ./
# Remove if the Robomaker sim-app is present in s3 bucket
!aws s3 rm s3://{s3_bucket}/{robomaker_s3_key}
# Uploading the Robomaker SimApp to your S3 bucket
!aws s3 cp ./deepracer-simapp.tar.gz s3://{s3_bucket}/{robomaker_s3_key}
# Cleanup the locally downloaded version of SimApp
#!rm deepracer-simapp.tar.gz
app_name = "sahika-neuripschallenge-2019" + strftime("%y%m%d-%H%M%S", gmtime())
print(app_name)
try:
response = robomaker.create_simulation_application(name=app_name,
sources=[robomaker_source],
simulationSoftwareSuite=simulation_software_suite,
robotSoftwareSuite=robot_software_suite,
renderingEngine=rendering_engine)
simulation_app_arn = response["arn"]
print("Created a new simulation app with ARN:", simulation_app_arn)
except Exception as e:
if "AccessDeniedException" in str(e):
display(Markdown(generate_help_for_robomaker_all_permissions(role)))
raise e
else:
raise e
```
### Launch the Simulation job on RoboMaker
We create [AWS RoboMaker](https://console.aws.amazon.com/robomaker/home#welcome) Simulation Jobs that simulates the environment and shares this data with SageMaker for training.
```
num_simulation_workers = 1

# Environment variables handed to the RoboMaker simulation launch file.
# NOTE(review): the name "envriron_vars" is misspelled but kept unchanged;
# the evaluation cell later redefines it before reuse.
envriron_vars = {
    "WORLD_NAME": "reinvent_base",
    "KINESIS_VIDEO_STREAM_NAME": kvs_stream_name,
    "SAGEMAKER_SHARED_S3_BUCKET": s3_bucket,
    "SAGEMAKER_SHARED_S3_PREFIX": s3_prefix,
    "TRAINING_JOB_ARN": job_name,
    "APP_REGION": aws_region,
    "METRIC_NAME": "TrainingRewardScore",
    "METRIC_NAMESPACE": "AWSDeepRacer",
    "REWARD_FILE_S3_KEY": "%s/rewards/reward_function.py" % s3_prefix,
    "MODEL_METADATA_FILE_S3_KEY": "%s/model_metadata.json" % s3_prefix,
    "METRICS_S3_BUCKET": s3_bucket,
    "METRICS_S3_OBJECT_KEY": s3_bucket + "/training_metrics.json",
    "TARGET_REWARD_SCORE": "None",
    "NUMBER_OF_EPISODES": "0",
    "ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID": account_id
}

simulation_application = {"application": simulation_app_arn,
                          "launchConfig": {"packageName": "deepracer_simulation_environment",
                                           "launchFile": "distributed_training.launch",
                                           "environmentVariables": envriron_vars}
                          }

vpcConfig = {"subnets": deepracer_subnets,
             "securityGroups": deepracer_security_groups,
             "assignPublicIp": True}

client_request_token = strftime("%Y-%m-%d-%H-%M-%S", gmtime())

# Launch one simulation job per worker; failureBehavior="Continue" keeps
# the job running after an application failure.
responses = []
for job_no in range(num_simulation_workers):
    response = robomaker.create_simulation_job(iamRole=sagemaker_role,
                                               clientRequestToken=client_request_token,
                                               maxJobDurationInSeconds=job_duration_in_seconds,
                                               failureBehavior="Continue",
                                               simulationApplications=[simulation_application],
                                               vpcConfig=vpcConfig
                                               )
    responses.append(response)

print("Created the following jobs:")
job_arns = [response["arn"] for response in responses]
for response in responses:
    print("Job ARN", response["arn"])
```
### Visualizing the simulations in RoboMaker
You can visit the RoboMaker console to visualize the simulations or run the following cell to generate the hyperlinks.
```
display(Markdown(generate_robomaker_links(job_arns, aws_region)))
```
### Creating a temporary folder to plot metrics
```
# Local scratch directory for downloaded metrics. os.makedirs avoids
# spawning a shell and tolerates the directory already existing
# (os.system("mkdir ...") silently failed on re-runs).
tmp_dir = "/tmp/{}".format(job_name)
os.makedirs(tmp_dir, exist_ok=True)
print("Create local folder {}".format(tmp_dir))
```
### Plot metrics for training job
```
%matplotlib inline
import pandas as pd
import json

training_metrics_file = "training_metrics.json"
training_metrics_path = "{}/{}".format(s3_bucket, training_metrics_file)
# Block until the training job has written the metrics file to S3.
wait_for_s3_object(s3_bucket, training_metrics_path, tmp_dir)

json_file = "{}/{}".format(tmp_dir, training_metrics_file)
with open(json_file) as fp:
    data = json.load(fp)

df = pd.DataFrame(data['metrics'])
x_axis = 'episode'
y_axis = 'reward_score'

# NOTE: `plt` below is the pandas/matplotlib Axes object returned by
# DataFrame.plot, not the conventional matplotlib.pyplot alias.
plt = df.plot(x=x_axis, y=y_axis, figsize=(12,5), legend=True, style='b-')
plt.set_ylabel(y_axis);
plt.set_xlabel(x_axis);
```
### Clean up RoboMaker and SageMaker training job
Execute the cells below if you want to kill RoboMaker and SageMaker job.
```
# # Cancelling robomaker job
# for job_arn in job_arns:
# robomaker.cancel_simulation_job(job=job_arn)
# # Stopping sagemaker training job
# sage_session.sagemaker_client.stop_training_job(TrainingJobName=estimator._current_job_name)
```
### Evaluation - ReInvent Track
```
sys.path.append("./src")
# Evaluation only needs a single simulation worker.
num_simulation_workers = 1

# Environment variables for the evaluation run: load the trained model from
# S3 and run 5 evaluation trials on the reinvent_base track, writing metrics
# back to S3. (sic: the "envriron_vars" spelling matches the rest of the
# notebook and is kept for consistency.)
envriron_vars = {
    "WORLD_NAME": "reinvent_base",
    "KINESIS_VIDEO_STREAM_NAME": "SilverstoneStream",
    "MODEL_S3_BUCKET": s3_bucket,
    "MODEL_S3_PREFIX": s3_prefix,
    "APP_REGION": aws_region,
    "MODEL_METADATA_FILE_S3_KEY": "%s/model_metadata.json" % s3_prefix,
    "METRICS_S3_BUCKET": s3_bucket,
    "METRICS_S3_OBJECT_KEY": s3_bucket + "/evaluation_metrics.json",
    "NUMBER_OF_TRIALS": "5",
    "ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID": account_id
}

# Same simulation application as for training, but with the evaluation
# launch file.
simulation_application = {
    "application":simulation_app_arn,
    "launchConfig": {
        "packageName": "deepracer_simulation_environment",
        "launchFile": "evaluation.launch",
        "environmentVariables": envriron_vars
    }
}

# Networking for the evaluation job; public IP so it can reach AWS endpoints.
vpcConfig = {"subnets": deepracer_subnets,
             "securityGroups": deepracer_security_groups,
             "assignPublicIp": True}
# Launch the evaluation simulation job(s); a fresh clientRequestToken is
# generated per call so each create request is treated as a new job.
responses = []
for job_no in range(num_simulation_workers):
    response = robomaker.create_simulation_job(clientRequestToken=strftime("%Y-%m-%d-%H-%M-%S", gmtime()),
                                               outputLocation={
                                                   "s3Bucket": s3_bucket,
                                                   "s3Prefix": s3_prefix
                                               },
                                               maxJobDurationInSeconds=job_duration_in_seconds,
                                               iamRole=sagemaker_role,
                                               failureBehavior="Continue",
                                               simulationApplications=[simulation_application],
                                               vpcConfig=vpcConfig)
    responses.append(response)

# print("Created the following jobs:")
for response in responses:
    print("Job ARN", response["arn"])
```
### Creating a temporary folder to plot metrics
```
# Wait for the evaluation metrics file on S3 (key matches METRICS_S3_OBJECT_KEY
# of the evaluation job above) and download it into tmp_dir.
evaluation_metrics_file = "evaluation_metrics.json"
evaluation_metrics_path = "{}/{}".format(s3_bucket, evaluation_metrics_file)
wait_for_s3_object(s3_bucket, evaluation_metrics_path, tmp_dir)

json_file = "{}/{}".format(tmp_dir, evaluation_metrics_file)
with open(json_file) as fp:
    data = json.load(fp)

df = pd.DataFrame(data['metrics'])
# Converting milliseconds to seconds
df['elapsed_time'] = df['elapsed_time_in_milliseconds']/1000
# Keep only the columns of interest for the per-trial summary table.
df = df[['trial', 'completion_percentage', 'elapsed_time']]
display(df)
```
### Clean Up Simulation Application Resource
```
# robomaker.delete_simulation_application(application=simulation_app_arn)
```
### Clean your S3 bucket (Uncomment the awscli commands if you want to do it)
```
## Uncomment if you only want to clean the s3 bucket
# sagemaker_s3_folder = "s3://{}/{}".format(s3_bucket, s3_prefix)
# !aws s3 rm --recursive {sagemaker_s3_folder}
# robomaker_s3_folder = "s3://{}/{}".format(s3_bucket, job_name)
# !aws s3 rm --recursive {robomaker_s3_folder}
# robomaker_sim_app = "s3://{}/{}".format(s3_bucket, 'robomaker')
# !aws s3 rm --recursive {robomaker_sim_app}
# model_output = "s3://{}/{}".format(s3_bucket, s3_bucket)
# !aws s3 rm --recursive {model_output}
```
### Clean the docker images
Remove this only when you want to completely remove the docker or clean up the space of the sagemaker instance
```
# !docker rmi -f $(docker images -q)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/projects/modelingsteps/ModelingSteps_1through4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Modeling Steps 1 - 4
**By Neuromatch Academy**
__Content creators:__ Marius 't Hart, Megan Peters, Paul Schrater, Gunnar Blohm
__Content reviewers:__ Eric DeWitt, Tara van Viegen, Marius Pachitariu
__Production editors:__ Ella Batty
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
**Note that this is the same as W1D2 Tutorial 1 - we provide it here as well for ease of access.**
---
# Tutorial objectives
Yesterday you gained some understanding of what models can buy us in neuroscience. But how do you build a model? Today, we will try to clarify the process of computational modeling, by thinking through the logic of modeling based on your project ideas.
We assume that you have a general idea of a project in mind, i.e. a preliminary question, and/or phenomenon you would like to understand. You should have started developing a project idea yesterday with [this brainstorming demo](https://youtu.be/H6rSlZzlrgQ). Maybe you have a goal in mind. We will now work through the 4 first steps of modeling ([Blohm et al., 2019](https://doi.org/10.1523/ENEURO.0352-19.2019)):
**Framing the question**
1. finding a phenomenon and a question to ask about it
2. understanding the state of the art
3. determining the basic ingredients
4. formulating specific, mathematically defined hypotheses
The remaining steps 5-10 will be covered in a second notebook that you can consult throughout the modeling process when you work on your projects.
**Importantly**, we will guide you through Steps 1-4 today. After you do more work on projects, you will likely have to revisit some or all of these steps *before* you move on to the remaining steps of modeling.
**Note**: there will be no coding today. It's important that you think through the different steps of this how-to-model tutorial to maximize your chance of succeeding in your group projects. **Also**: "Models" here can be data analysis pipelines, not just computational models...
**Think! Sections**: All activities you should perform are labeled with **Think!**. These are discussion based exercises and can be found in the Table of Content on the left side of the notebook. Make sure you complete all within a section before moving on!
### Demos
We will demo the modeling process to you based on the train illusion. The introductory video will explain the phenomenon to you. Then we will do roleplay to showcase some common pitfalls to you based on a computational modeling project around the train illusion. In addition to the computational model, we will also provide a data neuroscience project example to you so you can appreciate similarities and differences.
Enjoy!
```
# @title Video 1: Introduction to tutorial
# Two-tab video player (YouTube / Bilibili) so the lecture is viewable from
# regions where YouTube is unavailable.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    # IFrame subclass that embeds a Bilibili video by its bvid.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1Mf4y1b7xS", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="GyGNs1fLIYQ", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
# Setup
```
# Imports
import numpy as np
import matplotlib.pyplot as plt
# for random distributions:
from scipy.stats import norm, poisson
# for logistic regression:
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
# @title Plotting Functions
def rasterplot(spikes,movement,trial):
    """Draw a spike raster for one trial of one movement condition.

    Args:
        spikes: 4d array [movements, trials, neurons, timepoints] of spike
            counts (assumed: 10 ms bins starting at -1.5 s, matching the
            generated data above — confirm against generateSpikeTrains).
        movement: index of the movement condition to plot.
        trial: index of the trial to plot.
    """
    # Only the neuron count is needed; the unused shape unpack and the
    # unused local `dt` from the original were removed.
    neurons = np.shape(spikes)[2]
    trial_spikes = spikes[movement,trial,:,:]
    # For each neuron, convert bins with >= 1 spike into event times in
    # seconds: bin 150 corresponds to t = 0 and bins are 1/100 s wide.
    trial_events = [((trial_spikes[x,:] > 0).nonzero()[0]-150)/100 for x in range(neurons)]
    plt.figure()
    plt.eventplot(trial_events, linewidths=1);
    plt.title('movement: %d - trial: %d'%(movement, trial))
    plt.ylabel('neuron')
    plt.xlabel('time [s]')
def plotCrossValAccuracies(accuracies):
    """Box-plot cross-validation accuracies with per-fold points overlaid.

    Args:
        accuracies: 1d array of per-fold test accuracies (fractions in [0, 1]).
    """
    f, ax = plt.subplots(figsize=(8, 3))
    ax.boxplot(accuracies, vert=False, widths=.7)
    # Scatter each fold's accuracy at y == 1 (the boxplot's position).
    # Bug fix: the original hard-coded np.ones(8), which breaks whenever the
    # number of folds is not exactly 8; size the y-array to the input instead.
    ax.scatter(accuracies, np.ones(len(accuracies)))
    ax.set(
        xlabel="Accuracy",
        yticks=[],
        title=f"Average test accuracy: {accuracies.mean():.2%}"
    )
    ax.spines["left"].set_visible(False)
#@title Generate Data
def generateSpikeTrains():
    """Simulate vestibular spike trains for three self-motion conditions.

    Returns:
        4d integer array [movements, repetitions, neurons, timepoints] of
        Poisson spike counts: 3 conditions x 800 trials x 50 neurons x
        301 time bins (10 ms bins spanning -1.5..1.5 s).
    """
    gain = 2
    neurons = 50
    movements = [0,1,2]  # condition labels double as acceleration multipliers
    repetitions = 800

    # Fixed seed so every run of the notebook produces the same dataset.
    np.random.seed(37)

    # set up the basic parameters:
    dt = 1/100
    start, stop = -1.5, 1.5
    t = np.arange(start, stop+dt, dt) # a time interval
    Velocity_sigma = 0.5 # std dev of the velocity profile
    Velocity_Profile = norm.pdf(t,0,Velocity_sigma)/norm.pdf(0,0,Velocity_sigma) # The Gaussian velocity profile, normalized to a peak of 1

    # set up the neuron properties:
    Gains = np.random.rand(neurons) * gain # random sensitivity between 0 and `gain`
    FRs = (np.random.rand(neurons) * 60 ) - 10 # random base firing rate between -10 and 50

    # output matrix will have this shape:
    target_shape = [len(movements), repetitions, neurons, len(Velocity_Profile)]

    # build matrix for spikes, first, they depend on the velocity profile:
    Spikes = np.repeat(Velocity_Profile.reshape([1,1,1,len(Velocity_Profile)]),len(movements)*repetitions*neurons,axis=2).reshape(target_shape)

    # multiplied by gains:
    S_gains = np.repeat(np.repeat(Gains.reshape([1,1,neurons]), len(movements)*repetitions, axis=1).reshape(target_shape[:3]), len(Velocity_Profile)).reshape(target_shape)
    Spikes = Spikes * S_gains

    # and multiplied by the movement:
    # (condition 0 zeroes the velocity-driven component, leaving only baseline)
    S_moves = np.repeat( np.array(movements).reshape([len(movements),1,1,1]), repetitions*neurons*len(Velocity_Profile), axis=3 ).reshape(target_shape)
    Spikes = Spikes * S_moves

    # on top of a baseline firing rate:
    S_FR = np.repeat(np.repeat(FRs.reshape([1,1,neurons]), len(movements)*repetitions, axis=1).reshape(target_shape[:3]), len(Velocity_Profile)).reshape(target_shape)
    Spikes = Spikes + S_FR

    # can not run the poisson random number generator on input lower than 0:
    Spikes = np.where(Spikes < 0, 0, Spikes)

    # so far, these were expected firing rates per second, correct for dt:
    Spikes = poisson.rvs(Spikes * dt)

    return(Spikes)
def subsetPerception(spikes):
    """Derive simulated self-motion judgements from spike trains.

    Fits a logistic regression on summed spike counts of the first `split`
    trials (motion vs no motion) and uses its predictions on the held-out
    trials as the "perception" ground truth.

    Args:
        spikes: 4d array [movements, repetitions, neurons, timepoints].

    Returns:
        dict with 'perception' (3 x held-out-trials array of 0/1 judgements)
        and 'spikes' (held-out trials of the first `subset` neurons).
    """
    movements = [0,1,2]
    split = 400   # trials used for fitting; the rest are returned as data
    subset = 40   # number of neurons kept in the returned spikes array
    hwin = 3      # half-window (in bins) around t=0; see NOTE below

    [num_movements, repetitions, neurons, timepoints] = np.shape(spikes)

    decision = np.zeros([num_movements, repetitions])

    # ground truth for logistic regression:
    y_train = np.repeat([0,1,1],split)
    y_test = np.repeat([0,1,1],repetitions-split)

    # NOTE(review): m_train/m_test are never used below (and m_test repeats
    # `split` rather than repetitions-split); likewise w_idx/w_0/w_1 (the
    # peak-acceleration window) are computed but unused — presumably left in
    # for the "window" analysis of Hypothesis 1. Confirm before relying on them.
    m_train = np.repeat(movements, split)
    m_test = np.repeat(movements, split)

    # reproduce the time points:
    dt = 1/100
    start, stop = -1.5, 1.5
    t = np.arange(start, stop+dt, dt)

    w_idx = list( (abs(t) < (hwin*dt)).nonzero()[0] )
    w_0 = min(w_idx)
    w_1 = max(w_idx)+1 # python...

    # get the total spike counts from stationary and movement trials:
    spikes_stat = np.sum( spikes[0,:,:,:], axis=2)
    spikes_move = np.sum( spikes[1:,:,:,:], axis=3)

    train_spikes_stat = spikes_stat[:split,:]
    train_spikes_move = spikes_move[:,:split,:].reshape([-1,neurons])

    test_spikes_stat = spikes_stat[split:,:]
    test_spikes_move = spikes_move[:,split:,:].reshape([-1,neurons])

    # data to use to predict y:
    x_train = np.concatenate((train_spikes_stat, train_spikes_move))
    x_test = np.concatenate(( test_spikes_stat, test_spikes_move))

    # this line creates a logistics regression model object, and immediately fits it:
    population_model = LogisticRegression(solver='liblinear', random_state=0).fit(x_train, y_train)

    # solver, one of: 'liblinear', 'newton-cg', 'lbfgs', 'sag', and 'saga'
    # some of those require certain other options
    #print(population_model.coef_)       # slope
    #print(population_model.intercept_)  # intercept

    # model predictions on held-out trials become the simulated judgements:
    ground_truth = np.array(population_model.predict(x_test))
    ground_truth = ground_truth.reshape([3,-1])

    output = {}
    output['perception'] = ground_truth
    output['spikes'] = spikes[:,split:,:subset,:]

    return(output)
def getData():
    """Build the example dataset: simulate vestibular spike trains, then
    derive self-motion judgements and return the subsetted data dict."""
    simulated_spikes = generateSpikeTrains()
    return subsetPerception(spikes=simulated_spikes)
# Generate the dataset once and unpack it: `perception` holds the 0/1
# self-motion judgements per condition, `spikes` the matching spike counts
# (see getData above).
dataset = getData()
perception = dataset['perception']
spikes = dataset['spikes']
```
----
# Step 1: Finding a phenomenon and a question to ask about it
```
# @title Video 2: Asking a question
# Two-tab video player (YouTube / Bilibili) so the lecture is viewable from
# regions where YouTube is unavailable.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    # IFrame subclass that embeds a Bilibili video by its bvid.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1VK4y1M7dc", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="4Gl8X_y_uoA", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Example projects step 1
from ipywidgets import widgets
from IPython.display import Markdown
markdown1 = '''
## Step 1
<br>
<font size='3pt'>
The train illusion occurs when sitting on a train and viewing another train outside the window. Suddenly, the other train *seems* to move, i.e. you experience visual motion of the other train relative to your train. But which train is actually moving?
Often people have the wrong percept. In particular, they think their own train might be moving when it's the other train that moves; or vice versa. The illusion is usually resolved once you gain vision of the surroundings that lets you disambiguate the relative motion; or if you experience strong vibrations indicating that it is indeed your own train that is in motion.
We asked the following (arbitrary) question for our demo project: "How do noisy vestibular estimates of motion lead to illusory percepts of self motion?"
</font>
'''
markdown2 = '''
## Step 1
<br>
<font size='3pt'>
The train illusion occurs when sitting on a train and viewing another train outside the window. Suddenly, the other train *seems* to move, i.e. you experience visual motion of the other train relative to your train. But which train is actually moving?
Often people mix this up. In particular, they think their own train might be moving when it's the other train that moves; or vice versa. The illusion is usually resolved once you gain vision of the surroundings that lets you disambiguate the relative motion; or if you experience strong vibrations indicating that it is indeed your own train that is in motion.
We assume that we have build the train illusion model (see the other example project colab). That model predicts that accumulated sensory evidence from vestibular signals determines the decision of whether self-motion is experienced or not. We now have vestibular neuron data (simulated in our case, but let's pretend) and would like to see if that prediction holds true.
The data contains *N* neurons and *M* trials for each of 3 motion conditions: no self-motion, slowly accelerating self-motion and faster accelerating self-motion. In our data,
*N* = 40 and *M* = 400.
**So we can ask the following question**: "Does accumulated vestibular neuron activity correlate with self-motion judgements?"
</font>
'''
# Show the two example-project write-ups side by side in tabs
# (tab 0: computational model, tab 1: data analysis).
out2 = widgets.Output()
with out2:
    display(Markdown(markdown2))

out1 = widgets.Output()
with out1:
    display(Markdown(markdown1))

out = widgets.Tab([out1, out2])
out.set_title(0, 'Computational Model')
out.set_title(1, 'Data Analysis')
display(out)
```
## Think! 1: Asking your own question
*Please discuss the following for about 25 min*
You should already have a project idea from your brainstorming yesterday. **Write down the phenomenon, question and goal(s) if you have them.**
As a reminder, here is what you should discuss and write down:
* What exact aspect of data needs modeling?
* Answer this question clearly and precisely!
Otherwise you will get lost (almost guaranteed)
* Write everything down!
* Also identify aspects of data that you do not want to address (yet)
* Define an evaluation method!
* How will you know your modeling is good?
* E.g. comparison to specific data (quantitative method of comparison?)
* For computational models: think of an experiment that could test your model
* You essentially want your model to interface with this experiment, i.e. you want to simulate this experiment
You can find interesting questions by looking for phenomena that differ from your expectations. In *what* way does it differ? *How* could that be explained (starting to think about mechanistic questions and structural hypotheses)? *Why* could it be the way it is? What experiment could you design to investigate this phenomenon? What kind of data would you need?
**Make sure to avoid the pitfalls!**
<details>
<summary>Click here for a recap on pitfalls</summary>
Question is too general
<ul>
<li>Remember: science advances one small step at the time. Get the small step right…</li>
</ul>
Precise aspect of phenomenon you want to model is unclear
<ul>
<li>You will fail to ask a meaningful question</li>
</ul>
You have already chosen a toolkit
<ul>
<li>This will prevent you from thinking deeply about the best way to answer your scientific question</li>
</ul>
You don’t have a clear goal
<ul>
<li>What do you want to get out of modeling?</li>
</ul>
You don’t have a potential experiment in mind
<ul>
<li>This will help concretize your objectives and think through the logic behind your goal</li>
</ul>
</details>
**Note**
The hardest part is Step 1. Once that is properly set up, all others should be easier. **BUT**: often you think that Step 1 is done only to figure out in later steps (anywhere really) that you were not as clear on your question and goal as you thought. Revisiting Step 1 is a frequent necessity. Don't feel bad about it. You can revisit Step 1 later; for now, let's move on to the next step.
----
# Step 2: Understanding the state of the art & background
Here you will do a literature review (**to be done AFTER this tutorial!**).
```
# @title Video 3: Literature Review & Background Knowledge
# Two-tab video player (YouTube / Bilibili) so the lecture is viewable from
# regions where YouTube is unavailable.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    # IFrame subclass that embeds a Bilibili video by its bvid.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1by4y1M7TZ", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="d8zriLaMc14", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Example projects step 2
from ipywidgets import widgets
from IPython.display import Markdown
markdown1 = '''
## Step 2
<br>
<font size='3pt'>
You have learned all about the vestibular system in the Intro video. This is also where you would do a literature search to learn more about what's known about self-motion perception and vestibular signals. You would also want to examine any attempts to model self-motion, perceptual decision making and vestibular processing.</font>
'''
markdown21 = '''
## Step 2
<br>
<font size='3pt'>
While it seems a well-known fact that vestibular signals are noisy, we should check if we can also find this in the literature.
Let's also see what's in our data, there should be a 4d array called `spikes` that has spike counts (positive integers), a 2d array called `perception` with self-motion judgements (0=no motion or 1=motion). Let's see what this data looks like:
</font><br>
'''
markdown22 = '''
<br>
<font size='3pt'>
In the `spikes` array, we see our 3 acceleration conditions (first dimension), with 400 trials each (second dimensions) and simultaneous recordings from 40 neurons (third dimension), across 3 seconds in 10 ms bins (fourth dimension). The first two dimensions are also there in the `perception` array.
Perfect perception would have looked like [0, 1, 1]. The average judgements are far from correct (lots of self-motion illusions) but they do make some sense: it's closer to 0 in the no-motion condition and closer to 1 in both of the real-motion conditions.
The idea of our project is that the vestibular signals are noisy so that they might be mis-interpreted by the brain. Let's see if we can reproduce the stimuli from the data:
</font>
<br>
'''
markdown23 = '''
<br>
<font size='3pt'>
Blue is the no-motion condition, and produces flat average spike counts across the 3 s time interval. The orange and green line do show a bell-shaped curve that corresponds to the acceleration profile. But there also seems to be considerable noise: exactly what we need. Let's see what the spike trains for a single trial look like:
</font>
<br>
'''
markdown24 = '''
<br>
<font size='3pt'>
You can change the trial number in the bit of code above to compare what the rasterplots look like in different trials. You'll notice that they all look kind of the same: the 3 conditions are very hard (impossible?) to distinguish by eye-balling.
Now that we have seen the data, let's see if we can extract self-motion judgements from the spike counts.
</font>
<br>
'''
# Empty Markdown render first; apparently needed for later math to display.
display(Markdown(r""))

# Data-analysis tab: interleave markdown narrative with data inspection
# (array shapes / means), the averaged spike-count plot, and rasterplots.
out2 = widgets.Output()
with out2:
    display(Markdown(markdown21))
    print(f'The shape of `spikes` is: {np.shape(spikes)}')
    print(f'The shape of `perception` is: {np.shape(perception)}')
    print(f'The mean of `perception` is: {np.mean(perception, axis=1)}')
    display(Markdown(markdown22))
    # Average spike counts over trials and neurons, per condition, across time.
    for move_no in range(3):
        plt.plot(np.arange(-1.5,1.5+(1/100),(1/100)),np.mean(np.mean(spikes[move_no,:,:,:], axis=0), axis=0), label=['no motion', '$1 m/s^2$', '$2 m/s^2$'][move_no])
    plt.xlabel('time [s]');
    plt.ylabel('averaged spike counts');
    plt.legend()
    plt.show()
    display(Markdown(markdown23))
    # One raster per condition, for the first trial of each.
    for move in range(3):
        rasterplot(spikes = spikes, movement = move, trial = 0)
    plt.show()
    display(Markdown(markdown24))

out1 = widgets.Output()
with out1:
    display(Markdown(markdown1))

out = widgets.Tab([out1, out2])
out.set_title(0, 'Computational Model')
out.set_title(1, 'Data Analysis')
display(out)
```
Here you will do a literature review (**to be done AFTER this tutorial!**). For the projects, do not spend too much time on this. A thorough literature review could take weeks or months depending on your prior knowledge of the field...
The important thing for your project here is not to exhaustively survey the literature but rather to learn the process of modeling. 1-2 days of digging into the literature should be enough!
**Here is what you should get out of it**:
* Survey the literature
* What’s known?
* What has already been done?
* Previous models as a starting point?
* What hypotheses have been emitted in the field?
* Are there any alternative / complementary modeling approaches?
* What skill sets are required?
* Do I need learn something before I can start?
* Ensure that no important aspect is missed
* Potentially provides specific data sets / alternative modeling approaches for comparison
**Do this AFTER the tutorial**
----
# Step 3: Determining the basic ingredients
```
# @title Video 4: Determining basic ingredients
# Two-tab video player (YouTube / Bilibili) so the lecture is viewable from
# regions where YouTube is unavailable.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    # IFrame subclass that embeds a Bilibili video by its bvid.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1Mq4y1x77s", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="XpEj-p7JkFE", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Example projects step 3
from ipywidgets import widgets
from IPython.display import Markdown, Math
markdown1 = r'''
## Step 3
<br>
<font size='3pt'>
We determined that we probably needed the following ingredients for our model:
* Vestibular input: *v(t)*
* Binary decision output: *d* - time dependent?
* Decision threshold: θ
* A filter (maybe running average?): *f*
* An integration mechanism to get from vestibular acceleration to sensed velocity: ∫
</font>
'''
markdown2 = '''
## Step 3
<br>
<font size='3pt'>
In order to address our question we need to design an appropriate computational data analysis pipeline. We did some brainstorming and think that we need to somehow extract the self-motion judgements from the spike counts of our neurons. Based on that, our algorithm needs to make a decision: was there self motion or not? This is a classical 2-choice classification problem. We will have to transform the raw spike data into the right input for the algorithm (spike pre-processing).
So we determined that we probably needed the following ingredients:
* spike trains *S* of 3-second trials (10ms spike bins)
* ground truth movement *m<sub>r</sub>* (real) and perceived movement *m<sub>p</sub>*
* some form of classifier *C* giving us a classification *c*
* spike pre-processing
</font>
'''
# No idea why this is necessary but math doesn't render properly without it
display(Markdown(r""))

# Show the two example-project write-ups side by side in tabs
# (tab 0: computational model, tab 1: data analysis).
out2 = widgets.Output()
with out2:
    display(Markdown(markdown2))

out1 = widgets.Output()
with out1:
    display(Markdown(markdown1))

out = widgets.Tab([out1, out2])
out.set_title(0, 'Computational Model')
out.set_title(1, 'Data Analysis')
display(out)
```
## Think! 3: Determine your basic ingredients
*Please discuss the following for about 25 min*
This will allow you to think deeper about what your modeling project will need. It's a crucial step before you can formulate hypotheses because you first need to understand what your modeling approach will need. There are 2 aspects you want to think about:
1. What parameters / variables are needed?
* Constants?
* Do they change over space, time, conditions…?
* What details can be omitted?
* Constraints, initial conditions?
* Model inputs / outputs?
2. Variables needed to describe the process to be modelled?
* Brainstorming!
* What can be observed / measured? latent variables?
* Where do these variables come from?
* Do any abstract concepts need to be instantiated as variables?
* E.g. value, utility, uncertainty, cost, salience, goals, strategy, plant, dynamics
* Instantiate them so that they relate to potential measurements!
This is a step where your prior knowledge and intuition is tested. You want to end up with an inventory of *specific* concepts and/or interactions that need to be instantiated.
**Make sure to avoid the pitfalls!**
<details>
<summary>Click here for a recap on pitfalls</summary>
I’m experienced, I don’t need to think about ingredients anymore
<ul>
<li>Or so you think…</li>
</ul>
I can’t think of any ingredients
<ul>
<li>Think about the potential experiment. What are your stimuli? What parameters? What would you control? What do you measure?</li>
</ul>
I have all inputs and outputs
<ul>
<li>Good! But what will link them? Thinking about that will start shaping your model and hypotheses</li>
</ul>
I can’t think of any links (= mechanisms)
<ul>
<li>You will acquire a library of potential mechanisms as you keep modeling and learning</li>
<li>But the literature will often give you hints through hypotheses</li>
<li>If you still can't think of links, then maybe you're missing ingredients?</li>
</ul>
</details>
----
# Step 4: Formulating specific, mathematically defined hypotheses
```
# @title Video 5: Formulating a hypothesis
# Two-tab video player (YouTube / Bilibili) so the lecture is viewable from
# regions where YouTube is unavailable.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    # IFrame subclass that embeds a Bilibili video by its bvid.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1fh411h7aX", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="nHXMSXLcd9A", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# @title Example projects step 4
from ipywidgets import widgets
from IPython.display import Markdown
# Not writing in latex because that didn't render in jupyterbook
markdown1 = r'''
## Step 4
<br>
<font size='3pt'>
Our main hypothesis is that the strength of the illusion has a linear relationship to the amplitude of vestibular noise.
Mathematically, this would write as
<div align="center">
<em>S</em> = <em>k</em> ⋅ <em>N</em>
</div>
where *S* is the illusion strength and *N* is the noise level, and *k* is a free parameter.
> we could simply use the frequency of occurrence across repetitions as the "strength of the illusion"
We would get the noise as the standard deviation of *v(t)*, i.e.
<div align="center">
<em>N</em> = <b>E</b>[<em>v(t)</em><sup>2</sup>],
</div>
where **E** stands for the expected value.
Do we need to take the average across time points?
> doesn't really matter because we have the generative process, so we can just use the σ that we define
</font>
'''
markdown2 = '''
## Step 4
<br>
<font size='3pt'>
We think that noise in the signal drives whether or not people perceive self motion. Maybe the brain uses the strongest signal at peak acceleration to decide on self motion, but we actually think it is better to accumulate evidence over some period of time. We want to test this. The noise idea also means that when the signal-to-noise ratio is higher, the brain does better, and this would be in the faster acceleration condition. We want to test this too.
We came up with the following hypotheses focussing on specific details of our overall research question:
* Hyp 1: Accumulated vestibular spike rates explain self-motion judgements better than average spike rates around peak acceleration.
* Hyp 2: Classification performance should be better for faster vs slower self-motion.
> There are many other hypotheses you could come up with, but for simplicity, let's go with those.
Mathematically, we can write our hypotheses as follows (using our above ingredients):
* Hyp 1: **E**(c<sub>accum</sub>) > **E**(c<sub>win</sub>)
* Hyp 2: **E**(c<sub>fast</sub>) > **E**(c<sub>slow</sub>)
Where **E** denotes taking the expected value (in this case the mean) of its argument: classification outcome in a given trial type.
</font>
'''
# No idea why this is necessary but math doesn't render properly without it
display(Markdown(r""))

# Show the two example-project write-ups side by side in tabs
# (tab 0: computational model, tab 1: data analysis).
out2 = widgets.Output()
with out2:
    display(Markdown(markdown2))

out1 = widgets.Output()
with out1:
    display(Markdown(markdown1))

out = widgets.Tab([out1, out2])
out.set_title(0, 'Computational Model')
out.set_title(1, 'Data Analysis')
display(out)
```
## Think! 4: Formulating your hypothesis
*Please discuss the following for about 25 min*
Once you have your question and goal lined up, you have done a literature review (let's assume for now) and you have thought about ingredients needed for your model, you're now ready to start thinking about *specific* hypotheses.
Formulating hypotheses really consists of two consecutive steps:
1. You think about the hypotheses in words by relating ingredients identified in Step 3
* What is the model mechanism expected to do?
* How are different parameters expected to influence model results?
2. You then express these hypotheses in mathematical language by giving the ingredients identified in Step 3 specific variable names.
* Be explicit, e.g. $y(t)=f(x(t),k)$ but $z(t)$ doesn’t influence $y$
There are also "structural hypotheses" that make assumptions on what model components you hypothesize will be crucial to capture the phenomenon at hand.
**Important**: Formulating the hypotheses is the last step before starting to model. This step determines the model approach and ingredients. It provides a more detailed description of the question / goal from Step 1. The more precise the hypotheses, the easier the model will be to justify.
**Make sure to avoid the pitfalls!**
<details>
<summary>Click here for a recap on pitfalls</summary>
I don’t need hypotheses, I will just play around with the model
<ul>
<li>Hypotheses help determine and specify goals. You can (and should) still play…</li>
</ul>
My hypotheses don’t match my question (or vice versa)
<ul>
<li>This is a normal part of the process!</li>
<li>You need to loop back to Step 1 and revisit your question / phenomenon / goals</li>
</ul>
I can’t write down a math hypothesis
<ul>
<li>Often that means you lack ingredients and/or clarity on the hypothesis</li>
<li>OR: you have a “structural” hypothesis, i.e. you expect a certain model component to be crucial in explaining the phenomenon / answering the question</li>
</ul>
</details>
----
# Summary
In this tutorial, we worked through some steps of the process of modeling.
- We defined a phenomenon and formulated a question (step 1)
- We collected information about the state of the art on the topic (step 2)
- We determined the basic ingredients (step 3), and used these to formulate a specific mathematically defined hypothesis (step 4)
You are now in a position that you could start modeling without getting lost. But remember: you might have to work through steps 1-4 again after doing a literature review and/or if there were other pitfalls you identified along the way (which is totally normal).
----
# Next steps
In [a follow-up notebook](https://compneuro.neuromatch.io/projects/modelingsteps/ModelingSteps_5through10.html), we will continue with the steps 5-10 to guide you through the implementation and completion stages of the projects. You can also find this in the Modeling Steps section of the Project Booklet.
----
# Reading
Blohm G, Kording KP, Schrater PR (2020). _A How-to-Model Guide for Neuroscience_. eNeuro, 7(1) ENEURO.0352-19.2019. https://doi.org/10.1523/ENEURO.0352-19.2019
Kording KP, Blohm G, Schrater P, Kay K (2020). _Appreciating the variety of goals in computational neuroscience_. Neurons, Behavior, Data Analysis, and Theory 3(6). https://nbdt.scholasticahq.com/article/16723-appreciating-the-variety-of-goals-in-computational-neuroscience
Schrater PR, Peters MK, Kording KP, Blohm G (2019). _Modeling in Neuroscience as a Decision Process_. OSF pre-print. https://osf.io/w56vt/
| github_jupyter |
# Artificial Intelligence Nanodegree
## Convolutional Neural Networks
---
In this notebook, we train a CNN to classify images from the CIFAR-10 database.
### 1. Load CIFAR-10 Database
```
import keras
from keras.datasets import cifar10
# load the pre-shuffled train and test data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
```
### 2. Visualize the First 36 Training Images
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# show the first 36 training images in a 3 x 12 grid, with axis ticks suppressed
fig = plt.figure(figsize=(20,5))
for i in range(36):
ax = fig.add_subplot(3, 12, i + 1, xticks=[], yticks=[])
# np.squeeze drops any singleton axes so imshow accepts the image array
ax.imshow(np.squeeze(x_train[i]))
```
### 3. Rescale the Images by Dividing Every Pixel in Every Image by 255
```
# rescale [0,255] --> [0,1]
# cast to float32 first so the division yields single-precision floats
x_train = x_train.astype('float32')/255
x_test = x_test.astype('float32')/255
```
### 4. Break Dataset into Training, Testing, and Validation Sets
```
from keras.utils import np_utils
# NOTE(review): np_utils is imported but unused below --
# keras.utils.to_categorical is called directly instead.
# one-hot encode the labels (num_classes inferred from distinct label values)
num_classes = len(np.unique(y_train))
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# break training set into training and validation sets
# (first 5000 images become the validation split; the rest stay for training)
(x_train, x_valid) = x_train[5000:], x_train[:5000]
(y_train, y_valid) = y_train[5000:], y_train[:5000]
# print shape of training set
print('x_train shape:', x_train.shape)
# print number of training, validation, and test images
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print(x_valid.shape[0], 'validation samples')
```
### 5. Define the Model Architecture
```
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation
# My modified model: four conv stages (32 -> 64 -> 128 -> 256 filters, all 4x4
# kernels with 'same' padding) with progressively larger dropout, followed by
# a 512-unit dense head and a softmax output.
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=4, padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(filters=64, kernel_size=4, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.15))
model.add(Conv2D(filters=128, kernel_size=4, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=4))
model.add(Dropout(0.25))
# NOTE(review): unlike the earlier conv layers, this one is not followed by an
# Activation -- looks like a missing relu; confirm whether a linear conv is intended.
model.add(Conv2D(filters=256, kernel_size=4, padding='same'))
model.add(MaxPooling2D(pool_size=4))
model.add(Dropout(0.35))
# model.add(Conv2D(filters=512, kernel_size=4, padding='same'))
# NOTE(review): with the 512-filter conv commented out, Dropout(0.35) and
# Dropout(0.45) run back to back, compounding the effective drop rate.
model.add(Dropout(0.45))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.55))
# output layer: one unit per class; softmax pairs with categorical_crossentropy
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# Original model
# model = Sequential()
# model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu',
# input_shape=(32, 32, 3)))
# model.add(MaxPooling2D(pool_size=2))
# model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
# model.add(MaxPooling2D(pool_size=2))
# model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
# model.add(MaxPooling2D(pool_size=2))
# model.add(Dropout(0.3))
# model.add(Flatten())
# model.add(Dense(500, activation='relu'))
# model.add(Dropout(0.4))
# model.add(Dense(10, activation='softmax'))
model.summary()
```
### 6. Compile the Model
```
# compile the model: categorical_crossentropy matches the one-hot encoded
# labels produced earlier; accuracy is tracked as a reporting metric
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
metrics=['accuracy'])
```
### 7. Train the Model
```
from keras.callbacks import ModelCheckpoint
# train the model, checkpointing weights whenever the monitored metric improves.
# NOTE(review): ModelCheckpoint's default monitor is val_loss, not val_acc, even
# though the surrounding text says "best validation accuracy" -- pass
# monitor='val_acc' if accuracy is really what is wanted.
checkpointer = ModelCheckpoint(filepath='model.weights.best.hdf5', verbose=1,
save_best_only=True)
hist = model.fit(x_train, y_train, batch_size=32, epochs=100,
validation_data=(x_valid, y_valid), callbacks=[checkpointer],
verbose=2, shuffle=True)
```
### 8. Load the Model with the Best Validation Accuracy
```
# load the checkpointed weights -- with the default ModelCheckpoint settings
# these are the weights with the lowest validation *loss*, not necessarily the
# best validation accuracy (see the training cell's NOTE)
model.load_weights('model.weights.best.hdf5')
```
### 9. Calculate Classification Accuracy on Test Set
```
# evaluate and print test accuracy
# model.evaluate returns [loss, accuracy] given the metrics configured at
# compile time, so score[1] is the test-set accuracy
score = model.evaluate(x_test, y_test, verbose=0)
print('\n', 'Test accuracy:', score[1])
```
### 10. Visualize Some Predictions
This may give you some insight into why the network is misclassifying certain objects.
```
# get predictions on the test set (one softmax probability vector per image)
y_hat = model.predict(x_test)
# define text labels (source: https://www.cs.toronto.edu/~kriz/cifar.html)
cifar10_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# plot a random sample of test images, their predicted labels, and ground truth
fig = plt.figure(figsize=(20, 8))
# 32 distinct random test images laid out in a 4 x 8 grid
for i, idx in enumerate(np.random.choice(x_test.shape[0], size=32, replace=False)):
ax = fig.add_subplot(4, 8, i + 1, xticks=[], yticks=[])
ax.imshow(np.squeeze(x_test[idx]))
# argmax over the probability / one-hot vectors gives the class indices
pred_idx = np.argmax(y_hat[idx])
true_idx = np.argmax(y_test[idx])
# title format: "predicted (true)"; green when correct, red when wrong
ax.set_title("{} ({})".format(cifar10_labels[pred_idx], cifar10_labels[true_idx]),
color=("green" if pred_idx == true_idx else "red"))
```
| github_jupyter |
```
from __future__ import division, print_function, absolute_import
```
# Introduction to Visualization:
Density Estimation and Data Exploration
========
##### Version 0.1
There are many flavors of data analysis that fall under the "visualization" umbrella in astronomy. Today, by way of example, we will focus on 2 basic problems.
***
By AA Miller
16 September 2017
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## Problem 1) Density Estimation
Starting with 2MASS and SDSS and extending through LSST, we are firmly in an era where data and large statistical samples are cheap. With this explosion in data volume comes a problem: we do not know the underlying probability density function (PDF) of the random variables measured via our observations. Hence - density estimation: an attempt to recover the unknown PDF from observations. In some cases theory can guide us to a parametric form for the PDF, but more often than not such guidance is not available.
There is a common, simple, and very familiar tool for density estimation: histograms.
But there is also a problem:
HISTOGRAMS LIE!
We will "prove" this to be the case in a series of examples. For this exercise, we will load the famous Linnerud data set, which tested 20 middle aged men by measuring the number of chinups, situps, and jumps they could do in order to compare these numbers to their weight, pulse, and waist size. To load the data (just chinups for now) we will run the following:
from sklearn.datasets import load_linnerud
linnerud = load_linnerud()
chinups = linnerud.data[:,0]
```
from sklearn.datasets import load_linnerud
linnerud = load_linnerud()
# column 0 of the exercise data holds the chinup count for each subject
chinups = linnerud.data[:,0]
```
**Problem 1a**
Plot the histogram for the number of chinups using the default settings in pyplot.
```
plt.hist( # complete
```
Already with this simple plot we see a problem - the choice of bin centers and number of bins suggest that there is a 0% probability that middle aged men can do 10 chinups. Intuitively this seems incorrect, so lets examine how the histogram changes if we change the number of bins or the bin centers.
**Problem 1b**
Using the same data make 2 new histograms: (i) one with 5 bins (`bins = 5`), and (ii) one with the bars centered on the left bin edges (`align = "left"`).
*Hint - if overplotting the results, you may find it helpful to use the `histtype = "step"` option*
```
plt.hist( # complete
# complete
```
These small changes significantly change the output PDF. With fewer bins we get something closer to a continuous distribution, while shifting the bin centers reduces the probability to zero at 9 chinups.
What if we instead allow the bin width to vary and require the same number of points in each bin? You can determine the bin edges for bins with 5 sources using the following command:
bins = np.append(np.sort(chinups)[::5], np.max(chinups))
**Problem 1c**
Plot a histogram with variable width bins, each with the same number of points.
*Hint - setting `density = True` (called `normed = True` in older matplotlib releases) will normalize the bin heights so that the PDF integrates to 1.*
```
# complete
plt.hist(# complete
```
*Ending the lie*
Earlier I stated that histograms lie. One simple way to combat this lie: show all the data. Displaying the original data points allows viewers to somewhat intuit the effects of the particular bin choices that have been made (though this can also be cumbersome for very large data sets, which these days is essentially all data sets). The standard for showing individual observations relative to a histogram is a "rug plot," which shows a vertical tick (or other symbol) at the location of each source used to estimate the PDF.
**Problem 1d** Execute the cell below to see an example of a rug plot.
```
plt.hist(chinups, histtype = 'step')
# this is the code for the rug plot: the '|' marker draws a vertical tick at
# y=0 for every observation (ms/mew set the tick length and thickness)
plt.plot(chinups, np.zeros_like(chinups), '|', color='k', ms = 25, mew = 4)
```
Of course, even rug plots are not a perfect solution. Many of the chinup measurements are repeated, and those instances cannot be easily isolated above. One (slightly) better solution is to vary the transparency of the rug "whiskers" using `alpha = 0.3` in the whiskers plot call. But this too is far from perfect.
To recap, histograms are not ideal for density estimation for the following reasons:
* They introduce discontinuities that are not present in the data
* They are strongly sensitive to user choices ($N_\mathrm{bins}$, bin centering, bin grouping), without any mathematical guidance to what these choices should be
* They are difficult to visualize in higher dimensions
Histograms are useful for generating a quick representation of univariate data, but for the reasons listed above they should never be used for analysis. Most especially, functions should not be fit to histograms given how greatly the number of bins and bin centering affects the output histogram.
Okay - so if we are going to rail on histograms this much, there must be a better option. There is: [Kernel Density Estimation](https://en.wikipedia.org/wiki/Kernel_density_estimation) (KDE), a nonparametric form of density estimation whereby a normalized kernel function is convolved with the discrete data to obtain a continuous estimate of the underlying PDF. As a rule, the kernel must integrate to 1 over the interval $-\infty$ to $\infty$ and be symmetric. There are many possible kernels (gaussian is highly popular, though Epanechnikov, an inverted parabola, produces the minimal mean square error).
KDE is not completely free of the problems we illustrated for histograms above (in particular, both a kernel and the width of the kernel need to be selected), but it does manage to correct a number of the ills. We will now demonstrate this via a few examples using the `scikit-learn` implementation of KDE: [`KernelDensity`](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html#sklearn.neighbors.KernelDensity), which is part of the [`sklearn.neighbors`](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.neighbors) module.
*Note* There are many implementations of KDE in Python, and Jake VanderPlas has put together [an excellent description of the strengths and weaknesses of each](https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/). We will use the `scitkit-learn` version as it is in many cases the fastest implementation.
To demonstrate the basic idea behind KDE, we will begin by representing each point in the dataset as a block (i.e. we will adopt the tophat kernel). Borrowing some code from Jake, we can estimate the KDE using the following code:
from sklearn.neighbors import KernelDensity
def kde_sklearn(data, grid, bandwidth = 1.0, **kwargs):
kde_skl = KernelDensity(bandwidth = bandwidth, **kwargs)
kde_skl.fit(data[:, np.newaxis])
log_pdf = kde_skl.score_samples(grid[:, np.newaxis]) # sklearn returns log(density)
return np.exp(log_pdf)
The two main options to set are the bandwidth and the kernel.
```
# execute this cell
from sklearn.neighbors import KernelDensity
def kde_sklearn(data, grid, bandwidth=1.0, **kwargs):
    """Evaluate a kernel density estimate of `data` at the points in `grid`.

    Parameters
    ----------
    data : 1-D array of observations
    grid : 1-D array of positions at which to evaluate the estimated PDF
    bandwidth : kernel bandwidth forwarded to sklearn's KernelDensity
    **kwargs : extra KernelDensity options (e.g. kernel='tophat')

    Returns
    -------
    1-D array of PDF values on `grid`.
    """
    estimator = KernelDensity(bandwidth=bandwidth, **kwargs)
    estimator.fit(data[:, np.newaxis])
    # score_samples returns log(density); exponentiate to recover the PDF
    return np.exp(estimator.score_samples(grid[:, np.newaxis]))
```
**Problem 1e**
Plot the KDE of the PDF for the number of chinups middle aged men can do using a bandwidth of 0.1 and a tophat kernel.
*Hint - as a general rule, the grid should be smaller than the bandwidth when plotting the PDF.*
```
grid = # complete
PDFtophat = kde_sklearn( # complete
plt.plot( # complete
```
In this representation, each "block" has a height of 0.25. The bandwidth is too narrow to provide any overlap between the blocks. This choice of kernel and bandwidth produces an estimate that is essentially a histogram with a large number of bins. It gives no sense of continuity for the distribution. Now, we examine the difference (relative to histograms) upon changing the width (i.e. the bandwidth) of the blocks.
**Problem 1f**
Plot the KDE of the PDF for the number of chinups middle aged men can do using bandwidths of 1 and 5 and a tophat kernel. How do the results differ from the histogram plots above?
```
PDFtophat1 = # complete
# complete
# complete
# complete
```
It turns out blocks are not an ideal representation for continuous data (see discussion on histograms above). Now we will explore the resulting PDF from other kernels.
**Problem 1g** Plot the KDE of the PDF for the number of chinups middle aged men can do using a gaussian and Epanechnikov kernel. How do the results differ from the histogram plots above?
*Hint - you will need to select the bandwidth. The examples above should provide insight into the useful range for bandwidth selection. You may need to adjust the values to get an answer you "like."*
```
PDFgaussian = # complete
PDFepanechnikov = # complete
```
So, what is the *optimal* choice of bandwidth and kernel? Unfortunately, there is no hard and fast rule, as every problem will likely have a different optimization. Typically, the choice of bandwidth is far more important than the choice of kernel. In the case where the PDF is likely to be gaussian (or close to gaussian), then [Silverman's rule of thumb](https://en.wikipedia.org/wiki/Kernel_density_estimation#A_rule-of-thumb_bandwidth_estimator) can be used:
$$h = 1.059 \sigma n^{-1/5}$$
where $h$ is the bandwidth, $\sigma$ is the standard deviation of the samples, and $n$ is the total number of samples. Note - in situations with bimodal or more complicated distributions, this rule of thumb can lead to woefully inaccurate PDF estimates. The most general way to estimate the choice of bandwidth is via cross validation (we will cover cross-validation later today).
*What about multidimensional PDFs?* It is possible using many of the Python implementations of KDE to estimate multidimensional PDFs, though it is very very important to beware the curse of dimensionality in these circumstances.
## Problem 2) Data Exploration
Now a more open ended topic: data exploration. In brief, data exploration encompasses a large suite of tools (including those discussed above) to examine data that live in large dimensional spaces. There is no single best method or optimal direction for data exploration. Instead, today we will introduce some of the tools available via python.
As an example we will start with a basic line plot - and examine tools beyond `matplotlib`.
```
x = np.arange(0, 6*np.pi, 0.1)
y = np.cos(x)
plt.plot(x,y, lw = 2)
plt.xlabel('X')
plt.ylabel('Y')
plt.xlim(0, 6*np.pi)
```
### Seaborn
[`Seaborn`](https://stanford.edu/~mwaskom/software/seaborn/index.html) is a plotting package that enables many useful features for exploration. In fact, a lot of the functionality that we developed above can readily be handled with `seaborn`.
To begin, we will make the same plot that we created in matplotlib.
```
import seaborn as sns
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x,y, lw = 2)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_xlim(0, 6*np.pi)
```
These plots look identical, but it is possible to change the style with `seaborn`.
`seaborn` has 5 style presets: `darkgrid`, `whitegrid`, `dark`, `white`, and `ticks`. You can change the preset using the following:
sns.set_style("whitegrid")
which will change the output for all subsequent plots. Note - if you want to change the style for only a single plot, that can be accomplished with the following:
with sns.axes_style("dark"):
with all plotting commands inside the `with` statement.
**Problem 3a**
Re-plot the sine curve using each `seaborn` preset to see which you like best - then adopt this for the remainder of the notebook.
```
sns.set_style( # complete
# complete
```
The folks behind `seaborn` have thought a lot about color palettes, which is a good thing. Remember - the choice of color for plots is one of the most essential aspects of visualization. A poor choice of colors can easily mask interesting patterns or suggest structure that is not real. To learn more about what is available, see the [`seaborn` color tutorial](http://stanford.edu/~mwaskom/software/seaborn/tutorial/color_palettes.html).
Here we load the default:
```
# default color palette
current_palette = sns.color_palette()
sns.palplot(current_palette)
```
which we will now change to `colorblind`, which is clearer to those that are colorblind.
```
# set palette to colorblind
sns.set_palette("colorblind")
current_palette = sns.color_palette()
sns.palplot(current_palette)
```
Now that we have covered the basics of `seaborn` (and the above examples truly only scratch the surface of what is possible), we will explore the power of `seaborn` for higher dimension data sets. We will load the famous Iris data set, which measures 4 different features of 3 different types of Iris flowers. There are 150 different flowers in the data set.
*Note - for those familiar with `pandas` `seaborn` is designed to integrate easily and directly with `pandas DataFrame` objects. In the example below the Iris data are loaded into a `DataFrame`. `iPython` notebooks also display the `DataFrame` data in a nice readable format.*
```
iris = sns.load_dataset("iris")
iris
```
Now that we have a sense of the data structure, it is useful to examine the distribution of features. Above, we went to great pains to produce histograms, KDEs, and rug plots. `seaborn` handles all of that effortlessly with the `distplot` function.
**Problem 3b**
Plot the distribution of petal lengths for the Iris data set.
```
# note - hist, kde, and rug all set to True, set to False to turn them off
with sns.axes_style("dark"):
sns.distplot(iris['petal_length'], bins=20, hist=True, kde=True, rug=True)
```
Of course, this data set lives in a 4D space, so plotting more than univariate distributions is important (and as we will see tomorrow this is particularly useful for visualizing classification results). Fortunately, `seaborn` makes it very easy to produce handy summary plots.
At this point, we are familiar with basic scatter plots in matplotlib.
**Problem 3c**
Make a matplotlib scatter plot showing the Iris petal length against the Iris petal width.
```
plt.scatter( # complete
```
Of course, when there are many many data points, scatter plots become difficult to interpret. As in the example below:
```
with sns.axes_style("darkgrid"):
xexample = np.random.normal(loc = 0.2, scale = 1.1, size = 10000)
yexample = np.random.normal(loc = -0.1, scale = 0.9, size = 10000)
plt.scatter(xexample, yexample)
```
Here, we see that there are many points, clustered about the origin, but we have no sense of the underlying density of the distribution. 2D histograms, such as `plt.hist2d()`, can alleviate this problem. I prefer to use `plt.hexbin()` which is a little easier on the eyes (though note - these histograms are just as subject to the same issues discussed above).
```
# hexbin w/ bins = "log" returns the log of counts/bin
# mincnt = 1 displays only hexpix with at least 1 source present
with sns.axes_style("darkgrid"):
plt.hexbin(xexample, yexample, bins = "log", cmap = "viridis", mincnt = 1)
plt.colorbar()
```
While the above plot provides a significant improvement over the scatter plot by providing a better sense of the density near the center of the distribution, the binedge effects are clearly present. An even better solution, like before, is a density estimate, which is easily built into `seaborn` via the `kdeplot` function.
```
with sns.axes_style("darkgrid"):
sns.kdeplot(xexample, yexample,shade=False)
```
This plot is much more appealing (and informative) than the previous two. For the first time we can clearly see that the distribution is not actually centered on the origin. Now we will move back to the Iris data set.
Suppose we want to see univariate distributions in addition to the scatter plot? This is certainly possible with `matplotlib` and you can find examples on the web, however, with `seaborn` this is really easy.
```
sns.jointplot(x=iris['petal_length'], y=iris['petal_width'])
```
But! Histograms and scatter plots can be problematic as we have discussed many times before.
**Problem 3d**
Re-create the plot above but set `kind='kde'` to produce density estimates of the distributions.
```
sns.jointplot( # complete
```
That is much nicer than what was presented above. However - we still have a problem in that our data live in 4D, but we are (mostly) limited to 2D projections of that data. One way around this is via the `seaborn` version of a `pairplot`, which plots the distribution of every variable in the data set against each other. (Here is where the integration with `pandas DataFrame`s becomes so powerful.)
```
sns.pairplot(iris[["sepal_length", "sepal_width", "petal_length", "petal_width"]])
```
For data sets where we have classification labels, we can even color the various points using the `hue` option, and produce KDEs along the diagonal with `diag_type = 'kde'`.
```
sns.pairplot(iris, vars = ["sepal_length", "sepal_width", "petal_length", "petal_width"],
hue = "species", diag_kind = 'kde')
```
Even better - there is an option to create a `PairGrid` which allows fine tuned control of the data as displayed above, below, and along the diagonal. In this way it becomes possible to avoid having symmetric redundancy, which is not all that informative. In the example below, we will show scatter plots and contour plots simultaneously.
```
g = sns.PairGrid(iris, vars = ["sepal_length", "sepal_width", "petal_length", "petal_width"],
hue = "species", diag_sharey=False)
g.map_lower(sns.kdeplot)
g.map_upper(plt.scatter, edgecolor='white')
g.map_diag(sns.kdeplot, lw=3)
```
Note - one disadvantage to the plot above is that the contours do not share the same color scheme as the KDE estimates and the scatter plot. I have not been able to figure out how to change this in a satisfactory way. (One potential solution is detailed [here](http://stackoverflow.com/questions/32889590/seaborn-pairgrid-using-kdeplot-with-2-hues), however, it is worth noting that this solution restricts your color choices to a maximum of ~5 unless you are a colormaps wizard, and I am not.)
| github_jupyter |
# Part 9: Train an Encrypted NN on Encrypted Data
In this notebook, we're going to use all the techniques we've learned thus far to perform neural network training (and prediction) while both the model and the data are encrypted.
Note that Autograd is not *yet* supported for encrypted variables, thus we'll have to roll our own gradients ourselves. This functionality will be added in the next PySyft version.
Authors:
- Andrew Trask - Twitter: [@iamtrask](https://twitter.com/iamtrask)
# Step 1: Create Workers and Toy Data
```
import syft as sy
# hook PySyft into torch and set up the local worker plus two virtual workers
hook = sy.TorchHook(verbose=True)
me = hook.local_worker
me.is_client_worker = False
bob = sy.VirtualWorker(id="bob", hook=hook, is_client_worker=False)
alice = sy.VirtualWorker(id="alice", hook=hook, is_client_worker=False)
# create our dataset: each target equals the first input feature, so a single
# linear layer can fit it exactly
data = sy.FloatTensor([[0,0],[0,1],[1,0],[1,1]])
target = sy.FloatTensor([[0],[0],[1],[1]])
# model: a 2x1 weight matrix initialised to zeros
model = sy.zeros(2,1)
data
```
# Step 2: Encrypt the Model and Data
Encryption here comes in two steps. Since Secure Multi-Party Computation only works on Longs, in order to operate over numbers with decimal points (such as weights and activations), we need to encode all of our numbers using Fixed Precision, which will give us several bits of floating point precision. We do this by calling .fix_precision().
We can then call .share() as we have for other demos, which will encrypt all of the values by sharing them between Alice and Bob.
```
# fixed-precision encode each tensor (SMPC operates on integers), then
# secret-share the encoded values between alice and bob
data = data.fix_precision().share(alice, bob)
target = target.fix_precision().share(alice, bob)
model = model.fix_precision().share(alice, bob)
```
# Step 3: Train
And now we can train using simple tensor logic. Note that autograd is not yet supported (but it will be in the Torch 1.0 refactor which you can [watch here](https://github.com/OpenMined/PySyft/issues/1587)).
```
# hand-rolled gradient descent (autograd is not yet supported on encrypted vars)
for i in range(10):
# forward pass of the linear model
pred = data.mm(model)
# residual; X^T . residual is the squared-error gradient (up to a constant factor)
grad = pred - target
update = data.transpose(0,1).mm(grad)
# fixed step size of 0.1
model = model - update * 0.1
# decrypt (get + decode) only the scalar L1 loss, for monitoring
loss = grad.get().decode().abs().sum()
print(loss)
```
# Congratulations!!! - Time to Join the Community!
Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!
### Star PySyft on Github
The easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building.
- [Star PySyft](https://github.com/OpenMined/PySyft)
### Join our Slack!
The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org)
### Join a Code Project!
The best way to contribute to our community is to become a code contributor! At any time you can go to PySyft Github Issues page and filter for "Projects". This will show you all the top level Tickets giving an overview of what projects you can join! If you don't want to join a project, but you would like to do a bit of coding, you can also look for more "one off" mini-projects by searching for github issues marked "good first issue".
- [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject)
- [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
### Donate
If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!
[OpenMined's Open Collective Page](https://opencollective.com/openmined)
| github_jupyter |
```
import os
import numpy as np
import pandas as pd
import importlib as imp
from tqdm import tqdm, tqdm_notebook
import warnings
warnings.simplefilter('ignore')
pd.options.display.max_columns = 100
```
- **p01_c.txt**, the knapsack capacity. <br>
- **p01_w.txt**, the weights of the objects. <br>
- **p01_p.txt**, the profits of each object. <br>
- **p01_s.txt**, the optimal selection of weights. <br>
```
from utils import tools
tools = imp.reload(tools)
%%time
n = np.arange(1,8)
benchmarks = {k: tools.get_knapsack(n='0'+str(k)) for k in n}
benchmarks[1]
from utils import tools
tools = imp.reload(tools)
from algorithm import brute_force, greedy, genetic, bound_and_branches, dynamic
brute_force = imp.reload(brute_force)
greedy = imp.reload(greedy)
genetic = imp.reload(genetic)
bound_and_branches = imp.reload(bound_and_branches)
dynamic = imp.reload(dynamic)
i = 6
knap = benchmarks[i]
param = {'n_epoch': 5000,
'eps': 0.5,
'chaos': 20,
'n_chrom': 15}
knap = dict(knap, **param)
alg = genetic.Genetic(knap)
opt = alg.solve()
print('{} optimal solution {}'.format(alg.name, opt))
alg_knap = tools.compute_knapsack(knap, opt)
real_knap = tools.compute_knapsack(knap, knap['optimal'])
print('{} optimal profit {} and weight {}'.format(alg.name, alg_knap[1], alg_knap[0]))
print('Real optimal profit {} and maximum weight {}'.format(real_knap[1],real_knap[0]))
import glob
tools = imp.reload(tools)
complexity = ['low-dimensional', 'large_scale']
h = 1
data_lst = glob.glob('data/'+complexity[h]+'/*')
print(len(data_lst))
i = 7
print(data_lst[i])
kdct = tools.get_kdct(data_lst[i], with_optimum=True)
with open('data/'+complexity[h]+'-optimum/'+data_lst[i].split('\\')[-1] ,'r') as f:
print(f.read())
tools.compute_knapsack(kdct, kdct['optimal'])
```
## Low-dimensional
```
import pprint
from datetime import datetime
tools = imp.reload(tools)
complexity = 'low-dimensional'
data_lst = sorted(glob.glob('data/'+complexity+'/*'))
# run every low-dimensional benchmark through each algorithm and print a
# markdown report (capacity, reference profit, per-algorithm time and result)
for data in data_lst:
# print(data)
# this benchmark is skipped -- presumably too slow or known-problematic; confirm
if 'kp_15_375' in data:
# print(data)
continue
knapsack = tools.get_kdct(data, with_optimum=False)
# NOTE(review): splitting on '\\' assumes Windows paths; os.path.basename
# would be portable
bm_name = data.split('\\')[-1]
with open('data/'+complexity+'-optimum/'+bm_name, 'r') as f:
opt_profit = f.read()
# NOTE(review): z appears unused in this loop
z = 30
# NOTE(review): knapsack['optimal'] is read even though get_kdct was called
# with with_optimum=False -- verify get_kdct still populates that key
opt_w, opt_p = tools.compute_knapsack(knapsack, knapsack['optimal'])
print('\n--- \n')
print('\n' + '## Knapsack {}'.format(bm_name))
print(' - capacity: *{}*<br>'.format(knapsack['capacity'][0]))
print(' - optimal profit: *{}*<br>'.format(opt_profit))
# hyper-parameters consumed by the genetic algorithm (ignored by the others)
param = {'n_epoch': 450,
'eps': 0.3,
'chaos': 15,
'n_chrom': 20}
algorithms = [greedy.Greedy,
bound_and_branches.BranchAndBound,
dynamic.Dynamic,
genetic.Genetic]
for alg in algorithms:
knapsack = dict(knapsack, **param)
start_time = datetime.now()
# NOTE(review): rebinding the loop variable `alg` to the instance shadows the class
alg = alg(knapsack)
alg_opt = alg.solve()
end_time = datetime.now() - start_time
alg_opt_w, alg_opt_p = tools.compute_knapsack(knapsack, alg_opt)
# print(' **{}** optimal solution: ```{}```<br>'.format(alg.name, alg_opt))
print(' **{}** ```exec time {:.2f}s```<br>'.format(alg.name, end_time.total_seconds()))
print(' optimal weight: {}, and profit **{}**<br>'.format(alg_opt_w, alg_opt_p))
```
### Large-scale
```
import pprint
from datetime import datetime
tools = imp.reload(tools)
complexity = 'large_scale'
data_lst = sorted(glob.glob('data/'+complexity+'/*'))
# same report loop as the low-dimensional case, but for the large-scale
# benchmarks (optimal solutions are loaded directly via with_optimum=True)
for data in data_lst:
knapsack = tools.get_kdct(data, with_optimum=True)
# NOTE(review): '\\' split is Windows-specific; os.path.basename is portable
bm_name = data.split('\\')[-1]
# NOTE(review): z appears unused in this loop
z = 30
opt_w, opt_p = tools.compute_knapsack(knapsack, knapsack['optimal'])
print('\n--- \n')
print('\n' + '## Knapsack {}'.format(bm_name))
print(' - capacity: *{}*<br>'.format(knapsack['capacity'][0]))
print(' - optimal weight: *{}*, optimal profit: *{}*<br>'.format(opt_w, opt_p))
# hyper-parameters consumed by the genetic algorithm (ignored by the others)
param = {'n_epoch': 450,
'eps': 0.3,
'chaos': 15,
'n_chrom': 20}
algorithms = [greedy.Greedy,
bound_and_branches.BranchAndBound,
dynamic.Dynamic,
genetic.Genetic]
for alg in algorithms:
knapsack = dict(knapsack, **param)
start_time = datetime.now()
alg = alg(knapsack)
alg_opt = alg.solve()
end_time = datetime.now() - start_time
# -1 is presumably a sentinel for "no solution found" -- confirm in solve()
if alg_opt != -1:
alg_opt_w, alg_opt_p = tools.compute_knapsack(knapsack, alg_opt)
print(' **{}** ```exec time {:.2f}s```<br>'.format(alg.name, end_time.total_seconds()))
print(' optimal weight: {}, and profit **{}**<br>'.format(alg_opt_w, alg_opt_p))
# NOTE(review): pprint is imported but never used below
import pprint
# report loop over the classic p01-p07 benchmarks, including BruteForce
for key in benchmarks:
knapsack = benchmarks[key]
# NOTE(review): z appears unused in this loop
z = 30
opt_w, opt_p = tools.compute_knapsack(knapsack, knapsack['optimal'])
print('--- \n')
print('\n' + '## Knapsack 0{}'.format(key))
print(' - capacity: *{}*<br>'.format(knapsack['capacity'][0]))
print(' - optimal solution: ```{}```<br>'.format(knapsack['optimal']))
print(' - optimal weight: *{}*, and profit: *{}*<br>'.format(opt_w, opt_p))
# hyper-parameters consumed by the genetic algorithm (ignored by the others)
param = {'n_epoch': 5000,
'eps': 0.5,
'chaos': 20,
'n_chrom': 15}
algorithms = [brute_force.BruteForce,
greedy.Greedy,
bound_and_branches.BranchAndBound,
dynamic.Dynamic,
genetic.Genetic]
for alg in algorithms:
knapsack = dict(knapsack, **param)
alg = alg(knapsack)
alg_opt = alg.solve()
alg_opt_w, alg_opt_p = tools.compute_knapsack(knapsack, alg_opt)
print(' **{}** optimal solution: ```{}```<br>'.format(alg.name, alg_opt))
print(' optimal weight: {}, and profit **{}**'.format(alg_opt_w, alg_opt_p))
# kdct
# NOTE(review): `weights` is not defined anywhere in this cell -- this line
# looks like leftover scratch code and will raise NameError if executed
len(weights)
# hyper-parameters for the genetic algorithm only
genetic_param = {'n_epoch': 500,
'eps': 0.3,
'chaos': 20,
'n_chrom': 15}
algorithms = [brute_force.BruteForce,
greedy.Greedy,
bound_and_branches.BranchAndBound,
dynamic.Dynamic,
genetic.Genetic]
# per-algorithm parameter dicts, aligned positionally with `algorithms`
# (only the genetic algorithm takes extra parameters)
alg_params = [{},
{},
{},
{},
genetic_param]
# each algorithm/benchmark pair is timed over 5 repeated observations
stat_df = tools.generate_stat(algorithms,
benchmarks,
alg_params,
n_observations=5)
# aggregate: mean/std of execution time, medians for the knapsack quantities
stat = stat_df.groupby(['benchmark', 'algorithm']).agg({'execution':['mean','std'],
'capacity': 'median',
'optim_weight': 'median',
'optim_profit': 'median'})
# flatten the MultiIndex columns produced by the mixed agg spec
stat.columns = [ 'execution mean', 'execution std', 'capacity','optim_weight', 'optim_profit']
stat = np.around(stat, 4).reset_index()
stat = stat.set_index(['benchmark'])
print(stat.to_markdown())
```
| github_jupyter |
We ran a Nadaraya-Watson photo-z algorithm from astroML's implementation trained on four photometry bands from DES's science verification data release. This notebook produces a comparison of the photometric redshift estimates reported by DES (described in Bonnett et al. 2015.), our Nadaraya-Watson redshift estimates based on DES's photometry, and the SDSS confirmed spectroscopic redshifts for all SDSS dr7 and dr12 quasars that were imaged in the DES science verification survey. We also indicate the object classification that DES applied to all objects to provide an insight into how the methods perform on point sources vs. extended sources in the DES catalog.
Match summary: <br>
Dr12 quasars matched to DES sva1 gold catalog to obtain DES photometry, then matched to spAll-DR12.fits at https://data.sdss.org/sas/dr12/env/BOSS_SPECTRO_REDUX/ to obtain DCR offsets, then matched to GAIA dr2 to obtain proper motion data.
Dr7 quasars matched to DES sva1 gold catalog to obtain DES photometry, then matched to the DR7 PhotoObj table to obtain DCR offsets, then matched to GAIA dr2 to obtain proper motion data
919 out of 297301 quasars from SDSS DR12 survive the matching process <br>
258 out of 105783 quasars from SDSS DR7 survive the matching process <br>
```
import numpy as np
from astropy.table import Table
from sklearn.model_selection import train_test_split, cross_val_predict
from sklearn.metrics import classification_report
from astroML.linear_model import NadarayaWatson
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import palettable
import richardsplot as rplot
%matplotlib inline
```
For the following code, the same process is repeated once for each of the four DES photo-z methods. The cells are marked with a comment at the top indicating which method each cell applies to. In general, only the ANNZ method cells are commented and should be used as the primary reference point.
```
#ANNZ
# Read the DES + SDSS matched catalogs for each DES photo-z method.
# .filled() replaces masked entries with fill values so downstream numpy
# code sees plain arrays.
dr7_annz = Table.read('dr7Q+sva1gold+offset+gaia_annz.fits').filled()
dr12_annz = Table.read('dr12Q+sva1gold+spectro+gaia_annz.fits').filled()
#BPZ
dr7_bpz = Table.read('dr7Q+sva1gold+offset+gaia_bpz.fits').filled()
dr12_bpz = Table.read('dr12Q+sva1gold+spectro+gaia_bpz.fits').filled()
#SKYNET
dr7_skynet = Table.read('dr7Q+sva1gold+offset+gaia_skynet.fits').filled()
dr12_skynet = Table.read('dr12Q+sva1gold+spectro+gaia_skynet.fits').filled()
#TPZ
dr7_tpz = Table.read('dr7Q+sva1gold+offset+gaia_tpz.fits').filled()
dr12_tpz = Table.read('dr12Q+sva1gold+spectro+gaia_tpz.fits').filled()
#ANNZ
# Feature matrix for the DR7 quasars: the four DES photometry bands
# (g, r, i, z), followed by DES's ANNZ photo-z (Z_MEAN) and the DES object
# classification (MODEST_CLASS).  The last two columns are carried along
# only so the train/test split keeps them aligned with each object.
# (The original comment also claimed proper-motion and DCR-offset columns,
# but none are stacked here.)
X1_annz = np.vstack([ dr7_annz['MAG_AUTO_G'], dr7_annz['MAG_AUTO_R'], dr7_annz['MAG_AUTO_I'], dr7_annz['MAG_AUTO_Z'], dr7_annz['Z_MEAN'], dr7_annz['MODEST_CLASS'] ]).T
# SDSS DR7 spectroscopic redshift: the regression target.
y1_annz = np.array(dr7_annz['z_1'])
# Same features and target for the DR12 quasars in DES sva1.
X2_annz = np.vstack([ dr12_annz['MAG_AUTO_G'], dr12_annz['MAG_AUTO_R'], dr12_annz['MAG_AUTO_I'], dr12_annz['MAG_AUTO_Z'], dr12_annz['Z_MEAN'], dr12_annz['MODEST_CLASS'] ]).T
y2_annz = np.array(dr12_annz['Z_PIPE_1']) #this is the dr12 spec-z
# Combine the two quasar samples.
X_annz = np.concatenate((X1_annz, X2_annz))
y_annz = np.concatenate((y1_annz, y2_annz))
# Split into 4/5 training and 1/5 test sets.
X_train_annz, X_test_annz, y_train_annz, y_test_annz = train_test_split(X_annz, y_annz, test_size=0.2, random_state=84)
# Slice columns directly instead of the original element-by-element copy
# loops: columns 0-3 are the photometry, column 4 is the DES photo-z,
# column 5 is the DES object classification.
X_traintrue_annz = X_train_annz[:, :4] #just the photometry
X_testtrue_annz = X_test_annz[:, :4] #just the photometry
DesZs_annz = X_test_annz[:, 4:5] #the DES photo-z, shape (n, 1)
ModestClass_annz = X_test_annz[:, 5:6].astype(int) #the DES object classification, shape (n, 1)
# Train the model: Nadaraya-Watson with a gaussian kernel of width 0.05,
# fit on the 4 DES photometry bands against the SDSS spec-z.
model_annz = NadarayaWatson('gaussian', 0.05)
model_annz.fit(X_traintrue_annz, y_train_annz)
# Predict a redshift for every quasar in the test set.
pred_annz = model_annz.predict(X_testtrue_annz)
#bpz
# Same pipeline as the ANNZ cell: four DES photometry bands plus the DES
# BPZ photo-z (Z_MEAN) and MODEST_CLASS carried along for later plotting.
X1_bpz = np.vstack([ dr7_bpz['MAG_AUTO_G'], dr7_bpz['MAG_AUTO_R'], dr7_bpz['MAG_AUTO_I'], dr7_bpz['MAG_AUTO_Z'], dr7_bpz['Z_MEAN'], dr7_bpz['MODEST_CLASS'] ]).T
# SDSS DR7 spectroscopic redshift: the regression target.
y1_bpz = np.array(dr7_bpz['z_1'])
# Same features and target for the DR12 quasars in DES sva1.
X2_bpz = np.vstack([ dr12_bpz['MAG_AUTO_G'], dr12_bpz['MAG_AUTO_R'], dr12_bpz['MAG_AUTO_I'], dr12_bpz['MAG_AUTO_Z'], dr12_bpz['Z_MEAN'], dr12_bpz['MODEST_CLASS'] ]).T
y2_bpz = np.array(dr12_bpz['Z_PIPE_1']) #this is the dr12 spec-z
# Combine the two quasar samples.
X_bpz = np.concatenate((X1_bpz, X2_bpz))
y_bpz = np.concatenate((y1_bpz, y2_bpz))
# Split into 4/5 training and 1/5 test sets.
X_train_bpz, X_test_bpz, y_train_bpz, y_test_bpz = train_test_split(X_bpz, y_bpz, test_size=0.2, random_state=84)
# Column slicing replaces the original element-by-element copy loops.
X_traintrue_bpz = X_train_bpz[:, :4] #just the photometry
X_testtrue_bpz = X_test_bpz[:, :4] #just the photometry
DesZs_bpz = X_test_bpz[:, 4:5] #the DES photo-z, shape (n, 1)
ModestClass_bpz = X_test_bpz[:, 5:6].astype(int) #the DES object classification, shape (n, 1)
# Nadaraya-Watson, gaussian kernel of width 0.05, trained on the 4 DES
# photometry bands against the SDSS spec-z.
model_bpz = NadarayaWatson('gaussian', 0.05)
model_bpz.fit(X_traintrue_bpz, y_train_bpz)
# Predict a redshift for every quasar in the test set.
pred_bpz = model_bpz.predict(X_testtrue_bpz)
#skynet
# Same pipeline as the ANNZ cell: four DES photometry bands plus the DES
# Skynet photo-z (Z_MEAN) and MODEST_CLASS carried along for later plotting.
X1_skynet = np.vstack([ dr7_skynet['MAG_AUTO_G'], dr7_skynet['MAG_AUTO_R'], dr7_skynet['MAG_AUTO_I'], dr7_skynet['MAG_AUTO_Z'], dr7_skynet['Z_MEAN'], dr7_skynet['MODEST_CLASS'] ]).T
# SDSS DR7 spectroscopic redshift: the regression target.
y1_skynet = np.array(dr7_skynet['z_1'])
# Same features and target for the DR12 quasars in DES sva1.
X2_skynet = np.vstack([ dr12_skynet['MAG_AUTO_G'], dr12_skynet['MAG_AUTO_R'], dr12_skynet['MAG_AUTO_I'], dr12_skynet['MAG_AUTO_Z'], dr12_skynet['Z_MEAN'], dr12_skynet['MODEST_CLASS'] ]).T
y2_skynet = np.array(dr12_skynet['Z_PIPE_1']) #this is the dr12 spec-z
# Combine the two quasar samples.
X_skynet = np.concatenate((X1_skynet, X2_skynet))
y_skynet = np.concatenate((y1_skynet, y2_skynet))
# Split into 4/5 training and 1/5 test sets.
X_train_skynet, X_test_skynet, y_train_skynet, y_test_skynet = train_test_split(X_skynet, y_skynet, test_size=0.2, random_state=84)
# Column slicing replaces the original element-by-element copy loops.
X_traintrue_skynet = X_train_skynet[:, :4] #just the photometry
X_testtrue_skynet = X_test_skynet[:, :4] #just the photometry
DesZs_skynet = X_test_skynet[:, 4:5] #the DES photo-z, shape (n, 1)
ModestClass_skynet = X_test_skynet[:, 5:6].astype(int) #the DES object classification, shape (n, 1)
# Nadaraya-Watson, gaussian kernel of width 0.05, trained on the 4 DES
# photometry bands against the SDSS spec-z.
model_skynet = NadarayaWatson('gaussian', 0.05)
model_skynet.fit(X_traintrue_skynet, y_train_skynet)
# Predict a redshift for every quasar in the test set.
pred_skynet = model_skynet.predict(X_testtrue_skynet)
#tpz
# Same pipeline as the ANNZ cell: four DES photometry bands plus the DES
# TPZ photo-z (Z_MEAN) and MODEST_CLASS carried along for later plotting.
X1_tpz = np.vstack([ dr7_tpz['MAG_AUTO_G'], dr7_tpz['MAG_AUTO_R'], dr7_tpz['MAG_AUTO_I'], dr7_tpz['MAG_AUTO_Z'], dr7_tpz['Z_MEAN'], dr7_tpz['MODEST_CLASS'] ]).T
# SDSS DR7 spectroscopic redshift: the regression target.
y1_tpz = np.array(dr7_tpz['z_1'])
# Same features and target for the DR12 quasars in DES sva1.
X2_tpz = np.vstack([ dr12_tpz['MAG_AUTO_G'], dr12_tpz['MAG_AUTO_R'], dr12_tpz['MAG_AUTO_I'], dr12_tpz['MAG_AUTO_Z'], dr12_tpz['Z_MEAN'], dr12_tpz['MODEST_CLASS'] ]).T
y2_tpz = np.array(dr12_tpz['Z_PIPE_1']) #this is the dr12 spec-z
# Combine the two quasar samples.
X_tpz = np.concatenate((X1_tpz, X2_tpz))
y_tpz = np.concatenate((y1_tpz, y2_tpz))
# Split into 4/5 training and 1/5 test sets.
X_train_tpz, X_test_tpz, y_train_tpz, y_test_tpz = train_test_split(X_tpz, y_tpz, test_size=0.2, random_state=84)
# Column slicing replaces the original element-by-element copy loops.
X_traintrue_tpz = X_train_tpz[:, :4] #just the photometry
X_testtrue_tpz = X_test_tpz[:, :4] #just the photometry
DesZs_tpz = X_test_tpz[:, 4:5] #the DES photo-z, shape (n, 1)
ModestClass_tpz = X_test_tpz[:, 5:6].astype(int) #the DES object classification, shape (n, 1)
# Nadaraya-Watson, gaussian kernel of width 0.05, trained on the 4 DES
# photometry bands against the SDSS spec-z.
model_tpz = NadarayaWatson('gaussian', 0.05)
model_tpz.fit(X_traintrue_tpz, y_train_tpz)
# Predict a redshift for every quasar in the test set.
pred_tpz = model_tpz.predict(X_testtrue_tpz)
#ANNZ
# Sort the test-set objects by DES MODEST_CLASS (2 = star, 1 = galaxy,
# anything else = undetermined), collecting a (NW prediction, DES photo-z,
# SDSS spec-z) triple for each object.  Boolean masks replace the original
# per-row np.append loop, which was quadratic in the number of objects.
_class_annz = ModestClass_annz.ravel()
_triples_annz = np.column_stack([pred_annz, DesZs_annz.ravel(), y_test_annz])
stars_annz = _triples_annz[_class_annz == 2]
gals_annz = _triples_annz[_class_annz == 1]
uns_annz = _triples_annz[(_class_annz != 2) & (_class_annz != 1)]
#BPZ
# Same class-sorting as the ANNZ cell, vectorized with boolean masks
# instead of a quadratic np.append loop.
_class_bpz = ModestClass_bpz.ravel()
_triples_bpz = np.column_stack([pred_bpz, DesZs_bpz.ravel(), y_test_bpz])
stars_bpz = _triples_bpz[_class_bpz == 2]
gals_bpz = _triples_bpz[_class_bpz == 1]
uns_bpz = _triples_bpz[(_class_bpz != 2) & (_class_bpz != 1)]
#Skynet
# Same class-sorting as the ANNZ cell, vectorized with boolean masks
# instead of a quadratic np.append loop.
_class_skynet = ModestClass_skynet.ravel()
_triples_skynet = np.column_stack([pred_skynet, DesZs_skynet.ravel(), y_test_skynet])
stars_skynet = _triples_skynet[_class_skynet == 2]
gals_skynet = _triples_skynet[_class_skynet == 1]
uns_skynet = _triples_skynet[(_class_skynet != 2) & (_class_skynet != 1)]
#TPZ
# Same class-sorting as the ANNZ cell, vectorized with boolean masks
# instead of a quadratic np.append loop.
_class_tpz = ModestClass_tpz.ravel()
_triples_tpz = np.column_stack([pred_tpz, DesZs_tpz.ravel(), y_test_tpz])
stars_tpz = _triples_tpz[_class_tpz == 2]
gals_tpz = _triples_tpz[_class_tpz == 1]
uns_tpz = _triples_tpz[(_class_tpz != 2) & (_class_tpz != 1)]
# Plotting with MODEST_CLASS: a 2x2 grid, one panel per DES photo-z method.
# In every panel, open circles are our Nadaraya-Watson photo-z and filled
# dots are the DES method's photo-z, both plotted against the SDSS spec-z.
plt.figure(figsize=(16,16))
plt.subplot(221)
#note that stars_annz.T[0] is our photo-z prediction, stars_annz.T[1] is the DES prediction, and stars_annz.T[2] is the zspec
plt.scatter(stars_annz.T[0], stars_annz.T[2], s=25, facecolor='none', edgecolor='blue')
plt.scatter(stars_annz.T[1], stars_annz.T[2], s=10, c='blue')
#same for gals_annz and uns_annz
plt.scatter(gals_annz.T[0], gals_annz.T[2], s=25, facecolor='none', edgecolor='orange')
plt.scatter(gals_annz.T[1], gals_annz.T[2], s=10, c='orange')
#black points (undetermined objects) carry the legend tags for open circles and closed points
legendhelp1_annz = plt.scatter(uns_annz.T[0], uns_annz.T[2], s=25, facecolor='none', edgecolor='k', label = 'NW photo-z')
legendhelp2_annz = plt.scatter(uns_annz.T[1], uns_annz.T[2], s=10, c='k', label = 'DES ANNZ photo-z')
plt.plot([0,1,2,3,4,5], 'r') #plot a one-to-one line for reference (y values against their own 0..5 indices)
plt.xlim(0,5)
plt.ylim(0,5)
plt.xlabel('Photo-z Estimation')
plt.ylabel('SDSS z-spec')
plt.title('ANNZ')
#colored patches for the legend (created once; reused by all four panels)
orange_patch = mpatches.Patch(color='orange', label='DES Galaxy')
blue_patch = mpatches.Patch(color='blue', label='DES Star')
black_patch = mpatches.Patch(color='k', label='DES Undetermined')
plt.legend(handles=[blue_patch, orange_patch, black_patch, legendhelp1_annz, legendhelp2_annz], loc=1)
# BPZ panel: same layout as the ANNZ panel above.
plt.subplot(222)
plt.scatter(stars_bpz.T[0], stars_bpz.T[2], s=25, facecolor='none', edgecolor='blue')
plt.scatter(stars_bpz.T[1], stars_bpz.T[2], s=10, c='blue')
plt.scatter(gals_bpz.T[0], gals_bpz.T[2], s=25, facecolor='none', edgecolor='orange')
plt.scatter(gals_bpz.T[1], gals_bpz.T[2], s=10, c='orange')
legendhelp1_bpz = plt.scatter(uns_bpz.T[0], uns_bpz.T[2], s=25, facecolor='none', edgecolor='k', label = 'NW photo-z')
legendhelp2_bpz = plt.scatter(uns_bpz.T[1], uns_bpz.T[2], s=10, c='k', label = 'DES BPZ photo-z')
plt.plot([0,1,2,3,4,5], 'r')
plt.xlim(0,5)
plt.ylim(0,5)
plt.xlabel('Photo-z Estimation')
plt.ylabel('SDSS z-spec')
plt.title('BPZ')
plt.legend(handles=[blue_patch, orange_patch, black_patch, legendhelp1_bpz, legendhelp2_bpz], loc=1)
# Skynet panel.
plt.subplot(223)
plt.scatter(stars_skynet.T[0], stars_skynet.T[2], s=25, facecolor='none', edgecolor='blue')
plt.scatter(stars_skynet.T[1], stars_skynet.T[2], s=10, c='blue')
plt.scatter(gals_skynet.T[0], gals_skynet.T[2], s=25, facecolor='none', edgecolor='orange')
plt.scatter(gals_skynet.T[1], gals_skynet.T[2], s=10, c='orange')
legendhelp1_skynet = plt.scatter(uns_skynet.T[0], uns_skynet.T[2], s=25, facecolor='none', edgecolor='k', label = 'NW photo-z')
legendhelp2_skynet = plt.scatter(uns_skynet.T[1], uns_skynet.T[2], s=10, c='k', label = 'DES Skynet photo-z')
plt.plot([0,1,2,3,4,5], 'r')
plt.xlim(0,5)
plt.ylim(0,5)
plt.xlabel('Photo-z Estimation')
plt.ylabel('SDSS z-spec')
plt.title('Skynet')
plt.legend(handles=[blue_patch, orange_patch, black_patch, legendhelp1_skynet, legendhelp2_skynet], loc=1)
# TPZ panel.
plt.subplot(224)
plt.scatter(stars_tpz.T[0], stars_tpz.T[2], s=25, facecolor='none', edgecolor='blue')
plt.scatter(stars_tpz.T[1], stars_tpz.T[2], s=10, c='blue')
plt.scatter(gals_tpz.T[0], gals_tpz.T[2], s=25, facecolor='none', edgecolor='orange')
plt.scatter(gals_tpz.T[1], gals_tpz.T[2], s=10, c='orange')
legendhelp1_tpz = plt.scatter(uns_tpz.T[0], uns_tpz.T[2], s=25, facecolor='none', edgecolor='k', label = 'NW photo-z')
legendhelp2_tpz = plt.scatter(uns_tpz.T[1], uns_tpz.T[2], s=10, c='k', label = 'DES TPZ photo-z')
plt.plot([0,1,2,3,4,5], 'r')
plt.xlim(0,5)
plt.ylim(0,5)
plt.xlabel('Photo-z Estimation')
plt.ylabel('SDSS z-spec')
plt.title('TPZ')
plt.legend(handles=[blue_patch, orange_patch, black_patch, legendhelp1_tpz, legendhelp2_tpz], loc=1)
```
With respect to the above plots: Each panel compares one of DES's photo-z methods to astroML's Nadaraya-Watson implementation trained on the 4 DES photometry bands: g, r, i, and z. Open circles are NW photo-z estimations, dots are DES method photo-z estimations. The colors indicate the DES MODEST_CLASS object classification for each object. Blue points were classified as stars, orange points were classified as galaxies, black points were undetermined objects. Red line indicates the one-to-one line along which a given photo-z estimation would be exactly correct. We find that the DES photo-z methods do poorly for objects that are not dominated by the host galaxy, i.e. the quasars that DES classifies as stars. We also find that the NW photo-z method handles low-redshift quasars well, but has a large scatter for higher-redshift objects while still being "correct" on average.
| github_jupyter |
```
import kalman
import observation_helpers
reload(observation_helpers)
def ConstructFilter(csvfile, obs_noise, system_noise, start_obs=2000,
                    stop_obs=2500, dt=.25, dim_u=0):
    '''
    Construct the Kalman filter instance for a cluster of sensors.

    Parameters
    ----------
    csvfile : str
        Path to the cluster data file read via observation_helpers.
        NOTE(review): the original docstring called this an integer sensor
        index, but every use in this notebook passes a filename.
    obs_noise : np.array(z_dim, z_dim)
        Observation noise covariance matrix.  Scalar if z_dim=1
        (observation dimension).
    system_noise : np.array(mu_dim, mu_dim)
        System (process) covariance matrix; mu_dim is the state dimension.
    start_obs : integer
        Starting observation for estimating the initial baseline value.
    stop_obs : integer
        Stopping observation for estimating the initial baseline value.
    dt : float
        Time interval between observations.
    dim_u : integer
        Dimension of control input.

    Returns
    ----------
    K : Kalman instance.
    '''
    # Get number of sensors in cluster.
    nsensors = observation_helpers.GetNumSensors(csvfile)
    # Get total observation vector, one row per sensor (sensors are
    # 1-indexed in the data file).
    Y = np.array([observation_helpers.GetTimeSeries(csvfile, i_sensor)
                  for i_sensor in range(1,nsensors+1)])
    # Estimate the initial baseline per sensor as the median of the data
    # points in [start_obs, stop_obs), excluding NaNs.
    baselines = np.array([np.median(Y[i_sensor,start_obs:stop_obs][~np.isnan(Y[i_sensor,start_obs:stop_obs])])
                          for i_sensor in range(0,nsensors)])
    # Label and enumerate the state parameters.
    state_params = ['D', 'Ddot', 'b'] # These are for each sensor
    nparams = len(state_params) # Number of parameters/sensor
    state_labels = [] # This will hold the full set of state labels
    for i_sensor in range(nsensors):
        for param in state_params:
            state_labels.append(param + '_%i'%i_sensor)
    #---------------------------------------------------
    # Construct the transition matrix
    A = np.zeros((nsensors*nparams, nsensors*nparams))
    # First, just couple a sensor to itself
    for i_sensor in range(nsensors):
        for i_param, param in enumerate(state_params):
            # Setup Newton's equations for each sensor with itself:
            # D evolves as D + dt*Ddot, Ddot and b carry over unchanged.
            if param == 'D':
                A[i_sensor*nparams+i_param, i_sensor*nparams+i_param+0] = 1 # Position
                A[i_sensor*nparams+i_param, i_sensor*nparams+i_param+1] = dt # Velocity update
                A[i_sensor*nparams+i_param+1, i_sensor*nparams+i_param+1] = 1 # Velocity update
            if param == 'b':
                A[i_sensor*nparams+i_param, i_sensor*nparams+i_param+0] = 1 # Position
    # First observation that is not nan, per sensor.
    Y0 = np.array([Y[i_sensor, np.argwhere(~np.isnan(Y[i_sensor]))[0]][0] for i_sensor in range(nsensors)])
    # Estimate initial state from the first observation:
    # D = baseline - first reading, zero velocity, baseline as estimated.
    mu_0 = []
    for i_sensor in range(nsensors):
        mu_0 += [-Y0[i_sensor]+baselines[i_sensor], 0., baselines[i_sensor]]
    mu_0 = np.array(mu_0)
    #-----------------------------------------------
    # Estimate for the initial state covariance.
    # Assume diagonal, and identical uncertainties for every sensor.
    sigma_0 = np.diag((50, 10, 10)*nsensors)
    # Control Model (empty when dim_u=0)
    B = np.zeros((len(mu_0),dim_u))
    # Observation Matrix: each sensor's reading maps to b - D of that
    # sensor (row pattern [-1, 0, +1] over its (D, Ddot, b) state slots).
    C = np.zeros((nsensors, len(mu_0)))
    for i_sensor in range(nsensors):
        C[i_sensor,:] = np.array([0, 0, 0]*i_sensor + [-1, 0, +1] + [0, 0, 0]*(nsensors-i_sensor-1))
    # Observation control matrix
    D = None
    # Process noise.
    Q = system_noise
    # Observation Noise
    R = obs_noise
    K = kalman.Kalman(mu_0, sigma_0, A, B, C, D, Q, R, state_labels)
    return K
def AugmentFilterDifferentialTracking(K, process_noise, dt=.25):
    '''
    Expand the matrices in the Kalman filter to accommodate the pairwise
    displacement observations.

    The observation length expands by: n_sensors*(n_sensors-1)/2
    The state length expands by: n_sensors*(n_sensors-1) (difference and velocity)

    Parameters
    ----------
    K : Kalman filter instance (modified in place).
    process_noise : sequence of length 2
        Tuple or array specifying the process noise level for each differential
        and differential velocity.
        Should probably be near [2*process_variance(d), 2*process_variance(v)]
    dt : float
        timestep

    Returns
    ----------
    K : Kalman filter instance.
    '''
    # Number of sensors is the number of rows of the observation matrix.
    nsensors = K.C.shape[0]
    # Number of state parameters before expansion
    old_params = K.A.shape[0]
    # One (difference, difference-velocity) pair per ordered sensor pair.
    new_params = nsensors*(nsensors-1)
    # Floor division: pad widths and list-repeat counts must be ints
    # (the original used true division, which breaks under Python 3).
    new_obs = new_params // 2 # Number of new observables
    #---------------------------------------------------
    # Need to pad A, C, mu, sigma, Q, R
    # Pad A along both dimensions
    K.A = np.pad(K.A, pad_width=((0, new_params), (0, new_params)), mode='constant', constant_values=0)
    # Pad C along both dimensions, but the first dimension includes only
    # differences, not velocities.
    K.C = np.pad(K.C, pad_width=((0, new_obs), (0, new_params)), mode='constant', constant_values=0)
    # Pad process noise
    K.Q = np.pad(K.Q, pad_width=((0, new_params), (0, new_params)), mode='constant', constant_values=0)
    # Pad observation noise
    K.R = np.pad(K.R, pad_width=((0, new_obs), (0, new_obs)), mode='constant', constant_values=0)
    # Pad the state estimate and its covariance.
    K.mu = np.pad(K.mu, pad_width=(0, new_params), mode='constant', constant_values=0)
    K.sigma = np.pad(K.sigma, pad_width=((0, new_params), (0, new_params)), mode='constant', constant_values=0)
    # Modify A to incorporate differential tracking: each new pair keeps its
    # own track (identity) with the velocity coupled in through dt.
    K.A[old_params:,old_params:] = np.eye(new_params)
    for i in range(old_params, old_params+new_params-1, 2):
        K.A[i,i+1] = dt
    # Initial covariance guess for the new (difference, velocity) pairs.
    K.sigma[old_params:,old_params:] = np.diag([50, 10]*new_obs)
    # Modify C. The old number of observables is the number of sensors;
    # each new row picks out one differential distance as the observable.
    for i in range(0, new_obs):
        K.C[i+nsensors,:] = np.array([0, 0, 0]*nsensors + [0, 0]*i + [1, 0] + [0, 0]*(new_obs-i-1))
    # Observation noise of a difference is sigma_i^2 + sigma_j^2, diagonal.
    cov = np.zeros((nsensors,nsensors))
    for i in range(nsensors):
        for j in range(nsensors):
            cov[i,j] = K.R[i,i] + K.R[j,j]
    # Pull the upper-triangular part in flattened form; these values make up
    # the diagonal of the expanded observation noise.
    # NOTE: fixed NameError -- the original called numpy.triu_indices, but
    # this module imports numpy as np.
    upper = cov[np.triu_indices(nsensors, k=1)]
    K.R[nsensors:, nsensors:] = np.diag(upper)
    # Process noise for the new states: passed in, assumed equal for all pairs.
    K.Q[old_params:, old_params:] = np.diag(list(process_noise)*new_obs)
    return K
def AugmentObservationsDifferentialTracking(Y):
    '''
    Return the augmented observation vector which includes appended
    differentials between observations i, j.

    Parameters
    ----------
    Y : np.array(n_sensors)
        Input from a single timestep from all sensors.

    Returns
    ----------
    augmented : np.array(n_sensors + n_sensors*(n_sensors-1)/2)
        Y followed by the upper-triangular elements (i < j) of the pairwise
        displacement matrix Y_i - Y_j.
    '''
    # Displacement matrix between observations: diffs[i, j] = Y[i] - Y[j].
    diffs = np.zeros((Y.shape[0], Y.shape[0]))
    for i, yi in enumerate(Y):
        diffs[i,:] = yi-Y
    # Select out only the above-diagonal elements.
    # NOTE: fixed NameError -- the original called numpy.triu_indices, but
    # this module imports numpy as np.
    return np.append(Y, diffs[np.triu_indices(len(Y), k=1)])
def GenImputationMatrix(Y, K):
    # NOTE: fixed SyntaxError -- the original def line was missing its colon.
    '''
    Given observations at the current time step and a Kalman filter instance,
    generate a transition matrix which imputes state from the other sensors
    using the differential measurements.

    If an observation is missing (NaN), the transition matrix A needs to be
    modified to impute the missing values.  Differentials and differential
    velocities should not be imputed, but just maintain the track.
    Eventually, A might be some non-stationary matrix which depends on the
    present system state or external inputs such as temp/cloud-cover/etc.

    Parameters
    ----------
    Y : np.array()
        Augmented observation vector for a single timestep.
    K : Kalman filter instance providing the regular transition matrix A.

    Returns
    ----------
    A : np.array( dim(mu), dim(mu) ) or None
        The regular transition matrix when no observation is missing;
        currently None when imputation would be required (see TODO below).
    '''
    # If nothing is NaN, just return the regular transition matrix A.
    if not np.isnan(Y).any():
        return K.A
    # TODO: build the imputation matrix for timesteps with missing values.
    # Until that is implemented this returns None; callers must handle it.
    return None
#============================================================
# Filter Parameters
#============================================================
# NOTE(review): this cell uses Python 2 print statements; it will not run
# unmodified under Python 3.
# Define the cluster filename
csvfile = '../output/cluster_0_cleaned.pickle'
#csvfile = '../output/cluster_0_3sensors.pickle'
nsensors = observation_helpers.GetNumSensors(csvfile)
# Observations to use for estimating the baseline and sensor variance
start_obs, stop_obs = 2000, 2500
# Timestep (hours between observations -- presumably; confirm with the data)
dt=.25
# System noise needs to be estimated, but for now let's guess.
system_noise = np.diag([1e-3, 1e-3, 1e-3]*nsensors)
system_noise_differential = [1e-3, 1e-3]
# Estimate the observation noise of each sensor from the initial summertime (no-snow) variance
obs_noise = np.array([observation_helpers.EstimateObservationNoise(csvfile,
                                                                   sensor_number=i_sensor,
                                                                   start_obs=start_obs,
                                                                   stop_obs=stop_obs) for i_sensor in range(1,nsensors+1)])
#============================================================
# End Parameters
#============================================================
reload(kalman)
# Load the observation vectors, one row per sensor.
Y = np.array([observation_helpers.GetTimeSeries(csvfile, i_sensor)
              for i_sensor in range(1,nsensors+1)])
nobs = Y.shape[1] # number of observations
# Build the joint Kalman filter, then expand it with the pairwise
# differential states and observables.
K = ConstructFilter(csvfile, np.diag(obs_noise), system_noise)
K = AugmentFilterDifferentialTracking(K, system_noise_differential, dt)
# np.set_printoptions(linewidth=120)
# print K.A
# Forward pass: predict/update on every timestep, recording the state.
mu_list = np.zeros((nobs, len(K.mu)))
#Y_augmented = []
for i_obs in range(nobs):
    K.predict()
    # Append the pairwise displacement observables for this timestep.
    Y_i_augmented = AugmentObservationsDifferentialTracking(Y[:,i_obs])
    #Y_augmented.append(Y_i_augmented)
    #print Y_i_augmented.
    K.update(Y_i_augmented)
    # Save the state at each step
    mu_list[i_obs] = K.mu
    # Progress indicator every 500 observations (Python 2 print statement).
    if (i_obs%500)==0:
        print '\rForward pass on observation %i of %i'%(i_obs,Y.shape[1]),
print
# Plot the filtered state time series: one subplot per state parameter
# (snow depth D, its velocity Ddot, and the baseline b), one line per sensor.
plt.figure(figsize=(6,8))
for i_sensor in range(nsensors):
    for i_param in range(3):
        plt.subplot(3,1,i_param+1)
        # State layout is 3 parameters per sensor, hence the i_sensor*3 stride.
        plt.plot(mu_list[:,i_sensor*3+i_param],label='Sensor %i'%(i_sensor+1))
plt.subplot(311)
# Plot the snowdepth
plt.xlabel('Observation')
plt.ylabel('Snowdepth $D$ [mm]')
plt.grid(linestyle='-', alpha=.15)
plt.legend(loc=2, ncol=2, frameon=False, columnspacing=.2, labelspacing=.2)
plt.ylim(-500, 3000)
# -------------------------------
# Plot the velocity parameter
plt.subplot(312)
plt.xlabel('Observation')
plt.ylabel('$dD/dt$ [mm/hr]')
plt.ylim(-10,25)
plt.grid(linestyle='-', alpha=.15)
# -------------------------------
# Plot the baseline
plt.subplot(313)
plt.xlabel('Observation')
plt.ylabel('$b(t)$ [mm]')
plt.ylim(3.5e3,4.5e3)
plt.grid(linestyle='-', alpha=.15)
def GetDifferential(mu_list, nsensors, sensor1, sensor2):
    '''Returns the differential and differential velocity for sensor1 relative to sensor2.

    Parameters
    ----------
    mu_list : np.array( (n_obs, len(mu)) )
        State parameters at each timestep
    nsensors : int
        Number of sensors in set
    sensor1 : int
        First sensor number (1 indexed)
    sensor2 : int
        Second sensor number (1 indexed)

    Returns
    ----------
    diff : np.array( n_obs )
        The differential at each timestep in mm
    diff_vel : np.array( n_obs )
        The differential velocity at each timestep in mm/hr

    Raises
    ----------
    Exception
        If either sensor index is outside [1, nsensors].
    '''
    if sensor1<1 or sensor2<1 or sensor1>nsensors or sensor2>nsensors:
        raise Exception('Invalid sensor index')
    # Number of differential parameters appended to the state by
    # AugmentFilterDifferentialTracking (a difference and a velocity per pair).
    new_params = nsensors*(nsensors-1)
    # Map Delta_{i,j} into the state space: build an upper-triangular lookup
    # table of the state-vector index of each pair's differential.
    # NOTE: fixed NameError -- the original called numpy.triu_indices, but
    # this module imports numpy as np.
    upper = np.triu_indices(nsensors, k=1)
    lookup = np.zeros((nsensors, nsensors)).astype(np.int32)
    # This lookup matrix contains the indices for the delta_{i,j}.
    lookup[upper] = np.arange(len(upper[0]))*2 + (mu_list.shape[1]-new_params)
    # Now lookup[sensor1-1,sensor2-1] = index in the state vector.
    # Retrieve the relevant state time series, flipping the sign when the
    # requested order is opposite the stored Delta_{i,j} convention.
    if sensor2 > sensor1:
        idx = lookup[sensor1-1,sensor2-1]
        diff, diff_vel = -mu_list[:,idx], -mu_list[:,idx+1]
    elif sensor1 > sensor2:
        idx = lookup[sensor2-1,sensor1-1]
        diff, diff_vel = mu_list[:,idx], mu_list[:,idx+1]
    else: # sensor1 == sensor2: the differential is identically zero
        diff, diff_vel = np.zeros(mu_list.shape[0]), np.zeros(mu_list.shape[0])
    return diff, diff_vel
# Plot the pairwise differentials (top) and differential velocities
# (bottom) of sensor 1 against every other sensor.
# NOTE(review): range(2,11) hard-codes a 10-sensor cluster -- confirm
# against nsensors for other datasets.
plt.figure(figsize=(6,8))
sensor1 = 1
for i in range(2,11):
    diff, diff_vel = GetDifferential(mu_list, nsensors, sensor1, i)
    plt.subplot(211)
    plt.plot(diff, label=r'$\Delta_{%i,%i}$'%(sensor1,i))
    plt.subplot(212)
    plt.plot(diff_vel, label=r'$\dot{\Delta}_{%i,%i}$'%(sensor1,i))
plt.subplot(211)
plt.ylabel('$\Delta$ [mm]')
plt.ylim(-1000, 1000)
plt.legend(frameon=False, ncol=3, loc=2)
plt.subplot(212)
plt.ylim(-10,10)
plt.ylabel('$\dot{\Delta}$ [mm/hr]')
plt.legend(frameon=False, ncol=3)
# Some junk to try on smaller sets of sensors: cut the 10-sensor cluster
# down to a 2-sensor pickle for quicker experiments.
import pandas as pd
df = pd.read_pickle('../output/cluster_0_cleaned.pickle')
# Drop sensors 4-10 and then sensor 2, leaving sensors 1 and 3.
# drop(columns=...) replaces the deprecated positional-axis form drop(label, 1).
df = df.drop(columns=['snowdepth_%i' % i for i in range(4, 11)])
df = df.drop(columns=['snowdepth_2'])
# Relabel the surviving last column (sensor 3) as sensor 2 so downstream
# code sees contiguous numbering.  rename() replaces the original in-place
# mutation of df.columns.values, which does not reliably update the index.
df = df.rename(columns={df.columns[-1]: 'snowdepth_2'})
df.to_pickle('../output/cluster_0_2sensors.pickle')
```
| github_jupyter |
```
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/official/pipelines/metrics_viz_run_compare_kfp.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/official/pipelines/metrics_viz_run_compare_kfp.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/ai-platform-samples/raw/master/ai-platform-unified/notebooks/official/pipelines/metrics_viz_run_compare_kfp.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
# Metrics visualization and run comparison using the KFP SDK
## Overview
In this notebook, you will learn how to use [the Kubeflow Pipelines (KFP) SDK](https://www.kubeflow.org/docs/components/pipelines/) to build [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines) that generate model metrics and metrics visualizations; and how to compare pipeline runs.
### Objective
In this example, you'll learn:
- how to generate ROC curve and confusion matrix visualizations for classification results
- how to write metrics
- how to compare metrics across pipeline runs
### Costs
This tutorial uses billable components of Google Cloud:
* Vertex AI Training
* Cloud Storage
Learn about pricing for [Vertex AI](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
### Set up your local development environment
**If you are using Colab or Google Cloud Notebooks**, your environment already meets
all the requirements to run this notebook. You can skip this step.
**Otherwise**, make sure your environment meets this notebook's requirements.
You need the following:
* The Google Cloud SDK
* Git
* Python 3
* virtualenv
* Jupyter notebook running in a virtual environment with Python 3
The Google Cloud guide to [Setting up a Python development
environment](https://cloud.google.com/python/setup) and the [Jupyter
installation guide](https://jupyter.org/install) provide detailed instructions
for meeting these requirements. The following steps provide a condensed set of
instructions:
1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
1. [Install
virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)
and create a virtual environment that uses Python 3. Activate the virtual environment.
1. To install Jupyter, run `pip install jupyter` on the
command-line in a terminal shell.
1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
1. Open this notebook in the Jupyter Notebook Dashboard.
### Install additional packages
Install the KFP SDK and the Vertex SDK for Python.
```
import os

# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")

# Google Cloud Notebook requires dependencies to be installed with '--user'
# so they land in the writable per-user site-packages directory.
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
    USER_FLAG = "--user"

# IPython shell magic: install/upgrade the KFP SDK, the Vertex SDK for
# Python, and matplotlib into the kernel's environment.
! pip install {USER_FLAG} kfp google-cloud-aiplatform matplotlib --upgrade
```
### Restart the kernel
After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
```
# Automatically restart kernel after installs
import os

# Skip the restart when running under an automated test harness.
if not os.getenv("IS_TESTING"):
    # Shut down the kernel; Jupyter restarts it automatically, so the
    # packages installed above become importable.
    import IPython

    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
```
Check the version of the package you installed. The KFP SDK version should be >=1.6.
```
!python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
```
## Before you begin
This notebook does not require a GPU runtime.
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
1. [Enable the Vertex AI, Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com).
1. Follow the "**Configuring your project**" instructions from the Vertex Pipelines documentation.
1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
1. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
#### Set your project ID
**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
```
import os

PROJECT_ID = ""

# Get your Google Cloud project ID from gcloud. The IPython `!` magic
# captures the command's stdout as a list of lines.
if not os.getenv("IS_TESTING"):
    shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
    print("Project ID: ", PROJECT_ID)
```
Otherwise, set your project ID here.
```
# Fall back to a default project when gcloud did not supply one.
if PROJECT_ID == "" or PROJECT_ID is None:
    PROJECT_ID = "python-docs-samples-tests"  # @param {type:"string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
```
from datetime import datetime

# Second-resolution timestamp (e.g. "20210607123456") appended to resource
# names so concurrent users of a shared project do not collide.
TIMESTAMP = f"{datetime.now():%Y%m%d%H%M%S}"
```
### Authenticate your Google Cloud account
**If you are using Google Cloud Notebooks**, your environment is already
authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions
when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
1. In the Cloud Console, go to the [**Create service account key**
page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
2. Click **Create service account**.
3. In the **Service account name** field, enter a name, and
click **Create**.
4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"
into the filter box, and select
**Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
5. Click *Create*. A JSON file that contains your key downloads to your
local environment.
6. Enter the path to your service account key as the
`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
```
import os
import sys

# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.

# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")

# If on Google Cloud Notebooks, then don't execute this code (the
# environment is already authenticated).
if not IS_GOOGLE_CLOUD_NOTEBOOK:
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth

        google_auth.authenticate_user()

    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        %env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket as necessary
You will need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.
Set the name of your Cloud Storage bucket below. It must be unique across all
Cloud Storage buckets.
You may also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are
available](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions). You may
not use a Multi-Regional Storage bucket for training with Vertex AI.
```
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}
REGION = "us-central1"  # @param {type:"string"}

# Derive a default bucket name when the placeholder was not overridden.
# Use a "-aip-" separator: the previous concatenation of PROJECT_ID directly
# with "aip-" produced run-together names like "gs://my-projectaip-...".
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
### Import libraries and define constants
Define some constants.
```
# Make sure user-installed console scripts (e.g. the kfp CLI) are on PATH.
PATH = %env PATH
%env PATH={PATH}:/home/jupyter/.local/bin

USER = "your-user-name"  # <---CHANGE THIS

# Per-user root path under which pipeline artifacts are written.
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)

PIPELINE_ROOT
```
Do some imports:
```
from google.cloud import aiplatform
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import ClassificationMetrics, Metrics, Output, component
from kfp.v2.google.client import AIPlatformClient
```
### Initialize the Vertex SDK for Python
```
# Initialize the Vertex SDK for Python with the project chosen above.
aiplatform.init(project=PROJECT_ID)
```
## Define a pipeline
In this section, you define a pipeline that demonstrates some of the metrics logging and visualization features.
The example pipeline has three steps.
First define three pipeline *components*, then define a pipeline that uses them.
### Define Pipeline components
In this section, you define some Python function-based components that use scikit-learn to train some classifiers and produce evaluations that can be visualized.
Note the use of the `@component()` decorator in the definitions below. You can optionally set a list of packages for the component to install; the base image to use (the default is a Python 3.7 image); and the name of a component YAML file to generate, so that the component definition can be shared and reused.
The first component shows how to visualize an *ROC curve*.
Note that the function definition includes an output called `wmetrics`, of type `Output[ClassificationMetrics]`. You can visualize the metrics in the Pipelines user interface in the Cloud Console.
To do this, this example uses the artifact's `log_roc_curve()` method. This method takes as input arrays with the false positive rates, true positive rates, and thresholds, as [generated by the `sklearn.metrics.roc_curve` function](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html).
When you evaluate the cell below, a task factory function called `wine_classification` is created, that is used to construct the pipeline definition. In addition, a component YAML file is created, which can be shared and loaded via file or URL to create the same task factory function.
```
@component(
    # The installable PyPI distribution is "scikit-learn"; the old "sklearn"
    # name is a deprecated dummy package whose installation now fails.
    packages_to_install=["scikit-learn"],
    base_image="python:3.9",
    output_component_file="wine_classif_component.yaml",
)
def wine_classification(wmetrics: Output[ClassificationMetrics]):
    """Train a random-forest classifier on the wine dataset and log an ROC curve.

    The wine dataset is reduced to a binary problem (label 1 vs. the rest).
    Cross-validated class probabilities on the training split feed
    ``sklearn.metrics.roc_curve``, and the resulting curve is logged to the
    ``wmetrics`` output artifact for rendering in the Vertex Pipelines UI.
    """
    from sklearn.datasets import load_wine
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import roc_curve
    from sklearn.model_selection import cross_val_predict, train_test_split

    X, y = load_wine(return_X_y=True)
    # Binary classification problem for label 1.
    y = y == 1

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    rfc = RandomForestClassifier(n_estimators=10, random_state=42)
    rfc.fit(X_train, y_train)
    # Out-of-fold probability estimates; column 1 is P(label == True).
    y_scores = cross_val_predict(rfc, X_train, y_train, cv=3, method="predict_proba")
    fpr, tpr, thresholds = roc_curve(
        y_true=y_train, y_score=y_scores[:, 1], pos_label=True
    )
    wmetrics.log_roc_curve(fpr, tpr, thresholds)
```
The second component shows how to visualize a *confusion matrix*, in this case for a model trained using `SGDClassifier`.
As with the previous component, you create a `metricsc` output artifact of type `Output[ClassificationMetrics]`. Then, use the artifact's `log_confusion_matrix` method to visualize the confusion matrix results, as generated by the [sklearn.metrics.confusion_matrix](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html) function.
```
# "scikit-learn" is the real PyPI distribution name; "sklearn" is a
# deprecated dummy package whose installation now fails.
@component(packages_to_install=["scikit-learn"], base_image="python:3.9")
def iris_sgdclassifier(
    test_samples_fraction: float,
    metricsc: Output[ClassificationMetrics],
):
    """Train an SGD classifier on the iris dataset and log a confusion matrix.

    Args:
        test_samples_fraction: Fraction of the dataset held out as the test
            split (passed to ``train_test_split``).
        metricsc: Output artifact receiving the confusion matrix for
            visualization in the Vertex Pipelines UI.
    """
    from sklearn import datasets, model_selection
    from sklearn.linear_model import SGDClassifier
    from sklearn.metrics import confusion_matrix

    iris_dataset = datasets.load_iris()
    train_x, test_x, train_y, test_y = model_selection.train_test_split(
        iris_dataset["data"],
        iris_dataset["target"],
        test_size=test_samples_fraction,
    )

    classifier = SGDClassifier()
    classifier.fit(train_x, train_y)
    # Out-of-fold predictions on the training split.
    predictions = model_selection.cross_val_predict(classifier, train_x, train_y, cv=3)
    metricsc.log_confusion_matrix(
        ["Setosa", "Versicolour", "Virginica"],
        confusion_matrix(
            train_y, predictions
        ).tolist(),  # .tolist() to convert np array to list.
    )
```
The following component also uses the "iris" dataset, but trains a `LogisticRegression` model. It logs model `accuracy` in the `metrics` output artifact.
```
@component(
    # "scikit-learn" is the real PyPI distribution name; "sklearn" is a
    # deprecated dummy package whose installation now fails.
    packages_to_install=["scikit-learn"],
    base_image="python:3.9",
)
def iris_logregression(
    input_seed: int,
    split_count: int,
    metrics: Output[Metrics],
):
    """Train a logistic-regression model on iris and log its test accuracy.

    Args:
        input_seed: Random seed for K-fold shuffling and the train/test split.
        split_count: Number of cross-validation folds.
        metrics: Output artifact receiving the scalar ``accuracy`` metric
            (as a percentage).
    """
    from sklearn import datasets, model_selection
    from sklearn.linear_model import LogisticRegression

    # Load iris dataset (the original comment said "digits", which was wrong).
    iris = datasets.load_iris()
    # Create feature matrix
    X = iris.data
    # Create target vector
    y = iris.target
    # Fraction of the data held out for the final accuracy score.
    test_size = 0.20

    # cross-validation settings
    kfold = model_selection.KFold(
        n_splits=split_count, random_state=input_seed, shuffle=True
    )

    # Model instance
    model = LogisticRegression()
    scoring = "accuracy"
    results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring)
    print(f"results: {results}")

    # split data
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, test_size=test_size, random_state=input_seed
    )
    # fit model
    model.fit(X_train, y_train)

    # accuracy on test set
    result = model.score(X_test, y_test)
    print(f"result: {result}")
    metrics.log_metric("accuracy", (result * 100.0))
```
### Define a pipeline that uses the components
Next, define a simple pipeline that uses the components that were created in the previous section.
```
@dsl.pipeline(
    # Default pipeline root. You can override it when submitting the pipeline.
    pipeline_root=PIPELINE_ROOT,
    # A name for the pipeline.
    name="metrics-pipeline-v2",
)
def pipeline(seed: int, splits: int):
    """Assemble the three metric-producing components into a single pipeline.

    Args:
        seed: Random seed forwarded to the logistic-regression component.
        splits: Number of CV folds forwarded to the logistic-regression
            component.
    """
    # The three tasks are independent — no data flows between them — so the
    # task handles returned by the factory functions are not needed.
    wine_classification()
    iris_logregression(input_seed=seed, split_count=splits)
    iris_sgdclassifier(test_samples_fraction=0.3)
```
## Compile and run the pipeline
Now, you're ready to compile the pipeline:
```
from kfp.v2 import compiler  # noqa: F811

# Compile the pipeline function into a Vertex-compatible job spec file.
compiler.Compiler().compile(
    pipeline_func=pipeline, package_path="metrics_pipeline_job.json"
)
```
The pipeline compilation generates the `metrics_pipeline_job.json` job spec file.
Next, instantiate an API client object:
```
from kfp.v2.google.client import AIPlatformClient  # noqa: F811

# Client used to submit pipeline runs to Vertex Pipelines.
api_client = AIPlatformClient(
    project_id=PROJECT_ID,
    region=REGION,
)
```
Then, you run the defined pipeline like this:
```
# Submit the compiled job spec; the job id embeds the session timestamp so
# notebook reruns do not collide.
response = api_client.create_run_from_job_spec(
    job_spec_path="metrics_pipeline_job.json",
    job_id=f"metrics-pipeline-v2{TIMESTAMP}-1",
    # pipeline_root=PIPELINE_ROOT  # this argument is necessary if you did not specify PIPELINE_ROOT as part of the pipeline definition.
    parameter_values={"seed": 7, "splits": 10},
)
```
Click on the generated link to see your run in the Cloud Console.
## Pipeline run comparisons
### Comparing pipeline runs in the UI
Next, generate another pipeline run that uses a different `seed` and `split` for the `iris_logregression` step.
Submit the new pipeline run:
```
# Second run with different parameter values, for comparison with the first.
response = api_client.create_run_from_job_spec(
    job_spec_path="metrics_pipeline_job.json",
    job_id=f"metrics-pipeline-v2{TIMESTAMP}-2",
    # pipeline_root=PIPELINE_ROOT  # this argument is necessary if you did not specify PIPELINE_ROOT as part of the pipeline definition.
    parameter_values={"seed": 5, "splits": 7},
)
```
When both pipeline runs have finished, compare their results by navigating to the pipeline runs list in the Cloud Console, selecting both of them, and clicking **COMPARE** at the top of the Console panel.
## Comparing the parameters and metrics of pipeline runs from their tracked metadata
In this section, you use the Vertex SDK for Python to compare the parameters and metrics of the pipeline runs. Wait until the pipeline runs have finished to run this section.
### Extract metrics and parameters into a pandas dataframe for run comparison
Ingest the metadata for all runs of pipelines named `metrics-pipeline-v2` into a pandas dataframe.
```
# Pull parameters and metrics for every run of this pipeline into a dataframe.
pipeline_df = aiplatform.get_pipeline_df(pipeline="metrics-pipeline-v2")
pipeline_df
```
### Parallel coordinates plot of parameters and metrics
With the metrics and parameters in a dataframe, you can perform further analysis to extract useful information. The following example compares data from each run using a parallel coordinate plot.
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

plt.rcParams["figure.figsize"] = [15, 5]

# Parameter values come back as strings; cast to a numeric dtype for plotting.
pipeline_df["param.input:seed"] = pipeline_df["param.input:seed"].astype(np.float16)
pipeline_df["param.input:splits"] = pipeline_df["param.input:splits"].astype(np.float16)

# One polyline per run across the chosen parameter/metric axes.
ax = pd.plotting.parallel_coordinates(
    pipeline_df.reset_index(level=0),
    "run_name",
    cols=["param.input:seed", "param.input:splits", "metric.accuracy"],
    # color=['blue', 'green', 'pink', 'red'],
)
# symlog keeps small parameter values and large metric values legible on one axis.
ax.set_yscale("symlog")
ax.legend(bbox_to_anchor=(1.0, 0.5))
```
### Plot ROC curve and calculate AUC number
In addition to basic metrics, you can extract complex metrics and perform further analysis using the `get_pipeline_df` method.
```
pipeline_df = aiplatform.get_pipeline_df(pipeline="metrics-pipeline-v2")
pipeline_df

# The ROC points logged by wine_classification are stored per run under
# "metric.confidenceMetrics"; integrate recall over FPR to obtain the AUC.
df = pd.DataFrame(pipeline_df["metric.confidenceMetrics"][0])
auc = np.trapz(df["recall"], df["falsePositiveRate"])
plt.plot(df["falsePositiveRate"], df["recall"], label="auc=" + str(auc))
plt.legend(loc=4)
plt.show()
```
## Cleaning up
To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.
```
# Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path.
# ! gsutil -m rm -r $PIPELINE_ROOT
```
| github_jupyter |
### GluonTS Callbacks
This notebook illustrates how one can control the training with GluonTS callbacks. A callback is a function which gets called at one or more specific hook points during training.
You can use predefined GluonTS callbacks like the logging callback TrainingHistory, ModelAveraging or TerminateOnNaN, or you can implement your own callback.
#### 1. Using a single Callback
```
# fetching some data
from gluonts.dataset.repository.datasets import get_dataset

dataset = "m4_hourly"
dataset = get_dataset(dataset)
prediction_length = dataset.metadata.prediction_length
freq = dataset.metadata.freq

from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.mx.trainer import Trainer
from gluonts.mx.trainer.callback import TrainingHistory

# defining a callback, which will log the training loss for each epoch
history = TrainingHistory()

# A single callback can be passed directly (no list needed).
trainer = Trainer(epochs=20, callbacks=history)
estimator = SimpleFeedForwardEstimator(prediction_length=prediction_length, freq = freq, trainer=trainer)
predictor = estimator.train(dataset.train, num_workers=None)

# print the training loss over the epochs
print(history.loss_history)

# in case you are using a validation dataset you can get the validation loss with
# history.validation_loss_history
```
#### 2. Using multiple Callbacks
To continue the training from a given predictor you can use the WarmStart Callback. When you want to use more than one callback, provide them as a list:
```
from gluonts.mx.trainer.callback import WarmStart

# WarmStart initializes training from the weights of an existing predictor,
# so these 10 epochs continue from the 20 epochs trained above.
warm_start = WarmStart(predictor=predictor)

trainer = Trainer(epochs=10, callbacks=[history, warm_start])
estimator = SimpleFeedForwardEstimator(prediction_length=prediction_length, freq = freq, trainer=trainer)
predictor = estimator.train(dataset.train, num_workers=None)

print(history.loss_history)  # The training loss history of all 20+10 epochs we trained the model
```
#### 3. Default Callbacks
In addition to the Callbacks you specify, the GluonTS Trainer uses the two default Callbacks ModelAveraging and LearningRateReduction. You can turn them off by setting add_default_callbacks=False when initializing the Trainer.
```
# Three equivalent ways to combine user callbacks with GluonTS's defaults
# (ModelAveraging and LearningRateReduction):
trainer = Trainer(epochs=20, callbacks=history)  # use the TrainingHistory Callback and the default callbacks.
trainer = Trainer(epochs=20, callbacks=history, add_default_callbacks=False)  # use only the TrainingHistory Callback
trainer = Trainer(epochs=20, add_default_callbacks=False)  # use no callback at all
```
#### 4. Custom Callbacks
To implement your own Callback you can write a class which inherits from the GluonTS Callback class and overwrite one or more of the hooks.
```
# Have a look at the abstract Callback class, the hooks take different arguments which you can use.
# Hook methods with boolean return value stop the training if False is returned.
from gluonts.mx.trainer.callback import Callback
import inspect

# Print the Callback base class source so the available hooks are visible.
lines = inspect.getsource(Callback)
print(lines)

# Here is an example implementation of a Metric value based early stopping custom callback implementation
# it only implements the hook method "on_epoch_end()"
# which gets called after all batches of one epoch have been processed

from gluonts.evaluation import Evaluator
from gluonts.dataset.common import Dataset
from gluonts.mx.model.predictor import GluonPredictor
from mxnet.gluon import nn
from mxnet import gluon
import numpy as np
import mxnet as mx

from gluonts.support.util import copy_parameters
class MetricInferenceEarlyStopping(Callback):
    """
    Early Stopping mechanism based on the prediction network.
    Can be used to base the Early Stopping directly on a metric of interest, instead of on the training/validation loss.
    In the same way as test datasets are used during model evaluation,
    the time series of the validation_dataset can overlap with the train dataset time series,
    except for a prediction_length part at the end of each time series.

    Parameters
    ----------
    validation_dataset
        An out-of-sample dataset which is used to monitor metrics
    predictor
        A gluon predictor, with a prediction network that matches the training network
    evaluator
        The Evaluator used to calculate the validation metrics.
    metric
        The metric on which to base the early stopping on.
    patience
        Number of epochs to train on given the metric did not improve more than min_delta.
    min_delta
        Minimum change in the monitored metric counting as an improvement
    verbose
        Controls, if the validation metric is printed after each epoch.
    minimize_metric
        The metric objective.
    restore_best_network
        Controls, if the best model, as assessed by the validation metrics is restored after training.
    num_samples
        The amount of samples drawn to calculate the inference metrics.
    """

    def __init__(
        self,
        validation_dataset: Dataset,
        predictor: GluonPredictor,
        evaluator: Evaluator = Evaluator(num_workers=None),
        metric: str = "MSE",
        patience: int = 10,
        min_delta: float = 0.0,
        verbose: bool = True,
        minimize_metric: bool = True,
        restore_best_network: bool = True,
        num_samples: int = 100,
    ):
        assert (
            patience >= 0
        ), "EarlyStopping Callback patience needs to be >= 0"
        assert (
            min_delta >= 0
        ), "EarlyStopping Callback min_delta needs to be >= 0.0"
        assert (
            num_samples >= 1
        ), "EarlyStopping Callback num_samples needs to be >= 1"

        # Materialize the dataset once so it can be re-iterated every epoch.
        self.validation_dataset = list(validation_dataset)
        self.predictor = predictor
        self.evaluator = evaluator
        self.metric = metric
        self.patience = patience
        # NOTE(review): min_delta is validated and stored but never applied in
        # the improvement test in on_epoch_end — confirm intended behavior.
        self.min_delta = min_delta
        self.verbose = verbose
        self.restore_best_network = restore_best_network
        self.num_samples = num_samples

        # Direction of improvement: smaller-is-better for losses/errors,
        # larger-is-better for scores.
        if minimize_metric:
            self.best_metric_value = np.inf
            self.is_better = np.less
        else:
            self.best_metric_value = -np.inf
            self.is_better = np.greater

        # One validation-metric value recorded per epoch.
        self.validation_metric_history: list = []
        self.best_network = None
        # Number of consecutive epochs without improvement.
        self.n_stale_epochs = 0

    def on_epoch_end(
        self,
        epoch_no: int,
        epoch_loss: float,
        training_network: nn.HybridBlock,
        trainer: gluon.Trainer,
        best_epoch_info: dict,
        ctx: mx.Context
    ) -> bool:
        """Evaluate the validation metric and decide whether to keep training.

        Returns False (which stops training) once the metric has not improved
        for `patience` consecutive epochs.
        """
        should_continue = True
        # Copy the freshly trained weights into the inference network.
        copy_parameters(training_network, self.predictor.prediction_net)

        from gluonts.evaluation.backtest import make_evaluation_predictions

        forecast_it, ts_it = make_evaluation_predictions(
            dataset=self.validation_dataset,
            predictor=self.predictor,
            num_samples=self.num_samples,
        )

        agg_metrics, item_metrics = self.evaluator(ts_it, forecast_it)
        current_metric_value = agg_metrics[self.metric]
        self.validation_metric_history.append(current_metric_value)

        if self.verbose:
            print(
                f"Validation metric {self.metric}: {current_metric_value}, best: {self.best_metric_value}"
            )

        if self.is_better(current_metric_value, self.best_metric_value):
            self.best_metric_value = current_metric_value

            if self.restore_best_network:
                # Snapshot the best weights so they can be restored at stop time.
                training_network.save_parameters("best_network.params")

            self.n_stale_epochs = 0
        else:
            self.n_stale_epochs += 1
            if self.n_stale_epochs == self.patience:
                should_continue = False
                print(
                    f"EarlyStopping callback initiated stop of training at epoch {epoch_no}."
                )

                if self.restore_best_network:
                    print(
                        f"Restoring best network from epoch {epoch_no - self.patience}."
                    )
                    training_network.load_parameters("best_network.params")

        return should_continue
# use the custom callback
from gluonts.dataset.repository.datasets import get_dataset
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.mx.trainer import Trainer

dataset = "m4_hourly"
dataset = get_dataset(dataset)
prediction_length = dataset.metadata.prediction_length
freq = dataset.metadata.freq

estimator = SimpleFeedForwardEstimator(prediction_length=prediction_length, freq = freq)

# Build an (initially untrained) predictor sharing the training network's
# architecture; the callback copies weights into it after every epoch.
training_network = estimator.create_training_network()
transformation = estimator.create_transformation()
predictor = estimator.create_predictor(transformation=transformation, trained_network=training_network)

# NOTE(review): early-stopping on dataset.test leaks the test split into
# model selection; a separate validation split would be preferable.
es_callback = MetricInferenceEarlyStopping(validation_dataset=dataset.test, predictor=predictor, metric="MSE")

trainer = Trainer(epochs=200, callbacks=es_callback)
estimator.trainer = trainer

pred = estimator.train(dataset.train)
```
| github_jupyter |
```
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
class Encoder(nn.Module):
    """Stacked bidirectional-GRU sequence encoder.

    Each layer is a single-layer bi-GRU whose forward/backward outputs are
    summed, batch-normalized over the flattened (seq_len * enc_hid_dim)
    feature axis, and passed through a leaky ReLU before feeding the next
    layer.  The last forward and backward hidden states of the top layer are
    projected to `dec_hid_dim` to initialize a downstream decoder.

    NOTE(review): because batch-norm is sized seq_len * enc_hid_dim, the
    module appears to require fixed-length inputs of exactly `seq_len` —
    confirm against callers.
    """

    def __init__(self, seq_len, input_size, enc_hid_dim, num_gru, dec_hid_dim,
                 dropout_rate, device, use_pooling=False):
        super().__init__()
        self.seq_len = seq_len
        self.input_size = input_size
        self.num_gru = num_gru          # number of stacked bi-GRU layers
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        self.dropout_rate = dropout_rate
        self.device = device
        self.use_pooling = use_pooling  # pooling path is not implemented yet

        self.rnn_stack = nn.ModuleList()
        self.batch_norm_stack = nn.ModuleList()
        # First layer consumes the raw features; later layers consume the
        # (direction-summed) enc_hid_dim outputs of the previous layer.
        for i in range(num_gru):
            _input_size = input_size if i == 0 else enc_hid_dim
            self.rnn_stack.append(self.biGru(input_size=_input_size,
                                             hidden_size=enc_hid_dim, dropout_rate=dropout_rate))
        # One batch-norm per GRU layer, over the flattened sequence
        # (see layerBlock), hence seq_len * enc_hid_dim features.
        for i in range(num_gru):
            self.batch_norm_stack.append(
                nn.BatchNorm1d(num_features=seq_len*enc_hid_dim).to(self.device))

        # Projects the concatenated last forward/backward hidden states to
        # the decoder's hidden size.
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
        # Currently unused unless the pooling option is implemented.
        self.pool = nn.MaxPool2d(kernel_size=(2,1), stride=(2,1))

    def forward(self, input):
        # input = [batch_size, seq_len, input_size]
        for gru, batch_norm in zip(self.rnn_stack, self.batch_norm_stack):
            output, h_n = self.layerBlock(gru, batch_norm, input)
            input = output  # feed this layer's output into the next layer

        # h_n[-2] / h_n[-1] are the last forward / backward hidden states of
        # the final GRU; concatenated and projected for the decoder init.
        init_hidden_decoder = self.fc(torch.cat((h_n[-2, :, : ], h_n[-1, :, : ]),
                                                dim=1))
        return output, h_n , init_hidden_decoder

    def biGru(self, input_size, hidden_size, dropout_rate):
        # NOTE(review): dropout on a single-layer nn.GRU has no effect and
        # only triggers a PyTorch warning — confirm whether intended.
        return nn.GRU(input_size=input_size, hidden_size=hidden_size, bias=True,
                      bidirectional=True, batch_first=True, dropout=dropout_rate)

    def layerBlock(self, gru, batch_norm, input):
        # input = [batch_size, seq_len, input_size]
        output, h_n = gru(input)
        # output = [batch_size, seq_len, enc_hid_dim * num_directions]
        # h_n = [num_layers * num_directions, batch_size, enc_hid_dim]

        # Split the direction axis out and sum forward + backward outputs.
        output = output.contiguous().view(output.size(0), output.size(1), 2, -1)
        output = output.sum(2)
        output = output.view(output.size(0), output.size(1), -1)

        # BatchNorm1d expects (N, C); flatten (seq_len, hid) into a single
        # feature axis, normalize, then restore the original shape.
        b, s, h = output.size()
        output = output.view(b, -1)
        output = batch_norm(output)
        output = output.view(b, s, h)
        """
        #batch_norm = nn.BatchNorm1d(num_features=output.size(2)).to(self.device)
        # batch_norm input -> (N,C,L), where C is num_features.
        #output = batch_norm(output.permute(0, 2, 1)).permute(0, 2, 1)
        # first permute to match batch_norm input convention
        # then second permute to contruct original shape.
        # output = [batch_size, seq_len, enc_hid_dim * num_directions]
        """
        output = F.leaky_relu(output)

        if self.use_pooling:
            raise NotImplementedError('Implement pooling option for first 3 layer.')
            """
            reminder = output.size(0) % h_n.size(0)
            h_n = h_n.repeat(math.floor(output.size(0) / h_n.size(0)), 1, 1)
            if not reminder == 0:
                zeros = torch.zeros(output.size(0) % h_n.size(0), h_n.size(1), h_n.size(2))
                h_n = torch.cat((h_n, zeros), dim=0)
            merge_output = torch.cat((output, h_n), dim=2)
            merge_output = merge_output.permute(1, 0, 2)
            merge_output = merge_output.unsqueeze(1)
            merge_output = pool(merge_output)
            merge_output = merge_output.squeeze(1)
            """
        return output, h_n
```
| github_jupyter |
```
import pandas as pd

# The survey export is not UTF-8; read it with a Latin-1 style encoding.
star_wars = pd.read_csv("star_wars.csv", encoding = 'ISO-8859-1')
star_wars.head(10)
star_wars.columns

# Removing NaN rows of RespondentIDs
print(star_wars.shape)
star_wars = star_wars[pd.notnull(star_wars['RespondentID'])]
print(star_wars.shape)

# Convert the Yes/No answer columns to booleans.
yes_no = {
    "Yes" : True,
    "No" : False,
}
cols = ['Have you seen any of the 6 films in the Star Wars franchise?', 'Do you consider yourself to be a fan of the Star Wars film franchise?']
for col in cols:
    star_wars[col] = star_wars[col].map(yes_no)
star_wars.head()

# Columns 3-8 hold one checkbox per film ("which films have you seen?").
seen_movies = star_wars.columns[3:9]
print(seen_movies)

import numpy as np

# A film title in the cell means "seen"; a missing answer means "not seen".
movie_mapping = {
    "Star Wars: Episode I The Phantom Menace": True,
    np.nan: False,
    "Star Wars: Episode II Attack of the Clones": True,
    "Star Wars: Episode III Revenge of the Sith": True,
    "Star Wars: Episode IV A New Hope": True,
    "Star Wars: Episode V The Empire Strikes Back": True,
    "Star Wars: Episode VI Return of the Jedi": True
}

for col in star_wars.columns[3:9]:
    star_wars[col] = star_wars[col].map(movie_mapping)

# Shorten the unwieldy "seen" column names to seen_1 ... seen_6.
star_wars = star_wars.rename(columns={
    "Which of the following Star Wars films have you seen? Please select all that apply.": "seen_1",
    "Unnamed: 4": "seen_2",
    "Unnamed: 5": "seen_3",
    "Unnamed: 6": "seen_4",
    "Unnamed: 7": "seen_5",
    "Unnamed: 8": "seen_6"
})
star_wars.head()

# Likewise for the 1-6 preference rankings: ranking_1 ... ranking_6.
star_wars = star_wars.rename(columns={
    "Please rank the Star Wars films in order of preference with 1 being your favorite film in the franchise and 6 being your least favorite film.": "ranking_1",
    "Unnamed: 10": "ranking_2",
    "Unnamed: 11": "ranking_3",
    "Unnamed: 12": "ranking_4",
    "Unnamed: 13": "ranking_5",
    "Unnamed: 14": "ranking_6"
})
star_wars.head()

# Rankings arrive as strings; cast to float and plot the mean rank per film
# (a lower mean rank means better liked).
star_wars[star_wars.columns[9:15]] = star_wars[star_wars.columns[9:15]].astype(float)
ranking_films = star_wars[star_wars.columns[9:15]].mean()

%matplotlib inline
ranking_films.plot.bar()
```
# Ranking
So far, we have cleaned the data, column names. It should be noted that the films in 1, 2, 3 are relatively modern movies, where as movies in 4, 5, 6 columns are older generation movies in 77, 80 and 83 respectively. As expected, these ranked higher than the newer movies.
# The most viewed movie in the Star Wars series is
```
# Count how many respondents saw each film (True sums as 1).
most_seen_films = star_wars[star_wars.columns[3:9]].sum()
print(most_seen_films)
most_seen_films.plot.bar()
```
# View counts
It looks like the older movies in the series are watched more than the newer movies of the series. This is consistent with what we have observed in the rankings.
# Does Males and Females like different sets of movies in the StarWars series?
```
# Split respondents by self-reported gender and compare group sizes.
males = star_wars[star_wars['Gender'] == 'Male']
females = star_wars[star_wars['Gender'] == 'Female']
print(males.shape[0], females.shape[0])
```
Interesting: I expected the number of male respondents to be higher than the number of female respondents.
### Most liked and seen movie in the Series by Males
```
# Mean ranking (lower = better) and fraction seen, male respondents only.
males_ranking_films = males[males.columns[9:15]].mean()
males_ranking_films.plot.bar()
males_seen_films = males[males.columns[3:9]].mean()
males_seen_films.plot.bar()
```
### Most liked and seen movie in the Series by Females
```
# Mean ranking (lower = better) and fraction seen, female respondents only.
females_ranking_films = females[females.columns[9:15]].mean()
females_ranking_films.plot.bar()
females_seen_films = females[females.columns[3:9]].mean()
females_seen_films.plot.bar()
```
**Observations:**
- Both male and female viewers show the overall trend: both groups liked and watched the older movies in the series more than the newer movies.
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Tutorial #2: Deploy an image classification model in Azure Container Instance (ACI)
This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud.
Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself.
In this part of the tutorial, you use Azure Machine Learning service (Preview) to:
> * Set up your testing environment
> * Retrieve the model from your workspace
> * Test the model locally
> * Deploy the model to ACI
> * Test the deployed model
ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where).
## Prerequisites
Complete the model training in the [Tutorial #1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
```
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial

# register a model
from azureml.core import Workspace
ws = Workspace.from_config()

from azureml.core.model import Model

model_name = "sklearn_mnist"
model = Model.register(model_path="sklearn_mnist_model.pkl",
                       model_name=model_name,
                       tags={"data": "mnist", "model": "classification"},
                       description="Mnist handwriting recognition",
                       workspace=ws)

from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies

# to install required packages: an environment pinning the scikit-learn
# version used at training time plus the azureml packages the scoring
# container needs.
env = Environment('tutorial-env')
cd = CondaDependencies.create(pip_packages=['azureml-dataprep[pandas,fuse]>=1.1.14', 'azureml-defaults'], conda_packages = ['scikit-learn==0.22.1'])

env.python.conda_dependencies = cd

# Register environment to re-use later
env.register(workspace = ws)
```
## Set up the environment
Start by setting up a testing environment.
### Import packages
Import the Python packages needed for this tutorial.
```
# Render matplotlib figures inline in the notebook.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
```
## Deploy as web service
Deploy the model as a web service hosted in ACI.
To build the correct environment for ACI, provide the following:
* A scoring script to show how to use the model
* A configuration file to build the ACI
* The model you trained before
### Create scoring script
Create the scoring script, called score.py, used by the web service call to show how to use the model.
You must include two required functions into the scoring script:
* The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started.
* The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported.
```
%%writefile score.py
# Scoring entry script: Azure ML calls init() once when the container starts
# and run() once per scoring request.
import json
import numpy as np
import os
import pickle
# NOTE(review): `pickle` is unused here; joblib does the model loading.
import joblib
def init():
    """Load the registered sklearn model into a module-level global."""
    global model
    # AZUREML_MODEL_DIR is an environment variable created during deployment.
    # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
    # For multiple models, it points to the folder containing all deployed models (./azureml-models)
    model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_mnist_model.pkl')
    model = joblib.load(model_path)
def run(raw_data):
    """Score one request; `raw_data` is a JSON string with a "data" array."""
    data = np.array(json.loads(raw_data)['data'])
    # make prediction
    y_hat = model.predict(data)
    # you can return any data type as long as it is JSON-serializable
    return y_hat.tolist()
```
### Create configuration file
Create a deployment configuration file and specify the number of CPUs and gigabyte of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you feel you need more later, you would have to recreate the image and redeploy the service.
```
from azureml.core.webservice import AciWebservice
# Request a 1-CPU / 1 GB ACI container; tags/description appear in the portal.
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
tags={"data": "MNIST", "method" : "sklearn"},
description='Predict MNIST with sklearn')
```
### Deploy in ACI
Estimated time to complete: **about 2-5 minutes**
Configure the image and deploy. The following code goes through these steps:
1. Create environment object containing dependencies needed by the model using the environment file (`myenv.yml`)
1. Create inference configuration necessary to deploy the model as a web service using:
* The scoring file (`score.py`)
* The environment object created in the previous step
1. Deploy the model to the ACI container.
1. Get the web service HTTP endpoint.
```
%%time
import uuid
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core import Workspace
from azureml.core.model import Model
ws = Workspace.from_config()
model = Model(ws, 'sklearn_mnist')
# Reuse the environment registered earlier (version pinned to "1").
myenv = Environment.get(workspace=ws, name="tutorial-env", version="1")
inference_config = InferenceConfig(entry_script="score.py", environment=myenv)
# Random 4-char suffix keeps the service name unique across reruns.
service_name = 'sklearn-mnist-svc-' + str(uuid.uuid4())[:4]
# `aciconfig` comes from the configuration cell above.
service = Model.deploy(workspace=ws,
name=service_name,
models=[model],
inference_config=inference_config,
deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
```
Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
```
print(service.scoring_uri)
```
## Test the model
### Download test data
Download the test data to the **./data/** directory
```
import os
from azureml.core import Dataset
from azureml.opendatasets import MNIST
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok=True)
# Download the MNIST open-dataset files into ./data/.
mnist_file_dataset = MNIST.get_file_dataset()
mnist_file_dataset.download(data_folder, overwrite=True)
```
### Load test data
Load the test data from the **./data/** directory created during the training tutorial.
```
from utils import load_data
import os
import glob
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
# Glob recursively because the download nests the .gz files in subfolders.
X_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-images-idx3-ubyte.gz"), recursive=True)[0], False) / 255.0
y_test = load_data(glob.glob(os.path.join(data_folder,"**/t10k-labels-idx1-ubyte.gz"), recursive=True)[0], True).reshape(-1)
```
### Predict test data
Feed the test dataset to the model to get predictions.
The following code goes through these steps:
1. Send the data as a JSON array to the web service hosted in ACI.
1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
```
import json
# Serialize the whole test set as JSON and score it against the web service.
test = json.dumps({"data": X_test.tolist()})
test = bytes(test, encoding='utf8')
y_hat = service.run(input_data=test)
```
### Examine the confusion matrix
Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions.
```
from sklearn.metrics import confusion_matrix
# Rows = true labels, columns = predicted labels.
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
print('Overall accuracy:', np.average(y_hat == y_test))
```
Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the predicted values, and the Y axis represents the actual values. The color in each grid represents the error rate. The lighter the color, the higher the error rate is. For example, many 5's are mis-classified as 3's, so the grid cell in row 5 (true label), column 3 (prediction) is bright.
```
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
# matshow puts the row index (true label) on the y axis and the column
# index (prediction) on the x axis.
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
```
## Show predictions
Test the deployed model with a random sample of 30 images from the test data.
1. Print the returned predictions and plot them along with the input images. Red font and inverse image (white on black) is used to highlight the misclassified samples.
Since the model accuracy is high, you might have to run the following code a few times before you can see a misclassified sample.
```
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
    plt.subplot(1, n, i + 1)
    plt.axhline('')
    plt.axvline('')
    # use different color for misclassified sample
    font_color = 'red' if y_test[s] != result[i] else 'black'
    # Misclassified digits are shown inverted (white on black).
    clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
    plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
    plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
    i = i + 1
plt.show()
```
You can also send raw HTTP request to test the web service.
```
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
```
## Clean up resources
To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
```
service.delete()
```
If you're not going to use what you've created here, delete the resources you just created with this quickstart so you don't incur any charges. In the Azure portal, select and delete your resource group. You can also keep the resource group, but delete a single workspace by displaying the workspace properties and selecting the Delete button.
## Next steps
In this Azure Machine Learning tutorial, you used Python to:
> * Set up your testing environment
> * Retrieve the model from your workspace
> * Test the model locally
> * Deploy the model to ACI
> * Test the deployed model
You can also try out the [regression tutorial](regression-part1-data-prep.ipynb).

| github_jupyter |
# Training on CIFAR-10 data
## Define the model
We now define the `DeepNN` model and several functions that load and prepare the data. Finally, we arrive at the function `setup_and_train`, that defines, trains and evaluates the model, and takes the following parameters as input:
`activation` : Defines activations: CELU, ELU, GELU, LeakyReLU, Maxout, ReLU, ReLU6, RReLU, SELU, Sigmoid, Softplus
Swish, Tanh, APL, Mixture, PAU, PReLU, SLAF, AReLU
`epochs` : Number of epochs to train
`depth` : Depth of NN
`width` : Width of NN
`lr` : Learning rate
```
######################################################################
# Import and set manual seed
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import activations
# Fixed seed so repeated runs initialize weights/shuffling identically.
torch.manual_seed(0)
########################################################################
# Download and define the training set.
batchsize = 128
# Scale each channel to [-1, 1]: (x - 0.5) / 0.5.
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# NOTE(review): hard-coded dataset path -- adjust for your machine.
trainset = torchvision.datasets.CIFAR10(root='/home/densechen/dataset', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batchsize,
shuffle=True, num_workers=2)
# NOTE(review): assumes a CUDA GPU is available.
device = "cuda:0"
########################################################################
#Define input-output Jacobian
def get_jacobian(model, x):
    """Return the Jacobian of the model output w.r.t. a single input image.

    Trick: replicate the flattened input once per output class so that one
    backward pass with an identity matrix of upstream gradients fills
    x.grad with the full (noutputs x input_dim) Jacobian.
    """
    nc = x.size()[0]
    ny = x.size()[2]
    nx = x.size()[1]
    # Number of network outputs (10 CIFAR-10 classes).
    noutputs = 10
    x = x.reshape(nc*nx*ny)
    # One copy of the input per output unit.
    x = x.repeat(noutputs,1)
    x.requires_grad_(True)
    y = model(x.reshape(noutputs,nc,nx,ny))
    # Row i of the identity selects d(y[i]) / d(input copy i).
    y.backward(torch.eye(noutputs).to(device))
    return x.grad.data
########################################################################
# Define activation with ELSA
class ELSA(nn.Module):
    """Wrap a base activation, optionally adding the learnable ELSA
    residual branch: act(x) + beta*x for x > 0, act(x) + alpha*x otherwise.

    :param activation: name of the base activation in the `activations` module.
    :param with_elsa: if True, add the learnable linear residual branch.
    :param kwargs: forwarded to the base activation; may also carry the
        initial raw values of ``alpha`` (default 0.90) and ``beta`` (default 2.0).
    """
    def __init__(self, activation: str = "ReLU", with_elsa: bool=False, **kwargs):
        super().__init__()
        self.activation = activations.__class_dict__[activation](**kwargs)
        self.with_elsa = with_elsa
        if self.with_elsa:
            self.alpha = nn.Parameter(torch.tensor([kwargs.get("alpha", 0.90)]))
            self.beta = nn.Parameter(torch.tensor([kwargs.get("beta", 2.0)]))
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.with_elsa:
            # Constrain the learnable slopes: alpha clamped to [0.01, 0.99],
            # beta squashed into (0, 1) via a sigmoid.
            alpha = torch.clamp(self.alpha, min=0.01, max=0.99)
            beta = torch.sigmoid(self.beta)
            # BUG FIX: the original multiplied by the raw, unconstrained
            # self.beta / self.alpha, leaving the constrained values above
            # computed but unused. Use the constrained slopes instead.
            return self.activation(x) + torch.where(x > 0, x * beta, x * alpha)
        else:
            return self.activation(x)
########################################################################
# Define fully connected network with different activation functions.
class DeepNN(nn.Module):
    """A plain MLP over flattened 3x32x32 CIFAR images.

    Architecture: input linear (3072 -> width), `depth` hidden blocks of
    (linear width -> width, activation), then an output linear (width -> 10).
    Every activation is wrapped in ELSA so its residual branch can be
    toggled via `with_elsa`.
    """
    def __init__(self, width: int, depth: int, activation: str="ReLU", with_elsa: bool = False, **kwargs):
        """
        :param width: the width of each hidden linear layer.
        :param depth: how many hidden blocks to stack.
        :param activation: name of the base activation to use throughout.
        :param with_elsa: whether to enable the ELSA residual branch.
        :param kwargs: extra arguments forwarded to the activation functions.
        """
        super(DeepNN, self).__init__()
        self.linear_input = nn.Linear(3*32*32, width)
        self.activation_input = ELSA(activation, with_elsa=with_elsa, **kwargs)
        # Each hidden block is a linear layer followed by its activation.
        hidden_blocks = [
            nn.Sequential(nn.Linear(width, width),
                          ELSA(activation, with_elsa=with_elsa, **kwargs))
            for _ in range(depth)
        ]
        self.linear_layers = nn.ModuleList(hidden_blocks)
        self.linear_output = nn.Linear(width, 10)
    def forward(self, x):
        """Flatten the image batch and push it through the stack; returns logits."""
        out = self.activation_input(self.linear_input(x.view(-1, 3*32*32)))
        for block in self.linear_layers:
            out = block(out)
        return self.linear_output(out)
def setup_and_train(
    epochs: int=10, lr: float=1e-3, width: int=256, depth: int=2,
    activation: str = "ReLU", plt_jacobian: bool = True, **kwargs):
    """Train DeepNN twice (without, then with ELSA), histogram the log10
    singular values of each trained model's input-output Jacobian, and
    save the comparison figure to pictures/<activation>.pdf.

    Relies on the module-level `trainloader` and `device` defined above.
    NOTE(review): `plt_jacobian` (and the inner `log_interval`) are unused.

    :param epochs: number of training epochs per model.
    :param lr: SGD learning rate.
    :param width: hidden-layer width of DeepNN.
    :param depth: number of hidden blocks in DeepNN.
    :param activation: name of the base activation to wrap in ELSA.
    :param kwargs: forwarded to the activation constructors.
    """
    def train(with_elsa: bool = False):
        # Build, train, and return one DeepNN instance.
        ######################################################################
        # Model setup
        model = DeepNN(width, depth, activation, with_elsa=with_elsa, **kwargs)
        model.to(device);
        ######################################################################
        # Define criterion and optimizer
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(model.parameters(), lr = lr)
        ######################################################################
        # Train the model
        model.train()
        for epoch in range(epochs): # loop over the dataset multiple times
            epoch_start_time = time.time()
            running_loss = 0.0
            log_interval = 100
            for batch, data in enumerate(trainloader, 0):
                # get the inputs; data is a list of [inputs, labels]
                inputs, labels = data[0].to(device), data[1].to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward + backward + optimize
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                # print statistics
                running_loss += loss.item()
                # Running mean of the loss over the epoch so far.
                cur_loss = running_loss / (batch+1)
            print('| end of epoch {:3d} | time / epoch {:5.2f}s | loss {:5.2f}'.format
                  (epoch+1, (time.time() - epoch_start_time),cur_loss))
            running_loss = 0.
        return model
    def gather(model):
        # Sample 100 random inputs, collect the singular values of the
        # model's input-output Jacobian, and return their log10 values.
        d_collected = list()
        u_collected = list()
        for i in range(100):
            src = torch.randn(3, 32, 32).to(device)
            J = get_jacobian(model,src)
            # SVD on CPU; d holds the singular values.
            v, d, u = torch.svd(J.to('cpu'))
            d_collected.append(d.numpy().tolist())
            u_collected.append(u.numpy().tolist())
        d_ = np.asarray(d_collected).flatten()
        # log base 10 via change of base.
        y = np.log(d_)/np.log(10)
        return y
    with PdfPages(f"pictures/{activation}.pdf") as pdf:
        # train without elsa
        print("train model without ELSA")
        print("---"*10)
        model = train(with_elsa=False)
        y = gather(model)
        fig, ax = plt.subplots()
        opacity=.7
        plt.ylim((0,4))
        plt.xlim((-4,2))
        ax.hist(y, bins = 10, alpha = opacity, label = activation + " w/o ELSA",
                density = True)
        # train with elsa
        print("\ntrain model with ELSA")
        print("---"*10)
        model = train(with_elsa=True)
        y = gather(model)
        ax.hist(y, bins = 10, alpha = opacity, label = activation + " w/ ELSA",
                density = True)
        ax.legend(loc='upper right')
        ax.set_xlabel(r'$log(\lambda_{io}$)')
        pdf.savefig()
        plt.show()
```
## CELU
```
setup_and_train(activation="CELU")
```
## ELU
```
setup_and_train(activation="ELU")
```
## GELU
```
setup_and_train(activation="GELU")
```
## LeakyReLu
```
setup_and_train(activation="LeakyReLU")
```
## Maxout
```
setup_and_train(activation="Maxout")
```
## ReLU
```
setup_and_train(activation="ReLU")
```
## ReLU6
```
setup_and_train(activation="ReLU6")
```
## RReLU
```
setup_and_train(activation="RReLU")
```
## SELU
```
setup_and_train(activation="SELU")
```
## Sigmoid
```
setup_and_train(activation="Sigmoid")
```
## Softplus
```
setup_and_train(activation="Softplus")
```
## Swish
```
setup_and_train(activation="Swish")
```
## Tanh
```
setup_and_train(activation="Tanh")
```
## APL
```
setup_and_train(activation="APL")
```
## Mixture
```
setup_and_train(activation="Mixture")
```
## PAU
```
setup_and_train(activation="PAU")
```
## PReLU
```
setup_and_train(activation="PReLU")
```
## SLAF
```
setup_and_train(activation="SLAF")
```
| github_jupyter |
### OkCupid DataSet: Classify using combination of text data and metadata
### Meeting 5, 03- 03- 2020
### Recap last meeting's decisions:
<ol>
<p>Meeting 4, 28- 01- 2020</p>
<li> Approach 1: </li>
<ul>
<li>Merge classes 1, 3 and 5</li>
<li>Under sample class 6 </li>
<li> Merge classes 6, 7, 8</li>
</ul>
<li> Approach 2:</li>
<ul>
<li>Merge classes 1, 3 and 5 as class 1</li>
<li> Merge classes 6, 7, 8 as class 8</li>
<li>Under sample class 8 </li>
</ul>
<li> collect metadata: </li>
<ul>
<li> Number of misspelled </li>
<li> Number of unique words </li>
<li> Avg no wordlength </li>
</ul>
</ol>
## Education level summary
<ol>
<p></p>
<img src="rep2_image/count_diag.JPG">
</ol>
<ol>
<p></p>
<img src="rep2_image/count_table.JPG">
</ol>
## Logistic regression after removing minority classes and undersampling
<ol>
<p></p>
<img src="rep2_image/log1.JPG">
</ol>
## Merge levels:
- Merge classs 1, 3 and 5 as class 1
- Merge classes 6, 7, 8 as class 8
- weight classes while classifying using Logistic regression
<ol>
<p></p>
<img src="rep2_image/count_table2.JPG">
</ol>
<ol>
<p></p>
### Logistic regression with undersampling
<img src="rep2_image/log_undersampling.JPG">
</ol>
<ol>
<p></p>
### Logistic regression with weighting
<img src="rep2_image/log_weight.JPG">
</ol>
### Add metadata to the dataset
```
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
import seaborn as sns
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
from sklearn.preprocessing import FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import FeatureUnion
from collections import Counter
from sklearn.naive_bayes import MultinomialNB
import numpy as np
import itertools
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import resample
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """Print `cm` and render it as an annotated heatmap.

    :param cm: square confusion-matrix array (rows = true, cols = predicted).
    :param classes: tick labels for both axes.
    :param normalize: if True, convert each row to fractions before plotting.
    :param title: figure title.
    :param cmap: matplotlib colormap for the heatmap.
    """
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)
    # Integer counts unless the matrix was normalized to fractions.
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    # Annotate every cell; flip text color to white on dark backgrounds.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Load the stylometric-features dataset (cleaned essay text plus metadata
# columns: character/word counts, punctuation, misspellings, age, sex, and
# the ISCED education label).
df = pd.read_csv (r'../../../data/processed/stylo_cupid2.csv')
df.columns
# import readability
# from tqdm._tqdm_notebook import tqdm_notebook
# tqdm_notebook.pandas()
# def text_readability(text):
# results = readability.getmeasures(text, lang='en')
# return results['readability grades']['FleschReadingEase']
# df['readability'] = df.progress_apply(lambda x:text_readability(x['text']), axis=1)
df.head()
# Read metadata dataset to dataframe
# df = pd.read_csv (r'../../../data/processed/stylo_cupid2.csv')
# Encode sex numerically in place: 'm' -> 0.0, 'f' -> 1.0.
df['sex'].mask(df['sex'].isin(['m']) , 0.0, inplace=True)
df['sex'].mask(df['sex'].isin(['f']) , 1.0, inplace=True)
# print(df['sex'].value_counts())
# Merge education levels per the meeting decision: {1,3,5} -> class 1,
# {6,7,8} -> class 8, turning the task into binary classification.
df['isced'].mask(df['isced'].isin([3.0, 5.0]) , 1.0, inplace=True)
df['isced'].mask(df['isced'].isin([6.0, 7.0]) , 8.0, inplace=True)
# # Separate majority and minority classes
# df_majority = df[df.isced==8.0]
# df_minority = df[df.isced==1.0]
# # Downsample majority class
# df_majority_downsampled = resample(df_majority,
# replace=False, # sample without replacement
# n_samples=10985, # to match minority class
# random_state=123) # reproducible results
# # Combine minority class with downsampled majority class
# df = pd.concat([df_majority_downsampled, df_minority])
print(sorted(Counter(df['isced']).items()))
df = df.dropna(subset=['clean_text', 'isced'])
# Features: the cleaned text plus the numeric style/metadata columns.
corpus = df[['clean_text', 'count_char','count_word', '#anwps', 'count_punct', 'avg_wordlength', 'count_misspelled', 'word_uniqueness', 'age', 'sex']]
target = df["isced"]
# vectorization
# Stratified 75/25 split keeps the class balance in both halves.
X_train, X_val, y_train, y_val = train_test_split(corpus, target, train_size=0.75, stratify=target,
test_size=0.25, random_state = 0)
# Column selectors used by the FeatureUnion branches below.
get_text_data = FunctionTransformer(lambda x: x['clean_text'], validate=False)
get_numeric_data = FunctionTransformer(lambda x: x[['count_char','count_word', '#anwps', 'count_punct', 'avg_wordlength', 'count_misspelled', 'word_uniqueness', 'age', 'sex']], validate=False)
# Solver = lbfgs
# merge vectorized text data and scaled numeric data
process_and_join_features = Pipeline([
('features', FeatureUnion([
('numeric_features', Pipeline([
('selector', get_numeric_data),
('scaler', preprocessing.StandardScaler())
])),
('text_features', Pipeline([
('selector', get_text_data),
('vec', CountVectorizer(binary=False, ngram_range=(1, 2), lowercase=True))
]))
])),
('clf', LogisticRegression(random_state=0,max_iter=1000, solver='lbfgs', penalty='l2', class_weight='balanced'))
])
# merge vectorized text data and scaled numeric data
process_and_join_features.fit(X_train, y_train)
predictions = process_and_join_features.predict(X_val)
print("Final Accuracy for Logistic: %s"% accuracy_score(y_val, predictions))
cm = confusion_matrix(y_val,predictions)
plt.figure()
plot_confusion_matrix(cm, classes=[0,1], normalize=False,
title='Confusion Matrix')
print(classification_report(y_val, predictions))
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
# scores = cross_val_score(process_and_join_features, X_train, y_train, cv=5)
# print(scores)
# print(scores.mean())
process_and_join_features.fit(X_train, y_train)
# 5-fold cross-validated predictions over the full dataset.
y_pred = cross_val_predict(process_and_join_features, corpus, target, cv=5)
conf_mat = confusion_matrix(target, y_pred)
print(conf_mat)
from sklearn.model_selection import cross_val_score, cross_val_predict
scores = cross_val_score(process_and_join_features, corpus, target, cv=5)
print(scores)
print(scores.mean())
# NOTE(review): the next four lines duplicate the cross-validation cell above.
from sklearn.model_selection import cross_val_score, cross_val_predict
scores = cross_val_score(process_and_join_features, corpus, target, cv=5)
print(scores)
print(scores.mean())
# Same pipeline but with solver='sag' (needs more iterations to converge).
# merge vectorized text data and scaled numeric data
process_and_join_features = Pipeline([
('features', FeatureUnion([
('numeric_features', Pipeline([
('selector', get_numeric_data),
('scaler', preprocessing.StandardScaler())
])),
('text_features', Pipeline([
('selector', get_text_data),
('vec', CountVectorizer(binary=False, ngram_range=(1, 2), lowercase=True))
]))
])),
('clf', LogisticRegression(random_state=0,max_iter=5000, solver='sag', penalty='l2', class_weight='balanced'))
])
#
process_and_join_features.fit(X_train, y_train)
predictions = process_and_join_features.predict(X_val)
print("Final Accuracy for Logistic: %s"% accuracy_score(y_val, predictions))
cm = confusion_matrix(y_val,predictions)
plt.figure()
plot_confusion_matrix(cm, classes=[0,1], normalize=False,
title='Confusion Matrix')
print(classification_report(y_val, predictions))
# Same pipeline but with solver='saga', parallelized across all cores.
# merge vectorized text data and scaled numeric data
process_and_join_features = Pipeline([
('features', FeatureUnion([
('numeric_features', Pipeline([
('selector', get_numeric_data),
('scaler', preprocessing.StandardScaler())
])),
('text_features', Pipeline([
('selector', get_text_data),
('vec', CountVectorizer(binary=False, ngram_range=(1, 2), lowercase=True))
]))
])),
('clf', LogisticRegression(n_jobs=-1, random_state=0,max_iter=3000, solver='saga', penalty='l2', class_weight='balanced'))
])
#
process_and_join_features.fit(X_train, y_train)
predictions = process_and_join_features.predict(X_val)
print("Final Accuracy for Logistic: %s"% accuracy_score(y_val, predictions))
cm = confusion_matrix(y_val,predictions)
plt.figure()
plot_confusion_matrix(cm, classes=[0,1], normalize=False,
title='Confusion Matrix')
print(classification_report(y_val, predictions))
```
| github_jupyter |
# Model Selection, Overfitting and Regularization
This tutorial is meant to be a gentle introduction to machine learning concepts. We present a simple polynomial fitting example using a least squares solution, which is a specific case of what is called maximum likelihood, but we will not get into details about this probabilistic view of least squares in this tutorial. We use this example to introduce important machine learning concepts using plain language that should be accessible to undergraduate and graduate students with a minimum background of calculus.
The goals of this tutorial are:
- Explain how to develop an experiment. Split your data into development set (*i.e.*, train and validaion sets) and test set.
- Introduce how to select your model.
- Introduce the concepts of *over-fitting*, *under-fitting*, and *model generalization*.
- Introduce the concept of *regularization* for reducing *over-fitting*.
This tutorial is interactive and it corresponds to an adaptation of the example presented in chapter 1 of the book: **Christopher M. Bishop. 2006. Pattern Recognition and Machine Learning (Information Science and Statistics). Springer-Verlag New York, Inc., Secaucus, NJ, USA.**
## Designing your experiment
Machine learning builds models by learning from data. When designing your experiment, you need to split your data into a development set and a test set. The development set is split into 2 sets: a train set and a validation set. The train set is used to learn the parameters of the different models you are fitting (training). The validation set is used to select what is hopefully the best model among the different models you trained; because it guides model selection, it is biased and cannot be used as proof of generalization. The test set is used to see if the selected model generalizes well to unseen data.
<img src="../Figures/train_val_test.png" alt="Drawing" style="width: 500px;"/>
## Generating synthetic data
```
# Directive to make plots inline as opposed to having pop-up plots
%matplotlib inline
import numpy as np # Import numpy with nickname np
import matplotlib.pylab as plt # plotting library
from ipywidgets import * # Interaction library
# NOTE(review): np.random.normal takes the *standard deviation* as its
# second argument, so despite its name `var` acts as a std dev here.
var = 0.2 #Noise variance
#Create data set
N = 25
x = np.linspace(0, 1, N)
y_noiseless = np.sin(2*np.pi*x) # signal
y = y_noiseless + np.random.normal(0, var, N) #signal + noise -> real measurements always come with noise
# Plot entire data set with and without noise
plt.figure()
plt.plot(x,y_noiseless,linewidth = 2.0,label = r'Data without noise: $sin(2 \pi x)$')
plt.scatter(x,y,color ='red', marker = 'x', label = r'Data with noise')
plt.legend(loc = (0.02, 0.18))
plt.xlabel("x")
plt.ylabel("y")
plt.show()
```
## Splitting the data into train, validation, and test sets
```
# Splitting the data in train/validation/test sets - size of each set was choosen arbitrarily
train_size = 10
val_size = 5
test_size = 10
indexes = np.arange(N, dtype =int)
np.random.seed(seed = 2) # Random seed to keep results always the same
np.random.shuffle(indexes) # Shuffling the data before the split
# Train set
aux = indexes[:train_size]
aux = np.sort(aux) # sort indices so the points plot left-to-right
x_train = x[aux]
y_train = y[aux]
#Validation set
aux = indexes[train_size: train_size + val_size]
aux = np.sort(aux)
x_val= x[aux]
y_val = y[aux]
# Test set
aux = indexes[-test_size:]
aux = np.sort(aux)
x_test = x[aux]
y_test = y[aux]
# Plot train/val/test sets
plt.figure()
plt.plot(x,y_noiseless,linewidth = 2.0,label = r'Model no noise: $sin(2 \pi x)$')
plt.scatter(x_train,y_train,color ='red', marker = 'x', label = "Train set")
plt.scatter(x_val,y_val,color = 'blue',marker = '^' , label = "Validation set")
plt.scatter(x_test,y_test,color = 'green', marker = 'o', label = "Test set")
plt.legend(loc = (0.02, 0.18))
plt.xlabel("x")
plt.ylabel("y")
plt.show()
```
## Data
Observations: $$\boldsymbol{X} =[x_1,x_2,...,x_N]^T$$
Target: $$\boldsymbol{T} =[t_1,t_2,...,t_N]^T$$
Estimates: $$\boldsymbol{Y} =[y_1,y_2,...,y_N]^T$$
## Polynomial Model
$$y(x,\boldsymbol{W})= w_0 + w_1x +w_2x^2+...+w_Mx^M = \sum^M_{j=0}w_jx^j$$
Weights (*i.e.*, what our model learns): $$\boldsymbol{W} =[w_0,w_1,...,w_M]^T$$
## Cost Function
Quadratic cost function: $$E(\boldsymbol{W})=\frac{1}{2}\sum_{n=1}^N\{y(x_n,\boldsymbol{W})-t_n\}^2$$
Computing the derivative of the cost function and making it equal to zero, we can find the vector **W*** that minimizes the error:
$$ \boldsymbol{W}^* = (\boldsymbol{A}^T\boldsymbol{A})^{-1}\boldsymbol{A} ^T\boldsymbol{T}$$
Where **A** is defined by:
$$\boldsymbol{A} = \begin{bmatrix}
1 & x_{1} & x_{1}^2 & \dots & x_{1}^M \\
1 & x_{2} & x_{2}^2 & \dots & x_{2}^M \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
1 & x_{N} & x_{N}^2 & \dots & x_{N}^M
\end{bmatrix}$$
```
# Least-squares polynomial fitting via the Moore-Penrose pseudo-inverse --
# the same closed form W* = (A^T A)^{-1} A^T T shown above.
def polynomial_fit(X,T,M):
    """Return the degree-M polynomial weights [w0, ..., wM] minimizing
    the squared error between the design matrix times W and targets T."""
    # Design matrix: column j holds X**j for j = 0..M (Vandermonde form).
    design = np.vander(np.asarray(X).reshape(-1), M + 1, increasing=True)
    targets = np.asarray(T).reshape(-1, 1)
    weights = np.linalg.pinv(design) @ targets
    return weights.ravel()
```
Plotting the least squares result varying the polynomial degree between 0 a 9. **Which model is a good model?** Look at the plots but also the magnitude of the weights resulting from each polynomial fit.
```
def plotmodel(M):
    """Fit a degree-M polynomial to the train set and plot it against the
    noiseless signal and the training points."""
    # Reverse the weights: polynomial_fit returns [w0..wM] but np.poly1d
    # expects the highest-degree coefficient first.
    coefs = polynomial_fit(x_train, y_train, M)[::-1]
    print("Weights:\n", coefs)
    p = np.poly1d(coefs)
    plt.figure()
    plt.plot(x,y_noiseless,linewidth = 1.5,label = r'Data no noise: $sin(2 \pi x)$')
    plt.scatter(x_train,y_train,color='red',label= "Train set")
    plt.xlabel("x")
    plt.ylabel(r'y')
    y_fit = p(x_train)
    plt.plot(x_train,y_fit,linewidth = 1.0,label ="Polynomial Fit")
    plt.legend(loc=(0.02,0.02))
    plt.show()
# Interactive slider over the polynomial degree M in [0, 9].
interact(plotmodel,M=(0,9,1))
```
Depending on the degree, M, of the polynomial we fit to our data, our model falls under one of these categories:
- **Under-fitting**: the model is too inflexible and is not able to capture any patterns in the data.
- **Over-fitting**: the model is too flexible. It ends up tuning to the random noise in the data. The model may have a low error in the train set, but it is not expected to generalize well to new (unseen) data.
- **Good fit**: The model is able to capture patterns in our data, but it does not get tuned to the random noise in the data. Better chances to generalize to new (unseen) data.
A good exercise is to visually determine whether the model is under-fitting, over-fitting or it is a good model based on the polynomial degree in the interactive plot shown above.
## Root mean squared error and Model Selection
Root mean squared error is an error measure commonly employed in regression problems.
$$E_{RMS}=\sqrt{2E(\boldsymbol{W^*})/N}$$
We will analyze the root mean squared error in the validation set to select our model.
```
# Computes RMS error
def rms_error(X, T, W):
    """Root-mean-square error of polynomial W on targets T at points X.

    W holds coefficients in np.poly1d order (highest degree first).
    """
    predictions = np.poly1d(W)(X)
    residuals = T - predictions
    return np.sqrt(np.mean(residuals ** 2))
# Sweep polynomial degrees 0..9: fit on the train split, record RMS error on
# both train and validation splits (globals x_train/y_train/x_val/y_val).
m = range(10)
train = []
val = []
# Compute RMS error across different polynomial fits
for M in m:
# [::-1] converts low->high degree coefficients to np.poly1d's high->low order.
W = polynomial_fit(x_train, y_train, M)[::-1]
error_train = rms_error(x_train,y_train,W)
error_val = rms_error(x_val,y_val,W)
train.append(error_train)
val.append(error_val)
# Plot the errors
plt.figure()
plt.plot(m,train,linewidth = 2.0,marker = 'o',markersize = 12,label = r'$E_{RMS}$ Train')
plt.plot(m,val,linewidth = 2.0,marker = 'x',markersize = 12,label = r'$E_{RMS}$ Validation')
plt.legend(loc = (0.02, 0.05))
plt.xlabel("Polynomial degree")
plt.ylabel(r'$E_{RMS}$')
plt.show()
# Model selection - the model with the lowest error in the validation set is selected. Then, the model
# generalizability is assessed on the test set.
best_M = np.argmin(val)
W = polynomial_fit(x_train, y_train, best_M)[::-1]
test_error = rms_error(x_test,y_test,W)
print("Model selected was a polynomial of degree %d" %best_M)
print("Root mean squared test error: %.3f" %test_error)
```
## Cost function with regularization
Regularization is a technique to avoid overfitting. Do you remember how the values of the estimated weights increased quickly for polynomial fits with high degrees in the example without regularization? That was the model tuning itself to the noise in the data. Regularization consists in adding a penalty term to the cost function. Let's add a quadratic penalty to the weights we are trying to estimate. The quadratic penalty is called **L2 regularization**.
$$E(\boldsymbol{W})=\frac{1}{2}\sum_{n=1}^N\{y(x_n,\boldsymbol{W})-t_n\}^2 +\frac{\lambda}{2}||\boldsymbol{W}||^2$$
The above equation also has a well-defined minimum point. Computing its derivative and making it equal to zero, the solution of the equation is given by:
$$\boldsymbol{W}^* = (\boldsymbol{A}^T\boldsymbol{A} + \lambda n\boldsymbol{I})^{-1}\boldsymbol{A} ^T\boldsymbol{T} $$
Note that our problem now has two hyper-parameters that we need to set. The polynomial degree (M) and the regularization factor ($\lambda$). Hyper-parameters are set by the user (*e.g.*, M and $\lambda$), while parameters are learned by the model (*e.g.*, the weights).
```
#Least square solution with regularization
def polynomial_fit_reg(X, T, M, lamb):
    """L2-regularized least-squares polynomial fit of degree M.

    Solves W = (A^T A + lamb*N*I)^{-1} A^T T, where A is the Vandermonde
    matrix of X and N the number of samples. Returns the coefficients as a
    flat array ordered from degree 0 up to M.
    """
    n_samples = X.shape[0]
    design = X.reshape(-1, 1) ** np.arange(M + 1)
    penalty = lamb * n_samples * np.eye(M + 1)
    # pinv keeps the solve stable even when the regularized Gram matrix
    # is ill-conditioned (e.g. lamb == 0 with a degenerate design).
    gram_inv = np.linalg.pinv(design.T @ design + penalty)
    return (gram_inv @ (design.T @ T.reshape(-1, 1))).ravel()
```
In the demo below, we show the influence of $log(\lambda)$ and $M$ in the polynomial fitting. Note the influence of $\lambda$ in the estimated weights.
```
# Interactive helper: L2-regularized fit with lambda = exp(log_lamb); shows
# how regularization shrinks the estimated weights as log_lamb grows.
def plotmodel2(M,log_lamb):
lamb = np.exp(log_lamb)
# Reverse to high->low degree order expected by np.poly1d.
coefs = polynomial_fit_reg(x_train, y_train, M,lamb)[::-1]
print("Weights:\n",coefs)
print("Lambda\n", lamb)
p = np.poly1d(coefs)
plt.figure()
plt.plot(x,y_noiseless,linewidth = 1.5,label = r'Data no noise: $sin(2 \pi x)$')
plt.scatter(x_train,y_train,color='red',label= "Train set")
plt.xlabel("x")
plt.ylabel(r'y')
y_fit = p(x_train)
plt.plot(x_train,y_fit,linewidth = 1.0,label ="Polynomial Fit")
plt.legend(loc=(0.02,0.02))
plt.show()
# Sliders: degree 0..9 and log(lambda) in [-40, -9] with 0.1 steps.
interact(plotmodel2,M=(0,9,1),log_lamb = (-40,-9,.1))
```
When we fit our model to the training data, we do a grid search through different polynomial degrees (M) and different regularization values ($\lambda$) to search for the model with the lowest error in the validation set, which again is the model we select. An alternative to the grid search is to perform a random search for the best set of model hyper-parameters.
```
log_lamb = range(-40,-8) # regularization values
M = range(7,10) # different polynomial degrees
train = np.zeros((len(log_lamb), len(M)))
val = np.zeros((len(log_lamb), len(M)))
for (i,m) in enumerate(M):
for (j,l) in enumerate(log_lamb):
lamb = np.exp(l)
coeffs = polynomial_fit_reg(x_train, y_train, m,lamb)[::-1]
train[j,i] = rms_error(x_train,y_train,coeffs)
val[j,i] = rms_error(x_val,y_val,coeffs)
plt.figure(figsize = (24,22), dpi = 300)
for (i,m) in enumerate(M):
plt.subplot(2, 2, i + 1)
plt.plot(log_lamb,train[:,i],linewidth = 1.0,marker = 'o',markersize = 12,label = r'$E_{RMS}$ Train')
plt.plot(log_lamb,val[:,i],linewidth = 1.0,marker = 'x',markersize = 12,label = r'$E_{RMS}$ Validation')
plt.legend(loc = (0.02, 0.075))
plt.xlabel(r'$ln\lambda$')
plt.ylabel(r'$E_{RMS}$')
plt.title("Polynomial degree %d" %m)
plt.show()
# Model selection
best_M_reg = np.unravel_index(val.argmin(), val.shape)
W = polynomial_fit_reg(x_train, y_train, M[best_M_reg[1]], np.exp(log_lamb[best_M_reg[0]]))[::-1]
test_error = rms_error(x_test,y_test,W)
print("Model selected was a polynome of degree %d with lambda = %e" %(M[best_M_reg[1]], np.exp(log_lamb[best_M_reg[0]])))
print("Root mean squared test error: %.3f" %test_error)
```
## Summary
That is all folks. In this tutorial, we presented a gentle introduction to model selection, over-fitting and regularization. We saw how to design our experiment by splitting our dataset into a development set (train + validation sets) and a test set. This method is commonly employed when we have very large datasets that may take days to train. For smaller datasets, a procedure called [cross-validation](https://en.wikipedia.org/wiki/Cross-validation_(statistics)#:~:text=Cross%2Dvalidation%2C%20sometimes%20called%20rotation,to%20an%20independent%20data%20set.) is often employed. We also saw that polynomials with high degrees tended to overfit to the data and by adding a regularization term to the cost function, over-fitting can be potentially mitigated. Another way to avoid over-fitting is by collecting more data (see activity suggestions), which is not always feasible.
The concepts explained in this tutorial are valid not just for polynomial fits, but also across different machine learning models like neural networks and support vector machines.
## Activity suggestions
- Use more data for training your model;
- Change the input signal;
- Change the noise intensity;
| github_jupyter |
## Анализ результатов AB тестирования
* проанализировать АБ тест, проведенный на реальных пользователях Яндекса
* подтвердить или опровергнуть наличие изменений в пользовательском поведении между контрольной (control) и тестовой (exp) группами
* определить характер этих изменений и практическую значимость вводимого изменения
* понять, какая из пользовательских групп более всего проигрывает / выигрывает от тестируемого изменения (локализовать изменение)
### Задание 1
Основная метрика, на которой мы сосредоточимся в этой работе, — это количество пользовательских кликов на web-странице в зависимости от тестируемого изменения этой страницы.
Посчитайте, насколько в группе exp больше пользовательских кликов по сравнению с группой control в процентах от числа кликов в контрольной группе.
Полученный процент округлите до третьего знака после точки.
```
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
# Load the A/B test log; each row carries a user's slot (control/exp) and click count.
data = pd.read_csv('ab_browser_test.csv')
data.shape
data.head()
# Total clicks per slot; express exp as a percentage of control.
control = sum(data.loc[(data['slot'] == 'control')].n_clicks)
exp = sum(data.loc[(data['slot'] == 'exp')].n_clicks)*100 /control
# Percentage lift of exp over control (answer to task 1).
exp-100
```
### Задание 2
Давайте попробуем посмотреть более внимательно на разницу между двумя группами (control и exp) относительно количества пользовательских кликов.
Для этого постройте с помощью бутстрепа 95% доверительный интервал для средних значений и медиан количества кликов в каждой из двух групп.
```
def get_bootstrap_samples(data, n_samples):
    """Draw n_samples bootstrap resamples (with replacement) from data.

    Returns an array of shape (n_samples, len(data)); data must support
    fancy indexing with an integer index array (e.g. a numpy array).
    """
    size = len(data)
    idx = np.random.randint(0, size, (n_samples, size))
    return data[idx]
def stat_intervals(stat, alpha):
    """Two-sided (1 - alpha) percentile interval of bootstrap statistics."""
    lower = 100 * alpha / 2.0
    upper = 100 * (1.0 - alpha / 2.0)
    return np.percentile(stat, [lower, upper])
# Peek at the raw per-row click counts for the experimental group.
data.loc[data.slot == 'exp'].n_clicks
```
### Задание 3
Поскольку данных достаточно много (порядка полумиллиона уникальных пользователей), отличие в несколько процентов может быть не только практически значимым, но и значимым статистически. Последнее утверждение нуждается в дополнительной проверке.
Посмотрите на выданные вам данные и выберите все верные варианты ответа относительно проверки гипотезы о равенстве среднего количества кликов в группах.
```
# Side-by-side click-count histograms for exp and control, mean marked in red.
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.hist(data.loc[data.slot == 'exp'].n_clicks, bins=100, edgecolor='k')
# NOTE(review): axvline's ymin/ymax are axes fractions in [0, 1]; 175000 looks
# like it was intended as a data coordinate — confirm the intended extent.
plt.axvline(x=(data.loc[data.slot == 'exp'].n_clicks).mean(), ymin=0, ymax=175000, c='r')
plt.xlim(0, 175)
plt.ylabel('Number of users')
plt.xlabel('Number of clicks')
plt.title('Experimental group')
plt.subplot(122)
plt.hist(data.loc[data.slot == 'control'].n_clicks, bins=100, edgecolor='k')
plt.axvline(x=(data.loc[data.slot == 'control'].n_clicks).mean(), ymin=0, ymax=175000, c='r')
plt.xlim(0, 175)
plt.ylabel('Number of users')
plt.xlabel('Number of clicks')
plt.title('Control group')
plt.show()
```
### Задание 4
Одним из возможных аналогов t-критерия, которым можно воспользоваться, является тест Манна-Уитни. На достаточно обширном классе распределений он является асимптотически более эффективным, чем t-критерий, и при этом не требует параметрических предположений о характере распределения.
Разделите выборку на две части, соответствующие control и exp группам. Преобразуйте данные к виду, чтобы каждому пользователю соответствовало суммарное значение его кликов. С помощью критерия Манна-Уитни проверьте гипотезу о равенстве средних. Что можно сказать о получившемся значении достигаемого уровня значимости ?
```
# Per the task statement: aggregate clicks per user, then compare the groups
# with the Mann-Whitney U test. The original passed the raw (userID, n_clicks)
# DataFrames to mannwhitneyu, so userID values leaked into the test input.
control = data.loc[data['slot'] == 'control'].groupby('userID')['n_clicks'].sum()
control.head()
exp = data.loc[data['slot'] == 'exp'].groupby('userID')['n_clicks'].sum()
exp.head()
# Two-sided test of the hypothesis that the two click distributions coincide.
stats.mannwhitneyu(exp, control, alternative='two-sided')
```
| github_jupyter |
```
#|hide
#|skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#|all_slow
#|default_exp callback.comet
#|export
from __future__ import annotations
import tempfile
from fastai.basics import *
from fastai.learner import Callback
#|hide
from nbdev.showdoc import *
```
# Comet.ml
> Integration with [Comet.ml](https://www.comet.ml/).
## Registration
1. Create account: [comet.ml/signup](https://www.comet.ml/signup).
2. Export API key to the environment variable (more help [here](https://www.comet.ml/docs/v2/guides/getting-started/quickstart/#get-an-api-key)). In your terminal run:
```
export COMET_API_KEY='YOUR_LONG_API_TOKEN'
```
or include it in your `./comet.config` file (**recommended**). More help is [here](https://www.comet.ml/docs/v2/guides/tracking-ml-training/jupyter-notebooks/#set-your-api-key-and-project-name).
## Installation
1. You need to install comet_ml. In your terminal run:
```
pip install comet_ml
```
or (alternative installation using conda). In your terminal run:
```
conda install -c anaconda -c conda-forge -c comet_ml comet_ml
```
## How to use?
The key is to create the callback `CometCallback` before you create `Learner()`, like this:
```
from fastai.callback.comet import CometCallback
comet_ml_callback = CometCallback('PROJECT_NAME') # specify project
learn = Learner(dls, model,
cbs=comet_ml_callback
)
learn.fit_one_cycle(1)
```
```
#|export
import comet_ml
#|export
class CometCallback(Callback):
    "Log losses, metrics, model weights and a model architecture summary to Comet.ml"
    order = Recorder.order + 1

    def __init__(self, project_name, log_model_weights=True):
        """Store configuration.

        project_name: Comet.ml project the experiment is logged under.
        log_model_weights: also upload checkpoints saved by SaveModelCallback.
        """
        # BUG FIX: the original also assigned `keep_experiment_running`, a name
        # that is not a parameter, which raised NameError on construction.
        self.log_model_weights = log_model_weights
        self.project_name = project_name
        self.experiment = None

    def before_fit(self):
        "Create the Comet experiment and log static properties of the run."
        try:
            self.experiment = comet_ml.Experiment(project_name=self.project_name)
        except ValueError:
            print("No active experiment")
        try:
            self.experiment.log_parameter("n_epoch", str(self.learn.n_epoch))
            self.experiment.log_parameter("model_class", str(type(self.learn.model)))
        except Exception:
            print("Did not log all properties.")
        try:
            # Upload repr(model) as a text asset named model_summary.txt.
            with tempfile.NamedTemporaryFile(mode="w") as f:
                with open(f.name, "w") as g:
                    g.write(repr(self.learn.model))
                self.experiment.log_asset(f.name, "model_summary.txt")
        except Exception:
            print("Did not log model summary. Check if your model is PyTorch model.")
        # Weight logging relies on SaveModelCallback exposing learn.save_model.
        if self.log_model_weights and not hasattr(self.learn, "save_model"):
            print(
                "Unable to log model to Comet.\n",
            )

    def after_batch(self):
        "Log training loss and optimizer hyper-parameters after every batch."
        if self.learn.training:
            self.experiment.log_metric("batch__smooth_loss", self.learn.smooth_loss)
            self.experiment.log_metric("batch__loss", self.learn.loss)
            self.experiment.log_metric("batch__train_iter", self.learn.train_iter)
            for i, h in enumerate(self.learn.opt.hypers):
                for k, v in h.items():
                    self.experiment.log_metric(f"batch__opt.hypers.{k}", v)

    def after_epoch(self):
        "Log recorder metrics and, optionally, the checkpoint saved this epoch."
        for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
            if n not in ["epoch", "time"]:
                self.experiment.log_metric(f"epoch__{n}", v)
            if n == "time":
                self.experiment.log_text(f"epoch__{n}", str(v))
        # Upload the checkpoint file written by SaveModelCallback, if any.
        if self.log_model_weights and hasattr(self.learn, "save_model"):
            if self.learn.save_model.every_epoch:
                _file = join_path_file(
                    f"{self.learn.save_model.fname}_{self.learn.save_model.epoch}",
                    self.learn.path / self.learn.model_dir,
                    ext=".pth",
                )
            else:
                _file = join_path_file(
                    self.learn.save_model.fname,
                    self.learn.path / self.learn.model_dir,
                    ext=".pth",
                )
            self.experiment.log_asset(_file)

    def after_fit(self):
        "End the Comet experiment."
        try:
            self.experiment.end()
        except Exception:
            # Message fixed: this is the Comet callback, not neptune.
            print("No comet.ml experiment to stop.")
```
| github_jupyter |
## Flight Price Prediction
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
```
## Importing Dataset
1. Since data is in form of excel file we have to use pandas read_excel to load the data
2. After loading it is important to check the complete information of the data, as it can reveal hidden issues such as null values in a column or a row
3. Check whether any null values are there or not. if it is present then following can be done,
1. Imputing data using Imputation method in sklearn
2. Filling NaN values with mean, median and mode using fillna() method
4. Describe data --> which can give statistical analysis
```
# Load the training spreadsheet; show every column when displaying frames.
train_data = pd.read_excel("Data_Train.xlsx")
pd.set_option("display.max_columns",None)
train_data.head()
train_data.info()
train_data["Duration"].value_counts()
train_data.shape
# Drop rows containing missing values, in place.
train_data.dropna(inplace=True)
train_data.info()
train_data.isnull().sum()
```
## Exploratory Data Analysis
From description we can see that Date_of_Journey is a object data type,
Therefore, we have to convert this datatype into timestamp to use this column properly for prediction
For this we require pandas to_datetime to convert object data type to datetime dtype.
<span style="color: red;">**.dt.day method will extract only day of that date**</span>\
<span style="color: red;">**.dt.month method will extract only month of that date**</span>
```
# Parse Date_of_Journey (dd/mm/YYYY) and extract numeric day and month features.
train_data["Journey_day"] =pd.to_datetime(train_data["Date_of_Journey"],format="%d/%m/%Y").dt.day
train_data["Journey_month"] =pd.to_datetime(train_data["Date_of_Journey"],format="%d/%m/%Y").dt.month
train_data.sample(5)
```
#since we have converted converted Date_of_Journey column into integers, Now we can drop as it is of no use
```
# Drop the raw date column now that day/month features have been extracted,
# then derive hour/minute features from Dep_Time, Arrival_Time and Duration.
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Departure time is when a plane leaves the gate.
# Similar to the Date_of_Journey we can extract values from Dep_Time
#Extracting Hours
train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour
# Extracting Minutes
train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute
# Now we can drop Dep_Time as it is of no use
train_data.drop(["Dep_Time"], axis = 1, inplace = True)
train_data.head()
# Arrival time is when the plane pulls up to the gate
# Similar to Date_of_Journey we can extract values from Arrival_Time
#Extracting Hours
train_data["Arrival_hour"] = pd.to_datetime(train_data["Arrival_Time"]).dt.hour
# Extracting Minutes
train_data["Arrival_min"] = pd.to_datetime(train_data["Arrival_Time"]).dt.minute
# Now we can drop Arrival_Time as it is of no use
train_data.drop(["Arrival_Time"], axis = 1, inplace = True)
train_data.head()
# Time taken by plane to reach destination is called Duration
# It is the differnce betwwen Departure Time and Arrival time
# Assigning and converting Duration column into list
duration = list(train_data["Duration"])
# Normalize every entry to the "Xh Ym" shape so it can be split uniformly.
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding duration_hours and duration_mins list to train_data dataframe
train_data["Duration_hours"] = duration_hours
train_data["Duration_mins"] =duration_mins
train_data.head()
train_data.drop(["Duration"], axis =1, inplace=True)
train_data.head()
```
# Handling Categorical Data
One can find many ways to handle categorical data. Some of the categorical data are
1. NOMINAL DATA, which is data that has no inherent order. We can use OneHotEncoder to handle this data
2. ORDINAL DATA which is data that are in order. We can use LabelEncoder to handle this data
```
# Encode the categorical columns: one-hot for nominal (Airline, Source,
# Destination) and an ordinal mapping for Total_Stops.
train_data["Airline"].value_counts()
# From the graph we can see that Jet Airways Business has the highest Price
# Apart from the first Airlines almost all are having similar median
# Airline vs price
sns.catplot(y= "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
plt.show()
# As Airline is Nominal Categorical data, we will perform OneHotEncoding
# drop_first avoids the dummy-variable trap (k categories -> k-1 columns).
Airline = train_data[["Airline"]]
Airline = pd.get_dummies(Airline, drop_first =True)
Airline.head()
train_data["Source"].value_counts()
# Compare Source and Price
# We can see some outliers in Bangalore while the other places do not differ much
sns.catplot(y = "Price", x= "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3)
# As source is Nominal categorical data, we will perform OneHotEncoding
# The dropped first category is represented by all-zero dummy columns
Source =train_data[["Source"]]
Source =pd.get_dummies(Source, drop_first=True)
Source.head()
train_data["Destination"].value_counts()
# As Destination is Nominal categorical data, we will perform OneHotEncoding
Destination =train_data[["Destination"]]
Destination =pd.get_dummies(Destination, drop_first=True)
Destination.head()
train_data["Route"]
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
train_data.drop(["Route","Additional_Info"],axis=1, inplace=True)
train_data["Total_Stops"].value_counts()
# As this is a case of Ordinal Categorical data, we map stop labels to integers
# Here values are assigned with corresponding keys
train_data.replace({"non-stop":0, "1 stop": 1, "2 stops": 2, "3 stops":3, "4 stops": 4}, inplace=True)
train_data.head()
# Concatenate dataframe that consist of train_data, Airline, Source, and Destination
data_train =pd.concat([train_data,Airline,Source,Destination], axis=1)
data_train.head()
# Drop the original string columns now replaced by dummy columns.
data_train.drop(["Airline","Source", "Destination"], axis=1, inplace=True)
data_train.head()
data_train.shape
```
# Test Set
```
# Apply the same preprocessing pipeline as the training set to the test set:
# date/time feature extraction, duration parsing, and categorical encoding.
test_data =pd.read_excel(r"Test_set.xlsx")
test_data.head()
# Preprocessing
print("Test data Info")
print("-"*75)
print(test_data.info())
print()
print()
print("Null values :")
print("-"*75)
test_data.dropna(inplace = True)
print(test_data.isnull().sum())
# EDA
# Date_of_Journey
test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day
test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month
test_data.drop(["Date_of_Journey"], axis = 1, inplace = True)
# Dep_Time
test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour
test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute
test_data.drop(["Dep_Time"], axis = 1, inplace = True)
# Arrival_Time
test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour
test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute
test_data.drop(["Arrival_Time"], axis = 1, inplace = True)
# Duration
duration = list(test_data["Duration"])
# Normalize every entry to the "Xh Ym" shape so it can be split uniformly.
for i in range(len(duration)):
if len(duration[i].split()) != 2: # Check if duration contains only hour or mins
if "h" in duration[i]:
duration[i] = duration[i].strip() + " 0m" # Adds 0 minute
else:
duration[i] = "0h " + duration[i] # Adds 0 hour
duration_hours = []
duration_mins = []
for i in range(len(duration)):
duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration
# Adding Duration column to test set
test_data["Duration_hours"] = duration_hours
test_data["Duration_mins"] = duration_mins
test_data.drop(["Duration"], axis = 1, inplace = True)
# Categorical data
# NOTE(review): get_dummies fit separately on train and test can yield
# different column sets if a category is absent from one split — confirm the
# dummy columns are aligned before predicting.
print("Airline")
print("-"*75)
print(test_data["Airline"].value_counts())
Airline = pd.get_dummies(test_data["Airline"], drop_first= True)
print()
print("Source")
print("-"*75)
print(test_data["Source"].value_counts())
Source = pd.get_dummies(test_data["Source"], drop_first= True)
print()
print("Destination")
print("-"*75)
print(test_data["Destination"].value_counts())
Destination = pd.get_dummies(test_data["Destination"], drop_first = True)
# Additional_Info contains almost 80% no_info
# Route and Total_Stops are related to each other
test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True)
# Replacing Total_Stops
test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True)
# Concatenate dataframe --> test_data + Airline + Source + Destination
data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1)
data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True)
print()
print()
print("Shape of test data : ", data_test.shape)
data_test.head()
```
## Feature Selection
Finding out the best feature which will contribute and have good relation with target variable.
Following are some of the feature selection methods,
1. <span style="color: red;">**heatmap**</span>
2. <span style="color: red;">**feature_importance_**</span>
3. <span style="color: red;">**SelectKBest**</span>
```
# Select feature matrix X and target y, then inspect feature relevance via a
# correlation heatmap and ExtraTreesRegressor importances.
data_train.shape
data_train.columns
X = data_train.loc[:, ['Total_Stops','Journey_day', 'Journey_month', 'Dep_hour',
'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours',
'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo',
'Airline_Jet Airways', 'Airline_Jet Airways Business',
'Airline_Multiple carriers',
'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet',
'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy',
'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai',
'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad',
'Destination_Kolkata', 'Destination_New Delhi']]
X.head()
# assumes Price sits at column index 1 of data_train — TODO confirm
y = data_train.iloc[:,1]
y.head()
#Find correlation between Independent(X) and dependent attributes(y)
plt.figure(figsize = (18,18))
sns.heatmap(train_data.corr(),annot= True, cmap = "coolwarm")
plt.show()
# Important feature using ExtraTreeRegressor
from sklearn.ensemble import ExtraTreesRegressor
selection =ExtraTreesRegressor()
selection.fit(X,y)
#ExtraTreesRegressor is used to choose the important features for the prediction
print(selection.feature_importances_)
# plot graph of important feature for better visualization
plt.figure(figsize =(12,8))
feat_importances =pd.Series(selection.feature_importances_,index =X.columns)
feat_importances.nlargest(20).plot(kind="barh")
plt.show()
```
## Fitting model using Random Forest
1. Split dataset into train and test set in order to prediction w.r.t X_test
2. If needed do scaling of data
* Scaling is not done in Random forest
3. Import model
4. Fit the data
5. Predict w.r.t X_test
6. In regression check **RSME** Score
7. Plot graph
```
# Train/test split, fit a default RandomForestRegressor, and report
# residual plots plus MAE/MSE/RMSE/R2 on the held-out split.
from sklearn.model_selection import train_test_split
X_train, X_test,y_train,y_test =train_test_split(X,y,test_size = 0.2, random_state = 42)
from sklearn.ensemble import RandomForestRegressor
reg_rf = RandomForestRegressor()
reg_rf.fit(X_train,y_train)
y_pred =reg_rf.predict(X_test)
# R^2 on train vs test reveals how much the forest over-fits the train split.
reg_rf.score(X_train,y_train)
reg_rf.score(X_test,y_test)
# Residual distribution; roughly centered on zero for an unbiased model.
sns.displot(y_test-y_pred)
plt.show()
plt.scatter(y_test,y_pred,alpha =0.5,color="DarkBlue")
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
from sklearn import metrics
print("MAE:" , metrics.mean_absolute_error(y_test,y_pred))
print("MSE:" , metrics.mean_squared_error(y_test,y_pred))
print("RMSE:" , np.sqrt(metrics.mean_squared_error(y_test,y_pred)))
metrics.r2_score(y_test, y_pred)
```
## Hyperparameter Tuning
* Choose following method for hyperparameter tuning
1. **RandomizedSearchCV** --> Fast
2. **GridSearchCV**
* Assign hyperparameters in form of dictionery
* Fit the model
* Check best paramters and best score
```
# Hyper-parameter tuning of the random forest via RandomizedSearchCV:
# sample 10 parameter combinations, 5-fold CV each, scored by negative MSE.
from sklearn.model_selection import RandomizedSearchCV
#Randomized Search CV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(5, 30, num = 6)]
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf}
# Random search of parameters, using 5 fold cross validation,
# search across 100 different combinations
# NOTE(review): n_iter=10, so only 10 combinations are actually sampled.
rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, n_jobs = 1)
rf_random.fit(X_train,y_train)
rf_random.best_params_
prediction = rf_random.predict(X_test)
plt.figure(figsize =(8,8))
sns.displot(y_test-prediction)
plt.show()
plt.scatter(y_test,prediction,alpha =0.5,color="DarkBlue")
plt.xlabel("y_test")
plt.ylabel("y_pred")
plt.show()
print("MAE:" , metrics.mean_absolute_error(y_test,prediction))
print("MSE:" , metrics.mean_squared_error(y_test,prediction))
print("RMSE:" , np.sqrt(metrics.mean_squared_error(y_test,prediction)))
```
# Save the model to reuse it again
```
import pickle

# Persist the tuned model with context managers so the file handles are
# closed deterministically (the original left both handles open).
with open('flight_rf.pkl', 'wb') as file:
    # dump information to that file
    pickle.dump(rf_random, file)

# Reload the model and sanity-check its r2 score on the held-out split.
with open('flight_rf.pkl', 'rb') as model:
    forest = pickle.load(model)

y_prediction = forest.predict(X_test)
metrics.r2_score(y_test, y_prediction)
```
| github_jupyter |
### This notebook is used to perform gridsearch on asia dataset
```
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
from sdgym import benchmark
from sdgym import load_dataset
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from synthsonic.models.kde_copula_nn_pdf import KDECopulaNNPdf
from synthsonic.models.categorical_utils import categorical_round, vec_translate, categorical_frequency_mapping, \
categorical_frequency_inverse_mapping, encode_one_hot, decode_one_hot
from pandas_profiling import ProfileReport
%matplotlib inline
```
### EDA
```
# Load the SDGym 'asia' dataset and run a pandas-profiling EDA report on it.
df, categorical_columns, ordinal_columns = load_dataset('asia')
explore_df = pd.DataFrame(df)
profile = ProfileReport(explore_df, title="EDA for asia dataset")
profile
```
### Observations:
* All 8 features in this dataset are categorical, so it's worth trying all the categorical encoding strategies
* Consider categorical as ordinal
* One hot encode categorical features
* Frequency mapping
### MLP classifier
```
def KDECopulaNNPdf_RoundCategorical(real_data, categorical_columns, ordinal_columns):
    """Synthesize data with KDECopulaNNPdf (MLP classifier, KDE disabled) and
    round the categorical/ordinal columns of the generated sample to integers.

    Returns a float32 array with the same shape as real_data.
    (Removed dead locals all_features/numerical_features/n_features from the
    original — they were computed but never used.)
    """
    data = np.float64(real_data)
    n_samples = data.shape[0]
    kde = KDECopulaNNPdf(use_KDE=False, clf=MLPClassifier(random_state=0, max_iter=500, early_stopping=True))
    kde = kde.fit(data)
    X_gen, sample_weight = kde.sample(n_samples)
    # Discrete columns come back continuous; snap them to the nearest integer.
    X_gen[:, categorical_columns+ordinal_columns] = np.round(X_gen[:, categorical_columns+ordinal_columns])
    X_gen = np.float32(X_gen)
    return X_gen
# Synthesizer: one-hot encode the categoricals, fit KDECopulaNNPdf (MLP, no
# KDE) on [numerical+ordinal | one-hot] columns, sample, then decode back to
# the original column layout.
def KDECopulaNNPdf_woKDE_OneHotEncoded(real_data, categorical_columns, ordinal_columns):
all_features = list(range(real_data.shape[1]))
numerical_features = list(set(all_features) - set(categorical_columns+ordinal_columns))
## One hot encode the categorical features
unique_values, ohe = encode_one_hot(real_data, categorical_columns)
categorical_np = np.array(ohe)
n_samples = real_data.shape[0]
n_features = real_data.shape[1]
## Append the categorical one hot encoded data to numerical and ordinal
data = np.float64(np.hstack((real_data[:, numerical_features+ordinal_columns], categorical_np)))
kde = KDECopulaNNPdf(use_KDE=False, clf=MLPClassifier(random_state=0, max_iter=500, early_stopping=True))
kde = kde.fit(data)
X_gen, sample_weight = kde.sample(n_samples)
X_gen = np.float32(X_gen)
X_final = decode_one_hot(X_gen, categorical_columns, unique_values, n_features)
# NOTE(review): in X_gen the numerical+ordinal columns were hstacked FIRST,
# yet they are read back here at their original column positions. That only
# coincides when there are no numerical/ordinal features (true for asia) —
# verify before reusing this function on other datasets.
X_final[:, numerical_features+ordinal_columns] = X_gen[:, numerical_features+ordinal_columns]
print(X_final.shape)
return X_final
def KDECopulaNNPdf_woKDE_FreqMapping(real_data, categorical_columns, ordinal_columns):
    """Fit KDECopulaNNPdf (MLP, KDE disabled) after frequency-mapping the
    categorical columns, sample, round the categoricals and invert the map.

    (Removed dead locals all_features/numerical_features/n_features from the
    original — they were computed but never used.)
    """
    data = np.float64(real_data)
    n_samples = data.shape[0]
    # Map each categorical value onto a frequency-based code; keep the
    # inverse mappings to translate generated samples back.
    data, inv_mappings = categorical_frequency_mapping(data, categorical_columns)
    kde = KDECopulaNNPdf(use_KDE=False, clf=MLPClassifier(random_state=0, max_iter=500, early_stopping=True))
    kde = kde.fit(data)
    X_gen, sample_weight = kde.sample(n_samples)
    X_gen[:, categorical_columns] = np.round(X_gen[:, categorical_columns])
    X_final = categorical_frequency_inverse_mapping(X_gen, categorical_columns, inv_mappings)
    return X_final
# Benchmark the three MLP-based synthesizers on the asia dataset (SDGym).
asia_scores_mlp = benchmark(synthesizers=[KDECopulaNNPdf_RoundCategorical,
KDECopulaNNPdf_woKDE_OneHotEncoded,
KDECopulaNNPdf_woKDE_FreqMapping], datasets=['asia'])
asia_scores_mlp
def KDECopulaNNPdf_RoundCategorical(real_data, categorical_columns, ordinal_columns):
    """XGBoost variant: synthesize with KDECopulaNNPdf (KDE disabled) and
    round categorical/ordinal columns of the generated sample to integers.

    Redefines the earlier MLP-based function of the same name, as intended
    by the notebook's second benchmark run. (Removed dead locals
    all_features/numerical_features/n_features.)
    """
    data = np.float64(real_data)
    n_samples = data.shape[0]
    kde = KDECopulaNNPdf(use_KDE=False, clf=XGBClassifier(random_state=42, max_depth=6, alpha=0.2, subsample=0.5))
    kde = kde.fit(data)
    X_gen, sample_weight = kde.sample(n_samples)
    # Discrete columns come back continuous; snap them to the nearest integer.
    X_gen[:, categorical_columns+ordinal_columns] = np.round(X_gen[:, categorical_columns+ordinal_columns])
    X_gen = np.float32(X_gen)
    return X_gen
# XGBoost variant of the one-hot synthesizer (redefines the MLP version above
# for the second benchmark run).
def KDECopulaNNPdf_woKDE_OneHotEncoded(real_data, categorical_columns, ordinal_columns):
all_features = list(range(real_data.shape[1]))
numerical_features = list(set(all_features) - set(categorical_columns+ordinal_columns))
## One hot encode the categorical features
unique_values, ohe = encode_one_hot(real_data, categorical_columns)
categorical_np = np.array(ohe)
n_samples = real_data.shape[0]
n_features = real_data.shape[1]
## Append the categorical one hot encoded data to numerical and ordinal
data = np.float64(np.hstack((real_data[:, numerical_features+ordinal_columns], categorical_np)))
kde = KDECopulaNNPdf(use_KDE=False, clf=XGBClassifier(random_state=42, max_depth=6, alpha=0.2, subsample=0.5))
kde = kde.fit(data)
X_gen, sample_weight = kde.sample(n_samples)
X_gen = np.float32(X_gen)
X_final = decode_one_hot(X_gen, categorical_columns, unique_values, n_features)
# NOTE(review): numerical+ordinal columns were hstacked FIRST into `data`,
# but are read back at their original positions here; that only coincides
# when there are no numerical/ordinal features (true for asia).
X_final[:, numerical_features+ordinal_columns] = X_gen[:, numerical_features+ordinal_columns]
print(X_final.shape)
return X_final
def KDECopulaNNPdf_woKDE_FreqMapping(real_data, categorical_columns, ordinal_columns):
    """Fit KDECopulaNNPdf (KDE disabled) after frequency-mapping the
    categorical features, sample synthetic rows, and invert the mapping.

    Parameters
    ----------
    real_data : array-like of shape (n_samples, n_features)
        Original training data.
    categorical_columns : list of int
        Column indices of the categorical features.
    ordinal_columns : list of int
        Column indices of the ordinal features (unchanged by the mapping).

    Returns
    -------
    Array of shape (n_samples, n_features) with categories mapped back.
    """
    data = np.float64(real_data)
    n_samples = data.shape[0]
    # replace each categorical value with a frequency-based numeric code
    data, inv_mappings = categorical_frequency_mapping(data, categorical_columns)
    kde = KDECopulaNNPdf(use_KDE=False, clf=XGBClassifier(random_state=42, max_depth=6, alpha=0.2, subsample=0.5))
    kde = kde.fit(data)
    # sample() returns (samples, weights); the weights are not needed here
    X_gen, _ = kde.sample(n_samples)
    # round to the nearest code before inverting the mapping
    X_gen[:, categorical_columns] = np.round(X_gen[:, categorical_columns])
    X_final = categorical_frequency_inverse_mapping(X_gen, categorical_columns, inv_mappings)
    return X_final
# Benchmark the three synthesizer variants (XGBoost classifier) on 'asia'
asia_scores_xgboost = benchmark(synthesizers=[KDECopulaNNPdf_RoundCategorical,
                                              KDECopulaNNPdf_woKDE_OneHotEncoded,
                                              KDECopulaNNPdf_woKDE_FreqMapping], datasets=['asia'])
asia_scores_xgboost
# Tag each results table with the classifier that produced it
asia_scores_mlp['Classifier'] = 'MLP'
asia_scores_xgboost['Classifier'] = 'XGBoost'
# fix: `.iloc[0:9]['Classifier'] = ...` is chained indexing, which assigns to a
# temporary copy and leaves the frame unchanged; assign through .loc instead
asia_scores_mlp.loc[asia_scores_mlp.index[0:9], 'Classifier'] = 'N/A'
# Combine: all MLP rows plus the last three (model) rows of the XGBoost table
# NOTE(review): DataFrame.append is deprecated in newer pandas; pd.concat is
# the replacement if this notebook is upgraded
asia_scores = asia_scores_mlp.reset_index().append(asia_scores_xgboost.reset_index().iloc[-3:], ignore_index=True)
asia_scores
```
### Grid search
```
data = np.float64(df)
kde = KDECopulaNNPdf(use_KDE=False, clf=MLPClassifier())
# inspect the tunable parameter names
kde.get_params().keys()
# then for the grid search do this, where all classifier options now have a prefix clf__:
from sklearn.model_selection import GridSearchCV
parameters = {
    # fix: the original dict listed 'clf__alpha' twice; in a dict literal only
    # the later entry survives, so the dead duplicate has been removed
    'clf__hidden_layer_sizes': [(10,), (20,), (50,), (100,)],
    'clf__activation': ['tanh', 'relu'],
    'clf__solver': ['sgd', 'adam'],
    'clf__alpha': [0.0001, 0.05],
    'clf__learning_rate': ['constant', 'adaptive'],
}
grid = GridSearchCV(KDECopulaNNPdf(use_KDE=False), parameters, cv=5)
grid.fit(data)
# fix: best_params_ was printed twice; once is enough
print(grid.best_params_)
def KDECopulaNNPdf_RoundCategorical(real_data, categorical_columns, ordinal_columns):
    """Synthesize data with KDECopulaNNPdf (KDE disabled) using the MLP
    classifier configuration found by the grid search above; categorical
    and ordinal columns are rounded to integer codes before returning.

    Redefines the earlier function of the same name for this experiment.
    """
    # Max's kde copula model with default parameters
    all_features = list(range(real_data.shape[1]))
    numerical_features = list(set(all_features) - set(categorical_columns + ordinal_columns))  # NOTE(review): unused below
    data = np.float64(real_data)
    n_samples = data.shape[0]
    n_features = data.shape[1]  # NOTE(review): unused below
    #print(data.shape)
    # hyper-parameters taken from the grid-search results above
    kde = KDECopulaNNPdf(clf=MLPClassifier(hidden_layer_sizes=(100,), alpha=0.05,
                                 max_iter=500, early_stopping=True, random_state=1), use_KDE=False)
    kde = kde.fit(data)
    X_gen, sample_weight = kde.sample(n_samples)
    # discrete columns must hold integer category codes
    X_gen[:, categorical_columns+ordinal_columns] = np.round(X_gen[:, categorical_columns+ordinal_columns])
    X_gen = np.float32(X_gen)
    return X_gen
# Benchmark only the tuned-MLP synthesizer on the 'asia' dataset
asia_scores = benchmark(synthesizers=[KDECopulaNNPdf_RoundCategorical], datasets=['asia'])
asia_scores
# rank the runs by held-out likelihood
asia_scores.sort_values('asia/test_likelihood')
```
* With use_KDE=False, modifying the classification model or tuning the hyper-parameters don't make a difference.
| github_jupyter |
# Building the Best AND Gate
Let's import everything:
```
from qiskit import *
from qiskit.tools.visualization import plot_histogram
%config InlineBackend.figure_format = 'svg' # Makes the images look nice
from qiskit.providers.aer import noise
import numpy as np
```
In Problem Set 1, you made an AND gate with quantum gates. This time you'll do the same again, but for a real device. Using real devices gives you two major constraints to deal with. One is the connectivity, and the other is noise.
The connectivity tells you what `cx` gates it is possible to perform directly. For example, the device `ibmq_5_tenerife` has five qubits numbered from 0 to 4. It has a connectivity defined by
```
# [control, target] pairs of cx gates directly implementable on ibmq_5_tenerife
coupling_map = [[1, 0], [2, 0], [2, 1], [3, 2], [3, 4], [4, 2]]
```
Here the `[1,0]` tells us that we can implement a `cx` with qubit 1 as control and qubit 0 as target, the `[2,0]` tells us we can have qubit 2 as control and 0 as target, and so on. These are the `cx` gates that the device can implement directly.
The 'noise' of a device is the collective effects of all the things that shouldn't happen, but nevertheless do happen. Noise results in the output not always having the result we expect. There is noise associated with all processes in a quantum circuit: preparing the initial states, applying gates and measuring the output. For the gates, noise levels can vary between different gates and between different qubits. The `cx` gates are typically more noisy than any single qubit gate.
We can also simulate noise using a noise model. And we can set the noise model based on measurements of the noise for a real device. The following noise model is based on `ibmq_5_tenerife`.
```
noise_dict = {'errors': [{'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0004721766167523067, 0.0004721766167523067, 0.0004721766167523067, 0.9985834701497431], 'gate_qubits': [[0]]}, {'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0005151090708174488, 0.0005151090708174488, 0.0005151090708174488, 0.9984546727875476], 'gate_qubits': [[1]]}, {'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0005151090708174488, 0.0005151090708174488, 0.0005151090708174488, 0.9984546727875476], 'gate_qubits': [[2]]}, {'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.000901556048412383, 0.000901556048412383, 0.000901556048412383, 0.9972953318547628], 'gate_qubits': [[3]]}, {'type': 'qerror', 'operations': ['u2'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0011592423249461303, 0.0011592423249461303, 0.0011592423249461303, 0.9965222730251616], 'gate_qubits': [[4]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0009443532335046134, 0.0009443532335046134, 0.0009443532335046134, 0.9971669402994862], 'gate_qubits': [[0]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': 
[[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0010302181416348977, 0.0010302181416348977, 0.0010302181416348977, 0.9969093455750953], 'gate_qubits': [[1]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0010302181416348977, 0.0010302181416348977, 0.0010302181416348977, 0.9969093455750953], 'gate_qubits': [[2]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.001803112096824766, 0.001803112096824766, 0.001803112096824766, 0.9945906637095256], 'gate_qubits': [[3]]}, {'type': 'qerror', 'operations': ['u3'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0023184846498922607, 0.0023184846498922607, 0.0023184846498922607, 0.9930445460503232], 'gate_qubits': [[4]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, 
{'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.002182844139394187, 0.9672573379090872], 'gate_qubits': [[1, 0]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.0020007412998552473, 0.9699888805021712], 'gate_qubits': [[2, 0]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 
'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.002485439516158936, 0.9627184072576159], 'gate_qubits': [[2, 1]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 
0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.0037502825428055767, 0.9437457618579164], 'gate_qubits': [[3, 2]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.004401224333760022, 0.9339816349935997], 'gate_qubits': [[3, 4]]}, {'type': 'qerror', 'operations': ['cx'], 'instructions': [[{'name': 'x', 'qubits': [0]}], [{'name': 'y', 'qubits': [0]}], [{'name': 'z', 'qubits': [0]}], [{'name': 'x', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'x', 'qubits': [1]}], [{'name': 'y', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': 
[0]}, {'name': 'y', 'qubits': [1]}], [{'name': 'z', 'qubits': [1]}], [{'name': 'x', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'y', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'z', 'qubits': [0]}, {'name': 'z', 'qubits': [1]}], [{'name': 'id', 'qubits': [0]}]], 'probabilities': [0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.0046188825262438934, 0.9307167621063416], 'gate_qubits': [[4, 2]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.9372499999999999, 0.06275000000000008], [0.06275000000000008, 0.9372499999999999]], 'gate_qubits': [[0]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.9345, 0.0655], [0.0655, 0.9345]], 'gate_qubits': [[1]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.97075, 0.029249999999999998], [0.029249999999999998, 0.97075]], 'gate_qubits': [[2]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.9742500000000001, 0.02574999999999994], [0.02574999999999994, 0.9742500000000001]], 'gate_qubits': [[3]]}, {'type': 'roerror', 'operations': ['measure'], 'probabilities': [[0.8747499999999999, 0.12525000000000008], [0.12525000000000008, 0.8747499999999999]], 'gate_qubits': [[4]]}], 'x90_gates': []}
# build a qiskit NoiseModel object from the serialized dictionary above
noise_model = noise.noise_model.NoiseModel.from_dict( noise_dict )
```
Running directly on the device requires you to have an IBMQ account, and for you to sign in to it within your program. In order to not worry about all this, we'll instead use a simulation of the 5 qubit device defined by the constraints set above.
```
# 5-qubit register matching the Tenerife layout; a single classical bit for the output
qr = QuantumRegister(5, 'qr')
cr = ClassicalRegister(1, 'cr')
# local simulator; noise/coupling constraints are injected at execute() time
backend = Aer.get_backend('qasm_simulator')
```
We now define the `AND` function. This has a few differences to the version in Exercise 1. Firstly, it is defined on a 5 qubit circuit, so you'll need to decide which of the 5 qubits are used to encode `input1`, `input2` and the output. Secondly, the output is a histogram of the number of times that each output is found when the process is repeated over 10000 samples.
```
def AND (input1,input2, q_1=0,q_2=1,q_out=2):
    """Run an AND gate on the noisy 5-qubit simulation and return the
    histogram of measured outputs over 10000 shots.

    input1, input2 : str  -- '0' or '1', the classical inputs
    q_1, q_2       : int  -- qubits used to encode input1 and input2
    q_out          : int  -- qubit whose value is measured as the output
    """
    # The keyword q_1 specifies the qubit used to encode input1
    # The keyword q_2 specifies qubit used to encode input2
    # The keyword q_out specifies qubit to be used as output
    qc = QuantumCircuit(qr, cr)
    # prepare input on qubits q_1 and q_2
    if input1=='1':
        qc.x( qr[ q_1 ] )
    if input2=='1':
        qc.x( qr[ q_2 ] )
    qc.ccx(qr[ q_1 ],qr[ q_2 ],qr[ q_out ]) # the AND is a single ccx (Toffoli) gate
    qc.measure(qr[ q_out ],cr[0]) # the output qubit q_out is measured
    # the circuit is run on a simulator, but we do it so that the noise and connectivity of Tenerife are also reproduced
    job = execute(qc, backend, shots=10000, noise_model=noise_model,
                  coupling_map=coupling_map,
                  basis_gates=noise_model.basis_gates)
    output = job.result().get_counts()
    return output
```
For example, here are the results when both inputs are `0`.
```
# run the AND gate with both inputs 0 and plot the (noisy) output counts
result = AND('0','0')
print( result )
plot_histogram( result )
```
We'll compare across all results to find the most unreliable.
```
# Find the lowest probability of a correct answer across all four input pairs
worst = 1
for input1 in ['0','1']:
    for input2 in ['0','1']:
        print('\nProbability of correct answer for inputs',input1,input2)
        # the correct output bit is 1 only when both inputs are '1'
        expected = str(int( input1=='1' and input2=='1' ))
        counts = AND(input1,input2, q_1=0,q_2=1,q_out=2)
        # fix: use .get with default 0 -- indexing with [expected] raises
        # KeyError if the correct outcome never appears in the noisy counts
        prob = counts.get(expected, 0)/10000
        print( prob )
        worst = min(worst,prob)
print('\nThe lowest of these probabilities was',worst)
```
The `AND` function above uses the `ccx` gate to implement the required operation. But you now know how to make your own. Find a way to implement an `AND` for which the lowest of the above probabilities is better than for a simple `ccx`.
```
import qiskit
qiskit.__qiskit_version__
```
| github_jupyter |
<img style="float: center;" src="../images/CI_horizontal.png" width="600">
<center>
<span style="font-size: 1.5em;">
<a href='https://www.coleridgeinitiative.org'>Website</a>
</span>
</center>
Ghani, Rayid, Frauke Kreuter, Julia Lane, Adrianne Bradford, Alex Engler, Nicolas Guetta Jeanrenaud, Graham Henke, Daniela Hochfellner, Clayton Hunter, Brian Kim, Avishek Kumar, and Jonathan Morgan.
# Data Preparation for Machine Learning - Creating Labels
----
## Python Setup
- Back to [Table of Contents](#Table-of-Contents)
Before we begin, run the code cell below to initialize the libraries we'll be using in this assignment. We're already familiar with `numpy`, `pandas`, and `psycopg2` from previous tutorials. Here we'll also be using [`scikit-learn`](http://scikit-learn.org) to fit models.
```
%pylab inline
import pandas as pd
import psycopg2
from sqlalchemy import create_engine
db_name = "appliedda"
hostname = "10.10.2.10"
```
## Creating Labels
Labels are the dependent variables, or *Y* variables, that we are trying to predict. In the machine learning framework, your labels are usually *binary*: true or false, encoded as 1 or 0.
In this case, our label is __whether an existing single unit employer in a given year disappears within a given number of years__. By convention, we will flag employers who still exist in the following year as 0, and those who no longer exist as 1.
Single unit employers can be flagged using the `multi_unit_code` (`multi_unit_code = '1'`). We create a unique firm ID using EIN (`ein`), SEIN Unit (`seinunit`) and Employer Number (`empr_no`).
We need to pick the year and quarter of prediction, and the number of years we look forward to see if the employer still exists. Let's use Q1 of 2013 as our date of prediction. Different projects might be interested in looking at short-term or long-term survivability of employers, but for this first example, we evaluate firm survivability within one year of the prediction date.
### Detailed Creation of Labels for a Given Year
For this example, let's use 2013 (Q1) as our reference year (year of prediction).
Let's start by creating the list of unique employers in that quarter:
```
# Open a connection and build the list of unique single-unit employers in 2013 Q1
conn = psycopg2.connect(database=db_name, host=hostname)
cursor = conn.cursor()
sql = '''
CREATE TEMP TABLE eins_2013q1 AS
SELECT DISTINCT CONCAT(ein, '-', seinunit, '-', empr_no) AS id, ein, seinunit, empr_no
FROM il_des_kcmo.il_qcew_employers
WHERE multi_unit_code = '1' AND year = 2013 AND quarter = 1;
COMMIT;
'''
cursor.execute(sql)
# Sanity check: preview the first 10 rows of the temp table
sql = '''
SELECT *
FROM eins_2013q1
LIMIT 10
'''
pd.read_sql(sql, conn)
```
Now let's create this same table one year later.
```
# Same employer list, one year later (2014 Q1), to test survival against
sql = '''
CREATE TEMP TABLE eins_2014q1 AS
SELECT DISTINCT CONCAT(ein, '-', seinunit, '-', empr_no) AS id,
ein, seinunit, empr_no
FROM il_des_kcmo.il_qcew_employers
WHERE multi_unit_code = '1' AND year = 2014 AND quarter = 1;
COMMIT;
'''
cursor.execute(sql)
# Sanity check: preview the first 10 rows of the temp table
sql = '''
SELECT *
FROM eins_2014q1
LIMIT 10
'''
pd.read_sql(sql, conn)
```
In order to assess whether a 2013 employer still exists in 2014, let's merge the 2014 table onto the 2013 list of employers. Notice that we create a `label` variable that takes the value `0` if the 2013 employer still exists in 2014, `1` if the employer disappears.
```
# LEFT JOIN 2014 onto 2013; label = 1 when the employer is absent a year later
sql = '''
CREATE TABLE IF NOT EXISTS ada_18_uchi.labels_2013q1_2014q1 AS
SELECT a.*, CASE WHEN b.ein IS NULL THEN 1 ELSE 0 END AS label
FROM eins_2013q1 AS a
LEFT JOIN eins_2014q1 AS b
ON a.id = b.id AND a.ein = b.ein AND a.seinunit = b.seinunit AND a.empr_no = b.empr_no;
COMMIT;
ALTER TABLE ada_18_uchi.labels_2013q1_2014q1 OWNER TO ada_18_uchi_admin;
COMMIT;
'''
cursor.execute(sql)
# Load the 2013 Labels into Python Pandas
sql = '''
SELECT *
FROM ada_18_uchi.labels_2013q1_2014q1
'''
df_labels_2013 = pd.read_sql(sql, conn)
df_labels_2013.head(10)
```
Given these first rows, employers who survive seem to be more common than employers who disappear. Let's get an idea of the distribution of our label variable.
```
pd.crosstab(index = df_labels_2013['label'], columns = 'count')
```
### Repeating the Label Creation Process for Another Year
Since we need one training and one test set for our machine learning analysis, let's create the same labels table for the following year.
```
# Re-open the connection and repeat the whole label pipeline for 2014 Q1 -> 2015 Q1
conn = psycopg2.connect(database=db_name, host=hostname)
cursor = conn.cursor()
# One batch: employer lists for both quarters, then the labels join
sql = '''
CREATE TEMP TABLE eins_2014q1 AS
SELECT DISTINCT CONCAT(ein, '-', seinunit, '-', empr_no) AS id, ein, seinunit, empr_no
FROM il_des_kcmo.il_qcew_employers
WHERE multi_unit_code = '1' AND year = 2014 AND quarter = 1;
COMMIT;
CREATE TEMP TABLE eins_2015q1 AS
SELECT DISTINCT CONCAT(ein, '-', seinunit, '-', empr_no) AS id, ein, seinunit, empr_no
FROM il_des_kcmo.il_qcew_employers
WHERE multi_unit_code = '1' AND year = 2015 AND quarter = 1;
COMMIT;
CREATE TABLE IF NOT EXISTS ada_18_uchi.labels_2014q1_2015q1 AS
SELECT a.*, CASE WHEN b.ein IS NULL THEN 1 ELSE 0 END AS label
FROM eins_2014q1 AS a
LEFT JOIN eins_2015q1 AS b
ON a.id = b.id AND a.ein = b.ein AND a.seinunit = b.seinunit AND a.empr_no = b.empr_no;
COMMIT;
ALTER TABLE ada_18_uchi.labels_2014q1_2015q1 OWNER TO ada_18_uchi_admin;
COMMIT;
'''
cursor.execute(sql)
# Load the 2014 Labels into Python Pandas
sql = '''
SELECT *
FROM ada_18_uchi.labels_2014q1_2015q1
'''
df_labels_2014 = pd.read_sql(sql, conn)
df_labels_2014.head()
```
Let's get an idea of the distribution of our label variable.
```
pd.crosstab(index = df_labels_2014['label'], columns = 'count')
```
### Writing a Function to Create Labels
If you feel comfortable with the content we saw above, and expect to be creating labels for several different years as part of your project, the following code defines a Python function that generates the label table for any given year and quarter.
In the above, the whole SQL query was hard coded. In the below, we made a function with parameters for your choice of year and quarter, your choice of prediction horizon, your team's schema, etc. The complete list of parameters is given in parentheses after the `def generate_labels` statement. Some parameters are given a default value (like `delta_t=1`), others (like `year` and `qtr`) are not. More information on the different parameters is given below:
- `year`: The year at which we are doing the prediction.
- `qtr`: The quarter at which we are doing the prediction.
- `delta_t`: The forward-looking window, or number of years over which we are predicting employer survival or failure. The default value is 1, which means we are prediction at a given time whether an employer will still exist one year later.
- `schema`: Your team schema, where the label table will be written. The default value is set to `myschema`, which you define in the cell above the function.
- `db_name`: Database name. This is the name of the SQL database we are using. The default value is set to `db_name`, defined in the [Python Setup](#Python-Setup) section of this notebook.
- `hostname`: Host name. This is the host name for the SQL database we are using. The default value is set to `hostname`, defined in the [Python Setup](#Python-Setup) section of this notebook.
- `overwrite`: Whether you want the function to overwrite tables that already exist. Before writing a table, the function will check whether this table exists, and by default will not overwrite existing tables.
```
# Insert team schema name below:
myschema = 'ada_18_uchi'
def generate_labels(year, qtr, delta_t=1, schema=myschema, db_name=db_name, hostname=hostname, overwrite=False):
    """Create (if missing) and load the employer-survival label table.

    Parameters
    ----------
    year, qtr : int
        Year and quarter of prediction.
    delta_t : int, default 1
        Forward-looking window (in years) for employer survival.
    schema : str
        Team schema where the label table is written.
    db_name, hostname : str
        SQL database name and host.
    overwrite : bool, default False
        If True, recreate the table even when it already exists.

    Returns
    -------
    pandas.DataFrame
        One row per single-unit employer at (year, qtr) with a binary
        ``label`` column: 1 if the employer no longer exists ``delta_t``
        years later, 0 otherwise.
    """
    conn = psycopg2.connect(database=db_name, host = hostname) #database connection
    cursor = conn.cursor()
    # NOTE(review): year/qtr/schema are interpolated with str.format rather
    # than SQL parameters -- acceptable only for trusted notebook inputs
    sql = """
    CREATE TEMP TABLE eins_{year}q{qtr} AS
    SELECT DISTINCT CONCAT(ein, '-', seinunit, '-', empr_no) AS id, ein, seinunit, empr_no
    FROM il_des_kcmo.il_qcew_employers
    WHERE multi_unit_code = '1' AND year = {year} AND quarter = {qtr};
    COMMIT;
    CREATE TEMP TABLE eins_{year_pdelta}q{qtr} AS
    SELECT DISTINCT CONCAT(ein, '-', seinunit, '-', empr_no) AS id, ein, seinunit, empr_no
    FROM il_des_kcmo.il_qcew_employers
    WHERE multi_unit_code = '1' AND year = {year_pdelta} AND quarter = {qtr};
    COMMIT;
    DROP TABLE IF EXISTS {schema}.labels_{year}q{qtr}_{year_pdelta}q{qtr};
    CREATE TABLE {schema}.labels_{year}q{qtr}_{year_pdelta}q{qtr} AS
    SELECT a.*, CASE WHEN b.ein IS NULL THEN 1 ELSE 0 END AS label
    FROM eins_{year}q{qtr} AS a
    LEFT JOIN eins_{year_pdelta}q{qtr} AS b
    ON a.id = b.id AND a.ein = b.ein AND a.seinunit = b.seinunit AND a.empr_no = b.empr_no;
    COMMIT;
    ALTER TABLE {schema}.labels_{year}q{qtr}_{year_pdelta}q{qtr} OWNER TO {schema}_admin;
    COMMIT;
    """.format(year=year, year_pdelta=year+delta_t, qtr=qtr, schema=schema)
    # Let's check if the table already exists:
    # This query will return an empty table (with no rows) if the table does not exist
    cursor.execute('''
    SELECT * FROM information_schema.tables
    WHERE table_name = 'labels_{year}q{qtr}_{year_pdelta}q{qtr}'
    AND table_schema = '{schema}';
    '''.format(year=year, year_pdelta=year+delta_t, qtr=qtr, schema=schema))
    # Let's write table if it does not exist (or if overwrite = True)
    if not(cursor.rowcount) or overwrite:
        print("Creating table")
        cursor.execute(sql)
    else:
        print("Table already exists")
    cursor.close()
    # Load table into pandas dataframe
    sql = '''
    SELECT * FROM {schema}.labels_{year}q{qtr}_{year_pdelta}q{qtr}
    '''.format(year=year, year_pdelta=year+delta_t, qtr=qtr, schema=schema)
    df = pd.read_sql(sql, conn)
    conn.close()  # fix: release the database connection (was previously leaked)
    return df
```
Let's run the defined function for a few different years:
```
# For 2012 Q1
df_labels_2012 = generate_labels(year=2012, qtr=1)
pd.crosstab(index = df_labels_2012['label'], columns = 'count')
# For 2012 Q1 with a 3 year forward looking window
# (more employers fail over a longer horizon, so more rows get label 1)
df_labels_2012 = generate_labels(year=2012, qtr=1, delta_t=3)
pd.crosstab(index = df_labels_2012['label'], columns = 'count')
```
Why is the number of 1's higher in the second case?
```
df_labels_2015 = generate_labels(year=2015, qtr=1)
pd.crosstab(index = df_labels_2015['label'], columns = 'count')
```
Notice the surprising results in 2015. What is the underlying data problem?
| github_jupyter |
```
%config IPCompleter.greedy = True
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
%load_ext tensorboard
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
import tensorflow as tf
from datetime import datetime
pd.set_option('mode.chained_assignment', None)
sn.set(rc={'figure.figsize':(9,9)})
sn.set(font_scale=1.4)
# make results reproducible
seed = 0
np.random.seed(seed)
!pip install pydot
!rm -rf ./logs/
```
# TensorFlow Dataset
TensorFlow's [dataset](https://www.tensorflow.org/guide/data) object `tf.data.Dataset` allows us to write descriptive and efficient dataset input pipelines. It allows the following pattern:
1. Create a source dataset from the input data
2. Apply transformations to preprocess the data
3. Iterate over the dataset and process all the elements
The iteration happens via a streamlining method, which works well with datasets that are large and that don't have to completely fit into the memory.
We can consume any python iterable nested data structure by the `tf.data.Dataset` object, however we often use the following format that **Keras** expects, such as the following `(feature, label)` or `(X, y)` pairs is all that's needed for `tf.keras.Model.fit` and `tf.keras.Model.evaluate`.
Here is an example loading the digits dataset into a `tf.data.Dataset` object, using the `tf.data.Dataset.from_tensors()`
```
# Load the digits dataset that we have been using
from sklearn import datasets
from sklearn.model_selection import train_test_split
from tensorflow import keras
digits = datasets.load_digits()  # NOTE(review): dataset is loaded again below with return_X_y; `digits` keeps the full Bunch
(X, y) = datasets.load_digits(return_X_y=True)
# cast to the dtypes Keras expects: float features, integer labels
X = X.astype(np.float32)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)
# from_tensors wraps the whole training set as a single (features, labels) element
digits_train_ds = tf.data.Dataset.from_tensors((X_train, y_train))
print(list(digits_train_ds))
print('\n', digits_train_ds)
# Lets create a simple Dense Sequential NN and train it to illustrate passing the dataset object
model = keras.Sequential([
keras.layers.Dense(64, activation='relu', input_dim=64),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(10)
])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
loss=loss_fn,
metrics=['accuracy'])
history = model.fit(digits_train_ds, epochs=100, verbose=0)
dir(history)
print('Training accuracy : {:.3%}'.format(history.history['accuracy'][-1]))
```
We can also construct a `Dataset` using `tf.data.Dataset.from_tensor_slices()` or if the input data is stored in a file in the recommended TFRecord file format, we can use the `tf.data.TFRecordDataset()`.
```
digits_train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))
```
We can easily transform our `Dataset` object, setting up our data processing pipeline, by chaining method calls on the object since it returns a new `Dataset` object type. As an example we can apply per-element transformations such as `Dataset.map()`, and multi-element transformations such as `Dataset.batch()`. More transforms can be seen [here](https://www.tensorflow.org/api_docs/python/tf/data/Dataset).
The dataset object is also iteterable, so we can consume it in a for loop i.e.
```
# the Dataset object is iterable: print only the first two elements
for i, elm in enumerate(digits_train_ds):
    if i <= 1:
        print(elm)
```
We can also generate a dataset, such that it doesn't all have to exist in memory, by consuming a generator
```
def random_numbers(stop):
    """Yield `stop` pseudo-random integers drawn uniformly from [0, 10).

    (Indentation restored -- the extracted block had lost it and did not
    parse; the manual counter loop is replaced by a for-range.)
    """
    for _ in range(stop):
        yield np.random.randint(0, 10)
print('Testing the generator\n')
for i in random_numbers(7):
print(i)
print('\n\nCreating a Dataset by consuming the generator\n')
# args=[10] is forwarded to random_numbers; each element is a scalar int32
ds_random = tf.data.Dataset.from_generator(random_numbers, args=[10], output_types=tf.int32, output_shapes = (), )
for element in ds_random:
print(element)
```
We can also ingest datasets from the following formats with the following [functions](https://www.tensorflow.org/api_docs/python/tf/data):
|Data format|Function|
|-----------|--------|
|`TFRecord`|`tf.data.TFRecordDataset(file_paths)`|
|`Text file`|`tf.data.TextLineDataset(file_paths)`|
|`CSV`|`tf.data.experimental.CsvDataset(file_path)`|
Once we have our dataset, we can process it before using it for training.
#### Batching the dataset
We can turn our `Dataset` into a batched `Dataset`, i.e. stacking $n$ consecutive elements of a dataset into a single element, performed with `Dataset.batch(n)`
```
print('Before batching\n[')
for i in ds_random:
print(i)
print(']')
print('\nAfter batching\n[')
# NOTE(review): each iteration over ds_random re-runs the generator, so the
# batched values below are a fresh random draw, not the ones printed above.
for i in ds_random.batch(3):
print(i)
print(']')
```
#### Repeating the dataset
We can repeat the dataset so that each original value is seen $n$ times
```
dataset = tf.data.Dataset.from_tensor_slices([0, 1, 2])
# repeat(3) yields the sequence 0, 1, 2 three times (9 elements in total)
dataset = dataset.repeat(3)
list(dataset.as_numpy_iterator())
```
#### Randomly shuffling the input data
Randomly shuffle the elements of the dataset. This has a `buffer_size` parameter, where the dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer, replacing selected elements with new elements. Therefore, for perfect shuffling, we need to specify a `buffer_size` greater than or equal to the full size of the dataset.
```
dataset = tf.data.Dataset.from_tensor_slices([0, 1, 2])
# buffer_size equals the dataset size here, so the shuffle is uniform
dataset = dataset.shuffle(3)
list(dataset.as_numpy_iterator())
```
#### Custom dataset operations
We can easily process the dataset with our own element wise function `f` that we define ourselves. And then call `Dataset.map(f)` to apply the transformation and return a new `Dataset`.
```
def f(x):
    """Element-wise transform for Dataset.map: return the input doubled.

    (Indentation restored -- the extracted block had lost it.)
    """
    return x * 2
dataset = tf.data.Dataset.from_tensor_slices([0, 1, 2])
# map applies f to every element, producing 0, 2, 4
dataset = dataset.map(f)
list(dataset.as_numpy_iterator())
```
# Custom models in Keras
So far we have only used `tf.keras.Sequential` model, which is a simple stack of layers. However this cannot represent arbitrary models. We can use **Keras**'s *functional API* to build complex models (usually a directed acyclic graph of layers), which can have multi-input, multi-output, shared layers (the layers is called multiple times) and models with non-sequential data flows (residual connections).
This is possible with the TensorFlow integration as each layer instance takes a tensor as a callable parameter and returns a tensor, so we can connect layers up as we want. We use the input tensors and output tensors to define the `tf.keras.Model` instance, which allows us to train it and use all the model **Keras** model functionality we have seen so far.
We can create a fully-connected network using the functional API, e.g.
```
# Returns an input placeholder
inputs = tf.keras.Input(shape=(64,))
# A layer instance is callable on a tensor, and returns a tensor.
x = keras.layers.Dense(64, activation='relu')(inputs)
x = keras.layers.Dense(64, activation='relu')(x)
# Final layer: raw logits for the 10 digit classes (no softmax applied)
predictions = keras.layers.Dense(10)(x)
# Instantiate the model for the defined input and output tensors
model = tf.keras.Model(inputs=inputs, outputs=predictions, name='FirstCustomModel')
```
Once we have defined our model, we checkout what the model summary looks like by using `tf.keras.Model.summary()`
```
# For a dense layer each MLP unit in that layer is connected to each input layer unit
# plus one parameter per unit for the bias
print('Parameters for a dense layer = {}\n\n'.format(64*64 + 64))
# summary() lists each layer with its output shape and parameter count
model.summary()
```
We can also plot the model as graph natively
```
keras.utils.plot_model(model)
```
We can also show the input and output shapes for each layer in the graph
```
keras.utils.plot_model(model, show_shapes=True)
```
Once we have our model, we can use it like any other **Keras** model that we have seen, i.e. being able to train, evaluate and save the model simply.
```
# Specify the training configuration.
# from_logits=True because the model's last Dense layer applies no softmax
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# Trains for 5 epochs
history = model.fit(X_train, y_train, batch_size=32, epochs=5)
test_scores = model.evaluate(X_test, y_test, verbose=2)
print('Test loss: {:.4}'.format(test_scores[0]))
print('Test accuracy: {:.3%}'.format(test_scores[1]))
```
### Defining multiple models from the same graph of layers
Since the `tf.keras.Model` is really just a convenience object that encapsulates a connected set of layers, we can form multiple models, or connected sets of layers (sub-graphs) from one defined graph of layers (or computation graph).
To illustrate, let us create an *auto-encoder*, that takes an input mapping it to a low dimensional representation by a neural network and then maps the same low dimensional representation back to the output, i.e. to learn an efficient low-dimensional representation (efficient data encoding) of our sample in an unsupervised manner.
Here we can create one large model to encapsulate the entire graph, called the *auto-encoder*, however we may wish to create sub models such as the *encoder* model to map the input sample to the low-dimensional representation and the *decoder* model to map the low-dimensional representation back to the input sample dimensions.
Lets illustrate with an example
```
# Create one auto-encoder graph
encoder_input = keras.Input(shape=(28, 28, 1), name='img')
x = keras.layers.Conv2D(16, 3, activation='relu')(encoder_input)
x = keras.layers.Conv2D(32, 3, activation='relu')(x)
x = keras.layers.MaxPooling2D(3)(x)
x = keras.layers.Conv2D(32, 3, activation='relu')(x)
x = keras.layers.Conv2D(16, 3, activation='relu')(x)
# 16-dimensional encoding: one value per channel after global pooling
encoder_output = keras.layers.GlobalMaxPooling2D()(x)
# Decoder: reshape the 16-vector to 4x4x1 and upsample back to an image
x = keras.layers.Reshape((4, 4, 1))(encoder_output)
x = keras.layers.Conv2DTranspose(16, 3, activation='relu')(x)
x = keras.layers.Conv2DTranspose(32, 3, activation='relu')(x)
x = keras.layers.UpSampling2D(3)(x)
x = keras.layers.Conv2DTranspose(16, 3, activation='relu')(x)
decoder_output = keras.layers.Conv2DTranspose(1, 3, activation='relu')(x)
# Two models over the SAME graph: the full auto-encoder and just the encoder
autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder')
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
print('Auto-encoder')
keras.utils.plot_model(autoencoder, show_shapes=True)
print('Encoder')
keras.utils.plot_model(encoder, show_shapes=True)
```
Due to the *auto-encoder* nature the architecture is symmetrical, since the reverse of a `Conv2D` layer is a `Conv2DTranspose` layer, and the reverse of a `MaxPooling2D` layer is an `UpSampling2D` layer.
We can also compose multiple models, as we can assume a model behaves like a layer, i.e. we can create the same *auto-encoder* architecture by composing the encoder and decoder model together, i.e.
```
# Create encoder graph
x = keras.layers.Conv2D(16, 3, activation='relu')(encoder_input)
x = keras.layers.Conv2D(32, 3, activation='relu')(x)
x = keras.layers.MaxPooling2D(3)(x)
x = keras.layers.Conv2D(32, 3, activation='relu')(x)
x = keras.layers.Conv2D(16, 3, activation='relu')(x)
encoder_output = keras.layers.GlobalMaxPooling2D()(x)
# Create decoder graph
# The decoder gets its own Input so it can be used as a standalone model
decoder_input = keras.Input(shape=(16,), name='encoded_img')
x = keras.layers.Reshape((4, 4, 1))(decoder_input)
x = keras.layers.Conv2DTranspose(16, 3, activation='relu')(x)
x = keras.layers.Conv2DTranspose(32, 3, activation='relu')(x)
x = keras.layers.UpSampling2D(3)(x)
x = keras.layers.Conv2DTranspose(16, 3, activation='relu')(x)
decoder_output = keras.layers.Conv2DTranspose(1, 3, activation='relu')(x)
# Create models for each graph
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
decoder = keras.Model(decoder_input, decoder_output, name='decoder')
# Connect the two models together
autoencoder_input = keras.Input(shape=(28, 28, 1), name='img')
# A model can be called like a layer: chain encoder then decoder
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
# Create the auto-encoder model that composes the two encoder and decoder models
autoencoder = keras.Model(autoencoder_input, decoded_img, name='autoencoder')
autoencoder.summary()
```
A common case that we can use model nesting is to create an *ensemble* of models. Such as the example below combining multiple models and averaging their predictions.
```
def get_model():
    """Build a single-output linear model over 128-dimensional inputs.

    (Indentation restored -- the extracted block had lost it and did not
    parse.)
    """
    inputs = keras.Input(shape=(128,))
    outputs = keras.layers.Dense(1)(inputs)
    return keras.Model(inputs, outputs)
# Three independently initialized instances of the same architecture
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
# Ensemble prediction: element-wise average of the three outputs
outputs = keras.layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
```
## Multi-Output & Multi-Input models
We may want to create a model that takes multiple inputs and/or produces multiple outputs.
For example, we may want a model that ranks customer emails for a business by priority and routes them to the correct group mailing list for resolution.
This model could have three inputs:
* email subject as text input
* email body as text input
* any optional tags based on existing categorical tags (that the company already has about this email address)
And two outputs:
* priority score between 0 and 1 (scalar sigmoid output)
* the group mailing list email that should resolve the inbound email (a softmax output over the set of departments)
```
amount_tags = 12 # Number of unique tags
amount_words = 10000 # Size of vocabulary obtained when preprocessing text data
amount_mailing_lists = 4 # Number of mailing lists for predictions
# Variable-length sequence of ints
subject_input = keras.Input(shape=(None,), name='subject')
# Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name='body')
# Binary vectors of size `amount_tags`
tags_input = keras.Input(shape=(amount_tags,), name='tags')
# Embed each word in the subject into a 64-dimensional vector
subject_features = keras.layers.Embedding(amount_words, 64)(subject_input)
# Embed each word in the text into a 64-dimensional vector
body_features = keras.layers.Embedding(amount_words, 64)(body_input)
# Reduce sequence of embedded words in the subject into a single 128-dimensional vector
subject_features = keras.layers.LSTM(128)(subject_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = keras.layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = keras.layers.concatenate([subject_features, body_features, tags_input])
# Apply a sigmoid (logistic regression) for priority prediction on top of the features
# NOTE(review): no activation is set, so both heads emit raw logits; the
# from_logits=True losses used when compiling expect exactly that.
priority_pred = keras.layers.Dense(1, name='priority')(x)
# Apply a mailing_list classifier on top of the features
mailing_list_pred = keras.layers.Dense(
amount_mailing_lists, name='mailing_list')(x)
# Instantiate an end-to-end model predicting both priority and mailing_list
model = keras.Model(inputs=[subject_input, body_input, tags_input],
outputs=[priority_pred, mailing_list_pred])
keras.utils.plot_model(model, show_shapes=True)
```
We can assign different losses to each output, and thus can assign different weights to each loss - to control their contribution to the total training loss when we compile the model.
```
# Losses are matched to the outputs positionally; with loss_weights the
# priority loss counts 5x as much as the mailing-list loss (1.0 vs 0.2).
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True)],
loss_weights=[1., 0.2])
```
We can also specify the losses based on their names as well
```
# When losses are keyed by output name, key the loss_weights by name too --
# the original passed a positional list alongside a dict, leaving the
# weight-to-output mapping implicit.
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
              loss={'priority': keras.losses.BinaryCrossentropy(from_logits=True),
                    'mailing_list': keras.losses.CategoricalCrossentropy(from_logits=True)},
              loss_weights={'priority': 1., 'mailing_list': 0.2})
```
We can train the model, where we pass the data (or yield it from the dataset object) either as a:
* tuple of lists, e.g. `([X_subject, X_body, X_tags], [y_priority, y_mailing_list])`
* tuple of dictionaries, e.g. `({'subject': X_subject, 'body': X_body, 'tags': X_tags}, {'priority': y_priority, 'mailing_list': y_mailing_list})`
```
# Some random input data (X)
X_subject = np.random.randint(amount_words, size=(1280, 10))
X_body = np.random.randint(amount_words, size=(1280, 100))
X_tags = np.random.randint(2, size=(1280, amount_tags)).astype('float32')
# Some random targets (y)
y_priority = np.random.random(size=(1280, 1))
y_mailing_list = np.random.randint(2, size=(1280, amount_mailing_lists))
# Dict keys match the names given to the keras.Input and output layers
model.fit({'subject': X_subject, 'body': X_body, 'tags': X_tags},
{'priority': y_priority, 'mailing_list': y_mailing_list},
epochs=2,
batch_size=32)
```
## Non-linear networks
We can also create non-linear graphs, where the models with the layers are not connected sequentially.
An example of a type of model that is non-linear is a *Residual Neural Network* (ResNet), which is a neural network that has *skip connections* or *shortcuts* to jump over some layers. It is often implemented with double- or triple-layer skips that contain nonlinearities (ReLU) and batch normalization in between.
We can connect multiple connections into the same node by using the `keras.layers.add()` layer, where we pass a list of input tensors to add together. There also exist other layers to combine multiple layers, such as the `subtract`, `average`, `concatenate`, `dot`, `maximum`, `minimum` and `multiply` layers in the `keras.layers` module. A full list can be seen [here](https://www.tensorflow.org/api_docs/python/tf/keras/layers).
To illustrate lets create an example of a ResNet model:
```
inputs = keras.Input(shape=(32, 32, 3), name='img')
x = keras.layers.Conv2D(32, 3, activation='relu')(inputs)
x = keras.layers.Conv2D(64, 3, activation='relu')(x)
block_1_output = keras.layers.MaxPooling2D(3)(x)
# padding='same' keeps the spatial size so the skip connection can be summed
x = keras.layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_output)
x = keras.layers.Conv2D(64, 3, activation='relu', padding='same')(x)
# Residual (skip) connection: element-wise sum with the block's input
block_2_output = keras.layers.add([x, block_1_output])
x = keras.layers.Conv2D(64, 3, activation='relu', padding='same')(block_2_output)
x = keras.layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_3_output = keras.layers.add([x, block_2_output])
x = keras.layers.Conv2D(64, 3, activation='relu')(block_3_output)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dense(256, activation='relu')(x)
x = keras.layers.Dropout(0.5)(x)
outputs = keras.layers.Dense(10)(x)
model = keras.Model(inputs, outputs, name='example_resnet')
model.summary()
keras.utils.plot_model(model, show_shapes=True)
```
## Share layers
We can also easily share the same layer in our model, i.e. a single layer instance is reused multiple times in the same model so that it learns a mapping that corresponds to multiple paths in the graph of layers.
Common use cases for sharing a layer would be to create a shared embedding (encoding inputs) if the inputs come from similar spaces.
For example
```
# Embedding for 10000 unique words mapped to 128-dimensional vectors
shared_embedding = keras.layers.Embedding(10000, 128)
# Variable-length sequence of integers
text_input_a = keras.Input(shape=(None,), dtype='int32')
# Variable-length sequence of integers
text_input_b = keras.Input(shape=(None,), dtype='int32')
# Reuse the same layer to encode both inputs
# (both paths share one embedding matrix, trained from both inputs)
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
```
### Extract and reuse nodes
The graph of layers is a static data structure, thus it can be directly accessed and inspected. This means that you can access the outputs from each node in the graph and reuse them elsewhere, which is useful for feature extraction and taking parts of a pre-trained model.
For an example lets create a model that outputs all the output nodes for a given pre-trained graph, e.g. the VGG19 model with its weights trained on ImageNet:
```
# Pre-trained VGG19 (downloads ImageNet weights on first use)
vgg19 = tf.keras.applications.VGG19()
# query the graph data structure
features_list = [layer.output for layer in vgg19.layers]
# Create a new model that outputs all the node values from the intermediate layers
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype('float32')
extracted_features = feat_extraction_model(img)
```
## Custom layers
Although `tf.keras` includes many useful built-in layers, a few of [these](https://www.tensorflow.org/api_docs/python/tf/keras/layers) being:
* Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`
* Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`
* RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`
* `BatchNormalization`, `Dropout`, `Embedding`, etc.
We can simply create our own custom layer by subclassing `tf.keras.layers.Layer` and implementing the following methods:
* `__init__`: Save configuration in member variables
* `build()`: Create the weights of the layer. Add weights with the `add_weight()` method. Will be called once from `__call__`, when the shapes of the input and `dtype` is known.
* `call()`: Define the forward pass. I.e. applying the actual logic of applying the layer to the input tensors (which should be passed as the first argument)
* Optionally, a layer can be serialized by implementing the `get_config()` method and the `from_config()` class method.
Conveniently, the [layer class](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer), `tf.keras.layers.Layer`, manages the weights, losses, updates and inter-layer connectivity for us.
Here's an example of a custom layer that implements a basic dense layer:
```
class CustomDense(keras.layers.Layer):
    """A minimal fully-connected layer computing `inputs @ w + b`.

    (Indentation restored -- the extracted block had lost it and did not
    parse.)
    """

    def __init__(self, units=32):
        super(CustomDense, self).__init__()
        self.units = units  # number of output units

    def build(self, input_shape):
        # Weights are created lazily, once the input feature size is known.
        self.w = self.add_weight(shape=(input_shape[-1], self.units),
                                 initializer='random_normal',
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer='random_normal',
                                 trainable=True)

    def call(self, inputs):
        # Forward pass: an affine transformation of the inputs.
        return tf.matmul(inputs, self.w) + self.b

    def get_config(self):
        # Enables serialization via Model.get_config()/from_config().
        return {'units': self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
# Example of serializing and deserializing the layer
config = model.get_config()
# Deserializing needs custom_objects to map the layer name back to its class
new_model = keras.Model.from_config(
config, custom_objects={'CustomDense': CustomDense})
```
# Custom models
Another, slightly less flexible way to create our own custom models is to subclass `tf.keras.Model` and define our own forward pass. Here we create layers in the `__init__()` method and use them as attributes of the class instance. We can define the forward pass in the `call()` method. However this is not the preferred way to create custom models in *Keras*; the functional API described above is.
An example would be:
```
class MyModel(tf.keras.Model):
    """Subclassed two-layer MLP emitting `num_classes` logits.

    (Indentation restored -- the extracted block had lost it and did not
    parse.)
    """

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__(name='my_custom_model')
        self.num_classes = num_classes
        # Define your layers here; they are reused on every forward pass.
        self.dense_1 = keras.layers.Dense(32, activation='relu')
        self.dense_2 = keras.layers.Dense(num_classes)

    def call(self, inputs):
        # Forward pass: hidden ReLU layer followed by the logits layer.
        x = self.dense_1(inputs)
        return self.dense_2(x)
model = MyModel(num_classes=10)
```
# Keras Callbacks
Here a `tf.keras.callbacks.Callback` object can be passed to a model to customize its behaviour during training, predicting or testing. It is mainly used to customize training behaviour. We can write our own custom callbacks to process the current models state at a [particular step](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/Callback) within each iteration of training or using the model for instance at `on_batch_end`, `on_epoch_end` or `on_test_end` etc.
Common built-in callbacks in `tf.keras.callbacks` include:
* `tf.keras.callbacks.ModelCheckpoint`: Saves checkpoints of the model at regular intervals
* `tf.keras.callbacks.LearningRateScheduler`: Dynamically changes the learning rate
* `tf.keras.callbacks.EarlyStopping`: Interrupts training when validation performance has stopped improving
* `tf.keras.callbacks.TensorBoard`: Output a log for use in monitoring the model's behaviour using TensorBoard
We can use a `tf.keras.callbacks.Callback`, here for training by passing it to the models fit method:
```
callbacks = [
# Interrupt training if `val_loss` (Validation loss) stops improving for over 2 epochs
tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
# Write TensorBoard logs to `./tmp_logs` directory
tf.keras.callbacks.TensorBoard(log_dir='./tmp_logs')
]
# Create a simple model to use it in
model = keras.Sequential([
keras.layers.Dense(64, activation='relu', input_dim=64),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(10)
])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
loss=loss_fn,
metrics=['accuracy'])
# validation_split=0.2 holds out 20% of the data, producing the monitored val_loss
model.fit(X_train, y_train, epochs=10, validation_split=0.2, callbacks=callbacks)
```
We can also write our own custom callbacks like the following:
```
class LossHistory(keras.callbacks.Callback):
    """Callback that records the training loss after every batch.

    (Indentation restored -- the extracted block had lost it and did not
    parse.)
    """

    def on_train_begin(self, logs):
        # Reset the record at the start of each training run.
        self.losses = []

    def on_batch_end(self, batch, logs):
        self.losses.append(logs.get('loss'))
```
[[1](https://www.tensorflow.org/guide/keras/functional)]
| github_jupyter |
```
import os
import glob
# NOTE(review): absolute Windows path -- adjust for the local machine.
base_dir = os.path.join('F:/0Sem 7/ML Lab/flower dataset/flowers')
daisy_dir = os.path.join(base_dir,'daisy')
dandelion_dir = os.path.join(base_dir,'dandelion')
rose_dir=os.path.join(base_dir,'rose')
sunflower_dir=os.path.join(base_dir,'sunflower')
tulip_dir=os.path.join(base_dir,'tulip')
# Collect the .jpg image paths for each of the five flower classes
daisy_files = glob.glob(daisy_dir+'/*.jpg')
dandelion_files = glob.glob(dandelion_dir+'/*.jpg')
rose_files = glob.glob(rose_dir+'/*.jpg')
sunflower_files = glob.glob(sunflower_dir+'/*.jpg')
tulip_files = glob.glob(tulip_dir+'/*.jpg')
print("Daisy samples:",len(daisy_files))
print("Dandelion samples:",len(dandelion_files))
print("Rose samples:",len(rose_files))
print("Sunflower samples:",len(sunflower_files))
print("Tulip samples:",len(tulip_files))
import numpy as np
import pandas as pd

np.random.seed(42)

# One row per image: its path and its class label. The frame is shuffled
# once, reproducibly, via sample(frac=1, random_state=42). The original
# code reshuffled it a second time WITHOUT a seed, which silently defeated
# the fixed random_state above and made the split non-reproducible; that
# redundant reshuffle has been removed.
files_df = pd.DataFrame({
    'filename': daisy_files + dandelion_files + rose_files + sunflower_files + tulip_files,
    'label': ['daisy'] * len(daisy_files) + ['dandelion'] * len(dandelion_files) + ['rose'] * len(rose_files) + ['sunflower'] * len(sunflower_files) + ['tulip'] * len(tulip_files)
}).sample(frac=1, random_state=42).reset_index(drop=True)
files_df.head()

from sklearn.model_selection import train_test_split
from collections import Counter

# Hold out 10% of the images for testing (no stratification by class)
train_files, test_files, train_labels, test_labels = train_test_split(
    files_df['filename'].values, files_df['label'].values,
    test_size=0.1, random_state=42)
print(train_files.shape, test_files.shape)
print('Train:', Counter(train_labels), '\nTest:', Counter(test_labels))
import cv2
from concurrent import futures
import threading
# Target size (width, height) every image is resized to before training
IMG_DIMS = (80, 80)
def get_img_data_parallel(idx, img, total_imgs):
    """Load one image file, resize it to IMG_DIMS and return it as float32.

    Prints a progress line every 1000 images (and for the last one) so the
    thread-pool workers give feedback while loading.

    (Indentation restored -- the extracted block had lost it and did not
    parse.)
    """
    if idx % 1000 == 0 or idx == (total_imgs - 1):
        print('{}: working on img num: {}'.format(threading.current_thread().name,
                                                  idx))
    img = cv2.imread(img)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # colour to grey
    img = cv2.resize(img, dsize=IMG_DIMS,
                     interpolation=cv2.INTER_CUBIC)
    img = np.array(img, dtype=np.float32)
    return img
# Thread pool sized automatically; image loading is I/O-bound so threads help
ex = futures.ThreadPoolExecutor(max_workers=None)
train_data_inp = [(idx, img, len(train_files)) for idx, img in enumerate(train_files)]
test_data_inp = [(idx, img, len(test_files)) for idx, img in enumerate(test_files)]
print('Loading Train Images:')
# ex.map takes one iterable per positional argument of the worker function
train_data_map = ex.map(get_img_data_parallel,
[record[0] for record in train_data_inp],
[record[1] for record in train_data_inp],
[record[2] for record in train_data_inp])
train_data = np.array(list(train_data_map))
print('\nLoading Test Images:')
test_data_map = ex.map(get_img_data_parallel,
[record[0] for record in test_data_inp],
[record[1] for record in test_data_inp],
[record[2] for record in test_data_inp])
test_data = np.array(list(test_data_map))
train_data.shape, test_data.shape
import matplotlib.pyplot as plt
%matplotlib inline
# Show 4 randomly chosen training images with their labels
plt.figure(1 , figsize = (10 , 10))
n = 0
for i in range(4):
n += 1
r = np.random.randint(0 , train_data.shape[0] , 1)
plt.subplot(4 , 4 , n)
plt.subplots_adjust(hspace = 0.5 , wspace = 0.5)
plt.imshow(train_data[r[0]]/255.)
plt.title('{}'.format(train_labels[r[0]]))
plt.xticks([]) , plt.yticks([])
BATCH_SIZE = 100
NUM_CLASSES = 5
EPOCHS = 25
# (height, width, channels) expected by the CNN's first layer
INPUT_SHAPE = (80, 80, 3)
# encode text category labels
from sklearn.preprocessing import LabelEncoder
# Scale pixel values into [0, 1]
train_imgs_scaled = train_data / 255.
test_imgs_scaled = test_data / 255.
le = LabelEncoder()
le.fit(train_labels)
train_labels_enc = le.transform(train_labels)
test_labels_enc = le.transform(test_labels)
from keras.utils import to_categorical
# One-hot encode the integer class ids for categorical training targets
train_labels_enc=to_categorical(train_labels_enc)
test_labels_enc=to_categorical(test_labels_enc)
print(train_labels[:4], train_labels_enc[:4])
print(test_labels[:4], test_labels_enc[:4])
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
def build_model():
    """Build and compile the 3-conv-block CNN flower classifier.

    Returns a compiled Sequential model with a 5-way softmax head.
    (Indentation restored -- the extracted block had lost it.)
    """
    model = Sequential()
    model.add(layers.Conv2D(32, (5,5), activation='relu', input_shape=INPUT_SHAPE))
    # NOTE(review): dropping 80% of activations this early is unusually
    # aggressive -- confirm that rate=0.8 was intentional.
    model.add(layers.Dropout(rate=0.8))
    #model.add(layers.MaxPooling2D())
    model.add(layers.Conv2D(64, (4,4), activation='relu'))
    model.add(layers.BatchNormalization())
    #model.add(layers.MaxPooling2D())
    model.add(layers.Conv2D(96, (3,3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D())
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(5, activation='softmax'))
    # Bug fix: this is a 5-class problem trained on one-hot labels, so the
    # loss must be categorical_crossentropy; binary_crossentropy treats each
    # class independently and reports misleading accuracy.
    model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
    return model
model=build_model()
print(model.summary())
# NOTE(review): validation_data reuses the test split, so the `val_*`
# metrics below are test-set metrics, not a separate validation set.
history = model.fit(x=train_imgs_scaled, y=train_labels_enc,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(test_imgs_scaled, test_labels_enc),
verbose=1)
# Plot the model accuracy vs. number of Epochs
# ('Train'/'Test' legend entries correspond to accuracy and val_accuracy)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['Train', 'Test'])
plt.show()
# Plot the Loss function vs. number of Epochs
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['Train', 'Test'])
plt.show()
```
| github_jupyter |
# Seminar 15
# Conjugate gradient method
## Reminder
1. Newton method
2. Convergence theorem
4. Comparison with gradient descent
5. Quasi-Newton methods
## Linear system vs. unconstrained minimization problem
Consider the problem
$$
\min_{x \in \mathbb{R}^n} \frac{1}{2}x^{\top}Ax - b^{\top}x,
$$
where $A \in \mathbb{S}^n_{++}$.
From the necessary optimality condition follows
$$
Ax^* = b
$$
Also denote gradient $f'(x_k) = Ax_k - b$ by $r_k$
## How to solve linear system $Ax = b$?
- Direct methods are based on the matrix decompositions:
- Dense matrix $A$: dimension is less than a few thousand
- Sparse matrix $A$: dimension of the order $10^4 - 10^5$
- Iterative methods: the method of choice in many cases, and the only approach appropriate for systems with dimension $ > 10^6$
## Some history...
M. Hestenes and E. Stiefel proposed *conjugate gradient method* (CG)
to solve linear system in 1952 as **direct** method.
For many years CG was considered to be only of theoretical interest, because
- CG does not work with a slide rule
- CG has few advantages over Gaussian elimination when working with a calculator
The CG method has to be considered as an **iterative method**, i.e. stop after
achieving the required tolerance!
More details see [here](https://www.siam.org/meetings/la09/talks/oleary.pdf)
## Conjugate directions method
- Descent direction in gradient descent method is anti-gradient
- Convergence is very **slow** for convex functions with a poorly conditioned hessian
**Idea:** move along directions that guarantee converegence in $n$ steps.
**Definition.** Nonzero vectors $\{p_0, \ldots, p_l\}$ are called *conjugate* with respect to matrix $A \in \mathbb{S}^n_{++}$, if
$$
p^{\top}_iAp_j = 0, \qquad i \neq j
$$
**Claim.** For every initial guess vector $x_0 \in \mathbb{R}^n$ the sequence $\{x_k\}$, which is generated by the conjugate directions method, converges to the solution of the linear system $Ax = b$ in at most $n$ steps.
```python
def ConjugateDirections(x0, A, b, p):
    """Solve Ax = b by exact line search along the conjugate directions `p`.

    Starting from the initial guess `x0`, the exact minimizing step along
    each direction p[i] is taken; for n directions conjugate w.r.t. an
    n-by-n SPD matrix A this reaches the solution of the system.

    (Indentation restored -- the extracted block had lost it and did not
    parse.)
    """
    x = x0
    r = A.dot(x) - b  # residual = gradient of the quadratic at x
    for i in range(len(p)):
        # Exact step size minimizing the quadratic along p[i]
        alpha = - (r.dot(p[i])) / (p[i].dot(A.dot(p[i])))
        x = x + alpha * p[i]
        r = A.dot(x) - b
    return x
```
### Example of conjugate directions
- Eigenvectors of matrix $A$
- For every set of $n$ vectors one can perform an analogue of Gram-Schmidt orthogonalization and get conjugate directions
**Q:** What is Gram-Schmidt orthogonalization process? :)
### Geometrical interpretation (Mathematics Stack Exchange)
<center><img src="./cg.png" ></center>
## Conjugate gradient method
**Idea:** new direction $p_k$ is searched in the form $p_k = -r_k + \beta_k p_{k-1}$, where $\beta_k$ is based on the requirement of conjugacy of directions $p_k$ and $p_{k-1}$:
$$
\beta_k = \dfrac{p^{\top}_{k-1}Ar_k}{p^{\top}_{k-1}Ap^{\top}_{k-1}}
$$
Thus, to get the next conjugate direction $p_k$ it is necessary to store conjugate direction $p_{k-1}$ and residual $r_k$ from the previous iteration.
**Q:** how to select step size $\alpha_k$?
### Convergence theorems
**Theorem 1.** If matrix $A \in \mathbb{S}^n_{++}$ has only $r$ distinct eigenvalues, then conjugate gradient method converges in $r$ iterations.
**Theorem 2.** The following convergence estimate holds
$$
\| x_{k+1} - x^* \|_A \leq \left( \dfrac{\sqrt{\kappa(A)} - 1}{\sqrt{\kappa(A)} + 1} \right)^k \|x_0 - x^*\|_A,
$$
where $\|x\|_A = x^{\top}Ax$ and $\kappa(A) = \frac{\lambda_n(A)}{\lambda_1(A)}$ - condition number of matrix $A$
**Remark:** compare coefficient of the linear convergence with
corresponding coefficiet in gradient descent method.
### Interpretations of conjugate gradient method
- Gradient descent in the space $y = Sx$, where $S = [p_0, \ldots, p_n]$, in which the matrix $A$ is diagonal (or identity if the conjugate directions are orthonormal)
- Search optimal solution in the [Krylov subspace](https://stanford.edu/class/ee364b/lectures/conj_grad_slides.pdf) $\mathcal{K}(A) = \{b, Ab, A^2b, \ldots \}$
### Improved version of CG method
In practice the following equations for step size $\alpha_k$ and coefficient $\beta_{k}$ are used.
$$
\alpha_k = \dfrac{r^{\top}_k r_k}{p^{\top}_{k}Ap_{k}} \qquad \beta_k = \dfrac{r^{\top}_k r_k}{r^{\top}_{k-1} r_{k-1}}
$$
**Q:** why do they better than base version?
### Pseudocode of CG method
```python
def ConjugateGradientQuadratic(x0, A, b, tol=1e-10):
    """Solve Ax = b for symmetric positive definite A with the CG method.

    Parameters
    ----------
    x0 : initial guess
    A, b : system matrix (SPD) and right-hand side
    tol : residual-norm threshold for termination. The original exact-zero
          test almost never holds in floating point and would loop forever.

    Bug fix: the original body used `x` before ever initializing it
    (`x = x0` was missing). Indentation was also restored.
    """
    x = x0
    r = A.dot(x0) - b
    p = -r
    while np.linalg.norm(r) > tol:
        alpha = r.dot(r) / p.dot(A.dot(p))
        x = x + alpha * p
        # Incremental residual update: r_{k+1} = r_k + alpha * A p_k
        r_next = r + alpha * A.dot(p)
        beta = r_next.dot(r_next) / r.dot(r)
        p = -r_next + beta * p
        r = r_next
    return x
```
## Using CG method in Newton method
- To find descent direction in Newton method one has to solve the following linear system $H(x_k) h_k = -f'(x_k)$
- If the objective function is strongly convex, then $H(x_k) \in \mathbb{S}^n_{++}$ and to solve this linear system one can use CG. In this case the method is called the *inexact Newton method*.
- What's new?
- Explicit storage of hessian is not needed, it's enough to have function that perform multiplication hessian by vector
- One can control accuracy of solving linear system and do not solve it very accurate far away from minimizer. **Important**: inexact solution may be not descent direction!
- Convergence is only superlinear if backtracking starts with $\alpha_0 = 1$, similarly to Newton method
## CG method for non-quadratic function
**Idea:** use gradients instead of residuals $r_k$ and backtracking for search $\alpha_k$ instead of analytical expression. We get Fletcher-Reeves method.
```python
def ConjugateGradientFR(f, gradf, x0):
    # NOTE(review): lecture pseudocode, not runnable as-is — StepSearch,
    # kwargs and restart_condition are placeholders defined nowhere here.
    x = x0
    grad = gradf(x)
    p = -grad
    while np.linalg.norm(gradf(x)) != 0:
        # Line search replaces the analytic step size of the quadratic case.
        alpha = StepSearch(x, f, gradf, **kwargs)
        x = x + alpha * p
        grad_next = gradf(x)
        # Fletcher-Reeves coefficient: gradients play the role of residuals.
        beta = grad_next.dot(grad_next) / grad.dot(grad)
        p = -grad_next + beta * p
        grad = grad_next
        if restart_condition:
            # Restart: forget history and fall back to steepest descent.
            p = -gradf(x)
    return x
```
### Convergence theorem
**Theorem.** Assume
- level set $\mathcal{L}$ is bounded
- there exists $\gamma > 0$: $\| f'(x) \|_2 \leq \gamma$ for $x \in \mathcal{L}$
Then
$$
\lim_{j \to \infty} \| f'(x_{k_j}) \|_2 = 0
$$
### Restarts
1. To speed up convergence of CG one can use *restart* technique: remove stored history, consider current point as $x_0$ and run method from this point
2. There exist different conditions which indicate the necessity of restart, i.e.
- $k = n$
- $\dfrac{|\langle f'(x_k), f'(x_{k-1}) \rangle |}{\| f'(x_k) \|_2^2} \geq \nu \approx 0.1$
3. It can be shown (see Nocedal, Wright Numerical Optimization, Ch. 5, p. 125) that the Fletcher-Reeves method without restarts can converge very slowly!
4. The Polak-Ribiere method and its modifications do not have this drawback
### Remarks
- The great notes "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" is available [here](https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf)
- Besides the Fletcher-Reeves method there exist other ways to compute $\beta_k$: the Polak-Ribiere method, the Hestenes-Stiefel method...
- The CG method requires to store 4 vectors, what vectors?
- The bottleneck is matrix by vector multiplication
## Experiments
### Quadratic objective function
```
import numpy as np
n = 100
# Random
# A = np.random.randn(n, n)
# A = A.T.dot(A)
# Clustered eigenvalues
A = np.diagflat([np.ones(n//4), 10 * np.ones(n//4), 100*np.ones(n//4), 1000* np.ones(n//4)])
U = np.random.rand(n, n)
Q, _ = np.linalg.qr(U)
A = Q.dot(A).dot(Q.T)
A = (A + A.T) * 0.5
print("A is normal matrix: ||AA* - A*A|| =", np.linalg.norm(A.dot(A.T) - A.T.dot(A)))
b = np.random.randn(n)
# Hilbert matrix
# A = np.array([[1.0 / (i+j - 1) for i in xrange(1, n+1)] for j in xrange(1, n+1)])
# b = np.ones(n)
f = lambda x: 0.5 * x.dot(A.dot(x)) - b.dot(x)
grad_f = lambda x: A.dot(x) - b
x0 = np.zeros(n)
```
#### Eigenvalues distribution
```
USE_COLAB = False
%matplotlib inline
import matplotlib.pyplot as plt
if not USE_COLAB:
plt.rc("text", usetex=True)
plt.rc("font", family='serif')
if USE_COLAB:
!pip install git+https://github.com/amkatrutsa/liboptpy
import seaborn as sns
sns.set_context("talk")
eigs = np.linalg.eigvalsh(A)
plt.semilogy(np.unique(eigs))
plt.ylabel("Eigenvalues", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
```
#### Exact solution
```
import scipy.optimize as scopt
def callback(x, array):
    """Record the current iterate *x* by appending it to *array*."""
    array += [x]
scopt_cg_array = []
scopt_cg_callback = lambda x: callback(x, scopt_cg_array)
x = scopt.minimize(f, x0, method="CG", jac=grad_f, callback=scopt_cg_callback)
x = x.x
print("||f'(x*)|| =", np.linalg.norm(A.dot(x) - b))
print("f* =", f(x))
```
#### Implementation of conjugate gradient method
```
def ConjugateGradientQuadratic(x0, A, b, tol=1e-8, callback=None):
    """Minimise 0.5 * x'Ax - b'x by the linear conjugate gradient method.

    Iterates until the residual norm drops to *tol*; when *callback* is
    given it receives every iterate right after the update.
    """
    x = x0
    residual = A.dot(x0) - b
    direction = -residual
    while np.linalg.norm(residual) > tol:
        # Optimal step length along the current conjugate direction.
        step = residual.dot(residual) / direction.dot(A.dot(direction))
        x = x + step * direction
        if callback is not None:
            callback(x)
        new_residual = residual + step * A.dot(direction)
        coeff = new_residual.dot(new_residual) / residual.dot(residual)
        direction = coeff * direction - new_residual
        residual = new_residual
    return x
import liboptpy.unconstr_solvers as methods
import liboptpy.step_size as ss
print("\t CG quadratic")
cg_quad = methods.fo.ConjugateGradientQuad(A, b)
x_cg = cg_quad.solve(x0, tol=1e-7, disp=True)
print("\t Gradient Descent")
gd = methods.fo.GradientDescent(f, grad_f, ss.ExactLineSearch4Quad(A, b))
x_gd = gd.solve(x0, tol=1e-7, disp=True)
print("Condition number of A =", abs(max(eigs)) / abs(min(eigs)))
```
#### Convergence plot
```
plt.figure(figsize=(8,6))
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_quad.get_convergence()], label=r"$\|f'(x_k)\|^{CG}_2$", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in scopt_cg_array[:50]], label=r"$\|f'(x_k)\|^{CG_{PR}}_2$", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in gd.get_convergence()], label=r"$\|f'(x_k)\|^{G}_2$", linewidth=2)
plt.legend(loc="best", fontsize=20)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Convergence rate", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
print([np.linalg.norm(grad_f(x)) for x in cg_quad.get_convergence()])
plt.figure(figsize=(8,6))
plt.plot([f(x) for x in cg_quad.get_convergence()], label=r"$f(x^{CG}_k)$", linewidth=2)
plt.plot([f(x) for x in scopt_cg_array], label=r"$f(x^{CG_{PR}}_k)$", linewidth=2)
plt.plot([f(x) for x in gd.get_convergence()], label=r"$f(x^{G}_k)$", linewidth=2)
plt.legend(loc="best", fontsize=20)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Function value", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
```
### Non-quadratic function
```
import numpy as np
import sklearn.datasets as skldata
import scipy.special as scspec
n = 300
m = 1000
X, y = skldata.make_classification(n_classes=2, n_features=n, n_samples=m, n_informative=n//3)
C = 1
def f(w):
return np.linalg.norm(w)**2 / 2 + C * np.mean(np.logaddexp(np.zeros(X.shape[0]), -y * X.dot(w)))
def grad_f(w):
denom = scspec.expit(-y * X.dot(w))
return w - C * X.T.dot(y * denom) / X.shape[0]
# f = lambda x: -np.sum(np.log(1 - A.T.dot(x))) - np.sum(np.log(1 - x*x))
# grad_f = lambda x: np.sum(A.dot(np.diagflat(1 / (1 - A.T.dot(x)))), axis=1) + 2 * x / (1 - np.power(x, 2))
x0 = np.zeros(n)
print("Initial function value = {}".format(f(x0)))
print("Initial gradient norm = {}".format(np.linalg.norm(grad_f(x0))))
```
#### Implementation of Fletcher-Reeves method
```
def ConjugateGradientFR(f, gradf, x0, num_iter=100, tol=1e-8, callback=None, restart=False):
    """Fletcher-Reeves nonlinear conjugate gradient method.

    Runs until the gradient norm falls below *tol* or *num_iter* iterations
    elapse.  *callback* (if given) receives each new iterate.  When
    *restart* is a positive integer, the search direction is reset to
    steepest descent every *restart* iterations.
    """
    x = x0
    gradient = gradf(x)
    direction = -gradient
    iteration = 0
    while np.linalg.norm(gradf(x)) > tol and iteration < num_iter:
        # Wolfe backtracking supplies the step length.
        alpha = utils.backtracking(x, direction, method="Wolfe", beta1=0.1, beta2=0.4, rho=0.5, f=f, grad_f=gradf)
        if alpha < 1e-18:
            # Line search collapsed; no progress possible.
            break
        x = x + alpha * direction
        if callback is not None:
            callback(x)
        next_gradient = gradf(x)
        # Fletcher-Reeves coefficient.
        fr_beta = next_gradient.dot(next_gradient) / gradient.dot(gradient)
        direction = fr_beta * direction - next_gradient
        gradient = next_gradient.copy()
        iteration += 1
        if restart and iteration % restart == 0:
            # Periodic restart: drop accumulated conjugacy information.
            gradient = gradf(x)
            direction = -gradient
    return x
```
#### Convergence plot
```
# Compare SciPy's Polak-Ribiere CG, Fletcher-Reeves CG (with and without
# restarts) and gradient descent on the logistic-regression objective.
import scipy.optimize as scopt
import liboptpy.restarts as restarts
n_restart = 60
tol = 1e-5
max_iter = 600
# Record SciPy-CG iterates via the shared callback helper.
scopt_cg_array = []
scopt_cg_callback = lambda x: callback(x, scopt_cg_array)
x = scopt.minimize(f, x0, tol=tol, method="CG", jac=grad_f, callback=scopt_cg_callback, options={"maxiter": max_iter})
x = x.x
print("\t CG by Polak-Ribiere")  # fixed typo: was "Polak-Rebiere"
print("Norm of gradient = {}".format(np.linalg.norm(grad_f(x))))  # fixed typo: was "garient"
print("Function value = {}".format(f(x)))
print("\t CG by Fletcher-Reeves")
cg_fr = methods.fo.ConjugateGradientFR(f, grad_f, ss.Backtracking("Wolfe", rho=0.9, beta1=0.1, beta2=0.4, init_alpha=1.))
x = cg_fr.solve(x0, tol=tol, max_iter=max_iter, disp=True)
print("\t CG by Fletcher-Reeves with restart n")
cg_fr_rest = methods.fo.ConjugateGradientFR(f, grad_f, ss.Backtracking("Wolfe", rho=0.9, beta1=0.1, beta2=0.4,
                                            init_alpha=1.), restarts.Restart(n // n_restart))
x = cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter, disp=True)
print("\t Gradient Descent")
gd = methods.fo.GradientDescent(f, grad_f, ss.Backtracking("Wolfe", rho=0.9, beta1=0.1, beta2=0.4, init_alpha=1.))
x = gd.solve(x0, max_iter=max_iter, tol=tol, disp=True)
# Gradient-norm convergence of all four solvers on one semilog plot.
plt.figure(figsize=(8, 6))
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_fr.get_convergence()], label=r"$\|f'(x_k)\|^{CG_{FR}}_2$ no restart", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_fr_rest.get_convergence()], label=r"$\|f'(x_k)\|^{CG_{FR}}_2$ restart", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in scopt_cg_array], label=r"$\|f'(x_k)\|^{CG_{PR}}_2$", linewidth=2)
plt.semilogy([np.linalg.norm(grad_f(x)) for x in gd.get_convergence()], label=r"$\|f'(x_k)\|^{G}_2$", linewidth=2)
plt.legend(loc="best", fontsize=16)
plt.xlabel(r"Iteration number, $k$", fontsize=20)
plt.ylabel("Convergence rate", fontsize=20)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
```
#### Running time
```
%timeit scopt.minimize(f, x0, method="CG", tol=tol, jac=grad_f, options={"maxiter": max_iter})
%timeit cg_fr.solve(x0, tol=tol, max_iter=max_iter)
%timeit cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter)
%timeit gd.solve(x0, tol=tol, max_iter=max_iter)
```
## Recap
1. Conjugate directions
2. Conjugate gradient method
3. Convergence
4. Experiments
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import seaborn as sns
from os.path import join
plt.style.use(["seaborn", "thesis"])
plt.rc("figure", figsize=(8,4))
```
# Dataset
```
from SCFInitialGuess.utilities.dataset import extract_triu_batch, AbstractDataset
from sklearn.model_selection import train_test_split
# fetch dataset
data_path = "../../../dataset/TSmall_sto3g/"
postfix = "TSmall_sto3g"
dim = 26
N_ELECTRONS = 30
#data_path = "../butadien/data/"
#postfix = ""
#dim = 26
def split(x, y, ind):
    """Split the paired sequences at *ind* into (train_x, train_y, test_x, test_y)."""
    head_x, tail_x = x[:ind], x[ind:]
    head_y, tail_y = y[:ind], y[ind:]
    return head_x, head_y, tail_x, tail_y
S = np.load(join(data_path, "S" + postfix + ".npy"))
P = np.load(join(data_path, "P" + postfix + ".npy"))
#F = np.load(join(data_path, "F" + postfix + ".npy"))
molecules = np.load(join(data_path, "molecules" + postfix + ".npy"))
ind = int(0.8 * len(molecules))
molecules = (molecules[:ind], molecules[ind:])
s_triu = extract_triu_batch(S, dim)
p_triu = extract_triu_batch(P, dim)
s_train, p_train, s_test, p_test = split(s_triu, p_triu, ind)
```
# Fetching Descriptor
```
import pickle
descriptor = pickle.load(open("../../../models/ButadienTDescriptor/descriptor.dump", "rb"))
mu, std = np.load("../../../models/ButadienTDescriptor/normalisation.npy")
from SCFInitialGuess.utilities.dataset import StaticDataset
dataset = StaticDataset(
train=(s_train, p_train),
validation=(None, None),
test=(s_test, p_test),
mu=0,
std=1
)
filepath = "../../../models/ButadienTDescriptor/model_descriptos_" + postfix + ".h5"
model = keras.models.load_model(filepath)
model.summary()
```
# Make Guess
```
from SCFInitialGuess.utilities.dataset import AbstractDataset
G = []
for mol in molecules[1]:
G.append(
descriptor.calculate_all_descriptors(mol).flatten()
)
G = np.asarray(G)
G_norm = AbstractDataset.normalize(G, mean=mu, std=std)[0]
G_norm.shape
p_nn = model.predict(G_norm)
p_nn.shape
```
# Analysis
```
from SCFInitialGuess.utilities.usermessages import Messenger as msg
msg.print_level = 0
from SCFInitialGuess.utilities.analysis import mf_initializer as mf_initializer
from SCFInitialGuess.utilities.analysis import make_results_str, measure_all_quantities
print(make_results_str(measure_all_quantities(
p_nn,
dataset,
molecules[1],
N_ELECTRONS,
mf_initializer,
dim,
is_triu=True,
is_dataset_triu=True
)))
12 / len(p_nn)
```
## Damping and DIIS
```
from SCFInitialGuess.utilities.analysis import mf_initializer_damping, measure_iterations, statistics
from SCFInitialGuess.utilities.dataset import make_matrix_batch
iterations = np.array(measure_iterations(
mf_initializer_damping,
make_matrix_batch(p_nn, dim, is_triu=True),
molecules[1]
))
print(statistics(iterations))
print(np.sum(iterations == 100))
print(statistics(iterations[iterations != 100]))
from SCFInitialGuess.utilities.analysis import mf_initializer_diis, measure_iterations, statistics
from SCFInitialGuess.utilities.dataset import make_matrix_batch
iterations = np.array(list(measure_iterations(
mf_initializer_diis,
make_matrix_batch(p_nn, dim, is_triu=True),
molecules[1]
)))
print(statistics(iterations))
print(np.sum(iterations == 100))
print(statistics(iterations[iterations != 100]))
```
| github_jupyter |
# Publications markdown generator for academicpages
Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data.
TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
## Data format
The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
- `excerpt` and `paper_url` can be blank, but the others must have values.
- `pub_date` must be formatted as YYYY-MM-DD.
- `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create).
```
!cat publications.tsv
```
## Import pandas
We are using the very handy pandas library for dataframes.
```
import pandas as pd
```
## Import TSV
Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
```
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
```
## Escape special characters
YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
```
# Characters that are unsafe inside YAML front matter, mapped to their HTML
# entities.  (The rendered notebook had collapsed the entities back to the
# literal characters, which made the escape a no-op and even broke the
# quoting of the '"' entry; restored per the prose above this cell.)
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}

def html_escape(text):
    """Replace &, double quote and single quote in *text* with HTML entities."""
    return "".join(html_escape_table.get(c, c) for c in text)
```
## Creating the markdown files
This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
```
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
# if len(str(item.paper_url)) > 5:
# md += "\npaperurl: '" + item.paper_url + "'"
# md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n<font color='#c41e3a'>[Download PDF here.](" + item.paper_url + ")</font>\n"
if len(str(item.excerpt)) > 5:
md += "\n**Abstract**:" + html_escape(item.excerpt) + "\n"
# md += "\nAbstract: " + html_escape(item.description) + "\n"
md += "\n**Recommended citation**: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
```
These files are in the publications directory, one directory below where we're working from.
```
!ls ../_publications/
!cat ../_publications/2009-10-01-paper-title-number-1.md
```
| github_jupyter |
## Extract v4 from Greengenes and build a blast DB
```
%%bash
export DATA=~/Data
export PAYCHECK_DATA=$DATA/paycheck
qiime tools import \
--input-path $DATA/gg_13_8_otus/rep_set/99_otus.fasta \
--output-path $PAYCHECK_DATA/ref/99_otus.qza --type FeatureData[Sequence]
qiime feature-classifier extract-reads \
--i-sequences $PAYCHECK_DATA/ref/99_otus.qza \
--p-f-primer GTGYCAGCMGCCGCGGTAA --p-r-primer GGACTACNVGGGTWTCTAAT \
--o-reads $PAYCHECK_DATA/ref/99_otus_v4.qza
qiime tools export $PAYCHECK_DATA/ref/99_otus_v4.qza --output-dir .
mv dna-sequences.fasta $PAYCHECK_DATA/ref/99_otus_v4.fasta
makeblastdb -in $PAYCHECK_DATA/ref/99_otus_v4.fasta -dbtype nucl \
-out $PAYCHECK_DATA/ref/99_otus_v4
```
## Download stool data
```
%%bash
export RAW_STOOL=$PAYCHECK_DATA/raw/stool
export CTX=Deblur-illumina-16S-v4-150nt-10d7e0
redbiom search metadata 'where sample_type == "stool"' > $RAW_STOOL/samples
redbiom search metadata 'where sample_type == "Stool"' >> $RAW_STOOL/samples
redbiom fetch samples --from $RAW_STOOL/samples --context $CTX --output $RAW_STOOL/sv.biom
%%bash
export PAYCHECK_DATA=~/Data/paycheck
export BLAST_DB=$PAYCHECK_DATA/ref/99_otus_v4
export REF_STOOL=$PAYCHECK_DATA/ref/stool
export RAW_STOOL=$PAYCHECK_DATA/raw/stool
biom table-ids --observations -i $RAW_STOOL/sv.biom | awk '{print ">"$1"blast_rocks\n"$1}' > $REF_STOOL/sv.fasta
blastn -num_threads 4 -query $REF_STOOL/sv.fasta -outfmt "6 qacc sacc" \
-db $BLAST_DB -max_target_seqs 1 -out $REF_STOOL/sv_map.blast
sed -i '' 's/blast_rocks//' $REF_STOOL/sv_map.blast
%%bash
export PAYCHECK_DATA=~/Data/paycheck
export REF_STOOL=$PAYCHECK_DATA/ref/stool
export RAW_STOOL=$PAYCHECK_DATA/raw/stool
qiime tools import --type FeatureTable[Frequency] --input-path $RAW_STOOL/sv.biom --output-path $REF_STOOL/sv.qza
qiime clawback sequence-variants-from-feature-table --i-table $REF_STOOL/sv.qza --o-sequences $REF_STOOL/sv_seqs.qza
qiime feature-classifier classify-sklearn --i-reads $REF_STOOL/sv_seqs.qza \
--i-classifier $PAYCHECK_DATA/ref/gg-13-8-99-515-806-nb-classifier.qza \
--o-classification $REF_STOOL/sv_map.qza --p-confidence -1
```
## Download soil data
```
%%bash
export RAW_SOIL=~/Data/paycheck/raw/soil
export CTX=Deblur-illumina-16S-v4-150nt-10d7e0
redbiom search metadata 'where sample_type in ("soil", "Soil")' > $RAW_SOIL/samples
redbiom fetch samples --from $RAW_SOIL/samples --context $CTX --output $RAW_SOIL/sv.biom
%%bash
export PAYCHECK_DATA=~/Data/paycheck
export BLAST_DB=$PAYCHECK_DATA/ref/99_otus_v4
export REF_SOIL=$PAYCHECK_DATA/ref/soil
export RAW_SOIL=$PAYCHECK_DATA/raw/soil
biom table-ids --observations -i $RAW_SOIL/sv.biom | awk '{print ">"$1"blast_rocks\n"$1}' > $REF_SOIL/sv.fasta
blastn -num_threads 4 -query $REF_SOIL/sv.fasta -outfmt "6 qacc sacc" \
-db $BLAST_DB -max_target_seqs 1 -out $REF_SOIL/sv_map.blast
sed -i '' 's/blast_rocks//' $REF_SOIL/sv_map.blast
%%bash
export PAYCHECK_DATA=~/Data/paycheck
export REF_SOIL=$PAYCHECK_DATA/ref/soil
export RAW_SOIL=$PAYCHECK_DATA/raw/soil
qiime tools import --type FeatureTable[Frequency] --input-path $RAW_SOIL/sv.biom --output-path $REF_SOIL/sv.qza
qiime clawback sequence-variants-from-feature-table --i-table $REF_SOIL/sv.qza --o-sequences $REF_SOIL/sv_seqs.qza
qiime feature-classifier classify-sklearn --i-reads $REF_SOIL/sv_seqs.qza \
--i-classifier $PAYCHECK_DATA/ref/gg-13-8-99-515-806-nb-classifier.qza \
--o-classification $REF_SOIL/sv_map.qza --p-confidence -1
```
### Download tear data
```
%%bash
export CTX=Deblur-illumina-16S-v4-150nt-10d7e0
export PAYCHECK_DATA=~/Data/paycheck
export REF=$PAYCHECK_DATA/ref
export BLAST_DB=$PAYCHECK_DATA/ref/99_otus_v4
export REF_TEARS=$PAYCHECK_DATA/ref/tears
export RAW_TEARS=$PAYCHECK_DATA/raw/tears
export TEARS=$PAYCHECK_DATA/tears
redbiom search metadata 'where sample_type in ("Tears",)' > $RAW_TEARS/samples
redbiom fetch samples --from $RAW_TEARS/samples --context $CTX --output $RAW_TEARS/sv.biom
biom table-ids --observations -i $RAW_TEARS/sv.biom | awk '{print ">"$1"blast_rocks\n"$1}' > $REF_TEARS/sv.fasta
blastn -num_threads 4 -query $REF_TEARS/sv.fasta -outfmt "6 qacc sacc" \
-db $BLAST_DB -max_target_seqs 1 -out $REF_TEARS/sv_map.blast
sed -i '' 's/blast_rocks//' $REF_TEARS/sv_map.blast
qiime tools import --type FeatureTable[Frequency] --input-path $RAW_TEARS/sv.biom --output-path $REF_TEARS/sv.qza
qiime clawback sequence-variants-from-feature-table --i-table $REF_TEARS/sv.qza --o-sequences $REF_TEARS/sv_seqs.qza
qiime feature-classifier classify-sklearn --i-reads $REF_TEARS/sv_seqs.qza \
--i-classifier $PAYCHECK_DATA/ref/gg-13-8-99-515-806-nb-classifier.qza \
--o-classification $REF_TEARS/sv_map.qza --p-confidence -1
qiime clawback generate-class-weights --i-reference-taxonomy $REF/99_tax.qza \
--i-reference-sequences $REF/99_otus_v4.qza \
--i-samples $REF_TEARS/sv.qza \
--i-taxonomy-classification $REF_TEARS/sv_map.qza \
--o-class-weight $TEARS/weights/weights-normalise-False-unobserved-weight-1e-06.qza
%%bash
export CTX=Deblur-illumina-16S-v4-150nt-10d7e0
export PAYCHECK_DATA=~/Data/paycheck
export REF=$PAYCHECK_DATA/ref
export REF_TEARS=$PAYCHECK_DATA/ref/tears_cb
export RAW_TEARS=$PAYCHECK_DATA/raw/tears_cb
export TEARS=$PAYCHECK_DATA/tears_cb
qiime clawback fetch-QIITA-samples --p-sample-type Tears --p-context $CTX --o-samples $REF_TEARS/sv.qza
qiime clawback sequence-variants-from-samples --i-samples $REF_TEARS/sv.qza --o-sequences $REF_TEARS/sv_seqs.qza
qiime feature-classifier classify-sklearn --i-reads $REF_TEARS/sv_seqs.qza \
--i-classifier $PAYCHECK_DATA/ref/gg-13-8-99-515-806-nb-classifier.qza \
--o-classification $REF_TEARS/sv_map.qza --p-confidence -1
qiime clawback generate-class-weights --i-reference-taxonomy $REF/99_tax.qza \
--i-reference-sequences $REF/99_otus_v4.qza \
--i-samples $REF_TEARS/sv.qza \
--i-taxonomy-classification $REF_TEARS/sv_map.qza \
--o-class-weight $TEARS/weights/weights-normalise-False-unobserved-weight-1e-06.qza
```
### Download vaginal data
```
%%bash
export CTX=Deblur-illumina-16S-v4-150nt-10d7e0
export PAYCHECK_DATA=~/Data/paycheck
export BLAST_DB=$PAYCHECK_DATA/ref/99_otus_v4
export REF=$PAYCHECK_DATA/ref
export REF_VAGINAL=$PAYCHECK_DATA/ref/vaginal
export RAW_VAGINAL=$PAYCHECK_DATA/raw/vaginal
export VAGINAL=$PAYCHECK_DATA/vaginal
qiime clawback fetch-QIITA-samples --p-sample-type vaginal --p-context $CTX --o-samples $REF_VAGINAL/sv.qza
qiime clawback sequence-variants-from-samples --i-samples $REF_VAGINAL/sv.qza --o-sequences $REF_VAGINAL/sv_seqs.qza
qiime feature-classifier classify-sklearn --i-reads $REF_VAGINAL/sv_seqs.qza \
--i-classifier $PAYCHECK_DATA/ref/gg-13-8-99-515-806-nb-classifier.qza \
--o-classification $REF_VAGINAL/sv_map.qza --p-confidence -1
qiime clawback generate-class-weights --i-reference-taxonomy $REF/99_tax.qza \
--i-reference-sequences $REF/99_otus_v4.qza \
--i-samples $REF_VAGINAL/sv.qza \
--i-taxonomy-classification $REF_VAGINAL/sv_map.qza \
--o-class-weight $VAGINAL/weights/weights-normalise-False-unobserved-weight-1e-06.qza
qiime tools export --output-dir $REF_VAGINAL $REF_VAGINAL/sv.qza
mv $REF_VAGINAL/feature-table.biom $REF_VAGINAL/sv.biom
biom table-ids --observations -i $RAW_VAGINAL/sv.biom | awk '{print ">"$1"blast_rocks\n"$1}' > $REF_VAGINAL/sv.fasta
blastn -num_threads 4 -query $REF_VAGINAL/sv.fasta -outfmt "6 qacc sacc" \
-db $BLAST_DB -max_target_seqs 1 -out $REF_VAGINAL/sv_map.blast
sed -i '' 's/blast_rocks//' $REF_VAGINAL/sv_map.blast
%%bash
export CTX=Deblur-illumina-16S-v4-150nt-10d7e0
export PAYCHECK_DATA=~/Data/paycheck
export REF=$PAYCHECK_DATA/ref
export REF_VAGINAL=$PAYCHECK_DATA/ref/vaginal
export RAW_VAGINAL=$PAYCHECK_DATA/raw/vaginal
export VAGINAL=$PAYCHECK_DATA/vaginal
export BLAST_DB=$PAYCHECK_DATA/ref/99_otus_v4
blastn -num_threads 4 -query $REF_VAGINAL/sv.fasta -outfmt "6 qacc sacc" \
-db $BLAST_DB -max_target_seqs 1 -out $REF_VAGINAL/sv_map.blast
sed -i '' 's/blast_rocks//' $REF_VAGINAL/sv_map.blast
```
### Download empo_3 data
```
%%bash
export CTX=Deblur-NA-illumina-16S-v4-100nt-fbc5b2
```
| github_jupyter |
# First Exploratory Notebook
## Used for Data Exploration in Listings Summary File
```
import pandas as pd
import numpy as np
import nltk
import sklearn
import string, re
import urllib
import seaborn as sbn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import OneHotEncoder,StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from nltk.corpus import stopwords
data = pd.read_csv('../../Data/2018/listings42018sum.csv')
data.head()
data['availability_365'].value_counts
data['reviews_per_month'] = data['reviews_per_month'].fillna(0)
ohe = OneHotEncoder(sparse=False)
neigh_group = ohe.fit_transform(data[['neighbourhood_group']])
neigh_group_cat = ohe.categories_
neigh = ohe.fit_transform(data[['neighbourhood']])
neigh_cat = ohe.categories_
room = ohe.fit_transform(data[['room_type']])
room_cat = ohe.categories_
def rename(name_of_columns, pre_addition):
    """Flatten a sequence of category groups into one list of prefixed names.

    ``OneHotEncoder.categories_`` is a list of arrays of category labels;
    this returns a flat list with *pre_addition* prepended to each label,
    suitable for use as dataframe column names.
    """
    # The original used `for x in name_of_columns: for x in x:`, shadowing
    # the group variable, and concatenated a pointless empty string; a
    # comprehension produces the identical output without either wart.
    return [pre_addition + name for group in name_of_columns for name in group]
new_neigh_group_cat = rename(neigh_group_cat,'neighbourhood_group: ')
new_neigh_cat = rename(neigh_cat,'neighbourhood: ')
new_room_cat = rename(room_cat,'room_type: ')
# Create categories for neighborhood_group, neighborhood and room_type
neigh_group_df = pd.DataFrame(data=neigh_group,columns=new_neigh_group_cat)
neigh_df = pd.DataFrame(data=neigh,columns=new_neigh_cat)
room_type_df = pd.DataFrame(data=room,columns = new_room_cat);
stopwords_list = stopwords.words('english') + list(string.punctuation)
vectorizer = TfidfVectorizer(strip_accents='unicode',stop_words=stopwords_list,min_df=10, ngram_range=(1,3))
# get rid of na in name column
data.fillna({'name':''}, inplace=True)
tf_idf = vectorizer.fit_transform(data['name'])
nlp_name = pd.DataFrame(tf_idf.toarray(), columns=vectorizer.get_feature_names())
clean_data = data.drop(columns = ['id','host_name','host_id','last_review','name','neighbourhood_group','neighbourhood','room_type'])
clean_data['longitude'] = clean_data['longitude'].round(decimals=5)
clean_data['latitude'] = clean_data['latitude'].round(decimals=5)
clean_data = pd.concat([clean_data,neigh_group_df,neigh_df,room_type_df,nlp_name],axis=1)
clean_data.head()
clean_data = clean_data[clean_data.price<800]
X = clean_data.drop(columns = ['price'])
y = clean_data['price']
ss = StandardScaler()
X_ss = ss.fit_transform(X)
Xtrain,Xtest,ytrain,ytest = train_test_split(X_ss,y)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(Xtrain,ytrain)
ypred = lr.predict(Xtest)
lr.score(Xtrain,ytrain)
list(zip(lr.coef_,X.columns))[:10]
lr.intercept_
lr.score(Xtest,ytest)
list(zip(lr.coef_,X.columns));
# plt.scatter(range(0,len(Xtrain[:,4])), ytrain, color = "red")
# plt.scatter(range(0,len(Xtrain[:,0])), ytrain, color = "green")
# plt.title("Linear Regression Training Set")
# plt.xlabel("Number of Reviews")
# plt.ylabel("Price")
# plt.show()
rfr = RandomForestRegressor(n_estimators=1000,min_samples_split=5,min_samples_leaf=3,random_state=42)
# n_estimators=100,min_samples_split=5,min_samples_leaf=4,random_state=42
rfr.fit(Xtrain,ytrain)
rfr.score(Xtrain,ytrain)
ypredtrain = rfr.predict(Xtrain)
ypredtest = rfr.predict(Xtest)
from sklearn.metrics import r2_score
print(r2_score(ytrain,ypredtrain))
print(r2_score(ytest,ypredtest))
print(r2_score(np.exp(ytest),np.exp(ypredtest)))
rfr_param_grid = {
'n_estimators': [10, 100, 1000],
'criterion': ['mse'],
'max_depth': [None, 10, 25, 50,100],
'min_samples_split': [2,5, 10],
'min_samples_leaf': [3,6]
}
rf_grid = GridSearchCV(rfr,rfr_param_grid,cv=3)
rf_grid.fit(Xtrain,ytrain)
rf_grid.best_params_
sorted(list(zip(rfr.feature_importances_,clean_data.columns)),reverse=True)[0:10]
list(zip(np.exp(ytest),np.exp(ypredtest)))[0:10]
# pc = PCA()
# pc.fit(nlp_name)
# pc.components_
# sorted(list(zip(pc.singular_values_,nlp_name.columns)),reverse=True);
# rfr.fit(nlp_name,y)
# rfr.score(nlp_name,y)
# sorted(list(zip(rfr.feature_importances_,nlp_name.columns)),reverse=True)
import seaborn as sbn
import statsmodels
import statsmodels.api as sm
import matplotlib.pyplot as plt
import scipy.stats as stats
fig, axes = plt.subplots(1,3, figsize=(21,6))
sbn.distplot(np.log1p(clean_data['price']), ax=axes[0])
axes[0].set_xlabel('log(1+price)')
sm.qqplot(np.log1p(clean_data['price']), stats.norm, fit=True, line='45', ax=axes[1])
sbn.scatterplot(x= clean_data['latitude'], y=clean_data['longitude'],hue=clean_data['price'],ax=axes[2]);
clean_data = clean_data[clean_data.price>20]
clean_data = clean_data[clean_data.price<800]
clean_data['price']
clean_data['log_price'] = np.log1p(clean_data['price'])
clean_data = clean_data[clean_data.minimum_nights<21]
clean_data.describe()
sbn.distplot(clean_data['minimum_nights'])
clean_data = clean_data[clean_data.number_of_reviews>0]
clean_data
list(zip(ytest,ypredtest));
room_type_df.sum()
clean_data[clean_data['room_type: Shared room']==1]
```
| github_jupyter |
# Modeling and Simulation in Python
Chapter 9
Copyright 2017 Allen Downey
License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
```
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import everything from SymPy.
from sympy import *
# Set up Jupyter notebook to display math.
init_printing()
```
The following displays SymPy expressions and provides the option of showing results in LaTeX format.
```
from sympy.printing import latex
def show(expr, show_latex=False):
    """Return *expr* unchanged, optionally printing its LaTeX source first.

    expr: SymPy expression
    show_latex: when True, print latex(expr) before returning
    """
    if not show_latex:
        return expr
    print(latex(expr))
    return expr
```
### Analysis with SymPy
Create a symbol for time.
```
t = symbols('t')
```
If you combine symbols and numbers, you get symbolic expressions.
```
expr = t + 1
```
The result is an `Add` object, which just represents the sum without trying to compute it.
```
type(expr)
```
`subs` can be used to replace a symbol with a number, which allows the addition to proceed.
```
expr.subs(t, 2)
```
`f` is a special class of symbol that represents a function.
```
f = Function('f')
```
The type of `f` is `UndefinedFunction`
```
type(f)
```
SymPy understands that `f(t)` means `f` evaluated at `t`, but it doesn't try to evaluate it yet.
```
f(t)
```
`diff` returns a `Derivative` object that represents the time derivative of `f`
```
dfdt = diff(f(t), t)
type(dfdt)
```
We need a symbol for `alpha`
```
alpha = symbols('alpha')
```
Now we can write the differential equation for proportional growth.
```
eq1 = Eq(dfdt, alpha*f(t))
```
And use `dsolve` to solve it. The result is the general solution.
```
solution_eq = dsolve(eq1)
```
We can tell it's a general solution because it contains an unspecified constant, `C1`.
In this example, finding the particular solution is easy: we just replace `C1` with `p_0`
```
C1, p_0 = symbols('C1 p_0')
particular = solution_eq.subs(C1, p_0)
```
In the next example, we have to work a little harder to find the particular solution.
### Solving the quadratic growth equation
We'll use the (r, K) parameterization, so we'll need two more symbols:
```
r, K = symbols('r K')
```
Now we can write the differential equation.
```
eq2 = Eq(diff(f(t), t), r * f(t) * (1 - f(t)/K))
```
And solve it.
```
solution_eq = dsolve(eq2)
```
The result, `solution_eq`, contains `rhs`, which is the right-hand side of the solution.
```
general = solution_eq.rhs
```
We can evaluate the right-hand side at $t=0$
```
at_0 = general.subs(t, 0)
```
Now we want to find the value of `C1` that makes `f(0) = p_0`.
So we'll create the equation `at_0 = p_0` and solve for `C1`. Because this is just an algebraic identity, not a differential equation, we use `solve`, not `dsolve`.
The result from `solve` is a list of solutions. In this case, [we have reason to expect only one solution](https://en.wikipedia.org/wiki/Picard%E2%80%93Lindel%C3%B6f_theorem), but we still get a list, so we have to use the bracket operator, `[0]`, to select the first one.
```
solutions = solve(Eq(at_0, p_0), C1)
type(solutions), len(solutions)
value_of_C1 = solutions[0]
```
Now in the general solution, we want to replace `C1` with the value of `C1` we just figured out.
```
particular = general.subs(C1, value_of_C1)
```
The result is complicated, but SymPy provides a method that tries to simplify it.
```
particular = simplify(particular)
```
Often simplicity is in the eye of the beholder, but that's about as simple as this expression gets.
Just to double-check, we can evaluate it at `t=0` and confirm that we get `p_0`
```
particular.subs(t, 0)
```
This solution is called the [logistic function](https://en.wikipedia.org/wiki/Population_growth#Logistic_equation).
In some places you'll see it written in a different form:
$f(t) = \frac{K}{1 + A e^{-rt}}$
where $A = (K - p_0) / p_0$.
We can use SymPy to confirm that these two forms are equivalent. First we represent the alternative version of the logistic function:
```
A = (K - p_0) / p_0
logistic = K / (1 + A * exp(-r*t))
```
To see whether two expressions are equivalent, we can check whether their difference simplifies to 0.
```
simplify(particular - logistic)
```
This test only works one way: if SymPy says the difference reduces to 0, the expressions are definitely equivalent (and not just numerically close).
But if SymPy can't find a way to simplify the result to 0, that doesn't necessarily mean there isn't one. Testing whether two expressions are equivalent is a surprisingly hard problem; in fact, there is no algorithm that can solve it in general.
### Exercises
**Exercise:** Solve the quadratic growth equation using the alternative parameterization
$\frac{df(t)}{dt} = \alpha f(t) + \beta f^2(t) $
```
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
```
**Exercise:** Use [WolframAlpha](https://www.wolframalpha.com/) to solve the quadratic growth model, using either or both forms of parameterization:
df(t) / dt = alpha f(t) + beta f(t)^2
or
df(t) / dt = r f(t) (1 - f(t)/K)
Find the general solution and also the particular solution where `f(0) = p_0`.
| github_jupyter |
```
import os
import sys
import numpy as np
import pandas as pd
from geopy import distance
import json
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore')
tf.compat.v1.disable_eager_execution()
```
# Declare Current Directory
```
root_path = os.path.abspath(os.path.join('..'))
```
# Read Dataset
```
FILE_DIR = 'datasets/kotlite_driver_dataset_KWB_with_ket.csv'
df = pd.read_csv(os.path.join(root_path, FILE_DIR))
df.describe(include='all')
df
```
# Build the function
## Route parser
```
def route_parser(data):
    """Expand one driver row into ``(driver_id, ordered route points)``.

    The route starts at the trip origin (columns 2-3), continues through
    the JSON-encoded waypoints stored in column 6, and ends at the
    destination (columns 4-5). Each point is a ``[lat, lon]`` pair.
    """
    driver_id = data[0]
    origin = [data[2], data[3]]
    destination = [data[4], data[5]]
    waypoints = list(json.loads(data[6]))
    return driver_id, [origin] + waypoints + [destination]
```
## Recommendation System
```
class NearestNeighbor():
    """k-nearest-neighbour matcher built on TF1-style graph execution.

    Computes the Manhattan distance between every query point and every
    training point, then keeps the ``k`` nearest training points for each
    query. Results are exposed through ``self.values`` (distances) and
    ``self.indices`` (row positions in the training data), both flattened.

    NOTE(review): requires eager execution to be disabled
    (``tf.compat.v1.disable_eager_execution()``), as done at the top of
    this notebook.

    Fixes over the previous revision:
    - ``fit`` assigned the data to ``self.train``/``self.query``, which
      shadowed the ``train()`` method and made it uncallable; the data
      attributes are now ``train_data``/``query_data``.
    - ``fit_train`` called a nonexistent ``self.transform()``; the compute
      step is now named ``transform`` with ``train`` kept as an alias, and
      ``fit_transform`` (used by the notebook below) is provided.
    """

    def __init__(self, k=1):
        # No tf.Variables are created, so this initializer is a no-op,
        # but it is kept so the session can run it safely.
        self.init = tf.compat.v1.global_variables_initializer()
        # Number of nearest neighbours to keep per query point.
        self.k = k
        # Data (set by fit)
        self.train_data = None
        self.query_data = None
        # Graph inputs
        self.xtr = None
        self.xqe = None
        # Outputs
        self.values = None
        self.indices = None
        self.result = self.values, self.indices

    def fit(self, train, query):
        """Store the data and build matching graph placeholders."""
        self.train_data = train
        self.query_data = query
        self.xtr = tf.compat.v1.placeholder('float', [None, len(train[0])])
        self.xqe = tf.compat.v1.placeholder('float', [None, len(query[0])])

    def transform(self):
        """Run the nearest-neighbour computation and store the results."""
        # Manhattan distance; broadcasting gives shape (n_query, n_train).
        distance = tf.reduce_sum(
            tf.abs(tf.subtract(self.xtr, tf.expand_dims(self.xqe, axis=1))),
            axis=2)
        # top_k finds maxima, so negate distances to select the nearest.
        values, indices = tf.nn.top_k(tf.negative(distance), k=self.k)
        values = tf.negative(values)
        with tf.compat.v1.Session() as sess:
            sess.run(self.init)
            self.values, self.indices = sess.run(
                [values, indices],
                feed_dict={self.xtr: self.train_data,
                           self.xqe: self.query_data})
        self.values = self.values.reshape(-1)
        self.indices = self.indices.reshape(-1)
        self.result = self.values, self.indices

    def fit_transform(self, train, query):
        """Fit on the data and immediately compute the neighbours."""
        self.fit(train, query)
        self.transform()

    # Backwards-compatible aliases for the method names used previously.
    train = transform
    fit_train = fit_transform
# Candidate driver routes, plus the passenger's pickup and drop-off points.
data = df.copy()
query = [[-7.8838611,112.5381295], [-7.8786821,112.524145]] # from MAN 1 Batu to GOR Gajah Mada
# For every driver route, find the route point nearest the pickup and the
# one nearest the drop-off; keep the route only when the pickup point
# comes before the drop-off point along the route direction.
dist = []
for dt in data.values:
    idx, route = route_parser(dt)
    model = NearestNeighbor()
    # NOTE(review): confirm NearestNeighbor exposes fit_transform —
    # the class above defines fit/fit_train; verify the alias exists.
    model.fit_transform(route, query)
    if model.indices[0] < model.indices[1]:
        dist.append((model.values[0] + model.values[1], idx,
                     route[model.indices[0]], route[model.indices[1]]))
# Rank candidates by combined (pickup + drop-off) Manhattan distance, then
# keep only those whose geodesic pickup/drop-off distances are <= 0.7 km.
sorted_dist = sorted(dist)
recommendation = []
for sd in sorted_dist:
    # geopy geodesic distance, in kilometres
    pick_dist = distance.distance(query[0], sd[2]).km
    drop_dist = distance.distance(query[1], sd[3]).km
    if (pick_dist <= 0.7) & (drop_dist <= 0.7):
        recommendation.append(sd[1])
# Recommended driver ids, then the matching rows of the dataset.
recommendation
df[df['driver_id'].isin(recommendation)]
```
# Result Analysis
For passengers who want to depart from MAN 1 Kota Batu at `(-7.8838611,112.5381295)` for GOR Gajah Mada Kota Batu at `(-7.8786821,112.524145)`, using a threshold of 0.7 km from the nearest point, the system recommends 2 drivers who have a similar route, namely the drivers with ids `[11,19]`. We want to see how effective the system is at providing driver recommendations to passengers; in this case we test with Google Maps to assess the system. The test uses a scenario in which the driver picks up the passenger and drops them off before continuing to the driver's own final destination.
## Passenger with Driver_id 11
The driver with ID 11 will travel from his home in the Dau District area, heading to his workplace at the Transportation Museum. Here is the route Google Maps suggests for the driver to get to work.

It can be seen that if you go alone, the driver with ID 11 will be estimated to cover 12.1 km with an estimated time of 27 minutes.

However, if the driver with ID 11 picks up passengers and delivers them to the passenger's destination, the distance covered will be 12.5 km with an estimated travel time of 28 minutes. This shows the system can find and recommend drivers who are heading in the same direction as the passenger. The recommendations are also not burdensome or detrimental to drivers, because the maximum pick-up distance is limited to 0.7 km from the point provided by the maps.
| github_jupyter |
##### Copyright 2018 The TF-Agents Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Drivers
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/agents/tutorials/4_drivers_tutorial">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/4_drivers_tutorial.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/4_drivers_tutorial.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/4_drivers_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Introduction
A common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation and generating a video of the agent.
While this is relatively straightforward to write in python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. Therefore we abstract this notion of a run loop into a class called `driver`, and provide well tested implementations both in Python and TensorFlow.
Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc.
## Setup
If you haven't installed tf-agents or gym yet, run:
```
!pip install --pre tf-agents[reverb]
!pip install gym
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
tf.compat.v1.enable_v2_behavior()
```
## Python Drivers
The `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: The number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.
The implementation is roughly as follows:
```python
class PyDriver(object):
    """Illustrative (simplified) sketch of the tf_agents PyDriver.

    Steps ``env`` with actions from ``policy``, broadcasting each
    Trajectory to every observer, until ``max_steps`` environment steps
    or ``max_episodes`` episodes have been collected.
    """

    def __init__(self, env, policy, observers, max_steps=1, max_episodes=1):
        self._env = env
        self._policy = policy
        self._observers = observers or []
        # Passing 0/None disables the corresponding termination criterion.
        self._max_steps = max_steps or np.inf
        self._max_episodes = max_episodes or np.inf

    def run(self, time_step, policy_state=()):
        """Run the policy from ``time_step`` until a limit is reached.

        Returns the final ``(time_step, policy_state)`` so a later call
        can resume collection where this one stopped.
        """
        num_steps = 0
        num_episodes = 0
        while num_steps < self._max_steps and num_episodes < self._max_episodes:
            # Compute an action using the policy for the given time_step
            action_step = self._policy.action(time_step, policy_state)

            # Apply the action to the environment and get the next step
            next_time_step = self._env.step(action_step.action)

            # Package information into a trajectory
            traj = trajectory.Trajectory(
               time_step.step_type,
               time_step.observation,
               action_step.action,
               action_step.info,
               next_time_step.step_type,
               next_time_step.reward,
               next_time_step.discount)

            for observer in self._observers:
                observer(traj)

            # Update statistics to check termination
            num_episodes += np.sum(traj.is_last())
            # Boundary steps (episode resets) do not count as env steps.
            num_steps += np.sum(~traj.is_boundary())

            time_step = next_time_step
            policy_state = action_step.state

        return time_step, policy_state
```
Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
```
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
```
## TensorFlow Drivers
We also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers etc. We currently have 2 TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the DynamicEpisode in action.
```
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
```
| github_jupyter |
# ART1 demo
Adaptive Resonance Theory Neural Networks
by Aman Ahuja | github.com/amanahuja | twitter: @amanqa
## Overview
Reminders:
* ART1 accepts binary inputs only.
*
In this example:
* We'll use 10x10 ASCII blocks to demonstrate
### [Load data]
```
import numpy as np
data = np.array([" O ",
" O O",
" O",
" O O",
" O",
" O O",
" O",
" OO O",
" OO ",
" OO O",
" OO ",
"OOO ",
"OO ",
"O ",
"OO ",
"OOO ",
"OOOO ",
"OOOOO",
"O ",
" O ",
" O ",
" O ",
" O",
" O O",
" OO O",
" OO ",
"OOO ",
"OO ",
"OOOO ",
"OOOOO"])
## Simplified ART1
class ART1:
    """
    ART1 network: unsupervised clustering of binary input vectors.

    modified Aman Ahuja

    Usage example:
    --------------

    # Create an ART network with input of size 5 and 10 internal units
    >>> network = ART1(5, 10, 0.5)
    """

    def __init__(self, n=5, m=10, rho=.5):
        '''
        Create network with specified shape.

        For an input array I of size n, we need n input nodes in F1.

        Parameters:
        -----------
        n : int
            feature dimension of input; number of nodes in F1
        m : int
            number of neurons in the F2 competition layer
            (max number of categories; compare to n_class)
        rho : float
            vigilance parameter
            larger rho: less inclusive prototypes
            smaller rho: more generalization

        Internal parameters
        -------------------
        F1 : array of size (n)   -- comparison-layer neurons
        F2 : array of size (m)   -- recognition-layer neurons
        Wf : array of shape (m x n) -- feed-forward weights (the Tk)
        Wb : array of shape (n x m) -- feed-back weights
        n_cats : int
            number of F2 neurons that are active
            (at any given time, number of category templates)
        '''
        # Comparison layer
        self.F1 = np.ones(n)
        # Recognition layer
        self.F2 = np.ones(m)
        # Feed-forward weights
        self.Wf = np.random.random((m, n))
        # Feed-back weights
        self.Wb = np.random.random((n, m))
        # Vigilance parameter
        self.rho = rho
        # Number of active units in F2
        self.n_cats = 0

    def reset(self):
        """Reset the whole network to its start conditions.

        Layer sizes are recovered from the existing arrays; the previous
        revision referenced the undefined names ``n`` and ``m`` here and
        raised a NameError.
        """
        n, m = self.F1.size, self.F2.size
        self.F1 = np.ones(n)
        self.F2 = np.ones(m)
        self.Wf = np.random.random((m, n))
        self.Wb = np.random.random((n, m))
        self.n_cats = 0

    def learn(self, X):
        """Present pattern X to the network and learn it.

        Uses i as index over inputs (F1) and k as index over categories
        (F2). Returns ``(feed-back weight column, category index)``, or
        ``(None, None)`` when all categories are taken and none matches.
        """
        # Compute F2 output using feed-forward weights
        self.F2[...] = np.dot(self.Wf, X)
        # Collect and sort the output of each active node, best first
        C = np.argsort(self.F2[:self.n_cats].ravel())[::-1]
        for k in C:
            # Match between stored memory k and the input
            d = (self.Wb[:, k] * X).sum() / X.sum()
            # Check if d is above the vigilance level
            if d >= self.rho:
                return self._learn_data(k, X)
        # No match found within the vigilance level.
        # If there's room, activate a new unit and let it learn the data.
        if self.n_cats < self.F2.size:
            k = self.n_cats  # index of the next free category
            ww = self._learn_data(k, X)
            self.n_cats += 1
            return ww
        else:
            return None, None

    def _learn_data(self, node, dat):
        """
        Update the weights of one category.

        node : k : F2 node (category index)
        dat  : X : input data (binary vector)
        """
        self._validate_data(dat)
        # Learn data: intersect the stored template with the input,
        # then renormalize the feed-forward weights.
        self.Wb[:, node] *= dat
        self.Wf[node, :] = self.Wb[:, node] / (0.5 + self.Wb[:, node].sum())
        return self.Wb[:, node], node

    def predict(self, X):
        """Return the index of the best-matching active category, or None."""
        C = np.dot(self.Wf[:self.n_cats], X)
        # Return the winning F2 node, unless none are active
        if np.all(C == 0):
            return None
        return np.argmax(C)

    def _validate_data(self, dat):
        """
        Validate a single input record.

        Checks: dimensions must match F1, and data must be 1s and 0s.
        """
        pass_checks = True
        msg = ""
        # Dimensions must match
        if dat.shape[0] != len(self.F1):
            pass_checks = False
            msg = "Input dimensions mismatch."
        # Data must be 1s or 0s
        if not np.all((dat == 1) | (dat == 0)):
            pass_checks = False
            msg = "Input must be binary."
        if pass_checks:
            return True
        else:
            raise Exception("Data does not validate: {}".format(msg))
```
## Cleaning and Preprocessing
```
"""
Helper function
"""
from collections import Counter
def preprocess_data(data):
    """
    Convert an iterable of fixed-width strings into a binary numpy array.

    The two distinct characters used in the data are detected from the
    rows themselves; positions holding the second-seen character map to 1
    and positions holding the first-seen character map to 0 (matching the
    original reverse=True convention).

    Fix over the previous revision: the character-detection retry loop
    only ever examined the first row (and a (False, False) tuple is
    truthy anyway), so data whose first row lacked both characters was
    silently converted to all zeros. We now scan rows until one contains
    both characters, and raise if none does.
    """
    char1 = char2 = None
    for row in data:
        counts = Counter(row)
        if len(counts) > 2:
            raise Exception("Data is not binary")
        if len(counts) == 2:
            # Second-seen character becomes the "1" symbol (reverse order).
            char2, char1 = counts.keys()
            break
    if char1 is None:
        raise Exception("Data does not contain two distinct characters")

    outdata = []
    for row in data:
        # 1 where the row holds the "on" character, 0 elsewhere.
        outdata.append([x == char1 for x in row])

    return np.array(outdata).astype(int)
def get_unique_chars(irow, reverse=False):
    """Return the two distinct characters appearing in *irow*.

    Helper function. Characters come back in first-seen order unless
    *reverse* is set, in which case the pair is swapped. Returns
    ``(False, False)`` when the row holds fewer than two distinct
    characters; raises when it holds more than two.
    """
    # dict.fromkeys keeps first-seen order, like Counter's keys.
    uniques = list(dict.fromkeys(irow))
    if len(uniques) > 2:
        raise Exception("Data is not binary")
    if len(uniques) < 2:
        # Row doesn't contain both characters; caller may try another row.
        return False, False
    first, second = uniques
    return (second, first) if reverse else (first, second)
```
## DO
```
# Build the network: 5 input features, at most 7 categories.
network = ART1(n=5, m=7, rho=0.5)

# preprocess data
data_cleaned = preprocess_data(data)

# learn data array, row by row
for row in data_cleaned:
    network.learn(row)

# Python 3 print function (the original used Python 2 print statements,
# which are a SyntaxError under the Python 3 kernel used elsewhere here).
print("n categories: ", network.n_cats)
```
#### predictions
Let's see the clusters created through training:
```
# Number of cluster templates created during training.
network.n_cats

from collections import defaultdict

# Group the original (raw) rows by the cluster each one is assigned to.
output_dict = defaultdict(list)
for row, row_cleaned in zip(data, data_cleaned):
    pred = network.predict(row_cleaned)
    output_dict[pred].append(row)

# Print each cluster id followed by the rows it contains.
# (Python 3: dict.iteritems() no longer exists; use items().)
for k, v in output_dict.items():
    print(k)
    print('-----')
    for row in v:
        print(row)
    print()
```
### Look at the weights as patterns
```
# Visualize each learned cluster template as a binary pattern.
cluster_units = network.Wf[:network.n_cats]
for idx, CU_weights in enumerate(cluster_units):
    # Nonzero feed-forward weights mark the active positions of the template.
    pattern = CU_weights.astype(bool)
    print("Pattern #{}".format(idx))
    print(pattern.astype(int))
    print()
```
| github_jupyter |
```
### MODULE 1
### Basic Modeling in scikit-learn
```
```
### Seen vs. unseen data
# The model is fit using X_train and y_train
model.fit(X_train, y_train)
# Create vectors of predictions
train_predictions = model.predict(X_train)
test_predictions = model.predict(X_test)
# Train/Test Errors
train_error = mae(y_true=y_train, y_pred=train_predictions)
test_error = mae(y_true=y_test, y_pred=test_predictions)
# Print the accuracy for seen and unseen data
print("Model error on seen data: {0:.2f}.".format(train_error))
print("Model error on unseen data: {0:.2f}.".format(test_error))
# Set parameters and fit a model
# Set the number of trees
rfr.n_estimators = 1000
# Add a maximum depth
rfr.max_depth = 6
# Set the random state
rfr.random_state = 11
# Fit the model
rfr.fit(X_train, y_train)
## Feature importances
# Fit the model using X and y
rfr.fit(X_train, y_train)
# Print how important each column is to the model
for i, item in enumerate(rfr.feature_importances_):
# Use i and item to print out the feature importance of each column
print("{0:s}: {1:.2f}".format(X_train.columns[i], item))
### Classification predictions
# Fit the rfc model.
rfc.fit(X_train, y_train)
# Create arrays of predictions
classification_predictions = rfc.predict(X_test)
probability_predictions = rfc.predict_proba(X_test)
# Print out count of binary predictions
print(pd.Series(classification_predictions).value_counts())
# Print the first value from probability_predictions
print('The first predicted probabilities are: {}'.format(probability_predictions[0]))
## Reusing model parameters
rfc = RandomForestClassifier(n_estimators=50, max_depth=6, random_state=1111)
# Print the classification model
print(rfc)
# Print the classification model's random state parameter
print('The random state is: {}'.format(rfc.random_state))
# Print all parameters
print('Printing the parameters dictionary: {}'.format(rfc.get_params()))
## Random forest classifier
from sklearn.ensemble import RandomForestClassifier
# Create a random forest classifier
rfc = RandomForestClassifier(n_estimators=50, max_depth=6, random_state=1111)
# Fit rfc using X_train and y_train
rfc.fit(X_train, y_train)
# Create predictions on X_test
predictions = rfc.predict(X_test)
print(predictions[0:5])
# Print model accuracy using score() and the testing data
print(rfc.score(X_test, y_test))
## MODULE 2
## Validation Basics
```
```
## Create one holdout set
# Create dummy variables using pandas
X = pd.get_dummies(tic_tac_toe.iloc[:,0:9])
y = tic_tac_toe.iloc[:, 9]
# Create training and testing datasets. Use 10% for the test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.1, random_state=1111)
## Create two holdout sets
# Create temporary training and final testing datasets
X_temp, X_test, y_temp, y_test =\
train_test_split(X, y, test_size=.2, random_state=1111)
# Create the final training and validation datasets
X_train, X_val, y_train, y_val = train_test_split(X_temp, y_temp, test_size=.25, random_state=1111)
### Mean absolute error
from sklearn.metrics import mean_absolute_error
# Manually calculate the MAE
n = len(predictions)
mae_one = sum(abs(y_test - predictions)) / n
print('With a manual calculation, the error is {}'.format(mae_one))
# Use scikit-learn to calculate the MAE
mae_two = mean_absolute_error(y_test, predictions)
print('Using scikit-lean, the error is {}'.format(mae_two))
# <script.py> output:
# With a manual calculation, the error is 5.9
# Using scikit-lean, the error is 5.9
### Mean squared error
from sklearn.metrics import mean_squared_error
n = len(predictions)
# Finish the manual calculation of the MSE
mse_one = sum(abs(y_test - predictions)**2) / n
print('With a manual calculation, the error is {}'.format(mse_one))
# Use the scikit-learn function to calculate MSE
mse_two = mean_squared_error(y_test, predictions)
print('Using scikit-lean, the error is {}'.format(mse_two))
### Performance on data subsets
# Find the East conference teams
east_teams = labels == "E"
# Create arrays for the true and predicted values
true_east = y_test[east_teams]
preds_east = predictions[east_teams]
# Print the accuracy metrics
print('The MAE for East teams is {}'.format(
mae(true_east, preds_east)))
# Print the West accuracy
print('The MAE for West conference is {}'.format(west_error))
### Confusion matrices
# Calculate and print the accuracy
accuracy = (324 + 491) / (953)
print("The overall accuracy is {0: 0.2f}".format(accuracy))
# Calculate and print the precision
precision = (491) / (491 + 15)
print("The precision is {0: 0.2f}".format(precision))
# Calculate and print the recall
recall = (491) / (491 + 123)
print("The recall is {0: 0.2f}".format(recall))
### Confusion matrices, again
from sklearn.metrics import confusion_matrix
# Create predictions
test_predictions = rfc.predict(X_test)
# Create and print the confusion matrix
cm = confusion_matrix(y_test, test_predictions)
print(cm)
# Print the true positives (actual 1s that were predicted 1s)
print("The number of true positives is: {}".format(cm[1, 1]))
## <script.py> output:
## [[177 123]
## [ 92 471]]
## The number of true positives is: 471
## Row 1, column 1 represents the number of actual 1s that were predicted 1s (the true positives).
## Always make sure you understand the orientation of the confusion matrix before you start using it!
### Precision vs. recall
from sklearn.metrics import precision_score
test_predictions = rfc.predict(X_test)
# Create precision or recall score based on the metric you imported
score = precision_score(y_test, test_predictions)
# Print the final result
print("The precision value is {0:.2f}".format(score))
### Error due to under/over-fitting
# Update the rfr model
rfr = RandomForestRegressor(n_estimators=25,
random_state=1111,
max_features=2)
rfr.fit(X_train, y_train)
# Print the training and testing accuracies
print('The training error is {0:.2f}'.format(
mae(y_train, rfr.predict(X_train))))
print('The testing error is {0:.2f}'.format(
mae(y_test, rfr.predict(X_test))))
## <script.py> output:
## The training error is 3.88
## The testing error is 9.15
# Update the rfr model
rfr = RandomForestRegressor(n_estimators=25,
random_state=1111,
max_features=11)
rfr.fit(X_train, y_train)
# Print the training and testing accuracies
print('The training error is {0:.2f}'.format(
mae(y_train, rfr.predict(X_train))))
print('The testing error is {0:.2f}'.format(
mae(y_test, rfr.predict(X_test))))
## <script.py> output:
## The training error is 3.57
## The testing error is 10.05
# Update the rfr model
rfr = RandomForestRegressor(n_estimators=25,
random_state=1111,
max_features=4)
rfr.fit(X_train, y_train)
# Print the training and testing accuracies
print('The training error is {0:.2f}'.format(
mae(y_train, rfr.predict(X_train))))
print('The testing error is {0:.2f}'.format(
mae(y_test, rfr.predict(X_test))))
## <script.py> output:
## The training error is 3.60
## The testing error is 8.79
### Am I underfitting?
from sklearn.metrics import accuracy_score
test_scores, train_scores = [], []
for i in [1, 2, 3, 4, 5, 10, 20, 50]:
rfc = RandomForestClassifier(n_estimators=i, random_state=1111)
rfc.fit(X_train, y_train)
# Create predictions for the X_train and X_test datasets.
train_predictions = rfc.predict(X_train)
test_predictions = rfc.predict(X_test)
# Append the accuracy score for the test and train predictions.
train_scores.append(round(accuracy_score(y_train, train_predictions), 2))
test_scores.append(round(accuracy_score(y_test, test_predictions), 2))
# Print the train and test scores.
print("The training scores were: {}".format(train_scores))
print("The testing scores were: {}".format(test_scores))
### MODULE 3
### Cross Validation
```
```
### Two samples
# Create two different samples of 200 observations
sample1 = tic_tac_toe.sample(200, random_state=1111)
sample2 = tic_tac_toe.sample(200, random_state=1171)
# Print the number of common observations
print(len([index for index in sample1.index if index in sample2.index]))
# Print the number of observations in the Class column for both samples
print(sample1['Class'].value_counts())
print(sample2['Class'].value_counts())
### scikit-learn's KFold()
from sklearn.model_selection import KFold
# Use KFold
kf = KFold(n_splits=5, shuffle=True, random_state=1111)
# Create splits
splits = kf.split(X)
# Print the number of indices
for train_index, val_index in splits:
print("Number of training indices: %s" % len(train_index))
print("Number of validation indices: %s" % len(val_index))
### Using KFold indices
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
rfc = RandomForestRegressor(n_estimators=25, random_state=1111)
# Access the training and validation indices of splits
for train_index, val_index in splits:
# Setup the training and validation data
X_train, y_train = X[train_index], y[train_index]
X_val, y_val = X[val_index], y[val_index]
# Fit the random forest model
rfc.fit(X_train, y_train)
# Make predictions, and print the accuracy
predictions = rfc.predict(X_val)
print("Split accuracy: " + str(mean_squared_error(y_val, predictions)))
### scikit-learn's methods
# Instruction 1: Load the cross-validation method
from sklearn.model_selection import cross_val_score
# Instruction 2: Load the random forest regression model
from sklearn.ensemble import RandomForestClassifier
# Instruction 3: Load the mean squared error method
# Instruction 4: Load the function for creating a scorer
from sklearn.metrics import mean_squared_error, make_scorer
## It is easy to see how all of the methods can get mixed up, but
## it is important to know the names of the methods you need.
## You can always review the scikit-learn documentation should you need any help
### Implement cross_val_score()
rfc = RandomForestRegressor(n_estimators=25, random_state=1111)
mse = make_scorer(mean_squared_error)
# Set up cross_val_score
cv = cross_val_score(estimator=rfc,
X=X_train,
y=y_train,
cv=10,
scoring=mse)
# Print the mean error
print(cv.mean())
### Leave-one-out-cross-validation
from sklearn.metrics import mean_absolute_error, make_scorer
# Create scorer
mae_scorer = make_scorer(mean_absolute_error)
rfr = RandomForestRegressor(n_estimators=15, random_state=1111)
# Implement LOOCV
scores = cross_val_score(estimator=rfr, X=X, y=y, cv=85, scoring=mae_scorer)
# Print the mean and standard deviation
print("The mean of the errors is: %s." % np.mean(scores))
print("The standard deviation of the errors is: %s." % np.std(scores))
### MODULE 4
### Selecting the best model with Hyperparameter tuning.
```
```
### Creating Hyperparameters
# Review the parameters of rfr
print(rfr.get_params())
# Maximum Depth
max_depth = [4, 8, 12]
# Minimum samples for a split
min_samples_split = [2, 5, 10]
# Max features
max_features = [4, 6, 8, 10]
### Running a model using ranges
from sklearn.ensemble import RandomForestRegressor
# Fill in rfr using your variables
rfr = RandomForestRegressor(
n_estimators=100,
max_depth=random.choice(max_depth),
min_samples_split=random.choice(min_samples_split),
max_features=random.choice(max_features))
# Print out the parameters
print(rfr.get_params())
### Preparing for RandomizedSearch
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import make_scorer, mean_squared_error
# Finish the dictionary by adding the max_depth parameter
param_dist = {"max_depth": [2, 4, 6, 8],
"max_features": [2, 4, 6, 8, 10],
"min_samples_split": [2, 4, 8, 16]}
# Create a random forest regression model
rfr = RandomForestRegressor(n_estimators=10, random_state=1111)
# Create a scorer to use (use the mean squared error)
scorer = make_scorer(mean_squared_error)
# Import the method for random search
from sklearn.model_selection import RandomizedSearchCV
# Build a random search using param_dist, rfr, and scorer
random_search =\
RandomizedSearchCV(
estimator=rfr,
param_distributions=param_dist,
n_iter=10,
cv=5,
scoring=scorer)
### Selecting the best precision model
from sklearn.metrics import precision_score, make_scorer
# Create a precision scorer
precision = make_scorer(precision_score)
# Finalize the random search
rs = RandomizedSearchCV(
estimator=rfc, param_distributions=param_dist,
scoring = precision,
cv=5, n_iter=10, random_state=1111)
rs.fit(X, y)
# print the mean test scores:
print('The accuracy for each run was: {}.'.format(rs.cv_results_['mean_test_score']))
# print the best model score:
print('The best accuracy for a single model was: {}'.format(rs.best_score_))
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Time windows
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c04_time_windows.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c04_time_windows.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
## Setup
```
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# Use the %tensorflow_version magic if in colab.
%tensorflow_version 2.x
except Exception:
pass
tf.enable_v2_behavior()
```
## Time Windows
First, we will train a model to forecast the next step given the previous 20 steps, therefore, we need to create a dataset of 20-step windows for training.
```
dataset = tf.data.Dataset.range(10)
for val in dataset:
print(val.numpy())
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1)
for window_dataset in dataset:
for val in window_dataset:
print(val.numpy(), end=" ")
print()
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
for window_dataset in dataset:
for val in window_dataset:
print(val.numpy(), end=" ")
print()
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(5))
for window in dataset:
print(window.numpy())
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(5))
dataset = dataset.map(lambda window: (window[:-1], window[-1:]))
for x, y in dataset:
print(x.numpy(), y.numpy())
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(5))
dataset = dataset.map(lambda window: (window[:-1], window[-1:]))
dataset = dataset.shuffle(buffer_size=10)
for x, y in dataset:
print(x.numpy(), y.numpy())
dataset = tf.data.Dataset.range(10)
dataset = dataset.window(5, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(5))
dataset = dataset.map(lambda window: (window[:-1], window[-1:]))
dataset = dataset.shuffle(buffer_size=10)
dataset = dataset.batch(2).prefetch(1)
for x, y in dataset:
print("x =", x.numpy())
print("y =", y.numpy())
def window_dataset(series, window_size, batch_size=32,
                   shuffle_buffer=1000):
    """Build a shuffled tf.data pipeline of (window, next_value) pairs.

    Each example is `window_size` consecutive values as input and the value
    immediately following them as the target.
    """
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = (ds.window(window_size + 1, shift=1, drop_remainder=True)
            .flat_map(lambda w: w.batch(window_size + 1))
            .shuffle(shuffle_buffer)
            .map(lambda w: (w[:-1], w[-1]))
            .batch(batch_size)
            .prefetch(1))
    return ds
```
| github_jupyter |
<a href="https://colab.research.google.com/github/PWhiddy/jax-experiments/blob/main/nbody.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import jax.numpy as jnp
from jax import jit
from jax import vmap
import jax
from numpy import random
import matplotlib.pyplot as plt
from tqdm import tqdm
!pip install tensor-canvas
!pip install moviepy
import tensorcanvas as tc
#@title VideoWriter
#VideoWriter from Alexander Mordvintsev
#https://colab.research.google.com/github/znah/notebooks/blob/master/external_colab_snippets.ipynb
import os
import numpy as np
os.environ['FFMPEG_BINARY'] = 'ffmpeg'
import moviepy.editor as mvp
from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter
class VideoWriter:
    """Incremental MP4 writer around moviepy's FFMPEG_VideoWriter.

    Frames are added one at a time with add(); the writer is lazily created
    from the first frame's size. Usable as a context manager; when the
    default '_autoplay.mp4' filename is used, the video is displayed in the
    notebook on exit.
    """

    def __init__(self, filename='_autoplay.mp4', fps=30.0, **kw):
        # Writer is created lazily in add() once the frame size is known.
        self.writer = None
        self.params = dict(filename=filename, fps=fps, **kw)

    def add(self, img):
        """Append one frame (2D grayscale or HxWx3, float [0,1] or uint8)."""
        img = np.asarray(img)
        if self.writer is None:
            h, w = img.shape[:2]
            self.writer = FFMPEG_VideoWriter(size=(w, h), **self.params)
        # Floats are assumed to be in [0, 1] and converted to 8-bit.
        if img.dtype in [np.float32, np.float64]:
            img = np.uint8(img.clip(0, 1)*255)
        # Grayscale frames are replicated into 3 channels.
        if len(img.shape) == 2:
            img = np.repeat(img[..., None], 3, -1)
        self.writer.write_frame(img)

    def close(self):
        # Safe to call repeatedly; only closes an actually-open writer.
        if self.writer:
            self.writer.close()

    def __enter__(self):
        return self

    def __exit__(self, *kw):
        self.close()
        # Auto-display in the notebook when using the default scratch file.
        if self.params['filename'] == '_autoplay.mp4':
            self.show()

    def show(self, **kw):
        """Finalize the file and display it inline (notebook `display`)."""
        self.close()
        fn = self.params['filename']
        display(mvp.ipython_display(fn, **kw))
def draw_sim(parts_pos, parts_vel, grid_r_x, grid_r_y, opacity=1.0, p_size=4.0, pcol=jnp.array([1.0,0.0,0.0])):
    """Render particles one-by-one onto a single canvas.

    parts_vel is accepted for interface symmetry with draw_sim_par but not
    used for rendering. Returns an (grid_r_y, grid_r_x, 3) image in [0, 1].
    """
    col = opacity * pcol
    canvas = jnp.zeros((grid_r_y, grid_r_x, 3))
    # would be interesting to use jax.experimental.loops for these
    for pos, _vel in zip(parts_pos, parts_vel):
        cx = pos[0] * grid_r_y + grid_r_x * 0.5 - grid_r_y * 0.5
        cy = pos[1] * grid_r_y
        canvas = tc.draw_circle(cx, cy, p_size, col, canvas)
    return jnp.clip(canvas, 0.0, 1.0)
def draw_sim_par(parts_pos, parts_vel, grid_r_x, grid_r_y, opacity=1.0, p_size=4.0, pcol=jnp.array([1.0,0.0,0.0])):
    """Render every particle onto its own blank canvas via vmap, then sum.

    Vectorized alternative to draw_sim; returns an (grid_r_y, grid_r_x, 3)
    image clipped to [0, 1]. parts_vel is unused (interface symmetry).
    """
    col = opacity * pcol

    def draw_one(pos, canvas):
        return tc.draw_circle(pos[0]*grid_r_y + grid_r_x*0.5 - grid_r_y*0.5,
                              pos[1]*grid_r_y, p_size, col, canvas)

    blanks = jnp.zeros((parts_pos.shape[0], grid_r_y, grid_r_x, 3))
    stacked = vmap(draw_one)(parts_pos, blanks)
    return jnp.clip(stacked.sum(0), 0.0, 1.0)
def compute_forces(pos, scale, eps=0.1):
    """Pairwise interaction force on each particle.

    pos: (n, 2) positions. Entry j of the result is
    sum_i (pos_i - pos_j) / ((d_ij * scale)**3 + eps); eps regularizes the
    self-interaction (d_ii = 0) and near collisions.
    """
    delta = pos[:, None, :] - pos[None, :, :]
    radii = jnp.sqrt((delta * delta).sum(axis=-1))[..., None]
    pair_force = delta / ((radii * scale) ** 3 + eps)
    return pair_force.sum(axis=0)


# JIT-compiled version used by the simulation loop.
fast_compute_forces = jit(compute_forces)
def sim_update_force(parts_pos, parts_vel, t_delta=0.05, scale=5, repel_mag=0.1, center_mag=2.5, steps=10, damp=0.99):
    """Advance the particle system by `steps` substeps; return (pos, vel).

    Each substep integrates positions, applies pairwise repulsion plus a
    pull toward the point (0.5, 0.5), and damps the velocities.
    """
    pos = jnp.array(parts_pos)
    vel = jnp.array(parts_vel)
    # jax.experimental.loops
    for _ in range(steps):
        pos = pos + t_delta * vel
        pair = fast_compute_forces(pos, scale)
        offset = pos - 0.5
        # NOTE(review): normalized by the GLOBAL norm over all offsets,
        # not per particle — preserved as-is from the original.
        pull = offset / ((offset ** 2).sum() ** 0.5)
        vel = damp * vel - t_delta * (pair * repel_mag + pull * center_mag)
    return pos, vel
def make_init_state(p_count):
    """Random initial state: positions in [0, 1)^2, velocities in [-0.5, 0.5)."""
    positions = random.rand(p_count, 2)
    velocities = random.rand(p_count, 2) - 0.5
    return positions, velocities
# JIT-compile the renderers; canvas dimensions must be static (a new trace
# is compiled per output size).
fast_draw = jit(draw_sim, static_argnums=(2,3))
fast_draw_par = jit(draw_sim_par, static_argnums=(2,3))
# `steps` drives a Python-level loop length, so it must be static too.
fast_sim_update_force = jit(sim_update_force, static_argnames=('steps'))

# Small run: 128 particles starting from rest; show the final frame.
p_state, v_state = make_init_state(128)
v_state *= 0
grid_res = 384
for i in tqdm(range(1000)):
    p_state, v_state = fast_sim_update_force(p_state, v_state, t_delta=0.05, scale=10, center_mag=0.5, repel_mag=0.05, damp=0.996, steps=2)
plt.imshow(fast_draw_par(p_state, v_state, grid_res, grid_res, p_size=4.0))

# Bigger run: 2048 particles, fewer outer iterations but more substeps each.
p_state, v_state = make_init_state(2048)
v_state *= 0
grid_res = 512
for i in tqdm(range(100)):
    p_state, v_state = fast_sim_update_force(p_state, v_state, t_delta=0.05, scale=40, center_mag=0.5, repel_mag=0.05, damp=0.997, steps=20)
plt.imshow(fast_draw_par(p_state, v_state, grid_res, grid_res, p_size=3.0))

# Optionally record simulation runs to video (disabled by default).
render_video = False
if render_video:
    p_state, v_state = make_init_state(128)
    v_state *= 0
    grid_res = 384
    with VideoWriter(fps=60) as vw:
        for i in tqdm(range(1000)):
            render = fast_draw_par(p_state, v_state, grid_res, grid_res, p_size=3.0)
            vw.add(render)
            p_state, v_state = fast_sim_update_force(p_state, v_state, t_delta=0.05, scale=10, center_mag=0.5, repel_mag=0.05, damp=0.996, steps=2)
if render_video:
    p_state, v_state = make_init_state(512)
    v_state *= 0
    grid_res = 256
    with VideoWriter(fps=60) as vw:
        for i in tqdm(range(1000)):
            render = fast_draw_par(p_state, v_state, grid_res, grid_res, opacity=0.5, p_size=3.0)
            vw.add(render)
            p_state, v_state = fast_sim_update_force(p_state, v_state, t_delta=0.05, scale=20, center_mag=0.5, repel_mag=0.05, damp=0.998, steps=4)

# Benchmarks (IPython magics): eager vs vmapped vs jitted renderers.
!nvidia-smi
p_test = 50
res_test = 512
%%timeit
draw_sim(*make_init_state(p_test), res_test, res_test)
%%timeit
draw_sim_par(*make_init_state(p_test), res_test, res_test)
%%timeit
fast_draw(*make_init_state(p_test), res_test, res_test)
%%timeit
fast_draw_par(*make_init_state(p_test), res_test, res_test)
import ffmpeg
import logging
import numpy as np
import os
import subprocess
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def start_ffmpeg_process2(key, width, height):
    """Launch ffmpeg reading raw RGB24 frames on stdin, streaming to YouTube.

    key: YouTube RTMP stream key. width/height: frame size in pixels.
    Returns the subprocess.Popen handle; raw frames are written to its stdin.
    """
    logger.info('Starting ffmpeg process2')
    # -re paces input at native rate; anullsrc provides the silent audio
    # track that the RTMP endpoint expects even for video-only streams.
    args = f'ffmpeg -re -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=44100 -f rawvideo -s {width}x{height} -pix_fmt rgb24 -i pipe: -c:v libx264 -preset veryfast -b:v 3000k -maxrate 3000k -bufsize 6000k -pix_fmt yuv420p -g 50 -c:a aac -b:a 160k -ac 2 -ar 44100 -f flv rtmp://a.rtmp.youtube.com/live2/{key}'
    return subprocess.Popen(args.split(), stdin=subprocess.PIPE)
def write_frame(process2, frame):
    """Serialize one frame as raw uint8 bytes into the ffmpeg process' stdin."""
    logger.debug('Writing frame')
    raw = frame.astype(np.uint8).tobytes()
    process2.stdin.write(raw)
def run(key, process_frame, width, height):
    """Stream frames produced by process_frame() to YouTube until interrupted.

    key: RTMP stream key. process_frame: zero-argument callable returning the
    next frame. Fix: the loop below runs forever, so the shutdown code after
    it was unreachable in the original; wrapping the loop in try/finally
    guarantees ffmpeg's stdin is closed and the process reaped even when the
    loop exits via an exception (e.g. KeyboardInterrupt).
    """
    process2 = start_ffmpeg_process2(key, width, height)
    try:
        while True:
            logger.debug('Processing frame')
            out_frame = process_frame()  # (in_frame)
            write_frame(process2, out_frame)
    finally:
        logger.info('Waiting for ffmpeg process2')
        process2.stdin.close()
        process2.wait()
        logger.info('Done')
import json
class SimRunner():
    """Stateful frame producer: one rendered simulation image per call.

    Relies on the module-level `fast_draw_par`, `fast_sim_update_force`,
    `make_init_state`, and numpy's `random`; the particle color is re-read
    from 'test_col.json' every frame.
    """

    def __init__(self, pcount, grid_x, grid_y):
        self.pcount = pcount  # number of particles
        self.p_state, self.v_state = make_init_state(pcount)
        self.v_state *= 0  # start from rest
        self.grid_x = grid_x  # output width in pixels
        self.grid_y = grid_y  # output height in pixels
        self.fcount = 0  # frames produced so far

    def next_frame(self):
        """Render the current state, step the simulation, return the frame."""
        # Re-read the color file every frame so it can be edited live while
        # the stream is running.
        with open('test_col.json') as f:
            col = jnp.array(json.load(f))
        render = fast_draw_par(self.p_state, self.v_state, self.grid_x, self.grid_y, opacity=0.8, p_size=5.0, pcol=col)
        # Every 800th frame, kick the particles with random velocity noise.
        if (self.fcount % 800 == 799):
            self.v_state += 0.2*(random.rand(self.pcount, 2)-0.5)
        self.p_state, self.v_state = fast_sim_update_force(self.p_state, self.v_state, t_delta=0.05, scale=20, center_mag=0.5, repel_mag=0.05, damp=0.995, steps=2)
        self.fcount += 1
        # Scale [0, 1] floats up to [0, 255] for the raw-video encoder.
        return render*255
# Smoke test: one frame should render with non-zero pixel values.
test = SimRunner(256, 512, 512)
test.next_frame().max()
#plt.imshow(test.next_frame())

# SECURITY NOTE(review): the RTMP stream key passed to run() below is a live
# credential hard-coded in the notebook; it should be rotated and loaded from
# an environment variable or secret store rather than kept in source control.
try:
    res_x, res_y = 1280, 720
    sr = SimRunner(384, res_x, res_y)
    run('gjhh-kvup-9fhh-fbe7-4402', sr.next_frame, res_x, res_y)
except ffmpeg.Error as e:
    # Surface ffmpeg's own output before re-raising for the traceback.
    print('stdout:', e.stdout.decode('utf8'))
    print('stderr:', e.stderr.decode('utf8'))
    raise e
```
| github_jupyter |
# Training with Features
From notebook 14, we now have radio features. From notebook 13, we now have astronomical features and potential host galaxies. It's now time to put all of these together into a set of vectors and train a classifier.
I'll quickly go over the pipeline up to now. First, make sure you have MongoDB running with the `radio` database containing the Radio Galaxy Zoo data. Then, convert all of the raw RGZ classifications into sanitised and nice-to-work-with classifications:
```bash
python -m crowdastro raw_classifications crowdastro-data/processed.db classifications
```
Next, compile the consensus database. For now, I'm only dealing with ATLAS data, so remember to specify the `--atlas` flag.
```bash
python -m crowdastro consensuses crowdastro-data/processed.db classifications atlas_consensuses_raw --atlas
```
We need to generate the training data. If you don't have a Gator cache, it will be generated.
```bash
python -m crowdastro training_data \
crowdastro-data/processed.db atlas_consensuses_raw \
gator_cache \
crowdastro-data/training.h5 \
--atlas
```
This dumps a file with astronomy features and potential hosts. Then, run 15_cnn to get CNN features (or just use the h5 and json files I already prepared) and run 16_pca to get a PCA matrix.
The pipeline is as follows:
1. Get potential hosts from training.h5.
2. Using the CDFS/ELAIS images, get radio patches around each potential host.
3. Run patches through CNN. Output the second convolutional layer.
4. Run CNN output through PCA.
5. Append astronomy features from training.h5. This is the input data.
```
import itertools
import sys
import bson
import h5py
import keras.layers
import keras.models
import matplotlib.pyplot
import numpy
import pandas
import sklearn.cross_validation
import sklearn.dummy
import sklearn.linear_model
import sklearn.metrics
sys.path.insert(1, '..')
import crowdastro.data
import crowdastro.show
# Load the prepared training table (one row per potential host galaxy).
with pandas.HDFStore('../crowdastro-data/training.h5') as store:
    data = store['data']
data.head()
```
We'll just look at a small number of potential hosts for now. I'll have to do batches to scale this up and I just want to check it works for now.
```
# Work on a small subset of potential hosts first to validate the pipeline.
n = 5000

# I'm gathering up the radio patches first so I can run them through the CNN at the same time
# as one big matrix operation. In principle this would run on the GPU.
radio_patches = numpy.zeros((n, 80, 80))
labels = numpy.zeros((n,))
radius = 40    # half-width of the 80x80 patch around each potential host
padding = 150  # border offset into the 5x5 radio image
for idx, row in data.head(n).iterrows():
    sid = bson.objectid.ObjectId(row['subject_id'][0].decode('ascii'))
    x = row['x'][0]
    y = row['y'][0]
    label = row['is_host'][0]
    labels[idx] = label
    subject = crowdastro.data.db.radio_subjects.find_one({'_id': sid})
    radio = crowdastro.data.get_radio(subject, size='5x5')
    patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
    radio_patches[idx, :] = patch

# Load the CNN.
with open('../crowdastro-data/cnn_model_2.json', 'r') as f:
    cnn = keras.models.model_from_json(f.read())
cnn.load_weights('../crowdastro-data/cnn_weights_2.h5')
cnn.layers = cnn.layers[:5]  # Pop the layers after the second convolution's activation.
cnn.add(keras.layers.Flatten())
cnn.compile(optimizer='sgd', loss='mse')  # I don't actually care about the optimiser or loss.

# Load the PCA.
with h5py.File('../crowdastro-data/pca.h5') as f:
    pca = f['conv_2'][:]

# Find the radio features: CNN activations projected through the PCA.
radio_features = cnn.predict(radio_patches.reshape(n, 1, 80, 80)) @ pca.T

# Add on the astronomy (flux) features.
features = numpy.hstack([radio_features, data.ix[:n-1, 'flux_ap2_24':'flux_ap2_80'].as_matrix()])
features = numpy.nan_to_num(features)

# Split into training and testing data.
xs_train, xs_test, ts_train, ts_test = sklearn.cross_validation.train_test_split(features, labels, test_size=0.2)

# Classify!
lr = sklearn.linear_model.LogisticRegression(class_weight='balanced')
lr.fit(xs_train, ts_train)
lr.score(xs_test, ts_test)
sklearn.metrics.confusion_matrix(ts_test, lr.predict(xs_test), [0, 1])
```
So we get ~84% accuracy on just predicting labels. Let's compare to a random classifier.
```
dc = sklearn.dummy.DummyClassifier(strategy='stratified')
dc.fit(xs_train, ts_train)
dc.score(xs_test, ts_test)
```
A stratified random classifier gets 88% accuracy, which doesn't look good for our logistic regression!
I am curious as to whether we can do better if we're considering the full problem, i.e. we know that exactly one potential host is the true host. Note that I'm ignoring the problem of multiple radio emitters for now. Let's try that: We'll get a subject, find the potential hosts, get their patches, and use the logistic regression and dummy classifiers to predict all the associated probabilities, and hence find the radio emitter. I'll only look at subjects not in the first `n` potential hosts, else we'd overlap with the training data.
To get a feel for how the predictor works, I'll try colour-coding potential hosts based on how likely they are to be the true host. To do *that*, I'll softmax the scores.
```
def softmax(x):
    """Normalize scores into probabilities: exp(x) / sum(exp(x)) along axis 0."""
    e = numpy.exp(x)
    return e / e.sum(axis=0)
# Collect subject IDs from rows outside the training subset (rows n..2n).
subject_ids = set()
for idx, row in data.ix[n:n * 2].iterrows():
    sid = row['subject_id'][0]
    subject_ids.add(sid)

# Visualise predicted host probabilities for a handful of subjects.
for subject_id in itertools.islice(subject_ids, 0, 10):
    # Pandas *really* doesn't like fancy indexing against string comparisons.
    indices = (data['subject_id'] == subject_id).as_matrix().reshape(-1)
    potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
    subject = crowdastro.data.db.radio_subjects.find_one({'_id': bson.objectid.ObjectId(subject_id.decode('ascii'))})
    radio = crowdastro.data.get_radio(subject, size='5x5')
    radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
    for index, (x, y, *astro) in enumerate(potential_hosts):
        patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
        radio_patches[index, 0, :] = patch
    # CNN features through the PCA, plus the raw astronomy features.
    radio_features = cnn.predict(radio_patches) @ pca.T
    astro_features = potential_hosts[:, 2:]
    features = numpy.hstack([radio_features, astro_features])
    scores = lr.predict_proba(features)[:, 1].T
    probs = softmax(scores)
    # Overlay probability-coloured candidates on the subject image.
    crowdastro.show.subject(subject)
    matplotlib.pyplot.scatter(potential_hosts[:, 0], potential_hosts[:, 1], c=probs)
    matplotlib.pyplot.show()
```
This is quite interesting! Lots of points (blue) are not really considered, and sometimes there are a few candidates (red). These usually look pretty reasonable, but it also seems a lot like the predictor is just looking for bright things.
Let's try and get an accuracy out. There is still the problem of multiple radio sources, so I'll just say that if the predictor hits *any* true host, that's a hit.
```
# Subject-level accuracy for logistic regression: the predicted host is the
# maximum-probability candidate; any true host counts as a hit.
hits = 0
attempts = 0
for subject_id in subject_ids:
    indices = (data['subject_id'] == subject_id).as_matrix().reshape(-1)
    potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
    labels = numpy.nan_to_num(data.as_matrix()[indices][:, -1].astype(bool))
    subject = crowdastro.data.db.radio_subjects.find_one({'_id': bson.objectid.ObjectId(subject_id.decode('ascii'))})
    radio = crowdastro.data.get_radio(subject, size='5x5')
    radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
    for index, (x, y, *astro) in enumerate(potential_hosts):
        patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
        radio_patches[index, 0, :] = patch
    radio_features = cnn.predict(radio_patches) @ pca.T
    astro_features = potential_hosts[:, 2:]
    features = numpy.hstack([radio_features, astro_features])
    scores = lr.predict_proba(features)[:, 1].reshape(-1)
    predicted_host = scores.argmax()
    if labels[predicted_host]:
        hits += 1
    attempts += 1
print('Accuracy: {:.02%}'.format(hits / attempts))
```
Against a random classifier...
```
# Baseline: same subject-level evaluation using the dummy (random) classifier
# for comparison against the logistic regression above.
hits = 0
attempts = 0
for subject_id in subject_ids:
    indices = (data['subject_id'] == subject_id).as_matrix().reshape(-1)
    potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
    labels = numpy.nan_to_num(data.as_matrix()[indices][:, -1].astype(bool))
    subject = crowdastro.data.db.radio_subjects.find_one({'_id': bson.objectid.ObjectId(subject_id.decode('ascii'))})
    radio = crowdastro.data.get_radio(subject, size='5x5')
    radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
    for index, (x, y, *astro) in enumerate(potential_hosts):
        patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
        radio_patches[index, 0, :] = patch
    radio_features = cnn.predict(radio_patches) @ pca.T
    astro_features = potential_hosts[:, 2:]
    features = numpy.hstack([radio_features, astro_features])
    scores = dc.predict_proba(features)[:, 1].reshape(-1)
    predicted_host = scores.argmax()
    if labels[predicted_host]:
        hits += 1
    attempts += 1
print('Accuracy: {:.02%}'.format(hits / attempts))
```
It would also be useful to know what the classifier considers "hard" to classify. I think an entropy approach might work (though there are problems with this...). Let's find the highest-entropy subject.
```
# Find the subject with maximum entropy in its predicted host distribution,
# i.e. the subject the classifier is least decisive about.
max_entropy = float('-inf')
max_subject = None
for subject_id in subject_ids:
    indices = (data['subject_id'] == subject_id).as_matrix().reshape(-1)
    potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
    labels = numpy.nan_to_num(data.as_matrix()[indices][:, -1].astype(bool))
    subject = crowdastro.data.db.radio_subjects.find_one({'_id': bson.objectid.ObjectId(subject_id.decode('ascii'))})
    radio = crowdastro.data.get_radio(subject, size='5x5')
    radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
    for index, (x, y, *astro) in enumerate(potential_hosts):
        patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
        radio_patches[index, 0, :] = patch
    radio_features = cnn.predict(radio_patches) @ pca.T
    astro_features = potential_hosts[:, 2:]
    features = numpy.hstack([radio_features, astro_features])
    probabilities = softmax(lr.predict_proba(features)[:, 1].reshape(-1))
    # Shannon entropy of the softmaxed host distribution.
    entropy = -(probabilities * numpy.log(probabilities)).sum()
    if entropy > max_entropy:
        max_entropy = entropy
        max_subject = subject

# Re-render the hardest subject with its probability-coloured candidates.
crowdastro.show.subject(max_subject)
indices = (data['subject_id'] == str(max_subject['_id']).encode('ascii')).as_matrix().reshape(-1)
potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
subject = max_subject
radio = crowdastro.data.get_radio(subject, size='5x5')
radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
for index, (x, y, *astro) in enumerate(potential_hosts):
    patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
    radio_patches[index, 0, :] = patch
radio_features = cnn.predict(radio_patches) @ pca.T
astro_features = potential_hosts[:, 2:]
features = numpy.hstack([radio_features, astro_features])
scores = lr.predict_proba(features)[:, 1].T
probs = softmax(scores)
crowdastro.show.subject(subject)
matplotlib.pyplot.scatter(potential_hosts[:, 0], potential_hosts[:, 1], c=probs)
matplotlib.pyplot.show()
# Sorted probabilities show how peaked (or flat) the distribution is.
matplotlib.pyplot.plot(sorted(probs), marker='x')
```
| github_jupyter |
**[Introduction to Machine Learning Home Page](https://www.kaggle.com/learn/intro-to-machine-learning)**
---
# Introduction
Machine learning competitions are a great way to improve your data science skills and measure your progress.
In this exercise, you will create and submit predictions for a Kaggle competition. You can then improve your model (e.g. by adding features) to improve and see how you stack up to others taking this micro-course.
The steps in this notebook are:
1. Build a Random Forest model with all of your data (**X** and **y**)
2. Read in the "test" data, which doesn't include values for the target. Predict home values in the test data with your Random Forest model.
3. Submit those predictions to the competition and see your score.
4. Optionally, come back to see if you can improve your model by adding features or changing your model. Then you can resubmit to see how that stacks up on the competition leaderboard.
## Recap
Here's the code you've written so far. Start by running it again.
```
# set up code checking
import os
if not os.path.exists("../input/train.csv"):
    # Running outside the original kernel layout: symlink the course data in.
    os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
    os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex7 import *

# load train data
import pandas as pd
# path of the file to read.
# the directory structure was changed to simplify submitting to a competition
iowa_file_path = '../input/train.csv'
home_data = pd.read_csv(iowa_file_path)

# create target object and call it y
y = home_data['SalePrice']
# create X: the numeric feature columns used throughout this exercise
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]

# split into validation and training data (fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
```
### DecisionTreeRegressor
```
# Baseline: an unconstrained decision tree.
# specify model
from sklearn.tree import DecisionTreeRegressor
iowa_model = DecisionTreeRegressor(random_state=1)
# fit model
iowa_model.fit(train_X, train_y)

# make validation predictions
val_predictions = iowa_model.predict(val_X)

# calculate mae on the held-out validation split
from sklearn.metrics import mean_absolute_error
val_mae = mean_absolute_error(val_y, val_predictions)
print(f"Validation MAE when not specifying max_leaf_nodes: {val_mae:,.0f}")
```
### DecisionTreeRegressor with `max_leaf_nodes`
```
# using best value for max_leaf_nodes (found in the previous exercise)
# specify model
iowa_model = DecisionTreeRegressor(max_leaf_nodes=100, random_state=1)
# fit model
iowa_model.fit(train_X, train_y)

# make validation predictions
val_predictions = iowa_model.predict(val_X)

# calculate mae — should improve on the unconstrained tree above
val_mae = mean_absolute_error(val_y, val_predictions)
print(f"Validation MAE for best value of max_leaf_nodes: {val_mae:,.0f}")
```
### RandomForestRegressor
```
# Random forest: an ensemble of trees, usually beating a single tuned tree.
# specify model
from sklearn.ensemble import RandomForestRegressor
rf_model = RandomForestRegressor(random_state=1)
# fit model
rf_model.fit(train_X, train_y)

# make validation predictions
rf_val_predictions = rf_model.predict(val_X)

# calculate mae on the validation split
rf_val_mae = mean_absolute_error(val_y, rf_val_predictions)
print(f"Validation MAE for Random Forest Model: {rf_val_mae:,.0f}")
```
# Creating a Model For the Competition
Build a Random Forest model and train it on all of **X** and **y**.
```
# to improve accuracy, create a new Random Forest model which you will train on all training data
# specify model
rf_model_on_full_data = RandomForestRegressor(random_state=1)

# fit rf_model_on_full_data on all data from the training data
rf_model_on_full_data.fit(X, y)

# make full data predictions
rf_model_on_full_data_predictions = rf_model_on_full_data.predict(X)

# calculate mae
# NOTE(review): this MAE is measured on the same data the model was trained
# on (in-sample), so it will be optimistic compared to validation MAE.
rf_model_on_full_data_mae = mean_absolute_error(y, rf_model_on_full_data_predictions)
print(f"Full Data MAE for Random Forest Model: {rf_model_on_full_data_mae:,.0f}")
```
# Make Predictions
Read the file of "test" data. And apply your model to make predictions
```
# load test data
# path to file you will use for predictions
test_data_path = '../input/test.csv'
# read test data file using pandas
test_data = pd.read_csv(test_data_path)

# create test_X which comes from test_data but includes only the columns you used for prediction.
# the list of columns is stored in a variable called features
# NOTE(review): assumes these feature columns have no missing values in the
# test set — confirm, or the predict call below will fail on NaNs.
test_X = test_data[features]
# make predictions which we will submit.
test_preds = rf_model_on_full_data.predict(test_X)

# save predictions in format used for competition scoring (Id, SalePrice)
output = pd.DataFrame({'Id': test_data.Id, 'SalePrice': test_preds})
output.to_csv('submission.csv', index=False)
```
Before submitting, run a check to make sure your `test_preds` have the right format.
```
# Check your answer: validates test_preds format via the learntools checker.
step_1.check()
# step_1.solution()
```
# Test Your Work
To test your results, you'll need to join the competition (if you haven't already). So open a new window by clicking on [this link](https://www.kaggle.com/c/home-data-for-ml-course). Then click on the **Join Competition** button.

Next, follow the instructions below:
1. Begin by clicking on the blue **COMMIT** button in the top right corner of this window. This will generate a pop-up window.
2. After your code has finished running, click on the blue **Open Version** button in the top right of the pop-up window. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions.
3. Click on the **Output** tab on the left of the screen. Then, click on the **Submit to Competition** button to submit your results to the leaderboard.
You have now successfully submitted to the competition!
4. If you want to keep working to improve your performance, select the blue **Edit** button in the top right of the screen. Then you can change your model and repeat the process. There's a lot of room to improve your model, and you will climb up the leaderboard as you work.
# Continuing Your Progress
There are many ways to improve your model, and **experimenting is a great way to learn at this point.**
The best way to improve your model is to add features. Look at the list of columns and think about what might affect home prices. Some features will cause errors because of issues like missing values or non-numeric data types.
The **[Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning)** micro-course will teach you how to handle these types of features. You will also learn to use **xgboost**, a technique giving even better accuracy than Random Forest.
# Other Micro-Courses
The **[Pandas](https://kaggle.com/Learn/Pandas)** micro-course will give you the data manipulation skills to quickly go from conceptual idea to implementation in your data science projects.
You are also ready for the **[Deep Learning](https://kaggle.com/Learn/Deep-Learning)** micro-course, where you will build models with better-than-human level performance at computer vision tasks.
---
**[Introduction to Machine Learning Home Page](https://www.kaggle.com/learn/intro-to-machine-learning)**
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, the lanes on the road are detected using Canny edge detection and Hough transform line detection. I also use the HSL color space, grayscaling, color selection and Gaussian smoothing to reduce noise in pictures and videos. To achieve optimal performance, the detection code keeps a memory of the lanes found in previous frames, so the result is smooth. The code is verified on pictures and videos, and performs well on the challenge video, which has a curved lane and shadows on the ground. All picture results are in the folder 'test_images_output'. Video outputs are in 'test_videos_output'.
Example picture output:
---
<figure>
<img src="test_images/solidWhiteRight.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Original Image </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="test_images_output/solidWhiteRight.png" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Lane Detaction Result</p>
</figcaption>
</figure>
## Python Code:
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
from scipy import stats
%matplotlib inline
```
## Read in an Image
```
# reading in an image as an RGB numpy array
image_sWR = mpimg.imread('test_images/solidWhiteRight.jpg')
# printing out some stats: array type and (height, width, channels)
print('This image is:', type(image_sWR), 'with dimensions:', image_sWR.shape)
```
Some important functions:
`find_hough_lines` Separates the left-lane and right-lane segments
`linear_regression_left/linear_regression_right` Use linear regression to extrapolate lanes
`create_lane_list` Use deque to store previous lanes
## Lane finding functions
```
import math
from collections import deque
def find_hough_lines(img, lines):
    """Split Hough segments into left-lane and right-lane point sets.

    A segment belongs to the left lane when its slope is negative (image y
    grows downward) and both endpoints lie left of the image centre; the
    right lane is the mirror case. Returns (xl, yl, xr, yr) coordinate lists.
    """
    xl = []
    yl = []
    xr = []
    yr = []
    middel_x = img.shape[1] / 2
    for line in lines:
        for x1, y1, x2, y2 in line:
            # Fix: guard vertical segments explicitly. The original divided
            # by (x2 - x1), which raises ZeroDivisionError for plain-int
            # coordinates; its +/-inf slope filters already excluded such
            # segments, so skipping them preserves behaviour.
            if x2 == x1:
                continue
            slope = (y2 - y1) / (x2 - x1)
            if slope < 0 and x1 < middel_x and x2 < middel_x:
                xl.extend((x1, x2))
                yl.extend((y1, y2))
            elif slope > 0 and x1 > middel_x and x2 > middel_x:
                xr.extend((x1, x2))
                yr.extend((y1, y2))
    return xl, yl, xr, yr
def linear_regression_left(xl, yl):
    """Fit a line y = slope*x + intercept to the left-lane points."""
    fit = stats.linregress(xl, yl)
    return fit.slope, fit.intercept
def linear_regression_right(xr, yr):
    """Fit a line y = slope*x + intercept to the right-lane points."""
    fit = stats.linregress(xr, yr)
    return fit.slope, fit.intercept
def create_lane_list():
    """Create an empty ring buffer holding the 15 most recent lane fits."""
    return deque([], maxlen=15)
def left_lane_mean(left_lane_que):
    """Average (slope, intercept) over remembered left-lane fits.

    Returns (0, 0) when no fits have been recorded yet.
    """
    if not left_lane_que:
        return 0, 0
    mean_fit = np.mean(left_lane_que, axis=0)
    return mean_fit[0], mean_fit[1]
def right_lane_mean(right_lane_que):
    """Average (slope, intercept) over remembered right-lane fits.

    Returns (0, 0) when no fits have been recorded yet.
    """
    if not right_lane_que:
        return 0, 0
    mean_fit = np.mean(right_lane_que, axis=0)
    return mean_fit[0], mean_fit[1]
def left_lane_add(left_lane_que, slope_l, intercept_l):
    """Push a new left-lane (slope, intercept) fit into the memory deque.

    Returns the same deque for convenient chaining; oldest fits fall off
    automatically once maxlen is reached.
    """
    left_lane_que.append([slope_l,intercept_l])
    return left_lane_que
def right_lane_add(right_lane_que, slope_r, intercept_r):
    """Push a new right-lane (slope, intercept) fit into the memory deque.

    Returns the same deque for convenient chaining; oldest fits fall off
    automatically once maxlen is reached.
    """
    right_lane_que.append([slope_r,intercept_r])
    return right_lane_que
def grayscale(img):
    """Convert an RGB image to single-channel grayscale."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

def canny(img, low_threshold, high_threshold):
    """Apply the Canny edge detector with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)

def gaussian_blur(img, kernel_size):
    """Smooth the image with a kernel_size x kernel_size Gaussian kernel."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img):
    """Black out everything outside the road-shaped polygon of interest.

    The polygon comes from get_vertices_for_img(); works for both grayscale
    and multi-channel images.
    """
    mask = np.zeros_like(img)
    # The fill value must cover every channel of the input image.
    if len(img.shape) > 2:
        ignore_mask_color = (255,) * img.shape[2]
    else:
        ignore_mask_color = 255
    cv2.fillPoly(mask, get_vertices_for_img(img), ignore_mask_color)
    # Keep only pixels where the mask is nonzero.
    return cv2.bitwise_and(img, mask)
def draw_lines(img, intercept_l, slope_l,intercept_r, slope_r, xl, xr,color=[255, 0, 0], thickness=10):
    """Draw the extrapolated left and right lane lines onto img (in place).

    Each lane is drawn from the image's bottom edge up to the lane's
    innermost detected point; returns the annotated image.
    """
    max_y = img.shape[0]
    yl_LR = [intercept_l + slope_l * x for x in xl]
    yr_LR = [intercept_r + slope_r * x for x in xr]
    # Solve y = slope*x + intercept for x at the bottom of the frame.
    x_left_bottom = (max_y - intercept_l) / slope_l
    x_right_bottom = (max_y - intercept_r) / slope_r
    cv2.line(img, (int(x_left_bottom), int(max_y)), (int(max(xl)), int(min(yl_LR))), color, thickness)
    cv2.line(img, (int(x_right_bottom), int(max_y)), (int(min(xr)), int(min(yr_LR))), color, thickness)
    return img
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform on an edge image.

    Returns (line_img, lines): a blank RGB canvas matching img's size for
    drawing onto, and the detected line segments from cv2.HoughLinesP.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    return line_img, lines
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend the lane overlay onto the original: initial_img*α + img*β + γ."""
    return cv2.addWeighted(initial_img, α, img, β, γ)
def isolate_yellow_hsl(img):
    """Binary mask of pixels whose HSL values fall in the yellow-lane band.

    Expects an HSL image; yellow lane markings on the road are the target.
    """
    lower = np.array([15, 38, 115], dtype=np.uint8)
    upper = np.array([35, 204, 255], dtype=np.uint8)
    return cv2.inRange(img, lower, upper)
def isolate_white_hsl(img):
    """Binary mask of white pixels in an HSL image (white lane markings)."""
    lower = np.array([0, 200, 0], dtype=np.uint8)
    upper = np.array([180, 255, 255], dtype=np.uint8)
    return cv2.inRange(img, lower, upper)
def get_vertices_for_img(img):
    """Return the region-of-interest polygon for `region_of_interest`.

    The trapezoid corners are hand-tuned per resolution: one set for the
    960x540 test images, a second set for everything else (the 1280x720
    challenge video).  Returns an int32 array shaped (1, 4, 2) ordered
    bottom-left, top-left, top-right, bottom-right, as cv2.fillPoly expects.

    Improvement: the duplicated np.array construction in both branches is
    factored out into a single return statement.
    """
    height = img.shape[0]
    width = img.shape[1]
    if (width, height) == (960, 540):
        bottom_left = (130, height - 1)
        top_left = (410, 330)
        top_right = (650, 350)
        bottom_right = (width - 30, height - 1)
    else:
        bottom_left = (200, 680)
        top_left = (600, 450)
        top_right = (750, 450)
        bottom_right = (1100, 680)
    return np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
```
## Test Images
First, use still images to test the lane detection pipeline.
```
import os

# Directory that holds the still test frames.
test_img_dir = 'test_images/'
# Full relative path of every file in the test image directory.
test_image_names = [test_img_dir + name for name in os.listdir("test_images/")]
```
## Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
# Read in images
# Load each still test frame as an RGB array (matplotlib.image.imread).
image_wCLS = mpimg.imread('test_images/whiteCarLaneSwitch.jpg')
image_sYL = mpimg.imread('test_images/solidYellowLeft.jpg')
image_sYC2 = mpimg.imread('test_images/solidYellowCurve2.jpg')
image_sYC = mpimg.imread('test_images/solidYellowCurve.jpg')
image_sWC = mpimg.imread('test_images/solidWhiteCurve.jpg')
image_ch = mpimg.imread('test_images/challenge.jpg')
# NOTE(review): 'solidWhiteRight.jpg' is not loaded here although a later
# cell references image_sWR — as written that cell raises NameError; confirm.
def Lane_Detect(image):
    """Single-image lane detection pipeline.

    Steps: HLS colour masking (yellow + white) -> grayscale -> Gaussian blur
    -> Canny edges -> region-of-interest crop -> probabilistic Hough
    transform -> per-lane linear fit -> draw the lanes and blend them onto
    the original frame.

    NOTE(review): grayscale, region_of_interest, find_hough_lines,
    linear_regression_left and linear_regression_right are defined elsewhere
    in this notebook.  Assumes *image* is RGB (as produced by mpimg.imread).
    """
    # Lane detection pipeline
    # HLS separates lightness from hue, making the colour thresholds more
    # robust to illumination changes than plain RGB.
    image_hsl = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    image_yellow = isolate_yellow_hsl(image_hsl)
    image_white = isolate_white_hsl(image_hsl)
    # Combine white parts and yellow parts in a single pic
    image_wy = cv2.bitwise_or(image_yellow,image_white)
    # Combine yellow and white masks and original picture to derive the parts we are interested.
    # This would reduce the noise and improve the performance if there is shadow on the ground.
    image_com = cv2.bitwise_and(image,image,mask=image_wy)
    image_gray = grayscale(image_com)
    # Smoothing the image
    kernal_size = 11
    blur_image = cv2.GaussianBlur(image_gray,(kernal_size,kernal_size),0)
    # Setup Canny
    low_threshold = 10
    high_threshold = 150
    edges_image = cv2.Canny(blur_image,low_threshold,high_threshold)
    # Define range of interest
    masked_image = region_of_interest(edges_image)
    # rho=1px, theta=1deg, low accumulator threshold and short segments.
    bland_image, houghLines= hough_lines(masked_image, 1, np.pi/180, 1, 5, 1)
    # Split Hough points into left/right lane candidates, fit a line to each.
    xl,yl,xr,yr = find_hough_lines(bland_image,houghLines)
    slope_l, intercept_l = linear_regression_left(xl,yl)
    slope_r, intercept_r = linear_regression_right(xr,yr)
    hough_image = draw_lines(bland_image, intercept_l, slope_l, intercept_r, slope_r, xl, xr)
    # Overlay the drawn lanes on the original frame.
    Final_image = weighted_img(hough_image,image)
    return Final_image
# Process images and save
# Run the pipeline on every test frame and write the overlays to
# test_images_output/.
Final_wCLS = Lane_Detect(image_wCLS)
plt.imsave('test_images_output/whiteCarLaneSwitch.png',Final_wCLS)
# Fix: image_sWR was used below but never loaded anywhere in the notebook,
# which raised a NameError; read the frame here before using it.
image_sWR = mpimg.imread('test_images/solidWhiteRight.jpg')
Final_sWR = Lane_Detect(image_sWR)
plt.imsave('test_images_output/solidWhiteRight.png',Final_sWR)
Final_sYL = Lane_Detect(image_sYL)
plt.imsave('test_images_output/solidYellowLeft.png',Final_sYL)
Final_sYC2 = Lane_Detect(image_sYC2)
plt.imsave('test_images_output/solidYellowCurve2.png',Final_sYC2)
Final_sYC = Lane_Detect(image_sYC)
plt.imsave('test_images_output/solidYellowCurve.png',Final_sYC)
Final_sWC = Lane_Detect(image_sWC)
plt.imsave('test_images_output/solidWhiteCurve.png',Final_sWC)
Final_ch = Lane_Detect(image_ch)
plt.imsave('test_images_output/challenge.png',Final_ch)
```
## Test on Videos
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# Set thresholds to decide if a frame's lane fit should be added to memory.
# A new fit is accepted into the running average only when it deviates from
# the current mean slope/intercept by less than these limits.
MAXIMUM_SLOPE_DIFF = 0.1
MAXIMUM_INTERCEPT_DIFF = 50.0
class LaneDetectWithMemo:
    """Lane detection pipeline with temporal smoothing across video frames.

    Keeps a short history ("memory") of accepted left/right lane fits and
    draws the running mean instead of the raw per-frame fit, which
    suppresses frame-to-frame jitter and outlier detections.

    NOTE(review): create_lane_list, left_lane_add, right_lane_add,
    left_lane_mean, right_lane_mean, find_hough_lines, grayscale,
    region_of_interest and the linear_regression_* helpers are defined
    elsewhere in this notebook.
    """
    def __init__(self):
        # Histories of accepted (slope, intercept) fits, one per lane.
        self.left_lane_que = create_lane_list()
        self.right_lane_que = create_lane_list()
    def LanePipe(self,image):
        # Same per-frame pipeline as Lane_Detect: HLS colour masking,
        # blur, Canny, ROI crop, Hough transform, per-lane linear fit.
        image_hsl = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
        image_yellow = isolate_yellow_hsl(image_hsl)
        image_white = isolate_white_hsl(image_hsl)
        # Combine white parts and yellow parts in a single pic
        image_wy = cv2.bitwise_or(image_yellow,image_white)
        # Combine yellow and white masks and original picture to derive the parts we are interested.
        # This would reduce the noise and improve the performance if there is shadow on the ground.
        image_com = cv2.bitwise_and(image,image,mask=image_wy)
        image_gray = grayscale(image_com)
        # Smoothing the image
        kernal_size = 11
        blur_image = cv2.GaussianBlur(image_gray,(kernal_size,kernal_size),0)
        # Setup Canny
        low_threshold = 10
        high_threshold = 150
        edges_image = cv2.Canny(blur_image,low_threshold,high_threshold)
        # Define range of interest
        masked_image = region_of_interest(edges_image)
        bland_image, houghLines= hough_lines(masked_image, 1, np.pi/180, 1, 5, 1)
        xl,yl,xr,yr = find_hough_lines(bland_image,houghLines)
        slope_l, intercept_l = linear_regression_left(xl,yl)
        slope_r, intercept_r = linear_regression_right(xr,yr)
        # If the lane diverges too much, then use the mean value in memory to draw the lane.
        # If the lane is within threshold, then add it to memory and recalculate the mean value.
        if len(self.left_lane_que) == 0 and len(self.right_lane_que) == 0:
            # First frame: seed the memory with the current fits.
            self.left_lane_que = left_lane_add(self.left_lane_que, slope_l, intercept_l)
            self.right_lane_que = right_lane_add(self.right_lane_que, slope_r, intercept_r)
            slope_l_mean, intercept_l_mean = left_lane_mean(self.left_lane_que)
            slope_r_mean, intercept_r_mean = right_lane_mean(self.right_lane_que)
        else:
            slope_l_mean, intercept_l_mean = left_lane_mean(self.left_lane_que)
            slope_r_mean, intercept_r_mean = right_lane_mean(self.right_lane_que)
            # Distance of the new fit from the running mean, per lane.
            slope_l_diff = abs(slope_l-slope_l_mean)
            intercept_l_diff = abs(intercept_l-intercept_l_mean)
            slope_r_diff = abs(slope_r-slope_r_mean)
            intercept_r_diff = abs(intercept_r-intercept_r_mean)
            # Accept the new fit into memory only when it agrees with the mean.
            if intercept_l_diff < MAXIMUM_INTERCEPT_DIFF and slope_l_diff < MAXIMUM_SLOPE_DIFF:
                self.left_lane_que = left_lane_add(self.left_lane_que, slope_l, intercept_l)
                slope_l_mean, intercept_l_mean = left_lane_mean(self.left_lane_que)
            if intercept_r_diff < MAXIMUM_INTERCEPT_DIFF and slope_r_diff < MAXIMUM_SLOPE_DIFF:
                self.right_lane_que = right_lane_add(self.right_lane_que, slope_r, intercept_r)
                slope_r_mean, intercept_r_mean = right_lane_mean(self.right_lane_que)
        # Draw the smoothed (mean) lanes and overlay them on the original frame.
        hough_image = draw_lines(bland_image, intercept_l_mean, slope_l_mean,intercept_r_mean, slope_r_mean, xl, xr)
        Final_image = weighted_img(hough_image,image)
        return Final_image
# Test on the first video, which has a solid white lane on the right.
# A fresh detector instance is created per clip so lane memory does not
# leak between videos.
LaneDetect_1 = LaneDetectWithMemo()
white_output = 'test_videos_output/solidWhiteRight.mp4'
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(LaneDetect_1.LanePipe) #NOTE: this function expects color images!!
# IPython magic: time the frame-by-frame processing while writing the video.
%time white_clip.write_videofile(white_output, audio=False)
# Embed the rendered video in the notebook for inspection.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
# Now for the one with the solid yellow lane on the left. This one's more tricky!
LaneDetect_2 = LaneDetectWithMemo()
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(LaneDetect_2.LanePipe)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Optional Challenge
This video has a curved lane and shadows on the ground. In the future I would use a polynomial to represent the lane instead of a single line. The shadow handling is improved by extracting the yellow and white regions of the picture and combining them with the original image, which isolates the parts we are interested in.
```
# Run the memory-smoothed pipeline on the harder challenge clip
# (curved lane, shadows) and embed the result.
LaneDetect_ch = LaneDetectWithMemo()
challenge_output = 'test_videos_output/challenge.mp4'
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(LaneDetect_ch.LanePipe)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import re
from tqdm import tqdm
from common.bio.amino_acid import *
pd.set_option('display.max_colwidth', -1)
```
## Importing original uniprot file
```
#uniprot = pd.read_csv("../data/protein/cgan/data_sources/uniprot_all_not-hetero.tab", sep="\t").drop(["Entry","Entry name","Status", "Subunit structure [CC]"], axis=1)
uniprot = pd.read_csv("../data/protein/cgan/data_sources/uniprot_all_not-hetero_dropped.tab", sep="\t").drop(["Unnamed: 0"], axis=1)
uniprot.head()
```
Parse the file that contains all enzyme reactions. Note: only the first reaction of each entry is taken for now.
```
# Parse enzyme.dat and record, for each EC class, the text of its first
# catalytic reaction.  ID records carry the EC number; CA records carry the
# reaction text, possibly continued over several lines; a "(2) " prefix
# marks a second reaction, which is skipped.
current_id = None
dictionary = {}
skip = False  # guards against a CA record appearing before the first ID record
# Fix: use context managers so both file handles are closed deterministically
# (the original left the input file open); also drop the unused `reaction`
# variable and initialise `skip` before the loop.
with open("../data/protein/cgan/data_sources/enzyme.dat", "r") as f:
    for line in f:
        if line.startswith("CA") or line.startswith("ID"):
            components = line.split("   ")
            if components[0] == "ID":
                current_id = components[1].replace("\n", "")
                skip=False
            else:
                if current_id not in dictionary:
                    # First CA record of this entry: strip the "(1) " marker.
                    dictionary[current_id] = components[1].replace("\n", "").replace(".", "").replace("(1) ", "")
                elif components[1].startswith("(2) "):
                    # Only the first reaction is kept; ignore everything from "(2)" on.
                    skip = True
                elif not skip:
                    # Continuation line of the first reaction.
                    dictionary[current_id] += " "+ components[1].replace("\n", "").replace(".", "")
with open('../data/protein/cgan/data_sources/enzyme_class_reaction.csv', 'w') as f:
    for key, reaction_text in dictionary.items():
        f.write("%s\t%s\n" % (key, reaction_text))
```
## Cleaning reactions
```
uniprot = filter_non_standard_amino_acids(uniprot, "Sequence")
uniprot.shape
uniprot = uniprot[uniprot.Sequence.str.len() <=1024]
uniprot.shape
uniprot.to_csv("../data/protein/cgan/data_sources/uniprot_all_not-hetero_dropped.tab", sep="\t", index=None)
uniprot.head()
uniprot[["Catalytic activity", "EC number"]].drop_duplicates().sort_values("EC number")
```
## Splitting reaction into components
Getting substrates and product compounds
```
enzyme_class_reaction = pd.read_csv("../data/protein/cgan/data_sources/enzyme_class_reaction.csv", sep="\t", header=None, names=["EC class", "Reaction"])
substrates = enzyme_class_reaction.Reaction.str.split("\s\=\s",expand=True)[0]
products = enzyme_class_reaction.Reaction.str.split("\s\=\s",expand=True)[1]
product_components = products.str.split("\s\+\s",expand=True)
product_components.reset_index(drop=True, inplace=True)
p_columns_names = { index:"product_"+str(index+1) for index in range(8)}
product_components = product_components.rename(index=str, columns=p_columns_names)
for c in p_columns_names.values():
product_components[c] = product_components[c].str.strip()
```
Validation
```
product_components[product_components["product_8"].notnull()].head()
```
Getting substrate compounds
```
substrate_components = substrates.str.split("\s\+\s",expand=True)
substrate_components.reset_index(drop=True, inplace=True)
s_columns_names = { index:"substrate_"+str(index+1) for index in range(5)}
substrate_components = substrate_components.rename(index=str, columns=s_columns_names)
for c in s_columns_names.values():
substrate_components[c] = substrate_components[c].str.strip()
substrate_components[substrate_components["substrate_5"].notnull()].head()
```
Joining everything together
```
substrates_products = pd.concat([substrate_components, product_components], axis=1)
substrates_products.shape
enzyme_class_reaction.reset_index(drop=True, inplace=True)
substrates_products.reset_index(drop=True, inplace=True)
final_to_save = pd.concat([enzyme_class_reaction,substrates_products], axis=1)
final_to_save.shape
final_to_save.head()
final_to_save.to_csv("../data/protein/cgan/enzyme_reaction_splitted_in_components.csv", sep="\t")
```
## ChEBI (initial preprocessing)
Trying to create a dictionary mapping synonyms <-> SMILES
```
from rdkit.Chem import PandasTools
molecules = PandasTools.LoadSDF('../data/ChEBI_complete_3star.sdf',
smilesName='SMILES',
molColName='Molecule',
includeFingerprints=False)
molecules = molecules[["ChEBI ID","ChEBI Name", "Formulae", "SMILES", "Synonyms", "UniProt Database Links"]]
molecules.head()
molecules.to_csv("../data/ChEBI_select.csv", sep='\t')
```
## Filtered ChEBI preprocessing
```
molecules = pd.read_csv("../data/protein/cgan/ChEBI_select.csv", sep='\t')
molecules = molecules.drop("Unnamed: 0", axis=1)
molecules.head()
molecules.loc[molecules.Synonyms.notnull(), "Synonyms"] = molecules.loc[molecules.Synonyms.notnull(), "Synonyms"] + "\r\n" +molecules.loc[molecules.Synonyms.notnull(), "ChEBI Name"]
molecules.loc[molecules.Synonyms.isnull(), "Synonyms"] = molecules.loc[molecules.Synonyms.isnull(), "ChEBI Name"]
molecules[molecules.Synonyms.isnull()].head()
molecules.shape
molecules_selected = molecules[["Formulae", "SMILES", "Synonyms"]]
import common.preprocessing
molecules_expanded = split_dataframe_list_to_rows(molecules_selected, "Synonyms", "\r\n")
molecules_expanded.shape
molecules_expanded["Synonyms"] = molecules_expanded["Synonyms"].str.strip()
molecules_expanded["Formulae"] = molecules_expanded["Formulae"].str.strip()
molecules_expanded["SMILES"] = molecules_expanded["SMILES"].str.strip()
molecules_expanded = molecules_expanded.set_index(['Synonyms'])
molecules_expanded.head()
molecules_expanded.to_csv("../data/protein/cgan/smiles_dict.csv", sep='\t', quoting=csv.QUOTE_MINIMAL)
```
## Matching compounds with smiles
Find compounds that cannot be matched automatically
```
import csv
molecules = pd.read_csv("../data/protein/cgan/smiles_dict.csv", sep='\t')
proteins = pd.read_csv("../data/protein/cgan/enzyme_reaction_splitted_in_components.csv", sep="\t")
molecules.shape, proteins.shape
proteins.head(1)
def concatenate(df, columns, all_values=None):
    """Collect the unique values (as strings) of the given columns into one array.

    Parameters: df (DataFrame), columns (iterable of column names),
    all_values (optional starting array/sequence to append onto; defaults to
    empty).  Returns a flat numpy array of strings.

    Fix: the original used a mutable list as the default for `all_values`
    (shared-default pitfall); it now defaults to None and is created fresh
    per call, which is backward-compatible for all callers.
    """
    if all_values is None:
        all_values = []
    for c in columns:
        all_values = np.concatenate((all_values, df[c].astype(str).unique()))
    return all_values
all_uniq = None
all_uniq = concatenate(proteins, [col for col in proteins.columns if col.startswith("substrate")])
all_uniq = concatenate(proteins, [col for col in proteins.columns if col.startswith("product")], all_uniq)
all_uniq = all_uniq[all_uniq != 'nan']
all_uniq = list(set(all_uniq))
len(all_uniq)
i = 0
n_found = 0
results = []
for i in tqdm(range(len(all_uniq))):
compound = all_uniq[i]
compound = compound.replace('- ', '-')
compound = compound.replace('- ','-')
# compound = re.sub(r'\(\d+\) in tRNA.*$', '', compound)
# compound = re.sub(r'\(\d+\) in .* rRNA.*$', '', compound)
# compound = re.sub(r'\(\d+\) in .* rRNA.*$', '', compound)
# compound = re.sub(r' in .* rRNA.*$', '', compound)
# compound = re.sub(r' in .* RNA$', '', compound)
# compound = compound.replace(' in DNA', '')
matching_smiles = molecules[(molecules['Synonyms'] == compound)].SMILES.values
if len(matching_smiles) == 0:
smiles = "Not found"
else:
n_found = n_found + 1
smiles = matching_smiles[0]
results.append([all_uniq[i], smiles])
# if i == 250:
# break
print("Matched compounds: {0:.1%}".format(float(n_found)/i))
print("Not matched: {}".format(i - n_found))
#N(6)-methyladenine
# Fix: the Synonyms series is lower-cased before matching, so the original
# upper-case pattern "NADPH" could never match anything; use a lower-case
# pattern so the case-insensitive lookup actually works.
molecules[molecules['Synonyms'].str.lower().str.contains("nadph")].sort_values("Synonyms").head(100)
results
import csv
# Persist the (compound, smiles) pairs collected by the matching loop above.
with open("../data/protein/cgan/compound_to_smile.csv", 'w', newline='') as myfile:
    wr = csv.writer(myfile, delimiter='\t')
    wr.writerows(results)
# Reload the file and keep only the compounds that still lack a SMILES match.
compound_smile_dict = pd.read_csv("../data/protein/cgan/compound_to_smile.csv", sep='\t', header=None, names = ["Compound", "Smiles"])
compound_smile_dict_filtered = compound_smile_dict[compound_smile_dict.Smiles == "Not found"]
compound_smile_list = compound_smile_dict_filtered['Compound'].tolist()
compound_smile_list
i = 0
n_found = 0
results = []
for i in tqdm(range(len(compound_smile_list))):
compound = compound_smile_list[i]
compound = compound.replace('- ', '-')
compound = compound.replace('- ','-')
compound = compound.replace('(Side 1)','')
compound = compound.replace('(Side 2)','')
# compound = re.sub('\(\d+\) in \d+S [t,r]RNA$', '', compound)
# compound = re.sub('\(\d+\) in [t,r]RNA$', '', compound)
compound = compound.replace('(In)','')
compound = compound.replace('(Out)','')
compound = compound.replace(' in DNA', '')
compound = compound.replace(' in rRNA', '')
compound = compound.replace(' in tRNA', '')
compound = re.sub('^[A,a] ', '', compound)
compound = re.sub('^[A,a]n ', '', compound)
compound = re.sub('^[\d+] ', '', compound)
compound = re.sub(' [\d+]$', '', compound)
compound = re.sub('^n ', '', compound)
compound = re.sub('^2n ', '', compound)
compound = compound.replace('(', '')
compound = compound.replace(')', '')
matching_smiles = molecules[(molecules['Synonyms'].str.lower() == compound.strip().lower())].SMILES.values
if len(matching_smiles) == 0:
smiles = "Not found"
#print ("Compound: {} \tSmiles: {} | original: {}".format(compound, smiles, original_compound))
else:
n_found = n_found + 1
smiles = matching_smiles[0]
#print ("Compound: {} \tSmiles: {}".format(compound, smiles))
compound_smile_dict.loc[compound_smile_dict.Compound == compound_smile_list[i], "Smiles"] = smiles
print("Matched compounds: {0:.1%}".format(float(n_found)/i))
print("Not matched: {}".format(i - n_found))
compound_smile_dict.to_csv("../data/protein/cgan/compound_to_smile.csv", sep='\t', header=True, index=None)
compound_smile_dict = pd.read_csv("../data/protein/cgan/compound_to_smile.csv", sep='\t').drop(["Unnamed: 0"], axis = 1 )
compound_smile_dict_filtered = compound_smile_dict[compound_smile_dict.Smiles == "Not found"]
compound_smile_dict[compound_smile_dict.Smiles == "Not found"].count()
compound_smile_list = compound_smile_dict_filtered['Compound'].tolist()
hand_collected_smiles = pd.read_csv("../data/protein/cgan/data_sources/missing_components_for_chosen_class-noproteinorrnaEC346.txt",
sep='\t', header=None, names = ["Component", "SMILES"])
i = 0
n_found = 0
results = []
for i in tqdm(range(len(compound_smile_list))):
compound = compound_smile_list[i]
matching_smiles = hand_collected_smiles[(hand_collected_smiles["Component"].str.strip() == compound.strip())].SMILES.values
if len(matching_smiles) == 0:
smiles = "Not found"
#print ("Compound: {} \tSmiles: {} | original: {}".format(compound, smiles, compound))
else:
n_found = n_found + 1
smiles = matching_smiles[0]
#print ("Compound: {} \tSmiles: {}".format(compound, smiles))
compound_smile_dict.loc[compound_smile_dict.Compound == compound_smile_list[i], "Smiles"] = smiles
print("Matched compounds: {0:.1%}".format(float(n_found)/(i+1)))
print("Not matched: {}".format(i + 1 - n_found))
compound_smile_dict[compound_smile_dict.Smiles == "Not found"].count()
compound_smile_dict.Smiles = compound_smile_dict.Smiles.str.strip()
compound_smile_dict.Smiles = compound_smile_dict.Smiles.str.replace(" ", "")
compound_smile_dict.to_csv("../data/protein/cgan/compound_to_smile.csv", sep='\t', header=True, index=None)
```
## Filtering out non common smiles characters
```
compound_smile_dict = pd.read_csv("../data/protein/cgan/compound_to_smile.csv", sep='\t')
compound_smile_dict = compound_smile_dict[compound_smile_dict.Compound != "Compound"]
compound_smile_dict = compound_smile_dict[compound_smile_dict.Smiles != "Not found"]
compound_smile_dict
compound_smile_dict[compound_smile_dict.Smiles.str.contains('%')]
# Legend of rare characters appearing in SMILES strings and the elements they
# come from: A - "As", B - "Br", K - "K", M - "Mo"/"Mg", W - "W", Z - "Zn",
# a - "Na"/"Ca", d - "Cd", e - "Fe", g - "Mg", i - "Ni", l - "Cl", r - "Br", u - "Cu"
set_smiles_characters = set([])
[ set_smiles_characters.update(set(val.strip())) for index, val in compound_smile_dict.Smiles.iteritems()]
set_smiles_characters.add(' ')
set_smiles_characters = list(sorted(set_smiles_characters))
common_characters = []
filter_out = []
for item in set_smiles_characters:
count = compound_smile_dict[compound_smile_dict.Smiles.str.contains(re.escape(item))].count().Compound
print("{} - count: {}".format(item, count))
if count >= 100:
common_characters.append(item)
else:
filter_out.append(item)
common_characters.append(" ")
common_characters = sorted(common_characters)
indexToCharacter = {i:common_characters[i] for i in range (len(common_characters))}
characterToIndex = {common_characters[i]:i for i in range (len(common_characters))}
indexToCharacter, characterToIndex
filtered_out = compound_smile_dict
for non_common_character in filter_out:
filtered_out = filtered_out[~filtered_out.Smiles.str.contains(re.escape(non_common_character))]
filtered_out.shape
filtered_out[filtered_out.Smiles.str.contains('%')]
filtered_out.to_csv("../data/protein/cgan/compound_to_smile_filtered.csv", sep='\t', header=True, index=None)
```
# Getting ready for CWGAN
```
#compound_smile_dict = pd.read_csv("../data/protein/cgan/compound_to_smile_filtered.csv", sep='\t')
compound_smile_dict = pd.read_csv("../data/protein/cgan/compound_to_smile.csv", sep='\t')
proteins = pd.read_csv("../data/protein/cgan/enzyme_reaction_splitted_in_components.csv", sep="\t").drop("Unnamed: 0", axis= 1)
compound_smile_dict.shape, proteins.shape
compound_smile_dict = compound_smile_dict[compound_smile_dict.Compound != "Compound"]
compound_smile_dict = compound_smile_dict[compound_smile_dict.Smiles != "Not found"]
compound_smile_dict.head(10)
proteins.head()
proteins = proteins[~proteins.product_3.notnull()]
proteins = proteins[~proteins.substrate_3.notnull()]
proteins.shape
smiles_columns = ["product_1", "product_2", "substrate_1", "substrate_2"]
proteins_for_joining = proteins[["EC class", *smiles_columns]]
#proteins_for_joining = proteins
proteins_for_joining.head(3)
proteins_for_joining.shape
column_list = proteins_for_joining.columns.tolist()
column_list.remove('EC class')
# column_list.remove('Reaction')
column_list
#for column in smiles_columns:
for column in column_list:
proteins_for_joining = pd.merge(proteins_for_joining, compound_smile_dict, left_on = column,
right_on="Compound", how="left").drop("Compound", axis=1)
proteins_for_joining = proteins_for_joining.rename(columns={"Smiles": "smiles_"+column})
#proteins_for_joining = proteins_for_joining[proteins_for_joining["smiles_"+column] != "Not found"]
proteins_for_joining.head(3)
promissing = pd.read_csv("../data/protein/cgan/promissing_classes_all.txt", sep='\t')
missing_components = pd.merge(promissing, proteins_for_joining, left_on = "EC class",
right_on="EC class", how="inner").set_index("EC class")
missing_components = missing_components[missing_components.product_4.isnull()]
missing_components = missing_components[missing_components.substrate_4.isnull()]
missing_components
product1 = missing_components[missing_components.smiles_product_1 == "Not found"].product_1.tolist()
product2 = missing_components[missing_components.smiles_product_2 == "Not found"].product_2.tolist()
substrate1 = missing_components[missing_components.smiles_substrate_1 == "Not found"].substrate_1.tolist()
substrate2 = missing_components[missing_components.smiles_substrate_2 == "Not found"].substrate_2.tolist()
with open('../data/protein/cgan/missing_components_for_chosen_class_2.txt', 'w') as f:
for item in set(product1 +product2 + substrate1 + substrate2):
f.write("%s\n" % item)
missing_components.to_csv("../data/protein/cgan/missing_components_for_interesting_classes_2.txt", "\t")
missing_components = missing_components[(missing_components.product_3.isna()) & (missing_components.substrate_3.isna())]
final_dataset = proteins_for_joining.drop(smiles_columns, axis= 1)
final_dataset[final_dataset["EC class"].str.startswith("1")].count()
proteins[proteins["EC class"].str.startswith("5.3.1.1")].head(100)
final_dataset.to_csv("../data/protein/cgan/enzyme_with_smiles.csv", sep='\t', index=None)
```
# Sequences with smiles to numpy
```
import pandas as pd
import numpy as np
from common.bio.smiles import *
from common.bio.amino_acid import *
SMILES_LENGTH=100
SEQUENCE_LENGTH=256
enzyme_with_smiles = pd.read_csv("../data/protein/cganenzyme_with_smiles.csv", sep='\t')
enzyme_with_smiles.head()
smiles_columns = enzyme_with_smiles.columns.values[1:]
list(smiles_columns)
lengths = []
for col in smiles_columns:
lengths.append(enzyme_with_smiles[col].str.len().max())
max(lengths)
enzyme_with_smiles.sort_values("EC class")
```
Filtering out rows that are too huge
```
filtered_sequences_with_smiles = sequences_with_smiles[sequences_with_smiles.Sequence.str.len()<=SEQUENCE_LENGTH]
filtered_sequences_with_smiles["smiles_product_1"].head()
for col in smiles_columns:
filtered_sequences_with_smiles = filtered_sequences_with_smiles[filtered_sequences_with_smiles[col].str.len()
<= SMILES_LENGTH]
filtered_sequences_with_smiles.shape
```
### Train and validation splits
```
val_classes = ["1.3.5.2","2.3.3.13", "3.5.1.18","4.1.1.49", "6.1.1.18", "2.7.1.216", "3.5.1.125", "4.1.1.87", "1.1.1.83",
"3.6.1.66", "1.15.1.1", "2.7.1.71", "6.3.1.5"]
val_split_classes = ["2.5.1.6", "1.6.1.66", "4.1.1.39", "6.1.1.15","3.5.4.13"]
train = filtered_sequences_with_smiles[~filtered_sequences_with_smiles["EC number"].isin(val_classes)]
train = train[~train["EC number"].isin(val_split_classes)]
train.shape
val = filtered_sequences_with_smiles[filtered_sequences_with_smiles["EC number"].isin(val_classes)]
val.shape
to_split = filtered_sequences_with_smiles[filtered_sequences_with_smiles["EC number"].isin(val_split_classes)]
to_split = to_split.sample(frac=1)
split_point = int(to_split.shape[0]/2)
add_to_train = to_split.iloc[:split_point, :]
add_to_val = to_split.iloc[split_point:, :]
add_to_train.shape, add_to_val.shape, split_point
val = val.append(add_to_val)
train = train.append(add_to_train)
train.shape, val.shape
```
Converting SMILES to IDs
```
for col in smiles_columns:
print("Working with Column {}".format(col))
filtered_sequences_with_smiles[col] = filtered_sequences_with_smiles[col].str.ljust(SMILES_LENGTH, '0')
filtered_sequences_with_smiles[col] = from_smiles_to_id(filtered_sequences_with_smiles, col)
```
Converting Amino acids to IDs
```
filtered_sequences_with_smiles.Sequence = filtered_sequences_with_smiles.Sequence.str.ljust(SEQUENCE_LENGTH, '0')
filtered_sequences_with_smiles["Sequence"] = from_amino_acid_to_id(filtered_sequences_with_smiles, "Sequence")
val.groupby("EC number").count()
```
### Code to find some data for validation
```
temp = (filtered_sequences_with_smiles.groupby("EC number").size().reset_index(name='counts')
.sort_values("counts", ascending=False))
#1.3.5.2, 2.3.3.13, 3.5.1.18, 4.1.1.49, 6.1.1.18
#2.5.1.6 784
#4.1.1.39 774
#3.6.1.66 768
#6.1.1.15 758
path = "../data/protein/cgan/{}/".format("mini_sample")
path
train
def save_cgan_data(data, data_type, path):
    """Save the Sequence column and every SMILES column of *data* as .npy files.

    Files are written as "<path><data_type>_seq.npy" and
    "<path><data_type>_<col>.npy" — `path` is assumed to end with a path
    separator (as constructed in the cell above).
    NOTE(review): relies on the notebook-level global `smiles_columns`.
    """
    np.save(path+"{}_seq.npy".format(data_type), data.Sequence.values)
    for col in smiles_columns:
        np.save(path + "{}_{}.npy".format(data_type, col), data[col].values)
save_cgan_data(train, "train", path)
save_cgan_data(val, "val", path)
val.to_csv("../data/smiles/val_sequences_with_smiles.csv", sep='\t')
train.to_csv("../data/train_sequences_with_smiles.csv", sep='\t')
```
# Auto Encoder
```
SMILES_LENGTH = 128
import csv
kegg = pd.read_csv("../data/kegg_smiles.csv", header=None, names=["smiles"], quotechar="'")
kegg.size
kegg.smiles = kegg.smiles.str.strip()
kegg.head()
kegg = kegg[kegg.smiles.str.len() < SMILES_LENGTH]
kegg.size
kegg.smiles = kegg.smiles.str.ljust(SMILES_LENGTH, ' ')
kegg.smiles
set_smiles_characters = set([])
[ set_smiles_characters.update(set(val)) for index, val in kegg.smiles.iteritems() ]
set_smiles_characters = list(sorted(set_smiles_characters))
len(set_smiles_characters)
indexToCharacter = {i:set_smiles_characters[i] for i in range (len(set_smiles_characters))}
characterToIndex = {set_smiles_characters[i]:i for i in range (len(set_smiles_characters))}
indexToCharacter, characterToIndex
kegg["smiles_converted"] = [[characterToIndex[char] for char in val ] for index, val in kegg.smiles.iteritems()]
kegg.head()
from sklearn.model_selection import train_test_split
train, test = train_test_split(kegg, test_size=0.2)
len(train), len(test)
import numpy as np
def to_array(data, features):
    """Convert a column of per-row sequences into a stacked numpy array."""
    rows = [np.asarray(element) for element in data[features].values]
    return np.asarray(rows)
train_feature_data= to_array(train,'smiles_converted')
val_feature_data = to_array(test,'smiles_converted')
train_feature_data.shape, val_feature_data.shape
np.save("../data/protein/smiles/train_smiles_"+str(SMILES_LENGTH),train_feature_data)
np.save("../data/protein/smiles/val_smiles_"+str(SMILES_LENGTH), val_feature_data)
```
| github_jupyter |
attempt 1
```
from openmmtools.testsystems import HostGuestExplicit
hge = HostGuestExplicit()
system, positions, topology = hge.system, hge.positions, hge.topology
from qmlify.openmm_torch.force_hybridization import HybridSystemFactory
from simtk import unit
import qmlify
qmlify
hge.system.getForces()
from openmmtools.testsystems import HostGuestExplicit
T = 300*unit.kelvin
system, positions, topology = hge.system, hge.positions, hge.topology
system.removeForce(system.getNumForces() - 1) # remove the CMMotionRemover force because it is unknown
_atoms = list(range(126,156)) #these atoms correspond to the guest. query these with the second residue in the topology
system.getForces()
hsf = HybridSystemFactory(topology = topology,
alchemical_residue_indices = [1],
system = system,
softcore_alpha_sterics = 0.5,
softcore_alpha_electrostatics = 0.5)
# grab the modified system and endstate system...
mod_system = hsf.system
endstate_system = hsf.endstate_system
```
now that we have the modified system, we want to get the energy at _this_ endstate and make sure the energy is bookkeeping well with the non-alchemically-modified state.
```
from openmmtools.integrators import LangevinIntegrator
from simtk import openmm
nonalch_int = LangevinIntegrator(temperature=T)
alch_int = LangevinIntegrator(temperature=T)
nonalch_context, alch_context = openmm.Context(system, nonalch_int), openmm.Context(mod_system, alch_int)
for context in [nonalch_context, alch_context]:
context.setPositions(positions)
context.setPeriodicBoxVectors(*system.getDefaultPeriodicBoxVectors())
nonalch_context.getState(getEnergy=True).getPotentialEnergy()
alch_context.getState(getEnergy=True).getPotentialEnergy()
from simtk.openmm import LocalEnergyMinimizer
LocalEnergyMinimizer.minimize(alch_context, maxIterations=10)
alch_context.getState(getPositions=True).getPositions(asNumpy=True)
```
we're only off by a thousandth of a kj/mol.
if this is an artifact of the nonbonded term, we can safely ignore it.
```
from qmlify.openmm_torch.utils import *
from openmmtools.constants import kB
beta = 1. / (T * kB)
from openmmtools import utils
platform = utils.get_fastest_platform()
compute_potential_components(nonalch_context, beta, platform)
compute_potential_components(alch_context, beta, platform)
```
so it is nonbonded. can we write a function that pushed the alchemical context to the opposite endstate and asserts that all of the custom forces go to zero?
first, let's gather the alchemical lambdas that must change...
```
swig_params = alch_context.getParameters()
for i in swig_params:
print(i, swig_params[i])
final_lambdas = {'lambda_MM_bonds' : 0.,
'lambda_MM_angles': 0.,
'lambda_MM_torsions': 0.,
'lambda_nonbonded_MM_sterics' : 1.,
'lambda_nonbonded_MM_electrostatics': 1.,
}
for key, val in final_lambdas.items():
alch_context.setParameter(key, val)
compute_potential_components(alch_context, beta, platform)
swig_params = alch_context.getParameters()
for i in swig_params:
print(i, swig_params[i])
```
alright! now can we add the torchforce?
```
from qmlify.openmm_torch.torchforce_generator import torch_alchemification_wrapper
ml_system, hsf_mod = torch_alchemification_wrapper(topology, system, [1])
ml_system.getForces()
ml_int = LangevinIntegrator(splitting = 'V0 V1 R O R V1 V0')
ml_context = openmm.Context(ml_system, ml_int)
ml_context.setPositions(positions)
ml_context.setPeriodicBoxVectors(*system.getDefaultPeriodicBoxVectors())
ml_context.getState(getEnergy=True).getPotentialEnergy()
ml_context.getParameters().items()
compute_potential_components(ml_context, beta, platform)
```
racket!
```
#out_ml_system = copy.deepcopy(ml_context.getSystem())
for param, val in ml_context.getParameters().items():
print(param, val)
ml_context.getState(getEnergy=True)
compute_potential_components(ml_context, beta, platform)
ml_params = ml_context.getParameters()
for (parameter, value) in ml_params.items():
print(parameter, value)
ml_context.setParameter('torch_scale', 1.)
ml_context.setParameter('auxiliary_torch_scale', 2.)
import tqdm
swig_params = ml_context.getParameters()
ml_context.getState(getEnergy=True, groups={0}).getPotentialEnergy()
ml_context.getState(getEnergy=True, groups={1}).getPotentialEnergy()
for i in swig_params:
print(i, swig_params[i])
ml_energies, energies = [], []
for i in tqdm.trange(500):
ml_int.step(1)
alch_int.step(1)
ml_energies.append(ml_context.getState(getEnergy=True).getPotentialEnergy())
energies.append(alch_context.getState(getEnergy=True).getPotentialEnergy())
import matplotlib.pyplot as plt
ml_energies = [energy.value_in_unit_system(unit.md_unit_system) for energy in ml_energies]
energies = [energy.value_in_unit_system(unit.md_unit_system) for energy in energies]
plt.plot(ml_energies)
plt.plot(energies)
```
| github_jupyter |
# Customer Churn Analysis
This notebook is using customer churn data from Kaggle (https://www.kaggle.com/sandipdatta/customer-churn-analysis) and has been adopted from the notebook available on Kaggle developed by SanD.
The notebook will go through the following steps:
1. Import Dataset
2. Analyze the Data
3. Prepare the data model building
4. Split data in test and train data
5. Train model using various machine learning algorithms for binary classification
6. Evaluate the models
7. Select the model best fit for the given data set
8. Save and deploy model to Watson Machine Learning
```
from sklearn import model_selection
from sklearn import tree
from sklearn import svm
from sklearn import ensemble
from sklearn import neighbors
from sklearn import linear_model
from sklearn import metrics
from sklearn import preprocessing
%matplotlib inline
from IPython.display import Image
import matplotlib as mlp
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import seaborn as sns
import json
```
## Dataset
The original dataset can be downloaded from https://www.kaggle.com/becksddf/churn-in-telecoms-dataset/data. Then upload it to IBM Watson Studio and insert the code to read the data using "insert to code > Insert pandas DataFrame".
```
# @hidden cell
# Click the 0100 data icon at the upper part of the page to open the Files subpanel.
# In the right part of the page, select the Customer Churn .csv data set. Click insert to code, and select Insert pandas DataFrame.
# make sure you assign the dataframe to the variable "df"
df = df_data_1
print (df.shape)
```
Examine the first 5 lines of the input
```
df.head()
# Distribution of the target variable: churned vs. retained customer counts.
y = df["churn"].value_counts()
# seaborn >= 0.12 removed positional data arguments from barplot();
# x and y must be passed as keywords.
sns.barplot(x=y.index, y=y.values)
# Subset of rows where the boolean "churn" column is True (churned customers).
y_True = df["churn"][df["churn"] == True]
print("Churn Percentage = "+str( (y_True.shape[0] / df["churn"].shape[0]) * 100 )+"%")
```
## Descriptive Analysis of the Data
```
df.describe()
```
### Churn by State
```
df.groupby(["state", "churn"]).size().unstack().plot(kind='bar', stacked=True, figsize=(30,10))
```
### Churn by Area Code
```
df.groupby(["area code", "churn"]).size().unstack().plot(kind='bar', stacked=True, figsize=(5,5))
```
### Churn by customers with International Plan
```
df.groupby(["international plan", "churn"]).size().unstack().plot(kind='bar', stacked=True, figsize=(5,5))
```
### Churn By Customers with Voice mail plan
```
df.groupby(["voice mail plan", "churn"]).size().unstack().plot(kind='bar', stacked=True, figsize=(5,5))
```
## Data Preparation
The following preprocessing steps need to be done:
1. Turn categorical variables into discrete numerical variables
2. Create response vector
3. Drop superfluous columns
4. Build feature matrix
5. Standardize feature matrix values
### Encode categorical columns
```
# Discrete-value integer encoder: maps each distinct string to an integer code.
label_encoder = preprocessing.LabelEncoder()
# State, international plan and voice mail plan are strings; encode them as
# discrete integer codes. fit_transform refits the encoder for each column,
# so the integer codes are independent between columns.
df['state'] = label_encoder.fit_transform(df['state'])
df['international plan'] = label_encoder.fit_transform(df['international plan'])
df['voice mail plan'] = label_encoder.fit_transform(df['voice mail plan'])
print (df.dtypes)
print (df.shape)
df.head()
```
### Create response vector
```
# Response vector: churn labels rendered as strings ("True"/"False").
# np.str was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# str produces the same unicode dtype.
y = df['churn'].values.astype(str)
y.size
```
### Drop superfluous columns
```
# df = df.drop(["Id","Churn"], axis = 1, inplace=True)
df.drop(["phone number","churn"], axis = 1, inplace=True)
df.head()
```
### Build feature matrix
```
# Feature matrix: every remaining column cast to a float array.
# np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# float (equivalent to np.float64 here) is the supported spelling.
X = df.values.astype(float)
print(X)
X.shape
```
### Standardize Feature Matrix values
```
scaler = preprocessing.StandardScaler()
X = scaler.fit_transform(X)
X
```
This completes the data preparation steps.
## Split Train/Test Validation Data
We need to adopt Stratified Cross Validation - Since the Response values are not balanced
```
def stratified_cv(X, y, clf_class, shuffle=True, n_folds=10):
    """Return out-of-fold predictions for every sample via stratified K-fold CV.

    The estimator passed as ``clf_class`` (despite the name, an estimator
    *instance*) is refit on the training portion of each fold and used to
    predict the held-out portion, so every entry of the returned vector is
    a prediction from a model that never saw that sample during fitting.
    Stratification keeps the class balance of *y* in every fold.
    """
    splitter = model_selection.StratifiedKFold(n_splits=n_folds, shuffle=shuffle)
    predictions = y.copy()
    for train_idx, test_idx in splitter.split(X, y):
        estimator = clf_class
        estimator.fit(X[train_idx], y[train_idx])
        predictions[test_idx] = estimator.predict(X[test_idx])
    return predictions
```
## Build Models and Train
We will build models using a variety of approaches to see how they compare:
```
# create classifiers
from sklearn.ensemble import GradientBoostingClassifier
gradient_boost = GradientBoostingClassifier()
from sklearn.svm import SVC
svc_model = SVC(gamma='auto')
from sklearn.ensemble import RandomForestClassifier
random_forest = RandomForestClassifier(n_estimators=10)
from sklearn.neighbors import KNeighborsClassifier
k_neighbors = KNeighborsClassifier()
from sklearn.linear_model import LogisticRegression
logistic_regression = LogisticRegression(solver='lbfgs')
print('Gradient Boosting Classifier: {:.2f}'.format(metrics.accuracy_score(y, stratified_cv(X, y, gradient_boost))))
print('Support vector machine(SVM): {:.2f}'.format(metrics.accuracy_score(y, stratified_cv(X, y, svc_model))))
print('Random Forest Classifier: {:.2f}'.format(metrics.accuracy_score(y, stratified_cv(X, y, random_forest))))
print('K Nearest Neighbor Classifier: {:.2f}'.format(metrics.accuracy_score(y, stratified_cv(X, y, k_neighbors))))
print('Logistic Regression: {:.2f}'.format(metrics.accuracy_score(y, stratified_cv(X, y, logistic_regression))))
```
## Model Evaluation
We will now generate confusion matrices for the various models to analyze the prediction in more detail.
### Gradient Boosting Classifier
```
grad_ens_conf_matrix = metrics.confusion_matrix(y, stratified_cv(X, y, gradient_boost))
sns.heatmap(grad_ens_conf_matrix, annot=True, fmt='');
title = 'Gradient Boosting'
plt.title(title);
```
### Support Vector Machines
```
svm_svc_conf_matrix = metrics.confusion_matrix(y, stratified_cv(X, y, svc_model))
sns.heatmap(svm_svc_conf_matrix, annot=True, fmt='');
title = 'SVM'
plt.title(title);
```
### Random Forest
```
random_forest_conf_matrix = metrics.confusion_matrix(y, stratified_cv(X, y, random_forest))
sns.heatmap(random_forest_conf_matrix, annot=True, fmt='');
title = 'Random Forest'
plt.title(title);
```
### Logistic Regression
```
logistic_regression_conf_matrix = metrics.confusion_matrix(y, stratified_cv(X, y, logistic_regression))
sns.heatmap(logistic_regression_conf_matrix, annot=True, fmt='');
title = 'Logistic Regression'
plt.title(title);
```
### Classification Report
```
print('Gradient Boosting Classifier:\n {}\n'.format(metrics.classification_report(y, stratified_cv(X, y, gradient_boost))))
print('Support vector machine(SVM):\n {}\n'.format(metrics.classification_report(y, stratified_cv(X, y, svc_model))))
print('Random Forest Classifier:\n {}\n'.format(metrics.classification_report(y, stratified_cv(X, y, random_forest))))
```
## Final Model Selection
Gradient Boosting seems to do comparatively better for this case
```
gbc = ensemble.GradientBoostingClassifier()
gbc.fit(X, y)
# Get Feature Importance from the classifier
feature_importance = gbc.feature_importances_
print (gbc.feature_importances_)
feat_importances = pd.Series(gbc.feature_importances_, index=df.columns)
feat_importances = feat_importances.nlargest(19)
feat_importances.plot(kind='barh' , figsize=(10,10))
```
## Save and Deploy model to Watson Machine Learning
```
# Provide your credentials
# If your WML service is deployed in US-South use the URL https://us-south.ml.cloud.ibm.com
# To generate a new API Key go to https://cloud.ibm.com/iam/apikeys and create one
from ibm_watson_machine_learning import APIClient
wml_credentials = {
"url": "https://us-south.ml.cloud.ibm.com",
"apikey":"YOUR_API_KEY_HERE"
}
client = APIClient(wml_credentials)
print(client.version)
!pip install -U ibm-watson-machine-learning
```
Working with spaces:
First of all, you need to create a space that will be used for your work. If you do not have space already created, you can use <a href="https://dataplatform.cloud.ibm.com/ml-runtime/spaces?context=cpdaas" target="_blank">Deployment Spaces Dashboard</a> to create one.
- Click New Deployment Space
- Create an empty space
- Select Cloud Object Storage
- Select Watson Machine Learning instance and press Create
- Copy space_id and paste it below, you will find it in your (Deployment Space)-URL, it will look like this: "1234a1b-cd5e-6fg7-8hi9-11jkl2mno34p"
Working with projects (for this tutorial you do not need to provide your Project ID):
- Go to your project, you can reach it from your Cloud Pak for Data as a Service Overview Page
- Copy project_id and paste it below, you will find it in your (Project)-URL, it will look like this: "1234a1b-cd5e-6fg7-8hi9-11jkl2mno34p"
```
# project id and space id
# both can be found in the URL
# project_id = ""
# client.set.default_project(project_id)
# deployment space
space_id = "YOUR_SPACE_ID_HERE"
client.set.default_space(space_id)
# Use this cell to do any cleanup of previously created models and deployments
client.repository.list_models()
client.deployments.list()
client.spaces.list(limit=10)
#client.repository.delete('GUID of stored model')
#client.deployments.delete('GUID of deployed model')
sofware_spec_uid = client.software_specifications.get_id_by_name("default_py3.7")
# store the model in WML
metadata={
client.repository.ModelMetaNames.NAME: "GBC Workshop Python 3.7 V2",
client.repository.ModelMetaNames.TYPE: 'scikit-learn_0.23',
client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: sofware_spec_uid
}
published_model = client.repository.store_model(
model=gbc,
meta_props=metadata,
training_data=X, training_target=y
)
# new list of models
client.repository.list_models()
# get UID of our just stored model
model_uid = client.repository.get_model_uid(published_model)
print("Model id: {}".format(model_uid))
# create deployment
metadata = {
client.deployments.ConfigurationMetaNames.NAME: "Deployment of GBC Workshop Python 3.7 V2",
# client.deployments.ConfigurationMetaNames.VIRTUAL: {"export_format": "coreml"},
client.deployments.ConfigurationMetaNames.ONLINE:{}
}
created_deployment = client.deployments.create(model_uid, meta_props=metadata)
# test your model with some test data
deployment_id = client.deployments.get_id(created_deployment)
scoring_data = {
client.deployments.ScoringMetaNames.INPUT_DATA: [
{
'fields': ['state', 'account length', 'area code', 'international plan', 'voice mail plan', 'number vmail messages', 'total day minutes', 'total day calls', 'total day charge', 'total eve minutes', 'total eve calls', 'total eve charge', 'total night minutes', 'total night calls', 'total night charge', 'total intl minutes', 'total intl calls', 'total intl charge', 'customer service calls'],
'values': [[2,162,415,0,0,0,70.7,108,12.02,157.5,87,13.39,154.8,82,6.97,9.1,3,2.46,4]]
}]
}
predictions = client.deployments.score(deployment_id, scoring_data)
print("The Prediction output regarding customer churn will be displayed in this format 1 for True or 0 for False: \n ", predictions)
```
## Acknowledgement
The approach and code fragments have been adopted from the notebook on Kaggle by Sandip Datta (https://www.kaggle.com/sandipdatta).
The full original notebook can be viewed here: https://www.kaggle.com/sandipdatta/customer-churn-analysis#
| github_jupyter |
# Batch Job Analysis - Data Prepare - Extract from SMF
*Note: for reference only, no input/output sample data file provided*
**This sample notebook will demonstrate how to extract Batch Job log data from SMF Type 30 record and prepare for further analytics.**
Input data file is n days of SMF Type 30 record collected on z/OS named as HLQ.T2019XXXX.SMF30:<br>
> HLQ.T20190001.SMF30<br>
HLQ.T20190002.SMF30<br>
HLQ.T20190003.SMF30<br>
.................<br>
**Key Steps includes:**
1. Extract everyday's batch jobs log data from SMF Type 30 record
2. Combine n days jobs data from csv files into one data frame for analysis
3. Remove uninterested batch job records
4. Calculate some interested metrics from original data
## Step 1: Extract batch job run log data from SMF Type 30 record
**Demonstrate how to read SMF Type 30 record into dataframe **</p>
***Note:***
>1.It should be run on WMLz Platform with MDS(Mainframe Data Service) driver installed<br>
2.Ask for your MDS administrator for ssid,username and password<br>
3.With Mainframe Data Service Studio, you could get predefined SQL statement and view for SMF Type 30 record<br>
4.Refer to SMF Type 30 document to understand every metrics
```
import dsdbc
CONN = dsdbc.connect(SSID='MDS_ssid', USER='MDS_user', PASSWORD='MDS_password')
import pandas as pd
import datetime
#path variable for SMF data output path, when change to another environment, need to change it according to data file location
SMF_DATA_PATH=r"/username/smf/"
START_TIME=datetime.datetime.now()
print("Start:",START_TIME)
FIRST_DAY=1
LAST_DAY=91
for INDEX in range(FIRST_DAY,LAST_DAY,1):
#to read SMF Type 30 record from mainframe system, HLQ.T2019001.SMF30 to get day 001 information,
#write to a csv file as 'df_D0001.csv' for further merge
#SMF record size maybe very large, recommand to start from small file, e.g. seperate by day
#convert to csv file will make further processing fast and convinient
DAY_STR=str(INDEX).rjust(4,'0')
SMF_FILE_PRE='SMF_03000'
SMF_FILE_POST= "__HLQ_T2019"+DAY_STR+"_SMF30" #align to real SMF record name
SMF_30_TABLE = SMF_FILE_PRE+SMF_FILE_POST
SMF_FILE_PRE='SMF_03000_SMF30'
SMF_30_ID_TABLE = SMF_FILE_PRE+'ID'+SMF_FILE_POST
SMF_30_CAS_TABLE = SMF_FILE_PRE+'CAS'+SMF_FILE_POST
SMF_30_PRF_TABLE = SMF_FILE_PRE+'PRF'+SMF_FILE_POST
SMF_30_URA_TABLE = SMF_FILE_PRE+'URA'+SMF_FILE_POST
SMF_30_SAP_TABLE = SMF_FILE_PRE+'SAP'+SMF_FILE_POST
QUERY='select SMF30JBN As JOB_NAME,SMF30JNM AS JOB_NUM,SMF_SSI AS TYPE,SMF_SID as SYSTEM,SMF30CLS as CLASS,\
SMF30RSD as REQUEST_D,SMF30RST REQUEST_T,SMF30STD START_D,SMF30SIT as START_T,\
SUBSTR(SMF_TIME,1,10) AS END_D,SUBSTR(SMF_TIME,12,11) AS END_T,\
SUBSTR(SMF_TIME,1,23) AS END_DTSTR,\
CAS.SMF30CPT/100 as TCB_CPU_SEC,CAS.SMF30CPS/100 as SRB_CPU_SEC,\
(SMF30CSU /160000) * SMF30SUS as TCB_CPU_MS,\
(SMF30SRB /160000) * SMF30SUS as SRB_CPU_MS,\
URA.SMF30TEP as EXCP,(URA.SMF30TCN/1000000)*128 as IO_CONN_SEC,URA.SMF30AIS as SSCH,\
PRF.SMF30SRV as SERV_UNIT,PRF.SMF30CSU as TCB_UNIT,PRF.SMF30SRB as SRB_UNIT,PRF.SMF30IO as IO_UNIT,PRF.SMF30MSO as MSO_UNIT,\
(PRF.SMF30JQT*1024/1000000) as JQ_SEC,(PRF.SMF30RQT*1024/1000000) as RQ_SEC,\
(PRF.SMF30HQT*1024/1000000) as HQ_SEC,(PRF.SMF30SQT*1024/1000000) as SQ_SEC,\
PRF.SMF30SCN as SERV_CLASS,\
ID.SMF30USR as USER_NAME,\
SAP.SMF30PGI as PAGE_IN,SAP.SMF30PGO as PAGE_OUT,SAP.SMF30NSW as PAGE_SWAP \
FROM '+SMF_30_TABLE+' A0 \
JOIN '+SMF_30_ID_TABLE+' ID ON A0.CHILD_KEY=ID.PARENT_KEY \
JOIN '+SMF_30_PRF_TABLE+' PRF ON A0.CHILD_KEY=PRF.PARENT_KEY \
JOIN '+SMF_30_CAS_TABLE+' CAS ON A0.CHILD_KEY=CAS.PARENT_KEY \
JOIN '+SMF_30_URA_TABLE+' URA ON A0.CHILD_KEY=URA.PARENT_KEY \
JOIN '+SMF_30_SAP_TABLE+' SAP ON A0.CHILD_KEY=SAP.PARENT_KEY \
where A0.SMF_STY=5'
#following statement will extract data from SMF record, may cost some time according to you SMF record size
DF=pd.read_sql(QUERY,con=CONN)
DF.to_csv(SMF_DATA_PATH +"df_D"+ DAY_STR + ".csv")
print("After write "+"df_D"+ DAY_STR + ".csv",datetime.datetime.now())
```
## Step 2: Merge n days jobs data from n csv files into one file
```
# Merge the per-day CSV extracts into a single DataFrame.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
# appending inside the loop re-copied the accumulated frame each iteration
# (quadratic cost); collect the daily frames and concatenate once instead.
DAILY_FRAMES = []
for INDEX in range(FIRST_DAY, LAST_DAY, 1):
    SMF_30_FILE = SMF_DATA_PATH + "df_D" + str(INDEX).rjust(4, '0') + ".csv"
    DAILY_FRAMES.append(pd.read_csv(SMF_30_FILE, encoding='ISO-8859-1'))
DF = pd.concat(DAILY_FRAMES) if DAILY_FRAMES else pd.DataFrame()
DF.to_csv(SMF_DATA_PATH + 'df_all.csv')
print("After write df_all.csv:", datetime.datetime.now())
```
## Step 3: Remove uninterested batch job records
```
# Reload the merged CSV; this is a convenient restart point once all the
# necessary data has been collected from SMF.
DF = pd.read_csv(SMF_DATA_PATH + 'df_all.csv', encoding='ISO-8859-1')
print("After read df_all.csv:", datetime.datetime.now())
# Keep only jobs starting in the batch window, 20:00 through 06:00 the next
# day (START_T is in hundredths of a second past midnight: 7200000 == 20:00).
in_window = DF['START_T'].apply(lambda t: (t >= 7200000) or (t < 2160000))
DF = DF[in_window]
# Keep only jobs whose JES id has the 'JOBxxxxxx' form.
is_batch_job = DF['JOB_NUM'].apply(lambda num: num[0:3] == 'JOB')
DF = DF[is_batch_job]
# Drop service class BATCHXXX, whose jobs may run for several days.
not_long_runner = DF['SERV_CLASS'].apply(lambda s: s != 'BATCHXXX')
DF = DF[not_long_runner]
```
## Step 4: Calculate some interested metrics from original SMF record
- START_DT: job start running date and time
- END_DT: job finish date and time
- ELAPSED_TIME: duration of job run, the second between START_DT and END_DT
- CPU_SEC: second of CPU run time on the job
- QUEUE_SEC: second of job waiting in various queue
```
#translate SMF datetime to normal python datetine
import datetime, time
def SmfConv2Dt(d, t):
    """Convert an SMF date/time pair to a Python datetime.

    SMF stores dates as the integer 0cyyddd (cyy = years since 1900,
    ddd = day of year, 1-based) and times of day as hundredths of a
    second past midnight.

    Parameters
    ----------
    d : SMF date, e.g. 119001 -> day 1 of 2019 (Jan 1, 2019).
    t : SMF time of day, in 1/100 seconds.

    Returns
    -------
    datetime.datetime combining the decoded date and time.
    """
    # divmod splits 0cyyddd into the year offset and the day-of-year in one step.
    years_since_1900, day_of_year = divmod(int(d), 1000)
    return (datetime.datetime(1900 + years_since_1900, 1, 1)
            + datetime.timedelta(days=day_of_year - 1)
            + datetime.timedelta(seconds=t / 100))
def Dt2Sec(DT):
    """Return the number of seconds between the Unix epoch (1970-01-01) and DT."""
    epoch = datetime.datetime(1970, 1, 1)
    return (DT - epoch) / datetime.timedelta(seconds=1)
DF['START_DT']=DF.apply(lambda row:SmfConv2Dt(row['START_D'],row['START_T']),axis=1)
DF['START_DTSTR']=DF['START_DT'].apply(lambda dt:datetime.datetime.strftime(dt,'%Y-%m-%d-%H.%M.%S.000000'))
DF['START_SEC']=DF['START_DT'].apply(lambda dt:Dt2Sec(dt))
DF['REQUEST_DT']=DF.apply(lambda row:SmfConv2Dt(row['REQUEST_D'],row['REQUEST_T']),axis=1)
DF['REQUEST_SEC']=DF['REQUEST_DT'].apply(lambda dt:Dt2Sec(dt))
DF['END_DT']=DF['END_DTSTR'].apply(lambda dt_str:datetime.datetime.strptime(dt_str,'%Y-%m-%d-%H.%M.%S.%f'))
DF['END_SEC']=DF['END_DT'].apply(lambda dt:Dt2Sec(dt))
DF['ELAPSED_SEC']=DF['END_SEC']-DF['START_SEC']
DF['CPU_SEC']=DF['TCB_CPU_SEC']+DF['SRB_CPU_SEC']
DF['CPU_MS']=DF['TCB_CPU_MS']+DF['SRB_CPU_MS']
DF['QUEUE_SEC']=DF['JQ_SEC']+DF['RQ_SEC']+DF['HQ_SEC']+DF['SQ_SEC']
#set job's batch_date as D-1 when START_TIME is earlier than 6am
DF['BATCH_DATE']=DF['START_DT'].apply(lambda dt:datetime.datetime.date(dt+datetime.timedelta(hours=-6)))
print(DF.shape)
print(DF.head(5))
DF.to_csv(SMF_DATA_PATH+'df_smf.csv',index=False)
print("After write df_smf.csv:",datetime.datetime.now())
#print end time used for data processing
#to prepare data in large size may cost long time
END_TIME=datetime.datetime.now()
print("Finish:",END_TIME)
```
## Sample output of smf data extracted from previous steps
In sample dataset, df_smf is sample data extracted from previous steps <p>
Following fields are useful for further elapsed time analysis: <p>
>JOB_NAME: Job name defined by user <br>
JOB_NUM: Job instance number <br>
START_D: Job start date<br>
START_T: Job start time<br>
START_DTSTR: Job start date time in string format<br>
END_D: Job end date<br>
END_T: Job end time<br>
END_DTSTR: Job end date time in string format<br>
ELAPSED_SEC: Job elapsed time in second <br>
```
#import following code automatically by clicking right icon of "find data" in top toolbar
#select "df_smf.csv"-Insert Pandas DataFrame
import pandas as pd
import dsx_core_utils
from dsx_core_utils import ProjectContext
# Add asset from data set
PC = ProjectContext.ProjectContext('Batch_Job_Analytics', '1_BatchJob_SMF30Extract', '', 'xx.xx.xx.xx')
FILE_PATH = dsx_core_utils.get_local_dataset(PC, 'DF_smf.csv')
DF_DATA_1 = pd.read_csv(FILE_PATH)
DF_DATA_1.head()
DF_DATA_1.describe()
```
| github_jupyter |
# Circuits
## Introduction
The [Circuit class](../api/circuit.html) represents a circuit of arbitrary topology, consisting of an arbitrary number of N-ports [Networks](../api/network.html) connected together. Like in an electronic circuit simulator, the circuit must have one (or more) `Port` connected to the circuit. The `Circuit` object allows one retrieving the M-ports `Network` (and thus its network parameters: $S$, $Z$, etc.), where M is the number of ports defined. Moreover, the `Circuit` object also allows calculating the scattering matrix $S$ of the entire circuit, that is the "internal" scattering matrices for the various intersections in the circuit. The calculation algorithm is based on ref [[1](#ref1)].
The figure below illustrates a network with 2 ports, `Network` elements $N_i$ and intersections:

one must define the connection list ("netlist") of the circuit. This connexion list is defined as a List of List of interconnected Tuples `(network, port_number)`:
```
connexions = [
[(network1, network1_port_nb), (network2, network2_port_nb), (network2, network2_port_nb), ...],
...
]
```
For example, the connexion list to construct the above circuit could be:
```
connexions = [
[(port1, 0), (network1, 0), (network4, 0)],
[(network1, 1), (network2, 0), (network5, 0)],
[(network1, 2), (network3, 0)],
[(network2, 1), (network3, 1)],
[(network2, 2), (port2, 0)],
[(network5, 1), (ground1, 0)]
]
```
where we have assumed that `port1`, `port2`, `ground1` and all the `network1` to `network5` are scikit-rf [Networks](../api/network.html) objects with same `Frequency`. Networks can have different (real) characteristic impedances: mismatch are taken into account. Convenience methods are provided to create ports and grounded connexions: the [Circuit.Port()](../api/generated/skrf.circuit.Circuit.Port.html#skrf.circuit.Circuit.Port) and [Circuit.Ground()](../api/generated/skrf.circuit.Circuit.Ground.html#skrf.circuit.Circuit.Ground) methods. Note that the port 1 of the network4 is left open, so is not described in the connexion list.
Once the connexion list is defined, the `Circuit` with:
```
resulting_circuit = rf.Circuit(connexions)
```
`resulting_circuit` is a [Circuit](../api/circuit.html) object.
The resulting 2-ports `Network` is obtained with the [Circuit.network](../api/generated/skrf.circuit.Circuit.network.html#skrf.circuit.Circuit.network) parameter:
```
resulting_network = resulting_circuit.network
```
Note that it is also possible to manually create a circuit of multiple `Network` objects using the [connecting methods](../api/network.html#connecting-networks) of `scikit-rf`. Although the `Circuit` approach to build a multiple `Network` may appear to be more verbose than the 'classic' way for building a circuit, as the circuit complexity increases, in particular when components are connected in parallel, the `Circuit` approach is interesting as it increases the readability of the code. Moreover, `Circuit` circuit topology can be plotted using its `plot_graph` method, which is useful to quickly check that the circuit is built as expected.
## Examples
### Loaded transmission line
Assume that a $50\Omega$ lossless transmission line is loaded with a $Z_L=75\Omega$ impedance.

If the transmission line electric length is $\theta=0$, then one would thus expect the reflection coefficient to be:
$$
\rho = s = \frac{Z_L - Z_0}{Z_L + Z_0} = 0.2
$$
```
import skrf as rf
rf.stylely()
Z_0 = 50
Z_L = 75
theta = 0
# the necessary Frequency description
freq = rf.Frequency(start=1, stop=2, unit='GHz', npoints=3)
# The combination of a transmission line + a load can be created
# using the convenience delay_load method
# important: all the Network must have the parameter "name" defined
tline_media = rf.DefinedGammaZ0(freq, z0=Z_0)
delay_load = tline_media.delay_load(rf.zl_2_Gamma0(Z_0, Z_L), theta, unit='deg', name='delay_load')
# the input port of the circuit is defined with the Circuit.Port method
# In order for Circuit() to recognize the Network as a "port", its name must contains the word 'port':
port1 = rf.Circuit.Port(freq, 'port1', z0=Z_0)
# connexion list
cnx = [
[(port1, 0), (delay_load, 0)]
]
# building the circuit
cir = rf.Circuit(cnx)
# getting the resulting Network from the 'network' parameter:
ntw = cir.network
print(ntw)
# as expected the reflection coefficient is:
print(ntw.s[0])
```
It is also possible to build the above circuit using a series impedance Network, then shorted:

To do so, one would need to use the `Ground()` method to generate the required `Network` object.
```
port1 = rf.Circuit.Port(freq, 'port1', z0=Z_0)
# piece of transmission line and series impedance
trans_line = tline_media.line(theta, unit='deg', name='trans_line')
load = tline_media.resistor(Z_L, name='delay_load')
# ground network (short)
ground = rf.Circuit.Ground(freq, name='ground')
# connexion list
cnx = [
[(port1, 0), (trans_line, 0)],
[(trans_line, 1), (load, 0)],
[(load, 1), (ground, 0)]
]
# building the circuit
cir = rf.Circuit(cnx)
# the result if the same :
print(cir.network.s[0])
```
### LC Filter
Here we model a low-pass LC filter, with example values taken from [rf-tools.com](https://rf-tools.com/lc-filter/) :

```
freq = rf.Frequency(start=0.1, stop=10, unit='GHz', npoints=1001)
tl_media = rf.DefinedGammaZ0(freq, z0=50, gamma=1j*freq.w/rf.c)
C1 = tl_media.capacitor(3.222e-12, name='C1')
C2 = tl_media.capacitor(82.25e-15, name='C2')
C3 = tl_media.capacitor(3.222e-12, name='C3')
L2 = tl_media.inductor(8.893e-9, name='L2')
RL = tl_media.resistor(50, name='RL')
gnd = rf.Circuit.Ground(freq, name='gnd')
port1 = rf.Circuit.Port(freq, name='port1', z0=50)
port2 = rf.Circuit.Port(freq, name='port2', z0=50)
cnx = [
[(port1, 0), (C1, 0), (L2, 0), (C2, 0)],
[(L2, 1), (C2, 1), (C3, 0), (port2, 0)],
[(gnd, 0), (C1, 1), (C3, 1)],
]
cir = rf.Circuit(cnx)
ntw = cir.network
ntw.plot_s_db(m=0, n=0, lw=2, logx=True)
ntw.plot_s_db(m=1, n=0, lw=2, logx=True)
```
When building a `Circuit` made of few networks, it can be useful to represent the connexions graphically, in order to check for possible errors. This is possible using the [Circuit.plot_graph()](../api/circuit.html#representing-a-circuit) method. Ports are indicated by triangles, Networks with squares and interconnections with circles. It is possible to display the network names as well as their associated ports (and characteristic impedances):
```
cir.plot_graph(network_labels=True, network_fontsize=15,
port_labels=True, port_fontsize=15,
edge_labels=True, edge_fontsize=10)
```
## References
<div id="ref1"></div>[1] Hallbjörner, P., 2003. Method for calculating the scattering matrix of arbitrary microwave networks giving both internal and external scattering. Microw. Opt. Technol. Lett. 38, 99–102. https://doi.org/10/d27t7m
| github_jupyter |
## Power analysis for: Reproducibility of cerebellum atrophy involvement in advanced ET.
1. Working with only MNI dataset will result in underpowered research:
posthoc power analysis with alpha=0.05, et=38, nc=32 and effect size 0.61 (obtained from literature median, both 1-sided and 2-sided tests);
2. Increasing power: Number of matched NC subjects needed to achieve a higher power of 0.9 with alpha=0.05 and effect size 0.61 (both 1-sided and 2-sided);
3. Effect sizes from literature research;
4. Power achieved with increasing number of matched NC subjects.
```
from statsmodels.stats import power
import math
from numpy import array
import matplotlib.pyplot as plt
from statsmodels.stats.power import TTestIndPower
# 1. calculate the post-hoc power we can achieve with only MNI dataset
effect_size_expected=0.61; #From later literature review;
alpha_expected=0.05;
power_expected=0.9;
n_et=38; n_nc=32; #Number of subjects in each group before QC.
print('Study with only our MNI cohort will also be underpowered:\n')
# 1-sided test
print('1.1: Power achieved with only MNI dataset for 1-sided test @alpha='+str(alpha_expected)+', et='+str(n_et)+', nc='+str(n_nc)+' and expected effect size='+str(effect_size_expected)+': ')
power_1_mni=power.tt_ind_solve_power(effect_size=effect_size_expected, nobs1=n_et, ratio=n_et/n_nc, alpha=alpha_expected, power=None, alternative='larger')
print(power_1_mni)
# 2-sided test
print('1.2: Power achieved with only MNI dataset for 2-sided test @alpha='+str(alpha_expected)+', et='+str(n_et)+', nc='+str(n_nc)+' and expected effect size='+str(effect_size_expected)+': ')
power_2_mni=power.tt_ind_solve_power(effect_size=effect_size_expected, nobs1=n_et, ratio=n_et/n_nc, alpha=alpha_expected, power=None, alternative='two-sided')
print(power_2_mni)
## 2. number of matched NC subjects needed for high power(0.9) reearch
effect_size_expected=0.61; #From later literature review;
alpha_expected=0.05;
power_expected=0.9;
n_et=38; n_nc=32; #Number of subjects in each group before QC.
# 1-sided test
print('1.3: Number of Controls needed for 1-sided test @ alpha='+str(alpha_expected)+', power='+str(power_expected)+' and effect size='+str(effect_size_expected)+': ')
r_expected=power.tt_ind_solve_power(effect_size=effect_size_expected, nobs1=n_et, alpha=alpha_expected, power=power_expected, ratio=None, alternative='larger')
n_nc_needed = math.ceil(r_expected*n_et)
print(n_nc_needed, ', r=', r_expected, 'n_et=', n_et, ', n_nc=', n_nc_needed, ', total=', math.ceil((r_expected+1)*n_et) )
# 2-sided test
print('1.4: Number of Controls needed (from PPMI) 2-sided, for alpha='+str(alpha_expected)+', power='+str(power_expected)+' and effect size='+str(effect_size_expected)+': ')
r_d_expected=power.tt_ind_solve_power(effect_size=effect_size_expected, nobs1=n_et, alpha=alpha_expected, power=power_expected, ratio=None)
n_nc_needed_d = math.ceil(r_d_expected*n_et)
print(n_nc_needed_d, ', r=', r_d_expected, 'n_et=',n_et, ', n_nc=', n_nc_needed_d, ', total=', math.ceil((r_d_expected+1)*n_et) )
```
## Literature power analysis
```
# basic functions for calculating literature standard effect sizes.
from math import sqrt
from statsmodels.stats import power
import pandas as pd
def cohend_from_sts(n1, m1, s1, n2, m2, s2):
    """Cohen's d for two independent samples from their summary statistics.

    Parameters
    ----------
    n1, m1, s1 : size, mean and standard deviation of group 1.
    n2, m2, s2 : size, mean and standard deviation of group 2.

    Returns
    -------
    float : (m1 - m2) divided by the pooled standard deviation.
    """
    from math import sqrt
    # The pooled standard deviation weights each group's variance by its
    # degrees of freedom (n - 1). The original code shadowed s1/s2 with
    # their squares and imported an unused numpy; both cleaned up here.
    pooled_sd = sqrt(((n1 - 1) * s1 * s1 + (n2 - 1) * s2 * s2) / (n1 + n2 - 2))
    return (m1 - m2) / pooled_sd
def cohend_from_z(z, n):
    """Cohen's d from a reported z value, for two equal-sized groups of n samples.

    NOTE(review): this computes d = z / sqrt(n), while cohend_from_z2(z, n, n)
    gives z * sqrt(2 / n); the two differ by a factor of sqrt(2). Confirm which
    convention for *n* (per group vs. total) is intended before comparing results.
    """
    return z / sqrt(n)
def cohend_from_z2(z, n1, n2):
    """Cohen's d from a reported z value for two groups of sizes n1 and n2."""
    scale = sqrt(1 / n1 + 1 / n2)
    return z * scale
def cohend_from_p(p, n):
    """Cohen's d from a one-sided p value, for two equal-sized groups of n samples.

    The p value is converted to the corresponding z score through the inverse
    normal CDF, then delegated to cohend_from_z.
    """
    from scipy.stats import norm
    z_score = norm.ppf(1 - p)
    return cohend_from_z(z_score, n)
def cohend_from_p2(p, n1, n2):
    """Cohen's d from a one-sided p value, for two groups of sizes n1 and n2.

    The p value is converted to the corresponding z score through the inverse
    normal CDF, then delegated to cohend_from_z2.
    """
    from scipy.stats import norm
    z_score = norm.ppf(1 - p)
    return cohend_from_z2(z_score, n1, n2)
```
### 1. [Benito-León, et al. “Brain Structural Changes in Essential Tremor: Voxel-Based Morphometry at 3-Tesla.” Journal of the Neurological Sciences (December 15, 2009)](https://pubmed.ncbi.nlm.nih.gov/19717167/)
- Study type: VBM (peak z-score)
- Multiple comparison correction: No, with P=0.001
- covariates: age, gender and eTIV
- Study groups: **ET** (19=10+9, 69.8±9.4) versus **NC** (20=10+10, 68.9±10.0);\
- Reported ROIs: bilateral cerebellum, bilateral parietal lobes, right frontal lobe, and right insula.
```
### paper1
# Power analysis for Benito-León et al. 2009 (VBM peak z-scores), ET vs NC.
# only 2/11 has enough power
# alpha matches the paper's uncorrected threshold P=0.001.
p1_n_et=19; p1_n_nc=20; p = 0.001;
p1_roi=['wm_Left_medulla', 'wm_Right_cerebellum_anterior_lobe', 'wm_Right_parietal_lobe_postcentral_gyrus', 'wm_Right_limbic_lobe_uncus',
        'Right_frontal_lobe_MFG','Right_parietal_lobe_precuneus','Left_parietal_lobe_precuneus', 'Right_insula',
        'Left_cerebellum_anterior_lobe', 'Right_cerebellum_anterior_lobe', 'Left_cerebellum_posterior_lobe', 'Left_cerebellum_posterior_lobe'];
# Peak z-values reported by the paper, in the same order as p1_roi.
p1_z=[3.89, 2.96, 4.36, 4.48, 4.25, 5.09, 4.33, 5.50, 3.31, 4.19, 3.71, 3.72];
# Convert each z to Cohen's d for unequal group sizes.
p1_cohend = [cohend_from_z2(x, p1_n_et, p1_n_nc) for x in p1_z];
# Samples per group needed to reach power_expected at alpha=p, and the power
# actually achieved with the study's group sizes (two-sided test).
# NOTE(review): power_expected is defined in another cell -- assumes the
# notebook cells are executed in the intended order; confirm.
p1_samples_needed = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p1_cohend];
p1_power_achieved = [power.tt_ind_solve_power(effect_size=x, nobs1=p1_n_et, alpha=p, ratio=p1_n_nc/p1_n_et) for x in p1_cohend];
#, alternative='larger', VBM map for differences, 2 side test.
p1_res={"VBM_Region":p1_roi,"z-value":p1_z,"Cohen d":p1_cohend, "total n": p1_n_et+p1_n_nc, "Samples needed ("+str(p)+")":p1_samples_needed, "Power achieved with ET/NC("+str(p1_n_et)+"/"+str(p1_n_nc)+")":p1_power_achieved}
p1_df=pd.DataFrame(p1_res)
print("Benito-León paper power analysis with p=0.001 and ET/NC=19/20:\n")
print("The mean effect size of this research is: ")
display(p1_df['Cohen d'].describe())
display(p1_df)
```
### 2. [Bagepally, et al. “Decrease in Cerebral and Cerebellar Gray Matter in Essential Tremor: A Voxel-Based Morphometric Analysis under 3T MRI.” Journal of Neuroimaging (2012)](https://onlinelibrary.wiley.com/doi/full/10.1111/j.1552-6569.2011.00598.x?casa_token=FOs-GPZVoYAAAAAA%3AvQjMw6X0zV0MAnziTsMzUijUvWvH1MwFDb1wMjB_DLsECHUX1G5eJLcSPtmmurrKbxMNQoiGPEXILHY)
**No t or z values reported, skipped.**
Study type: Surface based analysis
Multiple comparison correction: No, with P=0.001
covariates: age, gender, age at onset, and eICV
Study groups: **ET** (19=15+5, 38.2±16.5) versus **NC** (17=14+3, 40.7±16.5); (stating age and sex matched)
Reported ROIs: bilateral cerebellum, bilateral parietal lobes, right frontal lobe, and right insula.
### 3. [Cerasa, A., et al. “Cerebellar Atrophy in Essential Tremor Using an Automated Segmentation Method.” American Journal of Neuroradiology (June 1, 2009)](http://www.ajnr.org/content/30/6/1240)
Study type: freesurfer segmentations, subcortical volumes
Multiple comparison correction: Bonferroni corrected but no significant results.
covariates: eTIV
Study groups: **arm-ET** (27=17+10, 65.0±12.8), **head-ET** (19=6+13, 70.7±7.8) and **NC** (28=14+14, 66.5±7.8); (stating age and sex matched for ET and NC but not for sub-group comparison.)
Reported ROIs: Cerebellar gray p<0.02 and white matter p<0.01 (in exploratory analysis without multiple comparison).
```
# paper3
# Power analysis for Cerasa et al. 2009 (automated segmentation volumes):
# arm-ET and head-ET each compared against NC, alpha=0.05 (uncorrected,
# matching the paper's exploratory analysis).
# NOTE(review): power_expected is defined in another cell; run order matters.
p3_n_arm_et=27; p3_n_head_et=19; p3_n_nc=28; p = 0.05;
p3_roi=['ICV', 'Cortical gray matter', 'Cortical white matter', 'Cerebellar gray matter',
        'Cerebellar white matter']
# Group means and SDs per ROI (same order as p3_roi), taken from the paper.
p3_m_arm_et = [1434.7, 413.5, 385.3, 89.6, 23.9];
p3_s_arm_et = [127.5, 49.5, 57.1, 11.1, 3];
p3_m_head_et = [1375.8, 393.8, 358.9, 86, 23.5];
p3_s_head_et = [119.7, 30.5, 41.1, 7.1, 3.3];
p3_m_nc = [1411.9, 404.1, 384.6, 91.9, 25.7];
p3_s_nc = [122.6, 32.6, 41.9, 8.2, 4.2];
p3_g_arm_cohend=[]; p3_g_head_cohend=[]
# Cohen's d per ROI from summary statistics (ET minus NC).
for i in range(len(p3_roi)):
    p3_g_arm_cohend.append(cohend_from_sts(p3_n_arm_et,p3_m_arm_et[i],p3_s_arm_et[i],
                           p3_n_nc,p3_m_nc[i],p3_s_nc[i]));
    p3_g_head_cohend.append(cohend_from_sts(p3_n_head_et,p3_m_head_et[i],p3_s_head_et[i],
                            p3_n_nc,p3_m_nc[i],p3_s_nc[i]));
# Samples per group needed to reach power_expected at alpha=p, and the power
# achieved with the study's sizes (one-sided 'smaller': atrophy, ET < NC).
p3_g_arm_samples_needed = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p3_g_arm_cohend];
p3_g_arm_power_achieved = [power.tt_ind_solve_power(effect_size=x, nobs1=p3_n_arm_et, alpha=p, ratio=p3_n_nc/p3_n_arm_et, alternative='smaller') for x in p3_g_arm_cohend];
p3_g_head_samples_needed = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p3_g_head_cohend];
p3_g_head_power_achieved = [power.tt_ind_solve_power(effect_size=x, nobs1=p3_n_head_et, alpha=p, ratio=p3_n_nc/p3_n_head_et, alternative='smaller') for x in p3_g_head_cohend];
# Assemble and display a results table per comparison.
p3_g_arm_res={"FS_Region":p3_roi,"Cohen d":p3_g_arm_cohend,"total n": p3_n_arm_et+p3_n_nc,"Samples needed ("+str(p)+")":p3_g_arm_samples_needed,
              "Power achieved with ET/NC("+str(p3_n_arm_et)+"/"+str(p3_n_nc)+")":p3_g_arm_power_achieved}
p3_g_arm_df=pd.DataFrame(p3_g_arm_res)
print("Cerasa A. paper power analysis with p=0.05 and arm-ET/NC=27/28:\n")
print("The mean cerebellar effect size of this research is: ")
# Rows 3: are the two cerebellar ROIs (gray and white matter).
display(p3_g_arm_df['Cohen d'][3:].describe())
display(p3_g_arm_df)
print('\n')
p3_g_head_res={"FS_Region":p3_roi,"Cohen d":p3_g_head_cohend,"total n": p3_n_head_et+p3_n_nc,"Samples needed ("+str(p)+")":p3_g_head_samples_needed,
               "Power achieved with ET/NC("+str(p3_n_head_et)+"/"+str(p3_n_nc)+")":p3_g_head_power_achieved}
p3_g_head_df=pd.DataFrame(p3_g_head_res)
print("Cerasa A. paper power analysis with p=0.05 and head-ET/NC=19/28:\n")
print("The mean cerebellar effect size of this research is: ")
display(p3_g_head_df['Cohen d'][3:].describe())
display(p3_g_head_df)
# none of the results shows enough power.
```
### 4. [Bhalsing, K. S., et al. “Association between Cortical Volume Loss and Cognitive Impairments in Essential Tremor.” European Journal of Neurology 21, no. 6 (2014).](https://onlinelibrary.wiley.com/doi/abs/10.1111/ene.12399)
**We have no cognitive impairment, skipped.**
Study type: VBM
Multiple comparison correction: Bonferroni corrected.
covariates: eTIV
Study groups: **ET** (25=19+6, 45.0±10.7) and **NC** (28=14+14, 45.4±10.7); (stating age and sex matched for ET and NC but not for sub-group comparison.)
Reported ROIs: Cognitive impairments were shown to correlate with GMV in the frontal parietal lobes, cingulate and insular cortices and cerebellum posterior lobe.
### 5. [Quattrone A, Cerasa A, Messina D, Nicoletti G, Hagberg GE, Lemieux L, Novellino F, Lanza P, Arabia G, Salsone M. Essential head tremor is associated with cerebellar vermis atrophy: a volumetric and voxel-based morphometry MR imaging study. American journal of neuroradiology. 2008 Oct 1;29(9):1692-7.](http://www.ajnr.org/content/29/9/1692.short)
Study type: VBM.
Multiple comparison correction: Bonferroni.
covariates: age, sex, eTIV
Study groups: familial **ET** (50=24+26, 65.2±14.3) and **NC** (32=16+16, 66.2±8.1, arm-ET: 18/12, 61.5±16.5; head-ET: 6/14, 70.6±7.6); (stating age and sex matched for ET and NC but not for sub-group comparison.)
Reported ROIs: No significant cerebellar atrophy was found in the whole ET group with respect to healthy subjects with VBM (right cerebellar clusters, right insula, right hippocampus). Vermis lobule IV can distinguish the 3 sub-groups. h-ET showed significant cerebellar atrophy at the level of the **anterior lobe**, with a marked atrophy of the vermis and partially of the paravermal regions with respect to controls.
```
# paper7
# Power analysis for Quattrone et al. 2008 (vermal/lobule areas): arm-ET and
# head-ET each compared against NC, alpha=0.05.
# NOTE(review): power_expected is defined in another cell; run order matters.
p7_n_arm_et=30; p7_n_head_et=20; p7_n_nc=32; p = 0.05;
p7_roi=['Midsagittal vermal area', 'Anterior lobule area', 'Posterior sup. lobule area', 'Posterior inf. lobule area'];
# Group means and SDs per ROI (same order as p7_roi), taken from the paper.
p7_m_arm_et = [849.8, 373.7, 201.1, 274.9];
p7_s_arm_et = [124.6, 53.9, 37.4, 56.6];
p7_m_head_et = [790.3, 343.8, 195.8, 250.6];
p7_s_head_et = [94.5, 37.9, 37.1, 43.1];
p7_m_nc = [898.6, 394.5, 209.7, 294.3];
p7_s_nc = [170.6, 74.6, 47.3, 69.5];
p7_cohend_arm_et=[]; p7_cohend_head_et=[];
# Cohen's d per ROI from summary statistics (ET minus NC; negative = atrophy).
for i in range(len(p7_roi)):
    p7_cohend_arm_et.append(cohend_from_sts(p7_n_arm_et,p7_m_arm_et[i],p7_s_arm_et[i],p7_n_nc,p7_m_nc[i],p7_s_nc[i]));
    p7_cohend_head_et.append(cohend_from_sts(p7_n_head_et,p7_m_head_et[i],p7_s_head_et[i],p7_n_nc,p7_m_nc[i],p7_s_nc[i]));
p7_samples_needed_arm_et = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p7_cohend_arm_et];
# BUG FIX: tt_ind_solve_power defines ratio = nobs2/nobs1.  With nobs1 = the ET
# group, ratio must be n_nc/n_et (as in the paper-1/3/6 cells); the original
# passed the inverted ratio n_et/n_nc here and for head-ET below.
p7_power_achieved_arm_et = [power.tt_ind_solve_power(effect_size=x, nobs1=p7_n_arm_et, alpha=p, ratio=p7_n_nc/p7_n_arm_et, alternative='smaller') for x in p7_cohend_arm_et];
p7_samples_needed_head_et = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p7_cohend_head_et];
p7_power_achieved_head_et = [power.tt_ind_solve_power(effect_size=x, nobs1=p7_n_head_et, alpha=p, ratio=p7_n_nc/p7_n_head_et, alternative='smaller') for x in p7_cohend_head_et];
# Assemble and display a results table per comparison.
p7_arm_et_res={"ROI_Region":p7_roi,"Cohen d":p7_cohend_arm_et,"total n": p7_n_arm_et+p7_n_nc,"Samples needed ("+str(p)+")":p7_samples_needed_arm_et,
               "Power achieved with armET/NC("+str(p7_n_arm_et)+"/"+str(p7_n_nc)+")":p7_power_achieved_arm_et}
p7_arm_et_df=pd.DataFrame(p7_arm_et_res)
print("Quattrone A. paper power analysis with p=0.05 and arm ET/NC=30/32:\n")
print("The mean cerebellar effect size of this research is: ")
display(p7_arm_et_df['Cohen d'].describe())
display(p7_arm_et_df)
print('\n')
p7_head_et_res={"ROI_Region":p7_roi,"Cohen d":p7_cohend_head_et,"total n": p7_n_head_et+p7_n_nc,"Samples needed ("+str(p)+")":p7_samples_needed_head_et,
                "Power achieved with headET/NC ("+str(p7_n_head_et)+"/"+str(p7_n_nc)+")":p7_power_achieved_head_et}
p7_head_et_df=pd.DataFrame(p7_head_et_res)
print("Quattrone A. paper power analysis with p=0.05 and head ET/NC=20/32:\n")
print("The mean cerebellar effect size of this research is: ")
display(p7_head_et_df['Cohen d'].describe())
display(p7_head_et_df)
# None of the results shows enough power.
```
### 6. [Shin H, Lee DK, Lee JM, Huh YE, Youn J, Louis ED, Cho JW. Atrophy of the cerebellar vermis in essential tremor: segmental volumetric MRI analysis. The Cerebellum. 2016 Apr 1;15(2):174-81.](https://link.springer.com/content/pdf/10.1007/s12311-015-0682-8.pdf)
Study type: Cerebellar segmentation (28 lobules).
Multiple comparison correction: Bonferroni for groups.
covariates: eTIV
Study groups: **ET** (39=23+16, 63.7±13.0) and **NC** (36=19+17, 65.3±6.8, cerebellar-ET: 12/8, 66.4±13.4; classic-ET: 11/8, 60.9±12.2); (stating age and sex matched for ET and NC but not for sub-group comparison.)
Reported ROIs: volume ratio/eTIV, **vermis VI**, vermis VIIAt.
```
# paper5
# Power analysis for Shin et al. 2016 (cerebellar segmental volumes / eTIV):
# whole ET group vs NC, and cerebellar-ET vs classic-ET; alpha=0.05.
# NOTE(review): power_expected is defined in another cell; run order matters.
p5_n_cere_et=20; p5_n_classic_et=19; p5_n_et=p5_n_cere_et+p5_n_classic_et; p5_n_nc=36; p = 0.05;
p5_roi=['cerebellar volume', 'Vermis VI', 'Vermis VIIAt'];
# Group means and SDs (volume ratios to eTIV), in the same order as p5_roi.
p5_m_et = [0.0818, 0.0030, 0.0008];
p5_s_et = [0.0071, 0.0006, 0.0004];
p5_m_cere_et = [0.0813, 0.0028, 0.0008];
p5_s_cere_et = [0.0059, 0.0006, 0.0002];
p5_m_classic_et = [0.0824, 0.0032, 0.0010];
p5_s_classic_et = [0.0084, 0.0004, 0.0005];
p5_m_nc = [0.0833, 0.0033, 0.0009];
p5_s_nc = [0.0065, 0.0006, 0.0003];
p5_g_et_cohend=[]; p5_cere_class_cohend=[]
# Cohen's d per ROI: whole ET vs NC, and cerebellar-ET vs classic-ET.
for i in range(len(p5_roi)):
    p5_g_et_cohend.append(cohend_from_sts(p5_n_et,p5_m_et[i],p5_s_et[i],p5_n_nc,p5_m_nc[i],p5_s_nc[i]));
    p5_cere_class_cohend.append(cohend_from_sts(p5_n_cere_et,p5_m_cere_et[i],p5_s_cere_et[i],
                                p5_n_classic_et,p5_m_classic_et[i],p5_s_classic_et[i]));
p5_g_et_samples_needed = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p5_g_et_cohend];
# BUG FIX: tt_ind_solve_power defines ratio = nobs2/nobs1.  With nobs1 = the
# first group of each comparison, ratio must be (second group)/(first group),
# as in the paper-1/3/6 cells; the original passed the inverted ratio here
# and in the cere-vs-classic comparison below.
p5_g_et_power_achieved = [power.tt_ind_solve_power(effect_size=x, nobs1=p5_n_et, alpha=p, ratio=p5_n_nc/p5_n_et, alternative='smaller') for x in p5_g_et_cohend];
p5_g_cere_samples_needed = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p5_cere_class_cohend];
p5_g_cere_power_achieved = [power.tt_ind_solve_power(effect_size=x, nobs1=p5_n_cere_et, alpha=p, ratio=p5_n_classic_et/p5_n_cere_et, alternative='smaller') for x in p5_cere_class_cohend];
# Assemble and display a results table per comparison.
p5_g_et_res={"ROI_Region":p5_roi,"Cohen d":p5_g_et_cohend,"total n": p5_n_et+p5_n_nc,"Samples needed ("+str(p)+")":p5_g_et_samples_needed,
             "Power achieved with ET/NC("+str(p5_n_et)+"/"+str(p5_n_nc)+")":p5_g_et_power_achieved}
p5_g_et_df=pd.DataFrame(p5_g_et_res)
print("Shin H. paper power analysis with p=0.05 and ET/NC=39/36:\n")
print("The mean cerebellar effect size of this research is: ")
display(p5_g_et_df['Cohen d'].describe())
display(p5_g_et_df)
print('\n')
p5_g_cere_res={"ROI_Region":p5_roi,"Cohen d":p5_cere_class_cohend,"total n": p5_n_cere_et+p5_n_classic_et,"Samples needed ("+str(p)+")":p5_g_cere_samples_needed,
               "Power achieved with cerebellarET/classicET ("+str(p5_n_cere_et)+"/"+str(p5_n_classic_et)+")":p5_g_cere_power_achieved}
p5_g_cere_df=pd.DataFrame(p5_g_cere_res)
print("Shin H. paper power analysis with p=0.05 and cerebellarET/classicET=20/19:\n")
print("The mean cerebellar effect size of this research is: ")
display(p5_g_cere_df['Cohen d'].describe())
display(p5_g_cere_df)
# None of the results show enough power.
```
### 7. [Dyke JP, Cameron E, Hernandez N, Dydak U, Louis ED. Gray matter density loss in essential tremor: a lobule by lobule analysis of the cerebellum. Cerebellum & ataxias. 2017 Dec;4(1):1-7.](https://cerebellumandataxias.biomedcentral.com/articles/10.1186/s40673-017-0069-3)
Study type: Cerebellar segmentation (43 lobules, SUIT).
Multiple comparison correction: Benjamini-Hochberg False Discovery Rate procedure (BH FDR)@ alpha=0.1.
covariates: age, gender, MOCA score and group, no eTIV.
Study groups: **ET** (47=24+23, 76.0±6.8, head ET, voice ET and arm ET) and **NC** (36=10+26, 73.2±6.7); (sex not matched, did not give details of subgroups.)
Reported ROIs: %GM density differences (dpa equivalent). For head ET: Right_IX, Left_V, Left_VIIIa, Left_IX, Vermis_VIIb, Left_VIIb, Left_X, Left_I_IV and Right_V. For voice ET: Right_IX, Vermis_VIIb, Left_IX, Left_V, Left_X, Vermis_CrusII, Vermis_CrusI, Vermis_VI, Left_I_IV, Vermis_VIIIb and Right_V. Severe tremor (TTS ≥ 23; n = 20) showed no significant decreases compared to controls after correcting for multiple comparisons.
```
# paper6
# Power analysis for Dyke et al. 2017 (SUIT lobule GM density): head-ET and
# voice-ET each vs NC; effect sizes recovered from the reported p-values.
# NOTE(review): power_expected is defined in another cell; run order matters.
p6_n_head_et=27; p6_n_voice_et=22; p6_n_nc=36; p = 0.05;
p6_roi_head=['Left_IIV', 'Left_V', 'Left_VIIb', 'Left_VIIIa', 'Left_IX', 'Left_X', 'Right_V', 'Right_IX', 'Vermis_VIIb'];
p6_roi_voice=['Left_IIV', 'Left_V', 'Left_IX', 'Left_X', 'Right_V', 'Right_IX', 'Vermis_CrusI', 'Vermis_CrusII', 'Vermis_VI','Vermis_VIIb', 'Vermis_VIIIb'];
# Reported p-values, in the order of the ROI lists above.
p6_p_head_et = [0.018, 0.004, 0.013, 0.009, 0.010, 0.014, 0.021, 0.001, 0.011];
p6_p_voice_et = [0.025, 0.005, 0.005, 0.008, 0.026, 0.001, 0.016, 0.012, 0.019, 0.004, 0.026];
# p -> z -> Cohen's d for unequal group sizes.
p6_cohend_head_et = [cohend_from_p2(x,p6_n_head_et,p6_n_nc) for x in p6_p_head_et];
p6_cohend_voice_et = [cohend_from_p2(x,p6_n_voice_et,p6_n_nc) for x in p6_p_voice_et];
p6_sample_needed_head_et = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p6_cohend_head_et];
# One-sided 'larger' test: the d recovered from a p-value < 0.5 is positive.
p6_power_achieved_head_et = [power.tt_ind_solve_power(effect_size=x, nobs1=p6_n_head_et, alpha=p, ratio=p6_n_nc/p6_n_head_et, alternative='larger')
                             for x in p6_cohend_head_et];
p6_sample_needed_voice_et = [power.tt_ind_solve_power(effect_size=x, alpha=p, power=power_expected) for x in p6_cohend_voice_et];
p6_power_achieved_voice_et = [power.tt_ind_solve_power(effect_size=x, nobs1=p6_n_voice_et, alpha=p, ratio=p6_n_nc/p6_n_voice_et, alternative='larger')
                              for x in p6_cohend_voice_et];
# Assemble and display a results table per comparison.
p6_head_et_res={"ROI_Region":p6_roi_head,"Cohen d":p6_cohend_head_et,"total n": p6_n_head_et+p6_n_nc,"Samples needed ("+str(p)+")":p6_sample_needed_head_et,
                "Power achieved with headET/NC("+str(p6_n_head_et)+"/"+str(p6_n_nc)+")":p6_power_achieved_head_et}
p6_head_et_df=pd.DataFrame(p6_head_et_res)
print("Dyke JP. paper power analysis with p=0.05 and head ET/NC=27/36:\n")
print("The mean cerebellar effect size of this research is: ")
display(p6_head_et_df['Cohen d'].describe())
display(p6_head_et_df)
print('\n')
p6_voice_et_res={"ROI_Region":p6_roi_voice,"Cohen d":p6_cohend_voice_et,"total n": p6_n_voice_et+p6_n_nc,"Samples needed ("+str(p)+")":p6_sample_needed_voice_et,
                 "Power achieved with voiceET/NC ("+str(p6_n_voice_et)+"/"+str(p6_n_nc)+")":p6_power_achieved_voice_et}
p6_voice_et_df=pd.DataFrame(p6_voice_et_res)
print("Dyke JP. paper power analysis with p=0.05 and voice ET/NC=22/36:\n")
print("The mean cerebellar effect size of this research is: ")
display(p6_voice_et_df['Cohen d'].describe())
display(p6_voice_et_df)
# None of the results shows enough power. Largest: Right_IX=0.839158
```
## Summary of literature effect sizes and power.
```
###### Number of samples needed to detect the empirical effect sizes with power of 0.9; and actual sample sizes.
# Pool the per-paper ROI results (Cohen's d and total n) into one frame.
# NOTE(review): depends on the per-paper DataFrames and on math/array/plt/power
# being defined by other cells -- assumes the notebook is run in order.
pd_roi_lit=pd.concat([p3_g_head_df.loc[3:,['Cohen d','total n']], p3_g_arm_df.loc[3:, ['Cohen d','total n']],
                      p7_arm_et_df.loc[:,['Cohen d','total n']], p7_head_et_df.loc[:, ['Cohen d','total n']],
                      p5_g_et_df.loc[:,['Cohen d','total n']], p5_g_cere_df.loc[:, ['Cohen d','total n']],
                      p6_head_et_df.loc[:,['Cohen d','total n']], p6_voice_et_df.loc[:,['Cohen d','total n']]], ignore_index=True)
# Use absolute effect sizes: only the magnitude matters for power.
pd_roi_lit.loc[:,'Cohen d']=abs(pd_roi_lit.loc[:,'Cohen d'])
pd_vbm_lit=p1_df.loc[:,['Cohen d','total n']]
pd_vbm_lit.loc[:,'Cohen d']=abs(pd_vbm_lit.loc[:,'Cohen d'])
pd_lit=pd.concat([pd_roi_lit, pd_vbm_lit])
es_lit=round(pd_lit.loc[:,'Cohen d'].median(),2)  # literature median effect size
print(es_lit)
print('4. Samples needed to achieve power='+str(power_expected)+' for literature claims: \n')
print('The median of the effect size is: ', pd_lit.loc[:, 'Cohen d'].median())
print('ROI Cohens d summary:')
print('The median the effect size is: ', pd_roi_lit.loc[:, 'Cohen d'].median())
display(pd_roi_lit.loc[:, 'Cohen d'].describe())
print('VBM Cohens d summary:')
print('The median the effect size is: ', pd_vbm_lit.loc[:, 'Cohen d'].median())
display(pd_vbm_lit.loc[:,'Cohen d'].describe())
# Visualize the literature effect size VS sample size; calculate the power-0.9
# line for our dataset with fixed 38 ETs and an increasing number of augmented NCs.
# (Cleanup: removed redundant double array(array(...)) wrapping -- the inner
# call already produces the ndarray.)
cohend_lit = array(range(54, 400))/100  # candidate effect sizes 0.54 .. 3.99
n_et=38
alpha_expected=0.05
power_expected=0.9
# For each candidate effect size, the NC/ET ratio needed for the target power (2-sided).
r_d_expected_list=[ power.tt_ind_solve_power(effect_size=x, nobs1=n_et, alpha=alpha_expected, power=power_expected, ratio=None) for x in cohend_lit];
r_d_expected=power.tt_ind_solve_power(effect_size=es_lit, nobs1=n_et, alpha=alpha_expected, power=power_expected, ratio=None)
n_nc_needed = [math.ceil(x*n_et) for x in r_d_expected_list]
n_total = [x+n_et for x in n_nc_needed]
# Power achieved with number of borrowed (matched) NC subjects.
n_matched=array(range(1,300))
n_total_power=[x+n_et for x in n_matched]
power_matched_1side=[ power.tt_ind_solve_power(effect_size=es_lit, nobs1=n_et, alpha=alpha_expected, ratio=x/n_et, alternative='larger') for x in n_matched];
power_matched_2side=[ power.tt_ind_solve_power(effect_size=es_lit, nobs1=n_et, alpha=alpha_expected, ratio=x/n_et) for x in n_matched];
# subplot 1: literature effect sizes
fig, ax = plt.subplots(1, 2, figsize=(20, 10))
ax[0].plot(n_total, cohend_lit, 'gray')
ax[0].scatter(pd_roi_lit['total n'], pd_roi_lit['Cohen d'], c='b', marker='x')
ax[0].scatter(pd_vbm_lit['total n'], pd_vbm_lit['Cohen d'], c='g', marker='x')
# print and plot the total sample size needed at the literature median effect size
r_d_lit=power.tt_ind_solve_power(effect_size=es_lit, nobs1=n_et, alpha=alpha_expected, power=power_expected, ratio=None)
n_total_aug=math.ceil((r_d_lit+1)*n_et)
print("literature median effect size: ", es_lit, ', the total number of samples needed: ', n_total_aug)
ax[0].vlines(n_total_aug, ymin=0, ymax=2, colors='r', linestyles='--', label='power=0.9')
# customize axes
ax[0].set_xlim([0, 200]); ax[0].set_ylim([0, 2]);
ax[0].set_ylabel('Effect sizes (Cohen\'s d)',fontsize=20)
ax[0].set_xlabel('Total number of subjects',fontsize=20)
ax[0].set_title(r'Literature effect sizes',fontsize=20)
ax[0].legend(['Power='+str(power_expected)+' from '+str(n_et)+' ET and \nincreasing number of controls','ROI Literature','VBM Literature', 'Power=0.9 and effect size\n(Literature median) ='+str(es_lit)], loc='upper right',fontsize='x-large')
ax[0].text(0.025*200, 0.975*2, '(a)', fontsize=20, verticalalignment='top')
# subplot 2: power with increasing NC subjects
POW_LIM=[0.2, 1.0]
ax[1].plot(n_total_power, power_matched_1side, 'b')
ax[1].plot(n_total_power, power_matched_2side, 'g')
# NC count needed for power 0.9 with a 2-sided test at the literature effect size.
r_9=power.tt_ind_solve_power(effect_size=es_lit, nobs1=n_et, alpha=alpha_expected, ratio=None, power=0.9)
n_nc_needed=math.ceil(r_9*n_et)
ax[1].vlines(n_total_aug, ymin=POW_LIM[0], ymax=POW_LIM[1], colors='r', linestyles='--', label='power=0.9')
ax[1].set_xlim([0, 200]); ax[1].set_ylim(POW_LIM);
ax[1].set_xlabel('Total number of subjects (including 38 ET)',fontsize=20)
ax[1].set_ylabel('Power', fontsize=20)
ax[1].set_title(r'Power ($\alpha=0.05$, effect size='+str(es_lit)+')',fontsize=20)
ax[1].legend(['1-sided test','2-sided test', str(n_nc_needed)+' matched NCs needed\n for 2-sided test with\n Power='+str(power_expected)], loc='right',fontsize='x-large')
ax[1].text(0.025*200, 0.975*1, '(b)', fontsize=20, verticalalignment='top')
fig.savefig("power_analysis.jpg",dpi=300)
```
### 8. [Mavroudis, I., Petrides, F., Karantali, E., Chatzikonstantinou, S., McKenna, J., Ciobica, A., Iordache, A.-C., Dobrin, R., Trus, C., & Kazis, D. (2021). A Voxel-Wise Meta-Analysis on the Cerebellum in Essential Tremor. Medicina, 57(3), 264.](https://www.mdpi.com/1648-9144/57/3/264)
The power of studies mentioned in Mavroudis's meta analysis paper.
Study type: meta analysis.
```
# New meta analysis added.
# Per-study group sizes from Mavroudis et al. 2021 (ET and NC columns).
sample_et = [36, 9, 45, 17, 47, 27, 14, 32, 19, 19, 20, 14, 25, 50, 19, 10]
sample_nc = [30, 9, 39, 17, 36, 27, 20, 12, 18, 20, 17, 23, 25, 32, 19, 12]
# Fang paper is not included for it is a rs-fMRI.
import numpy as np
n_et = np.median(sample_et)
n_nc = np.median(sample_nc)
print('Medians of sample sizes of the mentioned studies (ET/NC): ', np.median(sample_et), '/', np.median(sample_nc))
from statsmodels.stats import power
import math
from numpy import array
import matplotlib.pyplot as plt
from statsmodels.stats.power import TTestIndPower
# Statistical pre-defined values used throughout the power calculations.
effect_size_expected = 0.61
alpha_expected = 0.05
power_expected = 0.9
# Should pay attention to 1-sided vs 2-sided tests (defaults here are 2-sided).
print('Medians of power of the mentioned studies (ET|NC): ',
      power.tt_ind_solve_power(effect_size=effect_size_expected, alpha=alpha_expected, nobs1=n_et, ratio=n_nc/n_et),
      '|', power.tt_ind_solve_power(effect_size=effect_size_expected, alpha=alpha_expected/10, nobs1=n_et, ratio=n_nc/n_et))
# Per-study achieved power at alpha and at alpha/10.
pow_a = []
pow_a_10 = []
for study_et, study_nc in zip(sample_et, sample_nc):
    pow_a.append(power.tt_ind_solve_power(effect_size=effect_size_expected, alpha=alpha_expected, nobs1=study_et, ratio=study_nc/study_et))
    pow_a_10.append(power.tt_ind_solve_power(effect_size=effect_size_expected, alpha=alpha_expected/10, nobs1=study_et, ratio=study_nc/study_et))
print('Medians of power of the mentioned studies (a=0.05|a=0.05/10): ', round(np.median(pow_a), 4), '|', round(np.median(pow_a_10), 4))
```
| github_jupyter |
For classes with mostly new coders, the python section alone will take >75 minutes.
Here is how I used 2 days on this:
Day 1: Got through try/pair/share and stopped before loops.
Day 2:
1. Answer Q&A. Tell them there is participation credit for offering website fixed.
1. Give 3 HW tips: google "csv pandas", look for relative path in textbook, and check out functions
1. Prof demo _only_ (students watch, prof sends code chunks for copy-paste at end):
- Loops syntax, understanding code flow
- Indents / If syntax
- Debug presentation
1. Cover how to start and clone HW, deliverable expectations
1. revisit performance of stocks from day one quiz
1. poll on preferences for group coding during future classes (random or persistent groups)
(I would
---
Final summary from notes:
## Good ideas/summary
1. Use comments to explain your code (what a single line or block of code is doing, what args a function can have, why you made certain choices, etc)
1. Naming variables: USE GOOD ONES!
1. TAB: will suggest autocompletions (eg possible functions)
1. SHIFT-TAB: will describe function syntax
1. Everything is an object in python
1. Indentations matter! Indented lines belong to the block above that isn't indented
1. Have a syntax error? Try to fix with google but after 15 minutes, ask a classmate! (Then the TA...
1. LOOK AT YOUR DATA A LOT
## Warnings
- **DON'T USE IS:** `is` and `==` are NOT the same!!!
- variables are pointers: don't use `b=a`, instead use `b = a.copy()`
- Don't use `^` to take powers, use `**`
## Hello everyone! Before class:
1. In GH Desktop: Fetch/pull your class notes repo and the lecture repo.
1. Copy the textbook file `/content/01/06_python.ipynb` to your `class notes/notes` folder
3. In JLab: In the class notes repo, inside the "notes" subfolder, start a new file to save our notes from today
4. Suggested: Snap those 2 files side-by-side in the JLab window
Hopefully this goes better than Chrissy Teigen's experience with Py:

## First things first
The assignment will be posted ASAP.
If you didn't receive a notification from GH tonight from my post on the classmates discussion board, please let me or the TA know immediately.
## Outline
1. Python essentials: Learning by doing
- Q: Feedback: Did anyone try and like any of the tutorial options?
1. Debugging
## Comments
Remember to use comments! In fact, over use them in the beginning.
They help you, future you, and others understand what
- A particular line of code is doing
- What blocks of code are doing
- The idea behind why you wrote the code as you did (big picture)
Else, you'll find yourself in this photo soon:

## Guided discussion + practice
We will run parts of the textbook file in our open notes file.
1. Arithmetic --> 1 practice prob
1. Logic/booleans
- booleans, operators: comparison, logic, membership
- for now: don't use `is` or `is not`! (let's make a cell for "warnings")
- complex logic gates
1. Key lessons: Naming objects, `<TAB>` and `<SHIFT>+<TAB>`, pointers, Objects,
1. Data structures: lists, tuple, dict, set (next slide), then practice
### Built in data structures
Listed from most commonly used to least:
Type Name | Example | Description
--- | --- | ---
list | [1, 2, 3] | Ordered collection
dict | {'a':1, 'b':2, 'c':3} | Unordered (key,value) mapping
set | {1, 2, 3} | Unordered collection of unique values
tuple | (1, 2, 3) | Immutable ordered collection
### Lists:
- Define with brackets: `L = [3, 1, 4, 15, 9]`
- "Zero indexed": `L[0]` returns 3, `L[1]` returns 1
- Can access from the end: `L[-1]` returns 9, `L[-2]` returns 15
- Lots of built in functions: len, sort, reversed, max, min, sum, count, append, extend,...
- Prof demo: Show the py reference in Jlab, search for "data structures"
- Prof demo: Find this outside JLab
- Can access "slices": `L[<start at index> : <go till index>]`
- `L[0:3]` and `L[:3]` return `[3,1,4]`
- `L[-3]` returns `[4,15,9]`
Now, let's all define this vector: `L=[8, 5, 6, 3, 7]`.
**TRY, PAIR, SHARE: Write code that does the following. Discuss with the person next to you as needed:**
1. Returns the length.
1. Returns the largest element.
1. Returns the smallest element.
1. Returns the total of the vector.
2. Returns the first element. See [this awesome answer](https://stackoverflow.com/questions/509211/understanding-slice-notation?rq=1) to learn about "slicing" lists in Python. If that link is dead: https://stackoverflow.com/questions/509211/understanding-slice-notation?rq=1
2. Returns the last element.
2. Returns the first 2 elements.
2. Returns the last 2 elements.
2. Returns the odd numbered elements (i.e., [8, 6, 7]).
## Guided discussion + practice
We will run parts of the textbook file in our open notes file.
1. Arithmetic --> 1 practice prob
1. Logic/booleans
- booleans, operators: comparison, logic, membership
- for now: don't use `is` or `is not`! (let's make a cell for "warnings")
- complex logic gates
1. Key lessons: Naming objects, `<TAB>` and `<SHIFT>+<TAB>`, pointers, Objects,
1. Data structures --> try, pair (3 people/room), share
1. **Flow control syntax and thoughts: for loop (next slides), indentation, if-elif-else**
```
# Teaching demo: for-loop syntax.
stonks = ['GME','AMC','BB','Bankrupt!']
# syntax: for <element> in <iterable>:
# you must include the colon.
# everything in the for loop must be indented
# name the element something that makes sense (not generic!)!
for stonk in stonks:
    print(stonk)
    print(stonk)
# two common for-loop devices:
# 1. looping over a range
# BUG FIX: `print i` is Python-2 syntax (a SyntaxError in Python 3);
# changed to the print() function used everywhere else in this file.
for i in range(7):
    print(i)
# incrementing some calculation
mynumbers = [2,4,8]
tot = 0
for num in mynumbers:
    tot = tot + num
print(tot)
```
Doesn't that seem silly?
Q1: Isn't there a quicker way to compute that?
Q2: Then why is that a useful construct? (answer on next slide)
```
# Illustrative pseudo-code: get_price_of() is not defined anywhere --
# this cell is meant to be read, not run.
tot = 0
for stonk in stonks:
    price = get_price_of(stonk) # do some intermediate steps
    tot = tot + price # then increment!
```
### If, elif, else
Syntax:
```python
if <condition #1>: # you must use the colon!
<do some stuff if condition is true>
elif <condition #2>: # as in "Else-If"
<if condition #1 is false, and
condition #2 is true, run this block>
else:
<if neither #1 or #2 are true, do this>
```
Comments:
- You can include zero or as many `elif` code blocks as you want
- You can omit the `else` block entirely
- Whatever is in `<condition>` must evaluate to True or False or 1 or 0
- See the **"Logic and comparisons"** section above on how Python evaluates conditions
## PYTHON AND INDENTATION
- Python has strong opinions about indentations
- Indentations at the beginning of lines are not "up to you"
- **Indentations indicate a "block" of code that is run as a unit.** (like the for loops above)
Example on next slide, prof demo.
```
# Teaching demo: indentation decides which lines belong to the if-block.
x = 7
if x < 5: # this is false, obviously,
    z = x+2 # so this never runs and
    print('I am not here.') # this doesn't print

print('starting new block')
# Same condition, but the final print is OUTSIDE the if-block (not indented),
# so it runs regardless of the condition -- that is the point of the demo.
if x < 5: # this is false, obviously,
    z = x+2 # so this never runs and
print('I am here') # this does print
```
### Other notes in the textbook
- `while`: We probably won't use while in this class.
- `functions`: Useful!
- A list of popular packages we will add to our code as the semester proceeds
### TIPS:
1. For each package we use, note the most common and useful functions, and copy "cookbook" uses of the packages which you can paste into new programs.
Example: How to open a CSV file
2. _I do not personally, nor do many programmers, commit to memory many functions of many packages. We simply **know what can be done** and when needed, we search (tab completion/google/stack overflow) for the command/recipe for that function._
# BREAK!

Now we move on to the second section of class... "debugging"
## Tips for fixing and avoiding errors
Computers are extremely powerful but incredibly stupid. We want to both
- Fix bugs when they happen
- Prevent them from happening!
Bugs can be syntax or other errors that break the code.
The worst bugs are the type that let your code keep "working". Just lurking there, ruining your work...

### To fix bugs, you need to
1. Realize that you have a bug
3. Figure out where it is
2. Make it repeatable (and you'll understand the bug)
4. Fix it (duh) and test it (the existence of the bug should disabuse you of your coding invincibility!)
1 and 2 are easy with syntax errors - python will tell you.
_**Advice that could save (or cost) you thousands:**_ Those steps are general, and work for other things besides code, like plumbing and electrical work on your parent's house.
1. Read the error codes! Then google them. Then ask your @classmates.
- REMINDER: the 15 minute rule
2. `%debug` - [Covered in nice detail here](https://jakevdp.github.io/PythonDataScienceHandbook/01.06-errors-and-debugging.html#Debugging:-When-Reading-Tracebacks-Is-Not-Enough).
3. Hunt the bug: Flipping switches / divide and conquer (next slide)


After slaving over your computer and a piece of paper (you smartly planned out your code before you went in head first), you've found a clever solution to your problem. Your code is beautiful and elegant, like this:
```py
2+2 # imagine this is a bunch of code
2+2 # imagine this is a bunch of code
2+2 # imagine this is a bunch of code
Error # somewhere in the code is an error. But in real programming you don’t know the error is here!
2+2 # imagine this is a bunch of code
2+2 # imagine this is a bunch of code
```
Let's turn parts of your code off:
You can just comment out parts of the code
```py
2+2 # imagine this is a bunch of code
2+2 # imagine this is a bunch of code
# 2+2 # imagine this is a bunch of code
# Error # somewhere in the code is an error. But in real programming you don’t know the error is here!
# 2+2 # imagine this is a bunch of code
# 2+2 # imagine this is a bunch of code
```
Well, this would work, so you know the error is in the lines you hid.
# But what about the bugs that let your code keep "working"?
These are often due to either
- planning errors (you thought you could do A->B->C, but C is impossible after B)
- **Or because you did something that didn't do exactly what you thought**
The latter happens a TON with big data projects, and we will talk about techniques to reduce such errors.
But I have **ONE BIG, WEIRD TRICK** and let me tell you, BUGS HATE IT:
# Look at your data and objects OFTEN! Print, print, print!
Seriously...
# Look at your data and objects OFTEN! Print, print, print!
This isn't even a "debugging" point per se.
- You know a 6 is a 6.
- In big datasets, it's easy to make rather large changes without knowing exactly what you have done, ... or not done ... , if you don't see into the object.
- Are you sure that object is exactly what you think it is, containing exactly what you think it does? Thus, the `print` statement and other ways of "glancing into" datasets are crucial. Even when you've all become pythonic pros, you should tend towards examining your objects "too much".
Options to look at datasets:
1. Print parts of it in Jupyter, along with many summary stats.
2. Output to a csv file and open in Excel.
3. Use the `spyder` program that came with Anaconda.
4. There is a "variable" explorer extension for JLab that lets you click and see objects.
## Are you still stuck?
It'll happen! We will try to build some ambitious code this semester!
So if you've tried the above, what can you do?
- Writing smart code will save us from getting into intractable problems. More on that next class.
- Again, see the resources tab of our website!
- Finally, the resources tab also suggests that clearing your head and [getting a mental break might be a good idea](https://media.giphy.com/media/h6cFYO8miNhok/giphy.gif).
## Summary
- We covered some basic python coding together today. Going forward, it's headfirst into the breach. (?)
- You can figure out syntax issues, but use the 15 minute rule
- Get exposure to possible functions and algorithms to solve different problems via the resources and youtube videos
- LOOK AT YOUR DATA AND OBJECTS A LOT!

## Shutting down
And at the end of each class,
- Restart your kernel and rerun your code
- save all open code files,
- close JLab (and the terminal running it)
- commit and push your Class Notes repo
- open the GitHub.com version of the repo
| github_jupyter |
# NumPy
NumPy ist ein Erweiterungsmodul für numerische Berechnungen mit Python. Es beinhaltet grundlegende Datenstrukturen, sprich Matrizen und mehrdimensionale Arrays. NumPy selbst ist in C umgesetzt worden und bietet mithilfe der Python-Schnittstelle die Möglichkeit, Berechnungen schnell durchzuführen. Die Module SciPy, Matplotlib und Pandas greifen auf die erwähnten Datenstrukturen zurück; daher stellt NumPy das Fundament der Scientific-Python-Libraries dar.
Mehr zu NumPy auf der offiziellen Website: http://numpy.org/
### Download von NumPy
Mit Python wird automatisch pip (Package-Manager für Python-Module auf PyPI.org) installiert. pip selbst steht für "pip installs packages", was der Kommandosyntax entspricht, mit der Python-Module heruntergeladen werden.
```
# nicht starten, da NumPy bereits installiert wurde und die notwendigen Rechte fehlen
!pip3 install numpy
```
### Verwenden von Math
```
from math import *

# Apply the sine function to every entry of the list; a comprehension
# replaces the explicit accumulate-into-a-list loop.
zahlen = [1, 2, 3, 4, 5, 6]
ergebnis = [sin(wert) for wert in zahlen]
print(ergebnis)
type(zahlen)
```
### Verwenden von NumPy
```
import numpy as np
np.__version__
```
## Arrays / Vektoren
$zahlen = \left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\
\end{array}
\right)$
```
zahlen = np.array([1, 2, 3, 4])
ergebnis = np.sin(zahlen)
print(ergebnis)
type(zahlen)
```
<b><font color="red">Hinweis:</font></b> Die Sinus-Funktion `sin()` aus dem Modul `math` und `numpy` sind nicht dieselben! Python erkennt anhand des Typs von `zahlen` auf welche Sinus-Funktion zugegriffen werden soll.
- `math` -> `list`
- `numpy` -> `numpy.ndarray`
### Typen der NumPy-Werte
Das Array `zahlen` enthält nur Integers (Ganze Zahlen), daher wird der Typ des Vektors auf `int64` gesetzt. Die Ausgabe von `ergebnis` gibt bei der Berechnung der Sinuswerte von `zahlen` als Typ Float (Gleitpunktzahlen/Dezimalzahlen), also `float64` an.
```
zahlen.dtype
ergebnis.dtype
```
### Definition des Typs der Arrays
```
# Ausgabe einer Gleitpunktzahl
x = np.array([2,4,8,16], dtype=float)
x
# Ausgabe einer Komplexen Zahl
y = np.array([1,2,5,7], dtype=complex)
y
```
## Matrizen
$M_1\ = \left(
\begin{array}{ccc}
1 & 2 & 3 \\
4 & 5 & 6 \\
\end{array}
\right)$
```
M1 = np.array([[1, 2, 3], [4, 5, 6]])
M1
```
### Anzeigen der Dimension der Matrix
```
M1.shape
```
### Spezielle Funktionen
#### 3x3-Nullmatrix
```
M2 = np.zeros((3, 3))
M2
```
#### 3x4-Einheitsmatrix
```
M3 = np.ones((3, 4))
M3
```
#### Nullvektor
```
x = np.zeros(3)
x
```
#### Einheitsvektor
```
y = np.ones(3)
y
```
### `arange()` und `linspace()` für Sequenzen von Zahlen
Syntax: `arange(startwert, endwert, inkrement/schrittweite)`
<b><font color="red">Hinweis:</font></b> Wie in der `range()`-Funktion ist der Startwert inklusiv und der Endwert exklusiv.
```
time = np.arange(0, 5, 0.5)
time
```
Syntax: `linspace(startwert, endwert, anzahl der arrays)`
```
t = np.linspace(0, 5, 11)
t
```
### Operationen
```
x = np.arange(1, 6, 1)
y = np.arange(2, 12, 2)
```
$x=\left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\ 5 \\
\end{array}
\right)$
```
x
```
$y=\left(
\begin{array}{ccc}
2 \\ 4 \\ 6 \\ 8 \\ 10 \\
\end{array}
\right)$
```
y
```
### Addition
$\left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\ 5 \\
\end{array}
\right)
+
\left(
\begin{array}{ccc}
2 \\ 4 \\ 6 \\ 8 \\ 10 \\
\end{array}
\right)
=
\left(
\begin{array}{ccc}
3 \\ 6 \\ 9 \\ 12 \\ 15 \\
\end{array}
\right)$
```
x + y
```
### Subtraktion
$\left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\ 5 \\
\end{array}
\right)
-
\left(
\begin{array}{ccc}
2 \\ 4 \\ 6 \\ 8 \\ 10 \\
\end{array}
\right)
=
\left(
\begin{array}{ccc}
-1 \\ -2 \\ -3 \\ -4 \\ -5 \\
\end{array}
\right)
$
```
x - y
```
### Erweiterung
$\left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\ 5 \\
\end{array}
\right)
\cdot 4
=
\left(
\begin{array}{ccc}
4 \\ 8 \\ 12 \\ 16 \\ 20 \\
\end{array}
\right)
$
```
x*4
```
### Achtung!
-> Sehr gewöhnungsbedürftig ist, dass die Multiplikation und Division, als auch die Potenz und Wurzel von Arrays und Matrizen möglich ist
#### Multiplikation
<b><font color="red">Hinweis:</font></b> Nicht zu verwechseln mit dem Skalarprodukt!
$\left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\ 5 \\
\end{array}
\right)
\cdot
\left(
\begin{array}{ccc}
2 \\ 4 \\ 6 \\ 8 \\ 10 \\
\end{array}
\right)
=
\left(
\begin{array}{ccc}
2 \\ 8 \\ 18 \\ 32 \\ 50 \\
\end{array}
\right)
$
```
x * y
```
#### Division
$\left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\ 5 \\
\end{array}
\right)
/
\left(
\begin{array}{ccc}
2 \\ 4 \\ 6 \\ 8 \\ 10 \\
\end{array}
\right)
=
\left(
\begin{array}{ccc}
0.5 \\ 0.5 \\ 0.5 \\ 0.5 \\ 0.5 \\
\end{array}
\right)
$
```
x / y
```
#### Potenz
$\left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\ 5 \\
\end{array}
\right) ^2\
=
\left(
\begin{array}{ccc}
1 \\ 4 \\ 9 \\ 16 \\ 25 \\
\end{array}
\right)$
```
x**2
```
<b><font color="red">Hinweis:</font></b> Die Verwendung der `pow()`-Funktion aus dem `math`-Modul führt zu einer Fehlermeldung.
#### Wurzel
$\sqrt{
\left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\ 4 \\ 5 \\
\end{array}
\right)}
=
\left(
\begin{array}{ccc}
1.000 \\ 1.414 \\ 1.732 \\ 2.000 \\ 2.236 \\
\end{array}
\right)$
```
x**0.5
```
<b><font color="red">Hinweis:</font></b> Die Verwendung der `sqrt()`-Funktion aus dem `math`-Modul führt zu einer Fehlermeldung.
## Vektoren- und Matrizenberechnungen
### Skalarprodukt
(auch Inneres Produkt)
$a\cdot b = \left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\
\end{array}
\right)
\cdot
\left(
\begin{array}{ccc}
0 \\ 1 \\ 0 \\
\end{array}
\right)
= 2
$
```
a = np.array([1,2,3])
b = np.array([0,1,0])
print(np.inner(a, b)) # 1-D-Array
print(np.dot(a, b)) # N-D-Array
print(a @ b)
```
### Matrizenprodukt
```
a = np.array([[1,2],[3,4]])
b = np.array([[11,12],[13,14]])
print(np.inner(a, b))
print(np.dot(a, b))
print(a @ b)
A = np.array([[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34]])
B = np.array([[5, 4, 2], [1, 0, 2], [3, 8, 2], [24, 12, 57]])
# print(np.inner(A, B)) # Fehlermeldung
print(np.dot(A, B))
print(A @ B)
```
### Kreuzprodukt
$a\times b = \left(
\begin{array}{ccc}
1 \\ 2 \\ 3 \\
\end{array}
\right)
\times
\left(
\begin{array}{ccc}
0 \\ 1 \\ 0 \\
\end{array}
\right)
=
\left(
\begin{array}{ccc}
-3 \\ 6 \\ -3 \\
\end{array}
\right)
$
```
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
np.cross(x, y)
```
| github_jupyter |
# Widgets Demonstration
As well as providing working code that readers can experiment with, the textbook also provides a number of widgets to help explain specific concepts. This page contains a selection of these as an index. Run each cell to interact with the widget.
**NOTE:** You will need to enable interactivity by pressing 'Try' in the bottom left corner of a code cell, or by viewing this page in the [IBM Quantum Experience](https://quantum-computing.ibm.com/jupyter/user/qiskit-textbook/content/widgets-index.ipynb).
### Interactive Code
The most important interactive element of the textbook is the ability to change and experiment with the code. This is possible directly on the textbook webpage, but readers can also view the textbook as Jupyter notebooks where they are able to add more cells and save their changes. Interactive Python code also allows for widgets through [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/), and the rest of this page is dedicated to demonstrating some of the widgets provided by the Qiskit Textbook.
```
# Click 'try' then 'run' to see the output
# (fixed typo in the message: "This is code works!" -> "This code works!")
print("This code works!")
```
### Gate Demo
This widget shows the effects of a number of gates on a qubit, illustrated through the Bloch sphere. It is used a lot in [Single Qubit Gates](https://qiskit.org/textbook/ch-states/single-qubit-gates.html).
```
from qiskit_textbook.widgets import gate_demo
gate_demo()
```
### Binary Demonstration
This simple widget allows the reader to interact with a binary number. It is found in [The Atoms of Computation](https://qiskit.org/textbook/ch-states/atoms-computation.html).
```
from qiskit_textbook.widgets import binary_widget
binary_widget(nbits=5)
```
### Scalable Circuit Widget
When working with circuits such as those in the [Quantum Fourier Transform Chapter](https://qiskit.org/textbook/ch-algorithms/quantum-fourier-transform.html), it's often useful to see how these scale to different numbers of qubits. If our function takes a circuit (`QuantumCircuit`) and a number of qubits (`int`) as positional inputs, we can see how it scales using the widget below. Try changing the code inside these functions and re-run the cell.
```
from qiskit_textbook.widgets import scalable_circuit
from numpy import pi
def qft_rotations(circuit, n):
    """Performs qft on the first n qubits in circuit (without swaps)"""
    if n == 0:
        return circuit
    # Iterative form of the rotation cascade: for each target qubit, from
    # highest to lowest, apply a Hadamard followed by controlled-phase
    # rotations from every lower qubit onto it.
    for target in range(n - 1, -1, -1):
        circuit.h(target)
        for control in range(target):
            circuit.cp(pi / 2 ** (target - control), control, target)
def swap_qubits(circuit, n):
    """Reverse the order of qubits"""
    # Two-pointer sweep: swap the outermost pair and move inward.
    lo, hi = 0, n - 1
    while lo < hi:
        circuit.swap(lo, hi)
        lo += 1
        hi -= 1
    return circuit
def qft(circuit, n):
    """QFT on the first n qubits in circuit"""
    # Full QFT = rotation stages followed by reversing the qubit order.
    qft_rotations(circuit, n)
    swap_qubits(circuit, n)
    return circuit
scalable_circuit(qft)
```
### Bernstein-Vazirani Widget
Through this widget, the reader can follow the mathematics through an instance of the [Bernstein-Vazirani algorithm](https://qiskit.org/textbook/ch-algorithms/bernstein-vazirani.html). Press the buttons to apply the different steps of the algorithm. The first argument sets the number of qubits, and the second sets the hidden binary string, then re-run the cell. You can also reveal the contents of the oracle by setting `hide_oracle=False` and re-running the cell.
```
from qiskit_textbook.widgets import bv_widget
bv_widget(2, "11", hide_oracle=True)
```
### Deutsch-Jozsa Widget
Similarly to the Bernstein-Vazirani widget, through the Deutsch-Jozsa widget the reader can follow the mathematics through an instance of the [Deutsch-Jozsa algorithm](https://qiskit.org/textbook/ch-algorithms/deutsch-josza.html). Press the buttons to apply the different steps of the algorithm. `case` can be "balanced" or "constant", and `size` can be "small" or "large". Re-run the cell for a randomly selected oracle. You can also reveal the contents of the oracle by setting `hide_oracle=False` and re-running the cell.
```
from qiskit_textbook.widgets import dj_widget
dj_widget(size="large", case="balanced", hide_oracle=True)
```
| github_jupyter |
# CIFAR-10: Part 2
Welcome back! If you have not completed [Part 1](*), please do so before running the code in this notebook.
In Part 2 we will assume you have the training and testing lmdbs, as well as the trained model .pb files from Part 1. As you may recall from Part 1, we created the dataset in the form of lmdbs then trained a model and saved the trained model in the form of a *predict_net.pb* and an *init_net.pb*. In this notebook, we will show how to test that saved model with the test lmdb and how to continue training to increase our test accuracy.
Recall the objectives of the two part CIFAR-10 tutorial:
**Part 1:**
- Download dataset
- Write images to lmdbs
- Define and train a model with checkpoints
- Save the trained model
**Part 2:**
- Load pre-trained model from Part 1
- Run inference on testing lmdb
- Continue training to improve test accuracy
- Test the retrained model
As before, let's start with some necessary imports.
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import os
import shutil
import operator
import glob
from caffe2.python import core,model_helper,optimizer,workspace,brew,utils
from caffe2.proto import caffe2_pb2
import matplotlib.pyplot as plt
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
```
## Check Inputs
Before we get started, let's make sure you have the necessary Part 1 files. We will use the saved model from the most recent run of Part 1.
```
# Train lmdb
TRAIN_LMDB = os.path.join(os.path.expanduser('~'),"caffe2_notebooks/tutorial_data/cifar10/training_lmdb")
# Test lmdb
TEST_LMDB = os.path.join(os.path.expanduser('~'),"caffe2_notebooks/tutorial_data/cifar10/testing_lmdb")
# Extract protobuf files from most recent Part 1 run
part1_runs_path = os.path.join(os.path.expanduser('~'), "caffe2_notebooks", "tutorial_files", "tutorial_cifar10")
runs = sorted(glob.glob(part1_runs_path + "/*"))
# Guard: if Part 1 was never run, `runs[-1]` would raise IndexError before
# the friendly error message below could ever print.
if runs:
    # Init net
    INIT_NET = os.path.join(runs[-1], "cifar10_init_net.pb")
    # Predict net
    PREDICT_NET = os.path.join(runs[-1], "cifar10_predict_net.pb")
else:
    # os.path.exists("") is False, so the error branch below is taken
    INIT_NET = PREDICT_NET = ""
# Make sure they all exist
if (not os.path.exists(TRAIN_LMDB)) or (not os.path.exists(TEST_LMDB)) or (not os.path.exists(INIT_NET)) or (not os.path.exists(PREDICT_NET)):
    print("ERROR: input not found!")
else:
    print("Success, you may continue!")
```
### Repeat Helper Functions
If these functions look familiar, you are correct; they have been copied-and-pasted from Part 1. To summarize, we will need the *AddInputLayer* function to connect our models to the lmdbs, and the *Add_Original_CIFAR10_Model* function to provide the architecture of the network.
```
def AddInputLayer(model, batch_size, db, db_type):
    """Attach a DB-backed input layer to `model`.

    Reads (image, label) TensorProtos from `db`, casts the uint8 image
    data to float, rescales it from [0, 255] to [0, 1), and stops the
    gradient so the backward pass ignores the input blob.
    Returns the (data, label) blob references.
    """
    # load the data
    #data_uint8, label = brew.db_input(
    #    model,
    #    blobs_out=["data_uint8", "label"],
    #    batch_size=batch_size,
    #    db=db,
    #    db_type=db_type,
    #)
    data_uint8, label = model.TensorProtosDBInput([], ["data_uint8", "label"], batch_size=batch_size, db=db, db_type=db_type)
    # cast the data to float
    data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
    # scale data from [0,255] down to [0,1]
    data = model.Scale(data, data, scale=float(1./256))
    # don't need the gradient for the backward pass
    data = model.StopGradient(data, data)
    return data, label
def update_dims(height, width, kernel, stride, pad):
    """Return the (height, width) of a feature map after a conv/pool layer.

    Uses the standard formula: out = (in - kernel + 2*pad) // stride + 1.
    """
    def _out(size):
        return (size - kernel + 2 * pad) // stride + 1
    return _out(height), _out(width)
def Add_Original_CIFAR10_Model(model, data, num_classes, image_height, image_width, image_channels):
    """Build the original CIFAR-10 conv net on `model`; return the softmax blob.

    Three conv/pool stages followed by two fully connected layers and a
    softmax. `h`/`w` track the spatial size of the feature maps so the
    first fc layer's input dimension can be computed.
    """
    # Convolutional layer 1
    conv1 = brew.conv(model, data, 'conv1', dim_in=image_channels, dim_out=32, kernel=5, stride=1, pad=2)
    h,w = update_dims(height=image_height, width=image_width, kernel=5, stride=1, pad=2)
    # Pooling layer 1
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=3, stride=2)
    h,w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # ReLU layer 1
    relu1 = brew.relu(model, pool1, 'relu1')
    # Convolutional layer 2
    conv2 = brew.conv(model, relu1, 'conv2', dim_in=32, dim_out=32, kernel=5, stride=1, pad=2)
    h,w = update_dims(height=h, width=w, kernel=5, stride=1, pad=2)
    # ReLU layer 2
    relu2 = brew.relu(model, conv2, 'relu2')
    # Pooling layer 2 (average; the original comment mislabeled this "layer 1")
    pool2 = brew.average_pool(model, relu2, 'pool2', kernel=3, stride=2)
    h,w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # Convolutional layer 3
    conv3 = brew.conv(model, pool2, 'conv3', dim_in=32, dim_out=64, kernel=5, stride=1, pad=2)
    h,w = update_dims(height=h, width=w, kernel=5, stride=1, pad=2)
    # ReLU layer 3
    relu3 = brew.relu(model, conv3, 'relu3')
    # Pooling layer 3
    pool3 = brew.average_pool(model, relu3, 'pool3', kernel=3, stride=2)
    h,w = update_dims(height=h, width=w, kernel=3, stride=2, pad=0)
    # Fully connected layers; dim_in uses the tracked feature-map size
    fc1 = brew.fc(model, pool3, 'fc1', dim_in=64*h*w, dim_out=64)
    fc2 = brew.fc(model, fc1, 'fc2', dim_in=64, dim_out=num_classes)
    # Softmax layer
    softmax = brew.softmax(model, fc2, 'softmax')
    return softmax
```
## Test Saved Model From Part 1
### Construct Model for Testing
The first thing we need is a model helper object that we can attach the lmdb reader to.
```
# Create a ModelHelper object with init_params=False so brew does not
# (re)initialize parameters -- they come from the saved init net instead
arg_scope = {"order": "NCHW"}
test_model = model_helper.ModelHelper(name="test_model", arg_scope=arg_scope, init_params=False)
# Add the data input layer to the model, pointing at the TEST_LMDB
# (batch size 1, so each RunNet processes one test image)
data,_ = AddInputLayer(test_model,1,TEST_LMDB,'lmdb')
```
### Populate the Model Helper with Saved Model Params
To format a model for testing, we do not need to create params in the model helper, nor do we need to add gradient operators as we will only be performing forward passes. All we really need to do is populate the *.net* and *.param_init_net* members of the model helper with the contents of the saved *predict_net.pb* and *init_net.pb*, respectively. To accomplish this, we construct *caffe2_pb* objects with the protobuf from the pb files, create *Net* objects with the *caffe2_pb* objects, then **append** the net objects to the *.net* and *.param_init_net* members of the model helper. Appending is very important here! If we do not append, we would wipe out the input data layer stuff that we just added.
Recall from Part 1, the saved model expected an input named *data* and produced an output called *softmax*. Conveniently (but not accidentally), the *AddInputLayer* function reads from the lmdb and puts the information into the workspace in a blob called *data*. It is also important to remember what each of the saved nets that we are appending to our model contains. The *predict_net* contains the structure of the model, including the ops involved in the forward pass. It has the definitions of the convolutional, pooling, and fc layers in the model. The *init_net* contains the weight initializations for the parameters that the ops in the *predict_net* expect. For example, if there is an op in the *predict_net* named 'fc1', the *init_net* will contain the trained weights (*fc1_w*), and biases (*fc1_b*) for that layer.
After we append the nets, we add an accuracy layer to the model which uses the *softmax* output from the saved model and the *label* input from the lmdb. Note, we could manually fetch the softmax blob from the workspace after every iteration and check whether or not the class with the highest softmax score is the true label, but instead we opt for the simpler accuacy layer.
```
# Populate the model helper obj with the init net stuff, which provides the
# weight initializations for the model.
# NOTE: .pb protobuf files are binary -- open them in "rb" so
# ParseFromString receives bytes (text mode fails on Python 3 and can
# corrupt the stream on Windows).
init_net_proto = caffe2_pb2.NetDef()
with open(INIT_NET, "rb") as f:
    init_net_proto.ParseFromString(f.read())
test_model.param_init_net = test_model.param_init_net.AppendNet(core.Net(init_net_proto))
# Populate the model helper obj with the predict net stuff, which defines
# the structure of the model
predict_net_proto = caffe2_pb2.NetDef()
with open(PREDICT_NET, "rb") as f:
    predict_net_proto.ParseFromString(f.read())
test_model.net = test_model.net.AppendNet(core.Net(predict_net_proto))
# Add an accuracy feature to the model for convenient reporting during testing
accuracy = brew.accuracy(test_model, ['softmax', 'label' ], 'accuracy')
```
### Run Testing
At this point, our model is initialized as the saved model from Part 1. We can now run the testing loop and check the accuracy.
```
# Run the param init net to put the trained model info into the workspace
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
# Stat keeper
avg_accuracy = 0.0
# Number of test iterations to run here, since the full test set is 10k images and the
# batch size is 1, we will run 10000 test batches to cover the entire test set
test_iters = 10000
# Main testing loop
for i in range(test_iters):
    # One forward pass over the next test image
    workspace.RunNet(test_model.net)
    acc = workspace.FetchBlob('accuracy')
    avg_accuracy += acc
    # Progress report every 500 iterations (skip i == 0 to avoid divide-by-zero)
    if (i % 500 == 0) and (i > 0):
        print("Iter: {}, Current Accuracy: {}".format(i, avg_accuracy/float(i)))
# Report final test accuracy score as the number of correct predictions divided by 10,000
print("*********************************************")
print("Final Test Accuracy: ",avg_accuracy/float(test_iters))
```
## Continue Training
Our model is performing significantly better than random guessing, but I think we can do a little better with more training. To do this we will:
- create a new model helper
- specify that the train data will come from the training lmdb
- re-define the model architecture with the Add_Original_CIFAR10_Model function
- grab the trained weights and biases from the saved init_net.pb
- resume training
### Construct Model for Re-Training
Here we create a new model helper object for training. Nothing here should look new but take notice that we set **init_params=False**. This is important, as we do not want brew (in *Add_Original_CIFAR10_Model* function) to automatically initialize the params, rather we want to set them ourselves. Once we construct the model helper, we add the input layer and point it to the training lmdb, brew in the model architecture, and finally initialize the parameters by appending the contents of the saved *init_net.pb* to the *.param_init_net* member of the train model.
```
# Number of iterations to train for here
training_iters = 3000
# Reset workspace to clear all of the information from the testing stage
workspace.ResetWorkspace()
# Create new model; init_params=False so brew creates the param blobs but
# leaves their initialization to the saved init net appended below
arg_scope = {"order": "NCHW"}
train_model = model_helper.ModelHelper(name="cifar10_train", arg_scope=arg_scope, init_params=False)
# Add the data layer to the model (batch size 100, training lmdb)
data,_ = AddInputLayer(train_model,100,TRAIN_LMDB,'lmdb')
softmax = Add_Original_CIFAR10_Model(train_model, data, 10, 32, 32, 3)
# Populate the param_init_net of the model obj with the contents of the init net.
# NOTE: .pb protobuf files are binary -- open in "rb" so ParseFromString
# receives bytes (text mode fails on Python 3, corrupts on Windows).
init_net_proto = caffe2_pb2.NetDef()
with open(INIT_NET, "rb") as f:
    init_net_proto.ParseFromString(f.read())
tmp_init_net = core.Net(init_net_proto)
train_model.param_init_net = train_model.param_init_net.AppendNet(tmp_init_net)
```
### Specify Loss Function and Optimizer
We can now proceed as normal by specifying the loss function, adding the gradient operators, and building the optimizer. Here, we opt for the same loss function and optimizer that we used in Part 1.
```
# Add the "training operators" to the model
# cross-entropy between the predicted softmax distribution and the true label
xent = train_model.LabelCrossEntropy([softmax, 'label'], 'xent')
# compute the expected loss
loss = train_model.AveragedLoss(xent, "loss")
# track the accuracy of the model
accuracy = brew.accuracy(train_model, [softmax, 'label'], "accuracy")
# use the average loss we just computed to add gradient operators to the model
train_model.AddGradientOperators([loss])
# Specify Optimization Algorithm
# Same hyperparameters as Part 1: plain SGD with momentum and weight decay
optimizer.build_sgd(
    train_model,
    base_learning_rate=0.01,
    policy="fixed",
    momentum=0.9,
    weight_decay=0.004
)
```
**Important Note**
Check out the results of the *GetOptimizationParamInfo* function. The *params* that this function returns are the parameters that will be optimized by the optimization function. If you are attempting to retrain a model in a different way, and your model doesn't seem to be learning, check the return value of this function. If it returns nothing, look no further for your problem! This is exactly the reason that we brew'ed in the layers of the train model with the *Add_Original_CIFAR10_Model* function, because it creates the params in the model automatically. If we had appended the *.net* member of the Model Helper as we did for the test model, this function would return nothing, meaning no parameters would get optimized. A workaround if you appended the net would be to manually create the params with the *create_param* function, which feels like a bit of a hack, especially if you have the add model code on-hand.
```
# Sanity check: these are the params the optimizer will actually update.
# If nothing prints here, no learning will happen during retraining.
for param in train_model.GetOptimizationParamInfo():
    print("Param to be optimized: ",param)
```
### Run Training
**This step will take a while!**
With our model helper setup we can now run the training as normal. Note, the accuracy and loss reported here is as measured on the *training* batches. Recall that the accuracy reported in Part 1 was the validation accuracy. Be careful how you interpret this number!
```
# Prime the workspace
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net, overwrite=True)
# Run the training loop
for i in range(training_iters):
    # One forward/backward/update pass on a batch of 100 training images
    workspace.RunNet(train_model.net)
    acc = workspace.FetchBlob('accuracy')
    loss = workspace.FetchBlob('loss')
    # NOTE: these are *training*-batch stats, not validation accuracy
    if i % 100 == 0:
        print ("Iter: {}, Loss: {}, Accuracy: {}".format(i,loss,acc))
```
## Test the Retrained Model
We will test the retrained model, just as we did in the first part of this notebook. However, since the params already exist in the workspace from the retraining step, we do not need to set the *.param_init_net*. Rather, we set **init_params=False** and brew in the model architecture with *Add_Original_CIFAR10_Model*. When we create the net, the model will find that the required blobs are already in the workspace. Then, we can run the main testing loop, which will report a final test accuracy score (which is hopefully higher).
```
arg_scope = {"order": "NCHW"}
# Construct the model; init_params=False because the trained params are
# already in the workspace from the retraining step
test_model = model_helper.ModelHelper(name="test_model", arg_scope=arg_scope, init_params=False)
# Set the input as the test lmdb
data,_ = AddInputLayer(test_model,1,TEST_LMDB,'lmdb')
# brew in the model architecture
softmax = Add_Original_CIFAR10_Model(test_model, data, 10, 32, 32, 3)
accuracy = brew.accuracy(test_model, ['softmax', 'label' ], 'accuracy')
# Prime the net
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
# Confusion Matrix for CIFAR-10: rows = true class, cols = predicted class
cmat = np.zeros((10,10))
# Stat keepers
avg_accuracy = 0.0
test_iters = 10000
# Main testing loop
for i in range(test_iters):
    workspace.RunNet(test_model.net)
    acc = workspace.FetchBlob('accuracy')
    avg_accuracy += acc
    if (i % 500 == 0) and (i > 0):
        print("Iter: {}, Current Accuracy: {}".format(i, avg_accuracy/float(i)))
    # Get the top-1 prediction
    results = workspace.FetchBlob('softmax')[0]
    label = workspace.FetchBlob('label')[0]
    # index of the highest softmax score = predicted class
    max_index, max_value = max(enumerate(results), key=operator.itemgetter(1))
    # Update confusion matrix
    cmat[label,max_index] += 1
# Report final testing results
print("*********************************************")
print("Final Test Accuracy: ",avg_accuracy/float(test_iters))
```
### Check Results
Notice, the result from testing the re-trained model is better than the original test accuracy. If you wish, you can save the new model as .pb files just as in Part 1, but we will leave that to you. The last thing we will do is attempt to visualize the performance of our classifier by plotting a confusion matrix and looking for a **strong diagonal** trend.
```
# Plot confusion matrix: annotate each cell with its count and label the
# axes with the ten CIFAR-10 class names.
fig = plt.figure(figsize=(10,10))
plt.tight_layout()
ax = fig.add_subplot(111)
res = ax.imshow(cmat, cmap=plt.cm.rainbow,interpolation='nearest')
width, height = cmat.shape
# Use range (not Python-2-only xrange) so this also runs under Python 3,
# consistent with the range() calls on the tick lines below.
for x in range(width):
    for y in range(height):
        ax.annotate(str(cmat[x,y]), xy=(y, x),horizontalalignment='center',verticalalignment='center')
classes = ['Airplane','Automobile','Bird','Cat','Deer','Dog','Frog','Horse','Ship','Truck']
plt.xticks(range(width), classes, rotation=0)
plt.yticks(range(height), classes, rotation=0)
ax.set_xlabel('Predicted Class')
ax.set_ylabel('True Class')
plt.title('CIFAR-10 Confusion Matrix')
plt.show()
```
| github_jupyter |
# Analyze Order Book Data
## Imports & Settings
```
import pandas as pd
from pathlib import Path
import numpy as np
from collections import Counter
from time import time
# NOTE(review): the `time` imported from datetime on the next line shadows
# the time() function imported above -- after this point, time() constructs
# a datetime.time object instead of returning the current Unix timestamp.
# Confirm which one later cells actually need.
from datetime import datetime, timedelta, time
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from math import pi
from bokeh.plotting import figure, show, output_file, output_notebook
from scipy.stats import normaltest
%matplotlib inline
# Display floats with two decimals in pandas output
pd.set_option('display.float_format', lambda x: '%.2f' % x)
plt.style.use('fivethirtyeight')
# Paths to the HDF5 stores produced by the ITCH parsing step
data_path = Path('data')
itch_store = str(data_path / 'itch.h5')
order_book_store = str(data_path / 'order_book.h5')
# Instrument and session under analysis
stock = 'AAPL'
date = '20190327'
title = '{} | {}'.format(stock, pd.to_datetime(date).date())
```
## Load system event data
```
# 'S' messages are system events; index by event_code for direct lookup
with pd.HDFStore(itch_store) as store:
    sys_events = store['S'].set_index('event_code').drop_duplicates()
    # convert nanosecond offsets to wall-clock times for the session date
    sys_events.timestamp = sys_events.timestamp.add(pd.to_datetime(date)).dt.time
    # presumably 'Q' = start of market hours and 'M' = end of market hours
    # per the ITCH system-event codes -- TODO confirm against the spec
    market_open = sys_events.loc['Q', 'timestamp']
    market_close = sys_events.loc['M', 'timestamp']
```
## Trade Summary
We will combine the messages that refer to actual trades to learn about the volumes for each asset.
```
with pd.HDFStore(itch_store) as store:
stocks = store['R']
stocks.info()
```
As expected, a small number of the over 8,500 equity securities traded on this day account for most trades
```
with pd.HDFStore(itch_store) as store:
    stocks = store['R'].loc[:, ['stock_locate', 'stock']]
    # 'P' = trade messages, 'Q' = cross trades; align cross_price with price
    trades = store['P'].append(store['Q'].rename(columns={'cross_price': 'price'}), sort=False).merge(stocks)
# dollar value of each trade and its share of total traded value
trades['value'] = trades.shares.mul(trades.price)
trades['value_share'] = trades.value.div(trades.value.sum())
# per-symbol share of total traded value, largest first
trade_summary = trades.groupby('stock').value_share.sum().sort_values(ascending=False)
# top 50 symbols by traded value
trade_summary.iloc[:50].plot.bar(figsize=(14, 6), color='darkblue', title='% of Traded Value')
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.0%}'.format(y)))
```
## AAPL Trade Summary
```
with pd.HDFStore(order_book_store) as store:
trades = store['{}/trades'.format(stock)]
trades.price = trades.price.mul(1e-4)
trades = trades[trades.cross == 0]
trades = trades.between_time(market_open, market_close).drop('cross', axis=1)
trades.info()
```
## Tick Bars
The trade data is indexed by nanoseconds and is very noisy. The bid-ask bounce, for instance, causes the price to oscillate between the bid and ask prices when trade initiation alternates between buy and sell market orders. To improve the noise-signal ratio and improve the statistical properties, we need to resample and regularize the tick data by aggregating the trading activity.
We typically collect the open (first), low, high, and closing (last) price for the aggregated period, alongside the volume-weighted average price (VWAP), the number of shares traded, and the timestamp associated with the data.
```
# Plot raw tick prices; re-index by wall-clock time for a readable x-axis
tick_bars = trades.copy()
tick_bars.index = tick_bars.index.time
tick_bars.price.plot(figsize=(10, 5), title='{} | {}'.format(stock, pd.to_datetime(date).date()), lw=1)
plt.xlabel('')
plt.tight_layout();
```
### Test for Normality of tick returns
```
normaltest(tick_bars.price.pct_change().dropna())
```
## Regularizing Tick Data
### Price-Volume Chart
We will use the `price_volume` function to compare the price-volume relation for various regularization methods.
```
def price_volume(df, price='vwap', vol='vol', suptitle=None):
    """Plot a price panel (top) and a volume panel (bottom) on a shared time axis.

    Parameters
    ----------
    df : DataFrame with a datetime-like index.
    price : name of the column drawn as a line in the upper panel.
    vol : name of the column drawn as bars in the lower panel.
    suptitle : optional figure title. Fix: the original default was the bare
        name `title`, evaluated at definition time — it raised NameError when
        no global `title` existed and silently captured a stale value otherwise.
    """
    fig, axes = plt.subplots(nrows=2, sharex=True, figsize=(15, 8))
    axes[0].plot(df.index, df[price])
    # bar width scaled to 1/len so the bars tile the axis without overlapping
    axes[1].bar(df.index, df[vol], width=1 / (len(df.index)), color='r')
    # formatting: hour:minute ticks every 3 hours on the shared x-axis
    xfmt = mpl.dates.DateFormatter('%H:%M')
    axes[1].xaxis.set_major_locator(mpl.dates.HourLocator(interval=3))
    axes[1].xaxis.set_major_formatter(xfmt)
    axes[1].get_xaxis().set_tick_params(which='major', pad=25)
    axes[0].set_title('Price', fontsize=14)
    axes[1].set_title('Volume', fontsize=14)
    fig.autofmt_xdate()
    if suptitle is not None:
        fig.suptitle(suptitle)
    fig.tight_layout()
    plt.subplots_adjust(top=0.9)  # leave room for the suptitle
```
### Time Bars
Time bars involve trade aggregation by period.
```
def get_bar_stats(agg_trades):
    """Build OHLC / VWAP / volume / transaction-count bars from grouped trades.

    `agg_trades` is a pandas groupby or resampler object whose groups expose
    `price` and `shares` columns; one output row is produced per group.
    """
    weighted_price = agg_trades.apply(
        lambda grp: np.average(grp.price, weights=grp.shares))
    frames = [
        agg_trades.price.ohlc(),               # open/high/low/close per bar
        weighted_price.to_frame('vwap'),       # volume-weighted average price
        agg_trades.shares.sum().to_frame('vol'),   # total shares per bar
        agg_trades.shares.size().to_frame('txn'),  # number of trades per bar
    ]
    return pd.concat(frames, axis=1)
```
We create time bars using the `.resample()` method with the desired period.
```
# One-minute time bars: resample, compute bar stats, test the VWAP returns
# for normality, and draw the price/volume panels.
resampled = trades.resample('1Min')
time_bars = get_bar_stats(resampled)
normaltest(time_bars.vwap.pct_change().dropna())
price_volume(time_bars)
```
### Bokeh Candlestick Chart
Alternative visualization using the [bokeh](https://bokeh.pydata.org/en/latest/) library:
```
# Candlestick chart of 5-minute bars rendered with bokeh.
resampled = trades.resample('5Min') # 5 Min bars for better print
df = get_bar_stats(resampled)
increase = df.close > df.open   # up bars (close above open)
decrease = df.open > df.close   # down bars (close below open)
w = 2.5 * 60 * 1000 # 2.5 min in ms -- bar width in datetime-axis units
WIDGETS = "pan, wheel_zoom, box_zoom, reset, save"
p = figure(x_axis_type='datetime', tools=WIDGETS, plot_width=1500, title = "AAPL Candlestick")
p.xaxis.major_label_orientation = pi/4
p.grid.grid_line_alpha=0.4
# high-low range as a thin vertical segment; open-close body as a colored bar
p.segment(df.index, df.high, df.index, df.low, color="black")
p.vbar(df.index[increase], w, df.open[increase], df.close[increase], fill_color="#D5E1DD", line_color="black")
p.vbar(df.index[decrease], w, df.open[decrease], df.close[decrease], fill_color="#F2583E", line_color="black")
show(p)
```

### Volume Bars
Time bars smooth some of the noise contained in the raw tick data but may fail to account for the fragmentation of orders. Execution-focused algorithmic trading may aim to match the volume weighted average price (VWAP) over a given period, and will divide a single order into multiple trades and place orders according to historical patterns. Time bars would treat the same order differently, even though no new information has arrived in the market.
Volume bars offer an alternative by aggregating trade data according to volume. We can accomplish this as follows:
```
# Volume bars: reload the trades and aggregate them into buckets each holding
# roughly one minute's worth of average share volume.
with pd.HDFStore(order_book_store) as store:
    trades = store['{}/trades'.format(stock)]
    trades.price = trades.price.mul(1e-4)  # integer 1/10000-USD prices -> dollars
    trades = trades[trades.cross == 0]     # drop opening/closing cross trades
    trades = trades.between_time(market_open, market_close).drop('cross', axis=1)
    trades.info()
    # average shares traded per minute, assuming a 7.5-hour trading day
    trades_per_min = trades.shares.sum()/(60*7.5)
    trades['cumul_vol'] = trades.shares.cumsum()
    df = trades.reset_index()
    # bucket id = cumulative volume expressed in "minutes of average volume"
    by_vol = df.groupby(df.cumul_vol.div(trades_per_min).round().astype(int))
    vol_bars = pd.concat([by_vol.timestamp.last().to_frame('timestamp'), get_bar_stats(by_vol)], axis=1)
    vol_bars.head()
    price_volume(vol_bars.set_index('timestamp'))
    normaltest(vol_bars.vwap.dropna())
```
| github_jupyter |
<a name="top"></a>
<div style="width:1000 px">
<div style="float:right; width:98 px; height:98px;">
<img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
</div>
<h1>Siphon Overview</h1>
<h3>Unidata Python Workshop</h3>
<div style="clear:both"></div>
</div>
<hr style="height:2px;">
<div style="float:right; width:250 px"><img src="https://unidata.github.io/siphon/latest/_static/siphon_150x150.png" alt="TDS" style="height: 200px;"></div>
## Overview:
* **Teaching:** 15 minutes
* **Exercises:** 15 minutes
### Questions
1. What is a THREDDS Data Server (TDS)?
1. How can I use Siphon to access a TDS?
### Objectives
1. <a href="#threddsintro">Use siphon to access a THREDDS catalog</a>
1. <a href="#filtering">Find data within the catalog that we wish to access</a>
1. <a href="#dataaccess">Use siphon to perform remote data access</a>
<a name="threddsintro"></a>
## 1. What is THREDDS?
* Server for providing remote access to datasets
* Variety of services for accessing data:
- HTTP Download
- Web Mapping/Coverage Service (WMS/WCS)
- OPeNDAP
- NetCDF Subset Service
- CDMRemote
* Provides a more uniform way to access different types/formats of data
## THREDDS Demo
http://thredds.ucar.edu
### THREDDS Catalogs
- XML descriptions of data and metadata
- Access methods
- Easily handled with `siphon.catalog.TDSCatalog`
```
from datetime import datetime, timedelta
from siphon.catalog import TDSCatalog

# Browse yesterday's NEXRAD level-3 catalog (N0Q product, LRX radar site).
date = datetime.utcnow() - timedelta(days=1)
cat = TDSCatalog('http://thredds.ucar.edu/thredds/catalog/nexrad/level3/'
                 'N0Q/LRX/{dt:%Y%m%d}/catalog.xml'.format(dt=date))
```
<a href="#top">Top</a>
<hr style="height:2px;">
<a name="filtering"></a>
## 2. Filtering data
We *could* manually figure out what dataset we're looking for and generate that name (or index). Siphon provides some helpers to simplify this process, provided the names of the dataset follow a pattern with the timestamp in the name:
```
from datetime import datetime, timedelta

# Pick the catalog dataset whose timestamp is closest to 18:30 UTC yesterday.
request_time = date.replace(hour=18, minute=30, second=0, microsecond=0)
ds = cat.datasets.filter_time_nearest(request_time)
ds
```
We can also find the list of datasets within a time range:
```
# All datasets with timestamps inside a one-hour window after request_time.
datasets = cat.datasets.filter_time_range(request_time, request_time + timedelta(hours=1))
print(datasets)
```
### Exercise
* Starting from http://thredds.ucar.edu/thredds/catalog/satellite/SFC-T/SUPER-NATIONAL_1km/catalog.html, find the composites for the previous day.
* Grab the URL and create a TDSCatalog instance.
* Using Siphon, find the data available in the catalog between 12Z and 18Z on the previous day.
```
# YOUR CODE GOES HERE
```
#### Solution
```
# %load solutions/datasets.py
```
<a href="#top">Top</a>
<hr style="height:2px;">
<a name="dataaccess"></a>
## 3. Accessing data
Accessing catalogs is only part of the story; Siphon is much more useful if you're trying to access/download datasets.
For instance, using our data that we just retrieved:
```
# Work with the first dataset from the requested time range.
ds = datasets[0]
```
We can ask Siphon to download the file locally:
```
# Download the dataset into the current working directory, then list it.
ds.download()
import os; os.listdir()
```
Or better yet, get a file-like object that lets us `read` from the file as if it were local:
```
# Open the remote file over HTTP and read its raw bytes without saving locally.
fobj = ds.remote_open()
data = fobj.read()
print(len(data))
```
This is handy if you have Python code to read a particular format.
It's also possible to get access to the file through services that provide netCDF4-like access, but for the remote file. This access allows downloading information only for variables of interest, or for (index-based) subsets of that data:
```
# netCDF4-like remote access (CDMRemote when available, otherwise OPeNDAP).
nc = ds.remote_access()
```
By default this uses CDMRemote (if available), but it's also possible to ask for OPeNDAP (using netCDF4-python).
```
# List the variable names exposed by the remote dataset.
print(list(nc.variables))
```
<a href="#top">Top</a>
<hr style="height:2px;">
| github_jupyter |
# Анализ оттока клиентов в сети фитнес-клубов
Сеть фитнес-центров «Культурист-датасаентист» разрабатывает стратегию взаимодействия с клиентами на основе аналитических данных.
Распространённая проблема фитнес-клубов и других сервисов — отток клиентов.
Для фитнес-центра можно считать, что клиент попал в отток, если за последний месяц ни разу не посетил спортзал.
Необходимо провести анализ и подготовить план действий по удержанию клиентов.
Наши основные задачи:
- научиться прогнозировать вероятность оттока (на уровне следующего месяца) для каждого клиента;
- сформировать типичные портреты клиентов: выделить несколько наиболее ярких групп и охарактеризовать их основные свойства;
- проанализировать основные признаки, наиболее сильно влияющие на отток;
- сформулировать основные выводы и разработать рекомендации по повышению качества работы с клиентами:
1) выделить целевые группы клиентов;
2) предложить меры по снижению оттока;
3) определить другие особенности взаимодействия с клиентами.
Набор данных включает следующие поля:
- `Churn` — факт оттока в текущем месяце;
Текущие поля в датасете:
Данные клиента за предыдущий до проверки факта оттока месяц:
* `gender` — пол;
* `Near_Location` — проживание или работа в районе, где находится фитнес-центр;
* `Partner` — сотрудник компании-партнёра клуба (сотрудничество с компаниями, чьи сотрудники могут получать скидки на абонемент — в таком случае фитнес-центр хранит информацию о работодателе клиента);
* `Promo_friends` — факт первоначальной записи в рамках акции «приведи друга» (использовал промо-код от знакомого при оплате первого абонемента);
* `Phone` — наличие контактного телефона;
* `Age` — возраст;
* `Lifetime` — время с момента первого обращения в фитнес-центр (в месяцах).
Информация на основе журнала посещений, покупок и информация о текущем статусе абонемента клиента:
* `Contract_period` — длительность текущего действующего абонемента (месяц, 3 месяца, 6 месяцев, год);
* `Month_to_end_contract` — срок до окончания текущего действующего абонемента (в месяцах);
* `Group_visits` — факт посещения групповых занятий;
* `Avg_class_frequency_total` — средняя частота посещений в неделю за все время с начала действия абонемента;
* `Avg_class_frequency_current_month` — средняя частота посещений в неделю за предыдущий месяц;
* `Avg_additional_charges_total` — суммарная выручка от других услуг фитнес-центра: кафе, спорт-товары, косметический и массажный салон.
<h1>Содержание<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Шаг-1.-Загрузите-данные" data-toc-modified-id="Шаг-1.-Загрузите-данные-1"><span class="toc-item-num">1 </span>Шаг 1. Загрузите данные</a></span><ul class="toc-item"><li><span><a href="#Выводы" data-toc-modified-id="Выводы-1.1"><span class="toc-item-num">1.1 </span>Выводы</a></span></li></ul></li><li><span><a href="#Шаг-2.-Проведите-исследовательский-анализ-данных-(EDA)" data-toc-modified-id="Шаг-2.-Проведите-исследовательский-анализ-данных-(EDA)-2"><span class="toc-item-num">2 </span>Шаг 2. Проведите исследовательский анализ данных (EDA)</a></span><ul class="toc-item"><li><span><a href="#Выводы" data-toc-modified-id="Выводы-2.1"><span class="toc-item-num">2.1 </span>Выводы</a></span></li></ul></li><li><span><a href="#Шаг-3.-Постройте-модель-прогнозирования-оттока-клиентов" data-toc-modified-id="Шаг-3.-Постройте-модель-прогнозирования-оттока-клиентов-3"><span class="toc-item-num">3 </span>Шаг 3. Постройте модель прогнозирования оттока клиентов</a></span><ul class="toc-item"><li><span><a href="#Выводы" data-toc-modified-id="Выводы-3.1"><span class="toc-item-num">3.1 </span>Выводы</a></span></li></ul></li><li><span><a href="#Шаг-4.-Сделайте-кластеризацию-клиентов" data-toc-modified-id="Шаг-4.-Сделайте-кластеризацию-клиентов-4"><span class="toc-item-num">4 </span>Шаг 4. Сделайте кластеризацию клиентов</a></span><ul class="toc-item"><li><span><a href="#Выводы" data-toc-modified-id="Выводы-4.1"><span class="toc-item-num">4.1 </span>Выводы</a></span></li></ul></li><li><span><a href="#Шаг-5.-Сформулируйте-выводы-и-сделайте-базовые-рекомендации-по-работе-с-клиентами" data-toc-modified-id="Шаг-5.-Сформулируйте-выводы-и-сделайте-базовые-рекомендации-по-работе-с-клиентами-5"><span class="toc-item-num">5 </span>Шаг 5. Сформулируйте выводы и сделайте базовые рекомендации по работе с клиентами</a></span></li></ul></div>
## Шаг 1. Загрузите данные
```
import pandas as pd
# Display options: show every column, wide cells, wide output.
pd.set_option('display.max_columns', None)
pd.set_option('max_colwidth', 120)
pd.set_option('display.width', 500)
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import seaborn as sns
sns.set()
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, precision_score, recall_score
import warnings
warnings.simplefilter('ignore')

# Fixed seed so sampling, splits and model fits are reproducible.
RANDOM_SEED = 0

df = pd.read_csv("../datasets/gym_churn.csv")
```
Выведем произвольные строки из нашей таблицы чтобы увидеть данные.
```
# Show five random rows. The original wrapped the single sample in
# pd.concat([...]), which is a no-op for one frame — sample directly instead.
display(df.sample(5, random_state=RANDOM_SEED).reset_index(drop=True))
```
Переведем названия столбцов к нижнему регистру.
```
# Normalize column names to lower case for consistent access, then inspect
# dtypes and memory usage.
df.columns = map(str.lower, df.columns)
df.info()
```
Пропущенных данных нет, всего в таблице 4000 строк и 14 столбцов. Отсутствующих признаков не наблюдается.
Видим, что можем понизить разрядность данных чтобы оптимизировать работу кода.
```
# Downcast 64-bit numeric columns to the smallest sufficient dtypes to cut
# memory usage (roughly 4x smaller per the notebook's own measurement).
signed_features = df.select_dtypes(include='int64').columns
float_features = df.select_dtypes(include='float64').columns
df[signed_features] = df[signed_features].apply(pd.to_numeric, downcast='signed')
df[float_features] = df[float_features].apply(pd.to_numeric, downcast='float')
df.info()
```
После обработки оптимизировали работу кода почти в 4 раза.
### Выводы
В данном блоке мы оценили размер датафрейма - всего в таблице 4000 строк и 14 столбцов. Пропущенных данных нет, отсутствующих признаков не наблюдается. Перевели названия столбцов к нижнему регистру, а также оптимизировали работу кода почти в 4 раза, понизив разрядность данных.
## Шаг 2. Проведите исследовательский анализ данных (EDA)
```
# Descriptive statistics for every feature (transposed for readability).
df.describe().T
```
Из таблицы видим, что наибольший разброс в данных наблюдается у показателя `avg_additional_charges_total` (стандартное отклонение 96.35), при этом среднее - 146.9 (суммарная выручка от дополнительных процедур в фитнес центре). Почти у 85% клиентов фитнес центр находится рядом с работой или домом, примерно 41% клиентов посещают групповые занятия, 31% пришли по рекомендации друзей. Средний возраст клиентов - 29 лет, но зал посещают люди от 18 до 41 года и в гендерном соотношении разделены практически одинаково. Почти половина клиентов - сотрудники компании-партнёра клуба. Факт оттока в текущем месяце
зафиксирован у 26% клиентов.
```
# Mean of every feature split by churn status (0 = retained, 1 = churned).
df.groupby('churn').mean().reset_index()
```
В текущем месяце был зафиксирован равномерный отток как мужчин, так и женщин, осталось тоже одинаковое соотношение полов. Близкая локация сыграла интересную роль, почти 76% из тех, кто прекратил посещать зал либо работают либо живут возле фитнес центра. Примерно в первый месяц люди перестают посещать зал, но при этом те, кто полны энтузиазма посещают зал в среднем 5 месяцев. Люди, посещающие зал в настоящее время в среднем тратят больше денег на дополнительные процедуры и сервисы.
**Постройте столбчатые гистограммы и распределения признаков для тех, кто ушёл (отток) и тех, кто остался (не попали в отток);**
```
# Grid of per-feature histograms with KDE overlays, split by churn (hue).
WIDTH = 3
plot_amount = len(df.columns)
height = plot_amount//WIDTH + 1  # enough rows to fit every feature
fig, axs = plt.subplots(height, WIDTH, figsize=(15, 25))
fig.suptitle('Гистограммы признаков', y=1.003, size=14)
for item, ax in zip(df.columns, np.ravel(axs)):
    sns.histplot(data = df, x=item, hue='churn', ax=ax, kde=True)
    ax.set_title(item.capitalize().replace('_', ' '), size=12)
plt.tight_layout()
plt.show()
```
Ближе всего к нормальному распределение признака возраста посещающих фитнес центр. Причем это касается как клиентов, которые регулярно посещают фитнес центр, так и клиентов попавших в фактор оттока. Чуть больше 200 человек, у которых не был зафиксирован факт оттока воспользовались дополнительными услугами фитнес центра и принесли выручку в районе 200 у.е. с человека. По гистограмме видим, что все те, кто уходят, делают это в первые месяцы посещения зала. Чаще всего люди покупают абонемент на месяц, но при этом у данной категории клиентов наблюдается факт оттока в большей степени. Те, кто покупают абонемент на 12 месяцев, реже всего уходят в дальнейшем.
```
# Heatmap of the pairwise feature correlation matrix.
corr_matrix = df.corr()
plt.figure(figsize = (13, 10))
plt.title('Тепловая карта корреляционной матрицы', size = 15)
sns_plot = sns.heatmap(corr_matrix, annot=True, fmt='.2f',
                       linewidth=1, linecolor='black', vmax=1, center=0, cmap='ocean')
fig = sns_plot.get_figure()
plt.xlabel('Наименование признаков')
plt.ylabel('Наименование признаков')
plt.show()
```
Видим наличие корреляции между переменными `month_to_end_contract` и `contract_period`, а также между переменными `avg_class_frequency_total` и `avg_class_frequency_current_month`, что неудивительно, это взаимозависимые переменные.
### Выводы
* Наибольший разброс в данных наблюдается у показателя avg_additional_charges_total (стандартное отклонение 96.35), при этом среднее - 146.9 (суммарная выручка от дополнительных процедур в фитнес центре).
* Почти у 85% клиентов фитнес центр находится рядом с работой или домом, примерно 41% клиентов посещают групповые занятия, 31% пришли по рекомендации друзей.
* Средний возраст клиентов - 29 лет, но зал посещают люди от 18 до 41 года и в гендерном соотношении разделены практически одинаково.
* Почти половина клиентов - сотрудники компании-партнёра клуба. Факт оттока в текущем месяце зафиксирован у 26% клиентов.
* В текущем месяце был зафиксирован равномерный отток как мужчин, так и женщин, осталось тоже одинаковое соотношение полов. Близкая локация сыграла интересную роль, почти 76% из тех, кто прекратил посещать зал либо работают либо живут возле фитнес центра.
* Примерно в первый месяц люди перестают посещать зал, но при этом те, кто полны энтузиазма посещают зал в среднем 5 месяцев. Люди, посещающие зал в настоящее время в среднем тратят больше денег на дополнительные процедуры и сервисы.
* Ближе всего к нормальному распределение признака возраста посещающих фитнес центр. Причем это касается как клиентов, которые регулярно посещают фитнес центр, так и клиентов попавших в фактор оттока.
* Чуть больше 200 человек, у которых не был зафиксирован факт оттока воспользовались дополнительными услугами фитнес центра и принесли выручку в районе 200 у.е. с человека.
* Все те, кто уходят, делают это в первые месяцы посещения зала. Чаще всего люди покупают абонемент на месяц, но при этом у данной категории клиентов наблюдается факт оттока в большей степени. Те, кто покупают абонемент на 12 месяцев, реже всего уходят в дальнейшем.
* Мы зафиксировали наличие корреляции между переменными month_to_end_contract и contract_period, а также между переменными avg_class_frequency_total и avg_class_frequency_current_month, что неудивительно, это взаимозависимые переменные.
## Шаг 3. Постройте модель прогнозирования оттока клиентов
Разделим наши данные на признаки (матрица X) и целевую переменную (y).
```
# Feature matrix (X) and binary churn target (y).
X = df.drop('churn', axis=1)
y = df['churn']
```
Разделяем модель на обучающую и валидационную выборку.
```
# Hold out 20% of the data for validation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = RANDOM_SEED)
# fit StandardScaler on the training set only (avoids information leakage)
scaler = StandardScaler()
scaler.fit(X_train)
# transform both the training and validation feature sets
X_train_st = scaler.transform(X_train)
X_test_st = scaler.transform(X_test)
# define the logistic-regression model
lr_model = LogisticRegression(solver = 'lbfgs', random_state=RANDOM_SEED)
# fit the model on scaled features
lr_model.fit(X_train_st, y_train)
# predicted labels and class-1 probabilities on the validation set
lr_predictions = lr_model.predict(X_test_st)
lr_probabilities = lr_model.predict_proba(X_test_st)[:, 1]
# define a random-forest model (100 trees)
rf_model = RandomForestClassifier(n_estimators = 100, random_state=RANDOM_SEED)
# fit on unscaled features — tree models are scale-invariant
rf_model.fit(X_train, y_train)
# predicted labels and class-1 probabilities on the validation set
rf_predictions = rf_model.predict(X_test)
rf_probabilities = rf_model.predict_proba(X_test)[:,1]
def print_all_metrics(y_true, y_pred, y_proba, title='Метрики классификации'):
    '''Print accuracy, precision and recall for one model's predictions.

    y_true - target values from the validation set
    y_pred - labels predicted by the fitted model
    y_proba - predicted class-1 probabilities
        (NOTE(review): accepted but not used by the current metrics;
        kept for interface symmetry with probability-based scores)
    '''
    print(title)
    print('\tAccuracy: {:.2f}'.format(accuracy_score(y_true, y_pred)))
    print('\tPrecision: {:.2f}'.format(precision_score(y_true, y_pred)))
    print('\tRecall: {:.2f}'.format(recall_score(y_true, y_pred)))
# Report validation metrics for both models side by side.
print_all_metrics(
    y_test,
    lr_predictions,
    lr_probabilities,
    title='Метрики для модели логистической регрессии:',
)
print_all_metrics(
    y_test,
    rf_predictions,
    rf_probabilities,
    title = 'Метрики для модели случайного леса:'
)
```
Метрика Accuracy одинакова в обоих моделях и равна 0.92, что является неплохим результатом - доля верно угаданных ответов из всех прогнозов. Чем ближе значение accuracy к 100%, тем лучше. Метрика Precision характеризует долю правильных ответов только среди целевого класса. В модели логистической регрессии данная метрика лучше и равна 0.85. Recall метрика показывает, сколько реальных объектов "1" класса мы смогли обнаружить с помощью модели. Для случая логистической регрессии данная метрика также лучше.
Следовательно, ***модель логистической регрессии на основании метрик показала себя лучше.***
### Выводы
Мы построили модели прогнозирования оттока клиентов: модель логистической регрессии и модель случайного леса.
Метрика Accuracy одинакова в обоих моделях и равна 0.92, что является неплохим результатом - доля верно угаданных ответов из всех прогнозов. Чем ближе значение accuracy к 100%, тем лучше. Метрика Precision характеризует долю правильных ответов только среди целевого класса. В модели логистической регрессии данная метрика лучше и равна 0.85. Recall метрика показывает, сколько реальных объектов "1" класса мы смогли обнаружить с помощью модели. Для случая логистической регрессии данная метрика также лучше.
Следовательно, модель логистической регрессии на основании метрик показала себя лучше.
## Шаг 4. Сделайте кластеризацию клиентов
```
# standardize the features before distance-based clustering
sc = StandardScaler()
X_sc = sc.fit_transform(X)
# build the hierarchical-clustering linkage matrix with Ward's method
linked = linkage(X_sc, method = 'ward')
plt.figure(figsize=(15, 10))
dendrogram(linked, orientation='top')
plt.title('K-Means кластеризация. Дендрограмма', size=18)
plt.show()
```
На основании полученного графика можно выделить 4 класса.
Обучим модель кластеризации на основании алгоритма K-Means и спрогнозируем кластеры клиентов. Договоримся за число кластеров принять n=5.
```
km = KMeans(n_clusters = 5, random_state=RANDOM_SEED)  # set the number of clusters to 5
labels = km.fit_predict(X_sc)  # fit to the scaled data and obtain cluster labels
# store the cluster label of each client as a new dataset column
df['cluster_km'] = labels
# mean of every feature per cluster
df.groupby('cluster_km').mean()
```
В гендерном соотношении все кластеры имеют схожее распределение мужчин/женщин, кроме кластера 4 - у него наибольшее среднее значение 0.56. Все клиенты, принадлежащие кластеру 3 либо проживают рядом с фитнес залом/ либо работают неподалеку, напротив, клиенты, относящиеся к кластеру 0 живут далеко от фитнес центра.
Средние значения признака `partner` - сотрудник компании-партнёра клуба сильно варьируются от кластера к кластеру. Наименьшее значение у кластера 3 - 0,35, а наибольшее у кластера 0 - 0,78. Посещение зала по рекомендации друга: для данного признака также замечена сильная вариабельность от кластера к кластеру - для кластера 2, например, среднее значение равно 0.08, а для кластера 0 - аж 0.57. Среднее для признака длительность текущего абонемента наибольшее у кластера 0 - 10.88. Среднее признака групповых посещений занятий в зале наименьшее у кластера 2 - 0,22. Возраст клиентов не сильно варьируется от кластера к кластеру и везде составляет около 30 лет. Среднее срока окончания контракта наименьшее у кластера 3 - 1.8, а наибольшее у кластера 0 - почти 9.95.
```
# Grid of per-feature histograms, colored by K-Means cluster label.
WIDTH = 3
height = 5
fig, axs = plt.subplots(height, WIDTH, figsize=(15, 25))
fig.suptitle('Гистограммы признаков по кластерам', y=1.003, size=14)
for item, ax in zip(df.columns, np.ravel(axs)):
    sns.histplot(data = df, x=item, hue='cluster_km', ax=ax, kde=True, palette='plasma', multiple='dodge')
    ax.set_title(item.capitalize().replace('_', ' '), size=12)
plt.tight_layout()
plt.show()
```
* Кластер 0 характеризуется тем, что в нем сосредоточена наибольшая часть сотрудников компаний-партнеров клуба, также он характерен тем, что в нем много клиентов длительность текущего действующего абонемента которых самая большая - 12 месяцев. Клиенты, попавшие в данный кластер больше других посещают групповые занятия и срок до окончания действия контракта составляет порядка 12 месяцев в большинстве случаев. Для данного кластера доля оттока клиентов наименьшая.
* Кластер 1 характерен тем, что у всех клиентов данной группы отсутствует номер телефона и средние значения всех признаков меньше, чем у клиентов из других кластеров. И при этом в целом в данной группе наименьшее количество людей.
* Кластер 2 характеризуется тем, что в нем больше всего клиентов, у которых фитнес зал находится далеко от дома/работы. При этом в целом в группе около 500 клиентов.
* Кластер 3 характеризуется наибольшим количеством клиентов среди всех остальных кластеров, в нем у всех клиентов зал находится рядом с домом/работой. В этом кластере много людей пришло по рекомендации друзей, но также в нем у многих клиентов длительность текущего действующего абонемента месяц - 3 месяца.
* Кластер 4 характеризуется тем, что у всех клиентов фитнес клуба есть номера телефонов, при этом данная группа сильно уступает другим кластерам в значениях, при этом почти у всех клиентов в данном кластере зафиксирован факт оттока.
Для каждого полученного кластера посчитаем долю оттока.
```
# Average churn per cluster = share of churned clients in each cluster.
df.groupby('cluster_km').agg({'churn':'mean'}).reset_index().rename(columns={'churn':'churn_rate'})
```
Наиболее перспективные кластеры - 2 и 3 кластер. Склонные к оттоку - кластеры 0 и 4.
### Выводы
На основании полученной дендрограммы мы определили, что можно выделить 4 класса.
Мы обучили модель кластеризации на основании алгоритма K-Means и спрогнозировали кластеры клиентов и получили, что:
* Кластер 0 характеризуется тем, что в нем сосредоточена наибольшая часть сотрудников компаний-партнеров клуба, также он характерен тем, что в нем много клиентов длительность текущего действующего абонемента которых самая большая - 12 месяцев. Клиенты, попавшие в данный кластер больше других посещают групповые занятия и срок до окончания действия контракта составляет порядка 12 месяцев в большинстве случаев. Для данного кластера доля оттока клиентов наименьшая.
* Кластер 1 характерен тем, что у всех клиентов данной группы отсутствует номер телефона и средние значения всех признаков меньше, чем у клиентов из других кластеров. И при этом в целом в данной группе наименьшее количество людей.
* Кластер 2 характеризуется тем, что в нем больше всего клиентов, у которых фитнес зал находится далеко от дома/работы. При этом в целом в группе около 500 клиентов.
* Кластер 3 характеризуется наибольшим количеством клиентов среди всех остальных кластеров, в нем у всех клиентов зал находится рядом с домом/работой. В этом кластере много людей пришло по рекомендации друзей, но также в нем у многих клиентов длительность текущего действующего абонемента месяц - 3 месяца.
* Кластер 4 характеризуется тем, что у всех клиентов фитнес клуба есть номера телефонов, при этом данная группа сильно уступает другим кластерам в значениях, при этом почти у всех клиентов в данном кластере зафиксирован факт оттока.
В гендерном соотношении все кластеры имеют схожее распределение мужчин/женщин, кроме кластера 4 - у него наибольшее среднее значение 0.56. Все клиенты, принадлежащие кластеру 3 либо проживают рядом с фитнес залом/ либо работают неподалеку, напротив, клиенты, относящиеся к кластеру 0 живут далеко от фитнес центра.
Средние значения признака `partner` - сотрудник компании-партнёра клуба сильно варьируются от кластера к кластеру. Наименьшее значение у кластера 3 - 0,35, а наибольшее у кластера 0 - 0,78. Посещение зала по рекомендации друга: для данного признака также замечена сильная вариабельность от кластера к кластеру - для кластера 2, например, среднее значение равно 0.08, а для кластера 0 - аж 0.57. Среднее для признака длительность текущего абонемента наибольшее у кластера 0 - 10.88. Среднее признака групповых посещений занятий в зале наименьшее у кластера 2 - 0,22. Возраст клиентов не сильно варьируется от кластера к кластеру и везде составляет около 30 лет. Среднее срока окончания контракта наименьшее у кластера 3 - 1.8, а наибольшее у кластера 0 - почти 9.95.
Наиболее перспективные кластеры - 2 и 3 кластер. Склонные к оттоку - кластеры 0 и 4.
## Шаг 5. Сформулируйте выводы и сделайте базовые рекомендации по работе с клиентами
* Почти у 85% клиентов фитнес центр находится рядом с работой или домом, примерно 41% клиентов посещают групповые занятия, 31% пришли по рекомендации друзей.
* Средний возраст клиентов - 29 лет, но зал посещают люди от 18 до 41 года и в гендерном соотношении разделены практически одинаково.
* Почти половина клиентов - сотрудники компании-партнёра клуба. Факт оттока в текущем месяце зафиксирован у 26% клиентов.
* В текущем месяце был зафиксирован равномерный отток как мужчин, так и женщин, осталось тоже одинаковое соотношение полов. Близкая локация сыграла интересную роль, почти 76% из тех, кто прекратил посещать зал либо работают либо живут возле фитнес центра.
* Примерно в первый месяц люди перестают посещать зал, но при этом те, кто полны энтузиазма посещают зал в среднем 5 месяцев. Люди, посещающие зал в настоящее время в среднем тратят больше денег на дополнительные процедуры и сервисы.
* Чуть больше 200 человек, у которых не был зафиксирован факт оттока воспользовались дополнительными услугами фитнес центра и принесли выручку в районе 200 у.е. с человека.
* Все те, кто уходят, делают это в первые месяцы посещения зала. Чаще всего люди покупают абонемент на месяц, но при этом у данной категории клиентов наблюдается факт оттока в большей степени. Те, кто покупают абонемент на 12 месяцев, реже всего уходят в дальнейшем.
* Мы зафиксировали наличие корреляции между переменными month_to_end_contract и contract_period, а также между переменными avg_class_frequency_total и avg_class_frequency_current_month, что неудивительно, это взаимозависимые переменные.
***Рекомендации для стратегии взаимодействия с клиентами и их удержания:***
* Модель логистической регрессии на основании метрик показала себя лучше, следовательно для прогнозирования оттока клиентов лучше использовать именно ее.
* Наличие в базе фитнес центра номера телефона клиента поможет избежать факта оттока, поскольку администратор, например, может иногда звонить и напоминать об преимуществах использования абонемента/ возможно предлагать какие-то акции или дополнительные процедуры.
* Поскольку люди чаще всего бросают занятия фитнесом в первый месяц, можно придумать стратегии стимуляции интереса клиента продолжать посещать зал - например, после 10 посещений занятий в фитнес центре предлагать бесплатную процедуру массажа.
* Поскольку люди чаще всего покупают абонемент на месяц и у этой части клиентов факт оттока выражен в большей степени, можно устроить акции/розыгрыши - типа покупка абонемента на 3 месяца по цене абонемента на 1 месяц.
| github_jupyter |
```
%matplotlib inline
```
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the `lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
```
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
    """Compute the tomography design matrix.

    Parameters
    ----------
    l_x : int
        linear size of image array
    n_dir : int
        number of angles at which projections are acquired.

    Returns
    -------
    p : sparse matrix of shape (n_dir l_x, l_x**2)
    """
    X, Y = _generate_center_coordinates(l_x)
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    data_inds, weights, camera_inds = [], [], []
    # Each image pixel contributes to two detector bins per angle, so the
    # flat pixel indices are duplicated to match the doubled index arrays
    # returned by _weights.
    data_unravel_indices = np.arange(l_x ** 2)
    data_unravel_indices = np.hstack((data_unravel_indices,
                                      data_unravel_indices))
    for i, angle in enumerate(angles):
        # Rotate pixel centers so the projection is along the x axis.
        Xrot = np.cos(angle) * X - np.sin(angle) * Y
        inds, w = _weights(Xrot, dx=1, orig=X.min())
        # _weights returns float cell indices (np.floor output); coo_matrix
        # requires integer row/column indices, so cast explicitly.
        inds = inds.astype(np.int64)
        mask = np.logical_and(inds >= 0, inds < l_x)
        weights += list(w[mask])
        camera_inds += list(inds[mask] + i * l_x)
        data_inds += list(data_unravel_indices[mask])
    proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
    return proj_operator
def generate_synthetic_data(l=128):
    """Generate a synthetic ``l`` x ``l`` binary image of object boundaries.

    Random points are smoothed into blobs inside a circular field of view;
    the returned boolean array keeps only the blob boundaries (erosion XOR),
    giving a sparse image suitable for the compressive-sensing demo.

    Parameters
    ----------
    l : int, optional
        Linear image size in pixels. Defaults to 128, matching the module
        level ``l`` this function previously read as a global.
    """
    rs = np.random.RandomState(0)  # fixed seed: output is deterministic
    n_pts = 36
    x, y = np.ogrid[0:l, 0:l]
    mask_outer = (x - l / 2.) ** 2 + (y - l / 2.) ** 2 < (l / 2.) ** 2
    mask = np.zeros((l, l))
    points = l * rs.rand(2, n_pts)
    # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
    res = np.logical_and(mask > mask.mean(), mask_outer)
    # Keep only the boundary pixels of each blob.
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
# Only l/7 projection angles are acquired (compressive sensing setup).
# np.linspace's `num` argument must be an integer on modern NumPy, so use
# floor division instead of the float expression `l / 7.`.
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
# Add Gaussian noise to the measured projections.
proj += 0.15 * np.random.randn(*proj.shape)

# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)

# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)

# Show the original image next to both reconstructions.
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
                    right=1)
plt.show()
```
| github_jupyter |
```
import pandas as pd
import geopandas as gpd
import seaborn as sns
import matplotlib.pyplot as plt
import husl
from legendgram import legendgram
import mapclassify
from matplotlib_scalebar.scalebar import ScaleBar
from matplotlib.colors import ListedColormap
from random import shuffle
from tqdm import tqdm
# Load the cluster assignments (20-cluster solution for Prague buildings)
# and the municipal validation geodatabase; paths are machine-specific.
clusters = pd.read_csv('/Users/martin/Dropbox/Academia/Data/Geo/Prague/Clustering/complete data/200218_clusters_complete_n20.csv', index_col=0)
# NOTE: `file` shadows the (Python 2) builtin name; kept as-is for compatibility.
file = '/Users/martin/Dropbox/Academia/Contracts/UAP Prague/2020.01_Zakázka MF/01_data/202004_Zakazka MF_predana data/20200421_ZakazkaMF_data_validacni.gdb'
import fiona
# Inspect the available layers in the geodatabase (notebook display).
fiona.listlayers(file)
qual = gpd.read_file(file, layer='URK_LokalityStav_p')
buildings = gpd.read_file('/Users/martin/Dropbox/Academia/Data/Geo/Prague/Clustering/geometry.gpkg', layer='buildings')
# Use building centroids for the spatial join so each building falls in
# exactly one qualitative-typology polygon.
buildings['cent'] = buildings.centroid
buildings = buildings.set_geometry('cent')
buildings = buildings.to_crs(qual.crs)
joined = gpd.sjoin(buildings, qual, how='left')
# Attach the morphological cluster label of each building (key: uID).
joined = joined.merge(clusters, how='left', on='uID')
joined.head(2)
# Switch back to the full building footprints for any later plotting.
joined = joined.set_geometry('geometry')
## analyse
```
import numpy as np
def show_values_on_bars(axs):
def _show_on_single_plot(ax):
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height() + 0.02
value = '{:.2f}'.format(p.get_height())
ax.text(_x, _y, value, ha="center")
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
# NOTE(review): `cols` is not defined anywhere in this notebook chunk —
# this line raises NameError as written. The palette was presumably built
# in a removed cell; confirm and restore its definition.
pal = cols
# NOTE(review): .isin() is called with no arguments, which raises
# TypeError. A list of cluster ids is presumably missing here.
data = joined.loc[joined['cluster'].isin()]['STRUKTURA_STAV'].value_counts(sort=False, normalize=True)
sns.set(context="paper", style="ticks", rc={'patch.force_edgecolor': False})
fig, ax = plt.subplots(figsize=(10, 5))
sns.barplot(ax=ax, x=data.index, y=data, order=data.index, palette=pal)
sns.despine(offset=10)
plt.ylabel('frequency')
plt.xlabel('historical period')
plt.ylim(0, 1)
show_values_on_bars(ax)
# Restrict to the qualitative typology classes of interest (1-9 minus 3, 4).
sample = joined.loc[joined['STRUKTURA_STAV'].isin([1, 2, 5, 6, 7, 8, 9])]
data = sample.loc[sample['cluster'].isin([11])]['STRUKTURA_STAV'].value_counts(sort=False, normalize=True)
# Human-readable names for typology codes 1, 2, 5, 6, 7, 8, 9 (in order).
labels = ['organic', 'perimeter block', 'village', 'garden city', 'modernism', 'production', 'services']
sns.set(context="paper", style="ticks", rc={'patch.force_edgecolor': False})
fig, ax = plt.subplots(figsize=(10, 5))
sns.barplot(ax=ax, x=data.index, y=data, order=[1, 2, 5, 6, 7, 8, 9], palette=pal)
sns.despine(offset=10)
plt.ylabel('frequency')
plt.xlabel('qualitative typology')
plt.ylim(0, 1)
ax.set_xticklabels(labels)
show_values_on_bars(ax)
# save all clusters
# One frequency barplot per cluster, saved as both PDF and PNG.
for cl in range(20):
    data = sample.loc[sample['cluster'].isin([cl])]['STRUKTURA_STAV'].value_counts(sort=False, normalize=True)
    fig, ax = plt.subplots(figsize=(10, 5))
    sns.barplot(ax=ax, x=data.index, y=data, order=[1, 2, 5, 6, 7, 8, 9], palette=pal)
    sns.despine(offset=10)
    plt.ylabel('frequency')
    plt.xlabel('qualitative typology')
    plt.ylim(0, 1)
    ax.set_xticklabels(labels)
    show_values_on_bars(ax)
    for ext in ['pdf', 'png']:
        plt.savefig('figures/PRG_cluster_' + str(cl) + '_structure.' + ext, bbox_inches='tight')
    plt.close()
# 2x2 grid of typology-frequency barplots for four representative clusters.
fig, ax = plt.subplots(2, 2, figsize=(14, 10))
panels = [
    ((0, 0), 11, 'cluster 11'),
    ((0, 1), 5, 'cluster 5'),
    ((1, 0), 12, 'cluster 12'),
    ((1, 1), 13, 'cluster 13'),
]
for (row, col), cl, title in panels:
    axis = ax[row, col]
    data = sample.loc[sample['cluster'].isin([cl])]['STRUKTURA_STAV'].value_counts(sort=False, normalize=True)
    sns.barplot(ax=axis, x=data.index, y=data, order=[1, 2, 5, 6, 7, 8, 9], palette=pal)
    sns.despine(offset=10)
    axis.set_ylabel('frequency')
    axis.set_xlabel('qualitative typology')
    axis.set_title(title)
    axis.set_ylim(0, 1)
    axis.set_xticklabels(labels)
    show_values_on_bars(axis)
plt.tight_layout()
plt.savefig('figures/PRG_cluster_structure_subplot.pdf')
# 2x2 grid of typology-frequency barplots aggregated by cluster branch.
fig, ax = plt.subplots(2, 2, figsize=(14, 10))
branches = [
    ((0, 0), [11, 15, 5], 'compact city'),
    ((0, 1), [3, 0, 8, 9, 13, 17], 'low-rise city'),
    ((1, 0), [1, 19], 'industrial city'),
    ((1, 1), [12, 14, 2, 10], 'heterogenous dense city branch'),
]
for (row, col), cluster_ids, title in branches:
    axis = ax[row, col]
    data = sample.loc[sample['cluster'].isin(cluster_ids)]['STRUKTURA_STAV'].value_counts(sort=False, normalize=True)
    sns.barplot(ax=axis, x=data.index, y=data, order=[1, 2, 5, 6, 7, 8, 9], palette=pal)
    sns.despine(offset=10)
    axis.set_ylabel('frequency')
    axis.set_xlabel('qualitative typology')
    axis.set_title(title)
    axis.set_ylim(0, 1)
    axis.set_xticklabels(labels)
    show_values_on_bars(axis)
plt.tight_layout()
plt.savefig('figures/PRG_branch_structure_subplot.pdf')
import scipy.stats as ss
import numpy as np
def cramers_v(x, y):
    """Bias-corrected Cramér's V association between two categorical series.

    Builds the contingency table of ``x`` vs ``y``, computes the chi-squared
    statistic, and applies the Bergsma-Wicher bias correction before taking
    the square root. Returns a value in [0, 1].
    """
    table = pd.crosstab(x, y)
    chi2 = ss.chi2_contingency(table)[0]
    n = table.sum().sum()
    phi2 = chi2 / n
    r, k = table.shape
    # Bias-corrected phi^2 and table dimensions.
    phi2_corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
    r_corr = r - ((r - 1) ** 2) / (n - 1)
    k_corr = k - ((k - 1) ** 2) / (n - 1)
    return np.sqrt(phi2_corr / min((k_corr - 1), (r_corr - 1)))
# Strength of association between morphological cluster and qualitative
# typology (bias-corrected Cramér's V; notebook displays the value).
cramers_v(sample.cluster, sample.STRUKTURA_STAV)
confusion_matrix = pd.crosstab(sample.cluster, sample.STRUKTURA_STAV)
chi, p, dof, exp = ss.chi2_contingency(confusion_matrix)
# Bare expressions below rely on notebook cell display.
p
dof
chi
```
| github_jupyter |
# Markov Chain Monte Carlo (MCMC)
GPflow allows you to approximate the posterior over the latent functions of its models (and over the hyperparameters after setting a prior for those) using Hamiltonian Monte Carlo (HMC).
```
import numpy as np
import matplotlib.pyplot as plt
import gpflow
from gpflow.test_util import notebook_niter, is_continuous_integration
import matplotlib
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (12, 6)
plt = matplotlib.pyplot
from multiclass_classification import plot_from_samples, colors
```
In this notebook, we provide three examples:
* [Sampling hyperparameters in GP regression](#example_1)
* [Sparse Variational MC for multiclass classification](#example_2)
* [Fully Bayesian inference for generalised GP models with HMC](#example_3)
<a id='example_1'></a>
## Sampling hyperparameters in GP regression
We first consider the GP regression (with Gaussian noise) for which the marginal likelihood $p(\mathbf y\,|\,\theta)$ can be computed exactly.
The GPR model parameterised by $\theta = [\tau]$ is given by
$$ Y_i = f(X_i) + \varepsilon_i$$
where $f \sim \mathcal{GP}(\mu(.), k(., .))$, and $\varepsilon \sim \mathcal{N}(0, \tau^2 I)$.
See [Basic (Gaussian likelihood) GP regression model](../basics/regression.ipynb) for more details on GPR and for a treatment of the direct likelihood maximisation.
### Data for a one-dimensional regression problem
```
# Toy 1-D regression dataset: 12 noisy observations of a sum of sinusoids,
# shifted by +3 so the data is not centered at zero.
N = 12
X = np.random.rand(N,1)
Y = np.sin(12*X) + 0.66*np.cos(25*X) + np.random.randn(N,1)*0.1 + 3
plt.plot(X, Y, 'kx', mew=2)
plt.xlabel('$X$')
plt.ylabel('$Y$')
plt.title('toy data')
plt.show()
```
### MCMC for hyperparameters $\theta$
We now want to sample from the posterior over $\theta$:
$$p(\theta|\mathbf{y}) \propto p(\mathbf{y}|\theta)p(\theta)$$
Firstly, we build the GPR model:
```
# Build the GPR model (GPflow 1.x / TF1 graph API): Matern-5/2 kernel with
# a linear mean function; start from a fresh TF graph so reruns are clean.
gpflow.reset_default_graph_and_session()
k = gpflow.kernels.Matern52(1, lengthscales=0.3)
meanf = gpflow.mean_functions.Linear(1.0, 0.0)
m = gpflow.models.GPR(X, Y, k, meanf)
# Initialise the Gaussian noise variance to a small value.
m.likelihood.variance = 0.01
```
Secondly, we initialise the model to the maximum likelihood solution:
```
# Initialise hyperparameters at the maximum-likelihood solution before HMC.
gpflow.train.ScipyOptimizer().minimize(m)
print('log likelihood at optimum:', m.compute_log_likelihood())
```
Thirdly, we add priors to the hyperparameters:
```
# Attach priors to all hyperparameters so HMC samples a proper posterior.
# The model must be cleared and recompiled for prior changes to take effect
# in the GPflow 1.x graph.
m.clear()
m.kern.lengthscales.prior = gpflow.priors.Gamma(1., 1.)
m.kern.variance.prior = gpflow.priors.Gamma(1., 1.)
m.likelihood.variance.prior = gpflow.priors.Gamma(1., 1.)
m.mean_function.A.prior = gpflow.priors.Gaussian(0., 10.)
m.mean_function.b.prior = gpflow.priors.Gaussian(0., 10.)
m.compile()
# Notebook display of the parameter table.
m.as_pandas_table()
```
We now sample from the posterior using HMC:
```
# Draw posterior samples with HMC; each proposal takes 10-20 leapfrog
# steps of size 0.05. `samples` is a pandas DataFrame, one column per
# parameter.
sampler = gpflow.train.HMC()
samples = sampler.sample(m, num_samples=gpflow.test_util.notebook_niter(500),
                         epsilon=0.05, lmin=10, lmax=20, logprobs=False)
```
Next we display the sampled hyperparameters:
```
# Trace plot: one line per sampled hyperparameter across HMC iterations.
plt.figure(figsize=(8,4))
# DataFrame.iteritems() was removed in pandas 2.0; items() is the direct
# replacement and behaves identically here.
for i, col in samples.items():
    plt.plot(col, label=col.name)
plt.legend(bbox_to_anchor=(1., 1.))
plt.xlabel('hmc iteration')
plt.ylabel('parameter value')
```
You can also inspect the marginal distribution of samples:
```
# Marginal histograms of the posterior samples, one panel per parameter.
# Parameter names follow GPflow 1.x path naming.
hyperparameters = ['GPR/kern/lengthscales',
                   'GPR/kern/variance',
                   'GPR/likelihood/variance',
                   'GPR/mean_function/A',
                   'GPR/mean_function/b']
fig, axarr = plt.subplots(1, len(hyperparameters), figsize=(15,3))
for i, hyp in enumerate(hyperparameters):
    ax = axarr[i]
    # Column entries may be arrays, hence the stack/reshape before hist.
    ax.hist(np.stack(samples[hyp]).reshape(-1,1),bins=20)
    ax.set_title(hyp);#
plt.show()
```
**NOTE:** The sampler runs in unconstrained space (so that positive parameters remain positive, and parameters that are not trainable are ignored).
However, GPflow returns a dataframe with values in the true units.
This notebook is for illustrative purposes only; for serious analysis you would most certainly want to run the sampler for longer, with multiple chains and convergence checks.
```
# Pairwise scatter plots of sampled hyperparameters to inspect posterior
# correlations (e.g. noise variance vs signal variance).
f, axs = plt.subplots(1,3, figsize=(12,4))
axs[0].plot(samples['GPR/likelihood/variance'],
            samples['GPR/kern/variance'], 'k.', alpha = 0.15)
axs[0].set_xlabel('noise_variance')
axs[0].set_ylabel('signal_variance')
axs[1].plot(samples['GPR/likelihood/variance'],
            samples['GPR/kern/lengthscales'], 'k.', alpha = 0.15)
axs[1].set_xlabel('noise_variance')
axs[1].set_ylabel('lengthscale')
axs[2].plot(samples['GPR/kern/lengthscales'],
            samples['GPR/kern/variance'], 'k.', alpha = 0.1)
axs[2].set_xlabel('lengthscale')
axs[2].set_ylabel('signal_variance')
```
To plot the posterior of predictions, we'll iterate through the samples and set the model state with each sample. Then, for that state (the set of hyperparameters) we'll draw some samples from the prediction function.
```
#plot the function posterior
# For every 20th HMC sample, load that hyperparameter state into the model
# and draw one GP function sample over a dense grid.
xx = np.linspace(-0.1, 1.1, 100)[:,None]
plt.figure(figsize=(12, 6))
for i, s in samples.iloc[::20].iterrows():
    f = m.predict_f_samples(xx, 1, initialize=False, feed_dict=m.sample_feed_dict(s))
    plt.plot(xx, f[0,:,:], 'C0', lw=2, alpha=0.3)
plt.plot(X, Y, 'kx', mew=2)
_ = plt.xlim(xx.min(), xx.max())
_ = plt.ylim(0, 6)
plt.xlabel('$x$')
plt.ylabel('$f|X,Y$')
plt.title('Posterior GP samples')
plt.show()
```
<a id='example_2'></a>
## Sparse Variational MC for multiclass classification
We now consider the [multiclass classification](../advanced/multiclass_classification.ipynb) problem. Here the marginal likelihood is not available in closed form. Instead we use a sparse variational approximation where we approximate the posterior for each GP as $q(f_c) \propto p(f_c|\mathbf{u}_c)q(\mathbf{u}_c)$
In the standard Sparse Variational Gaussian Process (SVGP) formulation, $q(\mathbf{u_c})$ is parameterised as a multivariate Gaussian.
An alternative is to directly sample from the optimal $q(\mathbf{u}_c)$; this is what sparse variational GP using MCMC (SGPMC) does.
```
# Start the multiclass example from a fresh TF graph.
gpflow.reset_default_graph_and_session()
from gpflow.test_util import notebook_niter, is_continuous_integration
```
We first build a multiclass classification dataset:
```
# Synthetic 3-class classification dataset: sample C latent GPs from an
# RBF prior and label each point by the argmax latent (hard max).
# Number of functions and number of data points
C, N = 3, 100
# Input
X = np.random.rand(N, 1)
# RBF kernel matrix
kern = gpflow.kernels.RBF(1, lengthscales=0.1)
K = kern.compute_K_symm(X) + np.eye(N) * 1e-6
# Latents prior sample
f = np.random.multivariate_normal(mean=np.zeros(N), cov=K, size=(C)).T
# Hard max observation
Y = np.argmax(f, 1).reshape(-1,).astype(int)
# One-hot encoding
Y_hot = np.zeros((N, C), dtype=bool)
Y_hot[np.arange(N), Y] = 1
# Plot latent functions (dots) against their one-hot labels (lines).
plt.figure(figsize=(12, 6))
order = np.argsort(X.reshape(-1,))
for c in range(C):
    plt.plot(X[order], f[order, c], '.', color=colors[c], label=str(c))
    plt.plot(X[order], Y_hot[order, c], '-', color=colors[c])
plt.legend()
plt.xlabel('$X$')
plt.ylabel('Latent (dots) and one-hot labels (lines)')
plt.title('Sample from the joint $p(Y, \mathbf{f})$')
plt.grid()
plt.show()
```
We then build the SGPMC model:
```
# Build the SGPMC model with every 5th input as an inducing point;
# defer_build lets us set priors before the TF graph is compiled.
with gpflow.defer_build():
    m = gpflow.models.SGPMC(X, Y,
                            kern=gpflow.kernels.Matern32(1, lengthscales=0.1) + gpflow.kernels.White(1, variance=0.01),
                            likelihood=gpflow.likelihoods.MultiClass(3),
                            Z=X[::5].copy(), num_latent=3)
    m.kern.kernels[0].variance.prior = gpflow.priors.Gamma(1., 1.)
    m.kern.kernels[0].lengthscales.prior = gpflow.priors.Gamma(2., 2.)
    # The White-noise jitter variance is held fixed, not sampled.
    m.kern.kernels[1].variance.trainable = False
m.compile()
```
The chain of samples for $\mathbf{u}_c, \theta$ is initialised at the value maximising $p(Y|\mathbf{u}_c, \theta)$:
```
# Initialise the chain near the optimum of p(Y|u_c, theta) before sampling.
opt = gpflow.train.ScipyOptimizer()
opt.minimize(m, maxiter=notebook_niter(10))
```
Sampling starts with a 'burn in' period:
```
# Use no burn-in/thinning on CI to keep the notebook fast; burn and thin
# are consumed later by plot_from_samples, not by hmc.sample itself.
ci = is_continuous_integration()
burn = 0 if ci else 100
thin = 1 if ci else 10
hmc = gpflow.train.HMC()
samples = hmc.sample(m, num_samples=notebook_niter(500),
                     epsilon=0.04, lmax=15, logprobs=False)
```
Statistics of the posterior samples can now be reported:
```
# Summarise the posterior samples (helper from multiclass_classification.py).
plot_from_samples(m, samples, burn, thin)
```
You can also display the sequence of sampled hyperparameters:
```
# Trace plot of the sampled kernel hyperparameters.
hyperparameters = ['SGPMC/kern/kernels/0/lengthscales',
                   'SGPMC/kern/kernels/0/variance']
plt.figure(figsize=(8,4))
# DataFrame.iteritems() was removed in pandas 2.0; items() is the direct
# replacement and behaves identically here.
for i, col in samples[hyperparameters].items():
    plt.plot(col, label=col.name)
plt.legend(bbox_to_anchor=(1., 1.))
plt.xlabel('hmc iteration')
plt.ylabel('hyper-parameter value')
```
<a id='example_3'></a>
## Fully Bayesian inference for generalised GP models with HMC
You can construct very flexible models with Gaussian processes by combining them with different likelihoods (sometimes called 'families' in the Generalised Linear Model literature). This makes inference of the GP intractable because the likelihoods are not generally conjugate to the Gaussian process. The general form of the model is:
$$\theta \sim p(\theta)\\f \sim \mathcal {GP}(m(x; \theta),\, k(x, x'; \theta))\\y_i \sim p(y \mid g(f(x_i)))\,.$$
To perform inference in this model, we'll run MCMC over the function values and the parameters $\theta$ jointly, using Hamiltonian Monte Carlo (HMC). The key to an effective scheme is rotation of the field using the Cholesky decomposition. We write:
$$\theta \sim p(\theta)\\v \sim \mathcal {N}(0,\, I)\\LL^\top = K\\f = m + Lv\\y_i \sim p(y \mid g(f(x_i)))\,.$$
Joint HMC over $v$ and the function values is not widely adopted in the literature because of the difficulty in differentiating $LL^\top=K$. We've made this derivative available in TensorFlow, and so application of HMC is relatively straightforward.
### Exponential Regression
We consider an exponential regression model:
$$\theta \sim p(\theta)\\f \sim \mathcal {GP}(0, k(x, x'; \theta))\\f_i = f(x_i)\\y_i \sim \mathcal {Exp} (e^{f_i})$$
We'll use MCMC to deal with both the kernel parameters $\theta$ and the latent function values $f$. First, generate a dataset:
```
# Toy dataset for exponential regression: exponentially distributed
# observations whose rate varies with sin(X)^2.
X = np.linspace(-3,3,20)
Y = np.random.exponential(np.sin(X)**2)
plt.figure()
plt.plot(X,Y,'x')
plt.xlabel('input $X$')
plt.ylabel('output $Y$')
plt.title('toy dataset')
plt.show()
```
GPflow's model for fully Bayesian MCMC is called GPMC. It's constructed like any other model, but contains a parameter `V` which represents the centered values of the function.
```
# Build the fully Bayesian GPMC model: Matern-3/2 + Bias kernel with an
# Exponential likelihood; defer_build so priors can be added before compile.
gpflow.reset_default_graph_and_session()
with gpflow.defer_build():
    k = gpflow.kernels.Matern32(1, ARD=False) + gpflow.kernels.Bias(1)
    l = gpflow.likelihoods.Exponential()
    m = gpflow.models.GPMC(X[:,None], Y[:,None], k, l)
```
The `V` parameter already has a prior applied. We'll add priors to the parameters also (these are arbitrary, for illustration).
```
# Arbitrary Gamma(1, 1) priors on the kernel hyperparameters (illustrative).
m.kern.kernels[0].lengthscales.prior = gpflow.priors.Gamma(1., 1.)
m.kern.kernels[0].variance.prior = gpflow.priors.Gamma(1., 1.)
m.kern.kernels[1].variance.prior = gpflow.priors.Gamma(1., 1.)
```
Running HMC is pretty similar to optimising a model. GPflow only has HMC sampling for the moment, and it's a relatively vanilla implementation; for example, No U-Turn Sampler (NUTS) is not implemented.
There are two things to tune, the step size (epsilon) and the number of steps $[L_{min}, L_{max}]$. Each proposal takes a random number of steps between $L_{min}$ and $L_{max}$, each of length $\epsilon$.
We initialise HMC at the maximum a posteriori (MAP) parameter value.
```
m.compile()
# A few Adam steps to start the chain near the MAP estimate.
o = gpflow.train.AdamOptimizer(0.01)
o.minimize(m, maxiter=notebook_niter(15)) # start near maximum a posteriori (MAP)
```
We then run the sampler:
```
# Sample jointly over the whitened latent values V and the hyperparameters;
# thin=5 keeps every 5th draw.
s = gpflow.train.HMC()
samples = s.sample(m, notebook_niter(500),
                   epsilon=0.12, lmax=20, lmin=5, thin=5, logprobs=False)#, verbose=True)
```
Then we compute the posterior prediction on a grid for plotting purposes:
```
# For each posterior sample, load its state into the model and draw 5
# function samples on a grid; plot the mean rate exp(f) with a 5-95%
# percentile band.
xtest = np.linspace(-4,4,100)[:,None]
f_samples = []
for i, s in samples.iterrows():
    f = m.predict_f_samples(xtest, 5, initialize=False, feed_dict=m.sample_feed_dict(s))
    f_samples.append(f)
f_samples = np.vstack(f_samples)
# The Exponential likelihood uses rate exp(f).
rate_samples = np.exp(f_samples[:, :, 0])
line, = plt.plot(xtest, np.mean(rate_samples, 0), lw=2)
plt.fill_between(xtest[:,0],
                 np.percentile(rate_samples, 5, axis=0),
                 np.percentile(rate_samples, 95, axis=0),
                 color=line.get_color(), alpha = 0.2)
plt.plot(X, Y, 'kx', mew=2)
plt.ylim(-0.1, np.max(np.percentile(rate_samples, 95, axis=0)))
```
You can also display the sequence of sampled hyperparameters:
```
# Trace plot of the sampled kernel hyperparameters.
hyperparameters = ['GPMC/kern/kernels/0/variance',
                   'GPMC/kern/kernels/0/lengthscales',
                   'GPMC/kern/kernels/1/variance']
plt.figure(figsize=(8,4))
# DataFrame.iteritems() was removed in pandas 2.0; items() is the direct
# replacement and behaves identically here.
for i, col in samples[hyperparameters].items():
    plt.plot(col, label=col.name)
plt.legend(bbox_to_anchor=(1., 1.))
plt.xlabel('hmc iteration')
plt.ylabel('hyper-parameter value')
plt.show()
```
You can also inspect the marginal of the posterior samples:
```
# Marginal histograms of the sampled hyperparameters, one panel each.
fig, axarr = plt.subplots(1, len(hyperparameters), sharex=True, figsize=(12,4))
for i, hyp in enumerate(hyperparameters):
    ax = axarr[i]
    ax.hist(samples[hyp],bins=20)
    ax.set_title(hyp);
plt.tight_layout()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/carvalheirafc/imd0033_2018_2/blob/master/aula26/Lesson_26_Measures_of_Variability.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 1 - The Range
So far we've focused entirely on summarizing distributions using the **mean**, the **weighted mean**, the **median**, and the **mode**. An interesting distribution property we haven't yet discussed is **variability**. Consider, for instance, these two distributions of numerical values:
<img width="150" src="https://drive.google.com/uc?export=view&id=1oLZvk-JGfbK9vrxSW3vOgyRKKGhWHHVN">
The values of the distribution A don't vary — each value is 4. The values in distribution B show some variability — they are not all identical, and the values can be either 8 or 0. **If we were to quantify variability**, we could assign a value of 0 to A to indicate that it has no variability. But **what variability value should we assign to distribution B?**
We need a measure to summarize the **variability** of these two distributions. The summary metrics we've learned so far don't tell us anything about variability. The **mean**, the **median**, and the **mode** of distribution A are all 4, and distribution B has a **mean** and a **median** of 4, and **no mode**. If we were to judge variability based on these values, we'd probably have to conclude that the variabilities of the two distributions are equal, which is wrong.
One intuitive way to measure the variability of a distribution is to find the **difference between the maximum and the minimum value**. Both the maximum and the minimum of distribution A is 4, so the variability of distribution is 0:
$$
max(A) - min(A) = 4 - 4 = 0
$$
We call this measure of variability the **range**. So the range of distribution A is 0. The range of distribution B is 8:
$$
max(B) - min(B) = 8 - 0 = 8
$$
In more general terms, the range of distribution X, where X can be any distribution of real numbers, is:
$$
range(X) = max(X) - min(X)
$$
We'll continue working in this mission with the data set on house prices we used for the last three lessons. Here's a short extract from the data set to help you recollect its structure:
| | Order | PID | MS SubClass | MS Zoning | Lot Frontage | Lot Area | Street | Alley | Lot Shape | Sale Condition | SalePrice |
|-------|-----|-------------|-----------|--------------|----------|--------|-------|-----------|----------------|-----------|--------|
| 0 | 1 | 526301100 | 20 | RL | 141.0 | 131770 | Pave | NaN | WD | Normal | 215000 |
| 1 | 2 | 526350040 | 20 | RH | 80.0 | 11622 | Pave | NaN | WD | Normal | 105000 |
| 2 | 3 | 526351010 | 20 | RL | 81.0 | 14267 | Pave | NaN | WD | Normal | 172000 |
| 3 | 4 | 526353030 | 20 | RL | 93.0 | 11160 | Pave | NaN | WD | Normal | 244000 |
| 4 | 5 | 527105010 | 60 | RL | 74.0 | 13830 | Pave | NaN | WD | Normal | 189900 |
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Write a function that takes in an array of numerical values and returns the range of that array.
- Using the function you wrote, measure the range of the **SalePrice** variable for each year of sales. You can find the year of sale in the **Yr Sold** column.
- Store the measures in a dictionary named **range_by_year**. The keys should be the individual years, and the dictionary values should be the ranges. This is how the dictionary should look like: **{2010: 598868, 2009: 575100, 2008: 601900,...}**.
- Using the measures of **variability** you got, assess the truth value of the following sentences:
- Prices had the greatest variability in 2008.
- If you consider this sentence true, assign the boolean **True** to a variable named **one**, otherwise assign **False**.
- Prices variability had a peak in 2007, then the variability started to decrease until 2010 when there was a short increase in variability compared to the previous year (2009).
- If you consider this sentence true, assign the boolean **True** to a variable named **two**, otherwise assign **False**.
```
import pandas as pd
# Ames housing dataset (tab-separated); path is relative to the notebook.
houses = pd.read_table('AmesHousing_1.txt')
def get_range(iterable_object):
    """Return the range of the values: maximum minus minimum."""
    lowest, highest = min(iterable_object), max(iterable_object)
    return highest - lowest
# Range of SalePrice for each year of sale, keyed by year.
years = houses['Yr Sold'].unique()
years.sort()
range_by_year = {}
for year in years:
    range_by_year[year] = get_range(houses[houses['Yr Sold'] == year]['SalePrice'])
# Notebook display of the resulting dictionary.
range_by_year
import matplotlib.pyplot as plt
labels = list(range_by_year.keys())
plt.bar(range(len(labels)),range_by_year.values(),tick_label=labels)
# Answers to the exercise's two truth-value questions.
one = False
two = True
# 2 - The Average Distance
The problem with the **range** is that it considers only two values in the distribution — the **minimum** and the **maximum** value. Consider this distribution C:
$$
C = [1,1,1,1,1,1,1,1,1,21]
$$
We can see there's not much **variability** in distribution C - we have nine values of 1, and a single value of 21. Intuitively, we'd expect the variability of distribution C to be greater than 0 because there is some variability after all, but not much greater than 0 (remember from the last screen that a distribution whose values don't vary should ideally have a variability of 0).
Despite our expectations, the range indicates that the variability of distribution C is 20.
$$
max(C) - min(C) = 21 - 1 = 20
$$
This is significantly greater than 0 and doesn't seem like a reasonable measure of variability for distribution C. The root of the problem is that the range considers only the two extreme values, and this makes it extremely sensitive to outliers. To get a more balanced measure of variability for distribution C, we need to take into account each value in the distribution.
To take into account each value when measuring variability we could:
1. Take a reference value, and measure the distance of each value in the distribution from that reference value.
- We can take the mean of the distribution as a reference value.
- Then, we measure the distance between each value in the distribution and the mean.
2. Find the mean of the distances.
- We first need to sum up all the distances.
- Then we need to divide the total by the number of distances.
<img width="300" src="https://drive.google.com/uc?export=view&id=1F6z138WEkF049XXla0fYXXAqV4OMjKug">
By measuring the distance of each value relative to a reference point and then taking the mean of the distances, we practically measure how much the values of a distribution vary on average with respect to that reference point.
It's also very easy to define algebraically this method for any population of values $[x_1,x_2,\ldots,x_N]$ with mean $\mu$:
<img width="500" src="https://drive.google.com/uc?export=view&id=1pPKfQKYjX_6eLDjJxgAQwNoS681AG0wj">
We'll continue discussing about this method in the next screen, but now let's use the formula above to measure the variability of distribution C.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Write a function that takes in a numerical array and returns the average distance (as explained above). Inside the function's definition:
- Compute the mean of the array.
- Initialize an empty list.
- Loop through the values of the array. For each iteration:
- Compute the distance between the current value and the mean. Use **value - mean** every time, as indicated by the formula.
- Append the distance to the list we initialized before the loop.
- At the end of the loop, the list should contain all the distances.
- Return the mean of the list.
- Compute the average distance for distribution C using the function you wrote, and assign the result to a variable named **avg_distance.**
- Print the result. Why do you think we got that value? (Hint: The mean is the balance point of a distribution.)
```
C = [1, 1, 1, 1, 1, 1, 1, 1, 1, 21]

def average_distance(iterable):
    """Mean signed distance of each value from the distribution's mean.

    Because the mean is the balance point of a distribution, positive and
    negative deviations cancel and this always evaluates to (about) zero —
    which is the pedagogical point of the exercise.
    """
    center = sum(iterable) / len(iterable)
    deviations = [value - center for value in iterable]
    return sum(deviations) / len(deviations)

avg_distance = average_distance(C)
# Notebook display: the signed deviations cancel, so this is 0.
avg_distance
```
# 3 -Mean Absolute Deviation
In the last exercise the average distance was 0. This is because the **mean** is the **balance point** of the distribution and, as we've learned, the total distance of the values that are above the mean is the same as the total distance of the values below the mean. The mean $\mu$ of the distribution C is 3, so we have:
<img width="400" src="https://drive.google.com/uc?export=view&id=1nZHuW_kHSzl9h8lUWx6P7rflshCkjcQm">
Plugging the distances into the formula we used in the previous screen will make the numerator amount to 0, which in turn will make the average distance 0:
$$
\text{average distance} = \frac{-18 + 18}{10} = \frac{0}{10}
$$
To solve this problem, we can take the absolute value of each distance, and then sum up the absolute values. The **absolute value** (also called **modulus**) of a number is the positive version of that number, regardless of its sign. For instance, the absolute value of -7 is +7, and the absolute value of +7 is +7. In mathematical notation we write:
$$
|-7| = +7\\
|+7| = +7
$$
We'll update the formula used previously to reflect the fact that we're summing up the absolute distances instead:
$$
\text{mean absolute distance} = \frac{|x_1 - \mu| + |x_2 - \mu| + \ldots + |x_N - \mu|}{N} = \frac{\displaystyle \sum_{i=1}^{N} |x_i - \mu|}{N}
$$
We call this measure of variability **mean absolute distance**. In statistical jargon, however, the distance of a value from the mean is called **deviation**. So the mean absolute distance is more commonly known as **mean absolute deviation** or **average absolute deviation.**
Let's take the mean absolute deviation of distribution C and see whether this metric does better than the range. Remember that the range is 20, but we expect a smaller value (which is greater than 0 at the same time).
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Write a function that takes in a numerical array and returns the **mean absolute deviation**. Inside the function:
- Compute the **mean** of the array.
- Loop through the values of the array. For each iteration:
- Compute the absolute distance (deviation). You can use the **abs()** function.
- Append the absolute distance to a list.
- Return the mean of the list containing all the absolute distances.
- Compute the mean absolute deviation of distribution C, and assign the result to a variable named **mad**.
- Is the result considerably less than 20 but greater than 0, as we expected?
```
# Distribution C: nine identical values and one outlier.
C = [1, 1, 1, 1, 1, 1, 1, 1, 1, 21]

def get_mean_absolute_deviation(values):
    """Return the mean absolute deviation of *values* around their mean."""
    center = sum(values) / len(values)
    total = 0
    for v in values:
        # |mean - v| == |v - mean|, so the sign convention doesn't matter.
        total += abs(center - v)
    return total / len(values)

mad = get_mean_absolute_deviation(C)
mad
```
# 4- Variance
In the previous screen we transformed the distances to absolute values to avoid having the sum of distances amount to 0 in the numerator. Another way to solve this problem is to square each distance and then find the mean of all the squared distances:
$$
\text{mean squared distance} = \frac{(x_1 - \mu)^2 + (x_2 - \mu)^2 + \ldots + (x_N - \mu)^2}{N} = \frac{\displaystyle \sum_{i=1}^{N} (x_i - \mu)^2}{N}
$$
This measure of variability is sometimes called **mean squared distance** or **mean squared deviation** (remember that "distance" and "deviation" are synonymous in this context). However, it's more commonly known as **variance.**
Squaring the distances or taking their absolute values ensure that we get a variability value that is greater than 0 for all distributions that show some variability. Notice, however, that variance and mean absolute deviation will still be 0 for distributions that show no variability.
Consider distribution $D = [2,2,2]$ , which has a variance and a mean absolute deviation of 0:
<img width="500" src="https://drive.google.com/uc?export=view&id=1eWnhIv6R4izaRdpppfF2oIqw-F-fUpI3">
In the previous exercise, we got a mean absolute deviation of 3.6 for our distribution $C = [1,1,1,1,1,1,1,1,1,21]$. A value of 3.6 fit our expectations well because we had expected a variability value greater than 0, but significantly less than 20. Let's see how well variance does with measuring the variability of distribution C.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Write a function that takes in a numerical array and returns the variance of that array. Inside the function:
- Compute the mean of the array.
- Loop through the values of the array. For each iteration:
- Compute the squared distance (squared deviation).
- Append the squared distance to a list.
- Return the mean of the list of squared distances.
- Compute the variance of distribution C, and assign the result to a variable named **variance_C.**
- Is the result considerably less than 20 but greater than 0, as we expected?
```
# Distribution C: nine identical values and one outlier.
C = [1, 1, 1, 1, 1, 1, 1, 1, 1, 21]

def get_variance(values):
    """Return the population variance: the mean of the squared deviations."""
    center = sum(values) / len(values)
    squared_deviations = [(v - center) ** 2 for v in values]
    return sum(squared_deviations) / len(squared_deviations)

variance_C = get_variance(C)
variance_C
```
# 5 - Standard Deviation
In the previous exercise, we got a variance of 36 for distribution $C = [1,1,1,1,1,1,1,1,1,21]$ , which was much more than we had expected. This high variability value is the direct result of the squaring process, which makes most distances much bigger than they actually are.
Squaring the distances also has the drawback of squaring the units of measurement. Let's consider this small sample from the **Bedroom AbvGr** variable (which describes the number of bedrooms in a house):
$$
[0,7,8]
$$
For computational purposes, and sometimes for simplicity, we tend to leave out the units of measurement in practice, but theoretically we should write out the units of measurement:
$$
[\text{0 bedrooms}, \text{7 bedrooms}, \text{8 bedrooms}]
$$
The units of measurement are subject to algebraic operations, so the variance of the sample above will be (for formatting purposes, we'll abbreviate "bedrooms" with "b"):
<img width="400" src="https://drive.google.com/uc?export=view&id=1wyImzSVrOO4ydqCuE4nRwtwvTY5rcxOj">
The variance of this distribution is $12.\overline{6}$, which is very counterintuitive ($12.\overline{6}$ is the abbreviation for $12.6666\ldots 666\ldots$). To solve this problem and also reduce the variability value, we can take the square root of variance.
$$
\sqrt{variance} = \sqrt{12.\overline{6} \ \ bedrooms^2} = 3.6 \ \ bedrooms
$$
The square root of variance is called **standard deviation** (remember that "deviation" is synonymous with "distance"), and it can be expressed like this in an algebraic definition:
$$
\text{standard deviation} = \sqrt{\frac{(x_1 - \mu)^2 + (x_2 - \mu)^2 + \ldots + (x_N - \mu)^2}{N}} = \sqrt{\frac{\displaystyle \sum_{i=1}^{N} (x_i - \mu)^2}{N}}
$$
Notice that the standard deviation is simply the square root of variance:
$$
\sqrt{variance} = standard \ \ deviation
$$
Let's return to our distribution $C=[1,1,1,1,1,1,1,1,1,21]$, and see how well standard deviation does on measuring its variability.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Write a function that takes in a numerical array and returns the standard deviation of that array. Inside the function:
- Compute the mean of the array.
- Loop through the values of the array. For each iteration:
- Compute the squared distance (squared deviation).
- Append the squared distance to a list.
- Compute the mean of the list of squared distances — this is the variance.
- Return the square root of the variance.
- Compute the standard deviation of distribution C, and assign the result to a variable named **standard_deviation_C.**
- Is the result considerably less than 20 but greater than 0, as we expected?
```
from math import sqrt

# Distribution C: nine identical values and one outlier.
C = [1, 1, 1, 1, 1, 1, 1, 1, 1, 21]

def get_standard_deviation(values):
    """Return the population standard deviation (square root of the variance)."""
    center = sum(values) / len(values)
    total_sq = 0
    for v in values:
        total_sq += (v - center) ** 2
    return sqrt(total_sq / len(values))

standard_deviation_C = get_standard_deviation(C)
standard_deviation_C
```
# 6 - Average Variability Around the Mean
In practice, **standard deviation** is perhaps the most used measure of variability. Let's try to get a better understanding of it by measuring the variability of the **SalePrice** variable in our data set. We'll use the **standard_deviation()** function we wrote for the previous exercise:
```
def standard_deviation(array):
    """Population standard deviation of *array* (divides by N, no correction)."""
    center = sum(array) / len(array)
    squared_distances = [(value - center) ** 2 for value in array]
    return sqrt(sum(squared_distances) / len(squared_distances))
# Population SD of all sale prices in the `houses` DataFrame (loaded
# earlier in the notebook) — roughly 79,873 per the discussion below.
standard_deviation(houses['SalePrice'])
```
**Standard deviation** tells us how much the values in a distribution vary (on average) around the mean of that distribution. The mean of the SalePrice variable is approximately 180,796:
```
# Mean sale price — approximately 180,796.
houses['SalePrice'].mean()
```
The **mean** tells us that the **average price** of a house is roughly 180,796, but this doesn't mean that each house (or most of them) costs exactly 180,796. One house could cost 120,000, another 240,000, and it could be that no house actually costs exactly 180,796. The **standard deviation** gives us a picture about this variability around the mean sale price. So, on average, sale prices vary by roughly 79,873 above and below a mean of 180,796.
Below, we'll try to visualize this variability around the mean by:
- Generating a histogram for the distribution of the **SalePrice** variable.
- Using vertical lines to mark the mean and the average deviations above and below the mean.
```
import matplotlib.pyplot as plt

# Histogram of sale prices with vertical lines marking the mean and one
# standard deviation above and below it.
avg_price = houses['SalePrice'].mean()
sd = standard_deviation(houses['SalePrice'])
houses['SalePrice'].plot.hist()
plt.axvline(avg_price, color='Black', label='Mean')
plt.axvline(avg_price - sd, color='Red', label='Below')
plt.axvline(avg_price + sd, color='Violet', label='Above')
plt.legend()
```
Notice in the histogram that prices can vary around the mean much more or much less than 79,873. Some outliers around 700,000 are more than 500,000 above the mean and a couple of houses around 30,000 are more than 150,000 below the mean. The standard deviation doesn't set boundaries for the values in a distribution: the prices can go above and below the mean more than 79,873.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- The standard deviation of the **SalePrice** variable should give us a picture about the diversity of prices on the real estate market.
- Find the year with the greatest variability of prices and assign the answer as an integer to the variable **greatest_variability.**
- Find the year with the lowest variability of prices and assign the answer as an integer to the variable **lowest_variability.**
- Use the function you wrote in the previous screen to measure the standard deviation of each year.
- You can find information about the years of sale in the **Yr Sold** column.
- There are many ways you can solve this exercise. If you get stuck, you can check the hint or the solution code.
- tip: max(years, key = years.get), where years is a dictionary.
```
# Standard deviation of sale prices for each year of sale.
years = houses['Yr Sold'].unique()
years.sort()
# NOTE: name kept from the earlier range exercise for continuity, but the
# values stored here are standard deviations, not ranges.
range_by_year = {}
for year in years:
    prices = houses[houses['Yr Sold'] == year]['SalePrice']
    range_by_year[year] = get_standard_deviation(prices)

# The exercise asks for these two deliverables (see the tip:
# max(years, key=years.get) picks the key with the largest value).
greatest_variability = int(max(range_by_year, key=range_by_year.get))
lowest_variability = int(min(range_by_year, key=range_by_year.get))
range_by_year
```
# 7 - A Measure of Spread
Another way to understand **standard deviation** is as a measure of spread in a distribution — values in a distribution can be more or less spread. We took four random samples of 50 sample points each from the **SalePrice** distribution, and plotted their histograms to visualize the spread for each sample:
<img width="500" src="https://drive.google.com/uc?export=view&id=1JRlXfYk9guthhb9IztYdWztEPD-Omw6s">
According to our visual estimates, sample 2 has the biggest spread, while the other three samples have a similar spread, with sample 3 seemingly having the lowest spread. The **standard deviations** of these four distributions fit our visual estimates fairly well:
```
# Print the standard deviation of each of the four random samples shown
# in the graph above.
for sample_id in range(1, 5):
    # we used the same random states for the samples in the graph above
    sample = houses['SalePrice'].sample(50, random_state=sample_id)
    spread = standard_deviation(sample)
    print('Sample ' + str(sample_id) + ': ' + str(spread))
```
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- We took two samples of 50 sample points each from the distribution of the **Year Built** variable. Examine the graph below, and estimate visually which sample has a bigger spread.
- Assign your answer to a variable named **bigger_spread**. If you think sample 1 has a bigger spread, assign the string **'sample 1'** to **bigger_spread**, otherwise assign **'sample 2'**.
- Sanity check your visual estimate by computing and comparing the **standard deviations** of the two samples.
- You can see the two samples already saved in the code editor.
- Assign the standard deviation of sample 1 to a variable named **st_dev1**. Compute the standard deviation using the **standard_deviation()** function.
- Assign the standard deviation of sample 2 to a variable named **st_dev2**. Compute the standard deviation using the **standard_deviation()** function.
<img width="400" src="https://drive.google.com/uc?export=view&id=1LGf0paBBpbTq9rmwjOLxm-W1VzcncTxf">
```
# Two 50-point samples of construction years; the random states match
# the two histograms shown in the graph above.
sample1 = houses['Year Built'].sample(50, random_state = 1)
sample2 = houses['Year Built'].sample(50, random_state = 2)
def standard_deviation(array):
    """Population standard deviation of *array* (no Bessel's correction)."""
    center = sum(array) / len(array)
    total = 0
    for value in array:
        total += (value - center) ** 2
    return sqrt(total / len(array))
# Per the instructions the answer must be the string 'sample 2' (with a
# space, not 'sample_2'), and the variables must be named st_dev1/st_dev2.
bigger_spread = 'sample 2'
st_dev1 = standard_deviation(sample1)
st_dev2 = standard_deviation(sample2)
print(st_dev1)
print(st_dev2)
```
# 8 - The Sample Standard Deviation
In practice, **we generally work with samples**, but most of the time we're not actually interested in describing the samples. Rather, we want to use the samples to make inferences about their corresponding populations. **Let's find out whether the standard deviation of a sample is a good estimate for the standard deviation in the corresponding population**.
Remember that we defined the standard deviation (SD) as:
$$
SD = \sqrt{\frac{(x_1 - \mu)^2 + (x_2 - \mu)^2 + \ldots + (x_N - \mu)^2}{N}} = \sqrt{\frac{\displaystyle \sum_{i=1}^{N} (x_i - \mu)^2}{N}}
$$
Notice in the formula that we used the population mean $\mu$, which means that if we wanted to compute the standard deviation of a sample, we'd have to know $\mu$. In practice, $\mu$ is almost never known, and we can't find it from our sample either, but we can estimate $\mu$ using the sample mean $\overline{x}$.
We update the formula for the sample standard deviation slightly by changing $\mu$ to $\overline{x}$ and $N$ to $n$ (remember that $N$ describes the number of data points in a population, while $n$ describes the number of data points in a sample):
$$
SD_{sample} = \sqrt{\frac{(x_1 - \overline{x})^2 + (x_2 - \overline{x})^2 + \ldots + (x_n - \overline{x})^2}{n}} = \sqrt{\frac{\displaystyle \sum_{i=1}^{n} (x_i - \overline{x})^2}{n}}
$$
Now that we have a working formula, can we use it to reliably estimate the population standard deviation? One way we can check this is by sampling repeatedly a known population and see how the sample standard deviations compare on average to the population standard deviation.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Let's consider the data we have for **SalePrice** a population and sample it 5000 times. For each of the 5000 iterations of a for loop:
- Sample 10 data points from the **SalePrice** variable using the **Series.sample()** method.
- The **random_state** of **Series.sample()** should be 0 for the first iteration, 1 for the second iteration, 2 for the third, and so on.
- Compute the standard deviation of the sample using the **standard_deviation()** function.
- Append the standard deviation to a list that will eventually store all the 5000 sample standard deviations.
- Generate a histogram using **plt.hist()** to visualize the distribution of the 5000 sample standard deviations.
- Draw a vertical line using **plt.axvline()** to mark the population standard deviation.
- Examine the histogram and try to figure out whether most sample standard deviations cluster above or below the population standard deviation, or right at the center of it.
```
def standard_deviation(array):
    """Population standard deviation: square root of the mean squared deviation."""
    center = sum(array) / len(array)
    squared = [(v - center) ** 2 for v in array]
    return sqrt(sum(squared) / len(squared))
import matplotlib.pyplot as plt
import seaborn as sns

# Distribution of 5000 sample standard deviations (10 points per sample),
# with the population standard deviation marked in red.
fig, ax = plt.subplots()
standard_deviation_list = []
for it in range(5000):
    sample = houses['SalePrice'].sample(n=10, random_state=it)
    standard_deviation_list.append(standard_deviation(sample))
# sns.distplot is deprecated (removed in seaborn 0.14); histplot with a
# KDE overlay is the modern equivalent.
sns.histplot(standard_deviation_list, kde=True, ax=ax)
ax.axvline(x=standard_deviation(houses['SalePrice']), color='red')
```
# 9 -Bessel's Correction
In the last exercise, we plotted the histogram of 5000 sample standard deviations and compared them against the population standard deviation. Notice that most sample standard deviations are clustered below the population standard deviation:
<img width="400" src="https://drive.google.com/uc?export=view&id=1j5P6v31Q-FtsIxE8SATNBIv2iG4gmpAt">
This suggests that the sample standard deviation usually underestimates the population standard deviation. We can also see that the mean of the 5000 sample standard deviations is below the population standard deviation:
```
#st_devs - a list with all the 5000 st. deviations
# Average of the 5000 sample standard deviations (falls below the
# population value, showing the underestimation on average).
sum(st_devs) / 5000
# Population standard deviation for comparison — note that only the last
# expression of a notebook cell is displayed.
standard_deviation(houses['SalePrice'])
```
So we can say that the sample standard deviation underestimates on average the population standard deviation. Some sample standard deviations are lower than the population standard deviation, some are greater, some may even be equal to the population standard deviation, but on average the sample standard deviation is lower than the population standard deviation.
We can get a good intuition for why the sample standard deviation underestimates if we think in terms of distribution spread. When we sample a population, it's generally more likely to get a sample with a spread that's lower than the population's spread. This generally translates to a lower standard deviation than in the population.
<img width="600" src="https://drive.google.com/uc?export=view&id=1DcLZ_g1qhY5CHg7IGj30U8y0beDDvMIW">
Getting a sample with a higher standard deviation than in the population is possible, but this is less likely. This is mostly specific to samples with a high spread and no clusters.
<img width="300" src="https://drive.google.com/uc?export=view&id=1_KZdDxi3M2tsQDbYJbJep1qAuoSe7Mrl">
To correct the underestimation problem, we can try to slightly modify the sample standard deviation formula to return higher values. One way to do that is to decrease the value of the denominator. For instance, in $\frac{12}{6} = 2$, the denominator is 6. If we decrease the value of the denominator, we get a greater result: $\frac{12}{4}=3$.
We'll decrease by 1 the denominator in the sample standard deviation formula, which now becomes:
$$
SD_{sample} = \sqrt{\frac{(x_1 - \overline{x})^2 + (x_2 - \overline{x})^2 + \ldots + (x_n - \overline{x})^2}{n-1}} = \sqrt{\frac{\displaystyle \sum_{i=1}^{n} (x_i - \overline{x})^2}{n-1}}
$$
This small correction we added to the sample standard deviation (dividing by $n-1$ instead of $n$) is called **Bessel's correction**. Let's implement Bessel's correction to our **standard_deviation()** function and repeat the steps in the last exercise to see if Bessel's correction adds any improvements.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Modify the code we wrote in the previous exercise by implementing Bessel's correction, and generate the histogram again.
- If you want to challenge yourself, delete the display code and recode everything from scratch.
- Does it look like Bessel's correction added any improvement?
```
def standard_deviation(array):
    """Sample standard deviation of *array* with Bessel's correction.

    Divides the sum of squared deviations by n - 1 instead of n, which
    counteracts the uncorrected formula's tendency to underestimate the
    population standard deviation.
    """
    reference_point = sum(array) / len(array)
    distances = []
    for value in array:
        squared_distance = (value - reference_point) ** 2
        distances.append(squared_distance)
    # Bessel's correction: divide by n - 1, not n. The exercise asks to
    # modify the previous function this way; the original still divided by n.
    variance = sum(distances) / (len(distances) - 1)
    return sqrt(variance)
import matplotlib.pyplot as plt

# Repeat the 5000-sample experiment with the corrected function and
# compare the histogram against the population standard deviation.
st_devs = []
for i in range(5000):
    sample = houses['SalePrice'].sample(10, random_state=i)
    st_devs.append(standard_deviation(sample))
plt.hist(st_devs)
plt.axvline(standard_deviation(houses['SalePrice']))
```
# 10 - Standard Notation
It looks like Bessel's correction added some visible improvements and partially corrected the underestimation problem:
<img width="500" src="https://drive.google.com/uc?export=view&id=1Bxxtf1bIfOnZ1zgLghoGs4QiNzfXX3iq">
The improvement brought by Bessel's correction is more obvious when we compare the average values of the two distributions above. The mean of the 5000 sample standard deviations without Bessel's correction is roughly 71304, while the mean standard deviation of the sample standard deviations having the correction is roughly 75161. This is significantly closer to the population standard deviation, which is approximately 79887.
We could decrease the denominator more (dividing by $n-2$ maybe) to try improving the correction. However, we need a single mathematical definition for the sample standard deviation, and we have to choose between $n$, $n-1$, $n-2$, etc. Remember that in practice we don't know the population standard deviation, so we can't tell which correction would work best for each sample standard deviation.
Statisticians agree that $n-1$ is the best choice for the sample standard deviation formula, and we'll explore a strong argument in support of this in the next screen.
Now that we have know what formulae to use for samples and populations, we introduce some standard notation that will help you understand other statistics resources. The population standard deviation is denoted with the Greek letter $\sigma$ (read "sigma", or "lowercase sigma"):
$$
\sigma = \sqrt{\frac{(x_1 - \mu)^2 + (x_2 - \mu)^2 + \ldots + (x_N - \mu)^2}{N}} = \sqrt{\frac{\displaystyle \sum_{i=1}^{N} (x_i - \mu)^2}{N}}
$$
Remember that the population standard deviation $\sigma$ is just the square root of the population variance. For this reason, the population variance is written as $\sigma^2$ (such that taking the square root of the variance $\sigma^2$ results in the standard deviation $\sigma$: $\sqrt{\sigma^2}=\sigma$):
$$
\sigma^2 = \frac{(x_1 - \mu)^2 + (x_2 - \mu)^2 + \ldots + (x_N - \mu)^2}{N} = \frac{\displaystyle \sum_{i=1}^{N} (x_i - \mu)^2}{N}
$$
The sample standard deviation is simply denoted with $s$, while the sample variance is denoted with $s^2$ (also notice Bessel's correction in the denominator):
<img width="500" src="https://drive.google.com/uc?export=view&id=1RJkvnRbRHyP2KIKmfRzbGtrxNmNN0dMp">
The main takeaway is that we need to use the $s$ and $s^2$ formulae (with Bessel's correction) for samples. For populations, we can use the $\sigma$ or $\sigma^2$ formulae (without Bessel's correction).
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- We already sampled our data set and saved the sample in a variable named **sample.**
- Use the [Series.std()](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.std.html) method to compute the sample standard deviation for the **SalePrice** column. You can use the **ddof** parameter to choose between $n$ and $n-1$. Save the result to a variable named **pandas_stdev.**
- Use the [numpy.std()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html) function to compute the sample standard deviation for the **SalePrice** column. You can use the **ddof** parameter to choose between $n$ and $n-1$. Save the result to a variable named **numpy_stdev.**
- Compare **pandas_stdev** with **numpy_stdev** using the == operator. Assign the result of the comparison to a variable named **equal_stdevs.**
- Use the **Series.var()** method to compute the sample variance for the **SalePrice** column. Assign the result to **pandas_var.**
- Use the **numpy.var()** function to compute the sample variance for the **SalePrice** column. Assign the result to **numpy_var.**
- Compare **pandas_var** with **numpy_var** using the == operator. Assign the result of the comparison to a variable named **equal_vars.**
```
sample = houses.sample(100, random_state = 1)
from numpy import std, var

# The exercise asks for the *sample* statistics, so compute on `sample`,
# not on the full `houses` DataFrame. Pass ddof=1 (Bessel's correction)
# explicitly: pandas defaults to ddof=1 while NumPy defaults to ddof=0,
# so leaving ddof implicit makes the two results silently disagree.
pandas_stdev = sample["SalePrice"].std(ddof=1)
numpy_stdev = std(sample["SalePrice"], ddof=1)
equal_stdevs = pandas_stdev == numpy_stdev
print(equal_stdevs)
pandas_var = sample["SalePrice"].var(ddof=1)
numpy_var = var(sample["SalePrice"], ddof=1)
equal_vars = pandas_var == numpy_var
print(equal_vars)
```
# 11 - Sample Variance — Unbiased Estimator
In the previous screen, we stated that statisticians agree that $n-1$ is better than $n$ or $n-2$ for computing the sample standard deviation $s$. An argument supporting this comes from the fact that the sample variance $s^2$ (which uses $n-1$) is an **unbiased estimator** for the population variance $\sigma^2$. Since standard deviation is just the square root of variance, it makes sense to use $n-1$ as well (although standard deviation is not an unbiased estimator, as we'll see).
As we learned previously when we discussed the mean, we call a statistic an unbiased estimator when that statistic is equal on average to the parameter it estimates. Remember that the sample mean $\overline{x}$ is an unbiased estimator for the population mean $\mu$ no matter whether we sample with or without replacement. The sample variance $s^2$ is an unbiased estimator for the population variance $\sigma^2$ only when we sample with replacement. In the diagram below, we will:
- Take all possible samples of size $n=2$ from the population $[0,3,6]$ with $\sigma^2=6$ .
- Compute the sample variance $s^2$ for each sample.
- Take the mean of all the sample variances $s^2$ . You can see that the mean is 6, which is the same as the population variance $\sigma^2$, which shows that the sample variance $s^2$ is an unbiased estimator for the population variance $\sigma^2$.
<img width="300" src="https://drive.google.com/uc?export=view&id=1edozHcIsz32Da_2yNLH-lExQ1BKtkk7t">
Although the sample variance $s^2$ is an unbiased estimator, and the sample standard deviation $s$ is basically $\sqrt{s^2}$, the unbiasedness doesn't carry over ($\sigma$ is roughly 2.45 for the population $[0,3,6]$).
<img width="300" src="https://drive.google.com/uc?export=view&id=1mROVmvyTedV1awalmFhfNnIJbY6pS3_M">
In the exercise below, we'll see that the sample variance $s^2$ and the sample standard deviation $s$ are biased when we sample without replacement.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- In the cell below, you can see all the possible samples of size $n=2$ for the population $[0,3,6]$ when we sample without replacement.
- Compute the sample variance and sample standard deviation for each sample.
- Take the mean of all the sample variances. Compare the mean variance with the population variance (which you'll have to compute) using the == operator, and assign the result to a variable **equal_var.**
- If the sample variance is biased in this case, the result should be **False**.
- Take the mean of all the sample standard deviations. Compare the mean standard deviation with the population standard deviation using the == operator, and assign the result to **equal_stdev.**
- If the sample variance is biased in this case, the result should be **False.**
```
# All possible samples of size n = 2 drawn WITHOUT replacement from the
# population (order matters, so each unordered pair appears twice).
population = [0, 3, 6]
samples = [[0, 3], [0, 6],
           [3, 0], [3, 6],
           [6, 0], [6, 3]
           ]

from numpy import var, std

# The cell previously contained a broken first attempt that shadowed the
# standard_deviation() function with a list, used NumPy's default ddof=0
# for the samples, and compared the mean variance against the population
# *mean* instead of the population variance. This is the corrected version.

# Population parameters: no Bessel's correction (ddof=0).
pop_var = var(population, ddof = 0)
pop_std = std(population, ddof = 0)

# Sample statistics: Bessel's correction (ddof=1).
st_devs = []
variances = []
for sample in samples:
    st_devs.append(std(sample, ddof = 1))
    variances.append(var(sample, ddof = 1))

mean_std = sum(st_devs) / len(st_devs)
mean_var = sum(variances) / len(variances)

# Sampling without replacement biases both estimators, so both
# comparisons should come out False.
equal_stdev = pop_std == mean_std
equal_var = pop_var == mean_var
print(equal_var)
equal_stdev
```
# 12 - Next Steps
In this lesson, we learned how to measure the **variability** of a distribution using the **range**, the **mean absolute deviation**, the **variance**, and the **standard deviation**. These metrics are ideal for measuring the variability of distributions whose values are measured on an interval or ratio scale.
Measuring variability for ordinal and nominal data is much harder because we can't quantify the differences between values. For this reason, little is written in the literature about measuring variability for ordinal and nominal data. If you want to dig more into this, you can start by reading [this paper](https://www.tandfonline.com/doi/full/10.1080/10691898.2007.11889465).
Next in this course, we'll build on what we know about the mean and the standard deviation and learn about **z-scores.**
| github_jupyter |
```
# EOReader Imports
import os
import xarray as xr
from eoreader.reader import Reader
from eoreader.products import SensorType
from eoreader.bands.alias import *
from sertit import display
# Reader is EOReader's entry point for opening satellite products.
reader = Reader()
# Create logger
import logging
from sertit import logs
logs.init_logger(logging.getLogger("eoreader"))
# Set a DEM
# NOTE(review): assumes the Copernicus 30 m DEM VRT is mounted at this
# absolute path — confirm on the target machine before running.
from eoreader.env_vars import DEM_PATH
os.environ[DEM_PATH] = os.path.join("/home", "data", "DS2", "BASES_DE_DONNEES", "GLOBAL", "COPDEM_30m",
"COPDEM_30m.vrt")
# Paths
stack_folder = os.path.join("/home", "data", "DS3", "CI", "eoreader", "others")
opt_path = os.path.join(stack_folder, "20200310T030415_WV02_Ortho_BGRN_STK.tif")
sar_path = os.path.join(stack_folder, "20210827T162210_ICEYE_SC_GRD_STK.tif")
# Optical minimum example
# Minimal "custom product" open: only the sensor type and the band map
# (band name -> 1-based raster index) are supplied here.
opt_prod = reader.open(opt_path,
custom=True,
sensor_type="OPTICAL", # With a string
band_map={BLUE: 1, GREEN: 2, RED: 3, NIR: 4, SWIR_1: 5})
opt_stack = opt_prod.stack([BLUE, GREEN, RED])
xr.plot.imshow(opt_stack.copy(data=display.scale(opt_stack.data)))
opt_stack
# SAR minimum example
sar_prod = reader.open(sar_path,
custom=True,
sensor_type=SensorType.SAR, # With the Enum
band_map={VV: 1, VV_DSPK: 2})
# SLOPE is presumably derived from the DEM configured above — verify.
sar_stack = sar_prod.stack([SLOPE, VV, VV_DSPK])
xr.plot.imshow(sar_stack.copy(data=display.scale(sar_stack.data)))
sar_stack
# You can compute the footprint and the extent
base = opt_prod.extent.plot(color='cyan', edgecolor='black')
opt_prod.footprint.plot(ax=base, color='blue', edgecolor='black', alpha=0.5)
base = sar_prod.extent.plot(color='cyan', edgecolor='black')
sar_prod.footprint.plot(ax=base, color='blue', edgecolor='black', alpha=0.5)
# Optical
# Fully-specified custom optical product: the metadata (name, datetime,
# platform, sun angles, ...) is passed explicitly because a custom stack
# carries no metadata file to read it from.
opt_prod = reader.open(
opt_path,
custom=True,
name="20200310T030415_WV02_Ortho",
acquisition_datetime="20200310T030415",
sensor_type=SensorType.OPTICAL,
platform="WV02",
product_type="Ortho",
default_resolution=2.0,
sun_azimuth=10.0,
sun_zenith=20.0,
band_map={BLUE: 1, GREEN: 2, RED: 3, NIR: 4, SWIR_1: 5},
)
# NOTE(review): HILLSHADE appears to rely on the DEM plus the sun angles
# given above — confirm against the EOReader documentation.
hillshade = opt_prod.load(HILLSHADE)[HILLSHADE]
hillshade.plot()
hillshade
# SAR
sar_prod = reader.open(
sar_path,
custom=True,
sensor_type=SensorType.SAR,
name="20210827T162210_ICEYE_SC_GRD",
acquisition_datetime="20210827T162210",
platform="ICEYE",
product_type="GRD",
default_resolution=6.0,
band_map={VV: 1, VV_DSPK: 2},
)
vv = sar_prod.load(VV)[VV]
vv
```
| github_jupyter |
# Neural networks with PyTorch
Deep learning networks tend to be massive with dozens or hundreds of layers, that's where the term "deep" comes from. You can build one of these deep networks using only weight matrices as we did in the previous notebook, but in general it's very cumbersome and difficult to implement. PyTorch has a nice module `nn` that provides a nice way to efficiently build large neural networks.
```
# Import necessary packages
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
import helper
import matplotlib.pyplot as plt
```
Now we're going to build a larger network that can solve a (formerly) difficult problem, identifying text in an image. Here we'll use the MNIST dataset which consists of greyscale handwritten digits. Each image is 28x28 pixels, you can see a sample below
<img src='assets/mnist.png'>
Our goal is to build a neural network that can take one of these images and predict the digit in the image.
First up, we need to get our dataset. This is provided through the `torchvision` package. The code below will download the MNIST dataset, then create training and test datasets for us. Don't worry too much about the details here, you'll learn more about this later.
```
### Run this cell
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
```
We have the training data loaded into `trainloader` and we make that an iterator with `iter(trainloader)`. Later, we'll use this to loop through the dataset for training, like
```python
for image, label in trainloader:
## do things with images and labels
```
You'll notice I created the `trainloader` with a batch size of 64, and `shuffle=True`. The batch size is the number of images we get in one iteration from the data loader and pass through our network, often called a *batch*. And `shuffle=True` tells it to shuffle the dataset every time we start going through the data loader again. But here I'm just grabbing the first batch so we can check out the data. We can see below that `images` is just a tensor with size `(64, 1, 28, 28)`. So, 64 images per batch, 1 color channel, and 28x28 images.
```
dataiter = iter(trainloader)
# Use the builtin next(); the Python-2-style .next() method no longer
# exists on DataLoader iterators in modern PyTorch (removed in 1.13).
images, labels = next(dataiter)
print(type(images))
print(images.shape)
print(labels.shape)
```
This is what one of the images looks like.
```
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');
```
First, let's try to build a simple network for this dataset using weight matrices and matrix multiplications. Then, we'll see how to do it using PyTorch's `nn` module which provides a much more convenient and powerful method for defining network architectures.
The networks you've seen so far are called *fully-connected* or *dense* networks. Each unit in one layer is connected to each unit in the next layer. In fully-connected networks, the input to each layer must be a one-dimensional vector (which can be stacked into a 2D tensor as a batch of multiple examples). However, our images are 28x28 2D tensors, so we need to convert them into 1D vectors. Thinking about sizes, we need to convert the batch of images with shape `(64, 1, 28, 28)` to have a shape of `(64, 784)`, 784 is 28 times 28. This is typically called *flattening*, we flattened the 2D images into 1D vectors.
Previously you built a network with one output unit. Here we need 10 output units, one for each digit. We want our network to predict the digit shown in an image, so what we'll do is calculate probabilities that the image is of any one digit or class. This ends up being a discrete probability distribution over the classes (digits) that tells us the most likely class for the image. That means we need 10 output units for the 10 classes (digits). We'll see how to convert the network output into a probability distribution next.
> **Exercise:** Flatten the batch of images `images`. Then build a multi-layer network with 784 input units, 256 hidden units, and 10 output units using random tensors for the weights and biases. For now, use a sigmoid activation for the hidden layer. Leave the output layer without an activation, we'll add one that gives us a probability distribution next.
```
## Your solution
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    denominator = 1 + torch.exp(-x)
    return torch.reciprocal(denominator)
# Flatten the batch of images images
batch_size = images.shape[0]
inputs = images.view((batch_size,28*28))
# Then build a multi-layer network with 784 input units, 256 hidden units, and 10 output units
#using random tensors for the weights and biases
input_units = 28*28
hidden_units = 256
output_units = 10
wi = torch.randn(input_units,hidden_units)
bi = torch.randn(hidden_units)
wh = torch.randn(hidden_units,output_units)
bh = torch.randn(output_units)
# For now, use a sigmoid activation for the hidden layer.
h = sigmoid(torch.mm(inputs, wi)+ bi)
out = torch.mm(h,wh) + bh
print(out.shape)
```
Now we have 10 outputs for our network. We want to pass in an image to our network and get out a probability distribution over the classes that tells us the likely class(es) the image belongs to. Something that looks like this:
<img src='assets/image_distribution.png' width=500px>
Here we see that the probability for each class is roughly the same. This is representing an untrained network, it hasn't seen any data yet so it just returns a uniform distribution with equal probabilities for each class.
To calculate this probability distribution, we often use the [**softmax** function](https://en.wikipedia.org/wiki/Softmax_function). Mathematically this looks like
$$
\Large \sigma(x_i) = \cfrac{e^{x_i}}{\sum_k^K{e^{x_k}}}
$$
What this does is squish each input $x_i$ between 0 and 1 and normalizes the values to give you a proper probability distribution where the probabilities sum up to one.
> **Exercise:** Implement a function `softmax` that performs the softmax calculation and returns probability distributions for each example in the batch. Note that you'll need to pay attention to the shapes when doing this. If you have a tensor `a` with shape `(64, 10)` and a tensor `b` with shape `(64,)`, doing `a/b` will give you an error because PyTorch will try to do the division across the columns (called broadcasting) but you'll get a size mismatch. The way to think about this is for each of the 64 examples, you only want to divide by one value, the sum in the denominator. So you need `b` to have a shape of `(64, 1)`. This way PyTorch will divide the 10 values in each row of `a` by the one value in each row of `b`. Pay attention to how you take the sum as well. You'll need to define the `dim` keyword in `torch.sum`. Setting `dim=0` takes the sum across the rows while `dim=1` takes the sum across the columns.
```
def softmax(x):
    """Row-wise softmax for a 2-D tensor shaped (batch, classes).

    Each row is exponentiated and divided by its own sum, so every row
    of the result is a probability distribution summing to one.
    """
    exponentials = torch.exp(x)
    row_totals = torch.sum(exponentials, dim=1, keepdim=True)
    return exponentials / row_totals
# Here, out should be the output of the network in the previous excercise with shape (64,10)
probabilities = softmax(out)
# Does it have the right shape? Should be (64, 10)
print(probabilities.shape)
# Does it sum to 1?
print(probabilities.sum(dim=1))
```
## Building networks with PyTorch
PyTorch provides a module `nn` that makes building networks much simpler. Here I'll show you how to build the same one as above with 784 inputs, 256 hidden units, 10 output units and a softmax output.
```
from torch import nn
class Network(nn.Module):
    """Two-layer fully connected classifier: 784 -> 256 -> 10.

    A sigmoid activation follows the hidden layer, and a softmax over
    the class dimension turns the output scores into probabilities.
    """

    def __init__(self):
        super().__init__()
        # Layer modules; these attribute names are part of the public
        # surface (net.hidden.weight etc. are referenced elsewhere).
        self.hidden = nn.Linear(784, 256)
        self.output = nn.Linear(256, 10)
        # Activation modules.
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Map a batch of flattened images to class probabilities."""
        hidden_activations = self.sigmoid(self.hidden(x))
        class_scores = self.output(hidden_activations)
        return self.softmax(class_scores)
```
Let's go through this bit by bit.
```python
class Network(nn.Module):
```
Here we're inheriting from `nn.Module`. Combined with `super().__init__()` this creates a class that tracks the architecture and provides a lot of useful methods and attributes. It is mandatory to inherit from `nn.Module` when you're creating a class for your network. The name of the class itself can be anything.
```python
self.hidden = nn.Linear(784, 256)
```
This line creates a module for a linear transformation, $x\mathbf{W} + b$, with 784 inputs and 256 outputs and assigns it to `self.hidden`. The module automatically creates the weight and bias tensors which we'll use in the `forward` method. You can access the weight and bias tensors once the network (`net`) is created with `net.hidden.weight` and `net.hidden.bias`.
```python
self.output = nn.Linear(256, 10)
```
Similarly, this creates another linear transformation with 256 inputs and 10 outputs.
```python
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
```
Here I defined operations for the sigmoid activation and softmax output. Setting `dim=1` in `nn.Softmax(dim=1)` calculates softmax across the columns.
```python
def forward(self, x):
```
PyTorch networks created with `nn.Module` must have a `forward` method defined. It takes in a tensor `x` and passes it through the operations you defined in the `__init__` method.
```python
x = self.hidden(x)
x = self.sigmoid(x)
x = self.output(x)
x = self.softmax(x)
```
Here the input tensor `x` is passed through each operation and reassigned to `x`. We can see that the input tensor goes through the hidden layer, then a sigmoid function, then the output layer, and finally the softmax function. It doesn't matter what you name the variables here, as long as the inputs and outputs of the operations match the network architecture you want to build. The order in which you define things in the `__init__` method doesn't matter, but you'll need to sequence the operations correctly in the `forward` method.
Now we can create a `Network` object.
```
# Create the network and look at it's text representation
model = Network()
model
```
You can define the network somewhat more concisely and clearly using the `torch.nn.functional` module. This is the most common way you'll see networks defined as many operations are simple element-wise functions. We normally import this module as `F`, `import torch.nn.functional as F`.
```
import torch.nn.functional as F
class Network(nn.Module):
    """Two-layer classifier (784 -> 256 -> 10) using functional activations."""

    def __init__(self):
        super().__init__()
        # Inputs-to-hidden linear transformation.
        self.hidden = nn.Linear(784, 256)
        # Output layer, 10 units - one per digit class.
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        # Hidden layer with sigmoid activation.
        # torch.sigmoid replaces F.sigmoid, which is deprecated and
        # emits a warning on modern PyTorch; the math is identical.
        x = torch.sigmoid(self.hidden(x))
        # Output layer with softmax across the class dimension.
        x = F.softmax(self.output(x), dim=1)
        return x
```
### Activation functions
So far we've only been looking at the softmax activation, but in general any function can be used as an activation function. The only requirement is that for a network to approximate a non-linear function, the activation functions must be non-linear. Here are a few more examples of common activation functions: Tanh (hyperbolic tangent), and ReLU (rectified linear unit).
<img src="assets/activation.png" width=700px>
In practice, the ReLU function is used almost exclusively as the activation function for hidden layers.
### Your Turn to Build a Network
<img src="assets/mlp_mnist.png" width=600px>
> **Exercise:** Create a network with 784 input units, a hidden layer with 128 units and a ReLU activation, then a hidden layer with 64 units and a ReLU activation, and finally an output layer with a softmax activation as shown above. You can use a ReLU activation with the `nn.ReLU` module or `F.relu` function.
```
## Your solution here
from torch import nn
import torch.nn.functional as F
class ReLUNetwork(nn.Module):
    """Three-layer MNIST classifier: 784 -> 128 -> 64 -> 10.

    ReLU activations follow both hidden layers; the output is squashed
    with a softmax so each row sums to one.
    """

    def __init__(self):
        super().__init__()
        # Attribute names are referenced externally (model.hidden1.weight).
        self.hidden1 = nn.Linear(784, 128)
        self.hidden2 = nn.Linear(128, 64)
        self.output = nn.Linear(64, 10)

    def forward(self, x):
        """Map a batch of flattened images to class probabilities."""
        first = F.relu(self.hidden1(x))
        second = F.relu(self.hidden2(first))
        return F.softmax(self.output(second), dim=1)
model = ReLUNetwork()
model
```
### Initializing weights and biases
The weights and such are automatically initialized for you, but it's possible to customize how they are initialized. The weights and biases are tensors attached to the layer you defined, you can get them with `model.fc1.weight` for instance.
```
print(model.hidden1.weight)
print(model.hidden1.bias)
```
For custom initialization, we want to modify these tensors in place. These are actually autograd *Variables*, so we need to get back the actual tensors with `model.fc1.weight.data`. Once we have the tensors, we can fill them with zeros (for biases) or random normal values.
```
# Set biases to all zeros
model.hidden1.bias.data.fill_(0)
# sample from random normal with standard dev = 0.01
model.hidden1.weight.data.normal_(std=0.01)
```
### Forward pass
Now that we have a network, let's see what happens when we pass in an image.
```
# Grab some data
dataiter = iter(trainloader)
# Builtin next() — the iterator's .next() method was removed in modern PyTorch.
images, labels = next(dataiter)
# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size
# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx,:])
img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)
```
As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random!
### Using `nn.Sequential`
PyTorch provides a convenient way to build networks like this where a tensor is passed sequentially through operations, `nn.Sequential` ([documentation](https://pytorch.org/docs/master/nn.html#torch.nn.Sequential)). Using this to build the equivalent network:
```
# Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10
# Build a feed-forward network
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
nn.Linear(hidden_sizes[1], output_size),
nn.Softmax(dim=1))
print(model)
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
```
Here our model is the same as before: 784 input units, a hidden layer with 128 units, ReLU activation, 64 unit hidden layer, another ReLU, then the output layer with 10 units, and the softmax output.
The operations are available by passing in the appropriate index. For example, if you want to get the first Linear operation and look at the weights, you'd use `model[0]`.
```
print(model[0])
model[0].weight
```
You can also pass in an `OrderedDict` to name the individual layers and operations, instead of using incremental integers. Note that dictionary keys must be unique, so _each operation must have a different name_.
```
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_sizes[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_sizes[1], output_size)),
('softmax', nn.Softmax(dim=1))]))
model
```
Now you can access layers either by integer or the name
```
print(model[0])
print(model.fc1)
```
In the next notebook, we'll see how we can train a neural network to accurately predict the numbers appearing in the MNIST images.
| github_jupyter |
# Graph Coloring with QAOA using PyQuil and Grove
We are going to color a graph using the near-term algorithm QAOA. The canonical example of QAOA was to solve a MaxCut problem, but graph coloring can be seen as a generalization of MaxCut, which is really graph coloring with only k = 2 colors
## Sample problem: Graph with n = 4 nodes and e = 5 edges, k = 3 colors
First let's make some imports:
```
# pyquil and grove imports
from grove.pyqaoa.qaoa import QAOA
from pyquil.api import QVMConnection, get_qc, WavefunctionSimulator
from pyquil.paulis import PauliTerm, PauliSum
from pyquil import Program
from pyquil.gates import CZ, H, RY, CNOT, X
# useful additional packages
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
```
### Generate a graph
Un-colored graph with 4 nodes, 5 edges, 3 colors:
```
# generate graph, specify nodes and edges
G = nx.Graph()
edges = [(0, 3), (3, 6), (6, 9), (3, 9), (0, 9)]
nodes = [0, 3, 6, 9]
G.add_nodes_from(nodes)
G.add_edges_from(edges)
# Let's draw this thing
colors = ['beige' for node in G.nodes()]
pos = nx.spring_layout(G)
default_axes = plt.axes(frameon=True)
nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos)
```
### Hamiltonians
To use QAOA, we need to consider two Hamiltonians:
* the Hamiltonian that best describes the cost function of our problem (cost Hamiltonian)
* and the Hamiltonian whose eigenstates span the solution space (mixer Hamiltonian)
Luckily for us, the cost Hamiltonian for graph coloring is the same as that for MaxCut:
$$H_{cost} = \sum_{i, j} \frac{1}{2}(\mathbb{1} - \sigma^z_i \sigma^z_j)$$
The mixer Hamiltonian must span the solution space, i.e. only those states that make any physical sense. If we allow $k=3$ qubits per node, and accept only W-states as solutions (100, 010, 001), then we can use the following mixer Hamiltonian:
$$H_{mixer} = \sum_{v, c, c'}\sigma^x_{v, c} \sigma^x_{v,c'} + \sigma^y_{v, c} \sigma^y_{v, c'}$$
Let's just create these Hamiltonians:
```
# define hamiltonians
def graph_coloring_cost_ham(graph, colors):
    """Build the MaxCut-style cost Hamiltonian terms.

    One 1/2(ZZ - I) term per graph edge per color slot; the sign is
    flipped relative to the textbook form because a classical
    minimizer is used to maximize the cost.
    """
    return [
        PauliTerm("Z", i + k, 0.5) * PauliTerm("Z", j + k) + PauliTerm("I", 0, -0.5)
        for k in range(len(colors))
        for i, j in graph.edges()
    ]
def graph_coloring_mixer_ham(graph, colors):
    """Build the XY mixer Hamiltonian terms.

    For each node's block of color qubits, add -(XX + YY) terms over
    every qubit pair in `colors`; this keeps the evolution inside the
    one-hot (W-state) subspace. Sign is flipped for the classical
    minimizer, as with the cost Hamiltonian.
    """
    mixer_operators = []
    step = len(colors)
    for offset in range(0, len(graph.nodes()) * step, step):
        for i, j in colors:
            xx = PauliTerm("X", i + offset, -1.0) * PauliTerm("X", j + offset)
            yy = PauliTerm("Y", i + offset) * PauliTerm("Y", j + offset, -1.0)
            mixer_operators.append(xx + yy)
    return mixer_operators
# above, note we've switched the sign of the Hamiltonians from those in the above equations
# this is because we use a classical minimizer, but we are actually trying to maximize the cost function
# instantiate mixer and cost
k = 3 # number of colors
colors = []
import itertools
for u, v in itertools.combinations(list(range(k)), 2):
colors.append((u, v))
cost = graph_coloring_cost_ham(G, colors)
mixer = graph_coloring_mixer_ham(G, colors)
print('Mixer Hamiltonian: ∑ XX + YY')
for operator in mixer:
print(operator)
print('\n')
print('Cost Hamiltonian: ∑ 1/2(I - ZZ)')
for operator in cost:
print(operator)
```
### Initial state
We must feed an initial reference state to QAOA that we will evolve to the ground state of the cost Hamiltonian. This initial state should ideally span the solution space, i.e. all physically relevant states. For our purposes, these would be the W-States.
First let's make functions that can create W-States:
```
# Define a F_gate
def F_gate(prog, i, j, n, k):
    """Append an F gate (controlled-rotation step of W-state prep) to prog.

    Args:
        prog: pyquil Program to extend; modified in place.
        i: control qubit index.
        j: target qubit index.
        n, k: set the rotation angle theta = arccos(sqrt(1/(n-k+1))).
    """
    theta = np.arccos(np.sqrt(1/(n-k+1)))
    # NOTE(review): relies on pyquil's Program `+=` appending the gate
    # list to `prog` in place, which is why nothing is returned —
    # confirm for the pyquil version in use.
    prog += [RY(-theta, j),
             CZ(i, j),
             RY(theta, j)]
# Generate W-states
def wstategenerate(prog, q0, q1, q2):
    """Append a 3-qubit W-state preparation circuit on (q0, q1, q2).

    Starts from |001> via X(q2), then applies two F gates and two CNOTs
    in a fixed order; the gate sequence must not be reordered.

    Returns:
        The same Program with the W-state circuit appended.
    """
    prog += X(q2)
    F_gate(prog, q2, q1, 3, 1)
    F_gate(prog, q1, q0, 3, 2)
    prog += CNOT(q1, q2)
    prog += CNOT(q0, q1)
    return prog
```
Now let's initialize W-states to feed our QAOA for the above graph:
```
# initialize state
initial_state = wstategenerate(Program(), 0, 1, 2) + wstategenerate(Program(), 3, 4, 5) + wstategenerate(Program(), 6, 7, 8) + wstategenerate(Program(), 9, 10, 11)
```
Quick test to make sure we are actually making W-states...
```
# qvm instantiation to run W-state generation
qvm_connection = QVMConnection()
# makes it easier to count up results
from collections import Counter
# get results with their counts
tests = qvm_connection.run_and_measure(initial_state, [9, 10, 11], trials=1000)
tests = [tuple(test) for test in tests]
tests_counter_tuples = Counter(tests)
most_common = tests_counter_tuples.most_common()
tests_counter = {}
for element in most_common:
result = element[0]
total = element[1]
result_string = ''
for bit in result:
result_string += str(bit)
tests_counter[result_string] = total
tests_counter
# import for histogram plotting
from qiskit.tools.visualization import plot_histogram
# plot the results with their counts
plot_histogram(tests_counter)
```
We only see the results 001, 010, and 100, so we're good!
### Use QAOA with specified parameters
Now let's instantiate QAOA with the specified cost, mixer, and number of steps:
```
# number of Trotterized steps for QAOA (I recommend two)
p = 2
# set initial beta and gamma angle values (you could try others, I find these work well)
initial_beta = [0, np.pi]
initial_gamma = [0, np.pi*2]
# arguments for the classical optimizer
minimizer_kwargs = {'method': 'Nelder-Mead',
'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2,
'disp': False}}
# list of qubit ids on instantiated qvm we'll be using
num_qubits = len(colors)*len(G.nodes())
qubit_ids = list(range(num_qubits))
# instantiation of QAOA with requisite parameters
QAOA_inst = QAOA(qvm_connection, qubit_ids,
steps=p,
cost_ham=cost,
ref_ham=mixer,
driver_ref=initial_state,
init_betas=initial_beta,
init_gammas=initial_gamma,
minimizer_kwargs=minimizer_kwargs)
```
Solve for betas and gammas. All of the optimization happens here:
```
betas, gammas = QAOA_inst.get_angles()
print("Values of betas:", betas)
print("Values of gammas:", gammas)
print("And the most common measurement is... ")
most_common_result, _ = QAOA_inst.get_string(betas, gammas)
print(most_common_result)
```
### Reconstruct Program
Now that we've used QAOA to solve for the optimal beta and gamma values, we can reconstruct the ground state solution by initializing a new `Program()` object with these values
```
angles = np.hstack((betas, gammas))
# We take a template for quil program
param_prog = QAOA_inst.get_parameterized_program()
# We initialize this program with the angles we have found
prog = param_prog(angles)
```
### Run and Measure Program
Now that we've reconstructed the program with the proper angles, we can run and measure this program on the QVM many times to get statistics on the outcome
```
# Here we connect to the Forest API and run our program there.
# We do that 10000 times and after each one we measure the output.
measurements = qvm_connection.run_and_measure(prog, qubit_ids, trials=10000)
```
Just reformatting results into a dictionary...
```
# This is just a hack - we can't use Counter on a list of lists but we can on a list of tuples.
measurements = [tuple(measurement) for measurement in measurements]
measurements_counter = Counter(measurements)
# This line gives us the results in the diminishing order
most_common = measurements_counter.most_common()
most_common
measurements_counter = {}
for element in most_common:
result = element[0]
total = element[1]
result_string = ''
for bit in result:
result_string += str(bit)
measurements_counter[result_string] = total
measurements_counter
```
And now reformat bit strings into colors...
```
# Reformat these bit strings into colors
# Choose which state refers to red ('r'), blue ('b') or green ('g')
colors_totals = {}
for bitstring, total in measurements_counter.items():
node_0 = bitstring[0:3]
node_1 = bitstring[3:6]
node_2 = bitstring[6:9]
node_3 = bitstring[9:12]
nodes_list = [node_0, node_1, node_2, node_3]
node_colors_string = ''
for node in nodes_list:
if node == '100':
node = 'r'
elif node == '010':
node = 'b'
elif node == '001':
node = 'g'
else:
raise Exception('Invalid!')
node_colors_string += node
colors_totals[node_colors_string] = total
print(colors_totals)
```
### Visualize results
First let's plot the results as a histogram. There are tons of possible solutions ($k^n = 3^4 = 81$), but we should expect that 6 of them occur most often, so we're looking for 6 larger peaks. This is because for this particular graph and number of colors, there are 6 colorings that maximize the cost function.
```
plot_histogram(colors_totals, figsize=(25, 15))
```
Finally, let's color the graph using these solutions. The colors and their totals have already been ordered from most results to least, so we should expect that the first 6 (i.e. 0-5) colorings maximize the number of non-adjacent colors on the graph
```
# make graph
Graph = nx.Graph()
edges = [(0, 1), (1, 2), (2, 3), (3, 0), (1, 3)]
nodes = range(4)
Graph.add_nodes_from(nodes)
Graph.add_edges_from(edges)
# Let's draw this thing
# can increment the index at the end to get the max value and the next max totals
# i.e. try [0], [1], ... , [5]
colors = list(colors_totals.keys())[0]
# draw colored graph
pos = nx.spring_layout(Graph)
default_axes = plt.axes(frameon=True)
nx.draw_networkx(Graph, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos)
```
<br>
| github_jupyter |
```
import pandas as pd
```
## Load in the "rosetta stone" file
I made this file using QGIS, the open-source mapping software. I loaded in the US Census 2010 block-level shapefile for Cook and DuPage counties in IL and the Chicago police boundaries shapefile [from here](https://data.cityofchicago.org/Public-Safety/Boundaries-Police-Districts-current-/fthy-xz3r). I then used the block centroids, provided by the census, to collect them within each zone. Since the centroids, by nature, are a "half a block" from the nearest street, this is more reliable than a polygon-in-polygon calculation. I then inspected the map visually for outliers.
I'll write up my steps for that soonest.
```
rosetta_df = pd.read_csv('../data/chicago/initial_rosetta.csv')
rosetta_df
```
## Adding in blocks that also fall into district 16
There were several blocks that didn't fall neatly into police districts out toward O'Hare airport when I did my initial mapping. So I exported those block lists from the mapping software and am adding them to the collection here.
```
dupage16_df = pd.read_csv('../data/chicago/to_16_dupage.csv')
cook16_df = pd.read_csv('../data/chicago/to_16.csv')
both_df = pd.concat([dupage16_df,cook16_df])
both_df
both_small = pd.DataFrame(both_df['GEOID10'])
both_small
both_small['dist_num'] = 16
both_small
rosetta_df2 = pd.concat([rosetta_df, both_small])
rosetta_df2.shape
```
## Make some fixes
```
170310814031007
=> 1
to_16.csv => 16
170318104003050
northern half is in 31st
southern half is in 16th
but the southern half seems to be mostly commercial. So putting it in 31st
=> 31
170438400002041
This block is in dupage county south of O’Hare
The southern half of hangs outside the 16th District … but that part of the block is a rail yard. So leaving it all in 16.
```
```
# let's see the current status of 170310814031007
rosetta_df2.loc[rosetta_df2['GEOID10'] == 170310814031007]
# adding a row
quick_row = pd.DataFrame([[170310814031007, 1]], columns=['GEOID10', 'dist_num'])
quick_row
rosetta_df3 = pd.concat([rosetta_df2,quick_row ])
rosetta_df3.shape
# let's see the current status of 170318104003050
rosetta_df3.loc[rosetta_df3['GEOID10'] == 170318104003050]
to_change = [170318104003050]
for item in to_change:
rosetta_df3.loc[rosetta_df3['GEOID10'] == item, ['dist_num']] = 31
rosetta_df3.loc[rosetta_df3['GEOID10'] == 170318104003050]
# let's see the current status of 170438400002041
rosetta_df3.loc[rosetta_df3['GEOID10'] == 170438400002041]
```
Leaving that at 16.
```
rosetta_df3.to_csv('../data/chicago/chicago_2010blocks_2020policedistricts_key.csv', index=False)
```
## Load in the population data
I downloaded the population files from [census.data.gov](https://census.data.gov).
Here are the [P3 and P5 census table files for Cook County](https://s3.amazonaws.com/media.johnkeefe.net/census-by-precinct/17031_Cook_County.zip). And here is the ["productDownload_2020-06-07T173132" zip file](https://s3.amazonaws.com/media.johnkeefe.net/census-by-precinct/productDownload_2020-06-07T173132.zip). It's a little messy, and the census doesn't label the files well, but I'm providing them as I got them. The CSVs you need are in there! Adjust your paths accordingly.
```
# census P3 for cook county by block
cook_df_p3 = pd.read_csv('/Volumes/JK_Smarts_Data/precinct_project/IL/17031_Cook_County/DECENNIALSF12010.P3_2020-06-07T150142/DECENNIALSF12010.P3_data_with_overlays_2020-06-07T150129.csv')
cook_df_p3.reset_index()
cook_df_p3.drop(0, inplace=True)
# census P3 for cook county by block
cook_df_p5 = pd.read_csv('/Volumes/JK_Smarts_Data/precinct_project/IL/17031_Cook_County/DECENNIALSF12010.P5_2020-06-07T145711/DECENNIALSF12010.P5_data_with_overlays_2020-06-07T145658.csv', low_memory=False)
cook_df_p5.reset_index()
cook_df_p5.drop(0, inplace=True)
cook_df_p3.shape, cook_df_p5.shape
cook_df = cook_df_p3.merge(cook_df_p5, on='GEO_ID')
cook_df.shape
cook_df
```
See the note a few cells up about where to get these files.
```
dupage_p3 = pd.read_csv('/Volumes/JK_Smarts_Data/precinct_project/IL/DuPage County/productDownload_2020-06-07T173132/DECENNIALSF12010.P3_data_with_overlays_2020-06-07T173122.csv')
dupage_p3.reset_index()
dupage_p3.drop(0, inplace=True)
dupage_p5 = pd.read_csv('/Volumes/JK_Smarts_Data/precinct_project/IL/DuPage County/productDownload_2020-06-07T173132/DECENNIALSF12010.P5_data_with_overlays_2020-06-07T173122.csv')
dupage_p5.reset_index()
dupage_p5.drop(0, inplace=True)
dupage_p3.shape,dupage_p5.shape
dupage_df = dupage_p3.merge(dupage_p5, on="GEO_ID")
chicago_df = pd.concat([cook_df,dupage_df])
chicago_df.shape
chicago_df.columns
chicago_df['GEOID10'] = chicago_df['GEO_ID'].str[9:].astype(int)
chicago_df.drop(columns=['NAME_y'], inplace = True)
chicago_df.columns
chicago_df.columns
rosetta_df3.shape
rosetta_df3.dtypes
chicago_df.dtypes
## Add demographic data to each chicago PD district block
block_data = rosetta_df3.merge(chicago_df, on="GEOID10", how="left")
block_data.shape
block_data
block_data.to_csv('./temp_data/chicago_2010blocks_2020policedistricts_population.csv', index=False)
# need to make all those columns numeric
block_data[['P003001', 'P003002', 'P003003', 'P003004',
'P003005', 'P003006', 'P003007', 'P003008', 'P005001', 'P005002',
'P005003', 'P005004', 'P005005', 'P005006', 'P005007', 'P005008',
'P005009', 'P005010', 'P005011', 'P005012', 'P005013', 'P005014',
'P005015', 'P005016', 'P005017']] = block_data[['P003001', 'P003002', 'P003003', 'P003004',
'P003005', 'P003006', 'P003007', 'P003008', 'P005001', 'P005002',
'P005003', 'P005004', 'P005005', 'P005006', 'P005007', 'P005008',
'P005009', 'P005010', 'P005011', 'P005012', 'P005013', 'P005014',
'P005015', 'P005016', 'P005017']].apply(pd.to_numeric)
## Check for duplicates
block_data.duplicated(subset='GEOID10', keep='first').sum()
import numpy as np
pivot = pd.pivot_table(block_data, index="dist_num", aggfunc=np.sum)
pivot
pivot.reset_index(inplace=True)
pivot.drop(columns=['GEOID10'], inplace=True)
pivot
pivot.to_csv('../data/chicago/chicago_2010pop_by_2020policedistricts.csv', index=False)
pivot['P003001'].sum()
```
Done!
| github_jupyter |
```
from typing import Union, Optional, Dict
from pathlib import Path
import json
import pandas as pd
from collections import defaultdict
def read_file(
    data_filepath: Union[str, Path],
    site: str,
    network: str,
    inlet: Optional[str] = None,
    instrument: Optional[str] = "shinyei",
    sampling_period: Optional[str] = None,
    measurement_type: Optional[str] = None,
    site_data_path: Optional[Union[str, Path]] = None,
) -> Dict:
    """Read BEACO2N data files into per-species data/metadata dictionaries.

    Args:
        data_filepath: Path to a CSV file in the standard BEACO2N format.
        site: Site name; must match a key (upper-cased) in the BEACO2N
            site-data JSON file.
        network: Network name. Currently unused; the metadata always
            records "beaco2n". Kept for interface compatibility.
        inlet: Inlet height. Currently unused ("NA" is recorded).
        instrument: Instrument name. Currently unused.
        sampling_period: Sampling period; recorded as "NOT_SET" when omitted.
        measurement_type: Measurement type. Currently unused.
        site_data_path: Optional path to the BEACO2N site-data JSON.
            Falls back to a developer-machine default when omitted.

    Returns:
        dict: Mapping of species ("pm", "co2") to a dict with keys
        "data" (DataFrame), "metadata" (dict) and "attributes" (dict).

    Raises:
        ValueError: If the CSV cannot be parsed or the site is unknown.
    """
    if sampling_period is None:
        sampling_period = "NOT_SET"

    # Map raw BEACO2N column headers to short species names.
    rename_cols = {
        "PM_ug/m3": "pm",
        "PM_ug/m3_QC_level": "pm_qc",
        "co2_ppm": "co2",
        "co2_ppm_QC_level": "co2_qc",
        "co_ppm": "co",
        "co_ppm_QC_level": "co_qc",
    }
    # Column 1 is the timestamp; columns 5-10 hold the measurements
    # and their QC levels.
    use_cols = [1, 5, 6, 7, 8, 9, 10]

    data_filepath = Path(data_filepath)

    try:
        data = pd.read_csv(
            data_filepath,
            na_values=[-999.0, "1a"],
            usecols=use_cols,
        )
        # Dict-valued parse_dates (column combining) was deprecated in
        # pandas 2.0 and removed in 3.0, so parse the timestamp column
        # explicitly and use it as a "time" index, matching the
        # previous behaviour.
        data["time"] = pd.to_datetime(data["datetime"])
        data = data.drop(columns=["datetime"]).set_index("time")
    except ValueError as e:
        raise ValueError(
            f"Unable to read data file, please make sure it is in the standard BEACO2N format.\nError: {e}"
        ) from e

    # TODO: replace this developer-specific default with a packaged
    # resource (e.g. openghg.util.load_json) before wider use.
    if site_data_path is None:
        site_data_path = "/Users/gar/Documents/Devel/openghg/openghg/data/beaco2n_site_data.json"
    beaco2n_site_data = json.loads(Path(site_data_path).read_text())

    try:
        site_metadata = beaco2n_site_data[site.upper()]
    except KeyError:
        raise ValueError(f"Site {site} not recognized.")

    site_metadata["comment"] = "Retrieved from http://beacon.berkeley.edu/"

    # Negative concentrations are physically meaningless; mask them.
    data[data < 0] = float("nan")

    data = data.rename(columns=rename_cols)

    measurement_types = ["pm", "co2"]
    units = {"pm": "ug/m3", "co2": "ppm"}

    # Annotation uses only module-level imports so it resolves for type
    # checkers (the original referenced un-imported DefaultDict/DataFrame).
    gas_data: Dict[str, Dict] = defaultdict(dict)
    for mt in measurement_types:
        m_data = data[[mt, f"{mt}_qc"]]

        species_metadata = {
            "units": units[mt],
            "site": str(site),
            "species": str(mt),
            "inlet": "NA",
            "network": "beaco2n",
            "sampling_period": str(sampling_period),
        }

        gas_data[mt]["data"] = m_data
        gas_data[mt]["metadata"] = species_metadata
        gas_data[mt]["attributes"] = site_metadata

    # TODO - add CF Compliant attributes?
    return gas_data
# Exercise read_file on one site's data.
data_path = "/home/gar/Documents/Devel/RSE/web-scrape/beaco2n/data/174_HILLPARKSECONDARYSCHOOL.csv"
data = read_file(data_filepath=data_path, site="HILLPARKSECONDARYSCHOOL", network="BEACO2N", inlet="50m")
data
data_path = Path(data_path)
# Re-do the parsing step by step on a second site's file.
data_path = filepath = "/home/gar/Documents/Devel/RSE/web-scrape/beaco2n/data/156_KILLEARNSTIRLINGSHIREGLASGOWS22002.csv"
datetime_columns = {"time": ["datetime"]}
rename_cols = {
    "PM_ug/m3": "pm",
    "PM_ug/m3_QC_level": "pm_qc",
    "co2_ppm": "co2",
    "co2_ppm_QC_level": "co2_qc",
    "co_ppm": "co",
    "co_ppm_QC_level": "co_qc",
}
# Column 1 is the timestamp; columns 5-10 are measurement + QC columns.
use_cols = [1, 5, 6, 7, 8, 9, 10]
na_values = [-999.0]
data = pd.read_csv(
    data_path,
    index_col="time",
    usecols=use_cols,
    parse_dates=datetime_columns,
    na_values=[-999.0],
)
data = data.rename(columns=rename_cols)
# Here CO is kept as well (read_file above only handles pm and co2).
measurement_types = ["pm", "co", "co2"]
# Set all values below zero to NaN
data.columns
# Drop rows where any of the three species is missing.
data = data.dropna(axis=0, subset=measurement_types)
# data = data.to_xarray()
data
units = {"pm": "ug/m3", "co2": "ppm", "co": "ppm"}
gas_data = defaultdict(dict)
for mt in measurement_types:
    # Per-species slice converted to an xarray Dataset.
    m_data = data[[mt, f"{mt}_qc"]]
    m_data = m_data.to_xarray()
    species_metadata = {
        "units": units[mt],
        "site": "s",
        "species": "spec",
        "inlet": "inlet",
        "network": "beaco2n",
        "sampling_period": str(1),
    }
    gas_data[mt] = m_data
# TODO - add CF Compliant attributes?
gas_data
# NOTE(review): "time.year" is xarray groupby syntax -- this presumably ran on
# the xarray version of `data` (see the commented-out to_xarray above); verify.
data.groupby("time.year")
list(data.groupby("time.year"))
```
| github_jupyter |
```
!pip install --no-index ../input/global-wheels/numpy-1.20.0-cp37-cp37m-manylinux2010_x86_64.whl --find-links=../input/numpyv3
!pip install --no-index ../input/global-wheels/natsort-7.1.1-py3-none-any.whl --find-links=../input/natsort
!pip install --no-index ../input/global-wheels/fastremap-1.11.1-cp37-cp37m-manylinux1_x86_64.whl --find-links=../input/fastremap
!pip install --no-index ../input/global-wheels/edt-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl --find-links=../input/edtpackage
!pip install --no-index ../input/global-wheels/pytorch_ranger-0.1.1-py3-none-any.whl --find-links=../input/pytorchranger
!pip install --no-index ../input/global-wheels/torch_optimizer-0.1.0-py3-none-any.whl --find-links=../input/torchoptimzier
!pip install --no-index ../input/global-wheels/cellpose-0.7.2-py3-none-any.whl --find-links=../input/cellposelibrary
# !rm -rf ./cell
import shutil
import os
# Start from a clean working copy of the prepared Cellpose training dataset
# (remove any leftover directory from a previous run, then copy it in).
cell_dir_path = './cell'
if os.path.exists(cell_dir_path):
    shutil.rmtree(cell_dir_path)
shutil.copytree("../input/somu-data-prep-for-cellpose-2-tif-w-1-chan-masks/cell_dataset", "./cell")
!python -m cellpose \
--train \
--use_gpu \
--dir "./cell/train" \
--test_dir "./cell/test" \
--n_epochs 30 \
--learning_rate 0.02 \
--pretrained_model cyto2torch_3
import cellpose
# Cellpose writes trained weights into <train_dir>/models/; take the first
# entry. NOTE(review): assumes exactly one model file is produced -- confirm.
model_file = "/kaggle/working/cell/train/models/" + os.listdir("/kaggle/working/cell/train/models/")[0]
model_file
! cp $model_file ./
def rle_encode(img):
    """Run-length encode a binary mask.

    Returns a space-separated string of alternating (start, length) pairs,
    with 1-based start positions over the flattened array -- the format used
    by Kaggle segmentation competitions. An all-zero mask yields ''.
    """
    flat = img.flatten()
    # Pad with zeros so every run of nonzero pixels has both a rising and a
    # falling edge, even at the array boundaries.
    padded = np.concatenate(([0], flat, [0]))
    # 1-based indices where the value changes: even slots are run starts,
    # odd slots are run ends (exclusive).
    edges = np.flatnonzero(padded[1:] != padded[:-1]) + 1
    starts = edges[0::2]
    lengths = edges[1::2] - starts
    # Interleave starts and lengths back into a single sequence.
    pairs = np.empty(edges.size, dtype=edges.dtype)
    pairs[0::2] = starts
    pairs[1::2] = lengths
    return ' '.join(map(str, pairs))
# Load every competition test image (BGR arrays, as cv2.imread returns them).
# NOTE(review): cv2 / models / plot / plt are assumed imported in an earlier cell.
test_dir = "../input/sartorius-cell-instance-segmentation/test/"
test_img_dirs = [test_dir + i for i in os.listdir(test_dir)]
test_imgs = []
for i in test_img_dirs:
    img = cv2.imread(i,cv2.IMREAD_COLOR)
    test_imgs.append(img)
# Rebuild a CellposeModel from the freshly trained weights file.
model = models.CellposeModel(gpu=True, pretrained_model=model_file, torch=True, diam_mean=30.0, net_avg=True, device=None, residual_on=True, style_on=True, concatenation=False)
masks_all = []
styles_all = []
flows_all = []
for img in test_imgs:
    chan = [0,0] # for black and white imgs
    #img = io.imread(filename)
    # diameter=60: expected cell diameter in pixels, used to rescale the input.
    masks, flows, styles = model.eval(img, diameter=60, channels=chan)
    masks_all.append(masks)
    flows_all.append(flows)
    styles_all.append(styles)
    # DISPLAY RESULTS
    fig = plt.figure(figsize=(12,5))
    plot.show_segmentation(fig, img, masks, flows[0], channels=chan)
    plt.tight_layout()
    plt.show()
#model = models.Cellpose(gpu=False, model_type='cyto')
#model = models.Cellpose(gpu=True, model_type='cyto')
| github_jupyter |
```
# from https://en.wikipedia.org/wiki/Inflation
document_text = """
In economics, inflation (or less frequently, price inflation) is a general rise in the price level of an economy
over a period of time.[1][2][3][4] When the general price level rises, each unit of currency buys fewer goods and
services; consequently, inflation reflects a reduction in the purchasing power per unit of money – a loss of real
value in the medium of exchange and unit of account within the economy.[5][6] The opposite of inflation is
deflation, a sustained decrease in the general price level of goods and services. The common measure of inflation
is the inflation rate, the annualised percentage change in a general price index, usually the consumer price
index, over time.[7]
Economists believe that very high rates of inflation and hyperinflation are harmful, and are caused by excessive
growth of the money supply.[8] Views on which factors determine low to moderate rates of inflation are more
varied. Low or moderate inflation may be attributed to fluctuations in real demand for goods and services, or
changes in available supplies such as during scarcities.[9] However, the consensus view is that a long sustained
period of inflation is caused by money supply growing faster than the rate of economic growth.[10][11]
Inflation affects economies in various positive and negative ways. The negative effects of inflation include an
increase in the opportunity cost of holding money, uncertainty over future inflation which may discourage
investment and savings, and if inflation were rapid enough, shortages of goods as consumers begin hoarding out
of concern that prices will increase in the future. Positive effects include reducing unemployment due to nominal
wage rigidity,[12] allowing the central bank greater freedom in carrying out monetary policy, encouraging loans
and investment instead of money hoarding, and avoiding the inefficiencies associated with deflation.
Today, most economists favour a low and steady rate of inflation.[13] Low (as opposed to zero or negative)
inflation reduces the severity of economic recessions by enabling the labor market to adjust more quickly in a
downturn, and reduces the risk that a liquidity trap prevents monetary policy from stabilising the economy.[14]
The task of keeping the rate of inflation low and stable is usually given to monetary authorities. Generally,
these monetary authorities are the central banks that control monetary policy through the setting of interest
rates, through open market operations, and through the setting of banking reserve requirements.[15]
"""
from sklearn.feature_extraction.text import CountVectorizer
# Bag-of-words over the raw document: one row, one column per distinct token.
vectorizer = CountVectorizer()
X_train = vectorizer.fit_transform([document_text])
X_train
vectorizer.get_feature_names()
# look at
## 'transmission',
## 'transmissions
## 'transmit'
# stemmed words (Porter stemmer: crude suffix stripping)
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
# lemmatized words (WordNet: dictionary-based base forms)
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# Compare: the stemmer collapses all three to "transmiss"/"transmit" variants,
# while the lemmatizer only normalises the plural.
print(stemmer.stem("transmission"))
print(stemmer.stem("transmissions"))
print(stemmer.stem("transmit"))
print(lemmatizer.lemmatize("transmission"))
print(lemmatizer.lemmatize("transmissions"))
print(lemmatizer.lemmatize("transmit"))
lemma_text = ' '.join(lemmatizer.lemmatize(w) for w in document_text.split())
lemma_text
stem_text = ' '.join(stemmer.stem(w) for w in document_text.split())
stem_text
# Vocabulary after stemming -- should be smaller than the raw vocabulary.
stem_vectorizer = CountVectorizer()
stem_vectorizer.fit_transform([stem_text])
stem_vectorizer.get_feature_names()
# generate for lemmatized as well
# only alpha: strip everything but letters (removes the [1][2]... citations).
import re
regex = re.compile('[^a-zA-Z]')
alpha_text = regex.sub(' ', document_text)
alpha_text = ' '.join(alpha_text.split())
alpha_text
# remove stop words
from nltk.corpus import stopwords
stop = stopwords.words('english')
'or' in stop
nostop_text = ' '.join(word.lower() for word in alpha_text.lower().split() if word not in stop)
print(nostop_text)
# what happens if you use the original document_text. does it catch all the stop words?
# generate a stemmed, alpha, no stop word list
stem_text = ' '.join(stemmer.stem(w) for w in nostop_text.split())
stem_text
lemma_text = ' '.join(lemmatizer.lemmatize(w) for w in nostop_text.split())
lemma_text
stem_vectorizer = CountVectorizer()
stem_vectorizer = CountVectorizer()  # NOTE(review): duplicated line -- harmless, can be removed
stem_vectorizer.fit_transform([stem_text])
stem_vectorizer.get_feature_names()
```
| github_jupyter |
# Control and audit data exploration activities with Amazon SageMaker Studio and AWS Lake Formation
This notebook accompanies the blog post "Control and audit data exploration activities with Amazon SageMaker Studio and AWS Lake Formation". The notebook demonstrates how to use SageMaker Studio along with Lake Formation to provide granular access to a data lake for different data scientists. The queries used in this notebook are based on the [Amazon Customer Reviews Dataset](https://registry.opendata.aws/amazon-reviews/), which should be registered in an existing data lake before running this code.
To compare data permissions across users, you should run the same notebook using different SageMaker user profiles.
### Prerequisites
This implementation uses Amazon Athena and the [PyAthena](https://pypi.org/project/PyAthena/) client to query data on a data lake registered with AWS Lake Formation. We will also use Pandas to run queries and store the results as Dataframes.
First we install PyAthena and import the required libraries.
```
!pip install pyathena
from pyathena import connect
import pandas as pd
import boto3
```
The AWS Account ID and AWS Region will be used to create an S3 bucket where Athena will save query output files. The AWS Region will also be passed as parameter when connecting to our data lake through Athena using PyAthena.
```
# Resolve the caller's account and region to build a globally unique bucket
# name for Athena query results.
sts = boto3.client("sts")
account_id = sts.get_caller_identity()["Account"]
region = boto3.session.Session().region_name
query_result_bucket_name = "sagemaker-audit-control-query-results-{}-{}".format(region, account_id)
```
### Create S3 bucket for query output files - SKIP THIS SECTION FOR THE SECOND DATA SCIENTIST USER
```
query_result_bucket = {}
# us-east-1 is special-cased: S3 CreateBucket rejects a LocationConstraint
# for the default region, so it must be omitted there.
if region == "us-east-1":
    s3 = boto3.client("s3")
    query_result_bucket = s3.create_bucket(
        Bucket = query_result_bucket_name,
    )
else:
    s3 = boto3.client("s3", region_name=region)
    query_result_bucket = s3.create_bucket(
        Bucket = query_result_bucket_name,
        CreateBucketConfiguration = {
            "LocationConstraint": region
        }
    )
```
### Run queries using Amazon Athena and PyAthena
Once the prerequisites are configured, we can start running queries on the data lake through Athena using the PyAthena client.
First we create a connection to Athena using PyAthena's `connect` constructor. We will pass this object as a parameter when we run queries with Pandas `read_sql` method.
```
# PyAthena connection; query result files land under s3://<bucket>/queries/.
conn = connect(s3_staging_dir ="s3://{}/queries/".format(query_result_bucket_name), region_name=region)
```
Our first query will list all the databases to which this user has been granted access in the data lake.
```
# Take the first database this user is allowed to see (Lake Formation filters the list).
db_name_df = pd.read_sql("SHOW DATABASES", conn)
db_name = db_name_df.iloc[0][0]
print(db_name)
```
Our second query will list all the tables in the previous database to which this user has been granted access.
```
# Take the first table visible to this user within that database.
tables_df = pd.read_sql("SHOW TABLES IN {}".format(db_name), conn)
table_name = tables_df.iloc[0][0]
print(table_name)
```
Finally we run a `SELECT` query to see all columns in the previous table to which this user has been granted access. If you have full permissions for the table, the `SELECT` query output will include the following columns:
- marketplace
- customer_id
- review_id
- product_id
- product_parent
- product_title
- star_rating
- helpful_votes
- total_votes
- vine
- verified_purchase
- review_headline
- review_body
- review_date
- year
- product_category
```
# Columns returned depend on this user's Lake Formation column-level grants.
df = pd.read_sql("SELECT * FROM {}.{} LIMIT 10".format(db_name, table_name), conn)
df.head(10)
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# The Keras functional API in TensorFlow
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/keras/functional"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/keras/functional.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/keras/functional.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/keras/functional.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Setup
```
from __future__ import absolute_import, division, print_function, unicode_literals
try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass
import tensorflow as tf

tf.keras.backend.clear_session()  # For easy reset of notebook state.
```
## Introduction
You're already familiar with the use of `keras.Sequential()` to create models.
The Functional API is a way to create models that is more flexible than `Sequential`:
it can handle models with non-linear topology, models with shared layers,
and models with multiple inputs or outputs.
It's based on the idea that a deep learning model
is usually a directed acyclic graph (DAG) of layers.
The Functional API is a set of tools for **building graphs of layers**.
Consider the following model:
```
(input: 784-dimensional vectors)
↧
[Dense (64 units, relu activation)]
↧
[Dense (64 units, relu activation)]
↧
[Dense (10 units, softmax activation)]
↧
(output: logits of a probability distribution over 10 classes)
```
It's a simple graph of 3 layers.
To build this model with the functional API,
you would start by creating an input node:
```
from tensorflow import keras
inputs = keras.Input(shape=(784,))
```
Here we just specify the shape of our data: 784-dimensional vectors.
Note that the batch size is always omitted, we only specify the shape of each sample.
For an input meant for images of shape `(32, 32, 3)`, we would have used:
```
# Just for demonstration purposes
img_inputs = keras.Input(shape=(32, 32, 3))
```
What gets returned, `inputs`, contains information about the shape and dtype of the
input data that you expect to feed to your model:
```
inputs.shape
inputs.dtype
```
You create a new node in the graph of layers by calling a layer on this `inputs` object:
```
from tensorflow.keras import layers
dense = layers.Dense(64, activation='relu')
x = dense(inputs)
```
The "layer call" action is like drawing an arrow from "inputs" to this layer we created.
We're "passing" the inputs to the `dense` layer, and out we get `x`.
Let's add a few more layers to our graph of layers:
```
x = layers.Dense(64, activation='relu')(x)
outputs = layers.Dense(10)(x)
```
At this point, we can create a `Model` by specifying its inputs and outputs in the graph of layers:
```
model = keras.Model(inputs=inputs, outputs=outputs)
```
To recap, here is our full model definition process:
```
# Full definition of the 784 -> 64 -> 64 -> 10 MLP described above.
inputs = keras.Input(shape=(784,), name='img')
x = layers.Dense(64, activation='relu')(inputs)
x = layers.Dense(64, activation='relu')(x)
outputs = layers.Dense(10)(x)  # raw logits; no softmax on the last layer
model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')
```
Let's check out what the model summary looks like:
```
model.summary()
```
We can also plot the model as a graph:
```
keras.utils.plot_model(model, 'my_first_model.png')
```
And optionally display the input and output shapes of each layer in the plotted graph:
```
keras.utils.plot_model(model, 'my_first_model_with_shape_info.png', show_shapes=True)
```
This figure and the code we wrote are virtually identical. In the code version,
the connection arrows are simply replaced by the call operation.
A "graph of layers" is a very intuitive mental image for a deep learning model,
and the functional API is a way to create models that closely mirrors this mental image.
## Training, evaluation, and inference
Training, evaluation, and inference work exactly in the same way for models built
using the Functional API as for Sequential models.
Here is a quick demonstration.
Here we load MNIST image data, reshape it into vectors,
fit the model on the data (while monitoring performance on a validation split),
and finally we evaluate our model on the test data:
```
# Flatten 28x28 images to 784-vectors and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255

# from_logits=True because the model's final Dense layer has no softmax.
model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              optimizer=keras.optimizers.RMSprop(),
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=5,
                    validation_split=0.2)
test_scores = model.evaluate(x_test, y_test, verbose=2)
print('Test loss:', test_scores[0])
print('Test accuracy:', test_scores[1])
```
For a complete guide about model training and evaluation, see [guide to training and evaluation](./train_and_evaluate.ipynb).
## Saving and serialization
Saving and serialization work exactly in the same way for models built
using the Functional API as for Sequential models.
The standard way to save a Functional model is to call `model.save()` to save the whole model into a single file.
You can later recreate the same model from this file, even if you no longer have access to the code
that created the model.
This file includes:
- The model's architecture
- The model's weight values (which were learned during training)
- The model's training config (what you passed to `compile`), if any
- The optimizer and its state, if any (this enables you to restart training where you left off)
```
model.save('path_to_my_model')
del model
# Recreate the exact same model purely from the file:
model = keras.models.load_model('path_to_my_model')
```
For a complete guide about model saving, see [Guide to Saving and Serializing Models](./save_and_serialize.ipynb).
## Using the same graph of layers to define multiple models
In the functional API, models are created by specifying their inputs
and outputs in a graph of layers. That means that a single graph of layers
can be used to generate multiple models.
In the example below, we use the same stack of layers to instantiate two models:
an `encoder` model that turns image inputs into 16-dimensional vectors,
and an end-to-end `autoencoder` model for training.
```
# Encoder: 28x28x1 image -> 16-d code (GlobalMaxPooling removes spatial dims).
encoder_input = keras.Input(shape=(28, 28, 1), name='img')
x = layers.Conv2D(16, 3, activation='relu')(encoder_input)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.Conv2D(16, 3, activation='relu')(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
encoder.summary()

# Decoder built on the SAME graph nodes: reshape the 16-d code to 4x4x1 and
# mirror the encoder with transposed convolutions back up to 28x28x1.
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
x = layers.Conv2DTranspose(32, 3, activation='relu')(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x)
autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder')
autoencoder.summary()
```
Note that we make the decoding architecture strictly symmetrical to the encoding architecture,
so that we get an output shape that is the same as the input shape `(28, 28, 1)`.
The reverse of a `Conv2D` layer is a `Conv2DTranspose` layer, and the reverse of a `MaxPooling2D`
layer is an `UpSampling2D` layer.
## All models are callable, just like layers
You can treat any model as if it were a layer, by calling it on an `Input` or on the output of another layer.
Note that by calling a model you aren't just reusing the architecture of the model, you're also reusing its weights.
Let's see this in action. Here's a different take on the autoencoder example that creates an encoder model, a decoder model,
and chain them in two calls to obtain the autoencoder model:
```
# Same autoencoder, but this time encoder and decoder are separate Models
# chained by calling them like layers.
encoder_input = keras.Input(shape=(28, 28, 1), name='original_img')
x = layers.Conv2D(16, 3, activation='relu')(encoder_input)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.Conv2D(16, 3, activation='relu')(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
encoder.summary()

# Standalone decoder with its own 16-d input.
decoder_input = keras.Input(shape=(16,), name='encoded_img')
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
x = layers.Conv2DTranspose(32, 3, activation='relu')(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation='relu')(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation='relu')(x)
decoder = keras.Model(decoder_input, decoder_output, name='decoder')
decoder.summary()

# Calling a Model like a layer reuses both its architecture AND its weights.
autoencoder_input = keras.Input(shape=(28, 28, 1), name='img')
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name='autoencoder')
autoencoder.summary()
```
As you can see, models can be nested: a model can contain submodels (since a model is just like a layer).
A common use case for model nesting is *ensembling*.
As an example, here's how to ensemble a set of models into a single model that averages their predictions:
```
def get_model():
    """Build a small 128 -> 1 linear model (fresh weights on each call)."""
    inputs = keras.Input(shape=(128,))
    outputs = layers.Dense(1)(inputs)
    return keras.Model(inputs, outputs)

model1 = get_model()
model2 = get_model()
model3 = get_model()

# Ensemble: run each model on the same input and average their predictions.
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
```
## Manipulating complex graph topologies
### Models with multiple inputs and outputs
The functional API makes it easy to manipulate multiple inputs and outputs.
This cannot be handled with the Sequential API.
Here's a simple example.
Let's say you're building a system for ranking custom issue tickets by priority and routing them to the right department.
Your model will have 3 inputs:
- Title of the ticket (text input)
- Text body of the ticket (text input)
- Any tags added by the user (categorical input)
It will have two outputs:
- Priority score between 0 and 1 (scalar sigmoid output)
- The department that should handle the ticket (softmax output over the set of departments)
Let's build this model in a few lines with the Functional API.
```
num_tags = 12  # Number of unique issue tags
num_words = 10000  # Size of vocabulary obtained when preprocessing text data
num_departments = 4  # Number of departments for predictions

# Three named inputs; the names let fit() accept dicts keyed by input name.
title_input = keras.Input(shape=(None,), name='title')  # Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name='body')  # Variable-length sequence of ints
tags_input = keras.Input(shape=(num_tags,), name='tags')  # Binary vectors of size `num_tags`

# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)

# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)

# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])

# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name='priority')(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name='department')(x)

# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(inputs=[title_input, body_input, tags_input],
                    outputs=[priority_pred, department_pred])
```
Let's plot the model:
```
keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True)
```
When compiling this model, we can assign different losses to each output.
You can even assign different weights to each loss, to modulate their
contribution to the total training loss.
```
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True)],
loss_weights=[1., 0.2])
```
Since we gave names to our output layers, we could also specify the loss like this:
```
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss={'priority':keras.losses.BinaryCrossentropy(from_logits=True),
'department': keras.losses.CategoricalCrossentropy(from_logits=True)},
loss_weights=[1., 0.2])
```
We can train the model by passing lists of Numpy arrays of inputs and targets:
```
import numpy as np

# Dummy input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype('float32')

# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))

# Inputs and targets are passed as dicts keyed by the Input / output-layer names.
model.fit({'title': title_data, 'body': body_data, 'tags': tags_data},
          {'priority': priority_targets, 'department': dept_targets},
          epochs=2,
          batch_size=32)
```
When calling fit with a `Dataset` object, it should yield either a
tuple of lists like `([title_data, body_data, tags_data], [priority_targets, dept_targets])`
or a tuple of dictionaries like
`({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets})`.
For more detailed explanation, refer to the complete guide [guide to training and evaluation](./train_and_evaluate.ipynb).
### A toy resnet model
In addition to models with multiple inputs and outputs,
the Functional API makes it easy to manipulate non-linear connectivity topologies,
that is to say, models where layers are not connected sequentially.
This also cannot be handled with the Sequential API (as the name indicates).
A common use case for this is residual connections.
Let's build a toy ResNet model for CIFAR10 to demonstrate this.
```
inputs = keras.Input(shape=(32, 32, 3), name='img')
x = layers.Conv2D(32, 3, activation='relu')(inputs)
x = layers.Conv2D(64, 3, activation='relu')(x)
block_1_output = layers.MaxPooling2D(3)(x)

# padding='same' preserves spatial shape so layers.add can sum the branches
# (the residual connection).
x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_2_output = layers.add([x, block_1_output])

x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_2_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_3_output = layers.add([x, block_2_output])

# Classification head: pool to a vector, then Dense + dropout + 10 logits.
x = layers.Conv2D(64, 3, activation='relu')(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)

model = keras.Model(inputs, outputs, name='toy_resnet')
model.summary()
```
Let's plot the model:
```
keras.utils.plot_model(model, 'mini_resnet.png', show_shapes=True)
```
Let's train it:
```
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# One-hot labels to match CategoricalCrossentropy (not the sparse variant).
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
              loss=keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['acc'])
model.fit(x_train, y_train,
          batch_size=64,
          epochs=1,
          validation_split=0.2)
```
## Sharing layers
Another good use for the functional API is models that use shared layers. Shared layers are layer instances that get reused multiple times in the same model: they learn features that correspond to multiple paths in the graph-of-layers.
Shared layers are often used to encode inputs that come from similar spaces (say, two different pieces of text that feature similar vocabulary), since they enable sharing of information across these different inputs, and they make it possible to train such a model on less data. If a given word is seen in one of the inputs, that will benefit the processing of all inputs that go through the shared layer.
To share a layer in the Functional API, just call the same layer instance multiple times. For instance, here's an `Embedding` layer shared across two different text inputs:
```
# Embedding for 1000 unique words mapped to 128-dimensional vectors
shared_embedding = layers.Embedding(1000, 128)

# Variable-length sequence of integers
text_input_a = keras.Input(shape=(None,), dtype='int32')

# Variable-length sequence of integers
text_input_b = keras.Input(shape=(None,), dtype='int32')

# We reuse the same layer to encode both inputs -- both branches share one
# weight matrix, so training either input updates the same embedding.
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
```
## Extracting and reusing nodes in the graph of layers
Because the graph of layers you are manipulating in the Functional API is a static datastructure, it can be accessed and inspected. This is how we are able to plot Functional models as images, for instance.
This also means that we can access the activations of intermediate layers ("nodes" in the graph) and reuse them elsewhere. This is extremely useful for feature extraction, for example!
Let's look at an example. This is a VGG19 model with weights pre-trained on ImageNet:
```
from tensorflow.keras.applications import VGG19
vgg19 = VGG19()
```
And these are the intermediate activations of the model, obtained by querying the graph data structure:
```
features_list = [layer.output for layer in vgg19.layers]
```
We can use these features to create a new feature-extraction model, that returns the values of the intermediate layer activations -- and we can do all of this in 3 lines.
```
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype('float32')
extracted_features = feat_extraction_model(img)
```
This comes in handy when [implementing neural style transfer](https://medium.com/tensorflow/neural-style-transfer-creating-art-with-deep-learning-using-tf-keras-and-eager-execution-7d541ac31398), among other things.
## Extending the API by writing custom layers
tf.keras has a wide range of built-in layers. Here are a few examples:
- Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`, etc.
- Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`, etc.
- RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`, etc.
- `BatchNormalization`, `Dropout`, `Embedding`, etc.
If you don't find what you need, it's easy to extend the API by creating your own layers.
All layers subclass the `Layer` class and implement:
- A `call` method, that specifies the computation done by the layer.
- A `build` method, that creates the weights of the layer (note that this is just a style convention; you could create weights in `__init__` as well).
To learn more about creating layers from scratch, check out the guide [Guide to writing layers and models from scratch](./custom_layers_and_models.ipynb).
Here's a simple implementation of a `Dense` layer:
```
# A minimal custom Dense layer: weights are created lazily in build(),
# once the incoming feature dimension is known.
class CustomDense(layers.Layer):
    """A from-scratch re-implementation of a fully-connected layer."""

    def __init__(self, units=32):
        super(CustomDense, self).__init__()
        self.units = units  # number of output units

    def build(self, input_shape):
        # Kernel: maps input_shape[-1] features to self.units outputs.
        self.w = self.add_weight(shape=(input_shape[-1], self.units),
                                 initializer='random_normal',
                                 trainable=True)
        # Bias: one scalar per output unit.
        self.b = self.add_weight(shape=(self.units,),
                                 initializer='random_normal',
                                 trainable=True)

    def call(self, inputs):
        # y = x @ W + b
        return tf.matmul(inputs, self.w) + self.b

# Use the custom layer in a Functional model, exactly like a built-in layer.
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
```
If you want your custom layer to support serialization, you should also define a `get_config` method,
that returns the constructor arguments of the layer instance:
```
class CustomDense(layers.Layer):
    """Custom Dense layer, now serializable thanks to get_config()."""

    def __init__(self, units=32):
        super(CustomDense, self).__init__()
        self.units = units  # number of output units

    def build(self, input_shape):
        # Create kernel and bias lazily, once the input dim is known.
        self.w = self.add_weight(shape=(input_shape[-1], self.units),
                                 initializer='random_normal',
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer='random_normal',
                                 trainable=True)

    def call(self, inputs):
        # y = x @ W + b
        return tf.matmul(inputs, self.w) + self.b

    def get_config(self):
        # Return the constructor arguments so Keras can re-create the
        # layer from its config during serialization/cloning.
        return {'units': self.units}

inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()

# Rebuilding needs custom_objects so the 'CustomDense' name stored in the
# config can be resolved back to the Python class.
new_model = keras.Model.from_config(
    config, custom_objects={'CustomDense': CustomDense})
```
Optionally, you could also implement the classmethod `from_config(cls, config)`, which is in charge of recreating a layer instance given its config dictionary. The default implementation of `from_config` is:
```python
def from_config(cls, config):
    """Default from_config: rebuild the layer by passing the config dict
    straight to the constructor (used as a classmethod in practice)."""
    return cls(**config)
## When to use the Functional API
How to decide whether to use the Functional API to create a new model, or just subclass the `Model` class directly?
In general, the Functional API is higher-level, easier & safer to use, and has a number of features that subclassed Models do not support.
However, Model subclassing gives you greater flexibility when creating models that are not easily expressible as directed acyclic graphs of layers (for instance, you could not implement a Tree-RNN with the Functional API, you would have to subclass `Model` directly).
### Here are the strengths of the Functional API:
The properties listed below are all true for Sequential models as well (which are also data structures), but they aren't true for subclassed models (which are Python bytecode, not data structures).
#### It is less verbose.
No `super(MyClass, self).__init__(...)`, no `def call(self, ...):`, etc.
Compare:
```python
inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
mlp = keras.Model(inputs, outputs)
```
With the subclassed version:
```python
# Subclassed equivalent of the 2-layer Functional MLP shown just above.
class MLP(keras.Model):
    """A two-layer MLP (Dense 64-relu -> Dense 10) as a Model subclass."""

    def __init__(self, **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.dense_1 = layers.Dense(64, activation='relu')
        self.dense_2 = layers.Dense(10)

    def call(self, inputs):
        x = self.dense_1(inputs)
        return self.dense_2(x)

# Instantiate the model.
mlp = MLP()
# Necessary to create the model's state.
# The model doesn't have a state until it's called at least once.
_ = mlp(tf.zeros((1, 32)))
```
#### It validates your model while you're defining it.
In the Functional API, your input specification (shape and dtype) is created in advance (via `Input`), and every time you call a layer, the layer checks that the specification passed to it matches its assumptions, and it will raise a helpful error message if not.
This guarantees that any model you can build with the Functional API will run. All debugging (other than convergence-related debugging) will happen statically during the model construction, and not at execution time. This is similar to typechecking in a compiler.
#### Your Functional model is plottable and inspectable.
You can plot the model as a graph, and you can easily access intermediate nodes in this graph -- for instance, to extract and reuse the activations of intermediate layers, as we saw in a previous example:
```python
features_list = [layer.output for layer in vgg19.layers]
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
```
#### Your Functional model can be serialized or cloned.
Because a Functional model is a data structure rather than a piece of code, it is safely serializable and can be saved as a single file that allows you to recreate the exact same model without having access to any of the original code. See our [saving and serialization guide](./save_and_serialize.ipynb) for more details.
### Here are the weaknesses of the Functional API:
#### It does not support dynamic architectures.
The Functional API treats models as DAGs of layers. This is true for most deep learning architectures, but not all: for instance, recursive networks or Tree RNNs do not follow this assumption and cannot be implemented in the Functional API.
#### Sometimes, you just need to write everything from scratch.
When writing advanced architectures, you may want to do things that are outside the scope of "defining a DAG of layers": for instance, you may want to expose multiple custom training and inference methods on your model instance. This requires subclassing.
---
To dive more in-depth into the differences between the Functional API and Model subclassing, you can read [What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://medium.com/tensorflow/what-are-symbolic-and-imperative-apis-in-tensorflow-2-0-dfccecb01021).
## Mix-and-matching different API styles
Importantly, choosing between the Functional API or Model subclassing isn't a binary decision that restricts you to one category of models. All models in the tf.keras API can interact with each, whether they're Sequential models, Functional models, or subclassed Models/Layers written from scratch.
You can always use a Functional model or Sequential model as part of a subclassed Model/Layer:
```
units = 32
timesteps = 10
input_dim = 5

# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)


class CustomRNN(layers.Layer):
    """A hand-written recurrent loop that reuses the Functional model
    defined above as its output classifier."""

    def __init__(self):
        super(CustomRNN, self).__init__()
        self.units = units  # NOTE: captured from the enclosing scope
        self.projection_1 = layers.Dense(units=units, activation='tanh')
        self.projection_2 = layers.Dense(units=units, activation='tanh')
        # Our previously-defined Functional model
        self.classifier = model

    def call(self, inputs):
        outputs = []
        # Requires a static batch size: inputs.shape[0] must be known here.
        state = tf.zeros(shape=(inputs.shape[0], self.units))
        for t in range(inputs.shape[1]):
            x = inputs[:, t, :]
            h = self.projection_1(x)
            # Simple recurrence: project the previous state and add it in.
            y = h + self.projection_2(state)
            state = y
            outputs.append(y)
        features = tf.stack(outputs, axis=1)
        print(features.shape)  # debug output kept from the original guide
        return self.classifier(features)


rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, timesteps, input_dim)))
```
Inversely, you can use any subclassed Layer or Model in the Functional API as long as it implements a `call` method that follows one of the following patterns:
- `call(self, inputs, **kwargs)` where `inputs` is a tensor or a nested structure of tensors (e.g. a list of tensors), and where `**kwargs` are non-tensor arguments (non-inputs).
- `call(self, inputs, training=None, **kwargs)` where `training` is a boolean indicating whether the layer should behave in training mode or in inference mode.
- `call(self, inputs, mask=None, **kwargs)` where `mask` is a boolean mask tensor (useful for RNNs, for instance).
- `call(self, inputs, training=None, mask=None, **kwargs)` -- of course you can have both masking and training-specific behavior at the same time.
In addition, if you implement the `get_config` method on your custom Layer or Model, the Functional models you create with it will still be serializable and clonable.
Here's a quick example where we use a custom RNN written from scratch in a Functional model:
```
units = 32
timesteps = 10
input_dim = 5
batch_size = 16


class CustomRNN(layers.Layer):
    """A custom RNN written from scratch, usable inside a Functional model
    because its call() follows the `call(self, inputs)` pattern."""

    def __init__(self):
        super(CustomRNN, self).__init__()
        self.units = units  # NOTE: captured from the enclosing scope
        self.projection_1 = layers.Dense(units=units, activation='tanh')
        self.projection_2 = layers.Dense(units=units, activation='tanh')
        self.classifier = layers.Dense(1)

    def call(self, inputs):
        outputs = []
        # Requires a static batch size: inputs.shape[0] must be known here.
        state = tf.zeros(shape=(inputs.shape[0], self.units))
        for t in range(inputs.shape[1]):
            x = inputs[:, t, :]
            h = self.projection_1(x)
            # Simple recurrence: project the previous state and add it in.
            y = h + self.projection_2(state)
            state = y
            outputs.append(y)
        features = tf.stack(outputs, axis=1)
        return self.classifier(features)

# Note that we specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when we create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)

rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, 10, 5)))
```
This concludes our guide on the Functional API.
Now you have at your fingertips a powerful set of tools for building deep learning models.
| github_jupyter |
# Detect sequential data
> Marcos Duarte
> Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/))
> Federal University of ABC, Brazil
The function `detect_seq.py` detects initial and final indices of sequential data identical to parameter `value` (default = 0) in the 1D numpy array_like `x`.
Use parameter `min_seq` to set the minimum number of sequential values to detect (default = 1).
The signature of `detect_seq.py` is:
```python
inds = detect_seq(x, value=0, min_seq=1, show=False, ax=None)
```
Let's see how `detect_seq.py` works; first let's import the necessary Python libraries and configure the environment:
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import sys
sys.path.insert(1, r'./../functions') # add to pythonpath
from detect_seq import detect_seq
```
Let's run the function examples:
```
>>> x = [1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0]
>>> detect_seq(x)
```
There is an option to plot the results:
```
>>> detect_seq(x, value=0, min_seq=2, show=True)
```
## Function `detect_seq.py`
```
# %load ./../functions/detect_seq.py
"""Detect initial and final indices of sequential data identical to value."""
import numpy as np
__author__ = 'Marcos Duarte, https://github.com/demotu/BMC'
__version__ = "1.0.0"
__license__ = "MIT"
def detect_seq(x, value=0, min_seq=1, show=False, ax=None):
    """Find start and end indices of runs of `value` in `x`.

    Scans the 1D array_like `x` for maximal runs of elements equal to
    parameter `value` and returns their initial and final (inclusive)
    indices. Runs shorter than `min_seq` are discarded.

    Parameters
    ----------
    x : 1D numpy array_like
        Data to scan for runs.
    value : number, optional (default = 0)
        The value whose runs are detected.
    min_seq : integer, optional (default = 1)
        Only runs with at least this many elements are kept.
    show : bool, optional (default = False)
        Show plot (True) or not (False).
    ax : matplotlib object, optional (default = None)
        Matplotlib axis object where to plot.

    Returns
    -------
    inds : 2D numpy array [indi, indf]
        One row [start, stop] (both inclusive) per detected run.

    References
    ----------
    .. [1] http://nbviewer.jupyter.org/github/demotu/BMC/blob/master/notebooks/detect_seq.ipynb

    Examples
    --------
    >>> import numpy as np
    >>> x = [1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0]
    >>> detect_seq(x)
    >>> inds = detect_seq(x, value=0, min_seq=2, show=True)
    """
    # Pad with zeros so runs touching either end of the array get edges too.
    hits = np.concatenate(([0], np.equal(x, value), [0]))
    edges = np.abs(np.diff(hits))
    # Rising/falling edges come in pairs: [start, stop) of each run.
    bounds = np.where(edges == 1)[0].reshape(-1, 2)
    if min_seq > 1:
        run_lengths = np.diff(bounds, axis=1)
        bounds = bounds[np.where(run_lengths >= min_seq)[0]]
    # Convert the exclusive stop index into an inclusive one.
    bounds[:, 1] -= 1
    if show:
        _plot(x, value, min_seq, ax, bounds)
    return bounds
def _plot(x, value, min_seq, ax, inds):
    """Plot results of the detect_seq function, see its help."""
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        # Plotting is best-effort: report and return rather than crash.
        print('matplotlib is not available.')
    else:
        x = np.asarray(x)
        if ax is None:
            _, ax = plt.subplots(1, 1, figsize=(8, 4))
        if inds.size:
            # Draw each detected run in red; single-sample runs as a marker.
            for (indi, indf) in inds:
                if indi == indf:
                    ax.plot(indf, x[indf], 'ro', mec='r', ms=6)
                else:
                    ax.plot(range(indi, indf+1), x[indi:indf+1], 'r', lw=1)
                    ax.axvline(x=indi, color='b', lw=1, ls='--')
                    ax.axvline(x=indf, color='b', lw=1, ls='--')
            # Build the complementary intervals (data outside the runs):
            # pair each run's end with the next run's start, plus the two
            # array ends, and draw those segments in black.
            inds = np.vstack((np.hstack((0, inds[:, 1])),
                              np.hstack((inds[:, 0], x.size-1)))).T
            for (indi, indf) in inds:
                ax.plot(range(indi, indf+1), x[indi:indf+1], 'k', lw=1)
        else:
            # No runs detected: plot the whole signal in black.
            ax.plot(x, 'k', lw=1)
        ax.set_xlim(-.02*x.size, x.size*1.02-1)
        # Ignore NaN/inf when computing y-limits; pad by 10% of the range.
        ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
        yrange = ymax - ymin if ymax > ymin else 1
        ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
        text = 'Value=%.3g, minimum number=%d'
        ax.set_title(text % (value, min_seq))
        plt.show()
```
| github_jupyter |
**Chapter 4 – Training Linear Models**
_This notebook contains all the sample code and solutions to the exercises in chapter 4._
# Setup
First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
```
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "training_linear_models"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as a 300-dpi PNG.

    The file is written to <PROJECT_ROOT_DIR>/images/<CHAPTER_ID>/<fig_id>.png
    (PROJECT_ROOT_DIR and CHAPTER_ID are module-level globals).

    Parameters
    ----------
    fig_id : str
        Base name of the output file (without extension).
    tight_layout : bool, optional (default = True)
        If True, call plt.tight_layout() before saving.
    """
    directory = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
    # Fix: create the target directory if needed, otherwise plt.savefig
    # raises an IOError on a fresh checkout.  (Guarded check instead of
    # makedirs(exist_ok=True) to stay Python 2 compatible, since this
    # notebook explicitly supports both Python 2 and 3.)
    if not os.path.isdir(directory):
        os.makedirs(directory)
    path = os.path.join(directory, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
```
# Linear regression using the Normal Equation
```
import numpy as np

# Generate noisy linear data: y = 4 + 3*x + Gaussian noise.
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)

plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([0, 2, 0, 15])
save_fig("generated_data_plot")
plt.show()

X_b = np.c_[np.ones((100, 1)), X]  # add x0 = 1 to each instance
# Normal Equation: theta = (X^T X)^(-1) X^T y
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
theta_best

# Predict at the two extremes x=0 and x=2 to draw the fitted line.
X_new = np.array([[0], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new]  # add x0 = 1 to each instance
y_predict = X_new_b.dot(theta_best)
y_predict

plt.plot(X_new, y_predict, "r-")
plt.plot(X, y, "b.")
plt.axis([0, 2, 0, 15])
plt.show()
```
The figure in the book actually corresponds to the following code, with a legend and axis labels:
```
plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions")
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 2, 0, 15])
save_fig("linear_model_predictions")
plt.show()
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.intercept_, lin_reg.coef_
lin_reg.predict(X_new)
```
# Linear regression using batch gradient descent
```
# Batch gradient descent on the same linear data.
eta = 0.1          # learning rate
n_iterations = 1000
m = 100            # number of training instances

theta = np.random.randn(2,1)  # random initialization

for iteration in range(n_iterations):
    # MSE gradient computed over the *full* training set each step.
    gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
    theta = theta - eta * gradients

theta
X_new_b.dot(theta)
theta_path_bgd = []  # filled by plot_gradient_descent when theta_path is passed

def plot_gradient_descent(theta, eta, theta_path=None):
    """Run 1000 steps of batch GD from `theta` with learning rate `eta`,
    plotting the first 10 prediction lines (uses globals X, y, X_b,
    X_new, X_new_b). If `theta_path` is given, every theta is appended."""
    m = len(X_b)
    plt.plot(X, y, "b.")
    n_iterations = 1000
    for iteration in range(n_iterations):
        if iteration < 10:
            y_predict = X_new_b.dot(theta)
            # The very first (random) line is drawn red/dashed.
            style = "b-" if iteration > 0 else "r--"
            plt.plot(X_new, y_predict, style)
        gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
        theta = theta - eta * gradients
        if theta_path is not None:
            theta_path.append(theta)
    plt.xlabel("$x_1$", fontsize=18)
    plt.axis([0, 2, 0, 15])
    plt.title(r"$\eta = {}$".format(eta), fontsize=16)
np.random.seed(42)
theta = np.random.randn(2,1)  # random initialization

# Compare batch gradient descent with three learning rates side by side;
# the middle run (eta=0.1) also records its parameter path in
# theta_path_bgd for the comparison plot further below.
# Fix: the original cell ran this entire figure twice back-to-back; the
# second, identical run was redundant and appended a duplicate copy of the
# path to theta_path_bgd, doubling the "Batch" trace in the later
# gradient_descent_paths_plot.
plt.figure(figsize=(10,4))
plt.subplot(131); plot_gradient_descent(theta, eta=0.02)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(132); plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd)
plt.subplot(133); plot_gradient_descent(theta, eta=0.5)
save_fig("gradient_descent_plot")
plt.show()
```
# Stochastic Gradient Descent
```
theta_path_sgd = []
m = len(X_b)
np.random.seed(42)

n_epochs = 50
t0, t1 = 5, 50  # learning schedule hyperparameters

def learning_schedule(t):
    """Decaying learning rate: eta(t) = t0 / (t + t1), using globals t0, t1."""
    return t0 / (t + t1)

theta = np.random.randn(2,1)  # random initialization

for epoch in range(n_epochs):
    for i in range(m):
        if epoch == 0 and i < 20:                    # not shown in the book
            y_predict = X_new_b.dot(theta)           # not shown
            style = "b-" if i > 0 else "r--"         # not shown
            plt.plot(X_new, y_predict, style)        # not shown
        # Pick one training instance at random; step on its gradient only.
        random_index = np.random.randint(m)
        xi = X_b[random_index:random_index+1]
        yi = y[random_index:random_index+1]
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        eta = learning_schedule(epoch * m + i)
        theta = theta - eta * gradients
        theta_path_sgd.append(theta)                 # not shown

plt.plot(X, y, "b.")                                 # not shown
plt.xlabel("$x_1$", fontsize=18)                     # not shown
plt.ylabel("$y$", rotation=0, fontsize=18)           # not shown
plt.axis([0, 2, 0, 15])                              # not shown
save_fig("sgd_plot")                                 # not shown
plt.show()                                           # not shown

theta

from sklearn.linear_model import SGDRegressor
# NOTE(review): `n_iter` was renamed `max_iter` in scikit-learn 0.19+;
# this call targets the older API -- verify against the pinned version.
sgd_reg = SGDRegressor(n_iter=50, penalty=None, eta0=0.1, random_state=42)
sgd_reg.fit(X, y.ravel())
sgd_reg.intercept_, sgd_reg.coef_
```
# Mini-batch gradient descent
```
theta_path_mgd = []

n_iterations = 50
minibatch_size = 20

np.random.seed(42)
theta = np.random.randn(2,1)  # random initialization

t0, t1 = 10, 1000

def learning_schedule(t):
    """Decaying learning rate: eta(t) = t0 / (t + t1), using globals t0, t1."""
    return t0 / (t + t1)

t = 0
for epoch in range(n_iterations):
    # Reshuffle the training set at the start of every epoch.
    shuffled_indices = np.random.permutation(m)
    X_b_shuffled = X_b[shuffled_indices]
    y_shuffled = y[shuffled_indices]
    for i in range(0, m, minibatch_size):
        t += 1
        xi = X_b_shuffled[i:i+minibatch_size]
        yi = y_shuffled[i:i+minibatch_size]
        # Gradient over the current mini-batch only.
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        eta = learning_schedule(t)
        theta = theta - eta * gradients
        theta_path_mgd.append(theta)

theta

theta_path_bgd = np.array(theta_path_bgd)
theta_path_sgd = np.array(theta_path_sgd)
theta_path_mgd = np.array(theta_path_mgd)

# Compare the three descent trajectories in (theta_0, theta_1) space.
plt.figure(figsize=(7,4))
plt.plot(theta_path_sgd[:, 0], theta_path_sgd[:, 1], "r-s", linewidth=1, label="Stochastic")
plt.plot(theta_path_mgd[:, 0], theta_path_mgd[:, 1], "g-+", linewidth=2, label="Mini-batch")
plt.plot(theta_path_bgd[:, 0], theta_path_bgd[:, 1], "b-o", linewidth=3, label="Batch")
plt.legend(loc="upper left", fontsize=16)
plt.xlabel(r"$\theta_0$", fontsize=20)
plt.ylabel(r"$\theta_1$ ", fontsize=20, rotation=0)
plt.axis([2.5, 4.5, 2.3, 3.9])
save_fig("gradient_descent_paths_plot")
plt.show()
```
# Polynomial regression
```
import numpy as np
import numpy.random as rnd
np.random.seed(42)
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([-3, 3, 0, 10])
save_fig("quadratic_data_plot")
plt.show()
from sklearn.preprocessing import PolynomialFeatures
poly_features = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly_features.fit_transform(X)
X[0]
X_poly[0]
lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.intercept_, lin_reg.coef_
X_new=np.linspace(-3, 3, 100).reshape(100, 1)
X_new_poly = poly_features.transform(X_new)
y_new = lin_reg.predict(X_new_poly)
plt.plot(X, y, "b.")
plt.plot(X_new, y_new, "r-", linewidth=2, label="Predictions")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.axis([-3, 3, 0, 10])
save_fig("quadratic_predictions_plot")
plt.show()
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
for style, width, degree in (("g-", 1, 300), ("b--", 2, 2), ("r-+", 2, 1)):
polybig_features = PolynomialFeatures(degree=degree, include_bias=False)
std_scaler = StandardScaler()
lin_reg = LinearRegression()
polynomial_regression = Pipeline((
("poly_features", polybig_features),
("std_scaler", std_scaler),
("lin_reg", lin_reg),
))
polynomial_regression.fit(X, y)
y_newbig = polynomial_regression.predict(X_new)
plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width)
plt.plot(X, y, "b.", linewidth=3)
plt.legend(loc="upper left")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([-3, 3, 0, 10])
save_fig("high_degree_polynomials_plot")
plt.show()
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y):
    """Plot train/validation RMSE as a function of training-set size.

    Splits (X, y) 80/20, then for each m retrains `model` from scratch on
    the first m training examples and records MSE on those m examples and
    on the full validation set.
    """
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
    train_errors, val_errors = [], []
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))
        val_errors.append(mean_squared_error(y_val_predict, y_val))
    # RMSE = sqrt(MSE), plotted against training-set size m.
    plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
    plt.legend(loc="upper right", fontsize=14)   # not shown in the book
    plt.xlabel("Training set size", fontsize=14) # not shown
    plt.ylabel("RMSE", fontsize=14)              # not shown
lin_reg = LinearRegression()
plot_learning_curves(lin_reg, X, y)
plt.axis([0, 80, 0, 3]) # not shown in the book
save_fig("underfitting_learning_curves_plot") # not shown
plt.show() # not shown
from sklearn.pipeline import Pipeline
polynomial_regression = Pipeline((
("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
("lin_reg", LinearRegression()),
))
plot_learning_curves(polynomial_regression, X, y)
plt.axis([0, 80, 0, 3]) # not shown
save_fig("learning_curves_plot") # not shown
plt.show() # not shown
```
# Regularized models
```
from sklearn.linear_model import Ridge
np.random.seed(42)
m = 20
X = 3 * np.random.rand(m, 1)
y = 1 + 0.5 * X + np.random.randn(m, 1) / 1.5
X_new = np.linspace(0, 3, 100).reshape(100, 1)
def plot_model(model_class, polynomial, alphas, **model_kargs):
    """Fit `model_class` once per alpha and plot its predictions over X_new.

    alpha == 0 falls back to plain LinearRegression.  When `polynomial`
    is True, the estimator is wrapped in degree-10 polynomial features +
    standardization.  Uses globals X, y, X_new.
    """
    for alpha, style in zip(alphas, ("b-", "g--", "r:")):
        model = model_class(alpha, **model_kargs) if alpha > 0 else LinearRegression()
        if polynomial:
            # NOTE(review): Pipeline is passed a *tuple* of steps here;
            # newer scikit-learn versions require a list -- verify
            # against the pinned sklearn version.
            model = Pipeline((
                ("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
                ("std_scaler", StandardScaler()),
                ("regul_reg", model),
            ))
        model.fit(X, y)
        y_new_regul = model.predict(X_new)
        lw = 2 if alpha > 0 else 1  # regularized fits are drawn thicker
        plt.plot(X_new, y_new_regul, style, linewidth=lw, label=r"$\alpha = {}$".format(alpha))
    plt.plot(X, y, "b.", linewidth=3)
    plt.legend(loc="upper left", fontsize=15)
    plt.xlabel("$x_1$", fontsize=18)
    plt.axis([0, 3, 0, 4])
plt.figure(figsize=(8,4))
plt.subplot(121)
plot_model(Ridge, polynomial=False, alphas=(0, 10, 100), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(122)
plot_model(Ridge, polynomial=True, alphas=(0, 10**-5, 1), random_state=42)
save_fig("ridge_regression_plot")
plt.show()
from sklearn.linear_model import Ridge
ridge_reg = Ridge(alpha=1, solver="cholesky", random_state=42)
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
sgd_reg = SGDRegressor(penalty="l2", random_state=42)
sgd_reg.fit(X, y.ravel())
sgd_reg.predict([[1.5]])
ridge_reg = Ridge(alpha=1, solver="sag", random_state=42)
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
from sklearn.linear_model import Lasso
plt.figure(figsize=(8,4))
plt.subplot(121)
plot_model(Lasso, polynomial=False, alphas=(0, 0.1, 1), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(122)
plot_model(Lasso, polynomial=True, alphas=(0, 10**-7, 1), tol=1, random_state=42)
save_fig("lasso_regression_plot")
plt.show()
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])
from sklearn.linear_model import ElasticNet
elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5, random_state=42)
elastic_net.fit(X, y)
elastic_net.predict([[1.5]])
np.random.seed(42)
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 2 + X + 0.5 * X**2 + np.random.randn(m, 1)
X_train, X_val, y_train, y_val = train_test_split(X[:50], y[:50].ravel(), test_size=0.5, random_state=10)
poly_scaler = Pipeline((
("poly_features", PolynomialFeatures(degree=90, include_bias=False)),
("std_scaler", StandardScaler()),
))
X_train_poly_scaled = poly_scaler.fit_transform(X_train)
X_val_poly_scaled = poly_scaler.transform(X_val)
sgd_reg = SGDRegressor(n_iter=1,
penalty=None,
eta0=0.0005,
warm_start=True,
learning_rate="constant",
random_state=42)
n_epochs = 500
train_errors, val_errors = [], []
for epoch in range(n_epochs):
sgd_reg.fit(X_train_poly_scaled, y_train)
y_train_predict = sgd_reg.predict(X_train_poly_scaled)
y_val_predict = sgd_reg.predict(X_val_poly_scaled)
train_errors.append(mean_squared_error(y_train_predict, y_train))
val_errors.append(mean_squared_error(y_val_predict, y_val))
best_epoch = np.argmin(val_errors)
best_val_rmse = np.sqrt(val_errors[best_epoch])
plt.annotate('Best model',
xy=(best_epoch, best_val_rmse),
xytext=(best_epoch, best_val_rmse + 1),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.05),
fontsize=16,
)
best_val_rmse -= 0.03 # just to make the graph look better
plt.plot([0, n_epochs], [best_val_rmse, best_val_rmse], "k:", linewidth=2)
plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="Validation set")
plt.plot(np.sqrt(train_errors), "r--", linewidth=2, label="Training set")
plt.legend(loc="upper right", fontsize=14)
plt.xlabel("Epoch", fontsize=14)
plt.ylabel("RMSE", fontsize=14)
save_fig("early_stopping_plot")
plt.show()
from sklearn.base import clone
from copy import deepcopy

# Manual early stopping: train one epoch at a time (warm_start=True means
# fit() continues from the previous weights) and keep the model with the
# lowest validation error.
sgd_reg = SGDRegressor(n_iter=1, warm_start=True, penalty=None,
                       learning_rate="constant", eta0=0.0005, random_state=42)

minimum_val_error = float("inf")
best_epoch = None
best_model = None
for epoch in range(1000):
    sgd_reg.fit(X_train_poly_scaled, y_train)  # continues where it left off
    y_val_predict = sgd_reg.predict(X_val_poly_scaled)
    val_error = mean_squared_error(y_val_predict, y_val)
    if val_error < minimum_val_error:
        minimum_val_error = val_error
        best_epoch = epoch
        # Fix: sklearn.base.clone() copies only the *hyperparameters* and
        # returns an UNFITTED estimator, so the original clone(sgd_reg)
        # never actually saved the best trained model. deepcopy preserves
        # the fitted coefficients.
        best_model = deepcopy(sgd_reg)

best_epoch, best_model
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
t1a, t1b, t2a, t2b = -1, 3, -1.5, 1.5
# ignoring bias term
t1s = np.linspace(t1a, t1b, 500)
t2s = np.linspace(t2a, t2b, 500)
t1, t2 = np.meshgrid(t1s, t2s)
T = np.c_[t1.ravel(), t2.ravel()]
Xr = np.array([[-1, 1], [-0.3, -1], [1, 0.1]])
yr = 2 * Xr[:, :1] + 0.5 * Xr[:, 1:]
J = (1/len(Xr) * np.sum((T.dot(Xr.T) - yr.T)**2, axis=1)).reshape(t1.shape)
N1 = np.linalg.norm(T, ord=1, axis=1).reshape(t1.shape)
N2 = np.linalg.norm(T, ord=2, axis=1).reshape(t1.shape)
t_min_idx = np.unravel_index(np.argmin(J), J.shape)
t1_min, t2_min = t1[t_min_idx], t2[t_min_idx]
t_init = np.array([[0.25], [-1]])
def bgd_path(theta, X, y, l1, l2, core = 1, eta = 0.1, n_iterations = 50):
    """Return the sequence of theta values visited by batch gradient descent.

    Minimizes core * MSE + l1 * |theta|_1 + l2 * |theta|_2^2 starting from
    `theta`; the returned array has n_iterations + 1 entries (the starting
    point is included).
    """
    history = [theta]
    # The MSE-gradient scale factor is loop-invariant.
    scale = core * 2 / len(X)
    for _ in range(n_iterations):
        grad = scale * X.T.dot(X.dot(theta) - y) + l1 * np.sign(theta) + 2 * l2 * theta
        theta = theta - eta * grad
        history.append(theta)
    return np.array(history)
plt.figure(figsize=(12, 8))
for i, N, l1, l2, title in ((0, N1, 0.5, 0, "Lasso"), (1, N2, 0, 0.1, "Ridge")):
JR = J + l1 * N1 + l2 * N2**2
tr_min_idx = np.unravel_index(np.argmin(JR), JR.shape)
t1r_min, t2r_min = t1[tr_min_idx], t2[tr_min_idx]
levelsJ=(np.exp(np.linspace(0, 1, 20)) - 1) * (np.max(J) - np.min(J)) + np.min(J)
levelsJR=(np.exp(np.linspace(0, 1, 20)) - 1) * (np.max(JR) - np.min(JR)) + np.min(JR)
levelsN=np.linspace(0, np.max(N), 10)
path_J = bgd_path(t_init, Xr, yr, l1=0, l2=0)
path_JR = bgd_path(t_init, Xr, yr, l1, l2)
path_N = bgd_path(t_init, Xr, yr, np.sign(l1)/3, np.sign(l2), core=0)
plt.subplot(221 + i * 2)
plt.grid(True)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.contourf(t1, t2, J, levels=levelsJ, alpha=0.9)
plt.contour(t1, t2, N, levels=levelsN)
plt.plot(path_J[:, 0], path_J[:, 1], "w-o")
plt.plot(path_N[:, 0], path_N[:, 1], "y-^")
plt.plot(t1_min, t2_min, "rs")
plt.title(r"$\ell_{}$ penalty".format(i + 1), fontsize=16)
plt.axis([t1a, t1b, t2a, t2b])
plt.subplot(222 + i * 2)
plt.grid(True)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.contourf(t1, t2, JR, levels=levelsJR, alpha=0.9)
plt.plot(path_JR[:, 0], path_JR[:, 1], "w-o")
plt.plot(t1r_min, t2r_min, "rs")
plt.title(title, fontsize=16)
plt.axis([t1a, t1b, t2a, t2b])
for subplot in (221, 223):
plt.subplot(subplot)
plt.ylabel(r"$\theta_2$", fontsize=20, rotation=0)
for subplot in (223, 224):
plt.subplot(subplot)
plt.xlabel(r"$\theta_1$", fontsize=20)
save_fig("lasso_vs_ridge_plot")
plt.show()
```
# Logistic regression
```
t = np.linspace(-10, 10, 100)
sig = 1 / (1 + np.exp(-t))
plt.figure(figsize=(9, 3))
plt.plot([-10, 10], [0, 0], "k-")
plt.plot([-10, 10], [0.5, 0.5], "k:")
plt.plot([-10, 10], [1, 1], "k:")
plt.plot([0, 0], [-1.1, 1.1], "k-")
plt.plot(t, sig, "b-", linewidth=2, label=r"$\sigma(t) = \frac{1}{1 + e^{-t}}$")
plt.xlabel("t")
plt.legend(loc="upper left", fontsize=20)
plt.axis([-10, 10, -0.1, 1.1])
save_fig("logistic_function_plot")
plt.show()
from sklearn import datasets
iris = datasets.load_iris()
list(iris.keys())
print(iris.DESCR)
X = iris["data"][:, 3:] # petal width
y = (iris["target"] == 2).astype(np.int) # 1 if Iris-Virginica, else 0
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(random_state=42)
log_reg.fit(X, y)
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris-Virginica")
```
The figure in the book is actually a bit fancier:
```
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
decision_boundary = X_new[y_proba[:, 1] >= 0.5][0]
plt.figure(figsize=(8, 3))
plt.plot(X[y==0], y[y==0], "bs")
plt.plot(X[y==1], y[y==1], "g^")
plt.plot([decision_boundary, decision_boundary], [-1, 2], "k:", linewidth=2)
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris-Virginica")
plt.text(decision_boundary+0.02, 0.15, "Decision boundary", fontsize=14, color="k", ha="center")
plt.arrow(decision_boundary, 0.08, -0.3, 0, head_width=0.05, head_length=0.1, fc='b', ec='b')
plt.arrow(decision_boundary, 0.92, 0.3, 0, head_width=0.05, head_length=0.1, fc='g', ec='g')
plt.xlabel("Petal width (cm)", fontsize=14)
plt.ylabel("Probability", fontsize=14)
plt.legend(loc="center left", fontsize=14)
plt.axis([0, 3, -0.02, 1.02])
save_fig("logistic_regression_plot")
plt.show()
decision_boundary
log_reg.predict([[1.7], [1.5]])
from sklearn.linear_model import LogisticRegression

X = iris["data"][:, (2, 3)]  # petal length, petal width
# `np.int` was removed in NumPy 1.24; the builtin `int` is the replacement.
y = (iris["target"] == 2).astype(int)
# A very large C effectively disables the L2 regularization.
log_reg = LogisticRegression(C=10**10, random_state=42)
log_reg.fit(X, y)
x0, x1 = np.meshgrid(
np.linspace(2.9, 7, 500).reshape(-1, 1),
np.linspace(0.8, 2.7, 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_proba = log_reg.predict_proba(X_new)
plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs")
plt.plot(X[y==1, 0], X[y==1, 1], "g^")
zz = y_proba[:, 1].reshape(x0.shape)
contour = plt.contour(x0, x1, zz, cmap=plt.cm.brg)
left_right = np.array([2.9, 7])
boundary = -(log_reg.coef_[0][0] * left_right + log_reg.intercept_[0]) / log_reg.coef_[0][1]
plt.clabel(contour, inline=1, fontsize=12)
plt.plot(left_right, boundary, "k--", linewidth=3)
plt.text(3.5, 1.5, "Not Iris-Virginica", fontsize=14, color="b", ha="center")
plt.text(6.5, 2.3, "Iris-Virginica", fontsize=14, color="g", ha="center")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.axis([2.9, 7, 0.8, 2.7])
save_fig("logistic_regression_contour_plot")
plt.show()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
softmax_reg = LogisticRegression(multi_class="multinomial",solver="lbfgs", C=10, random_state=42)
softmax_reg.fit(X, y)
x0, x1 = np.meshgrid(
np.linspace(0, 8, 500).reshape(-1, 1),
np.linspace(0, 3.5, 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_proba = softmax_reg.predict_proba(X_new)
y_predict = softmax_reg.predict(X_new)
zz1 = y_proba[:, 1].reshape(x0.shape)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==2, 0], X[y==2, 1], "g^", label="Iris-Virginica")
plt.plot(X[y==1, 0], X[y==1, 1], "bs", label="Iris-Versicolor")
plt.plot(X[y==0, 0], X[y==0, 1], "yo", label="Iris-Setosa")
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap, linewidth=5)
contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg)
plt.clabel(contour, inline=1, fontsize=12)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="center left", fontsize=14)
plt.axis([0, 7, 0, 3.5])
save_fig("softmax_regression_contour_plot")
plt.show()
softmax_reg.predict([[5, 2]])
softmax_reg.predict_proba([[5, 2]])
```
# Exercise solutions
## 1. to 11.
See appendix A.
## 12. Batch Gradient Descent with early stopping for Softmax Regression
(without using Scikit-Learn)
Let's start by loading the data. We will just reuse the Iris dataset we loaded earlier.
```
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
```
We need to add the bias term for every instance ($x_0 = 1$):
```
X_with_bias = np.c_[np.ones([len(X), 1]), X]
```
And let's set the random seed so the output of this exercise solution is reproducible:
```
np.random.seed(2042)
```
The easiest option to split the dataset into a training set, a validation set and a test set would be to use Scikit-Learn's `train_test_split()` function, but the point of this exercise is to try to understand the algorithms by implementing them manually. So here is one possible implementation:
```
test_ratio = 0.2
validation_ratio = 0.2
total_size = len(X_with_bias)
test_size = int(total_size * test_ratio)
validation_size = int(total_size * validation_ratio)
train_size = total_size - test_size - validation_size
rnd_indices = np.random.permutation(total_size)
X_train = X_with_bias[rnd_indices[:train_size]]
y_train = y[rnd_indices[:train_size]]
X_valid = X_with_bias[rnd_indices[train_size:-test_size]]
y_valid = y[rnd_indices[train_size:-test_size]]
X_test = X_with_bias[rnd_indices[-test_size:]]
y_test = y[rnd_indices[-test_size:]]
```
The targets are currently class indices (0, 1 or 2), but we need target class probabilities to train the Softmax Regression model. Each instance will have target class probabilities equal to 0.0 for all classes except for the target class which will have a probability of 1.0 (in other words, the vector of class probabilities for any given instance is a one-hot vector). Let's write a small function to convert the vector of class indices into a matrix containing a one-hot vector for each instance:
```
def to_one_hot(y):
    """Convert a vector of class indices into an (m, n_classes) one-hot matrix.

    The number of classes is inferred as ``y.max() + 1``, so class indices
    are assumed to be contiguous and zero-based.
    """
    num_classes = y.max() + 1
    num_rows = len(y)
    encoded = np.zeros((num_rows, num_classes))
    encoded[np.arange(num_rows), y] = 1
    return encoded
```
Let's test this function on the first 10 instances:
```
y_train[:10]
to_one_hot(y_train[:10])
```
Looks good, so let's create the target class probabilities matrix for the training set and the test set:
```
Y_train_one_hot = to_one_hot(y_train)
Y_valid_one_hot = to_one_hot(y_valid)
Y_test_one_hot = to_one_hot(y_test)
```
Now let's implement the Softmax function. Recall that it is defined by the following equation:
$\sigma\left(\mathbf{s}(\mathbf{x})\right)_k = \dfrac{\exp\left(s_k(\mathbf{x})\right)}{\sum\limits_{j=1}^{K}{\exp\left(s_j(\mathbf{x})\right)}}$
```
def softmax(logits):
    """Row-wise softmax of a 2-D array of logits.

    Subtracts each row's maximum before exponentiating: this is the standard
    numerically-stable form (the shift cancels in the ratio), preventing
    overflow in ``np.exp`` for large logits.
    """
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    exps = np.exp(shifted)
    exp_sums = np.sum(exps, axis=1, keepdims=True)
    return exps / exp_sums
```
We are almost ready to start training. Let's define the number of inputs and outputs:
```
n_inputs = X_train.shape[1] # == 3 (2 features plus the bias term)
n_outputs = len(np.unique(y_train)) # == 3 (3 iris classes)
```
Now here comes the hardest part: training! Theoretically, it's simple: it's just a matter of translating the math equations into Python code. But in practice, it can be quite tricky: in particular, it's easy to mix up the order of the terms, or the indices. You can even end up with code that looks like it's working but is actually not computing exactly the right thing. When unsure, you should write down the shape of each term in the equation and make sure the corresponding terms in your code match closely. It can also help to evaluate each term independently and print them out. The good news is that you won't have to do this everyday, since all this is well implemented by Scikit-Learn, but it will help you understand what's going on under the hood.
So the equations we will need are the cost function:
$J(\mathbf{\Theta}) =
- \dfrac{1}{m}\sum\limits_{i=1}^{m}\sum\limits_{k=1}^{K}{y_k^{(i)}\log\left(\hat{p}_k^{(i)}\right)}$
And the equation for the gradients:
$\nabla_{\mathbf{\theta}^{(k)}} \, J(\mathbf{\Theta}) = \dfrac{1}{m} \sum\limits_{i=1}^{m}{ \left ( \hat{p}^{(i)}_k - y_k^{(i)} \right ) \mathbf{x}^{(i)}}$
Note that $\log\left(\hat{p}_k^{(i)}\right)$ may not be computable if $\hat{p}_k^{(i)} = 0$. So we will add a tiny value $\epsilon$ to $\log\left(\hat{p}_k^{(i)}\right)$ to avoid getting `nan` values.
```
# Plain batch gradient descent for softmax regression (no regularization).
eta = 0.01  # learning rate
n_iterations = 5001
m = len(X_train)  # number of training instances
epsilon = 1e-7  # added inside log() to avoid log(0) -> nan
Theta = np.random.randn(n_inputs, n_outputs)  # one column of parameters per class
for iteration in range(n_iterations):
logits = X_train.dot(Theta)
Y_proba = softmax(logits)
# Cross-entropy loss averaged over instances (summed over classes).
loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))
error = Y_proba - Y_train_one_hot
if iteration % 500 == 0:
print(iteration, loss)
# Gradient of the cross-entropy w.r.t. Theta (see the equation above).
gradients = 1/m * X_train.T.dot(error)
Theta = Theta - eta * gradients
```
And that's it! The Softmax model is trained. Let's look at the model parameters:
```
Theta
```
Let's make predictions for the validation set and check the accuracy score:
```
logits = X_valid.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
accuracy_score = np.mean(y_predict == y_valid)
accuracy_score
```
Well, this model looks pretty good. For the sake of the exercise, let's add a bit of $\ell_2$ regularization. The following training code is similar to the one above, but the loss now has an additional $\ell_2$ penalty, and the gradients have the proper additional term (note that we don't regularize the first element of `Theta` since this corresponds to the bias term). Also, let's try increasing the learning rate `eta`.
```
eta = 0.1
n_iterations = 5001
m = len(X_train)
epsilon = 1e-7
alpha = 0.1 # regularization hyperparameter
Theta = np.random.randn(n_inputs, n_outputs)
for iteration in range(n_iterations):
logits = X_train.dot(Theta)
Y_proba = softmax(logits)
xentropy_loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))
l2_loss = 1/2 * np.sum(np.square(Theta[1:]))
loss = xentropy_loss + alpha * l2_loss
error = Y_proba - Y_train_one_hot
if iteration % 500 == 0:
print(iteration, loss)
gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, n_inputs]), alpha * Theta[1:]]
Theta = Theta - eta * gradients
```
Because of the additional $\ell_2$ penalty, the loss seems greater than earlier, but perhaps this model will perform better? Let's find out:
```
logits = X_valid.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
accuracy_score = np.mean(y_predict == y_valid)
accuracy_score
```
Cool, perfect accuracy! We probably just got lucky with this validation set, but still, it's pleasant.
Now let's add early stopping. For this we just need to measure the loss on the validation set at every iteration and stop when the error starts growing.
```
eta = 0.1
n_iterations = 5001
m = len(X_train)
epsilon = 1e-7
alpha = 0.1 # regularization hyperparameter
best_loss = np.infty
Theta = np.random.randn(n_inputs, n_outputs)
for iteration in range(n_iterations):
logits = X_train.dot(Theta)
Y_proba = softmax(logits)
xentropy_loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))
l2_loss = 1/2 * np.sum(np.square(Theta[1:]))
loss = xentropy_loss + alpha * l2_loss
error = Y_proba - Y_train_one_hot
gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, n_inputs]), alpha * Theta[1:]]
Theta = Theta - eta * gradients
logits = X_valid.dot(Theta)
Y_proba = softmax(logits)
xentropy_loss = -np.mean(np.sum(Y_valid_one_hot * np.log(Y_proba + epsilon), axis=1))
l2_loss = 1/2 * np.sum(np.square(Theta[1:]))
loss = xentropy_loss + alpha * l2_loss
if iteration % 500 == 0:
print(iteration, loss)
if loss < best_loss:
best_loss = loss
else:
print(iteration - 1, best_loss)
print(iteration, loss, "early stopping!")
break
logits = X_valid.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
accuracy_score = np.mean(y_predict == y_valid)
accuracy_score
```
Still perfect, but faster.
Now let's plot the model's predictions on the whole dataset:
```
x0, x1 = np.meshgrid(
np.linspace(0, 8, 500).reshape(-1, 1),
np.linspace(0, 3.5, 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
X_new_with_bias = np.c_[np.ones([len(X_new), 1]), X_new]
logits = X_new_with_bias.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
zz1 = Y_proba[:, 1].reshape(x0.shape)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==2, 0], X[y==2, 1], "g^", label="Iris-Virginica")
plt.plot(X[y==1, 0], X[y==1, 1], "bs", label="Iris-Versicolor")
plt.plot(X[y==0, 0], X[y==0, 1], "yo", label="Iris-Setosa")
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap, linewidth=5)
contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg)
plt.clabel(contour, inline=1, fontsize=12)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 7, 0, 3.5])
plt.show()
```
And now let's measure the final model's accuracy on the test set:
```
logits = X_test.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
accuracy_score = np.mean(y_predict == y_test)
accuracy_score
```
Our perfect model turns out to have slight imperfections. This variability is likely due to the very small size of the dataset: depending on how you sample the training set, validation set and the test set, you can get quite different results. Try changing the random seed and running the code again a few times, you will see that the results will vary.
| github_jupyter |
# TensorBoard
TensorBoard is the tensorflow's visualization tool which can be used to visualize the
computation graph. It can also be used to plot various quantitative metrics and results of
several intermediate calculations. Using tensorboard, we can easily visualize complex
models which would be useful for debugging and also sharing.
Now let us build a basic computation graph and visualize that in tensorboard.
First, let us import the library
```
import tensorflow as tf
```
Next, we initialize the variables
```
a = tf.constant(5)
b = tf.constant(4)
c = tf.multiply(a,b)
d = tf.constant(2)
e = tf.constant(3)
f = tf.multiply(d,e)
g = tf.add(c,f)
```
Now, we will create a tensorflow session, we will write the results of our graph to file
called event file using tf.summary.FileWriter()
```
with tf.Session() as sess:
writer = tf.summary.FileWriter("logs", sess.graph)
print(sess.run(g))
writer.close()
```
In order to run the tensorboard, go to your terminal, locate the working directory and
type
tensorboard --logdir=logs --port=6003
# Adding Scope
Scoping is used to reduce complexity and helps to better understand the model by
grouping the related nodes together, For instance, in the above example, we can break
down our graph into two different groups called computation and result. If you look at the
previous example we can see that nodes a to f perform the computation and node g
calculates the result. So we can group them separately using scopes for easy
understanding. Scoping can be created using tf.name_scope() function.
```
with tf.name_scope("Computation"):
a = tf.constant(5)
b = tf.constant(4)
c = tf.multiply(a,b)
d = tf.constant(2)
e = tf.constant(3)
f = tf.multiply(d,e)
with tf.name_scope("Result"):
g = tf.add(c,f)
```
Looking at the computation scope, we can break it down further into separate parts for an even
better understanding. Say we create a scope named part 1 which has nodes a to c and a
scope named part 2 which has nodes d to f, since parts 1 and 2 are independent of each other.
```
with tf.name_scope("Computation"):
with tf.name_scope("Part1"):
a = tf.constant(5)
b = tf.constant(4)
c = tf.multiply(a,b)
with tf.name_scope("Part2"):
d = tf.constant(2)
e = tf.constant(3)
f = tf.multiply(d,e)
```
Scoping can be better understood by visualizing them in the tensorboard. The complete
code looks like as follows,
```
with tf.name_scope("Computation"):
with tf.name_scope("Part1"):
a = tf.constant(5)
b = tf.constant(4)
c = tf.multiply(a,b)
with tf.name_scope("Part2"):
d = tf.constant(2)
e = tf.constant(3)
f = tf.multiply(d,e)
with tf.name_scope("Result"):
g = tf.add(c,f)
with tf.Session() as sess:
writer = tf.summary.FileWriter("logs", sess.graph)
print(sess.run(g))
writer.close()
```
In order to run the tensorboard, go to your terminal, locate the working directory and
type
tensorboard --logdir=logs --port=6003
If you look at the TensorBoard you can easily understand how scoping helps us to reduce
complexity in understanding by grouping the similar nodes together. Scoping is widely
used while working on a complex projects to better understand the functionality and
dependencies of nodes.
| github_jupyter |
# LassoLars Regression
This Code template is for the regression analysis using a simple LassoLars Regression. It is a lasso model implemented using the LARS algorithm.
### Required Packages
```
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.linear_model import LassoLars
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training .
```
#x_values
features=[]
```
Target feature for prediction.
```
#y_value
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
X=df[features]
Y=df[target]
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
```
def NullClearner(df):
    """Fill missing values in a pandas Series in place.

    Numeric (float64/int64) Series are filled with their mean, any other
    Series with its mode; non-Series inputs are returned untouched.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        fill_value = df.mean()
    else:
        fill_value = df.mode()[0]
    df.fillna(fill_value, inplace=True)
    return df
def EncodeX(df):
    """One-hot encode string/categorical columns of `df` via pandas dummies."""
    encoded = pd.get_dummies(df)
    return encoded
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```
### Model
LassoLars is a lasso model implemented using the LARS algorithm, and unlike the implementation based on coordinate descent, this yields the exact solution, which is piecewise linear as a function of the norm of its coefficients.
### Tuning parameters
> **fit_intercept** -> whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations
> **alpha** -> Constant that multiplies the penalty term. Defaults to 1.0. alpha = 0 is equivalent to an ordinary least square, solved by LinearRegression. For numerical reasons, using alpha = 0 with the LassoLars object is not advised and you should prefer the LinearRegression object.
> **eps** -> The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization.
> **max_iter** -> Maximum number of iterations to perform.
> **positive** -> Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients will not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (alphas_[alphas_ > 0.].min() when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator.
> **precompute** -> Whether to use a precomputed Gram matrix to speed up calculations.
```
model = LassoLars(random_state=123)
model.fit(x_train,y_train)
```
#### Model Accuracy
We will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.
score: The score function returns the coefficient of determination R2 of the prediction.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
> **r2_score**: The **r2_score** function computes the percentage of variability in the target that is explained by our model (the coefficient of determination, R²).
> **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model.
> **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model.
```
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
```
#### Prediction Plot
First, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.
For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.
```
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(x_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
```
#### Creator: Thilakraj Devadiga , Github: [Profile](https://github.com/Thilakraj1998)
| github_jupyter |
```
#default_exp data
#export
from timeseries_fastai.imports import *
from timeseries_fastai.core import *
from fastai.basics import *
from fastai.torch_core import *
from fastai.vision.data import get_grid
```
# Data
> DataBlock API to construct the DataLoaders
```
#hide
from nbdev.showdoc import show_doc
```
We will create a DataBlock to process our UCR datasets
```
ucr_path = untar_data(URLs.UCR)
df_train, df_test = load_df_ucr(ucr_path, 'StarLightCurves')
df_train.head()
x_cols = df_train.columns[slice(0,-1)].to_list()
x_cols[0:5], x_cols[-1]
#export
def TSBlock(cls=TSeries):
"A TimeSeries Block to process one timeseries; uses `cls.create` as the DataBlock type transform"
return TransformBlock(type_tfms=cls.create)
dblock = DataBlock(blocks=(TSBlock, CategoryBlock),
get_x=lambda o: o[x_cols].values.astype(np.float32),
get_y=ColReader('target'),
splitter=RandomSplitter(0.2))
```
A good way to debug the Block is using summary:
```
dblock.summary(df_train)
dls = dblock.dataloaders(df_train, bs=4)
```
The `show_batch` method is not very practical, let's redefine it on the `DataLoader` class
```
dls.show_batch()
```
A handy function to stack `df_train` and `df_valid` together, adds column to know which is which.
```
#export
def stack_train_valid(df_train, df_valid):
    "Stack df_train and df_valid, adds `valid_col`=True/False for df_valid/df_train"
    train_part = df_train.assign(valid_col=False)
    valid_part = df_valid.assign(valid_col=True)
    stacked = pd.concat([train_part, valid_part])
    return stacked.reset_index(drop=True)
```
## DataLoaders
> A custom TSeries DataLoaders class
```
#export
class TSDataLoaders(DataLoaders):
"A TimeSeries DataLoader"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, x_cols=None, label_col=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create a DataLoader from a pandas DataFrame"
# Default target block is classification; pass y_block for regression etc.
y_block = ifnone(y_block, CategoryBlock)
# If valid_col is given, split on that boolean column; otherwise split randomly.
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(TSBlock, y_block),
get_x=lambda o: o[x_cols].values.astype(np.float32),
get_y=ColReader(label_col),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_dfs(cls, df_train, df_valid, path='.', x_cols=None, label_col=None,
y_block=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create a DataLoader from a df_train and df_valid"
# Stack the two frames and reuse from_df, splitting on the added valid_col flag.
df = stack_train_valid(df_train, df_valid)
return cls.from_df(df, path, x_cols=x_cols, valid_col='valid_col', label_col=label_col,
y_block=y_block, item_tfms=item_tfms, batch_tfms=batch_tfms,**kwargs)
```
Overriding the `show_batch` function to add grid spacing.
```
#export
@typedispatch
def show_batch(x: TSeries, y, samples, ctxs=None, max_n=10,rows=None, cols=None, figsize=None, **kwargs):
"Show batch for TSeries objects"
# Build a matplotlib grid with extra vertical spacing, then delegate to the generic show_batch.
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=rows, ncols=cols, add_vert=1, figsize=figsize)
ctxs = show_batch[object](x, y, samples=samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
```
Let's test the DataLoader
```
show_doc(TSDataLoaders.from_dfs)
dls = TSDataLoaders.from_dfs(df_train, df_test, x_cols=x_cols, label_col='target', bs=16, val_bs=64)
dls.show_batch()
```
## Profiling the DataLoader
```
len(dls.valid_ds)
def cycle_dl(dl):
    """Exhaust a dataloader, discarding every (x, y) batch — used only for timing."""
    for _x, _y in dl:
        pass
```
It is pretty slow
```
#slow
%time cycle_dl(dls.valid)
```
# Export -
```
# hide
from nbdev.export import *
notebook2script()
```
| github_jupyter |
# Fine-Tuning *RoBERTa-small-bulgarian* For Named-Entity Recognition
```
%%capture
!pip install transformers==3.0.2
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
# Get the dataset
!git clone https://github.com/usmiva/bg-ner
```
## Data Preprocessing
```
from transformers import RobertaTokenizerFast
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
import numpy as np
import string
import re
MODEL = "iarfmoose/roberta-small-bulgarian"
MAX_LEN = 128
BATCH_SIZE = 16
tokenizer = RobertaTokenizerFast.from_pretrained(MODEL, max_len=MAX_LEN)
# Mapping from BIO-style NER tag names to integer label ids.
tag_to_id = {
    'O': 0,
    'I-PRO': 1,
    'I-PER': 2,
    'I-ORG': 3,
    'I-LOC': 4,
    'I-EVT': 5,
    'B-PRO': 6,
    'B-PER': 7,
    'B-ORG': 8,
    'B-LOC': 9,
    'B-EVT': 10,
}
# Inverse lookup: label id -> tag string.
id_to_tag = {idx: tag for tag, idx in tag_to_id.items()}
def preprocess_data(filepath):
# Parse a NER dataset file and tokenizer-encode every sentence.
# Returns a list of dicts holding input_ids / attention_mask / labels tensors;
# sentences whose tags cannot be aligned with the tokenizer offsets are
# dropped and reported as an error count.
sentences, ner_tags = parse_dataset(filepath)
error_count = 0
data = []
for row in zip(sentences, ner_tags):
encoding = encode_sentence(row[0], row[1])
if encoding:
data.append(encoding)
else:
# encode_sentence returns None when tags and subword offsets don't line up.
error_count += 1
if error_count > 0:
print('Was unable to encode {} examples'.format(error_count))
return data
def parse_dataset(filepath):
    """Read a token-per-line TSV file and group tokens into sentences.

    Each non-empty line holds "word<TAB>tag". A '.' token closes the current
    sentence. Returns (sentences, tags): space-joined sentence strings and a
    parallel list of per-token tag lists. Tokens after the last '.' are dropped.
    """
    with open(filepath, encoding='utf-8') as src:
        rows = [raw.replace('\n', '') for raw in src]
    sentences, tags = [], []
    words, word_tags = [], []
    for row in rows:
        if not row:
            continue
        parts = row.split('\t')
        words.append(parts[0])
        word_tags.append(parts[1])
        if parts[0] == '.':
            sentences.append(' '.join(words))
            tags.append(word_tags)
            words, word_tags = [], []
    return sentences, tags
def encode_sentence(sentence, ner_tags):
# Tokenize one sentence and align its word-level NER tags to subword tokens.
# Returns a dict of squeezed (1-D) tensors, or None when the alignment fails.
sentence = preprocess_punctuation(sentence)
encoded_sentence = tokenizer(
sentence,
max_length=MAX_LEN,
padding='max_length',
truncation=True,
add_special_tokens=True,
return_offsets_mapping=True,  # needed to map subword tokens back to words
return_tensors='pt'
)
# Tags are attached to each word's last subword token; others get -100 (ignored).
encoded_labels = encode_tags_last(ner_tags, encoded_sentence.offset_mapping)
if encoded_labels is not None:
return {
'input_ids': torch.squeeze(encoded_sentence.input_ids),
'attention_mask': torch.squeeze(encoded_sentence.attention_mask),
'labels': encoded_labels
}
else:
return None
def preprocess_punctuation(text):
    """Normalize punctuation the tokenizer mishandles: replace '©' with '-'."""
    return text.translate(str.maketrans({'©': '-'}))
# encodes labels in the first token position of each word
# All other positions keep the sentinel -100, which downstream evaluation
# filters out (and which is the default ignore index for cross-entropy loss).
# Returns None when the number of word starts doesn't match the number of tags.
def encode_tags_first(ner_tags, offset_mapping):
offset_mapping = torch.squeeze(offset_mapping)  # drop the batch dimension
labels = [tag_to_id[tag] for tag in ner_tags]
encoded_labels = np.ones(len(offset_mapping), dtype=int) * -100  # default: ignored
for i in range(1, len(offset_mapping)):
# Token i starts a new word when the previous token is special/padding,
# or when the previous token's end offset doesn't touch this token's start.
if ignore_mapping(offset_mapping[i-1]) or offset_mapping[i-1][-1] != offset_mapping[i][0]:
if not ignore_mapping(offset_mapping[i]):
try:
encoded_labels[i] = labels.pop(0)
except(IndexError):
# More word starts than tags: alignment failed.
return None
if len(labels) > 0:
# Fewer word starts than tags: alignment failed.
return None
return torch.tensor(encoded_labels)
# encodes labels in the last token position of each word
# All other positions keep the sentinel -100, which downstream evaluation
# filters out (and which is the default ignore index for cross-entropy loss).
# Returns None when the number of word ends doesn't match the number of tags.
def encode_tags_last(ner_tags, offset_mapping):
offset_mapping = torch.squeeze(offset_mapping)  # drop the batch dimension
labels = [tag_to_id[tag] for tag in ner_tags]
encoded_labels = np.ones(len(offset_mapping), dtype=int) * -100  # default: ignored
for i in range(1, len(offset_mapping) - 1):
# Token i ends a word when its end offset doesn't touch the next token's start.
if offset_mapping[i][1] != offset_mapping[i+1][0]:
if not ignore_mapping(offset_mapping[i]):
try:
encoded_labels[i] = labels.pop(0)
except(IndexError):
# More word ends than tags: alignment failed.
return None
if len(labels) > 0:
# Fewer word ends than tags: alignment failed.
return None
return torch.tensor(encoded_labels)
def ignore_mapping(mapping):
return mapping[0] == 0 and mapping[1] == 0
class NERDataset(Dataset):
# Thin Dataset wrapper that moves each example's tensors to the global `device`.
def __init__(self, data):
self.data = data  # list of dicts with input_ids / attention_mask / labels tensors
def __getitem__(self, index):
item = self.data[index]
# NOTE(review): this mutates the stored item in place, so tensors remain on
# `device` after first access — confirm this is intentional.
item['input_ids'] = item['input_ids'].to(device)
item['attention_mask'] = item['attention_mask'].to(device)
item['labels'] = item['labels'].to(device)
return item
def __len__(self):
return len(self.data)
train_data = preprocess_data('bg-ner/train.txt')
train_set = NERDataset(train_data)
test_data = preprocess_data('bg-ner/test.txt')
dev_data, test_data = train_test_split(test_data, train_size=0.5, test_size=0.5)
dev_set = NERDataset(dev_data)
test_set = NERDataset(test_data)
train_loader = DataLoader(train_set, shuffle=True, batch_size=BATCH_SIZE)
dev_loader = DataLoader(dev_set, shuffle=False, batch_size=BATCH_SIZE)
test_loader = DataLoader(test_set, shuffle=False, batch_size=BATCH_SIZE)
# 01234567890123456789
sentence = 'Кучето ми е гладно .'
encoded_sentence = tokenizer(
sentence,
add_special_tokens=True,
return_offsets_mapping=True,
)
print(encoded_sentence['offset_mapping'])
```
## Model
```
from transformers import RobertaForTokenClassification
learning_rate = 1e-5
model = RobertaForTokenClassification.from_pretrained(
MODEL,
num_labels=len(tag_to_id)
)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
model.to(device)
```
## Training
```
LOG_INTERVAL = round(len(train_loader) / 10)
def train(epoch):
# Run one full pass over train_loader, logging the mean loss every LOG_INTERVAL batches.
model.train()
total_loss = 0
for batch_index, batch in enumerate(train_loader):
model.zero_grad()
output = model(**batch)
loss = output[0]  # with labels in the batch, the model returns the loss first
loss.backward()
optimizer.step()
total_loss += loss.item()
if batch_index % LOG_INTERVAL == 0 and batch_index > 0:
current_loss = total_loss / LOG_INTERVAL
print('| epoch {:3d} | '
'{:5d}/{:5d} batches | '
'loss {:5.2f}'.format(
epoch,
batch_index, len(train_loader),
current_loss))
total_loss = 0  # reset the running loss after each log line
def test(data_loader):
    """Return token-level accuracy (percent) of `model` on a loader.

    Predictions are the argmax over the logits; positions whose label is
    -100 (the ignore index used for sub-word/special tokens) are masked
    out of both predictions and labels before scoring.

    Parameters
    ----------
    data_loader : DataLoader
        Batches of dicts accepted by model(**batch), including 'labels'.

    Returns
    -------
    float
        Accuracy in [0, 100]; 0.0 if the loader yields no scorable tokens.
    """
    model.eval()
    total_score = 0
    total_len = 0
    with torch.no_grad():
        for batch in data_loader:
            output = model(**batch)
            # output[1] holds the logits when a loss is also returned.
            preds = np.argmax(output[1].cpu(), axis=2)
            mask = batch['labels'] != -100
            preds = preds[mask]
            labels = batch['labels'][mask]
            # Accumulate plain ints: the original kept a tensor running
            # sum and crashed on an empty loader (`int.item()`).
            total_score += preds.eq(labels.cpu()).sum().item()
            total_len += len(labels)
    if total_len == 0:
        # Empty loader / fully-masked labels: avoid ZeroDivisionError.
        return 0.0
    return (total_score / total_len) * 100
# Fine-tune for a fixed number of epochs. Dev accuracy is reported once
# before training (sanity baseline for the untrained head) and after
# every epoch; the held-out test set is evaluated exactly once at the end.
EPOCHS = 5
accuracy = test(dev_loader)
print('| Pretraining Accuracy: {:.2f}%\n'.format(accuracy))
for epoch in range(1, EPOCHS + 1):
    train(epoch)
    accuracy = test(dev_loader)
    print('| epoch {} | Accuracy: {:.2f}%\n'.format(epoch, accuracy))
accuracy = test(test_loader)
print('| Accuracy on test set: {:.2f}%'.format(accuracy))
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import YouTubeVideo
from functools import partial

# Pre-configured YouTube embed used throughout the lesson: fixed
# 640x360 frame, minimal branding, keyboard enabled, no autoplay and
# no related-video suggestions.
YouTubeVideo_formato = partial(YouTubeVideo, modestbranding=1, disablekb=0,
                               width=640, height=360, autoplay=0, rel=0, showinfo=0)
```
(unit2-linear-2)=
# Regresión lineal, Sobreajuste y Validación
## Introducción
Una **regresión** consiste en **ajustar** un modelo paramétrico del tipo
$$
f_\theta: x \rightarrow y
$$
El ajuste de este modelo nos permite
- Entender como dos o más variables se relacionan
- Predecir una variable en función de otras
Estos son los objetivos del **análisis de regresión**
Hablamos particularmente de **regresión lineal** cuando el modelo $f_\theta$ es **lineal en sus parámetros**. Es decir que lo podemos escribir como
$$
\begin{align}
f_\theta(x) &= \langle x, \theta \rangle \nonumber \\
&= \begin{pmatrix} x_1 & x_2 & \ldots & x_M \end{pmatrix} \begin{pmatrix} \theta_1 \\ \theta_2 \\ \vdots \\ \theta_M \end{pmatrix}
\end{align}
$$
donde $x$ representa los atributos (variables independientes) y $\theta$ los parámetros a ajustar. Ajustar el modelo se refiere a encontrar el valor óptimo de $\theta$. Como vimos la clase pasada
- Si nuestro sistema es cuadrado podemos usar inversión
- Si nuestro sistema es rectangular podemos usar **mínimos cuadrados**
El ajuste del modelo se realiza en base a **datos**, que podemos visualizar como un conjunto de $N$ tuplas $(\vec x_i, y_i)$ con $i=1,2,\ldots,N$. Por otro lado la cantidad de parámetros del modelo es $M$, es decir el largo del vector $\theta$.
Luego
- Cada tupla o ejemplo aporta una ecuación al sistema
- Cada parámetro aporta una incognita al sistema
A continuación generalizaremos algunos conceptos vistos en {ref}`unit2-linear-1`
## Regresión lineal multivariada
En la lección anterior ajustamos el modelo
$$
f_\theta(x) = \theta_0 + \theta_1 x,
$$
con dos parámetros y una variable independiente. El modelo anterior corresponde al modelo lineal más básico: una recta.
En un caso más general podríamos querer ajustar un modelo con un $x$ multidimensional
Si tenemos $d$ atributos podemos construir un vector $\vec x = (x_1, x_2, \ldots, x_d)$ y considerar el siguiente modelo lineal
$$
\begin{align}
f_\theta(\vec x) &= \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \ldots \theta_d x_d \nonumber \\
&= \theta_0 + \sum_{k=1}^d \theta_k x_k \nonumber \\
\end{align}
$$
Esto corresponde a ajustar un **hiperplano**
### Ejercicio práctico
Para los datos de consumo de helados, encuentre los parámetros del **hiperplano** que ajuste mejor los datos
$$
\text{consumo} = \theta_0 + \theta_1 \cdot \text{temperatura} + \theta_2 \cdot \text{precio}
$$
- Identifique y construya el vector $b$ y la matriz $A$ ¿Cuánto vale $N$ y $M$?
- ¿Es este un sistema cuadrado o rectangular? ¿ Es sobre o infra-determinado?
- Encuentre $\theta$ que minimiza la suma de errores cuadráticos
- Grafique el plano encontrado
**Solución paso a paso con comentarios**
```
# Embedded video: step-by-step solution walkthrough for this exercise.
YouTubeVideo_formato('h6KrwiQv5qU')
```
## Modelos lineales en sus parámetros pero no en sus entradas
Una regresión lineal puede considerar transformaciones "no lineales" sobre la entrada $x$. Llamaremos función base $\phi_j(\cdot)$ a estas transformaciones.
Luego el modelo más general de regresión lineal en sus parámetros es
$$
y = f_\theta (x) = \sum_{j=0}^{M-1} \theta_j \phi_j (x)
$$
El modelo sigue siendo lineal en sus parámetros. Por ende lo podemos ajustar con las mismas herramientas que vimos anteriormente. La ventaja de usar funciones base es que el modelo es más flexible, es decir que podemos modelar comportamientos más diversos en los datos.
Veamos algunos ejemplos concretos de regresión lineal con funciones base
**Ejemplo 1: Regresión polinomial**
Si usamos $\phi_j(x) = x^j$ tendríamos
$$
y = f_\theta (x) = \theta_0 + \theta_1 x + \theta_2 x^2 + \ldots,
$$
que nos puede servir cuando la relación entre las variables es cuadrática, cúbica o de orden superior
**Ejemplo 2: Regresión trigonométrica**
Si usamos $\phi_j(x) = \cos(2\pi j x)$ tendríamos
$$
y = f_\theta (x) = \theta_0 + \theta_1 \cos(2\pi x) + \theta_2 \cos(4 \pi x) + \ldots,
$$
que nos puede servir si queremos modelar funciones periódicas pares. Si usamos seno en lugar de coseno podríamos modelar funciones periódicas impares. Si usamos una combinación de seno y coseno podríamos modelar cualquier función periódica (serie de Fourier)
### Ejercicio práctico
Considere los siguientes datos:
```
# Synthetic regression data: sum of two harmonics (cos at frequency 1,
# sin at frequency 2) plus Gaussian noise (sigma = 0.4), sampled at 10
# evenly spaced points on [0, 2].
np.random.seed(1234)
x = np.linspace(0, 2, num=10)
y = 2*np.cos(2.0*np.pi*x) + np.sin(4.0*np.pi*x) + 0.4*np.random.randn(len(x))
# Dense grid used only for plotting smooth model predictions.
x_plot = np.linspace(np.amin(x), np.amax(x), num=100)
```
- Realice una regresión polinomial sobre $(x, y)$
- Muestre graficamente los datos y el resultado de $f_\theta(x_{plot})$
- Use Jupyter widgets para modificar dinamicamente el grado del polinomio entre $M\in[1, 15]$
**Solución paso a paso con comentarios**
```
# Embedded video: step-by-step solution walkthrough for this exercise.
YouTubeVideo_formato('KvIyri8lVq4')
```
¿Qué ocurre cuando $M\geq N$?
> Nuestro modelo se sobre ajusta a los datos
Estudiaremos esto en detalle más adelante
## Sistema infradeterminado (caso $M>N$)
El caso del sistema infradeterminado es aquel que tiene más incognitas (parámetros) que ecuaciones. Este tipo de sistema tiene infinitas soluciones
Considere por ejemplo las soluciones posibles de ajustar un sistema polinomial de segundo orden (tres parámetros) con sólo dos ejemplos
```
# Two data points fitted by a three-parameter (quadratic) model: an
# underdetermined system with infinitely many exact solutions. Plot a
# family of 200 parabolas that all pass through both points.
x = np.array([-2, 2])
y = np.array([4, 4])
fig, ax = plt.subplots(figsize=(6, 4), tight_layout=True)
x_plot = np.linspace(-3, 3, num=100)
# One row of coefficients [theta_0, theta_1, theta_2] per plotted curve.
thetas = np.zeros(shape=(200, 3))
for i, a in enumerate(np.linspace(-10, 10, num=thetas.shape[0])):
    # Fixing theta_0 = a forces theta_2 = 1 - a/4 so that
    # f(-2) = f(2) = 4 holds for every curve in the family.
    ax.plot(x_plot, a + (1 - a/4)*x_plot**2)
    # Fix: the original used `thetas[i:] = ...`, which re-assigns every
    # row from i to the end on each iteration (accidental O(n^2) work;
    # the final array happens to be identical). Assign only row i.
    thetas[i] = [a, 0, (1 - a/4)]
ax.scatter(x, y, s=100, c='k', zorder=10);
```
Más en la práctica, la consecuencia de que el sistema sea infradeterminado es que $A^T A$ no es invertible.
Para resolver el problema infradeterminado se debe agregar una restricción adicional. La más típica es que el vector solución tenga norma mínima, por ejemplo
$$
\min_x \| x \|_2^2 ~\text{s.a.}~ Ax = b
$$
que se resuelve usando $N$ [multiplicadores de Lagrange](https://es.wikipedia.org/wiki/Multiplicadores_de_Lagrange) $\lambda$ (uno por cada ecuación del sistema) como sigue
$$
\begin{align}
\frac{d}{dx} \left( \| x\|_2^2 + \lambda^T (b - Ax) \right) &= 2x - A^T \lambda = 0 \nonumber \\
\Rightarrow 2Ax &= A A^T \lambda \nonumber \\
\Rightarrow 2b &= A A^T \lambda
\end{align}
$$
De donde obtenemos que $\lambda = 2(AA^T)^{-1}b$ y por lo tanto $x = \frac{1}{2} A^T \lambda = A^T (A A^T)^{-1} b$, donde $A^T (A A^T)^{-1}$ se conoce como la pseudo-inversa "por la derecha"
La función `np.linalg.lstsq` usa la pseudo inversa izquierda automáticamente si $N>M$ o la pseudo inversa derecha si $N<M$
Es decir que NumPy asume que la mejor solución del sistema infradeterminado es la de **mínima norma euclidiana**
## Complejidad, sobreajuste y generalización
Un modelo con más parámetros es más flexible pero también más complejo
**Complejidad:** grados de libertad de un modelo
Como vimos en el ejercicio práctico anterior un exceso de flexibilidad puede producir un "ajuste perfecto". Un ajuste perfecto es generalmente una mala idea pues nuestros datos casi siempre tendrán ruido
**Sobreajuste:** Ocurre cuando el modelo se ajusta al ruido de los datos
Considere los siguientes datos ajustados con tres modelos de distinta complejidad
```
# Fit the same noisy quadratic with polynomials of three different
# complexities to illustrate underfitting, a good fit, and overfitting.
x = np.linspace(-3, 3, num=10)
x_plot = np.linspace(np.amin(x), np.amax(x), num=100)
y_clean = np.poly1d([2, -4, 20])  # true model: 2*x**2 - 4*x + 20
np.random.seed(1234)
y = y_clean(x) + 3*np.random.randn(len(x))
# Design matrix with columns x^0, x^1, ..., x^(M-1): an M-parameter
# polynomial of degree M-1.
poly_basis = lambda x, M : np.vstack([x**k for k in range(M)]).T
fig, ax = plt.subplots(1, 3, figsize=(8, 3),
                       tight_layout=True, sharex=True, sharey=True)
for i, (M, title) in enumerate(zip([2, 3, 10], ["muy simple", "adecuado", "muy complejo"])):
    ax[i].plot(x_plot, y_clean(x_plot), lw=2, alpha=.5, label='Modelo real')
    ax[i].scatter(x, y, label='observaciones')
    theta = np.linalg.lstsq(poly_basis(x, M), y, rcond=None)[0]
    # Fix: corrected the displayed label typo 'apredido' -> 'aprendido'.
    ax[i].plot(x_plot, np.dot(poly_basis(x_plot, M), theta), 'k-', label='Modelo aprendido')
    ax[i].set_title(title)
# Fix: the legend was redrawn on ax[0] on every loop iteration with an
# identical result; drawing it once after the loop is sufficient.
ax[0].legend()
```
Del ejemplo podemos ver que cuando el modelo se sobreajusta pierde capacidad de generalización
**Generalización:** Capacidad de predecir adecuadamente los datos que no se usan en el ajuste
Los siguientes mecanísmos se pueden usar para evitar el sobreajuste y mejorar la capacidad de generalización
- Validación: Escoger la complejidad mediante pruebas de validación
- Regularización: Penalizar la complejidad de forma adicional
Se revisará en detalle la primera opción
### Introducción a las técnicas de validación cruzada
Validación cruzada es un conjunto de técnicas donde se busca dividir el conjunto de datos en tres subconjuntos
1. Entrenamiento: Datos que se ocupan para **ajustar el modelo**
1. Validación: Datos que se ocupan para **calibrar el modelo**
1. Prueba: Datos que se ocupan para **comparar distintos modelos**
La forma más simple de crear estos subconjuntos es permutar aleatoriamente los índices de los elementos y dividir los índices en distintas proporciones. Tipicamente se usa una proporción 60/20/20 o 80/10/10 dependiendo del tamaño de la base de datos original. Este tipo de validación cruzada se llama **hold-out**.
<img src="../img/validation1.png" width="700">
El permutar produce un particionamiento aleatorio que busca que cada subconjunto sea **representativo** de la base de datos original. Más adelante veremos técnicas de validación cruzada más sofisticadas.
Para evaluar la calidad de nuestro modelo medimos el error en cada uno de estos subconjuntos
1. El ajuste de los parámetros se realiza minimizando el **error de entrenamiento**
1. Calibrar el modelo, es decir seleccionar los mejores hiperparámetros del modelo, se realiza minimizando el **error de validación**. En el caso particular de la regresión polinomial el hiperparámetro que debemos calibrar es el grado del polinomio.
1. La capacidad de generalización del modelo final se mide usando el **error de prueba**
La siguiente figura muestra un esquema iterativo de validación
<img src="../img/validation2.png" width="700">
Usando este esquema podemos detectar facilmente un modelo sobreajustado ya que presentará un buen desempeño en entrenamiento pero un desempeño deficiente en validación
### Ejercicio práctico
Considere los siguientes datos
```
# Noisy samples from a known cubic polynomial, used for the
# train/validation split exercise below.
x = np.linspace(-5, 5, num=30)
x_plot = np.linspace(np.amin(x), np.amax(x), num=100)
y_clean = np.poly1d([0.1, -0.3, -2, 10])
np.random.seed(1234)
y = y_clean(x) + 1.5*np.random.randn(len(x))
# Design matrix with columns x^0 ... x^(M-1) (degree M-1 polynomial).
poly_basis = lambda x, M : np.vstack([x**k for k in range(M)]).T
```
Considere el modelo de regresión polinomial
- Separe los datos $(x,y)$ aleatoriamente para crear conjuntos de entrenamiento y validación. Se recomienda usar la función `np.random.permutation`
- Entrene con el conjunto de entrenamiento
- Encuentre el grado de polinomio que mejor ajusta los datos del conjunto de validación en base al error cuadrático medio:
$$
\text{MSE} = \frac{1}{N} \sum_{i=1}^N e_i^2
$$
donde $e_i = y_i - f_\theta(x_i)$
**Solución paso a paso con comentarios**
```
# Embedded video: step-by-step solution walkthrough for this exercise.
YouTubeVideo_formato('Ydl2g6w3Wog')
```
### (Extra) ¿En qué consiste la regularización?
Consiste en agregar una penalización adicional al problema
El ejemplo clásico es agregar que la solución tenga norma mínima
$$
\min_x \|Ax-b\|_2^2 + \lambda \|x\|_2^2
$$
En este caso la solución es
$$
\hat x = (A^T A + \lambda I)^{-1} A^T b
$$
que se conoce como **ridge regression** o **regularización de Tikhonov**
$\lambda$ es un hiper-parámetro del modelo y debe ser escogido por el usuario (usando validación)
## Resumen de la lección
En esta lección hemos aprendido a:
- Resolver la regresión lineal multivariada
- Generalizar la regresión lineal con funciones base (polinomios)
- Calibrar nuestros modelos usando técnicas de validación
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.